Merge branch 'master' into modesetting-101
Conflicts:
	linux-core/drm_bufs.c
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
commit 5ce43a346c
@@ -2487,6 +2487,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
     fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->signaled = arg.signaled;
+    fence->sequence = arg.sequence;
     return 0;
 }
 
@@ -25,7 +25,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
 		i915_compat.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
 		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
-		nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o \
+		nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \
 		nv04_timer.o \
 		nv04_mc.o nv40_mc.o nv50_mc.o \
 		nv04_fb.o nv10_fb.o nv40_fb.o \
 
@@ -267,12 +267,12 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_i
 }
 EXPORT_SYMBOL(drm_ati_pcigart_init);
 
-static int ati_pcigart_needs_unbind_cache_adjust(drm_ttm_backend_t *backend)
+static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
 {
     return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
 }
 
-static int ati_pcigart_populate(drm_ttm_backend_t *backend,
+static int ati_pcigart_populate(struct drm_ttm_backend *backend,
                 unsigned long num_pages,
                 struct page **pages)
 {
@@ -329,7 +329,7 @@ static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
     return 0;
 }
 
-static int ati_pcigart_unbind_ttm(drm_ttm_backend_t *backend)
+static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
 {
     ati_pcigart_ttm_backend_t *atipci_be =
         container_of(backend, ati_pcigart_ttm_backend_t, backend);
@@ -353,7 +353,7 @@ static int ati_pcigart_unbind_ttm(drm_ttm_backend_t *backend)
     return 0;
 }
 
-static void ati_pcigart_clear_ttm(drm_ttm_backend_t *backend)
+static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend)
 {
     ati_pcigart_ttm_backend_t *atipci_be =
         container_of(backend, ati_pcigart_ttm_backend_t, backend);
@@ -367,7 +367,7 @@ static void ati_pcigart_clear_ttm(drm_ttm_backend_t *backend)
     atipci_be->num_pages = 0;
 }
 
-static void ati_pcigart_destroy_ttm(drm_ttm_backend_t *backend)
+static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend)
 {
     ati_pcigart_ttm_backend_t *atipci_be;
     if (backend) {
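
The hunks above switch every ati_pcigart backend callback from the drm_ttm_backend_t typedef to plain struct drm_ttm_backend; each callback recovers its driver-private wrapper with container_of(). A minimal sketch of the embedding pattern, with stand-in struct definitions assumed for illustration rather than the exact tree layout:

    #include <linux/kernel.h>   /* container_of() */

    struct drm_ttm_backend {            /* stand-in for the real type */
        uint32_t flags;
    };

    struct ati_pcigart_ttm_backend {    /* driver-private wrapper (sketch) */
        struct drm_ttm_backend backend; /* embedded by value, never a pointer */
        unsigned long num_pages;
    };

    static int example_unbind(struct drm_ttm_backend *backend)
    {
        /* Recover the wrapper from the embedded member. */
        struct ati_pcigart_ttm_backend *atipci_be =
            container_of(backend, struct ati_pcigart_ttm_backend, backend);

        atipci_be->num_pages = 0;
        return 0;
    }
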
@@ -1311,19 +1311,5 @@ static inline void drm_ctl_free(void *pt, size_t size, int area)
 
 /*@}*/
 
-/** Type for the OS's non-sleepable mutex lock */
-#define DRM_SPINTYPE		spinlock_t
-/**
- * Initialize the lock for use.  name is an optional string describing the
- * lock
- */
-#define DRM_SPININIT(l,name)	spin_lock_init(l)
-#define DRM_SPINUNINIT(l)
-#define DRM_SPINLOCK(l)		spin_lock(l)
-#define DRM_SPINUNLOCK(l)	spin_unlock(l)
-#define DRM_SPINLOCK_IRQSAVE(l, _flags)	spin_lock_irqsave(l, _flags);
-#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
-#define DRM_SPINLOCK_ASSERT(l)		do {} while (0)
-
 #endif /* __KERNEL__ */
 #endif
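
The deleted DRM_SPIN* macros were a thin portability wrapper over Linux spinlocks; with them gone, callers use the spinlock API directly. The one-to-one mapping, as a sketch:

    #include <linux/spinlock.h>

    static spinlock_t example_lock;

    static void example(void)
    {
        unsigned long flags;

        spin_lock_init(&example_lock);      /* was DRM_SPININIT(&l, "name") */
        spin_lock(&example_lock);           /* was DRM_SPINLOCK(&l) */
        spin_unlock(&example_lock);         /* was DRM_SPINUNLOCK(&l) */

        spin_lock_irqsave(&example_lock, flags);        /* was DRM_SPINLOCK_IRQSAVE(&l, flags) */
        spin_unlock_irqrestore(&example_lock, flags);   /* was DRM_SPINUNLOCK_IRQRESTORE(&l, flags) */
    }
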
@@ -506,7 +506,7 @@ static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) {
 static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages,
                 struct page **pages) {
 
-    struct drm_agp_ttm_backend *agp_be =
+    struct drm_agp_ttm_backend *agp_be =
         container_of(backend, struct drm_agp_ttm_backend, backend);
     struct page **cur_page, **last_page = pages + num_pages;
     DRM_AGP_MEM *mem;
@@ -562,7 +562,7 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
 
 static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
 
-    struct drm_agp_ttm_backend *agp_be =
+    struct drm_agp_ttm_backend *agp_be =
         container_of(backend, struct drm_agp_ttm_backend, backend);
 
     DRM_DEBUG("drm_agp_unbind_ttm\n");
@@ -574,7 +574,7 @@ static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
 
 static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) {
 
-    struct drm_agp_ttm_backend *agp_be =
+    struct drm_agp_ttm_backend *agp_be =
         container_of(backend, struct drm_agp_ttm_backend, backend);
     DRM_AGP_MEM *mem = agp_be->mem;
 
@@ -604,7 +604,7 @@ static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) {
     }
 }
 
-static struct drm_ttm_backend_func agp_ttm_backend =
+static struct drm_ttm_backend_func agp_ttm_backend =
 {
     .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
     .populate = drm_agp_populate,
@@ -637,7 +637,7 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
         return NULL;
     }
 
-
+
     agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
     if (!agp_be)
         return NULL;
 
@@ -36,23 +36,23 @@
  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
  * when there is a chance that it can be zero before or after the operation.
  *
- * dev->struct_mutex also protects all lists and list heads. Hash tables and hash
- * heads.
+ * dev->struct_mutex also protects all lists and list heads,
+ * Hash tables and hash heads.
  *
  * bo->mutex protects the buffer object itself excluding the usage field.
- * bo->mutex does also protect the buffer list heads, so to manipulate those, we need
- * both the bo->mutex and the dev->struct_mutex.
+ * bo->mutex does also protect the buffer list heads, so to manipulate those,
+ * we need both the bo->mutex and the dev->struct_mutex.
 *
- * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
- * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
- * traversal will, in general, need to be restarted.
+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
+ * the list traversal will, in general, need to be restarted.
 *
 */
 
-static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
-static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
-static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
-static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
 
 static inline uint64_t drm_bo_type_flags(unsigned type)
 {
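
The reflowed comment spells out the locking rule this file lives by: bo->mutex ranks before dev->struct_mutex, so any traversal that holds struct_mutex must drop it before taking a bo->mutex, then restart. A simplified sketch of that restart pattern (not the exact drm_bo.c code; drm_bo_usage_deref_unlocked() is the deref helper declared later in this diff):

    static void example_walk(struct drm_device *dev, struct list_head *lru)
    {
        struct drm_buffer_object *bo;

    restart:
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(bo, lru, lru) {
            atomic_inc(&bo->usage);         /* pin the entry first */
            mutex_unlock(&dev->struct_mutex);

            mutex_lock(&bo->mutex);         /* legal: bo->mutex ranks first */
            /* ... operate on bo ... */
            mutex_unlock(&bo->mutex);

            drm_bo_usage_deref_unlocked(&bo);
            goto restart;                   /* list may have changed meanwhile */
        }
        mutex_unlock(&dev->struct_mutex);
    }
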
@@ -63,7 +63,7 @@ static inline uint64_t drm_bo_type_flags(unsigned type)
  * bo locked. dev->struct_mutex locked.
  */
 
-void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
 {
     struct drm_mem_type_manager *man;
 
@@ -74,7 +74,7 @@ void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
     list_add_tail(&bo->pinned_lru, &man->pinned);
 }
 
-void drm_bo_add_to_lru(struct drm_buffer_object * bo)
+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
 {
     struct drm_mem_type_manager *man;
 
@@ -89,7 +89,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo)
     }
 }
 
-static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
 {
 #ifdef DRM_ODD_MM_COMPAT
     int ret;
@@ -112,7 +112,7 @@ static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
     return 0;
 }
 
-static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
 {
 #ifdef DRM_ODD_MM_COMPAT
     int ret;
@@ -133,7 +133,7 @@ static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
  * Call bo->mutex locked.
  */
 
-static int drm_bo_add_ttm(struct drm_buffer_object * bo)
+static int drm_bo_add_ttm(struct drm_buffer_object *bo)
 {
     struct drm_device *dev = bo->dev;
     int ret = 0;
@@ -171,8 +171,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
     return ret;
 }
 
-static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
-                  struct drm_bo_mem_reg * mem,
+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
+                  struct drm_bo_mem_reg *mem,
                   int evict, int no_wait)
 {
     struct drm_device *dev = bo->dev;
@@ -255,7 +255,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 
     return 0;
 
-      out_err:
+out_err:
     if (old_is_pci || new_is_pci)
         drm_bo_vm_post_move(bo);
 
@@ -274,7 +274,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
  * Wait until the buffer is idle.
  */
 
-int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
         int no_wait)
 {
     int ret;
@@ -286,11 +286,10 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
             drm_fence_usage_deref_unlocked(&bo->fence);
             return 0;
         }
-        if (no_wait) {
+        if (no_wait)
             return -EBUSY;
-        }
-        ret =
-            drm_fence_object_wait(bo->fence, lazy, ignore_signals,
+
+        ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
                       bo->fence_type);
         if (ret)
             return ret;
@@ -301,7 +300,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
 }
 EXPORT_SYMBOL(drm_bo_wait);
 
-static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
 {
     struct drm_device *dev = bo->dev;
     struct drm_buffer_manager *bm = &dev->bm;
@@ -336,7 +335,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
  * fence object and removing from lru lists and memory managers.
  */
 
-static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
 {
     struct drm_device *dev = bo->dev;
     struct drm_buffer_manager *bm = &dev->bm;
@@ -358,9 +357,8 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
 
     mutex_lock(&dev->struct_mutex);
 
-    if (!atomic_dec_and_test(&bo->usage)) {
+    if (!atomic_dec_and_test(&bo->usage))
         goto out;
-    }
 
     if (!bo->fence) {
         list_del_init(&bo->lru);
@@ -388,7 +386,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
               ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
     }
 
-      out:
+out:
     mutex_unlock(&bo->mutex);
     return;
 }
@@ -398,7 +396,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
 * to the buffer object. Then destroy it.
 */
 
-static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
 {
     struct drm_device *dev = bo->dev;
     struct drm_buffer_manager *bm = &dev->bm;
@@ -446,7 +444,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
 * Call dev->struct_mutex locked.
 */
 
-static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
 {
     struct drm_buffer_manager *bm = &dev->bm;
 
@@ -465,9 +463,8 @@ static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
 
         drm_bo_cleanup_refs(entry, remove_all);
 
-        if (nentry) {
+        if (nentry)
             atomic_dec(&nentry->usage);
-        }
     }
 }
@@ -501,21 +498,20 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
     mutex_unlock(&dev->struct_mutex);
 }
 
-void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
 {
-    struct drm_buffer_object *tmp_bo = *bo;
+    struct drm_buffer_object *tmp_bo = *bo;
     bo = NULL;
 
     DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
 
-    if (atomic_dec_and_test(&tmp_bo->usage)) {
+    if (atomic_dec_and_test(&tmp_bo->usage))
         drm_bo_destroy_locked(tmp_bo);
-    }
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
 
-static void drm_bo_base_deref_locked(struct drm_file * file_priv,
-                     struct drm_user_object * uo)
+static void drm_bo_base_deref_locked(struct drm_file *file_priv,
+                     struct drm_user_object *uo)
 {
     struct drm_buffer_object *bo =
         drm_user_object_entry(uo, struct drm_buffer_object, base);
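
drm_bo_usage_deref_locked() deliberately takes a struct drm_buffer_object ** rather than a plain pointer: the call consumes the reference, and the double-pointer convention keeps call sites from touching the variable afterwards. A typical call shape (sketch; the lookup helper appears elsewhere in this diff):

    struct drm_buffer_object *bo;

    bo = drm_lookup_buffer_object(file_priv, handle, 1);
    if (bo) {
        /* ... use bo ... */
        mutex_lock(&dev->struct_mutex);
        drm_bo_usage_deref_locked(&bo); /* reference consumed; do not use bo */
        mutex_unlock(&dev->struct_mutex);
    }
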
@@ -526,7 +522,7 @@ static void drm_bo_base_deref_locked(struct drm_file * file_priv,
     drm_bo_usage_deref_locked(&bo);
 }
 
-void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
 {
     struct drm_buffer_object *tmp_bo = *bo;
     struct drm_device *dev = tmp_bo->dev;
@@ -582,8 +578,8 @@ EXPORT_SYMBOL(drm_putback_buffer_objects);
 int drm_fence_buffer_objects(struct drm_device *dev,
                  struct list_head *list,
                  uint32_t fence_flags,
-                 struct drm_fence_object * fence,
-                 struct drm_fence_object ** used_fence)
+                 struct drm_fence_object *fence,
+                 struct drm_fence_object **used_fence)
 {
     struct drm_buffer_manager *bm = &dev->bm;
     struct drm_buffer_object *entry;
@@ -667,7 +663,7 @@ int drm_fence_buffer_objects(struct drm_device *dev,
         l = list->next;
     }
     DRM_DEBUG("Fenced %d buffers\n", count);
-      out:
+out:
     mutex_unlock(&dev->struct_mutex);
     *used_fence = fence;
     return ret;
@@ -678,7 +674,7 @@ EXPORT_SYMBOL(drm_fence_buffer_objects);
 * bo->mutex locked
 */
 
-static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
             int no_wait)
 {
     int ret = 0;
@@ -686,7 +682,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
     struct drm_bo_mem_reg evict_mem;
 
     /*
-     * Someone might have modified the buffer before we took the buffer mutex.
+     * Someone might have modified the buffer before we took the
+     * buffer mutex.
      */
 
     if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
@@ -737,7 +734,7 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
     DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
             _DRM_BO_FLAG_EVICTED);
 
-      out:
+out:
     return ret;
 }
 
@@ -745,8 +742,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
-static int drm_bo_mem_force_space(struct drm_device * dev,
-                  struct drm_bo_mem_reg * mem,
+static int drm_bo_mem_force_space(struct drm_device *dev,
+                  struct drm_bo_mem_reg *mem,
                   uint32_t mem_type, int no_wait)
 {
     struct drm_mm_node *node;
@@ -794,10 +791,10 @@ static int drm_bo_mem_force_space(struct drm_device * dev,
     return 0;
 }
 
-static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
                 int disallow_fixed,
                 uint32_t mem_type,
-                uint64_t mask, uint32_t * res_mask)
+                uint64_t mask, uint32_t *res_mask)
 {
     uint64_t cur_flags = drm_bo_type_flags(mem_type);
     uint64_t flag_diff;
@@ -830,7 +827,7 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
 
     if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
         ((mask & DRM_BO_FLAG_MAPPABLE) ||
-         (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
+         (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
         return 0;
 
     *res_mask = cur_flags;
@@ -845,8 +842,8 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
 * drm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
-int drm_bo_mem_space(struct drm_buffer_object * bo,
-             struct drm_bo_mem_reg * mem, int no_wait)
+int drm_bo_mem_space(struct drm_buffer_object *bo,
+             struct drm_bo_mem_reg *mem, int no_wait)
 {
     struct drm_device *dev = bo->dev;
     struct drm_buffer_manager *bm = &dev->bm;
@@ -940,31 +937,27 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
     ret = (has_eagain) ? -EAGAIN : -ENOMEM;
     return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_mem_space);
 
-static int drm_bo_new_mask(struct drm_buffer_object * bo,
+static int drm_bo_new_mask(struct drm_buffer_object *bo,
                uint64_t new_flags, uint64_t used_mask)
 {
     uint32_t new_props;
 
     if (bo->type == drm_bo_type_user &&
-        ((used_mask & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
+        ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
          (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
         DRM_ERROR("User buffers require cache-coherent memory.\n");
         return -EINVAL;
     }
 
     if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
-        DRM_ERROR
-            ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
-             "processes.\n");
+        DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
         return -EPERM;
     }
 
     if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
-        DRM_ERROR
-            ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
+        DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
         return -EPERM;
     }
 
@@ -1014,7 +1007,7 @@ EXPORT_SYMBOL(drm_lookup_buffer_object);
 * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
 */
 
-static int drm_bo_quick_busy(struct drm_buffer_object * bo)
+static int drm_bo_quick_busy(struct drm_buffer_object *bo)
 {
     struct drm_fence_object *fence = bo->fence;
 
@@ -1034,7 +1027,7 @@ static int drm_bo_quick_busy(struct drm_buffer_object * bo)
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */
 
-static int drm_bo_busy(struct drm_buffer_object * bo)
+static int drm_bo_busy(struct drm_buffer_object *bo)
 {
     struct drm_fence_object *fence = bo->fence;
 
@@ -1054,7 +1047,7 @@ static int drm_bo_busy(struct drm_buffer_object * bo)
     return 0;
 }
 
-static int drm_bo_evict_cached(struct drm_buffer_object * bo)
+static int drm_bo_evict_cached(struct drm_buffer_object *bo)
 {
     int ret = 0;
 
@@ -1068,7 +1061,7 @@ static int drm_bo_evict_cached(struct drm_buffer_object * bo)
 * Wait until a buffer is unmapped.
 */
 
-static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
 {
     int ret = 0;
 
@@ -1084,7 +1077,7 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
     return ret;
 }
 
-static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
 {
     int ret;
 
@@ -1099,7 +1092,7 @@ static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
 * Until then, we cannot really do anything with it except delete it.
 */
 
-static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
+static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
                 int eagain_if_wait)
 {
     int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
@@ -1132,7 +1125,7 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
 * Bo locked.
 */
 
-static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
+static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
                 struct drm_bo_info_rep *rep)
 {
     if (!rep)
@@ -1236,7 +1229,7 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
 
     } else
         drm_bo_fill_rep_arg(bo, rep);
-      out:
+out:
     mutex_unlock(&bo->mutex);
     drm_bo_usage_deref_unlocked(&bo);
     return ret;
@@ -1265,7 +1258,7 @@ static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
 
     drm_remove_ref_object(file_priv, ro);
     drm_bo_usage_deref_locked(&bo);
-      out:
+out:
     mutex_unlock(&dev->struct_mutex);
     return ret;
 }
@@ -1275,7 +1268,7 @@ static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
 */
 
 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
-                     struct drm_user_object * uo,
+                     struct drm_user_object *uo,
                      enum drm_ref_type action)
 {
     struct drm_buffer_object *bo =
@@ -1297,7 +1290,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */
 
-int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,
+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
                int no_wait, int move_unfenced)
 {
     struct drm_device *dev = bo->dev;
@@ -1337,7 +1330,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,
 
     ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
 
-      out_unlock:
+out_unlock:
     if (ret || !move_unfenced) {
         mutex_lock(&dev->struct_mutex);
         if (mem.mm_node) {
@@ -1352,7 +1345,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,
     return ret;
 }
 
-static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
 {
     uint32_t flag_diff = (mem->mask ^ mem->flags);
 
@@ -1360,9 +1353,9 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
         return 0;
     if ((flag_diff & DRM_BO_FLAG_CACHED) &&
         (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
-         (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
-        return 0;
-    }
+         (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
+        return 0;
+
     if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
         ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
          (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
@@ -1374,7 +1367,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
 * bo locked.
 */
 
-static int drm_buffer_object_validate(struct drm_buffer_object * bo,
+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
                       uint32_t fence_class,
                       int move_unfenced, int no_wait)
 {
@@ -1417,7 +1410,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 
     ret = drm_bo_wait_unmapped(bo, no_wait);
     if (ret) {
-        DRM_ERROR("Timed out waiting for buffer unmap.\n");
+        DRM_ERROR("Timed out waiting for buffer unmap.\n");
         return ret;
     }
 
@@ -1534,12 +1527,12 @@ out:
 EXPORT_SYMBOL(drm_bo_do_validate);
 
 
-int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
                uint32_t fence_class,
-               uint64_t flags, uint64_t mask,
+               uint64_t flags, uint64_t mask,
                uint32_t hint,
                int use_old_fence_class,
-               struct drm_bo_info_rep * rep,
+               struct drm_bo_info_rep *rep,
                struct drm_buffer_object **bo_rep)
 {
     struct drm_device *dev = file_priv->head->dev;
@@ -1551,7 +1544,7 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
     bo = drm_lookup_buffer_object(file_priv, handle, 1);
     mutex_unlock(&dev->struct_mutex);
 
-    if (!bo)
+    if (!bo)
         return -EINVAL;
 
     if (use_old_fence_class)
@@ -1561,10 +1554,10 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
     * Only allow creator to change shared buffer mask.
     */
 
-    if (bo->base.owner != file_priv)
+    if (bo->base.owner != file_priv)
         mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
 
-
+
     ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
                  no_wait, rep);
 
@@ -1587,9 +1580,9 @@ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
     bo = drm_lookup_buffer_object(file_priv, handle, 1);
     mutex_unlock(&dev->struct_mutex);
 
-    if (!bo) {
+    if (!bo)
         return -EINVAL;
-    }
+
     mutex_lock(&bo->mutex);
     if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
         (void)drm_bo_busy(bo);
@@ -1612,9 +1605,8 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
     bo = drm_lookup_buffer_object(file_priv, handle, 1);
     mutex_unlock(&dev->struct_mutex);
 
-    if (!bo) {
+    if (!bo)
         return -EINVAL;
-    }
 
     mutex_lock(&bo->mutex);
     ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1626,7 +1618,7 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
 
     drm_bo_fill_rep_arg(bo, rep);
 
-      out:
+out:
     mutex_unlock(&bo->mutex);
     drm_bo_usage_deref_unlocked(&bo);
     return ret;
@@ -1639,7 +1631,7 @@ int drm_buffer_object_create(struct drm_device *dev,
                  uint32_t hint,
                  uint32_t page_alignment,
                  unsigned long buffer_start,
-                 struct drm_buffer_object ** buf_obj)
+                 struct drm_buffer_object **buf_obj)
 {
     struct drm_buffer_manager *bm = &dev->bm;
     struct drm_buffer_object *bo;
@@ -1705,7 +1697,7 @@ int drm_buffer_object_create(struct drm_device *dev,
     *buf_obj = bo;
     return 0;
 
-      out_err:
+out_err:
     mutex_unlock(&bo->mutex);
 
     drm_bo_usage_deref_unlocked(&bo);
@@ -1730,7 +1722,7 @@ static int drm_bo_add_user_object(struct drm_file *file_priv,
     bo->base.ref_struct_locked = NULL;
     bo->base.unref = drm_buffer_user_object_unmap;
 
-      out:
+out:
     mutex_unlock(&dev->struct_mutex);
     return ret;
 }
@@ -1763,14 +1755,14 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
                  req->buffer_start, &entry);
     if (ret)
         goto out;
-
+
     ret = drm_bo_add_user_object(file_priv, entry,
                      req->mask & DRM_BO_FLAG_SHAREABLE);
     if (ret) {
         drm_bo_usage_deref_unlocked(&entry);
         goto out;
     }
-
+
     mutex_lock(&entry->mutex);
     drm_bo_fill_rep_arg(entry, rep);
     mutex_unlock(&entry->mutex);
@@ -1779,7 +1771,7 @@ out:
     return ret;
 }
 
-int drm_bo_setstatus_ioctl(struct drm_device *dev,
+int drm_bo_setstatus_ioctl(struct drm_device *dev,
                void *data, struct drm_file *file_priv)
 {
     struct drm_bo_map_wait_idle_arg *arg = data;
@@ -1860,7 +1852,7 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *
                    drm_buffer_type, &uo);
     if (ret)
         return ret;
-
+
     ret = drm_bo_handle_info(file_priv, req->handle, rep);
     if (ret)
         return ret;
@@ -1920,7 +1912,7 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *
     return 0;
 }
 
-static int drm_bo_leave_list(struct drm_buffer_object * bo,
+static int drm_bo_leave_list(struct drm_buffer_object *bo,
                  uint32_t mem_type,
                  int free_pinned,
                  int allow_errors)
@@ -1966,7 +1958,7 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,
         }
     }
 
-      out:
+out:
     mutex_unlock(&bo->mutex);
     return ret;
 }
@@ -1985,7 +1977,7 @@ static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
 * dev->struct_mutex locked.
 */
 
-static int drm_bo_force_list_clean(struct drm_device * dev,
+static int drm_bo_force_list_clean(struct drm_device *dev,
                    struct list_head *head,
                    unsigned mem_type,
                    int free_pinned,
@@ -2050,7 +2042,7 @@ restart:
     return 0;
 }
 
-int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
 {
     struct drm_buffer_manager *bm = &dev->bm;
     struct drm_mem_type_manager *man = &bm->man[mem_type];
@@ -2092,7 +2084,7 @@ EXPORT_SYMBOL(drm_bo_clean_mm);
 *point since we have the hardware lock.
 */
 
-static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
+static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
 {
     int ret;
     struct drm_buffer_manager *bm = &dev->bm;
@@ -2117,7 +2109,7 @@ static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
     return ret;
 }
 
-int drm_bo_init_mm(struct drm_device * dev,
+int drm_bo_init_mm(struct drm_device *dev,
            unsigned type,
            unsigned long p_offset, unsigned long p_size)
 {
@@ -2164,11 +2156,11 @@ EXPORT_SYMBOL(drm_bo_init_mm);
 /*
 * This function is intended to be called on drm driver unload.
 * If you decide to call it from lastclose, you must protect the call
-* from a potentially racing drm_bo_driver_init in firstopen.
+* from a potentially racing drm_bo_driver_init in firstopen.
 * (This may happen on X server restart).
 */
 
-int drm_bo_driver_finish(struct drm_device * dev)
+int drm_bo_driver_finish(struct drm_device *dev)
 {
     struct drm_buffer_manager *bm = &dev->bm;
     int ret = 0;
|
@ -2195,24 +2187,22 @@ int drm_bo_driver_finish(struct drm_device * dev)
|
|||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
if (!cancel_delayed_work(&bm->wq)) {
|
||||
if (!cancel_delayed_work(&bm->wq))
|
||||
flush_scheduled_work();
|
||||
}
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_bo_delayed_delete(dev, 1);
|
||||
if (list_empty(&bm->ddestroy)) {
|
||||
if (list_empty(&bm->ddestroy))
|
||||
DRM_DEBUG("Delayed destroy list was clean\n");
|
||||
}
|
||||
if (list_empty(&bm->man[0].lru)) {
|
||||
|
||||
if (list_empty(&bm->man[0].lru))
|
||||
DRM_DEBUG("Swap list was clean\n");
|
||||
}
|
||||
if (list_empty(&bm->man[0].pinned)) {
|
||||
|
||||
if (list_empty(&bm->man[0].pinned))
|
||||
DRM_DEBUG("NO_MOVE list was clean\n");
|
||||
}
|
||||
if (list_empty(&bm->unfenced)) {
|
||||
|
||||
if (list_empty(&bm->unfenced))
|
||||
DRM_DEBUG("Unfenced list was clean\n");
|
||||
}
|
||||
out:
|
||||
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
|
||||
unlock_page(bm->dummy_read_page);
|
||||
|
@@ -2220,6 +2210,8 @@ int drm_bo_driver_finish(struct drm_device * dev)
     ClearPageReserved(bm->dummy_read_page);
 #endif
     __free_page(bm->dummy_read_page);
+
+out:
     mutex_unlock(&dev->struct_mutex);
     return ret;
 }
@@ -2228,11 +2220,11 @@ EXPORT_SYMBOL(drm_bo_driver_finish);
 /*
 * This function is intended to be called on drm driver load.
 * If you decide to call it from firstopen, you must protect the call
-* from a potentially racing drm_bo_driver_finish in lastclose.
+* from a potentially racing drm_bo_driver_finish in lastclose.
 * (This may happen on X server restart).
 */
 
-int drm_bo_driver_init(struct drm_device * dev)
+int drm_bo_driver_init(struct drm_device *dev)
 {
     struct drm_bo_driver *driver = dev->driver->bo_driver;
     struct drm_buffer_manager *bm = &dev->bm;
@@ -2275,11 +2267,10 @@ int drm_bo_driver_init(struct drm_device * dev)
     bm->cur_pages = 0;
     INIT_LIST_HEAD(&bm->unfenced);
     INIT_LIST_HEAD(&bm->ddestroy);
-      out_unlock:
+out_unlock:
     mutex_unlock(&dev->struct_mutex);
     return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_driver_init);
 
 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
@ -2389,13 +2380,13 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
|
|||
DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
|
||||
ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
ret = drm_bo_lock_mm(dev, arg->mem_type);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
@ -2407,8 +2398,8 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
|
|||
return 0;
|
||||
}
|
||||
|
||||
int drm_mm_unlock_ioctl(struct drm_device *dev,
|
||||
void *data,
|
||||
int drm_mm_unlock_ioctl(struct drm_device *dev,
|
||||
void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_mm_type_arg *arg = data;
|
||||
|
@ -2425,7 +2416,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev,
|
|||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2433,7 +2424,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev,
|
|||
* buffer object vm functions.
|
||||
*/
|
||||
|
||||
int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
|
||||
int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
|
||||
{
|
||||
struct drm_buffer_manager *bm = &dev->bm;
|
||||
struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
|
||||
|
@ -2450,7 +2441,6 @@ int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
|
|||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_mem_reg_is_pci);
|
||||
|
||||
/**
|
||||
|
@ -2496,7 +2486,7 @@ int drm_bo_pci_offset(struct drm_device *dev,
|
|||
* Call bo->mutex locked.
|
||||
*/
|
||||
|
||||
void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
|
||||
void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
|
||||
{
|
||||
struct drm_device *dev = bo->dev;
|
||||
loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
|
||||
|
@ -2508,9 +2498,9 @@ void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
|
|||
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
|
||||
}
|
||||
|
||||
static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
|
||||
static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
|
||||
{
|
||||
struct drm_map_list *list;
|
||||
struct drm_map_list *list;
|
||||
drm_local_map_t *map;
|
||||
struct drm_device *dev = bo->dev;
|
||||
|
||||
|
@ -2538,7 +2528,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
|
|||
drm_bo_usage_deref_locked(&bo);
|
||||
}
|
||||
|
||||
static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
|
||||
static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
|
||||
{
|
||||
struct drm_map_list *list = &bo->map_list;
|
||||
drm_local_map_t *map;
|
||||
|
@ -2579,11 +2569,11 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int drm_bo_version_ioctl(struct drm_device *dev, void *data,
|
||||
int drm_bo_version_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
|
||||
|
||||
|
||||
arg->major = DRM_BO_INIT_MAJOR;
|
||||
arg->minor = DRM_BO_INIT_MINOR;
|
||||
arg->patchlevel = DRM_BO_INIT_PATCH;
|
||||
|
|
|
@@ -31,19 +31,19 @@
 /*
 * This file implements a simple replacement for the buffer manager use
 * of the heavyweight hardware lock.
-* The lock is a read-write lock. Taking it in read mode is fast, and
+* The lock is a read-write lock. Taking it in read mode is fast, and
 * intended for in-kernel use only.
 * Taking it in write mode is slow.
 *
-* The write mode is used only when there is a need to block all
-* user-space processes from allocating a
+* The write mode is used only when there is a need to block all
+* user-space processes from allocating a
 * new memory area.
 * Typical use in write mode is X server VT switching, and it's allowed
 * to leave kernel space with the write lock held. If a user-space process
 * dies while having the write-lock, it will be released during the file
 * descriptor release.
 *
-* The read lock is typically placed at the start of an IOCTL- or
+* The read lock is typically placed at the start of an IOCTL- or
 * user-space callable function that may end up allocating a memory area.
 * This includes setstatus, super-ioctls and no_pfn; the latter may move
 * unmappable regions to mappable. It's a bug to leave kernel space with the
@@ -53,7 +53,7 @@
 * latency. The locking functions will return -EAGAIN if interrupted by a
 * signal.
 *
-* Locking order: The lock should be taken BEFORE any kernel mutexes
+* Locking order: The lock should be taken BEFORE any kernel mutexes
 * or spinlocks.
 */
 
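
The comment block defines the buffer-manager read-write lock: a cheap shared read lock around any user-callable path that may allocate memory, and a slow exclusive write lock for global stalls such as X server VT switches. A sketch of the read-side usage it implies (signatures taken from the hunks nearby):

    static int example_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
    {
        int ret;

        ret = drm_bo_read_lock(&dev->bm.bm_lock);  /* -EAGAIN if interrupted */
        if (ret)
            return ret;

        /* ... allocate or validate buffer objects; per the locking-order
         * rule, kernel mutexes and spinlocks are taken only after this lock ... */

        drm_bo_read_unlock(&dev->bm.bm_lock);
        return 0;
    }
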
@@ -73,7 +73,6 @@ void drm_bo_read_unlock(struct drm_bo_lock *lock)
     if (atomic_read(&lock->readers) == 0)
         wake_up_interruptible(&lock->queue);
 }
-
 EXPORT_SYMBOL(drm_bo_read_unlock);
 
 int drm_bo_read_lock(struct drm_bo_lock *lock)
@@ -95,7 +94,6 @@ int drm_bo_read_lock(struct drm_bo_lock *lock)
     }
     return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_read_lock);
 
 static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
@@ -123,9 +121,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
     int ret = 0;
     struct drm_device *dev;
 
-    if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) {
+    if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
         return -EINVAL;
-    }
 
     while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
         ret = wait_event_interruptible
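
In drm_bo_write_lock() the exclusive state is claimed in two atomic steps: write_lock_pending goes 0 to 1 to serialize competing writers, then readers goes 0 to -1 once the last reader drains. A reduced sketch of that claim sequence; the exact wait condition is an assumption, modeled on the wake-up in drm_bo_read_unlock() above:

    if (atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)
        return -EINVAL;             /* a writer is already pending */

    while (atomic_cmpxchg(&lock->readers, 0, -1) != 0) {
        /* readers still active: sleep until the count drains to zero */
        ret = wait_event_interruptible(lock->queue,
                                       atomic_read(&lock->readers) == 0);
        if (ret)
            return -EAGAIN;
    }
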
@@ -140,7 +137,7 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
 
     /*
     * Add a dummy user-object, the destructor of which will
-    * make sure the lock is released if the client dies
+    * make sure the lock is released if the client dies
     * while holding it.
     */
 
@@ -149,9 +146,9 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
     ret = drm_add_user_object(file_priv, &lock->base, 0);
     lock->base.remove = &drm_bo_write_lock_remove;
     lock->base.type = drm_lock_type;
-    if (ret) {
+    if (ret)
         (void)__drm_bo_write_unlock(lock);
-    }
+
     mutex_unlock(&dev->struct_mutex);
 
     return ret;
 
@@ -1,8 +1,8 @@
 /**************************************************************************
- *
+ *
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
- *
+ *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
@@ -10,7 +10,7 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
- *
+ *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
@@ -19,8 +19,8 @@
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
@@ -35,7 +35,7 @@
 * have not been requested to free also pinned regions.
 */
 
-static void drm_bo_free_old_node(struct drm_buffer_object * bo)
+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
 {
     struct drm_bo_mem_reg *old_mem = &bo->mem;
 
@@ -48,8 +48,8 @@ static void drm_bo_free_old_node(struct drm_buffer_object * bo)
     old_mem->mm_node = NULL;
 }
 
-int drm_bo_move_ttm(struct drm_buffer_object * bo,
-            int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+int drm_bo_move_ttm(struct drm_buffer_object *bo,
+            int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
 {
     struct drm_ttm *ttm = bo->ttm;
     struct drm_bo_mem_reg *old_mem = &bo->mem;
@@ -82,7 +82,6 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
     DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
     return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_move_ttm);
 
 /**
@@ -90,17 +89,17 @@ EXPORT_SYMBOL(drm_bo_move_ttm);
 *
 * \param bo The buffer object.
 * \return Failure indication.
- *
+ *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Returns -ENOMEM if the ioremap operation failed.
 * Otherwise returns zero.
- *
+ *
 * After a successfull call, bo->iomap contains the virtual address, or NULL
-* if the buffer object content is not accessible through PCI space.
+* if the buffer object content is not accessible through PCI space.
 * Call bo->mutex locked.
 */
 
-int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
             void **virtual)
 {
     struct drm_buffer_manager *bm = &dev->bm;
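
The kerneldoc above fixes the contract of drm_mem_reg_ioremap(): -EINVAL when the region is not mappable, -ENOMEM when the ioremap fails, and on success a virtual address that may legitimately be NULL when the content is not reachable through PCI space. A call-site sketch honoring that contract:

    void *virtual = NULL;
    int ret;

    ret = drm_mem_reg_ioremap(dev, &bo->mem, &virtual);
    if (ret)
        return ret;     /* -EINVAL: unmappable; -ENOMEM: ioremap failed */

    if (virtual) {
        /* ... PCI-visible access path ... */
        drm_mem_reg_iounmap(dev, &bo->mem, virtual);
    }
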
@@ -136,7 +135,7 @@ EXPORT_SYMBOL(drm_mem_reg_ioremap);
 * Call bo->mutex locked.
 */
 
-void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
+void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
              void *virtual)
 {
     struct drm_buffer_manager *bm;
@@ -145,9 +144,8 @@ void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
     bm = &dev->bm;
     man = &bm->man[mem->mem_type];
 
-    if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+    if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
         iounmap(virtual);
-    }
 }
 EXPORT_SYMBOL(drm_mem_reg_iounmap);
 
@@ -164,7 +162,8 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page)
     return 0;
 }
 
-static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page)
+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
+                unsigned long page)
 {
     struct page *d = drm_ttm_get_page(ttm, page);
     void *dst;
@@ -182,7 +181,7 @@ static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long p
     return 0;
 }
 
-static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page)
+static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
 {
     struct page *s = drm_ttm_get_page(ttm, page);
     void *src;
@@ -200,8 +199,8 @@ static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long p
     return 0;
 }
 
-int drm_bo_move_memcpy(struct drm_buffer_object * bo,
-               int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+int drm_bo_move_memcpy(struct drm_buffer_object *bo,
+               int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
 {
     struct drm_device *dev = bo->dev;
     struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
@@ -252,7 +251,7 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo,
         goto out1;
     }
     mb();
-      out2:
+out2:
     drm_bo_free_old_node(bo);
 
     *old_mem = *new_mem;
@@ -266,13 +265,12 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo,
         bo->ttm = NULL;
     }
 
-      out1:
+out1:
     drm_mem_reg_iounmap(dev, new_mem, new_iomap);
-      out:
+out:
     drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
     return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_move_memcpy);
 
 /*
@@ -281,8 +279,8 @@ EXPORT_SYMBOL(drm_bo_move_memcpy);
 * object. Call bo->mutex locked.
 */
 
-int drm_buffer_object_transfer(struct drm_buffer_object * bo,
-                   struct drm_buffer_object ** new_obj)
+int drm_buffer_object_transfer(struct drm_buffer_object *bo,
+                   struct drm_buffer_object **new_obj)
 {
     struct drm_buffer_object *fbo;
     struct drm_device *dev = bo->dev;
@@ -323,12 +321,10 @@ int drm_buffer_object_transfer(struct drm_buffer_object * bo,
 * We cannot restart until it has finished.
 */
 
-int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
-                  int evict,
-                  int no_wait,
-                  uint32_t fence_class,
-                  uint32_t fence_type,
-                  uint32_t fence_flags, struct drm_bo_mem_reg * new_mem)
+int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
+                  int evict, int no_wait, uint32_t fence_class,
+                  uint32_t fence_type, uint32_t fence_flags,
+                  struct drm_bo_mem_reg *new_mem)
 {
     struct drm_device *dev = bo->dev;
     struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
@@ -350,11 +346,11 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 #ifdef DRM_ODD_MM_COMPAT
     /*
     * In this mode, we don't allow pipelining a copy blit,
-    * since the buffer will be accessible from user space
+    * since the buffer will be accessible from user space
     * the moment we return and rebuild the page tables.
     *
     * With normal vm operation, page tables are rebuilt
-    * on demand using fault(), which waits for buffer idle.
+    * on demand using fault(), which waits for buffer idle.
     */
     if (1)
 #else
@@ -408,7 +404,6 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
     DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
     return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
 
 int drm_bo_same_page(unsigned long offset,
@@ -421,13 +416,11 @@ EXPORT_SYMBOL(drm_bo_same_page);
 unsigned long drm_bo_offset_end(unsigned long offset,
                 unsigned long end)
 {
-
     offset = (offset + PAGE_SIZE) & PAGE_MASK;
     return (end < offset) ? end : offset;
 }
 EXPORT_SYMBOL(drm_bo_offset_end);
 
-
 static pgprot_t drm_kernel_io_prot(uint32_t map_type)
 {
     pgprot_t tmp = PAGE_KERNEL;
@@ -476,8 +469,9 @@ static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
     return (!map->virtual) ? -ENOMEM : 0;
 }
 
-static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
-               unsigned long num_pages, struct drm_bo_kmap_obj *map)
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
+               unsigned long start_page, unsigned long num_pages,
+               struct drm_bo_kmap_obj *map)
 {
     struct drm_device *dev = bo->dev;
     struct drm_bo_mem_reg *mem = &bo->mem;
@@ -504,7 +498,7 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag
     * Populate the part we're mapping;
     */
 
-    for (i = start_page; i< start_page + num_pages; ++i) {
+    for (i = start_page; i < start_page + num_pages; ++i) {
         d = drm_ttm_get_page(ttm, i);
         if (!d)
             return -ENOMEM;
@@ -531,7 +525,8 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag
 * and caching policy the buffer currently has.
 * Mapping multiple pages or buffers that live in io memory is a bit slow and
 * consumes vmalloc space. Be restrictive with such mappings.
-* Mapping single pages usually returns the logical kernel address, (which is fast)
+* Mapping single pages usually returns the logical kernel address,
+* (which is fast)
 * but may use slower temporary mappings for high memory pages or
 * uncached / write-combined pages.
 *
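
Per the comment, the kmap path picks the cheapest mapping for the buffer's current placement and records the flavor in the map object, which drm_bo_kunmap() later switches on. A usage sketch; drm_bo_kmap() is assumed here to be the public entry point wrapping drm_bo_kmap_ttm()/drm_bo_ioremap() above:

    struct drm_bo_kmap_obj kmap;
    int ret;

    ret = drm_bo_kmap(bo, start_page, 1, &kmap);    /* assumed wrapper */
    if (ret)
        return ret;

    memset(kmap.virtual, 0, PAGE_SIZE);     /* virtual is filled in by the map call */
    drm_bo_kunmap(&kmap);                   /* dispatches on bo_kmap_type */
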
@@ -582,7 +577,7 @@ void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
     if (!map->virtual)
         return;
 
-    switch(map->bo_kmap_type) {
+    switch (map->bo_kmap_type) {
     case bo_map_iomap:
         iounmap(map->virtual);
         break;
 
@@ -80,10 +80,10 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
         int ret;
         hash->key = user_token >> PAGE_SHIFT;
         ret = drm_ht_insert_item(&dev->map_hash, hash);
-        if (ret != -EINVAL)
+        if (ret != -EINVAL)
             return ret;
     }
-    return drm_ht_just_insert_please(&dev->map_hash, hash,
+    return drm_ht_just_insert_please(&dev->map_hash, hash,
                      user_token, 32 - PAGE_SHIFT - 3,
                      0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
 }
@@ -297,7 +297,7 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
 
     /* Assign a 32-bit handle */
 
-    user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
+    user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
         map->offset;
     ret = drm_map_handle(dev, &list->hash, user_token, 0);
 
@@ -379,7 +379,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
     list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
         if (r_list->map == map) {
             list_del(&r_list->head);
-            drm_ht_remove_key(&dev->map_hash,
+            drm_ht_remove_key(&dev->map_hash,
                       r_list->user_token >> PAGE_SHIFT);
             drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
             found = 1;
@@ -822,9 +822,9 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
     page_count = 0;
 
     while (entry->buf_count < count) {
-
+
         dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
-
+
         if (!dmah) {
             /* Set count correctly so we free the proper amount. */
             entry->buf_count = count;
@@ -1601,5 +1601,3 @@ int drm_order(unsigned long size)
     return order;
 }
 EXPORT_SYMBOL(drm_order);
-
-
 
@@ -1,5 +1,5 @@
 /**************************************************************************
- *
+ *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
@@ -13,7 +13,7 @@
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
+ *
 **************************************************************************/
 /*
 * This code provides access to unexported mm kernel features. It is necessary
@@ -21,7 +21,7 @@
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-*          Linux kernel mm subsystem authors.
+*          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */
 
@@ -50,7 +50,7 @@ int drm_unmap_page_from_agp(struct page *page)
     * performance reasons */
     return i;
 }
-#endif
+#endif
 
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
@@ -80,22 +80,22 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 
 /*
 * vm code for kernels below 2.6.15 in which version a major vm write
-* occured. This implement a simple straightforward
+* occured. This implement a simple straightforward
 * version similar to what's going to be
 * in kernel 2.6.19+
 * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
 * nopfn.
-*/
+*/
 
 static struct {
     spinlock_t lock;
     struct page *dummy_page;
     atomic_t present;
-} drm_np_retry =
+} drm_np_retry =
 {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
 
-static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                     struct fault_data *data);
 
 
@@ -126,7 +126,7 @@ void free_nopage_retry(void)
 }
 
 struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
-                  unsigned long address,
+                  unsigned long address,
                   int *type)
 {
     struct fault_data data;
@@ -204,14 +204,14 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
     struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
     unsigned long page_offset;
     struct page *page = NULL;
-    struct drm_ttm *ttm;
+    struct drm_ttm *ttm;
     struct drm_device *dev;
     unsigned long pfn;
     int err;
     unsigned long bus_base;
     unsigned long bus_offset;
     unsigned long bus_size;
-
+
     dev = bo->dev;
     while(drm_bo_read_lock(&dev->bm.bm_lock));
@@ -219,12 +219,12 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
 
     err = drm_bo_wait(bo, 0, 1, 0);
     if (err) {
-        data->type = (err == -EAGAIN) ?
+        data->type = (err == -EAGAIN) ?
             VM_FAULT_MINOR : VM_FAULT_SIGBUS;
         goto out_unlock;
     }
 
-
+
     /*
     * If buffer happens to be in a non-mappable location,
     * move it to a mappable.
@@ -253,7 +253,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
     }
 
     dev = bo->dev;
-    err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+    err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                 &bus_size);
 
     if (err) {
@@ -286,7 +286,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
     err = vm_insert_pfn(vma, address, pfn);
 
     if (!err || err == -EBUSY)
-        data->type = VM_FAULT_MINOR;
+        data->type = VM_FAULT_MINOR;
     else
         data->type = VM_FAULT_OOM;
 out_unlock:
@ -330,7 +330,7 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
|
|||
* VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
|
||||
* workaround for a single BUG statement in do_no_page in these versions. The
|
||||
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
|
||||
* vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
|
||||
* vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
|
||||
* check first take the dev->struct_mutex, and then trylock all mmap_sems. If this
|
||||
* fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
|
||||
* release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
|
||||
|
@ -351,13 +351,13 @@ typedef struct vma_entry {
|
|||
|
||||
|
||||
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
unsigned long address,
|
||||
int *type)
|
||||
{
|
||||
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
|
||||
unsigned long page_offset;
|
||||
struct page *page;
|
||||
struct drm_ttm *ttm;
|
||||
struct drm_ttm *ttm;
|
||||
struct drm_device *dev;
|
||||
|
||||
mutex_lock(&bo->mutex);
|
||||
|
@ -369,7 +369,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
|
|||
page = NOPAGE_SIGBUS;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
|
||||
dev = bo->dev;
|
||||
|
||||
if (drm_mem_reg_is_pci(dev, &bo->mem)) {
|
||||
|
@ -403,8 +403,8 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
|
|||
unsigned long bus_base;
|
||||
unsigned long bus_offset;
|
||||
unsigned long bus_size;
|
||||
|
||||
ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
|
||||
|
||||
ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
|
||||
&bus_offset, &bus_size);
|
||||
BUG_ON(ret);
|
||||
|
||||
|
@ -419,7 +419,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
|
|||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
|
||||
{
|
||||
|
@ -493,7 +493,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
|
|||
{
|
||||
p_mm_entry_t *entry;
|
||||
int lock_ok = 1;
|
||||
|
||||
|
||||
list_for_each_entry(entry, &bo->p_mm_list, head) {
|
||||
BUG_ON(entry->locked);
|
||||
if (!down_write_trylock(&entry->mm->mmap_sem)) {
|
||||
|
@ -507,7 +507,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
|
|||
return 0;
|
||||
|
||||
list_for_each_entry(entry, &bo->p_mm_list, head) {
|
||||
if (!entry->locked)
|
||||
if (!entry->locked)
|
||||
break;
|
||||
up_write(&entry->mm->mmap_sem);
|
||||
entry->locked = 0;
|
||||
|
@ -524,7 +524,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
|
|||
void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
|
||||
{
|
||||
p_mm_entry_t *entry;
|
||||
|
||||
|
||||
list_for_each_entry(entry, &bo->p_mm_list, head) {
|
||||
BUG_ON(!entry->locked);
|
||||
up_write(&entry->mm->mmap_sem);
|
||||
|
@ -532,7 +532,7 @@ void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
|
|||
}
|
||||
}
|
||||
|
||||
int drm_bo_remap_bound(struct drm_buffer_object *bo)
|
||||
int drm_bo_remap_bound(struct drm_buffer_object *bo)
|
||||
{
|
||||
vma_entry_t *v_entry;
|
||||
int ret = 0;
|
||||
|
@ -553,9 +553,9 @@ void drm_bo_finish_unmap(struct drm_buffer_object *bo)
|
|||
vma_entry_t *v_entry;
|
||||
|
||||
list_for_each_entry(v_entry, &bo->vma_list, head) {
|
||||
v_entry->vma->vm_flags &= ~VM_PFNMAP;
|
||||
v_entry->vma->vm_flags &= ~VM_PFNMAP;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
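
The locking scheme described in the comment above (take the device mutex, then trylock every mapper's semaphore, and back off completely if any single trylock fails) is a general pattern. Below is a minimal user-space sketch of it, with pthread rwlocks standing in for the per-process mmap_sems; every name here is illustrative, not part of the DRM code.

#include <pthread.h>
#include <sched.h>

#define N_MAPPERS 4

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t mmap_sem[N_MAPPERS] = {
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
};

/* Try to write-lock every semaphore; on any failure undo and report. */
static int lock_all_kmm(void)
{
        int i, j;

        for (i = 0; i < N_MAPPERS; ++i) {
                if (pthread_rwlock_trywrlock(&mmap_sem[i]) != 0) {
                        /* Partial success is useless: release what we took. */
                        for (j = 0; j < i; ++j)
                                pthread_rwlock_unlock(&mmap_sem[j]);
                        return -1;      /* caller must back off and retry */
                }
        }
        return 0;
}

static void unlock_all_kmm(void)
{
        int i;

        for (i = 0; i < N_MAPPERS; ++i)
                pthread_rwlock_unlock(&mmap_sem[i]);
}

/* Caller side: take the outer mutex first, then trylock the rest; if
 * that fails, drop everything, yield the CPU and start over. */
static void with_all_locks(void (*fn)(void))
{
        for (;;) {
                pthread_mutex_lock(&struct_mutex);
                if (lock_all_kmm() == 0)
                        break;
                pthread_mutex_unlock(&struct_mutex);
                sched_yield();          /* "release the cpu and retry" */
        }
        fn();
        unlock_all_kmm();
        pthread_mutex_unlock(&struct_mutex);
}
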
@@ -96,7 +96,7 @@ enum {
#define __user
#endif

#if !defined(__put_page)
#define __put_page(p) atomic_dec(&(p)->count)
#endif

@@ -111,7 +111,7 @@ enum {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
return remap_page_range(vma, from,
pfn << PAGE_SHIFT,
size,
pgprot);
@@ -185,7 +185,7 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)


/*
 * Flush relevant caches and clear a VMA structure so that page references
 * will cause a page fault. Don't flush tlbs.
 */

@@ -193,7 +193,7 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
unsigned long addr, unsigned long end);

/*
 * Return the PTE protection map entries for the VMA flags given by
 * flags. This is a functional interface to the kernel's protection map.
 */

@@ -230,7 +230,7 @@ extern void free_nopage_retry(void);
#ifndef DRM_FULL_MM_COMPAT

/*
 * For now, just return a dummy page that we've allocated out of
 * static space. The page will be put by do_nopage() since we've already
 * filled out the pte.
 */
@@ -240,13 +240,13 @@ struct fault_data {
unsigned long address;
pgoff_t pgoff;
unsigned int flags;

int type;
};

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
!defined(DRM_FULL_MM_COMPAT)
@@ -261,22 +261,22 @@ struct drm_buffer_object;

/*
 * Add a vma to the ttm vma list, and the
 * process mm pointer to the ttm mm list. Needs the ttm mutex.
 */

extern int drm_bo_add_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
 * Delete a vma and the corresponding mm pointer from the
 * ttm lists. Needs the ttm mutex.
 */
extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);

/*
 * Attempts to lock all relevant mmap_sems for a ttm, while
 * not releasing the ttm mutex. May return -EAGAIN to avoid
 * deadlocks. In that case the caller shall release the ttm mutex,
 * schedule() and try again.
 */

@@ -299,7 +299,7 @@ extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);

/*
 * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
 * fault these pfns in, because the first one will set the vma VM_PFNMAP
 * flag, which will make the next fault bug in do_nopage(). The function
 * releases the mmap_sems for this ttm.

@@ -89,7 +89,7 @@ again:
mutex_unlock(&dev->struct_mutex);
goto again;
}

mutex_unlock(&dev->struct_mutex);
return new_id;
}
@@ -160,7 +160,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}

@@ -65,7 +65,7 @@ int drm_dma_setup(struct drm_device * dev)
 * \param dev DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the the drm_device::dma structure itself.
 * finally the drm_device::dma structure itself.
 */
void drm_dma_takedown(struct drm_device * dev)
{

@@ -129,13 +129,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode, DRM_MASTER|DRM_ROOT_ONLY),

DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
@@ -248,7 +248,7 @@ int drm_lastclose(struct drm_device * dev)
list_del(&vma->head);
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}

list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
@@ -328,7 +328,7 @@ int drm_init(struct drm_driver *driver,
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev))) {
/* Are there device class requirements? */
if ((pid->class != 0)
&& ((pdev->class & pid->class_mask) != pid->class)) {
continue;
}
@@ -359,7 +359,7 @@ int drm_init(struct drm_driver *driver,
pid->subvendor, pid->subdevice,
pdev))) {
/* Are there device class requirements? */
if ((pid->class != 0)
&& ((pdev->class & pid->class_mask) != pid->class)) {
continue;
}
@@ -477,19 +477,19 @@ static int __init drm_core_init(void)
unsigned long max_memctl_mem;

si_meminfo(&si);

/*
 * AGP only allows low / DMA32 memory ATM.
 */

avail_memctl_mem = si.totalram - si.totalhigh;

/*
 * Avoid overflows
 */

max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;

if (avail_memctl_mem >= max_memctl_mem)
avail_memctl_mem = max_memctl_mem;
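
For reference, the clamp computed in drm_core_init() above caps the memory-control pool at 4 GiB, expressed in si.mem_unit-sized units. A small stand-alone example of the same arithmetic with made-up sample values:

#include <stdio.h>

int main(void)
{
        const unsigned long PAGE_SHIFT = 12;            /* 4 KiB pages */
        const unsigned long PAGE_SIZE = 1UL << PAGE_SHIFT;
        unsigned long mem_unit = PAGE_SIZE;             /* plays si.mem_unit */
        unsigned long totalram = 2UL << 20;             /* 2^21 units = 8 GiB */
        unsigned long totalhigh = 0;

        /* 2^(32-12) pages = 4 GiB worth of pages, before unit scaling. */
        unsigned long max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
        max_memctl_mem = (max_memctl_mem / mem_unit) * PAGE_SIZE;

        unsigned long avail = totalram - totalhigh;
        if (avail >= max_memctl_mem)
                avail = max_memctl_mem;

        /* Prints 1048576: 2^20 4-KiB units, i.e. the 4 GiB cap. */
        printf("clamped to %lu units\n", avail);
        return 0;
}
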
@@ -34,7 +34,7 @@
 * Typically called by the IRQ handler.
 */

void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence, uint32_t type, uint32_t error)
{
int wake = 0;
@@ -58,9 +58,8 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
ge_last_exe = diff < driver->wrap_diff;

if (is_exe && ge_last_exe) {
if (is_exe && ge_last_exe)
fc->last_exe_flush = sequence;
}

if (list_empty(&fc->ring))
return;
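
The masked subtraction here is the standard wrap-safe ordering test for hardware sequence numbers: on a counter that wraps at mask + 1, a is treated as at-or-after b exactly when ((a - b) & mask) falls below the wrap threshold. A self-contained sketch with arbitrary 16-bit example parameters:

#include <assert.h>
#include <stdint.h>

#define SEQ_MASK   0xffffu          /* counter wraps at 2^16 */
#define WRAP_DIFF  (SEQ_MASK / 2)   /* window: half the sequence space */

static int seq_at_or_after(uint32_t a, uint32_t b)
{
        return ((a - b) & SEQ_MASK) < WRAP_DIFF;
}

int main(void)
{
        assert(seq_at_or_after(10, 5));        /* plain case */
        assert(!seq_at_or_after(5, 10));
        assert(seq_at_or_after(3, 0xfffe));    /* 3 is "after", despite wrap */
        assert(!seq_at_or_after(0xfffe, 3));
        return 0;
}
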
@@ -123,11 +122,11 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
 */

if ((fc->pending_flush & type) != type) {
head = head->prev;
list_for_each_entry(fence, head, ring) {
if (&fence->ring == &fc->ring)
break;
diff = (fc->last_exe_flush - fence->sequence) &
driver->sequence_mask;
if (diff > driver->wrap_diff)
break;
@@ -141,10 +140,9 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
DRM_WAKEUP(&fc->fence_queue);
}
}

EXPORT_SYMBOL(drm_fence_handler);

static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
{
struct drm_fence_manager *fm = &dev->fm;
unsigned long flags;
@@ -154,7 +152,7 @@ static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
write_unlock_irqrestore(&fm->lock, flags);
}

void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
@@ -173,7 +171,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
}
EXPORT_SYMBOL(drm_fence_usage_deref_locked);

void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
@@ -212,7 +210,8 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
}
EXPORT_SYMBOL(drm_fence_reference_unlocked);

static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
static void drm_fence_object_destroy(struct drm_file *priv,
struct drm_user_object *base)
{
struct drm_fence_object *fence =
drm_user_object_entry(base, struct drm_fence_object, base);
@@ -220,7 +219,7 @@ static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_obje
drm_fence_usage_deref_locked(&fence);
}

int drm_fence_object_signaled(struct drm_fence_object * fence,
int drm_fence_object_signaled(struct drm_fence_object *fence,
uint32_t mask, int poke_flush)
{
unsigned long flags;
@@ -240,8 +239,9 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,
}
EXPORT_SYMBOL(drm_fence_object_signaled);

static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
struct drm_fence_driver * driver, uint32_t sequence)
static void drm_fence_flush_exe(struct drm_fence_class_manager *fc,
struct drm_fence_driver *driver,
uint32_t sequence)
{
uint32_t diff;

@@ -249,15 +249,13 @@ static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
fc->exe_flush_sequence = sequence;
fc->pending_exe_flush = 1;
} else {
diff =
(sequence - fc->exe_flush_sequence) & driver->sequence_mask;
if (diff < driver->wrap_diff) {
diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
if (diff < driver->wrap_diff)
fc->exe_flush_sequence = sequence;
}
}
}

int drm_fence_object_flush(struct drm_fence_object * fence,
int drm_fence_object_flush(struct drm_fence_object *fence,
uint32_t type)
{
struct drm_device *dev = fence->dev;
@@ -296,7 +294,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 * wrapped around and reused.
 */

void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence)
void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence)
{
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
@@ -328,12 +327,10 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
read_unlock_irqrestore(&fm->lock, flags);
if (diff < driver->wrap_diff) {
if (diff < driver->wrap_diff)
drm_fence_object_flush(fence, fence->type);
}
drm_fence_usage_deref_unlocked(&fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);

static int drm_fence_lazy_wait(struct drm_fence_object *fence,
@@ -378,7 +375,7 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,
return 0;
}

int drm_fence_object_wait(struct drm_fence_object * fence,
int drm_fence_object_wait(struct drm_fence_object *fence,
int lazy, int ignore_signals, uint32_t mask)
{
struct drm_device *dev = fence->dev;
@@ -431,10 +428,9 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
/*
 * Avoid kernel-space busy-waits.
 */
#if 1
if (!ignore_signals)
return -EAGAIN;
#endif

do {
schedule();
signaled = drm_fence_object_signaled(fence, mask, 1);
@@ -447,9 +443,8 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
}
EXPORT_SYMBOL(drm_fence_object_wait);


int drm_fence_object_emit(struct drm_fence_object * fence,
uint32_t fence_flags, uint32_t fence_class, uint32_t type)
int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
uint32_t fence_class, uint32_t type)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
@@ -461,7 +456,8 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
int ret;

drm_fence_unring(dev, &fence->ring);
ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type);
ret = driver->emit(dev, fence_class, fence_flags, &sequence,
&native_type);
if (ret)
return ret;

@@ -481,10 +477,10 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
}
EXPORT_SYMBOL(drm_fence_object_emit);

static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
uint32_t type,
uint32_t fence_flags,
struct drm_fence_object * fence)
struct drm_fence_object *fence)
{
int ret = 0;
unsigned long flags;
@@ -497,7 +493,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);

/*
 * Avoid hitting BUG() for kernel-only fence objects.
 */

@@ -517,8 +513,8 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
return ret;
}

int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence,
int shareable)
int drm_fence_add_user_object(struct drm_file *priv,
struct drm_fence_object *fence, int shareable)
{
struct drm_device *dev = priv->head->dev;
int ret;
@@ -537,8 +533,9 @@ out:
}
EXPORT_SYMBOL(drm_fence_add_user_object);

int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type,
unsigned flags, struct drm_fence_object ** c_fence)
int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
uint32_t type, unsigned flags,
struct drm_fence_object **c_fence)
{
struct drm_fence_object *fence;
int ret;
@@ -557,10 +554,9 @@ int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint3

return 0;
}

EXPORT_SYMBOL(drm_fence_object_create);

void drm_fence_manager_init(struct drm_device * dev)
void drm_fence_manager_init(struct drm_device *dev)
{
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fence_class;
@@ -578,7 +574,7 @@ void drm_fence_manager_init(struct drm_device * dev)
fm->num_classes = fed->num_classes;
BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);

for (i=0; i<fm->num_classes; ++i) {
for (i = 0; i < fm->num_classes; ++i) {
fence_class = &fm->fence_class[i];

INIT_LIST_HEAD(&fence_class->ring);
@@ -591,7 +587,8 @@ void drm_fence_manager_init(struct drm_device * dev)
write_unlock_irqrestore(&fm->lock, flags);
}

void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
void drm_fence_fill_arg(struct drm_fence_object *fence,
struct drm_fence_arg *arg)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
@@ -608,12 +605,12 @@ void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *ar
}
EXPORT_SYMBOL(drm_fence_fill_arg);


void drm_fence_manager_takedown(struct drm_device * dev)
void drm_fence_manager_takedown(struct drm_device *dev)
{
}

struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
uint32_t handle)
{
struct drm_device *dev = priv->head->dev;
struct drm_user_object *uo;
@@ -656,14 +653,13 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
drm_fence_usage_deref_unlocked(&fence);
return ret;
}


/*
 * usage > 0. No need to lock dev->struct_mutex;
 */

arg->handle = fence->base.hash.key;

drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);

@@ -273,9 +273,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
}

if (ret) {
for(j=0; j<i; ++j) {
for(j = 0; j < i; ++j)
drm_ht_remove(&priv->refd_object_hash[j]);
}
goto out_free;
}

@@ -334,8 +333,8 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);

static void drm_object_release(struct file *filp) {

static void drm_object_release(struct file *filp)
{
struct drm_file *priv = filp->private_data;
struct list_head *head;
struct drm_ref_object *ref_object;
@@ -356,7 +355,7 @@ static void drm_object_release(struct file *filp) {
head = &priv->refd_objects;
}

for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
for(i = 0; i < _DRM_NO_REF_TYPES; ++i) {
drm_ht_remove(&priv->refd_object_hash[i]);
}
}
@@ -530,4 +529,3 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
return 0;
}
EXPORT_SYMBOL(drm_poll);

@@ -128,7 +128,7 @@ int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item)
}

/*
 * Just insert an item and return any "bits" bit key that hasn't been
 * used before.
 */
int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item,

@@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);


#endif

@@ -264,7 +264,7 @@ int drm_getstats(struct drm_device *dev, void *data,
struct drm_stats *stats = data;
int i;

memset(stats, 0, sizeof(stats));
memset(stats, 0, sizeof(*stats));

mutex_lock(&dev->struct_mutex);

@@ -235,12 +235,12 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;

if (entry->size < size)
continue;

if (alignment) {
register unsigned tmp = entry->start % alignment;
if (tmp)
wasted += alignment - tmp;
}
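
The alignment handling above pads a candidate block's start up to the requested alignment and counts that padding as waste before the size check. A stand-alone sketch of the same fit test, with illustrative structures rather than the real drm_mm ones:

#include <stdio.h>

struct free_block {
        unsigned long start;
        unsigned long size;
};

static int block_fits(const struct free_block *b,
                      unsigned long size, unsigned long alignment)
{
        unsigned long wasted = 0;

        if (alignment) {
                unsigned long tmp = b->start % alignment;
                if (tmp)
                        wasted = alignment - tmp;   /* pad up to alignment */
        }
        return b->size >= size + wasted;
}

int main(void)
{
        struct free_block b = { 0x1003, 0x2000 };

        /* 0x1003 padded to 16-byte alignment wastes 13 bytes. */
        printf("fits: %d\n", block_fits(&b, 0x1ff0, 16));  /* 1 */
        printf("fits: %d\n", block_fits(&b, 0x1ff8, 16));  /* 0 */
        return 0;
}
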
@@ -30,7 +30,7 @@

#include "drmP.h"

int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
int shareable)
{
struct drm_device *dev = priv->head->dev;
@@ -56,7 +56,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
}
EXPORT_SYMBOL(drm_add_user_object);

struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key)
struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
{
struct drm_device *dev = priv->head->dev;
struct drm_hash_item *hash;
@@ -66,9 +66,9 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
DRM_ASSERT_LOCKED(&dev->struct_mutex);

ret = drm_ht_find_item(&dev->object_hash, key, &hash);
if (ret) {
if (ret)
return NULL;
}

item = drm_hash_entry(hash, struct drm_user_object, hash);

if (priv != item->owner) {
@@ -83,7 +83,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
}
EXPORT_SYMBOL(drm_lookup_user_object);

static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item)
static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
{
struct drm_device *dev = priv->head->dev;
int ret;
@@ -95,7 +95,7 @@ static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object
}
}

static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro,
static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
enum drm_ref_type action)
{
int ret = 0;
@@ -114,7 +114,7 @@ static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object
return ret;
}

int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object,
int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
enum drm_ref_type ref_action)
{
int ret = 0;
@@ -167,12 +167,12 @@ int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenc

list_add(&item->list, &priv->refd_objects);
ret = drm_object_ref_action(priv, referenced_object, ref_action);
out:
return ret;
}

struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
struct drm_user_object * referenced_object,
struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action)
{
struct drm_hash_item *hash;
@@ -188,8 +188,8 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
}
EXPORT_SYMBOL(drm_lookup_ref_object);

static void drm_remove_other_references(struct drm_file * priv,
struct drm_user_object * ro)
static void drm_remove_other_references(struct drm_file *priv,
struct drm_user_object *ro)
{
int i;
struct drm_open_hash *ht;
@@ -205,7 +205,7 @@ static void drm_remove_other_references(struct drm_file * priv,
}
}

void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item)
void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
{
int ret;
struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
@@ -234,9 +234,10 @@ void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item)
}

}
EXPORT_SYMBOL(drm_remove_ref_object);

int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
enum drm_object_type type, struct drm_user_object ** object)
int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type, struct drm_user_object **object)
{
struct drm_device *dev = priv->head->dev;
struct drm_user_object *uo;
@@ -260,12 +261,12 @@ int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
mutex_unlock(&dev->struct_mutex);
*object = uo;
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}

int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type)
{
struct drm_device *dev = priv->head->dev;
@@ -287,7 +288,7 @@ int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
drm_remove_ref_object(priv, ro);
mutex_unlock(&dev->struct_mutex);
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}

@@ -68,12 +68,12 @@ struct drm_user_object {
atomic_t refcount;
int shareable;
struct drm_file *owner;
void (*ref_struct_locked) (struct drm_file * priv,
struct drm_user_object * obj,
void (*ref_struct_locked) (struct drm_file *priv,
struct drm_user_object *obj,
enum drm_ref_type ref_action);
void (*unref) (struct drm_file * priv, struct drm_user_object * obj,
void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
enum drm_ref_type unref_action);
void (*remove) (struct drm_file * priv, struct drm_user_object * obj);
void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
};

/*
@@ -94,29 +94,29 @@ struct drm_ref_object {
 * Must be called with the struct_mutex held.
 */

extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
int shareable);
/**
 * Must be called with the struct_mutex held.
 */

extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv,
extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
uint32_t key);

/*
 * Must be called with the struct_mutex held. May temporarily release it.
 */

extern int drm_add_ref_object(struct drm_file * priv,
struct drm_user_object * referenced_object,
extern int drm_add_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action);

/*
 * Must be called with the struct_mutex held.
 */

struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
struct drm_user_object * referenced_object,
struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action);
/*
 * Must be called with the struct_mutex held.
@@ -125,11 +125,11 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
 * This function may temporarily release the struct_mutex.
 */

extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item);
extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type,
struct drm_user_object ** object);
extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
struct drm_user_object **object);
extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type);

/***************************************************
@@ -138,7 +138,7 @@ extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,

struct drm_fence_object {
struct drm_user_object base;
struct drm_device *dev;
atomic_t usage;

/*
@@ -153,7 +153,7 @@ struct drm_fence_object {
uint32_t sequence;
uint32_t flush_mask;
uint32_t submitted_flush;
uint32_t error;
};

#define _DRM_FENCE_CLASSES 8
@@ -181,40 +181,44 @@ struct drm_fence_driver {
uint32_t flush_diff;
uint32_t sequence_mask;
int lazy_capable;
int (*has_irq) (struct drm_device * dev, uint32_t fence_class,
int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
uint32_t flags);
int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags,
uint32_t * breadcrumb, uint32_t * native_type);
void (*poke_flush) (struct drm_device * dev, uint32_t fence_class);
int (*emit) (struct drm_device *dev, uint32_t fence_class,
uint32_t flags, uint32_t *breadcrumb,
uint32_t *native_type);
void (*poke_flush) (struct drm_device *dev, uint32_t fence_class);
};

extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence, uint32_t type, uint32_t error);
uint32_t sequence, uint32_t type,
uint32_t error);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence);
extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type);
extern int drm_fence_object_signaled(struct drm_fence_object * fence,
extern int drm_fence_object_flush(struct drm_fence_object *fence,
uint32_t type);
extern int drm_fence_object_signaled(struct drm_fence_object *fence,
uint32_t type, int flush);
extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence);
extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence);
extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
struct drm_fence_object *src);
extern int drm_fence_object_wait(struct drm_fence_object * fence,
extern int drm_fence_object_wait(struct drm_fence_object *fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
uint32_t fence_flags, uint32_t fence_class,
struct drm_fence_object ** c_fence);
extern int drm_fence_object_emit(struct drm_fence_object * fence,
struct drm_fence_object **c_fence);
extern int drm_fence_object_emit(struct drm_fence_object *fence,
uint32_t fence_flags, uint32_t class,
uint32_t type);
extern void drm_fence_fill_arg(struct drm_fence_object *fence,
struct drm_fence_arg *arg);

extern int drm_fence_add_user_object(struct drm_file * priv,
struct drm_fence_object * fence, int shareable);
extern int drm_fence_add_user_object(struct drm_file *priv,
struct drm_fence_object *fence,
int shareable);

extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);

@@ -241,7 +245,7 @@ extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
/*
 * The ttm backend GTT interface. (In our case AGP).
 * Any similar type of device (PCIE?)
 * needs only to implement these functions to be usable with the "TTM" interface.
 * needs only to implement these functions to be usable with the TTM interface.
 * The AGP backend implementation lives in drm_agpsupport.c
 * basically maps these calls to available functions in agpgart.
 * Each drm device driver gets an
@@ -256,25 +260,25 @@ extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,

struct drm_ttm_backend;
struct drm_ttm_backend_func {
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
int (*bind) (struct drm_ttm_backend * backend,
struct drm_bo_mem_reg * bo_mem);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
int (*populate) (struct drm_ttm_backend *backend,
unsigned long num_pages, struct page **pages);
void (*clear) (struct drm_ttm_backend *backend);
int (*bind) (struct drm_ttm_backend *backend,
struct drm_bo_mem_reg *bo_mem);
int (*unbind) (struct drm_ttm_backend *backend);
void (*destroy) (struct drm_ttm_backend *backend);
};


typedef struct drm_ttm_backend {
struct drm_device *dev;
uint32_t flags;
struct drm_ttm_backend_func *func;
} drm_ttm_backend_t;
struct drm_ttm_backend {
struct drm_device *dev;
uint32_t flags;
struct drm_ttm_backend_func *func;
};

struct drm_ttm {
struct mm_struct *user_mm;
struct page *dummy_read_page;
struct page **pages;
uint32_t page_flags;
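
A device-specific backend fills in the drm_ttm_backend_func table above, as the AGP and ATI PCIGART backends do elsewhere in the tree. Below is a skeletal, hypothetical example ("nullgart", with empty bind/unbind bodies) assuming the usual drmP.h environment; a real backend would program its GART in bind() and unbind().

#include "drmP.h"

struct nullgart_backend {
        struct drm_ttm_backend backend;   /* must embed the generic part */
        unsigned long num_pages;
        struct page **pages;
};

static int nullgart_needs_ub_cache_adjust(struct drm_ttm_backend *backend)
{
        /* Cached-bound backends need no cache fixup on unbind. */
        return (backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1;
}

static int nullgart_populate(struct drm_ttm_backend *backend,
                             unsigned long num_pages, struct page **pages)
{
        struct nullgart_backend *nb =
                container_of(backend, struct nullgart_backend, backend);

        nb->pages = pages;                /* just remember the page array */
        nb->num_pages = num_pages;
        return 0;
}

static int nullgart_bind(struct drm_ttm_backend *backend,
                         struct drm_bo_mem_reg *bo_mem)
{
        return 0;                         /* program GART entries here */
}

static int nullgart_unbind(struct drm_ttm_backend *backend)
{
        return 0;                         /* clear GART entries here */
}

static void nullgart_clear(struct drm_ttm_backend *backend)
{
        struct nullgart_backend *nb =
                container_of(backend, struct nullgart_backend, backend);

        nb->pages = NULL;
        nb->num_pages = 0;
}

static void nullgart_destroy(struct drm_ttm_backend *backend)
{
        drm_ctl_free(container_of(backend, struct nullgart_backend, backend),
                     sizeof(struct nullgart_backend), DRM_MEM_TTM);
}

static struct drm_ttm_backend_func nullgart_func = {
        .needs_ub_cache_adjust = nullgart_needs_ub_cache_adjust,
        .populate = nullgart_populate,
        .clear = nullgart_clear,
        .bind = nullgart_bind,
        .unbind = nullgart_unbind,
        .destroy = nullgart_destroy,
};
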
@@ -294,13 +298,13 @@ struct drm_ttm {
};

extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem);
extern void drm_ttm_unbind(struct drm_ttm * ttm);
extern void drm_ttm_evict(struct drm_ttm * ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
extern void drm_ttm_unbind(struct drm_ttm *ttm);
extern void drm_ttm_evict(struct drm_ttm *ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
extern void drm_ttm_cache_flush(void);
extern int drm_ttm_populate(struct drm_ttm * ttm);
extern int drm_ttm_populate(struct drm_ttm *ttm);
extern int drm_ttm_set_user(struct drm_ttm *ttm,
struct task_struct *tsk,
int write,
@@ -309,12 +313,12 @@ extern int drm_ttm_set_user(struct drm_ttm *ttm,
struct page *dummy_read_page);

/*
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
 * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
 * when the last vma exits.
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
 * this which calls this function iff there are no vmas referencing it anymore.
 * Otherwise it is called when the last vma exits.
 */

extern int drm_destroy_ttm(struct drm_ttm * ttm);
extern int drm_destroy_ttm(struct drm_ttm *ttm);

#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
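
DRM_FLAG_MASKED merges exactly the _mask bits of _new into _old in a single XOR step, leaving the remaining bits of _old untouched. A worked stand-alone example with arbitrary values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t old = 0xf0f0;
        uint32_t newv = 0x0f0f;
        uint32_t mask = 0x00ff;

        /* The macro body: (_old) ^= (((_old) ^ (_new)) & (_mask)); */
        old ^= ((old ^ newv) & mask);

        /* Low byte now comes from newv, high byte is kept from old. */
        assert(old == 0xf00f);
        return 0;
}
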
@@ -349,8 +353,8 @@ struct drm_bo_mem_reg {
uint32_t mem_type;
uint64_t flags;
uint64_t mask;
uint32_t desired_tile_stride;
uint32_t hw_tile_stride;
};

enum drm_bo_type {
@@ -380,8 +384,8 @@ struct drm_buffer_object {

uint32_t fence_type;
uint32_t fence_class;
uint32_t new_fence_type;
uint32_t new_fence_class;
struct drm_fence_object *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
@@ -420,7 +424,7 @@ struct drm_mem_type_manager {
struct list_head pinned;
uint32_t flags;
uint32_t drm_bus_maptype;
unsigned long gpu_offset;
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
@@ -442,8 +446,8 @@ struct drm_bo_lock {
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */

struct drm_buffer_manager {
struct drm_bo_lock bm_lock;
struct mutex evict_mutex;
int nice_mode;
int initialized;
struct drm_file *last_to_validate;
@@ -467,15 +471,15 @@ struct drm_bo_driver {
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
struct drm_ttm_backend *(*create_ttm_backend_entry)
(struct drm_device * dev);
(struct drm_device *dev);
int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
uint32_t * type);
int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
int (*init_mem_type) (struct drm_device * dev, uint32_t type,
struct drm_mem_type_manager * man);
uint32_t *type);
int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
int (*init_mem_type) (struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man);
uint32_t(*evict_mask) (struct drm_buffer_object *bo);
int (*move) (struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
int (*move) (struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
void (*ttm_cache_flush)(struct drm_ttm *ttm);
};

@@ -500,43 +504,43 @@ extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_f
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
struct drm_bo_mem_reg * mem,
struct drm_bo_mem_reg *mem,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);

extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo);
extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
extern void drm_putback_buffer_objects(struct drm_device *dev);
extern int drm_fence_buffer_objects(struct drm_device * dev,
extern int drm_fence_buffer_objects(struct drm_device *dev,
struct list_head *list,
uint32_t fence_flags,
struct drm_fence_object * fence,
struct drm_fence_object ** used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
struct drm_fence_object *fence,
struct drm_fence_object **used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
enum drm_bo_type type, uint64_t mask,
uint32_t hint, uint32_t page_alignment,
unsigned long buffer_start,
struct drm_buffer_object **bo);
extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
int no_wait);
extern int drm_bo_mem_space(struct drm_buffer_object * bo,
struct drm_bo_mem_reg * mem, int no_wait);
extern int drm_bo_move_buffer(struct drm_buffer_object * bo,
extern int drm_bo_mem_space(struct drm_buffer_object *bo,
struct drm_bo_mem_reg *mem, int no_wait);
extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
uint64_t new_mem_flags,
int no_wait, int move_unfenced);
extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type);
extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
unsigned long p_offset, unsigned long p_size);
extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
uint32_t fence_class, uint64_t flags,
uint64_t mask, uint32_t hint,
int use_old_fence_class,
struct drm_bo_info_rep * rep,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv,
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
uint32_t handle,
int check_owner);
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
@@ -550,18 +554,17 @@ extern int drm_bo_do_validate(struct drm_buffer_object *bo,
 * drm_bo_move.c
 */

extern int drm_bo_move_ttm(struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
extern int drm_bo_move_memcpy(struct drm_buffer_object * bo,
extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
int evict, int no_wait,
struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
int evict,
int no_wait, struct drm_bo_mem_reg * new_mem);
extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
int evict,
int no_wait,
uint32_t fence_class,
uint32_t fence_type,
int no_wait, struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
int evict, int no_wait,
uint32_t fence_class, uint32_t fence_type,
uint32_t fence_flags,
struct drm_bo_mem_reg * new_mem);
struct drm_bo_mem_reg *new_mem);
extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
extern unsigned long drm_bo_offset_end(unsigned long offset,
unsigned long end);
@@ -631,7 +634,7 @@ extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * m
extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
void *virtual);
/*
 * drm_bo_lock.c
 * Simple replacement for the hardware lock on buffer manager init and clean.
 */

@@ -639,10 +642,10 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *
extern void drm_bo_init_lock(struct drm_bo_lock *lock);
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
extern int drm_bo_read_lock(struct drm_bo_lock *lock);
extern int drm_bo_write_lock(struct drm_bo_lock *lock,
struct drm_file *file_priv);

extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
struct drm_file *file_priv);

#ifdef CONFIG_DEBUG_MUTEXES

@@ -92,9 +92,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
copy_to_user(arg1, arg2, arg3)
/* Macros for copyfrom user, but checking readability only once */
#define DRM_VERIFYAREA_READ( uaddr, size ) \
(access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT)
#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_from_user(arg1, arg2, arg3)
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_to_user(arg1, arg2, arg3)
@@ -129,3 +129,17 @@ do { \

#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

/** Type for the OS's non-sleepable mutex lock */
#define DRM_SPINTYPE spinlock_t
/**
 * Initialize the lock for use. name is an optional string describing the
 * lock
 */
#define DRM_SPININIT(l,name) spin_lock_init(l)
#define DRM_SPINUNINIT(l)
#define DRM_SPINLOCK(l) spin_lock(l)
#define DRM_SPINUNLOCK(l) spin_unlock(l)
#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags);
#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
#define DRM_SPINLOCK_ASSERT(l) do {} while (0)
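
A brief sketch of how the DRM_SPIN* wrappers just added above are meant to be used, assuming kernel context; the "hypo_counter" structure and functions are fictitious. Since DRM_SPINLOCK_IRQSAVE expands to spin_lock_irqsave(), the flags argument must be a local unsigned long.

struct hypo_counter {
        DRM_SPINTYPE lock;
        int value;
};

static void hypo_counter_init(struct hypo_counter *c)
{
        DRM_SPININIT(&c->lock, "hypo_counter");
        c->value = 0;
}

static void hypo_counter_bump(struct hypo_counter *c)
{
        unsigned long _flags;

        DRM_SPINLOCK_IRQSAVE(&c->lock, _flags);
        c->value++;
        DRM_SPINUNLOCK_IRQRESTORE(&c->lock, _flags);
}

static void hypo_counter_fini(struct hypo_counter *c)
{
        DRM_SPINUNINIT(&c->lock);
}
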
@@ -126,7 +126,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);

for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
entry->pagelist[j] = vmalloc_to_page((void *)i);
if (!entry->pagelist[j])

@@ -75,7 +75,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
mutex_init(&dev->bm.evict_mutex);

idr_init(&dev->drw_idr);

dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;

@@ -46,7 +46,7 @@ EXPORT_SYMBOL(drm_ttm_cache_flush);
 * Use kmalloc if possible. Otherwise fall back to vmalloc.
 */

static void ttm_alloc_pages(struct drm_ttm * ttm)
static void ttm_alloc_pages(struct drm_ttm *ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
ttm->pages = NULL;
@@ -54,20 +54,19 @@ static void ttm_alloc_pages(struct drm_ttm * ttm)
if (drm_alloc_memctl(size))
return;

if (size <= PAGE_SIZE) {
if (size <= PAGE_SIZE)
ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
}

if (!ttm->pages) {
ttm->pages = vmalloc_user(size);
if (ttm->pages)
ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
}
if (!ttm->pages) {
if (!ttm->pages)
drm_free_memctl(size);
}
}

static void ttm_free_pages(struct drm_ttm * ttm)
static void ttm_free_pages(struct drm_ttm *ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
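
ttm_alloc_pages() above uses the common try-kmalloc-then-vmalloc idiom: small tables come from the slab allocator, anything larger (or a failed slab attempt) falls back to vmalloc, and a flag records which allocator won so the free path can match it. The idiom in isolation, with generic names rather than the drm_ttm fields:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>

#define MY_FLAG_VMALLOC 0x1

struct my_table {
        void **slots;
        unsigned long count;
        unsigned int flags;
};

static int my_table_alloc(struct my_table *t)
{
        unsigned long size = t->count * sizeof(*t->slots);

        t->slots = NULL;
        if (size <= PAGE_SIZE)                  /* small: try slab first */
                t->slots = kcalloc(1, size, GFP_KERNEL);
        if (!t->slots) {                        /* large, or slab failed */
                t->slots = vmalloc_user(size);
                if (t->slots)
                        t->flags |= MY_FLAG_VMALLOC;
        }
        return t->slots ? 0 : -ENOMEM;
}

static void my_table_free(struct my_table *t)
{
        if (!t->slots)
                return;
        if (t->flags & MY_FLAG_VMALLOC)         /* free with the matching API */
                vfree(t->slots);
        else
                kfree(t->slots);
        t->slots = NULL;
}
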
@ -85,9 +84,9 @@ static struct page *drm_ttm_alloc_page(void)
|
|||
{
|
||||
struct page *page;
|
||||
|
||||
if (drm_alloc_memctl(PAGE_SIZE)) {
|
||||
if (drm_alloc_memctl(PAGE_SIZE))
|
||||
return NULL;
|
||||
}
|
||||
|
||||
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
|
||||
	if (!page) {
		drm_free_memctl(PAGE_SIZE);

@@ -106,7 +105,7 @@ static struct page *drm_ttm_alloc_page(void)
 * for range of pages in a ttm.
 */

static int drm_set_caching(struct drm_ttm * ttm, int noncached)
static int drm_set_caching(struct drm_ttm *ttm, int noncached)
{
	int i;
	struct page **cur_page;

@@ -153,7 +152,7 @@ static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
	dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);

	down_read(&mm->mmap_sem);
	for (i=0; i<ttm->num_pages; ++i) {
	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

@@ -186,14 +185,10 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
#else
			ClearPageReserved(*cur_page);
#endif
			if (page_count(*cur_page) != 1) {
				DRM_ERROR("Erroneous page count. "
					  "Leaking pages.\n");
			}
			if (page_mapped(*cur_page)) {
				DRM_ERROR("Erroneous map count. "
					  "Leaking page mappings.\n");
			}
			if (page_count(*cur_page) != 1)
				DRM_ERROR("Erroneous page count. Leaking pages.\n");
			if (page_mapped(*cur_page))
				DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
			__free_page(*cur_page);
			drm_free_memctl(PAGE_SIZE);
			--bm->cur_pages;

@@ -205,7 +200,7 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
 * Free all resources associated with a ttm.
 */

int drm_destroy_ttm(struct drm_ttm * ttm)
int drm_destroy_ttm(struct drm_ttm *ttm)
{
	struct drm_ttm_backend *be;

@@ -234,7 +229,7 @@ int drm_destroy_ttm(struct drm_ttm * ttm)
	return 0;
}

struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
{
	struct page *p;
	struct drm_buffer_manager *bm = &ttm->dev->bm;

@@ -283,10 +278,9 @@ int drm_ttm_set_user(struct drm_ttm *ttm,
		return -ENOMEM;
	}

	for (i=0; i<num_pages; ++i) {
		if (ttm->pages[i] == NULL) {
	for (i = 0; i < num_pages; ++i) {
		if (ttm->pages[i] == NULL)
			ttm->pages[i] = ttm->dummy_read_page;
		}
	}

	return 0;

@@ -294,7 +288,7 @@ int drm_ttm_set_user(struct drm_ttm *ttm,

int drm_ttm_populate(struct drm_ttm * ttm)
int drm_ttm_populate(struct drm_ttm *ttm)
{
	struct page *page;
	unsigned long i;

@@ -318,7 +312,7 @@ int drm_ttm_populate(struct drm_ttm * ttm)
 * Initialize a ttm.
 */

struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)
struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
{
	struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
	struct drm_ttm *ttm;

@@ -362,7 +356,7 @@ struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)
 * Unbind a ttm region from the aperture.
 */

void drm_ttm_evict(struct drm_ttm * ttm)
void drm_ttm_evict(struct drm_ttm *ttm)
{
	struct drm_ttm_backend *be = ttm->be;
	int ret;

@@ -375,19 +369,18 @@ void drm_ttm_evict(struct drm_ttm * ttm)
	ttm->state = ttm_evicted;
}

void drm_ttm_fixup_caching(struct drm_ttm * ttm)
void drm_ttm_fixup_caching(struct drm_ttm *ttm)
{

	if (ttm->state == ttm_evicted) {
		struct drm_ttm_backend *be = ttm->be;
		if (be->func->needs_ub_cache_adjust(be)) {
		if (be->func->needs_ub_cache_adjust(be))
			drm_set_caching(ttm, 0);
		}
		ttm->state = ttm_unbound;
	}
}

void drm_ttm_unbind(struct drm_ttm * ttm)
void drm_ttm_unbind(struct drm_ttm *ttm)
{
	if (ttm->state == ttm_bound)
		drm_ttm_evict(ttm);

@@ -395,7 +388,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm)
	drm_ttm_fixup_caching(ttm);
}

int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
{
	struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
	int ret = 0;

@@ -412,13 +405,14 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
	if (ret)
		return ret;

	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
	} else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
	else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
		   bo_driver->ttm_cache_flush)
		bo_driver->ttm_cache_flush(ttm);

	if ((ret = be->func->bind(be, bo_mem))) {
	ret = be->func->bind(be, bo_mem);
	if (ret) {
		ttm->state = ttm_evicted;
		DRM_ERROR("Couldn't bind backend.\n");
		return ret;

@@ -429,5 +423,4 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
		ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
	return 0;
}

EXPORT_SYMBOL(drm_bind_ttm);
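Most of the hunks above are mechanical style cleanups (pointer spacing, loop spacing, braces dropped around single statements, the embedded-assignment `if` split in two). The one behavioral pattern worth noting is the unbind path: an evicted ttm whose backend needed uncached pages must be flipped back to cached before reuse. A minimal standalone model of that state machine, with simplified stand-in types (names here are illustrative, not the kernel's):

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel's types. */
enum ttm_state { ttm_bound, ttm_evicted, ttm_unbound };

struct ttm {
	enum ttm_state state;
	int pages_uncached;       /* models DRM_TTM_PAGE_UNCACHED */
	int be_needs_cache_fixup; /* models be->func->needs_ub_cache_adjust() */
};

/* Mirrors drm_ttm_fixup_caching(): after an eviction, restore cached
 * mappings if the backend required uncached pages while bound. */
static void ttm_fixup_caching(struct ttm *t)
{
	if (t->state == ttm_evicted) {
		if (t->be_needs_cache_fixup)
			t->pages_uncached = 0; /* drm_set_caching(ttm, 0) */
		t->state = ttm_unbound;
	}
}

/* Mirrors drm_ttm_unbind(): evict if bound, then fix caching. */
static void ttm_unbind(struct ttm *t)
{
	if (t->state == ttm_bound)
		t->state = ttm_evicted; /* drm_ttm_evict() */
	ttm_fixup_caching(t);
}

int main(void)
{
	struct ttm t = { ttm_bound, 1, 1 };
	ttm_unbind(&t);
	printf("state=%d uncached=%d\n", t.state, t.pages_uncached);
	return 0;
}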
@@ -166,7 +166,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the the mapping, find the real physical page to map, get the page, and
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
@@ -124,7 +124,7 @@ typedef struct _ffb_fbc {
/*294*/	volatile unsigned int	xpmask;		/* X PlaneMask */
/*298*/	volatile unsigned int	ypmask;		/* Y PlaneMask */
/*29c*/	volatile unsigned int	zpmask;		/* Z PlaneMask */
/*2a0*/	ffb_auxclip		auxclip[4];	/* Auxilliary Viewport Clip */
/*2a0*/	ffb_auxclip		auxclip[4];	/* Auxilliary Viewport Clip */

	/* New 3dRAM III support regs */
/*2c0*/	volatile unsigned int	rawblend2;

@@ -266,7 +266,7 @@ typedef struct ffb_dev_priv {
	int			prom_node;
	enum ffb_chip_type	ffb_type;
	u64			card_phys_base;
	struct miscdevice	miscdev;
	struct miscdevice	miscdev;

	/* Controller registers. */
	ffb_fbcPtr		regs;
@@ -41,7 +41,7 @@

#define I810_BUF_FREE		2
#define I810_BUF_CLIENT		1
#define I810_BUF_HARDWARE	0
#define I810_BUF_HARDWARE	0

#define I810_BUF_UNMAPPED	0
#define I810_BUF_MAPPED		1

@@ -867,7 +867,7 @@ static void i810_dma_quiescent(struct drm_device * dev)
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	/* printk("%s\n", __FUNCTION__); */
	/* printk("%s\n", __FUNCTION__); */

	i810_kernel_lost_context(dev);

@@ -888,7 +888,7 @@ static int i810_flush_queue(struct drm_device * dev)
	int i, ret = 0;
	RING_LOCALS;

	/* printk("%s\n", __FUNCTION__); */
	/* printk("%s\n", __FUNCTION__); */

	i810_kernel_lost_context(dev);
@@ -25,7 +25,7 @@
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
 *	    Jeff Hartmann <jhartmann@valinux.com>
 *	    Jeff Hartmann <jhartmann@valinux.com>
 *
 */

@@ -134,7 +134,7 @@ extern int i810_max_ioctl;
#define I810_ADDR(reg)		(I810_BASE(reg) + reg)
#define I810_DEREF(reg)		*(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg)		I810_DEREF(reg)
#define I810_WRITE(reg,val)	do { I810_DEREF(reg) = val; } while (0)
#define I810_WRITE(reg,val)	do { I810_DEREF(reg) = val; } while (0)
#define I810_DEREF16(reg)	*(__volatile__ u16 *)I810_ADDR(reg)
#define I810_READ16(reg)	I810_DEREF16(reg)
#define I810_WRITE16(reg,val)	do { I810_DEREF16(reg) = val; } while (0)

@@ -155,19 +155,19 @@ extern int i810_max_ioctl;
} while (0)

#define ADVANCE_LP_RING() do {					\
	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");	\
	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");	\
	dev_priv->ring.tail = outring;				\
	I810_WRITE(LP_RING + RING_TAIL, outring);		\
} while(0)

#define OUT_RING(n) do {					\
#define OUT_RING(n) do {					\
	if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n)); \
	*(volatile unsigned int *)(virt + outring) = n;		\
	outring += 4;						\
	outring &= ringmask;					\
} while (0)

#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
#define CMD_REPORT_HEAD			(7<<23)
#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)

@@ -184,28 +184,28 @@ extern int i810_max_ioctl;

#define I810REG_HWSTAM		0x02098
#define I810REG_INT_IDENTITY_R	0x020a4
#define I810REG_INT_MASK_R	0x020a8
#define I810REG_INT_MASK_R	0x020a8
#define I810REG_INT_ENABLE_R	0x020a0

#define LP_RING			0x2030
#define HP_RING			0x2040
#define RING_TAIL		0x00
#define LP_RING			0x2030
#define HP_RING			0x2040
#define RING_TAIL		0x00
#define TAIL_ADDR		0x000FFFF8
#define RING_HEAD		0x04
#define HEAD_WRAP_COUNT		0xFFE00000
#define HEAD_WRAP_ONE		0x00200000
#define HEAD_ADDR		0x001FFFFC
#define RING_START		0x08
#define START_ADDR		0x00FFFFF8
#define RING_LEN		0x0C
#define RING_NR_PAGES		0x000FF000
#define RING_REPORT_MASK	0x00000006
#define RING_REPORT_64K		0x00000002
#define RING_REPORT_128K	0x00000004
#define RING_NO_REPORT		0x00000000
#define RING_VALID_MASK		0x00000001
#define RING_VALID		0x00000001
#define RING_INVALID		0x00000000
#define RING_HEAD		0x04
#define HEAD_WRAP_COUNT		0xFFE00000
#define HEAD_WRAP_ONE		0x00200000
#define HEAD_ADDR		0x001FFFFC
#define RING_START		0x08
#define START_ADDR		0x00FFFFF8
#define RING_LEN		0x0C
#define RING_NR_PAGES		0x000FF000
#define RING_REPORT_MASK	0x00000006
#define RING_REPORT_64K		0x00000002
#define RING_REPORT_128K	0x00000004
#define RING_NO_REPORT		0x00000000
#define RING_VALID_MASK		0x00000001
#define RING_VALID		0x00000001
#define RING_INVALID		0x00000000

#define GFX_OP_SCISSOR		((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR	(0x1<<1)
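The ADVANCE_LP_RING()/OUT_RING() macros above implement the classic producer side of a ring buffer: write dwords at the tail, mask the offset so it wraps at the ring size, then publish the new tail to the RING_TAIL register so the device starts fetching. A minimal user-space model of that protocol (plain C; names are illustrative, the real macros write through an ioremapped register window):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64 /* bytes; power of two so masking wraps */

static uint8_t  ring[RING_SIZE];     /* stands in for the mapped ring */
static uint32_t outring;             /* local write offset (tail) */
static uint32_t ringmask = RING_SIZE - 1;
static uint32_t hw_tail;             /* stands in for LP_RING + RING_TAIL */

/* OUT_RING(): store one dword at the tail and advance, wrapping via mask. */
static void out_ring(uint32_t n)
{
	*(uint32_t *)(ring + outring) = n;
	outring += 4;
	outring &= ringmask;
}

/* ADVANCE_LP_RING(): publish the tail so the device sees the new commands. */
static void advance_lp_ring(void)
{
	hw_tail = outring; /* real code: I810_WRITE(LP_RING + RING_TAIL, outring) */
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		out_ring(0xDEAD0000u | i); /* 80 bytes written, wraps past 64 */
	advance_lp_ring();
	printf("tail published at %u\n", hw_tail); /* 16 */
	return 0;
}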
@@ -1,8 +1,8 @@
/**************************************************************************
 *
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including

@@ -10,20 +10,20 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>

@@ -186,7 +186,7 @@ static int i915_move_blit(struct drm_buffer_object * bo,
}

/*
 * Flip destination ttm into cached-coherent AGP,
 * Flip destination ttm into cached-coherent AGP,
 * then blit and subsequently move out again.
 */

@@ -261,7 +261,7 @@ static inline void clflush(volatile void *__p)
#endif

static inline void drm_cache_flush_addr(void *virt)
{
{
	int i;

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
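drm_cache_flush_addr() walks one page in cache-line-sized steps, issuing a clflush per line; the kernel reads the stride from boot_cpu_data.x86_clflush_size. A hedged user-space illustration of the same stride pattern, assuming a 64-byte line and GCC's x86 clflush builtin (compile with -msse2):

#include <stddef.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
#define CLFLUSH_SIZE 64 /* assumption: typical x86 cache line; kernel queries it */

/* Flush every cache line backing one page, mirroring drm_cache_flush_addr(). */
static void cache_flush_page(void *virt)
{
	for (size_t i = 0; i < PAGE_SIZE; i += CLFLUSH_SIZE)
		__builtin_ia32_clflush((char *)virt + i);
}

int main(void)
{
	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	if (page)
		cache_flush_page(page);
	free(page);
	return 0;
}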
@@ -91,7 +91,7 @@ static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
		pci_write_config_dword(pdev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;
@@ -1,10 +1,10 @@
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including

@@ -12,11 +12,11 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.

@@ -24,7 +24,7 @@
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */

#include "drmP.h"

@@ -421,7 +421,7 @@ static int i915_resume(struct drm_device *dev)
	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);

	/* Restore plane info */
	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);

@@ -452,7 +452,7 @@ static int i915_resume(struct drm_device *dev)
	if (IS_I965G(dev))
		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
	udelay(150);

	/* Restore mode */
	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
@@ -1,8 +1,8 @@
/**************************************************************************
 *
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including

@@ -10,20 +10,20 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>

@@ -70,7 +70,7 @@ static void i915_perform_flush(struct drm_device * dev)
	if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
		i915_user_irq_off(dev_priv);
		dev_priv->fence_irq_on = 0;
	} else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
	} else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
		i915_user_irq_on(dev_priv);
		dev_priv->fence_irq_on = 1;
	}
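i915_perform_flush() keeps the user interrupt enabled only while an EXE flush is pending, acting on the two transitions rather than the level. A tiny model of that edge-triggered toggle (illustrative names, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

static bool fence_irq_on;

static void user_irq_on(void)  { puts("irq on");  }
static void user_irq_off(void) { puts("irq off"); }

/* Enable the IRQ only while an exe flush is pending; act on edges only. */
static void perform_flush(bool pending_exe_flush)
{
	if (fence_irq_on && !pending_exe_flush) {
		user_irq_off();
		fence_irq_on = false;
	} else if (!fence_irq_on && pending_exe_flush) {
		user_irq_on();
		fence_irq_on = true;
	}
}

int main(void)
{
	perform_flush(true);  /* edge: off -> on */
	perform_flush(true);  /* level: no change */
	perform_flush(false); /* edge: on -> off */
	return 0;
}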
@@ -3,7 +3,7 @@
 *
 * 32-bit ioctl compatibility routines for the i915 DRM.
 *
 * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
 * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
 *
 *
 * Copyright (C) Paul Mackerras 2005

@@ -49,11 +49,11 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
{
	drm_i915_batchbuffer32_t batchbuffer32;
	drm_i915_batchbuffer_t __user *batchbuffer;

	if (copy_from_user
	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
		return -EFAULT;

	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
	    || __put_user(batchbuffer32.start, &batchbuffer->start)

@@ -65,7 +65,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
			  &batchbuffer->cliprects))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_I915_BATCHBUFFER,
			 (unsigned long) batchbuffer);

@@ -85,11 +85,11 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
{
	drm_i915_cmdbuffer32_t cmdbuffer32;
	drm_i915_cmdbuffer_t __user *cmdbuffer;

	if (copy_from_user
	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
		return -EFAULT;

	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,

@@ -101,7 +101,7 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
			  &cmdbuffer->cliprects))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
}

@@ -208,7 +208,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
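The compat handlers above all follow one recipe: copy the 32-bit struct in from user space, allocate a native-layout copy with compat_alloc_user_space(), widen the pointer fields (which travel as 32-bit integers), and re-enter the native ioctl. A condensed user-space sketch of the widening step only (struct and field names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* 32-bit user layout: pointers travel as 32-bit integers. */
struct batch32 {
	int      start;
	uint32_t cliprects; /* really a 32-bit user pointer */
};

/* Native layout used by the 64-bit ioctl path. */
struct batch {
	int   start;
	void *cliprects;
};

/* The compat recipe: copy scalar fields, widen the pointer fields. */
static void widen(const struct batch32 *in, struct batch *out)
{
	out->start     = in->start;
	out->cliprects = (void *)(unsigned long)in->cliprects;
}

int main(void)
{
	struct batch32 b32 = { 7, 0x12345678u };
	struct batch b;
	widen(&b32, &b);
	printf("start=%d cliprects=%p\n", b.start, b.cliprects);
	return 0;
}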
|
@ -140,7 +140,7 @@ static int mga_driver_device_is_agp(struct drm_device * dev)
|
|||
* device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
|
||||
* device.
|
||||
*/
|
||||
|
||||
|
||||
if ((pdev->device == 0x0525) && pdev->bus->self
|
||||
&& (pdev->bus->self->vendor == 0x3388)
|
||||
&& (pdev->bus->self->device == 0x0021) ) {
|
||||
|
|
|
@ -39,17 +39,17 @@
|
|||
|
||||
typedef struct drm32_mga_init {
|
||||
int func;
|
||||
u32 sarea_priv_offset;
|
||||
u32 sarea_priv_offset;
|
||||
int chipset;
|
||||
int sgram;
|
||||
int sgram;
|
||||
unsigned int maccess;
|
||||
unsigned int fb_cpp;
|
||||
unsigned int fb_cpp;
|
||||
unsigned int front_offset, front_pitch;
|
||||
unsigned int back_offset, back_pitch;
|
||||
unsigned int depth_cpp;
|
||||
unsigned int depth_offset, depth_pitch;
|
||||
unsigned int texture_offset[MGA_NR_TEX_HEAPS];
|
||||
unsigned int texture_size[MGA_NR_TEX_HEAPS];
|
||||
unsigned int back_offset, back_pitch;
|
||||
unsigned int depth_cpp;
|
||||
unsigned int depth_offset, depth_pitch;
|
||||
unsigned int texture_offset[MGA_NR_TEX_HEAPS];
|
||||
unsigned int texture_size[MGA_NR_TEX_HEAPS];
|
||||
u32 fb_offset;
|
||||
u32 mmio_offset;
|
||||
u32 status_offset;
|
||||
|
@ -64,10 +64,10 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
|
|||
drm_mga_init32_t init32;
|
||||
drm_mga_init_t __user *init;
|
||||
int err = 0, i;
|
||||
|
||||
|
||||
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
|
||||
return -EFAULT;
|
||||
|
||||
|
||||
init = compat_alloc_user_space(sizeof(*init));
|
||||
if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
|
||||
|| __put_user(init32.func, &init->func)
|
||||
|
@ -90,7 +90,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
|
|||
|| __put_user(init32.primary_offset, &init->primary_offset)
|
||||
|| __put_user(init32.buffers_offset, &init->buffers_offset))
|
||||
return -EFAULT;
|
||||
|
||||
|
||||
for (i=0; i<MGA_NR_TEX_HEAPS; i++)
|
||||
{
|
||||
err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]);
|
||||
|
@ -98,7 +98,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
|
|||
}
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
|
||||
return drm_ioctl(file->f_dentry->d_inode, file,
|
||||
DRM_IOCTL_MGA_INIT, (unsigned long) init);
|
||||
}
|
||||
|
@ -115,7 +115,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
|
|||
{
|
||||
drm_mga_getparam32_t getparam32;
|
||||
drm_mga_getparam_t __user *getparam;
|
||||
|
||||
|
||||
if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
|
||||
return -EFAULT;
|
||||
|
||||
|
@ -125,7 +125,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
|
|||
|| __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_dentry->d_inode, file,
|
||||
return drm_ioctl(file->f_dentry->d_inode, file,
|
||||
DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
|
||||
}
|
||||
|
||||
|
@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
|
|||
return -EFAULT;
|
||||
|
||||
if (copy_to_user((void __user *)arg, &dma_bootstrap32,
|
||||
sizeof(dma_bootstrap32)))
|
||||
sizeof(dma_bootstrap32)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
|
@ -219,7 +219,7 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd,
|
|||
|
||||
if (nr < DRM_COMMAND_BASE)
|
||||
return drm_compat_ioctl(filp, cmd, arg);
|
||||
|
||||
|
||||
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
|
||||
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
|
||||
|
||||
|
|
|
@@ -1,5 +1,5 @@
/*
 * Copyright 2005 Stephane Marchesin.
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -22,27 +22,39 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Jeremy Kolb <jkolb@brandeis.edu>
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#ifdef NOUVEAU_HAVE_BUFFER

struct drm_ttm_backend *nouveau_create_ttm_backend_entry(struct drm_device * dev)
static struct drm_ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct drm_device * dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	return drm_agp_init_ttm(dev);
	switch (dev_priv->gart_info.type) {
	case NOUVEAU_GART_AGP:
		return drm_agp_init_ttm(dev);
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

int nouveau_fence_types(struct drm_buffer_object *bo,
			uint32_t *fclass,
			uint32_t *type)
static int
nouveau_bo_fence_type(struct drm_buffer_object *bo,
		      uint32_t *fclass, uint32_t *type)
{
	*fclass = 0;
	/* When we get called, *fclass is set to the requested fence class */

	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
		*type = 3;

@@ -51,95 +63,194 @@ int nouveau_fence_types(struct drm_buffer_object *bo,
	return 0;

}

int nouveau_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)

static int
nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
{
	/* We'll do this from user space. */
	return 0;
}

int nouveau_init_mem_type(struct drm_device *dev,
			  uint32_t type,
			  struct drm_mem_type_manager *man)
static int
nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
			 struct drm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (type) {
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_LOCAL:
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
			     _DRM_FLAG_MEMTYPE_CACHED;
		man->drm_bus_maptype = 0;
		break;
	case DRM_BO_MEM_VRAM:
		man->flags = _DRM_FLAG_MEMTYPE_FIXED |
			     _DRM_FLAG_MEMTYPE_MAPPABLE |
			     _DRM_FLAG_NEEDS_IOREMAP;
		man->io_addr = NULL;
		man->drm_bus_maptype = _DRM_FRAME_BUFFER;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);
		break;
	case DRM_BO_MEM_PRIV0:
		/* Unmappable VRAM */
		man->flags = _DRM_FLAG_MEMTYPE_CMA;
		man->drm_bus_maptype = 0;
		break;
	case DRM_BO_MEM_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
				     _DRM_FLAG_MEMTYPE_CACHED;
			man->drm_bus_maptype = 0;
			break;

	case DRM_BO_MEM_VRAM:
		man->flags = _DRM_FLAG_MEMTYPE_FIXED |
			_DRM_FLAG_MEMTYPE_MAPPABLE |
			_DRM_FLAG_NEEDS_IOREMAP;
		man->io_addr = NULL;
		man->drm_bus_maptype = _DRM_FRAME_BUFFER;
		man->io_offset = drm_get_resource_start(dev, 0);
		man->io_size = drm_get_resource_len(dev, 0);
		break;

	case DRM_BO_MEM_TT:
		if (!(drm_core_has_AGP(dev) && dev->agp)) {
			DRM_ERROR("AGP is not enabled for memory type %u\n",
				  (unsigned)type);
			return -EINVAL;
		}

		man->io_offset = dev->agp->agp_info.aper_base;
		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
		man->io_addr = NULL;
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
			_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
			     _DRM_FLAG_MEMTYPE_CSELECT |
			     _DRM_FLAG_NEEDS_IOREMAP;
		man->drm_bus_maptype = _DRM_AGP;
		break;

		case NOUVEAU_GART_SGDMA:
			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
				     _DRM_FLAG_MEMTYPE_CSELECT |
				     _DRM_FLAG_MEMTYPE_CMA;
			man->drm_bus_maptype = _DRM_SCATTER_GATHER;
			break;
		default:
			DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
			DRM_ERROR("Unknown GART type: %d\n",
				  dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->io_addr = NULL;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

uint32_t nouveau_evict_mask(struct drm_buffer_object *bo)
static uint32_t
nouveau_bo_evict_mask(struct drm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_TT:
		return DRM_BO_FLAG_MEM_LOCAL;
	case DRM_BO_MEM_VRAM:
		if (bo->mem.num_pages > 128)
			return DRM_BO_MEM_TT;
		else
			return DRM_BO_MEM_LOCAL;
	default:
		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_TT:
		return DRM_BO_FLAG_MEM_LOCAL;
	default:
		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
	}

	return 0;
}

int nouveau_move(struct drm_buffer_object *bo,
		 int evict,
		 int no_wait,
		 struct drm_bo_mem_reg *new_mem)
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
 */
static int
nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
		     struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_drm_channel *dchan = &dev_priv->channel;
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	uint32_t srch, dsth, page_count;

	/* Can happen during init/takedown */
	if (!dchan->chan)
		return -EINVAL;

	srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
	dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
	if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
		dchan->m2mf_dma_source = srch;
		dchan->m2mf_dma_destin = dsth;

		BEGIN_RING(NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
		OUT_RING  (dchan->m2mf_dma_source);
		OUT_RING  (dchan->m2mf_dma_destin);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (old_mem->mm_node->start << PAGE_SHIFT);
		OUT_RING  (new_mem->mm_node->start << PAGE_SHIFT);
		OUT_RING  (PAGE_SIZE); /* src_pitch */
		OUT_RING  (PAGE_SIZE); /* dst_pitch */
		OUT_RING  (PAGE_SIZE); /* line_length */
		OUT_RING  (line_count);
		OUT_RING  ((1<<8)|(1<<0));
		OUT_RING  (0);
		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (0);

		page_count -= line_count;
	}

	return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
					 DRM_FENCE_TYPE_EXE, 0, new_mem);
}

static int
nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
		struct drm_bo_mem_reg *new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;

	if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
		if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
#if 0
		if (!nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
#endif
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	else
	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
#if 0
		if (nouveau_bo_move_flips(bo, evict, no_wait, new_mem))
#endif
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	else {
		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
//		if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return 0;
}

void nouveau_flush_ttm(struct drm_ttm *ttm)
static void
nouveau_bo_flush_ttm(struct drm_ttm *ttm)
{

}

#endif
static uint32_t nouveau_mem_prios[]  = {
	DRM_BO_MEM_PRIV0,
	DRM_BO_MEM_VRAM,
	DRM_BO_MEM_TT,
	DRM_BO_MEM_LOCAL
};
static uint32_t nouveau_busy_prios[] = {
	DRM_BO_MEM_TT,
	DRM_BO_MEM_PRIV0,
	DRM_BO_MEM_VRAM,
	DRM_BO_MEM_LOCAL
};

struct drm_bo_driver nouveau_bo_driver = {
	.mem_type_prio = nouveau_mem_prios,
	.mem_busy_prio = nouveau_busy_prios,
	.num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
	.num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.fence_type = nouveau_bo_fence_type,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_mask = nouveau_bo_evict_mask,
	.move = nouveau_bo_move,
	.ttm_cache_flush= nouveau_bo_flush_ttm
};
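nouveau_bo_move_m2mf() treats each page as one "line" of a PAGE_SIZE-pitch 2D blit, and the M2MF method takes at most 2047 lines per submission, so the copy is issued in chunks of up to 2047 pages. A standalone sketch of just that chunking arithmetic (no hardware access; the ring writes are reduced to a comment):

#include <stdio.h>

/* M2MF takes at most 2047 lines per submission; one page == one line. */
static int m2mf_chunks(unsigned page_count)
{
	int submissions = 0;

	while (page_count) {
		unsigned line_count = page_count > 2047 ? 2047 : page_count;
		/* real code: BEGIN_RING(...OFFSET_IN, 8); OUT_RING(offsets,
		 * pitches, line_count, ...); then an M2MF NOP as a barrier */
		page_count -= line_count;
		submissions++;
	}
	return submissions;
}

int main(void)
{
	printf("%d\n", m2mf_chunks(5000)); /* 2047 + 2047 + 906 -> 3 */
	return 0;
}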
@@ -41,25 +41,6 @@ static struct pci_device_id pciidlist[] = {
	}
};

#ifdef NOUVEAU_HAVE_BUFFER
static uint32_t nouveau_mem_prios[] = { DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
static uint32_t nouveau_busy_prios[] = { DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };

static struct drm_bo_driver nouveau_bo_driver = {
	.mem_type_prio = nouveau_mem_prios,
	.mem_busy_prio = nouveau_busy_prios,
	.num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
	.num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
	.create_ttm_backend_entry = nouveau_create_ttm_backend_entry,
	.fence_type = nouveau_fence_types,
	.invalidate_caches = nouveau_invalidate_caches,
	.init_mem_type = nouveau_init_mem_type,
	.evict_mask = nouveau_evict_mask,
	.move = nouveau_move,
	.ttm_cache_flush= nouveau_flush_ttm
};
#endif

extern struct drm_ioctl_desc nouveau_ioctls[];
extern int nouveau_max_ioctl;

@@ -99,9 +80,9 @@ static struct drm_driver driver = {
		.probe = probe,
		.remove = __devexit_p(drm_cleanup_pci),
	},
#ifdef NOUVEAU_HAVE_BUFFER
	.bo_driver = &nouveau_bo_driver,
#endif

	.bo_driver = &nouveau_bo_driver,
	.fence_driver = &nouveau_fence_driver,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
@@ -0,0 +1,133 @@
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

static int
nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);

	/* DRM's channel always uses IRQs to signal fences */
	if (class == dev_priv->channel.chan->id)
		return 1;

	/* Other channels don't use IRQs at all yet */
	return 0;
}

static int
nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags,
		   uint32_t *breadcrumb, uint32_t *native_type)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[class];
	struct nouveau_drm_channel *dchan = &dev_priv->channel;

	DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);

	/* We can't emit fences on client channels, update sequence number
	 * and userspace will emit the fence
	 */
	*breadcrumb  = ++chan->next_sequence;
	*native_type = DRM_FENCE_TYPE_EXE;
	if (chan != dchan->chan) {
		DRM_DEBUG("user fence 0x%08x\n", *breadcrumb);
		return 0;
	}

	DRM_DEBUG("emit 0x%08x\n", *breadcrumb);
	BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1);
	OUT_RING  (*breadcrumb);
	BEGIN_RING(NvSubM2MF, 0x0150, 1);
	OUT_RING  (0);
	FIRE_RING ();

	return 0;
}

static void
nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
	uint32_t pending_types = 0;

	DRM_DEBUG("class=%d\n", class);

	pending_types = fc->pending_flush |
			((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
	DRM_DEBUG("pending: 0x%08x 0x%08x\n", pending_types,
		  fc->pending_flush);

	if (pending_types) {
		uint32_t sequence = NV_READ(NV03_FIFO_REGS(class) + 0x48);

		DRM_DEBUG("got 0x%08x\n", sequence);
		drm_fence_handler(dev, class, sequence, pending_types, 0);
	}
}

static void
nouveau_fence_poke_flush(struct drm_device *dev, uint32_t class)
{
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long flags;

	DRM_DEBUG("class=%d\n", class);

	write_lock_irqsave(&fm->lock, flags);
	nouveau_fence_perform_flush(dev, class);
	write_unlock_irqrestore(&fm->lock, flags);
}

void
nouveau_fence_handler(struct drm_device *dev, int channel)
{
	struct drm_fence_manager *fm = &dev->fm;

	DRM_DEBUG("class=%d\n", channel);

	write_lock(&fm->lock);
	nouveau_fence_perform_flush(dev, channel);
	write_unlock(&fm->lock);
}

struct drm_fence_driver nouveau_fence_driver = {
	.num_classes	= 8,
	.wrap_diff	= (1 << 30),
	.flush_diff	= (1 << 29),
	.sequence_mask	= 0xffffffffU,
	.lazy_capable	= 1,
	.has_irq	= nouveau_fence_has_irq,
	.emit		= nouveau_fence_emit,
	.poke_flush	= nouveau_fence_poke_flush
};
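The fence driver compares 32-bit breadcrumbs modulo sequence_mask, and wrap_diff bounds how far apart two live sequences may drift before "newer" and "older" become ambiguous. A minimal model of a wrap-safe signaled test under those parameters (my own illustration; the in-tree comparison lives in the generic drm fence code, not in this file):

#include <stdint.h>
#include <stdio.h>

#define WRAP_DIFF (1u << 30) /* matches nouveau_fence_driver.wrap_diff */

/* Has `seq` been reached, given the sequence last read from hardware?
 * Unsigned subtraction keeps the test correct across 32-bit wraparound
 * as long as live fences stay within WRAP_DIFF of each other. */
static int fence_signaled(uint32_t hw_seq, uint32_t seq)
{
	return (uint32_t)(hw_seq - seq) < WRAP_DIFF;
}

int main(void)
{
	printf("%d\n", fence_signaled(10, 5));          /* 1: already passed */
	printf("%d\n", fence_signaled(5, 10));          /* 0: not yet */
	printf("%d\n", fence_signaled(3, 0xfffffff0u)); /* 1: passed across wrap */
	return 0;
}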
@@ -128,7 +128,7 @@ nouveau_sgdma_unbind(struct drm_ttm_backend *be)
	if (nvbe->is_bound) {
		struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
		unsigned int pte;

		pte = nvbe->pte_start;
		while (pte < (nvbe->pte_start + nvbe->pages)) {
			uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;

@@ -336,4 +336,3 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
	DRM_ERROR("Unimplemented on NV50\n");
	return -EINVAL;
}
@@ -64,10 +64,10 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
{
	drm_r128_init32_t init32;
	drm_r128_init_t __user *init;

	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
		return -EFAULT;

	init = compat_alloc_user_space(sizeof(*init));
	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
	    || __put_user(init32.func, &init->func)

@@ -94,7 +94,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
	    || __put_user(init32.agp_textures_offset,
			  &init->agp_textures_offset))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_R128_INIT, (unsigned long)init);
}
@@ -49,7 +49,7 @@ static int dri_library_name(struct drm_device * dev, char * buf)
	return snprintf(buf, PAGE_SIZE, "%s\n",
			(family < CHIP_R200) ? "radeon" :
			((family < CHIP_R300) ? "r200" :
			 "r300"));
			 "r300"));
}

static struct pci_device_id pciidlist[] = {
@@ -249,7 +249,7 @@ sis_idle(struct drm_device *dev)
	return 0;
}
}


/*
 * Implement a device switch here if needed
 */
@@ -94,9 +94,9 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
		man->drm_bus_maptype = 0;
		break;

	case DRM_BO_MEM_TT:
	case DRM_BO_MEM_TT:
		/* Dynamic agpgart memory */

		if (!(drm_core_has_AGP(dev) && dev->agp)) {
			DRM_ERROR("AGP is not enabled for memory type %u\n",
				  (unsigned)type);

@@ -109,21 +109,21 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,

		/* Only to get pte protection right. */

		man->drm_bus_maptype = _DRM_AGP;
		man->drm_bus_maptype = _DRM_AGP;
		break;

	case DRM_BO_MEM_VRAM:
	case DRM_BO_MEM_VRAM:
		/* "On-card" video ram */

		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
		man->drm_bus_maptype = _DRM_FRAME_BUFFER;
		man->io_addr = NULL;
		return via_vram_info(dev, &man->io_offset, &man->io_size);
		break;

	case DRM_BO_MEM_PRIV0:
	case DRM_BO_MEM_PRIV0:
		/* Pre-bound agpgart memory */

		if (!(drm_core_has_AGP(dev) && dev->agp)) {
			DRM_ERROR("AGP is not enabled for memory type %u\n",
				  (unsigned)type);
|
|||
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
|
||||
*
|
||||
*
|
||||
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
@ -16,22 +16,22 @@
|
|||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Authors:
|
||||
* Thomas Hellstrom.
|
||||
* Partially based on code obtained from Digeo Inc.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Unmaps the DMA mappings.
|
||||
* FIXME: Is this a NoOp on x86? Also
|
||||
* FIXME: What happens if this one is called and a pending blit has previously done
|
||||
* the same DMA mappings?
|
||||
* Unmaps the DMA mappings.
|
||||
* FIXME: Is this a NoOp on x86? Also
|
||||
* FIXME: What happens if this one is called and a pending blit has previously done
|
||||
* the same DMA mappings?
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
|
@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
int num_desc = vsg->num_desc;
|
||||
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
|
||||
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
|
||||
drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
descriptor_this_page;
|
||||
dma_addr_t next = vsg->chain_start;
|
||||
|
||||
|
@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
if (descriptor_this_page-- == 0) {
|
||||
cur_descriptor_page--;
|
||||
descriptor_this_page = vsg->descriptors_per_page - 1;
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
descriptor_this_page;
|
||||
}
|
||||
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
|
||||
|
@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
static void
|
||||
via_map_blit_for_device(struct pci_dev *pdev,
|
||||
const drm_via_dmablit_t *xfer,
|
||||
drm_via_sg_info_t *vsg,
|
||||
drm_via_sg_info_t *vsg,
|
||||
int mode)
|
||||
{
|
||||
unsigned cur_descriptor_page = 0;
|
||||
|
@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
|
||||
drm_via_descriptor_t *desc_ptr = NULL;
|
||||
|
||||
if (mode == 1)
|
||||
if (mode == 1)
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page];
|
||||
|
||||
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
|
||||
|
@ -118,7 +118,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
line_len = xfer->line_length;
|
||||
cur_fb = fb_addr;
|
||||
cur_mem = mem_addr;
|
||||
|
||||
|
||||
while (line_len > 0) {
|
||||
|
||||
remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
|
||||
|
@ -131,10 +131,10 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
VIA_PGOFF(cur_mem), remaining_len,
|
||||
vsg->direction);
|
||||
desc_ptr->dev_addr = cur_fb;
|
||||
|
||||
|
||||
desc_ptr->size = remaining_len;
|
||||
desc_ptr->next = (uint32_t) next;
|
||||
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
|
||||
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
|
||||
DMA_TO_DEVICE);
|
||||
desc_ptr++;
|
||||
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
|
||||
|
@ -142,12 +142,12 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
num_desc++;
|
||||
cur_mem += remaining_len;
|
||||
cur_fb += remaining_len;
|
||||
}
|
||||
|
||||
|
||||
mem_addr += xfer->mem_stride;
|
||||
fb_addr += xfer->fb_stride;
|
||||
}
|
||||
|
@ -160,14 +160,14 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
}
|
||||
|
||||
/*
|
||||
* Function that frees up all resources for a blit. It is usable even if the
|
||||
* Function that frees up all resources for a blit. It is usable even if the
|
||||
* blit info has only been partially built as long as the status enum is consistent
|
||||
* with the actual status of the used resources.
|
||||
*/
|
||||
|
||||
|
||||
static void
|
||||
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
||||
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
||||
{
|
||||
struct page *page;
|
||||
int i;
|
||||
|
@ -184,7 +184,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
case dr_via_pages_locked:
|
||||
for (i=0; i<vsg->num_pages; ++i) {
|
||||
if ( NULL != (page = vsg->pages[i])) {
|
||||
if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
|
||||
if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
|
||||
SetPageDirty(page);
|
||||
page_cache_release(page);
|
||||
}
|
||||
|
@ -199,7 +199,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
vsg->bounce_buffer = NULL;
|
||||
}
|
||||
vsg->free_on_sequence = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Fire a blit engine.
|
||||
|
@ -212,7 +212,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
|
|||
|
||||
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
|
||||
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
|
||||
VIA_DMA_CSR_DE);
|
||||
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
|
||||
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
|
||||
|
@ -232,20 +232,20 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
|
|||
{
|
||||
int ret;
|
||||
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
|
||||
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
|
||||
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
|
||||
first_pfn + 1;
|
||||
|
||||
|
||||
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
|
||||
return -ENOMEM;
|
||||
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
|
||||
vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE),
|
||||
vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE),
|
||||
0, vsg->pages, NULL);
|
||||
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
if (ret != vsg->num_pages) {
|
||||
if (ret < 0)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
vsg->state = dr_via_pages_locked;
|
||||
return -EINVAL;
|
||||
|
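via_lock_all_dma_pages() derives how many user pages a blit touches from its first and last byte: last PFN minus first PFN plus one, where VIA_PFN is just the address shifted down by PAGE_SHIFT. A standalone sketch of that arithmetic (assuming 4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12
#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)

/* Pages spanned by num_lines rows of mem_stride bytes starting at mem_addr. */
static unsigned long blit_num_pages(unsigned long mem_addr,
				    unsigned long num_lines,
				    unsigned long mem_stride)
{
	unsigned long first_pfn = VIA_PFN(mem_addr);
	unsigned long last_pfn  = VIA_PFN(mem_addr + num_lines * mem_stride - 1);

	return last_pfn - first_pfn + 1;
}

int main(void)
{
	/* 16 lines of 4096 bytes starting mid-page straddle 17 pages. */
	printf("%lu\n", blit_num_pages(0x2800, 16, 4096));
	return 0;
}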
@ -261,22 +261,22 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
|
|||
* quite large for some blits, and pages don't need to be contingous.
|
||||
*/
|
||||
|
||||
static int
|
||||
static int
|
||||
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
||||
vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
|
||||
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
|
||||
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
|
||||
vsg->descriptors_per_page;
|
||||
|
||||
if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
|
||||
if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
|
||||
vsg->state = dr_via_desc_pages_alloc;
|
||||
for (i=0; i<vsg->num_desc_pages; ++i) {
|
||||
if (NULL == (vsg->desc_pages[i] =
|
||||
if (NULL == (vsg->desc_pages[i] =
|
||||
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -284,7 +284,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
|
|||
vsg->num_desc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
via_abort_dmablit(struct drm_device *dev, int engine)
|
||||
{
|
||||
|
@ -298,7 +298,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
|
|||
{
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
|
||||
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
|
||||
}
|
||||
|
||||
|
||||
|
@ -309,7 +309,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
|
|||
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
|
||||
* the workqueue task takes care of processing associated with the old blit.
|
||||
*/
|
||||
|
||||
|
||||
void
|
||||
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
||||
{
|
||||
|
@ -329,19 +329,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
|||
spin_lock_irqsave(&blitq->blit_lock, irqsave);
|
||||
}
|
||||
|
||||
done_transfer = blitq->is_active &&
|
||||
done_transfer = blitq->is_active &&
|
||||
(( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
|
||||
done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
|
||||
done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
|
||||
|
||||
cur = blitq->cur;
|
||||
if (done_transfer) {
|
||||
|
||||
blitq->blits[cur]->aborted = blitq->aborting;
|
||||
blitq->done_blit_handle++;
|
||||
DRM_WAKEUP(blitq->blit_queue + cur);
|
||||
DRM_WAKEUP(blitq->blit_queue + cur);
|
||||
|
||||
cur++;
|
||||
if (cur >= VIA_NUM_BLIT_SLOTS)
|
||||
if (cur >= VIA_NUM_BLIT_SLOTS)
|
||||
cur = 0;
|
||||
blitq->cur = cur;
|
||||
|
||||
|
@ -353,7 +353,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
|||
|
||||
blitq->is_active = 0;
|
||||
blitq->aborting = 0;
|
||||
schedule_work(&blitq->wq);
|
||||
schedule_work(&blitq->wq);
|
||||
|
||||
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
|
||||
|
||||
|
@ -365,7 +365,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
|||
blitq->aborting = 1;
|
||||
blitq->end = jiffies + DRM_HZ;
|
||||
}
|
||||
|
||||
|
||||
if (!blitq->is_active) {
|
||||
if (blitq->num_outstanding) {
|
||||
via_fire_dmablit(dev, blitq->blits[cur], engine);
|
||||
|
@ -383,14 +383,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
|||
}
|
||||
via_dmablit_engine_off(dev, engine);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (from_irq) {
|
||||
spin_unlock(&blitq->blit_lock);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
@ -426,13 +426,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
|
|||
|
||||
return active;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Sync. Wait for at least three seconds for the blit to be performed.
|
||||
*/
|
||||
|
||||
static int
|
||||
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
|
||||
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
|
||||
{
|
||||
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
|
||||
|
@ -441,12 +441,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
|
|||
int ret = 0;
|
||||
|
||||
if (via_dmablit_active(blitq, engine, handle, &queue)) {
|
||||
DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
|
||||
DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
|
||||
!via_dmablit_active(blitq, engine, handle, NULL));
|
||||
}
|
||||
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
|
||||
handle, engine, ret);
|
||||
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -468,12 +468,12 @@ via_dmablit_timer(unsigned long data)
|
|||
struct drm_device *dev = blitq->dev;
|
||||
int engine = (int)
|
||||
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
|
||||
|
||||
DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
|
||||
|
||||
DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
|
||||
(unsigned long) jiffies);
|
||||
|
||||
via_dmablit_handler(dev, engine, 0);
|
||||
|
||||
|
||||
if (!timer_pending(&blitq->poll_timer)) {
|
||||
blitq->poll_timer.expires = jiffies+1;
|
||||
add_timer(&blitq->poll_timer);
|
||||
|
@ -497,7 +497,7 @@ via_dmablit_timer(unsigned long data)
|
|||
*/
|
||||
|
||||
|
||||
static void
|
static void
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
via_dmablit_workqueue(void *data)
#else

@@ -513,38 +513,38 @@ via_dmablit_workqueue(struct work_struct *work)
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;

	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while(blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

/*
 * Init all blit engines. Currently we use two, but some hardware have 4.
 */

@@ -558,8 +558,8 @@ via_init_dmablit(struct drm_device *dev)
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;

@@ -585,20 +585,20 @@ via_init_dmablit(struct drm_device *dev)
		init_timer(&blitq->poll_timer);
		blitq->poll_timer.function = &via_dmablit_timer;
		blitq->poll_timer.data = (unsigned long) blitq;
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

@@ -612,7 +612,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incoporate the
	 * extra logic of avoiding them. Make sure there are no.
	 * (Not a big limitation anyway.)
	 */

@@ -638,11 +638,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * we allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||

@@ -668,7 +668,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {

@@ -684,17 +684,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}

/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret=0;

@@ -709,10 +709,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
		if (ret) {
			return (-EINTR == ret) ? -EAGAIN : ret;
		}

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
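
For orientation, the slot-reservation protocol these hunks are reindenting reduces to the sketch below. It assumes only what is visible in this diff (the num_free counter, the blit_lock spinlock, the busy_queue wait queue, and the generic DRM_WAIT_ON helper) and is an illustration, not the driver's verbatim code:

	static int via_grab_slot_sketch(drm_via_blitq_t *blitq)
	{
		unsigned long irqsave;
		int ret = 0;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
		while (blitq->num_free == 0) {
			spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
			/* Sleep for up to one second; an interrupting signal
			 * surfaces as -EINTR, which the hunk above maps to
			 * -EAGAIN so userspace can retry. */
			DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ,
				    blitq->num_free > 0);
			if (ret)
				return (-EINTR == ret) ? -EAGAIN : ret;
			spin_lock_irqsave(&blitq->blit_lock, irqsave);
		}
		blitq->num_free--;
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
		return 0;
	}
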
@@ -723,7 +723,7 @@
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

@@ -739,8 +739,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;

@@ -771,15 +771,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

@@ -787,7 +787,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
/*
 * Sync on a previously submitted blit. Note that the X server use signals extensively, and
 * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */

@@ -797,7 +797,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

@@ -807,15 +807,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri

	return err;
}

/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */

int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
	drm_via_dmablit_t *xfer = data;
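
Both IOCTL comments above spell out the same userspace contract: -EAGAIN means the wait was interrupted by a signal and the call should simply be reissued. From the caller's side that amounts to the retry loop below (a sketch only; the drmCommandWriteRead wrapper and the request index are assumed from libdrm, while the argument struct name is taken from this diff):

	drm_via_dmablit_t xfer;	/* filled in by the caller */
	int ret;

	do {
		/* DRM_VIA_DMA_BLIT queues the blit; the sync IOCTL follows
		 * the same -EAGAIN convention. */
		ret = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT,
					  &xfer, sizeof(xfer));
	} while (ret == -EAGAIN);
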
@@ -1,5 +1,5 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright 2005 Thomas Hellstrom.
 * All Rights Reserved.
 *

@@ -17,12 +17,12 @@
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Thomas Hellstrom.
 * Register info from Digeo Inc.
 */

@@ -67,7 +67,7 @@ typedef struct _drm_via_blitq {
	unsigned cur;
	unsigned num_free;
	unsigned num_outstanding;
	unsigned long end;
	int aborting;
	int is_active;
	drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];

@@ -77,46 +77,46 @@ typedef struct _drm_via_blitq {
	struct work_struct wq;
	struct timer_list poll_timer;
} drm_via_blitq_t;

/*
 * PCI DMA Registers
 * Channels 2 & 3 don't seem to be implemented in hardware.
 */

#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */

#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */

#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */

#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */

#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */

#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */

#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */

/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
@@ -115,7 +115,7 @@ void via_lastclose(struct drm_device *dev)
	dev_priv->vram_initialized = 0;
	dev_priv->agp_initialized = 0;
	mutex_unlock(&dev->struct_mutex);
}

int via_mem_alloc(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
@@ -78,7 +78,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
 * @type: Type of the current batch
 *
 * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
 *
 * This function assumes that @type is on the range [0,3].
 */
unsigned int get_batch_command(enum xgi_batch_type type)

@@ -86,7 +86,7 @@ unsigned int get_batch_command(enum xgi_batch_type type)
	static const unsigned int ports[4] = {
		0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
	};

	return ports[type];
}

@@ -159,7 +159,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
	2 - fb
	3 - logout
*/
int xgi_state_change(struct xgi_info * info, unsigned int to,
		     unsigned int from)
{
#define STATE_CONSOLE 0

@@ -219,7 +219,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info)
	}

	xgi_waitfor_pci_idle(info);

	(void) memset(&info->cmdring, 0, sizeof(info->cmdring));
}

@@ -243,7 +243,7 @@ static void triggerHWCommandList(struct xgi_info * info)
void xgi_emit_flush(struct xgi_info * info, bool stop)
{
	const u32 flush_command[8] = {
		((0x10 << 24)
		 | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
		BEGIN_LINK_ENABLE_MASK | (0x00004),
		0x00000000, 0x00000000,

@@ -266,9 +266,9 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
		info->cmdring.ring_offset = 0;
	}

	hw_addr = info->cmdring.ring_hw_base
		+ info->cmdring.ring_offset;
	batch_addr = info->cmdring.ptr
		+ (info->cmdring.ring_offset / 4);

	for (i = 0; i < (flush_size / 4); i++) {
@@ -352,7 +352,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
	struct drm_device *dev = (struct drm_device *) arg;
	struct xgi_info *info = dev->dev_private;
	const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
					(0x2800
					 + M2REG_AUTO_LINK_STATUS_ADDRESS)))
		& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
		   | M2REG_ACTIVE_INTERRUPT_0_MASK

@@ -361,7 +361,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)

	if (irq_bits != 0) {
		DRM_WRITE32(info->mmio_map,
			    0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
			    cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
		xgi_fence_handler(dev);

@@ -413,7 +413,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags)

	return 0;

fail:
	drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
	return err;
@@ -32,7 +32,7 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
	      struct drm_file * filp)
{
	struct drm_memblock_item *block;
	const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
		? "on-card" : "GART";

@@ -43,7 +43,7 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
		return -EINVAL;
	}

	if ((alloc->location == XGI_MEMLOC_LOCAL)
	    ? !info->fb_heap_initialized : !info->pcie_heap_initialized) {
		DRM_ERROR("Attempt to allocate from uninitialized memory "
			  "pool (0x%08x).\n", alloc->location);

@@ -118,7 +118,7 @@ int xgi_free_ioctl(struct drm_device * dev, void * data,
int xgi_fb_heap_init(struct xgi_info * info)
{
	int err;

	mutex_lock(&info->dev->struct_mutex);
	err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,
				 XGI_FB_HEAP_START,
@@ -72,7 +72,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)

int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
			    uint32_t flags, uint32_t * sequence,
			    uint32_t * native_type)
{
	struct xgi_info * info = dev->dev_private;

@@ -43,7 +43,7 @@ struct drm_map32 {
	u32 handle;	/**< User-space: "Handle" to pass to mmap() */
	int mtrr;	/**< MTRR slot used */
};

struct drm32_xgi_bootstrap {
	struct drm_map32 gart;
};
@@ -90,7 +90,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)

	DRM_WRITE8(map, 0xb057, 8);
	while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) {
		while (0 != ((--time_out) & 0xfff))
			/* empty */ ;

		if (0 == time_out) {

@@ -117,8 +117,8 @@ static void xgi_ge_hang_reset(struct drm_map * map)
			DRM_WRITE8(map, 0x3d4, 0x36);
			old_36 = DRM_READ8(map, 0x3d5);
			DRM_WRITE8(map, 0x3d5, old_36 | 0x10);

			while (0 != ((--time_out) & 0xfff))
				/* empty */ ;

			DRM_WRITE8(map, 0x3d5, old_36);

@@ -134,7 +134,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
	DRM_WRITE8(map, 0xb057, 0);
}

bool xgi_ge_irq_handler(struct xgi_info * info)
{
	const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));

@@ -143,7 +143,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
	/* Check GE on/off */
	if (0 == (0xffffc0f0 & int_status)) {
		if (0 != (0x1000 & int_status)) {
			/* We got GE stall interrupt.
			 */
			DRM_WRITE32(info->mmio_map, 0x2810,
				    cpu_to_le32(int_status | 0x04000000));

@@ -289,7 +289,7 @@ static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
		printk("%1x ", i);

		for (j = 0; j < 0x10; j++) {
			u8 temp = DRM_READ8(info->mmio_map,
					    regbase + (i * 0x10) + j);
			printk("%3x", temp);
		}

@@ -1,5 +1,5 @@
/****************************************************************************
 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
 *
 * All Rights Reserved.
 *

@@ -4,7 +4,7 @@
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation on the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
@@ -2,10 +2,10 @@
 # script to create a Linux Kernel tree from the DRM tree for diffing etc..
 #
 # Original author - Dave Airlie (C) 2004 - airlied@linux.ie
 #
+# kernel_version to remove below (e.g. 2.6.24)
 
-if [ $# -lt 1 ] ;then
-	echo usage: $0 output_dir
+if [ $# -lt 2 ] ;then
+	echo usage: $0 output_dir kernel_version
 	exit 1
 fi

@@ -15,43 +15,23 @@ if [ ! -d shared-core -o ! -d linux-core ] ;then
 fi
 
 OUTDIR=$1/drivers/char/drm/
+KERNEL_VERS=$2
 
 echo "Copying kernel independent files"
 mkdir -p $OUTDIR
+mkdir -p $OUTDIR/.tmp
 
 ( cd linux-core/ ; make drm_pciids.h )
-cp shared-core/*.[ch] $OUTDIR
-cp linux-core/*.[ch] $OUTDIR
-cp linux-core/Makefile.kernel $OUTDIR/Makefile
+cp shared-core/*.[ch] $OUTDIR/.tmp
+cp linux-core/*.[ch] $OUTDIR/.tmp
+cp linux-core/Makefile.kernel $OUTDIR/.tmp/Makefile
 
 echo "Copying 2.6 Kernel files"
-cp linux-core/Kconfig $OUTDIR/
+cp linux-core/Kconfig $OUTDIR/.tmp
 
+./scripts/drm-scripts-gentree.pl $KERNEL_VERS $OUTDIR/.tmp $OUTDIR
 cd $OUTDIR
 
+rm -rf .tmp
 rm via_ds.[ch]
-for i in via*.[ch]
-do
-unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DVIA_HAVE_CORE_MM $i > $i.tmp
-mv $i.tmp $i
-done
-
 rm sis_ds.[ch]
-for i in sis*.[ch]
-do
-unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DSIS_HAVE_CORE_MM $i > $i.tmp
-mv $i.tmp $i
-done
-
-for i in i915*.[ch]
-do
-unifdef -D__linux__ -DI915_HAVE_FENCE -DI915_HAVE_BUFFER $i > $i.tmp
-mv $i.tmp $i
-done
-
-for i in drm*.[ch]
-do
-unifdef -UDRM_ODD_MM_COMPAT -D__linux__ $i > $i.tmp
-mv $i.tmp $i
-done
 cd -
@@ -0,0 +1,254 @@
#!/usr/bin/perl
#
# Original version were part of Gerd Knorr's v4l scripts.
#
# Several improvements by (c) 2005-2007 Mauro Carvalho Chehab
#
# Largely re-written (C) 2007 Trent Piepho <xyzzy@speakeasy.org>
# Stolen for DRM usage by airlied
#
# Theory of Operation
#
# This acts as a sort of mini version of cpp, which will process
# #if/#elif/#ifdef/etc directives to strip out code used to support
# multiple kernel versions or otherwise not wanted to be sent upstream to
# git.
#
# Conditional compilation directives fall into two catagories,
# "processed" and "other".  The "other" directives are ignored and simply
# output as they come in without changes (see 'keep' exception).  The
# "processed" variaty are evaluated and only the lines in the 'true' part
# are kept, like cpp would do.
#
# If gentree knows the result of an expression, that directive will be
# "processed", otherwise it will be an "other".  gentree knows the value
# of LINUX_VERSION_CODE, BTTV_VERSION_CODE, the KERNEL_VERSION(x,y,z)
# macro, numeric constants like 0 and 1, and a few defines like MM_KERNEL
# and STV0297_CS2.
#
# An exception is if the comment "/*KEEP*/" appears after the expression,
# in which case that directive will be considered an "other" and not
# processed, other than to remove the keep comment.
#
# Known bugs:
# don't specify the root directory e.g. '/' or even '////'
# directives continued with a back-slash will always be ignored
# you can't modify a source tree in-place, i.e. source dir == dest dir

use strict;
use File::Find;
use Fcntl ':mode';

my $VERSION = shift;
my $SRC = shift;
my $DESTDIR = shift;

if (!defined($DESTDIR)) {
	print "Usage:\ngentree.pl\t<version> <source dir> <dest dir>\n\n";
	exit;
}

my $BTTVCODE = KERNEL_VERSION(0,9,17);
my ($LINUXCODE, $extra) = kernel_version($VERSION);
my $DEBUG = 0;

my %defs = (
	'LINUX_VERSION_CODE' => $LINUXCODE,
	'MM_KERNEL' => ($extra =~ /-mm/)?1:0,
	'DRM_ODD_MM_COMPAT' => 0,
	'I915_HAVE_FENCE' => 1,
	'I915_HAVE_BUFFER' => 1,
	'VIA_HAVE_DMABLIT' => 1,
	'VIA_HAVE_CORE_MM' => 1,
	'VIA_HAVE_FENCE' => 1,
	'VIA_HAVE_BUFFER' => 1,
	'SIS_HAVE_CORE_MM' => 1,
	'DRM_FULL_MM_COMPAT' => 1,
	'__linux__' => 1,
);

#################################################################
# helpers

sub kernel_version($) {
	$_[0] =~ m/(\d+)\.(\d+)\.(\d+)(.*)/;
	return ($1*65536 + $2*256 + $3, $4);
}

# used in eval()
sub KERNEL_VERSION($$$) { return $_[0]*65536 + $_[1]*256 + $_[2]; }

sub evalexp($) {
	local $_ = shift;
	s|/\*.*?\*/||go;	# delete /* */ comments
	s|//.*$||o;		# delete // comments
	s/\bdefined\s*\(/(/go;	# defined(foo) to (foo)
	while (/\b([_A-Za-z]\w*)\b/go) {
		if (exists $defs{$1}) {
			my $id = $1; my $pos = $-[0];
			s/$id/$defs{$id}/;
			pos = $-[0];
		} elsif ($1 ne 'KERNEL_VERSION') {
			return(undef);
		}
	}
	return(eval($_) ? 1 : 0);
}

#################################################################
# filter out version-specific code

sub filter_source ($$) {
	my ($in,$out) = @_;
	my $line;
	my $level=0;
	my %if = ();
	my %state = ();

	my @dbgargs = \($level, %state, %if, $line);
	sub dbgline($\@) {
		my $level = ${$_[1][0]};
		printf STDERR ("/* BP %4d $_[0] state=$_[1][1]->{$level} if=$_[1][2]->{$level} level=$level (${$_[1][3]}) */\n", $.) if $DEBUG;
	}

	open IN, '<', $in or die "Error opening $in: $!\n";
	open OUT, '>', $out or die "Error opening $out: $!\n";

	print STDERR "File: $in, for kernel $VERSION($LINUXCODE)/\n" if $DEBUG;

	while ($line = <IN>) {
		chomp $line;
		next if ($line =~ m/^#include \"compat.h\"/o);
#		next if ($line =~ m/[\$]Id:/);

		# For "#if 0 /*KEEP*/;" the ; should be dropped too
		if ($line =~ m@^\s*#\s*if(n?def)?\s.*?(\s*/\*\s*(?i)keep\s*\*/;?)@) {
			$state{$level} = "ifother";
			$if{$level} = 1;
			dbgline "#if$1 (keep)", @dbgargs;
			$line =~ s/\Q$2\E//;
			$level++;
		}
		# handle all ifdef/ifndef lines
		elsif ($line =~ /^\s*#\s*if(n?)def\s*(\w+)/o) {
			if (exists $defs{$2}) {
				$state{$level} = 'if';
				$if{$level} = ($1 eq 'n') ? !$defs{$2} : $defs{$2};
				dbgline "#if$1def $2", @dbgargs;
				$level++;
				next;
			}
			$state{$level} = "ifother";
			$if{$level} = 1;
			dbgline "#if$1def (other)", @dbgargs;
			$level++;
		}
		# handle all ifs
		elsif ($line =~ /^\s*#\s*if\s+(.*)$/o) {
			my $res = evalexp($1);
			if (defined $res) {
				$state{$level} = 'if';
				$if{$level} = $res;
				dbgline '#if '.($res?'(yes)':'(no)'), @dbgargs;
				$level++;
				next;
			} else {
				$state{$level} = 'ifother';
				$if{$level} = 1;
				dbgline '#if (other)', @dbgargs;
				$level++;
			}
		}
		# handle all elifs
		elsif ($line =~ /^\s*#\s*elif\s+(.*)$/o) {
			my $exp = $1;
			$level--;
			$level < 0 and die "more elifs than ifs";
			$state{$level} =~ /if/ or die "unmatched elif";

			if ($state{$level} eq 'if' && !$if{$level}) {
				my $res = evalexp($exp);
				defined $res or die 'moving from if to ifother';
				$state{$level} = 'if';
				$if{$level} = $res;
				dbgline '#elif1 '.($res?'(yes)':'(no)'), @dbgargs;
				$level++;
				next;
			} elsif ($state{$level} ne 'ifother') {
				$if{$level} = 0;
				$state{$level} = 'elif';
				dbgline '#elif0', @dbgargs;
				$level++;
				next;
			}
			$level++;
		}
		elsif ($line =~ /^\s*#\s*else/o) {
			$level--;
			$level < 0 and die "more elses than ifs";
			$state{$level} =~ /if/ or die "unmatched else";
			$if{$level} = !$if{$level} if ($state{$level} eq 'if');
			$state{$level} =~ s/^if/else/o;	# if -> else, ifother -> elseother, elif -> elif
			dbgline '#else', @dbgargs;
			$level++;
			next if $state{$level-1} !~ /other$/o;
		}
		elsif ($line =~ /^\s*#\s*endif/o) {
			$level--;
			$level < 0 and die "more endifs than ifs";
			dbgline '#endif', @dbgargs;
			next if $state{$level} !~ /other$/o;
		}

		my $print = 1;
		for (my $i=0;$i<$level;$i++) {
			next if $state{$i} =~ /other$/o;	# keep code in ifother/elseother blocks
			if (!$if{$i}) {
				$print = 0;
				dbgline 'DEL', @{[\$i, \%state, \%if, \$line]};
				last;
			}
		}
		print OUT "$line\n" if $print;
	}
	close IN;
	close OUT;
}

#################################################################

sub parse_dir {
	my $file = $File::Find::name;

	return if ($file =~ /CVS/);
	return if ($file =~ /~$/);

	my $f2 = $file;
	$f2 =~ s/^\Q$SRC\E/$DESTDIR/;

	my $mode = (stat($file))[2];
	if ($mode & S_IFDIR) {
		print("mkdir -p '$f2'\n");
		system("mkdir -p '$f2'"); # should check for error
		return;
	}
	print "from $file to $f2\n";

	if ($file =~ m/.*\.[ch]$/) {
		filter_source($file, $f2);
	} else {
		system("cp $file $f2");
	}
}


# main

printf "kernel is %s (0x%x)\n",$VERSION,$LINUXCODE;

# remove any trailing slashes from dir names.  don't pass in just '/'
$SRC =~ s|/*$||; $DESTDIR =~ s|/*$||;

print "finding files at $SRC\n";

find({wanted => \&parse_dir, no_chdir => 1}, $SRC);
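
The "/*KEEP*/" escape hatch described in the script's header comment is easiest to see on an example. Given input like the fragment below (illustrative only, though it mirrors the INIT_WORK compat code in the via driver earlier in this diff), gentree.pl would normally evaluate the version test against the requested kernel and keep a single branch; the marker instead forces the whole directive through unprocessed, minus the marker itself:

	/* Illustrative input for gentree.pl, not a file in this tree. */
	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) /*KEEP*/
		INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
	#else
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
	#endif
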
@@ -704,7 +704,7 @@ struct drm_fence_arg {
/* Mask: Make sure the buffer is in cached memory when mapped
 * Flags: Acknowledge.
 * Buffers allocated with this flag should not be used for suballocators
 * This type may have issues on CPUs with over-aggressive caching
 * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
 */
#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)

@@ -1019,7 +1019,7 @@ struct drm_mode_mode_cmd {
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)

@@ -45,7 +45,7 @@
#endif

/** Maximum number of drawables in the SAREA */
#define SAREA_MAX_DRAWABLES 256

#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
@@ -3,7 +3,7 @@
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including

@@ -11,11 +11,11 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.

@@ -23,7 +23,7 @@
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"

@@ -151,11 +151,11 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)

	/* Enable vblank on pipe A for older X servers
	 */
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

	/* Program Hardware Status Page */
	if (!IS_G33(dev)) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

		if (!dev_priv->status_page_dmah) {

@@ -308,7 +308,7 @@ static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret);	*/

	return ret;
}

@@ -344,12 +344,12 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer,
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

@@ -840,7 +840,7 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
	int ret = 0;

	memset(&relocatee, 0, sizeof(relocatee));

	mutex_lock(&dev->struct_mutex);
	relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
	mutex_unlock(&dev->struct_mutex);

@@ -849,7 +849,7 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
		ret = -EINVAL;
		goto out_err;
	}

	while (buf_reloc_handle) {
		ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count);
		if (ret) {

@@ -857,11 +857,11 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
			break;
		}
	}

	mutex_lock(&dev->struct_mutex);
	drm_bo_usage_deref_locked(&relocatee.buf);
	mutex_unlock(&dev->struct_mutex);

out_err:
	return ret;
}

@@ -991,12 +991,12 @@ static int i915_execbuffer(struct drm_device *dev, void *data,

	ret = drm_bo_read_lock(&dev->bm.bm_lock);
	if (ret)
		return ret;

	/*
	 * The cmdbuf_mutex makes sure the validate-submit-fence
	 * operation is atomic.
	 */

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);

@@ -1180,7 +1180,7 @@ drm_i915_mmio_entry_t mmio_table[] = {
	 I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
	 0x2350,
	 8
	}
};

static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
@@ -1,7 +1,7 @@
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including

@@ -9,11 +9,11 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.

@@ -21,7 +21,7 @@
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRM_H_

@@ -294,11 +294,11 @@ typedef struct drm_i915_vblank_swap {
	unsigned int sequence;
} drm_i915_vblank_swap_t;

#define I915_MMIO_READ 0
#define I915_MMIO_WRITE 1

#define I915_MMIO_MAY_READ 0x1
#define I915_MMIO_MAY_WRITE 0x2

#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
#define MMIO_REGS_IA_VERTICES_COUNT 1

@@ -319,7 +319,7 @@ typedef struct drm_i915_mmio_entry {
typedef struct drm_i915_mmio {
	unsigned int read_write:1;
	unsigned int reg:31;
	void __user *data;
} drm_i915_mmio_t;

typedef struct drm_i915_hws_addr {
@@ -1,10 +1,10 @@
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including

@@ -12,11 +12,11 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.

@@ -24,7 +24,7 @@
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_

@@ -157,74 +157,74 @@ struct drm_i915_private {

	/* Register state */
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPABASE;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBBASE;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVCLK_DIVISOR_VGA0;
	u32 saveVCLK_DIVISOR_VGA1;
	u32 saveVCLK_POST_DIV;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 saveLVDSPP_ON;
	u32 saveLVDSPP_OFF;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_CYCLE;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[24];

@@ -306,7 +306,7 @@ extern void i915_mem_release(struct drm_device * dev,
extern void i915_fence_handler(struct drm_device *dev);
extern int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
				    uint32_t flags,
				    uint32_t *sequence,
				    uint32_t *native_type);
extern void i915_poke_flush(struct drm_device *dev, uint32_t class);
extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags);

@@ -322,7 +322,7 @@ extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
			      struct drm_mem_type_manager *man);
extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
extern int i915_move(struct drm_buffer_object *bo, int evict,
		     int no_wait, struct drm_bo_mem_reg *new_mem);
void i915_flush_ttm(struct drm_ttm *ttm);
#endif

@@ -339,7 +339,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);

#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))

#define I915_VERBOSE 0

@@ -443,7 +443,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define VGA_CR_INDEX_CGA 0x3d4
#define VGA_CR_DATA_CGA 0x3d5

#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)

@@ -507,7 +507,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);

#define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4
#define I915REG_INT_MASK_R 0x020a8
#define I915REG_INT_ENABLE_R 0x020a0
#define I915REG_INSTPM 0x020c0

@@ -577,7 +577,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
#define SR01_SCREEN_OFF (1<<5)

#define PPCR 0x61204
#define PPCR_ON (1<<0)

@@ -599,29 +599,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define ADPA_DPMS_OFF (3<<10)

#define NOPID 0x2094
#define LP_RING 0x2030
#define HP_RING 0x2040
/* The binner has its own ring buffer:
 */
#define HWB_RING 0x2400

#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0x0xFFFFF000
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x001FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000

/* Instruction parser error reg:
 */

@@ -639,7 +639,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 */
#define DMA_FADD_S 0x20d4

/* Cache mode 0 reg.
 *  - Manipulating render cache behaviour is central
 *    to the concept of zone rendering, tuning this reg can help avoid
 *    unnecessary render cache reads and even writes (for z/stencil)

@@ -668,7 +668,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define BINCTL 0x2420
#define BC_MASK (1 << 9)

/* Binned scene info.
 */
#define BINSCENE 0x2428
#define BS_OP_LOAD (1 << 8)

@@ -686,7 +686,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 */
#define BDCD 0x2488

/* Binner pointer cache debug reg:
 */
#define BPCD 0x248c

@@ -749,9 +749,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define BLT_DEPTH_32 (3<<24)
#define BLT_ROP_GXCOPY (0xcc<<16)

#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)

#define MI_BATCH_NON_SECURE_I965 (1<<8)

@@ -848,20 +848,20 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/* I830 CRTC registers */
#define HTOTAL_A 0x60000
#define HBLANK_A 0x60004
#define HSYNC_A 0x60008
#define VTOTAL_A 0x6000c
#define VBLANK_A 0x60010
#define VSYNC_A 0x60014
#define PIPEASRC 0x6001c
#define BCLRPAT_A 0x60020
#define VSYNCSHIFT_A 0x60028

#define HTOTAL_B 0x61000
#define HBLANK_B 0x61004
#define HSYNC_B 0x61008
#define VTOTAL_B 0x6100c
#define VBLANK_B 0x61010
#define VSYNC_B 0x61014
#define PIPEBSRC 0x6101c
#define BCLRPAT_B 0x61020
#define VSYNCSHIFT_B 0x61028

@@ -996,7 +996,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 */
# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
 * This best be set to the default value (3) or the CRT won't work. No,
 * I don't entirely understand what this does...
 */

@@ -1017,7 +1017,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)

#define ADPA 0x61100
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0

@@ -1147,7 +1147,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PIPEACONF_PIPE_UNLOCKED 0
#define PIPEACONF_PIPE_LOCKED (1<<25)
#define PIPEACONF_PALETTE 0
#define PIPEACONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)

@@ -1158,7 +1158,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PIPEBCONF_DISABLE 0
#define PIPEBCONF_DOUBLE_WIDE (1<<30)
#define PIPEBCONF_GAMMA (1<<24)
#define PIPEBCONF_PALETTE 0

#define PIPEBGCMAXRED 0x71010

@@ -1170,7 +1170,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);

#define DSPACNTR 0x70180
#define DSPBCNTR 0x71180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0

@@ -1178,7 +1178,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define DISPPLANE_8BPP (0x2<<26)
#define DISPPLANE_15_16BPP (0x4<<26)
#define DISPPLANE_16BPP (0x5<<26)
#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
#define DISPPLANE_32BPP (0x7<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0

@@ -1284,7 +1284,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)

#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 || \
			(dev)->pci_device == 0x29B2 || \
			(dev)->pci_device == 0x29D2)

#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
@@ -3,7 +3,7 @@
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including

@@ -11,11 +11,11 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.

@@ -23,7 +23,7 @@
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"

@@ -310,7 +310,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)

	pipea_stats = I915_READ(I915REG_PIPEASTAT);
	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);

	temp = I915_READ16(I915REG_INT_IDENTITY_R);
	temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);

@@ -354,7 +354,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)

	if (dev_priv->swaps_pending > 0)
		drm_locked_tasklet(dev, i915_vblank_tasklet);
	I915_WRITE(I915REG_PIPEASTAT,
		   pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
		   I915_VBLANK_CLEAR);
	I915_WRITE(I915REG_PIPEBSTAT,

@@ -407,7 +407,7 @@ void i915_user_irq_off(struct drm_i915_private *dev_priv)
	}
	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{

@@ -421,7 +421,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
		return 0;

	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	i915_user_irq_on(dev_priv);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);

@@ -453,7 +453,7 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev,
	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(counter))
		      - *sequence) <= (1<<23)));

	*sequence = cur_vblank;

	return ret;

@@ -528,6 +528,7 @@ void i915_enable_interrupt (struct drm_device *dev)
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;

	dev_priv->irq_enable_reg = USER_INT_FLAG;

	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
		dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)

@@ -551,7 +552,7 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
	}

	if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
		DRM_ERROR("%s called with invalid pipe 0x%x\n",
			  __FUNCTION__, pipe->pipe);
		return -EINVAL;
	}
@@ -375,7 +375,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
		DRM_ERROR("get_heap failed");
		return -EFAULT;
	}

	if (!*heap) {
		DRM_ERROR("heap not initialized?");
		return -EFAULT;
@@ -53,7 +53,7 @@
 *
 * \param dev_priv pointer to device private data structure.
 * \param entries number of free entries in the FIFO to wait for.
 *
 * \returns zero on success, or -EBUSY if the timeout (specificed by
 * drm_mach64_private::usec_timeout) occurs.
 */

@@ -107,10 +107,10 @@ int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv)
 *
 * This function should be called before writing new entries to the ring
 * buffer.
 *
 * \param dev_priv pointer to device private data structure.
 * \param n number of free entries in the ring buffer to wait for.
 *
 * \returns zero on success, or -EBUSY if the timeout (specificed by
 * drm_mach64_private_t::usec_timeout) occurs.
 *

@@ -139,7 +139,7 @@ int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n)
}

/**
 * Wait until all DMA requests have been processed...
 *
 * \sa mach64_wait_ring()
 */
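
The kernel-doc above describes the usual mach64 poll-with-timeout idiom: spin on a condition for at most usec_timeout microseconds, then fail with -EBUSY. A minimal sketch of that shape, assuming only the usec_timeout field named in the comments and a hypothetical ring_space() helper standing in for the real FIFO/ring checks:

	static int wait_ring_sketch(drm_mach64_private_t *dev_priv, int n)
	{
		int i;

		for (i = 0; i < dev_priv->usec_timeout; i++) {
			if (ring_space(dev_priv) >= n)	/* hypothetical check */
				return 0;
			DRM_UDELAY(1);	/* back off one microsecond */
		}
		return -EBUSY;	/* timed out, as documented above */
	}
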
@@ -171,14 +171,14 @@ extern void mach64_driver_irq_uninstall(struct drm_device * dev);
#define MACH64_AGP_CNTL 0x014c
#define MACH64_ALPHA_TST_CNTL 0x0550

#define MACH64_DSP_CONFIG 0x0420
#define MACH64_DSP_ON_OFF 0x0424
#define MACH64_EXT_MEM_CNTL 0x04ac
#define MACH64_GEN_TEST_CNTL 0x04d0
#define MACH64_HW_DEBUG 0x047c
#define MACH64_MEM_ADDR_CONFIG 0x0434
#define MACH64_MEM_BUF_CNTL 0x042c
#define MACH64_MEM_CNTL 0x04b0

#define MACH64_BM_ADDR 0x0648
#define MACH64_BM_COMMAND 0x0188

@@ -205,16 +205,16 @@ extern void mach64_driver_irq_uninstall(struct drm_device * dev);
#define MACH64_CLR_CMP_CLR 0x0700
#define MACH64_CLR_CMP_CNTL 0x0708
#define MACH64_CLR_CMP_MASK 0x0704
#define MACH64_CONFIG_CHIP_ID 0x04e0
#define MACH64_CONFIG_CNTL 0x04dc
#define MACH64_CONFIG_STAT0 0x04e4
#define MACH64_CONFIG_STAT1 0x0494
#define MACH64_CONFIG_STAT2 0x0498
#define MACH64_CONTEXT_LOAD_CNTL 0x072c
#define MACH64_CONTEXT_MASK 0x0720
#define MACH64_COMPOSITE_SHADOW_ID 0x0798
#define MACH64_CRC_SIG 0x04e8
#define MACH64_CUSTOM_MACRO_CNTL 0x04d4

#define MACH64_DP_BKGD_CLR 0x06c0
#define MACH64_DP_FOG_CLR 0x06c4

@@ -358,7 +358,7 @@ extern void mach64_driver_irq_uninstall(struct drm_device * dev);
#define MACH64_TEX_0_OFF 0x05c0
#define MACH64_TEX_CNTL 0x0774
#define MACH64_TEX_SIZE_PITCH 0x0770
#define MACH64_TIMER_CONFIG 0x0428

#define MACH64_VERTEX_1_ARGB 0x0254
#define MACH64_VERTEX_1_S 0x0240

@@ -758,7 +758,7 @@ mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)

#define RING_WRITE_OFS _ring_write

#define BEGIN_RING( n ) \
do { \
	if ( MACH64_VERBOSE ) { \
		DRM_INFO( "BEGIN_RING( %d ) in %s\n", \

@@ -789,7 +789,7 @@ do { \
	_ring_write &= _ring_mask; \
} while (0)

#define ADVANCE_RING() \
do { \
	if ( MACH64_VERBOSE ) { \
		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \

@@ -808,12 +808,12 @@ do { \

#define DMALOCALS \
	drm_mach64_freelist_t *_entry = NULL; \
	struct drm_buf *_buf = NULL; \
	u32 *_buf_wptr; int _outcount

#define GETBUFPTR( __buf ) \
	((dev_priv->is_pci) ? \
	 ((u32 *)(__buf)->address) : \
	 ((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))

#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)

@@ -844,7 +844,7 @@ static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
	return 0;
}

#define DMASETPTR( _p ) \
do { \
	_buf = (_p); \
	_outcount = 0; \

@@ -913,10 +913,10 @@ do { \
			__FUNCTION__, _buf->idx ); \
			return -EFAULT; \
		} \
	} else { \
		if (list_empty(&dev_priv->placeholders)) { \
			DRM_ERROR( "DMAADVANCE() in %s: empty placeholder list\n", \
				__FUNCTION__ ); \
			return -EFAULT; \
		} \
		ptr = dev_priv->placeholders.next; \
@@ -28,7 +28,7 @@
/**
* \file mga_dma.c
* DMA support for MGA G200 / G400.
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Jeff Hartmann <jhartmann@valinux.com>
* \author Keith Whitwell <keith@tungstengraphics.com>
@@ -420,7 +420,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)

/**
* Bootstrap the driver for AGP DMA.
*
* \todo
* Investigate whether there is any benifit to storing the WARP microcode in
* AGP memory. If not, the microcode may as well always be put in PCI
@@ -591,7 +591,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,

/**
* Bootstrap the driver for PCI DMA.
*
* \todo
* The algorithm for decreasing the size of the primary DMA buffer could be
* better. The size should be rounded up to the nearest page size, then
@@ -600,7 +600,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
* \todo
* Determine whether the maximum address passed to drm_pci_alloc is correct.
* The same goes for drm_addbufs_pci.
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
@@ -613,7 +613,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
int err;
struct drm_buf_desc req;

if (dev->dma == NULL) {
DRM_ERROR("dev->dma is NULL\n");
return -EFAULT;
@@ -656,13 +656,13 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,

if (dev_priv->primary->size != dma_bs->primary_size) {
DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
dma_bs->primary_size,
(unsigned) dev_priv->primary->size);
dma_bs->primary_size = dev_priv->primary->size;
}

for ( bin_count = dma_bs->secondary_bin_count
; bin_count > 0
; bin_count-- ) {
(void) memset( &req, 0, sizeof(req) );
req.count = bin_count;
@@ -673,7 +673,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
break;
}
}

if (bin_count == 0) {
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
@@ -736,7 +736,7 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
if (is_agp) {
err = mga_do_agp_dma_bootstrap(dev, dma_bs);
}

/* If we attempted to initialize the card for AGP DMA but failed,
* clean-up any mess that may have been created.
*/
@@ -768,7 +768,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data,
drm_mga_dma_bootstrap_t *bootstrap = data;
int err;
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
const drm_mga_private_t * const dev_priv =
(drm_mga_private_t *) dev->dev_private;

@@ -951,7 +951,7 @@ static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup)
&& (dev_priv->warp->type != _DRM_CONSISTENT))
drm_core_ioremapfree(dev_priv->warp, dev);

if ((dev_priv->primary != NULL)
&& (dev_priv->primary->type != _DRM_CONSISTENT))
drm_core_ioremapfree(dev_priv->primary, dev);

@@ -302,10 +302,10 @@ typedef struct drm_mga_init {
typedef struct drm_mga_dma_bootstrap {
/**
* \name AGP texture region
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
* be filled in with the actual AGP texture settings.
*
* \warning
* If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode
* is zero, it means that PCI memory (most likely through the use of
@@ -319,7 +319,7 @@ typedef struct drm_mga_dma_bootstrap {

/**
* Requested size of the primary DMA region.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual AGP mode. If AGP was not available
*/
@@ -328,18 +328,18 @@ typedef struct drm_mga_dma_bootstrap {

/**
* Requested number of secondary DMA buffers.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual number of secondary DMA buffers
* allocated. Particularly when PCI DMA is used, this may be
* (subtantially) less than the number requested.
*/
uint32_t secondary_bin_count;

/**
* Requested size of each secondary DMA buffer.
*
* While the kernel \b is free to reduce
* dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
* to reduce dma_mga_dma_bootstrap::secondary_bin_size.
@@ -352,7 +352,7 @@ typedef struct drm_mga_dma_bootstrap {
* \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
* zero, it means that PCI DMA should be used, even if AGP is
* possible.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual AGP mode. If AGP was not available
* (i.e., PCI DMA was used), this value will be zero.

@@ -109,7 +109,7 @@ typedef struct drm_mga_private {

/**
* \name MMIO region parameters.
*
* \sa drm_mga_private_t::mmio
*/
/*@{*/
@@ -143,7 +143,7 @@ typedef struct drm_mga_private {
drm_local_map_t *warp;
drm_local_map_t *primary;
drm_local_map_t *agp_textures;

unsigned long agp_handle;
unsigned int agp_size;
} drm_mga_private_t;
@@ -216,8 +216,8 @@ static inline u32 _MGA_READ(u32 * addr)
#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif

#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
#define DWGREG1 0x2c00
#define DWGREG1_END 0x2dff

@@ -394,22 +394,22 @@ do { \
#define MGA_VINTCLR (1 << 4)
#define MGA_VINTEN (1 << 5)

#define MGA_ALPHACTRL 0x2c7c
#define MGA_AR0 0x1c60
#define MGA_AR1 0x1c64
#define MGA_AR2 0x1c68
#define MGA_AR3 0x1c6c
#define MGA_AR4 0x1c70
#define MGA_AR5 0x1c74
#define MGA_AR6 0x1c78

#define MGA_CXBNDRY 0x1c80
#define MGA_CXLEFT 0x1ca0
#define MGA_CXRIGHT 0x1ca4

#define MGA_DMAPAD 0x1c54
#define MGA_DSTORG 0x2cb8
#define MGA_DWGCTL 0x1c00
# define MGA_OPCOD_MASK (15 << 0)
# define MGA_OPCOD_TRAP (4 << 0)
# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
@@ -455,27 +455,27 @@ do { \
# define MGA_CLIPDIS (1 << 31)
#define MGA_DWGSYNC 0x2c4c

#define MGA_FCOL 0x1c24
#define MGA_FIFOSTATUS 0x1e10
#define MGA_FOGCOL 0x1cf4
#define MGA_FXBNDRY 0x1c84
#define MGA_FXLEFT 0x1ca8
#define MGA_FXRIGHT 0x1cac

#define MGA_ICLEAR 0x1e18
# define MGA_SOFTRAPICLR (1 << 0)
# define MGA_VLINEICLR (1 << 5)
#define MGA_IEN 0x1e1c
# define MGA_SOFTRAPIEN (1 << 0)
# define MGA_VLINEIEN (1 << 5)

#define MGA_LEN 0x1c5c

#define MGA_MACCESS 0x1c04

#define MGA_PITCH 0x1c8c
#define MGA_PLNWT 0x1c1c
#define MGA_PRIMADDRESS 0x1e58
# define MGA_DMA_GENERAL (0 << 0)
# define MGA_DMA_BLIT (1 << 0)
# define MGA_DMA_VECTOR (2 << 0)
@@ -487,43 +487,43 @@ do { \
# define MGA_PRIMPTREN0 (1 << 0)
# define MGA_PRIMPTREN1 (1 << 1)

#define MGA_RST 0x1e40
# define MGA_SOFTRESET (1 << 0)
# define MGA_SOFTEXTRST (1 << 1)

#define MGA_SECADDRESS 0x2c40
#define MGA_SECEND 0x2c44
#define MGA_SETUPADDRESS 0x2cd0
#define MGA_SETUPEND 0x2cd4
#define MGA_SGN 0x1c58
#define MGA_SOFTRAP 0x2c48
#define MGA_SRCORG 0x2cb4
# define MGA_SRMMAP_MASK (1 << 0)
# define MGA_SRCMAP_FB (0 << 0)
# define MGA_SRCMAP_SYSMEM (1 << 0)
# define MGA_SRCACC_MASK (1 << 1)
# define MGA_SRCACC_PCI (0 << 1)
# define MGA_SRCACC_AGP (1 << 1)
#define MGA_STATUS 0x1e14
# define MGA_SOFTRAPEN (1 << 0)
# define MGA_VSYNCPEN (1 << 4)
# define MGA_VLINEPEN (1 << 5)
# define MGA_DWGENGSTS (1 << 16)
# define MGA_ENDPRDMASTS (1 << 17)
#define MGA_STENCIL 0x2cc8
#define MGA_STENCILCTL 0x2ccc

#define MGA_TDUALSTAGE0 0x2cf8
#define MGA_TDUALSTAGE1 0x2cfc
#define MGA_TEXBORDERCOL 0x2c5c
#define MGA_TEXCTL 0x2c30
#define MGA_TEXCTL2 0x2c3c
# define MGA_DUALTEX (1 << 7)
# define MGA_G400_TC2_MAGIC (1 << 15)
# define MGA_MAP1_ENABLE (1 << 31)
#define MGA_TEXFILTER 0x2c58
#define MGA_TEXHEIGHT 0x2c2c
#define MGA_TEXORG 0x2c24
# define MGA_TEXORGMAP_MASK (1 << 0)
# define MGA_TEXORGMAP_FB (0 << 0)
# define MGA_TEXORGMAP_SYSMEM (1 << 0)
@@ -534,45 +534,45 @@ do { \
#define MGA_TEXORG2 0x2ca8
#define MGA_TEXORG3 0x2cac
#define MGA_TEXORG4 0x2cb0
#define MGA_TEXTRANS 0x2c34
#define MGA_TEXTRANSHIGH 0x2c38
#define MGA_TEXWIDTH 0x2c28

#define MGA_WACCEPTSEQ 0x1dd4
#define MGA_WCODEADDR 0x1e6c
#define MGA_WFLAG 0x1dc4
#define MGA_WFLAG1 0x1de0
#define MGA_WFLAGNB 0x1e64
#define MGA_WFLAGNB1 0x1e08
#define MGA_WGETMSB 0x1dc8
#define MGA_WIADDR 0x1dc0
#define MGA_WIADDR2 0x1dd8
# define MGA_WMODE_SUSPEND (0 << 0)
# define MGA_WMODE_RESUME (1 << 0)
# define MGA_WMODE_JUMP (2 << 0)
# define MGA_WMODE_START (3 << 0)
# define MGA_WAGP_ENABLE (1 << 2)
#define MGA_WMISC 0x1e70
# define MGA_WUCODECACHE_ENABLE (1 << 0)
# define MGA_WMASTER_ENABLE (1 << 1)
# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
#define MGA_WVRTXSZ 0x1dcc

#define MGA_YBOT 0x1c9c
#define MGA_YDST 0x1c90
#define MGA_YDSTLEN 0x1c88
#define MGA_YDSTORG 0x1c94
#define MGA_YTOP 0x1c98

#define MGA_ZORG 0x1c0c

/* This finishes the current batch of commands
*/
#define MGA_EXEC 0x0100

/* AGP PLL encoding (for G200 only).
*/
#define MGA_AGP_PLL 0x1e4c
# define MGA_AGP2XPLL_DISABLE (0 << 0)
# define MGA_AGP2XPLL_ENABLE (1 << 0)

@@ -145,6 +145,6 @@ void mga_driver_irq_uninstall(struct drm_device * dev)

/* Disable *all* interrupts */
MGA_WRITE(MGA_IEN, 0);

dev->irq_enabled = 0;
}

@@ -162,8 +162,8 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
DMA_LOCALS;

/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */

BEGIN_DMA(6);

@@ -206,8 +206,8 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
DMA_LOCALS;

/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */

BEGIN_DMA(5);

@@ -276,7 +276,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
unsigned int pipe = sarea_priv->warp_pipe;
DMA_LOCALS;

/* printk("mga_g400_emit_pipe %x\n", pipe); */

BEGIN_DMA(10);

@@ -177,4 +177,3 @@ nouveau_dma_wait(struct drm_device *dev, int size)

return 0;
}

@@ -95,4 +95,3 @@ typedef enum {
} while(0)

#endif

@@ -158,4 +158,3 @@ struct drm_nouveau_sarea {
#define DRM_NOUVEAU_MEM_FREE 0x09

#endif /* __NOUVEAU_DRM_H__ */

@@ -39,16 +39,9 @@
#define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000

#if 0
#if defined(__linux__)
#define NOUVEAU_HAVE_BUFFER
#endif
#endif

#include "nouveau_drm.h"
#include "nouveau_reg.h"

struct mem_block {
struct mem_block *next;
struct mem_block *prev;
@@ -66,7 +59,7 @@ enum nouveau_flags {
};

#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
#define NVOBJ_ENGINE_INT 0xdeadbeef

#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
@@ -113,6 +106,9 @@ struct nouveau_channel
/* mapping of the regs controling the fifo */
drm_local_map_t *regs;

/* Fencing */
uint32_t next_sequence;

/* DMA push buffer */
struct nouveau_gpuobj_ref *pushbuf;
struct mem_block *pushbuf_mem;
@@ -232,6 +228,8 @@ struct drm_nouveau_private {
NOUVEAU_CARD_INIT_FAILED
} init_state;

int ttm;

/* the card type, takes NV_* as values */
int card_type;
/* exact chipset, derived from NV_PMC_BOOT_0 */
@@ -351,6 +349,7 @@ extern struct mem_block* nouveau_mem_alloc(struct drm_device *,
int flags, struct drm_file *);
extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*);
extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init_ttm(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);

/* nouveau_notifier.c */
@@ -560,16 +559,12 @@ extern void nv04_timer_takedown(struct drm_device *);
extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);

#ifdef NOUVEAU_HAVE_BUFFER
/* nouveau_buffer.c */
extern struct drm_ttm_backend *nouveau_create_ttm_backend_entry(struct drm_device *dev);
extern int nouveau_fence_types(struct drm_buffer_object *bo, uint32_t *fclass, uint32_t *type);
extern int nouveau_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
extern int nouveau_init_mem_type(struct drm_device *dev, uint32_t type, struct drm_mem_type_manager *man);
extern uint32_t nouveau_evict_mask(struct drm_buffer_object *bo);
extern int nouveau_move(struct drm_buffer_object *bo, int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
void nouveau_flush_ttm(struct drm_ttm *ttm);
#endif
extern struct drm_bo_driver nouveau_bo_driver;

/* nouveau_fence.c */
extern struct drm_fence_driver nouveau_fence_driver;
extern void nouveau_fence_handler(struct drm_device *dev, int channel);

#if defined(__powerpc__)
#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )
@@ -592,4 +587,3 @@ void nouveau_flush_ttm(struct drm_ttm *ttm);
#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v))

#endif /* __NOUVEAU_DRV_H__ */

@@ -1,4 +1,4 @@
/*
* Copyright 2005-2006 Stephane Marchesin
* All Rights Reserved.
*
@@ -63,7 +63,7 @@ int nouveau_fifo_ctx_size(struct drm_device *dev)

/* voir nv_xaa.c : NVResetGraphics
* mémoire mappée par nv_driver.c : NVMapMem
* voir nv_driver.c : NVPreInit
*/

static int nouveau_fifo_instmem_configure(struct drm_device *dev)
@@ -71,7 +71,7 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;

NV_WRITE(NV03_PFIFO_RAMHT,
(0x03 << 24) /* search 128 */ |
((dev_priv->ramht_bits - 9) << 16) |
(dev_priv->ramht_offset >> 8)
);
@@ -166,7 +166,7 @@ int nouveau_fifo_init(struct drm_device *dev)
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);

NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
@@ -282,9 +282,9 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,

/*
* Alright, here is the full story
* Nvidia cards have multiple hw fifo contexts (praise them for that,
* no complicated crash-prone context switches)
* We allocate a new context for each app and let it write to it directly
* (woo, full userspace command submission !)
* When there are no more contexts, you lost
*/

@@ -82,7 +82,7 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
uint32_t mthd, data;
int ptr;

ptr = get >> 2;
if (dev_priv->card_type < NV_40) {
mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr));
@@ -115,7 +115,7 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
}

if (status) {
DRM_INFO("Unhandled PFIFO_INTR - 0x%8x\n", status);
DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status);
NV_WRITE(NV03_PFIFO_INTR_0, status);
}

@@ -244,39 +244,53 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
return 0;
}

struct nouveau_pgraph_trap {
int channel;
int class;
int subc, mthd, size;
uint32_t data, data2;
};

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id)
nouveau_graph_trap_info(struct drm_device *dev,
struct nouveau_pgraph_trap *trap)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t address;
uint32_t channel, class;
uint32_t method, subc, data, data2;

if (nouveau_graph_trapped_channel(dev, &trap->channel))
trap->channel = -1;
address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);

trap->mthd = address & 0x1FFC;
trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
if (dev_priv->card_type < NV_10) {
trap->subc = (address >> 13) & 0x7;
} else {
trap->subc = (address >> 16) & 0x7;
trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
}

if (dev_priv->card_type < NV_10) {
trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF;
} else if (dev_priv->card_type < NV_40) {
trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF;
} else if (dev_priv->card_type < NV_50) {
trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF;
} else {
trap->class = NV_READ(0x400814);
}
}

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
struct nouveau_pgraph_trap *trap)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t nsource, nstatus;

if (nouveau_graph_trapped_channel(dev, &channel))
channel = -1;

data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
method = address & 0x1FFC;
if (dev_priv->card_type < NV_10) {
subc = (address >> 13) & 0x7;
data2= 0;
} else {
subc = (address >> 16) & 0x7;
data2= NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
}
nsource = NV_READ(NV03_PGRAPH_NSOURCE);
nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
if (dev_priv->card_type < NV_10) {
class = NV_READ(0x400180 + subc*4) & 0xFF;
} else if (dev_priv->card_type < NV_40) {
class = NV_READ(0x400160 + subc*4) & 0xFFF;
} else if (dev_priv->card_type < NV_50) {
class = NV_READ(0x400160 + subc*4) & 0xFFFF;
} else {
class = NV_READ(0x400814);
}

DRM_INFO("%s - nSource:", id);
nouveau_print_bitfield_names(nsource, nouveau_nsource_names,
@@ -291,45 +305,60 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id)
printk("\n");

DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n",
id, channel, subc, class, method, data2, data);
id, trap->channel, trap->subc, trap->class, trap->mthd,
trap->data2, trap->data);
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int handled = 0;
struct nouveau_pgraph_trap trap;
int unhandled = 0;

DRM_DEBUG("PGRAPH notify interrupt\n");
if (dev_priv->card_type == NV_04 &&
(nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
uint32_t class, mthd;
nouveau_graph_trap_info(dev, &trap);

if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
/* NV4 (nvidia TNT 1) reports software methods with
* PGRAPH NOTIFY ILLEGAL_MTHD
*/
mthd = NV_READ(NV04_PGRAPH_TRAPPED_ADDR) & 0x1FFC;
class = NV_READ(NV04_PGRAPH_CTX_SWITCH1) & 0xFFF;
DRM_DEBUG("Got NV04 software method method %x for class %#x\n",
mthd, class);
trap.mthd, trap.class);

if (nouveau_sw_method_execute(dev, class, mthd)) {
if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
DRM_ERROR("Unable to execute NV04 software method %x "
"for object class %x. Please report.\n",
mthd, class);
} else {
handled = 1;
trap.mthd, trap.class);
unhandled = 1;
}
} else {
unhandled = 1;
}

if (!handled)
nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY");
if (unhandled)
nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}

static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR");
struct nouveau_pgraph_trap trap;
int unhandled = 0;

nouveau_graph_trap_info(dev, &trap);

if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
if (trap.channel >= 0 && trap.mthd == 0x0150) {
nouveau_fence_handler(dev, trap.channel);
} else
if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
unhandled = 1;
}
} else {
unhandled = 1;
}

if (unhandled)
nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
@@ -337,7 +366,7 @@ nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t chid;

chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1);
DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid);

@@ -389,7 +418,7 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
}

if (status) {
DRM_INFO("Unhandled PGRAPH_INTR - 0x%8x\n", status);
DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
NV_WRITE(NV03_PGRAPH_INTR, status);
}

@@ -445,4 +474,3 @@ nouveau_irq_handler(DRM_IRQ_ARGS)

return IRQ_HANDLED;
}

@@ -159,7 +159,7 @@ int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
return 0;
}

/*
* Free all blocks associated with the releasing file_priv
*/
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
@@ -189,7 +189,7 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
}
}

/*
* Cleanup everything
*/
void nouveau_mem_takedown(struct mem_block **heap)
@@ -288,7 +288,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
} else {
uint64_t mem;

mem = (NV_READ(NV04_FIFO_DATA) &
NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
return mem*1024*1024;
@@ -301,13 +301,11 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
}

static int
nouveau_mem_init_agp(struct drm_device *dev)
nouveau_mem_init_agp(struct drm_device *dev, int ttm)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_agp_info info;
struct drm_agp_mode mode;
struct drm_agp_buffer agp_req;
struct drm_agp_binding bind_req;
int ret;

ret = drm_agp_acquire(dev);
@@ -330,20 +328,25 @@ nouveau_mem_init_agp(struct drm_device *dev)
return ret;
}

agp_req.size = info.aperture_size;
agp_req.type = 0;
ret = drm_agp_alloc(dev, &agp_req);
if (ret) {
DRM_ERROR("Unable to alloc AGP: %d\n", ret);
return ret;
}
if (!ttm) {
struct drm_agp_buffer agp_req;
struct drm_agp_binding bind_req;

bind_req.handle = agp_req.handle;
bind_req.offset = 0;
ret = drm_agp_bind(dev, &bind_req);
if (ret) {
DRM_ERROR("Unable to bind AGP: %d\n", ret);
return ret;
agp_req.size = info.aperture_size;
agp_req.type = 0;
ret = drm_agp_alloc(dev, &agp_req);
if (ret) {
DRM_ERROR("Unable to alloc AGP: %d\n", ret);
return ret;
}

bind_req.handle = agp_req.handle;
bind_req.offset = 0;
ret = drm_agp_bind(dev, &bind_req);
if (ret) {
DRM_ERROR("Unable to bind AGP: %d\n", ret);
return ret;
}
}

dev_priv->gart_info.type = NOUVEAU_GART_AGP;
@@ -352,6 +355,73 @@ nouveau_mem_init_agp(struct drm_device *dev)
return 0;
}

#define HACK_OLD_MM
int
nouveau_mem_init_ttm(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t vram_size, bar1_size;
int ret;

dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
dev_priv->fb_phys = drm_get_resource_start(dev,1);
dev_priv->gart_info.type = NOUVEAU_GART_NONE;

drm_bo_driver_init(dev);

/* non-mappable vram */
dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
if (bar1_size < vram_size) {
if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
bar1_size, vram_size - bar1_size))) {
DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
return ret;
}
vram_size = bar1_size;
}

/* mappable vram */
#ifdef HACK_OLD_MM
vram_size /= 4;
#endif
if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size))) {
DRM_ERROR("Failed VRAM mm init: %d\n", ret);
return ret;
}

/* GART */
#ifndef __powerpc__
if (drm_device_is_agp(dev) && dev->agp) {
if ((ret = nouveau_mem_init_agp(dev, 1)))
DRM_ERROR("Error initialising AGP: %d\n", ret);
}
#endif

if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
if ((ret = nouveau_sgdma_init(dev)))
DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
}

if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
dev_priv->gart_info.aper_size >>
PAGE_SHIFT))) {
DRM_ERROR("Failed TT mm init: %d\n", ret);
return ret;
}

#ifdef HACK_OLD_MM
vram_size <<= PAGE_SHIFT;
DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
return -ENOMEM;
#endif

return 0;
}

int nouveau_mem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -378,7 +448,7 @@ int nouveau_mem_init(struct drm_device *dev)
DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);

if (fb_size>256*1024*1024) {
/* On cards with > 256Mb, you can't map everything.
* So we create a second FB heap for that type of memory */
if (nouveau_mem_init_heap(&dev_priv->fb_heap,
0, 256*1024*1024))
@@ -395,7 +465,7 @@ int nouveau_mem_init(struct drm_device *dev)
#ifndef __powerpc__
/* Init AGP / NV50 PCIEGART */
if (drm_device_is_agp(dev) && dev->agp) {
if ((ret = nouveau_mem_init_agp(dev)))
if ((ret = nouveau_mem_init_agp(dev, 0)))
DRM_ERROR("Error initialising AGP: %d\n", ret);
}
#endif
@@ -407,7 +477,7 @@ int nouveau_mem_init(struct drm_device *dev)
if (!ret) {
ret = nouveau_sgdma_nottm_hack_init(dev);
if (ret)
nouveau_sgdma_takedown(dev);
}

if (ret)
@@ -419,7 +489,7 @@ int nouveau_mem_init(struct drm_device *dev)
0, dev_priv->gart_info.aper_size)) {
if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
nouveau_sgdma_nottm_hack_takedown(dev);
nouveau_sgdma_takedown(dev);
}
}
}
@@ -437,7 +507,7 @@ int nouveau_mem_init(struct drm_device *dev)
} else {
if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
dev->sg->pages * PAGE_SIZE)) {
DRM_ERROR("Unable to initialize pci_heap!");
}
}
}
@@ -453,8 +523,8 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
int type;
struct drm_nouveau_private *dev_priv = dev->dev_private;

/*
* Make things easier on ourselves: all allocations are page-aligned.
* We need that to map allocated regions into the user space
*/
if (alignment < PAGE_SHIFT)
@@ -536,7 +606,7 @@ alloc_ok:
ret = drm_addmap(dev, block->start, block->size,
_DRM_SCATTER_GATHER, 0, &block->map);

if (ret) {
nouveau_mem_free_block(block);
return NULL;
}
@@ -606,5 +676,3 @@ int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *
nouveau_mem_free(dev, block);
return 0;
}

@@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
} else {
target = NV_DMA_TARGET_AGP;
}
} else
if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {
target = NV_DMA_TARGET_PCI_NONLINEAR;
} else {
@@ -163,4 +163,3 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,

return 0;
}

@@ -524,7 +524,7 @@ nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
struct nouveau_gpuobj_ref *ref;
struct list_head *entry, *tmp;

list_for_each_safe(entry, tmp, &chan->ramht_refs) {
ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

if (ref->handle == handle) {
@@ -616,7 +616,7 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
DMA objects are used to reference a piece of memory in the
framebuffer, PCI or AGP address space. Each object is 16 bytes big
and looks as follows:

entry[0]
11:0 class (seems like I can always use 0 here)
12 page table present?
@@ -648,7 +648,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
uint32_t is_scatter_gather = 0;

/* Total number of pages covered by the request.
*/
const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -671,7 +671,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
default:
break;
}

ret = nouveau_gpuobj_new(dev, chan,
is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class),
16,
@@ -687,11 +687,11 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
adjust = offset & 0x00000fff;
if (access != NV_DMA_ACCESS_RO)
pte_flags |= (1<<1);

if ( ! is_scatter_gather )
{
frame = offset & ~0x00000fff;

INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |
(adjust << 20) |
(access << 14) |
@@ -701,7 +701,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
INSTANCE_WR(*gpuobj, 2, frame | pte_flags);
INSTANCE_WR(*gpuobj, 3, frame | pte_flags);
}
else
{
/* Intial page entry in the scatter-gather area that
* corresponds to the base offset
@@ -728,7 +728,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,

/*write starting at the third dword*/
instance_offset = 2;

/*for each PAGE, get its bus address, fill in the page table entry, and advance*/
for (i = 0; i < page_count; i++) {
if (dev->sg->busaddr[idx] == 0) {
@@ -745,12 +745,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
}

frame = (uint32_t) dev->sg->busaddr[idx];
INSTANCE_WR(*gpuobj, instance_offset,
frame | pte_flags);

idx++;
instance_offset ++;
}
}
}
} else {
uint32_t flags0, flags5;
@@ -848,7 +848,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
entry[0]:
11:0 class (maybe uses more bits here?)
17 user clip enable
21:19 patch config
25 patch status valid ?
entry[1]:
15:0 DMA notifier (maybe 20:0)
@@ -986,7 +986,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* NV50 VM, point offset 0-512MiB at shared PCIEGART table */
if (dev_priv->card_type >= NV_50) {
uint32_t vm_offset;

vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
vm_offset += chan->ramin->gpuobj->im_pramin->start;
if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
@@ -1074,7 +1074,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)

DRM_DEBUG("ch%d\n", chan->id);

list_for_each_safe(entry, tmp, &chan->ramht_refs) {
ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

nouveau_gpuobj_ref_del(dev, &ref);
@@ -1104,7 +1104,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

//FIXME: check args, only allow trusted objects to be created

if (init->handle == ~0)
return -EINVAL;

@@ -1145,4 +1145,3 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,

return 0;
}

@@ -550,4 +550,3 @@
#define NV40_RAMFC_UNK_48 0x48
#define NV40_RAMFC_UNK_4C 0x4C
#define NV40_RAMFC_UNK_50 0x50

@@ -1,4 +1,4 @@
/*
* Copyright 2005 Stephane Marchesin
* All Rights Reserved.
*
@@ -40,7 +40,7 @@ static int nouveau_init_card_mappings(struct drm_device *dev)

/* map the mmio regs */
ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
drm_get_resource_len(dev, 0),
_DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
if (ret) {
DRM_ERROR("Unable to initialize the mmio mapping (%d). "
@@ -278,6 +278,7 @@ nouveau_card_init(struct drm_device *dev)

if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
return 0;
dev_priv->ttm = 0;

/* Map any PCI resources we need on the card */
ret = nouveau_init_card_mappings(dev);
@@ -315,8 +316,13 @@ nouveau_card_init(struct drm_device *dev)
if (ret) return ret;

/* Setup the memory manager */
ret = nouveau_mem_init(dev);
if (ret) return ret;
if (dev_priv->ttm) {
ret = nouveau_mem_init_ttm(dev);
if (ret) return ret;
} else {
ret = nouveau_mem_init(dev);
if (ret) return ret;
}

ret = nouveau_gpuobj_init(dev);
if (ret) return ret;
@@ -425,7 +431,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class);

/* Time to determine the card architecture */
regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8);
if (!regs) {
DRM_ERROR("Could not ioremap to determine register\n");
return -ENOMEM;
@@ -553,7 +559,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *
case NOUVEAU_GETPARAM_PCI_PHYSICAL:
if ( dev -> sg )
getparam->value=(uint64_t) dev->sg->virtual;
else
{
DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n");
return -EINVAL;
@@ -635,5 +641,3 @@ void nouveau_wait_for_idle(struct drm_device *dev)
}
}
}

@@ -189,5 +189,3 @@ static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t o

return 1;
}

@@ -31,4 +31,3 @@
*/

int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */

@@ -21,4 +21,3 @@ void
nv04_fb_takedown(struct drm_device *dev)
{
}

@@ -71,7 +71,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;

NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));

nouveau_gpuobj_ref_del(dev, &chan->ramfc);
@@ -88,11 +88,11 @@ nv04_fifo_load_context(struct nouveau_channel *chan)

NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));

tmp = RAMFC_RD(DMA_INSTANCE);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);

NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH));
NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE));
@@ -123,7 +123,6 @@ nv04_fifo_save_context(struct nouveau_channel *chan)
RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1));

return 0;
}

@@ -1,4 +1,4 @@
/*
* Copyright 2007 Stephane Marchesin
* All Rights Reserved.
*

@@ -134,7 +134,7 @@ nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (gpuobj->im_bound)
dev_priv->Engine.instmem.unbind(dev, gpuobj);
gpuobj->im_backing = NULL;
}
}

int
@@ -156,4 +156,3 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
gpuobj->im_bound = 0;
return 0;
}

@@ -20,4 +20,3 @@ void
nv04_mc_takedown(struct drm_device *dev)
{
}

@@ -42,4 +42,3 @@ void
nv04_timer_takedown(struct drm_device *dev)
{
}

@@ -23,4 +23,3 @@ void
nv10_fb_takedown(struct drm_device *dev)
{
}

@@ -157,4 +157,3 @@ nv10_fifo_save_context(struct nouveau_channel *chan)

return 0;
}

@@ -1,4 +1,4 @@
/*
* Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
* All Rights Reserved.
*
@@ -732,7 +732,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev)
if (last) {
nouveau_wait_for_idle(dev);
nv10_graph_save_context(last);
}

nouveau_wait_for_idle(dev);

@@ -907,4 +907,3 @@ int nv10_graph_init(struct drm_device *dev) {
void nv10_graph_takedown(struct drm_device *dev)
{
}

@@ -667,10 +667,16 @@ int nv20_graph_save_context(struct nouveau_channel *chan)

static void nv20_graph_rdi(struct drm_device *dev) {
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
int i, writecount = 32;
uint32_t rdi_index = 0x2c80000;

NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000);
for (i = 0; i < 32; i++)
if (dev_priv->chipset == 0x20) {
rdi_index = 0x3d0000;
writecount = 15;
}

NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index);
for (i = 0; i < writecount; i++)
NV_WRITE(NV10_PGRAPH_RDI_DATA, 0);

nouveau_wait_for_idle(dev);
@@ -706,7 +712,7 @@ int nv20_graph_init(struct drm_device *dev) {
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF20E0435); /* 0x4 = auto ctx switch */
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
NV_WRITE(0x40009C , 0x00000040);

@@ -718,9 +724,9 @@ int nv20_graph_init(struct drm_device *dev) {
NV_WRITE(0x400098, 0x40000080);
NV_WRITE(0x400B88, 0x000000ff);
} else {
NV_WRITE(0x400880, 0x00080000);
NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */
NV_WRITE(0x400094, 0x00000005);
NV_WRITE(0x400B80, 0x45CAA208);
NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */
NV_WRITE(0x400B84, 0x24000000);
NV_WRITE(0x400098, 0x00000040);
NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -730,12 +736,28 @@ int nv20_graph_init(struct drm_device *dev) {
}

/* copy tile info from PFB */
for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
NV_WRITE(NV10_PGRAPH_TILE(i), NV_READ(NV10_PFB_TILE(i)));
NV_WRITE(NV10_PGRAPH_TLIMIT(i), NV_READ(NV10_PFB_TLIMIT(i)));
NV_WRITE(NV10_PGRAPH_TSIZE(i), NV_READ(NV10_PFB_TSIZE(i)));
NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4);
NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i)));
NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
/* which is NV40_PGRAPH_TSIZE0(i) ?? */
NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4);
NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i)));
NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
/* which is NV40_PGRAPH_TILE0(i) ?? */
NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4);
NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i)));
}
for (i = 0; i < 8; i++) {
NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4));
NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4);
NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4));
}
NV_WRITE(0x4009a0, NV_READ(0x100324));
NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324));

NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
@@ -865,4 +887,3 @@ int nv30_graph_init(struct drm_device *dev)

return 0;
}

@@ -53,4 +53,3 @@ void
nv40_fb_takedown(struct drm_device *dev)
{
}

@@ -205,4 +205,3 @@ nv40_fifo_init(struct drm_device *dev)
NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
return 0;
}

@@ -304,7 +304,7 @@ nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;

INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
@@ -1555,7 +1555,7 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp);

tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304);
tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp);
@@ -1877,35 +1877,35 @@ static uint32_t nv49_4b_ctx_voodoo[] ={

static uint32_t nv4a_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06,
0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1,
0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a,
0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00,
0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a,
0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100,
0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a,
0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004,
0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080,
0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88,
0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000,
0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05,
0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};

@@ -2026,7 +2026,7 @@ nv40_graph_init(struct drm_device *dev)
NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_voodoo[i]);
i++;
}
}

/* No context present currently */
NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -2221,4 +2221,3 @@ nv40_graph_init(struct drm_device *dev)
void nv40_graph_takedown(struct drm_device *dev)
{
}

@@ -36,4 +36,3 @@ void
nv40_mc_takedown(struct drm_device *dev)
{
}

@@ -324,4 +324,3 @@ nv50_fifo_save_context(struct nouveau_channel *chan)
DRM_ERROR("stub!\n");
return 0;
}

Some files were not shown because too many files have changed in this diff.