commit a78f70faad

    Merge branch 'ttm-vram-0-1-branch'
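
    Summary of the merged changes (as visible in the diff below):

    - Split the user-space object, fence and buffer-object definitions
      out of drmP.h into the new drm_objects.h header.
    - Add a new 410-line file with generic buffer-move helpers
      (drm_bo_move_ttm, drm_bo_move_memcpy, drm_bo_move_accel_cleanup)
      plus drm_mem_reg_ioremap()/drm_mem_reg_iounmap().
    - Replace DRM_MASK_VAL with DRM_FLAG_MASKED in the AGP TTM backend
      and drop the aperture_base / CBA setup there.
    - Rework the DRM_ODD_MM_COMPAT vm code (now 2.6.15-2.6.18) to track
      buffer objects instead of ttms, and add a nopfn-based fault path
      for 2.6.19+, with DRM_FULL_MM_COMPAT from 2.6.21.
    - Give drm_fence_handler() and the driver poke_flush() hook a fence
      class argument, drop volatile from the fence object API, and split
      the lazy wait loop out into drm_fence_lazy_wait().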
@@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
 		drm_memory_debug.o ati_pcigart.o drm_sman.o \
 		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
-		drm_fence.o drm_ttm.o drm_bo.o
+		drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
@@ -595,78 +595,8 @@ typedef struct ati_pcigart_info {
 	drm_local_map_t mapping;
 } drm_ati_pcigart_info;
 
-/*
- * User space objects and their references.
- */
-
-#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-typedef enum {
-	drm_fence_type,
-	drm_buffer_type,
-	drm_ttm_type
-
-	/*
-	 * Add other user space object types here.
-	 */
-
-} drm_object_type_t;
-
-
-
-
-/*
- * A user object is a structure that helps the drm give out user handles
- * to kernel internal objects and to keep track of these objects so that
- * they can be destroyed, for example when the user space process exits.
- * Designed to be accessible using a user space 32-bit handle.
- */
-
-typedef struct drm_user_object{
-	drm_hash_item_t hash;
-	struct list_head list;
-	drm_object_type_t type;
-	atomic_t refcount;
-	int shareable;
-	drm_file_t *owner;
-	void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,
-				   drm_ref_t ref_action);
-	void (*unref)(drm_file_t *priv, struct drm_user_object *obj,
-		      drm_ref_t unref_action);
-	void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
-} drm_user_object_t;
-
-/*
- * A ref object is a structure which is used to
- * keep track of references to user objects and to keep track of these
- * references so that they can be destroyed for example when the user space
- * process exits. Designed to be accessible using a pointer to the _user_ object.
- */
-
-
-typedef struct drm_ref_object {
-	drm_hash_item_t hash;
-	struct list_head list;
-	atomic_t refcount;
-	drm_ref_t unref_action;
-} drm_ref_object_t;
-
-
-#include "drm_ttm.h"
-
-/*
- * buffer object driver
- */
-
-typedef struct drm_bo_driver{
-	int cached[DRM_BO_MEM_TYPES];
-	drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
-	drm_ttm_backend_t *(*create_ttm_backend_entry)
-		(struct drm_device *dev);
-	int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
-	int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
-} drm_bo_driver_t;
-
+#include "drm_objects.h"
 
 /**
  * DRM driver structure. This structure represent the common code for
@@ -755,63 +685,6 @@ typedef struct drm_head {
 } drm_head_t;
 
 
-typedef struct drm_fence_driver{
-	int no_types;
-	uint32_t wrap_diff;
-	uint32_t flush_diff;
-	uint32_t sequence_mask;
-	int lazy_capable;
-	int (*emit) (struct drm_device *dev, uint32_t flags,
-		     uint32_t *breadcrumb,
-		     uint32_t *native_type);
-	void (*poke_flush) (struct drm_device *dev);
-} drm_fence_driver_t;
-
-#define _DRM_FENCE_TYPE_EXE 0x00
-
-typedef struct drm_fence_manager{
-	int initialized;
-	rwlock_t lock;
-
-	/*
-	 * The list below should be maintained in sequence order and
-	 * access is protected by the above spinlock.
-	 */
-
-	struct list_head ring;
-	struct list_head *fence_types[32];
-	volatile uint32_t pending_flush;
-	wait_queue_head_t fence_queue;
-	int pending_exe_flush;
-	uint32_t last_exe_flush;
-	uint32_t exe_flush_sequence;
-	atomic_t count;
-} drm_fence_manager_t;
-
-typedef struct drm_buffer_manager{
-	struct mutex init_mutex;
-	int nice_mode;
-	int initialized;
-	drm_file_t *last_to_validate;
-	int has_type[DRM_BO_MEM_TYPES];
-	int use_type[DRM_BO_MEM_TYPES];
-	drm_mm_t manager[DRM_BO_MEM_TYPES];
-	struct list_head lru[DRM_BO_MEM_TYPES];
-	struct list_head pinned[DRM_BO_MEM_TYPES];
-	struct list_head unfenced;
-	struct list_head ddestroy;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-	struct work_struct wq;
-#else
-	struct delayed_work wq;
-#endif
-	uint32_t fence_type;
-	unsigned long cur_pages;
-	atomic_t count;
-} drm_buffer_manager_t;
-
-
-
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
@@ -966,62 +839,6 @@ typedef struct drm_agp_ttm_priv {
 } drm_agp_ttm_priv;
 #endif
 
-typedef struct drm_fence_object{
-	drm_user_object_t base;
-	atomic_t usage;
-
-	/*
-	 * The below three fields are protected by the fence manager spinlock.
-	 */
-
-	struct list_head ring;
-	int class;
-	uint32_t native_type;
-	uint32_t type;
-	uint32_t signaled;
-	uint32_t sequence;
-	uint32_t flush_mask;
-	uint32_t submitted_flush;
-} drm_fence_object_t;
-
-
-typedef struct drm_buffer_object{
-	drm_device_t *dev;
-	drm_user_object_t base;
-
-	/*
-	 * If there is a possibility that the usage variable is zero,
-	 * then dev->struct_mutext should be locked before incrementing it.
-	 */
-
-	atomic_t usage;
-	drm_ttm_object_t *ttm_object;
-	drm_ttm_t *ttm;
-	unsigned long num_pages;
-	unsigned long buffer_start;
-	drm_bo_type_t type;
-	unsigned long offset;
-	uint32_t page_alignment;
-	atomic_t mapped;
-	uint32_t flags;
-	uint32_t mask;
-	uint32_t mem_type;
-
-	drm_mm_node_t *mm_node; /* MM node for on-card RAM */
-	struct list_head lru;
-	struct list_head ddestroy;
-
-	uint32_t fence_type;
-	uint32_t fence_class;
-	drm_fence_object_t *fence;
-	uint32_t priv_flags;
-	wait_queue_head_t event_queue;
-	struct mutex mutex;
-} drm_buffer_object_t;
-
-#define _DRM_BO_FLAG_UNFENCED 0x00000001
-#define _DRM_BO_FLAG_EVICTED  0x00000002
-
-
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
 					     int feature)
@@ -1365,105 +1182,9 @@ static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
 }
 
 
-/*
- * User space object bookkeeping (drm_object.c)
- */
-
-/*
- * Must be called with the struct_mutex held.
- */
-
-extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
-			       int shareable);
-
-/*
- * Must be called with the struct_mutex held.
- */
-
-extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
-
-/*
- * Must be called with the struct_mutex held.
- * If "item" has been obtained by a call to drm_lookup_user_object. You may not
- * release the struct_mutex before calling drm_remove_ref_object.
- * This function may temporarily release the struct_mutex.
- */
-
-extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
-
-/*
- * Must be called with the struct_mutex held. May temporarily release it.
- */
-
-extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
-			      drm_ref_t ref_action);
-
-/*
- * Must be called with the struct_mutex held.
- */
-
-drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,
-					drm_user_object_t *referenced_object,
-					drm_ref_t ref_action);
-/*
- * Must be called with the struct_mutex held.
- * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
- * release the struct_mutex before calling drm_remove_ref_object.
- * This function may temporarily release the struct_mutex.
- */
-
-extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
-extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
-			       drm_user_object_t **object);
-extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
-
-
-
-/*
- * fence objects (drm_fence.c)
- */
-
-extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
-extern void drm_fence_manager_init(drm_device_t *dev);
-extern void drm_fence_manager_takedown(drm_device_t *dev);
-extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
-extern int drm_fence_object_flush(drm_device_t * dev,
-				  volatile drm_fence_object_t * fence,
-				  uint32_t type);
-extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
-				     uint32_t type);
-extern void drm_fence_usage_deref_locked(drm_device_t * dev,
-					 drm_fence_object_t * fence);
-extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
-					   drm_fence_object_t * fence);
-extern int drm_fence_object_wait(drm_device_t * dev,
-				 volatile drm_fence_object_t * fence,
-				 int lazy, int ignore_signals, uint32_t mask);
-extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
-				   uint32_t fence_flags,
-				   drm_fence_object_t **c_fence);
-extern int drm_fence_add_user_object(drm_file_t *priv,
-				     drm_fence_object_t *fence,
-				     int shareable);
-
-
-
-
-
-extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
-
-/*
- * buffer objects (drm_bo.c)
- */
-
-extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
-extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
-extern int drm_bo_driver_finish(drm_device_t *dev);
-extern int drm_bo_driver_init(drm_device_t *dev);
-extern int drm_fence_buffer_objects(drm_file_t * priv,
-				    struct list_head *list,
-				    uint32_t fence_flags,
-				    drm_fence_object_t *fence,
-				    drm_fence_object_t **used_fence);
-
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
@@ -606,8 +606,8 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
 	int ret;
 
 	DRM_DEBUG("drm_agp_bind_ttm\n");
-	DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
-		     (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
+	DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
+			DRM_BE_FLAG_BOUND_CACHED);
 	mem->is_flushed = TRUE;
 	mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
 	ret = drm_agp_bind_memory(mem, offset);
@@ -710,7 +710,6 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
 	agp_priv->uncached_type = AGP_USER_MEMORY;
 	agp_priv->bridge = dev->agp->bridge;
 	agp_priv->populated = FALSE;
-	agp_be->aperture_base = dev->agp->agp_info.aper_base;
 	agp_be->private = (void *) agp_priv;
 	agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
 	agp_be->populate = drm_agp_populate;
@@ -718,10 +717,8 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
 	agp_be->bind = drm_agp_bind_ttm;
 	agp_be->unbind = drm_agp_unbind_ttm;
 	agp_be->destroy = drm_agp_destroy_ttm;
-	DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
-		     (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
-	DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
-		     (dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
+	DRM_FLAG_MASKED(agp_be->flags, (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0,
+			DRM_BE_FLAG_NEEDS_FREE);
 	agp_be->drm_map_type = _DRM_AGP;
 	return agp_be;
 }
linux-core/drm_bo.c: 1591 lines changed (diff suppressed because it is too large)
@@ -0,0 +1,410 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+/**
+ * Free the old memory node unless it's a pinned region and we
+ * have not been requested to free also pinned regions.
+ */
+
+static void drm_bo_free_old_node(drm_buffer_object_t * bo)
+{
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+
+	if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
+		mutex_lock(&bo->dev->struct_mutex);
+		drm_mm_put_block(old_mem->mm_node);
+		old_mem->mm_node = NULL;
+		mutex_unlock(&bo->dev->struct_mutex);
+	}
+	old_mem->mm_node = NULL;
+}
+
+int drm_bo_move_ttm(drm_buffer_object_t * bo,
+		    int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+	drm_ttm_t *ttm = bo->ttm;
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_mask = old_mem->mask;
+	int ret;
+
+	if (old_mem->mem_type == DRM_BO_MEM_TT) {
+		if (evict)
+			drm_ttm_evict(ttm);
+		else
+			drm_ttm_unbind(ttm);
+
+		drm_bo_free_old_node(bo);
+		DRM_FLAG_MASKED(old_mem->flags,
+				DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
+				DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
+		old_mem->mem_type = DRM_BO_MEM_LOCAL;
+		save_flags = old_mem->flags;
+	}
+	if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
+		ret = drm_bind_ttm(ttm,
+				   new_mem->flags & DRM_BO_FLAG_CACHED,
+				   new_mem->mm_node->start);
+		if (ret)
+			return ret;
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+	old_mem->mask = save_mask;
+	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_bo_move_ttm);
+
+/**
+ * \c Return a kernel virtual address to the buffer object PCI memory.
+ *
+ * \param bo The buffer object.
+ * \return Failure indication.
+ *
+ * Returns -EINVAL if the buffer object is currently not mappable.
+ * Returns -ENOMEM if the ioremap operation failed.
+ * Otherwise returns zero.
+ *
+ * After a successfull call, bo->iomap contains the virtual address, or NULL
+ * if the buffer object content is not accessible through PCI space.
+ * Call bo->mutex locked.
+ */
+
+int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
+			void **virtual)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+	unsigned long bus_offset;
+	unsigned long bus_size;
+	unsigned long bus_base;
+	int ret;
+	void *addr;
+
+	*virtual = NULL;
+	ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
+	if (ret || bus_size == 0)
+		return ret;
+
+	if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
+		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+	else {
+		addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+		if (!addr)
+			return -ENOMEM;
+	}
+	*virtual = addr;
+	return 0;
+}
+
+/**
+ * \c Unmap mapping obtained using drm_bo_ioremap
+ *
+ * \param bo The buffer object.
+ *
+ * Call bo->mutex locked.
+ */
+
+void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
+			 void *virtual)
+{
+	drm_buffer_manager_t *bm;
+	drm_mem_type_manager_t *man;
+
+	bm = &dev->bm;
+	man = &bm->man[mem->mem_type];
+
+	if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+		iounmap(virtual);
+	}
+}
+
+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+	uint32_t *dstP =
+	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+	uint32_t *srcP =
+	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+	int i;
+	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+		iowrite32(ioread32(srcP++), dstP++);
+	return 0;
+}
+
+static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
+{
+	struct page *d = drm_ttm_get_page(ttm, page);
+	void *dst;
+
+	if (!d)
+		return -ENOMEM;
+
+	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+	dst = kmap(d);
+	if (!dst)
+		return -ENOMEM;
+
+	memcpy_fromio(dst, src, PAGE_SIZE);
+	kunmap(d);
+	return 0;
+}
+
+static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
+{
+	struct page *s = drm_ttm_get_page(ttm, page);
+	void *src;
+
+	if (!s)
+		return -ENOMEM;
+
+	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+	src = kmap(s);
+	if (!src)
+		return -ENOMEM;
+
+	memcpy_toio(dst, src, PAGE_SIZE);
+	kunmap(s);
+	return 0;
+}
+
+int drm_bo_move_memcpy(drm_buffer_object_t * bo,
+		       int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+	drm_device_t *dev = bo->dev;
+	drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
+	drm_ttm_t *ttm = bo->ttm;
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	drm_bo_mem_reg_t old_copy = *old_mem;
+	void *old_iomap;
+	void *new_iomap;
+	int ret;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_mask = old_mem->mask;
+	unsigned long i;
+	unsigned long page;
+	unsigned long add = 0;
+	int dir;
+
+	ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
+	if (ret)
+		return ret;
+	ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
+	if (ret)
+		goto out;
+
+	if (old_iomap == NULL && new_iomap == NULL)
+		goto out2;
+	if (old_iomap == NULL && ttm == NULL)
+		goto out2;
+
+	add = 0;
+	dir = 1;
+
+	if ((old_mem->mem_type == new_mem->mem_type) &&
+	    (new_mem->mm_node->start <
+	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+		dir = -1;
+		add = new_mem->num_pages - 1;
+	}
+
+	for (i = 0; i < new_mem->num_pages; ++i) {
+		page = i * dir + add;
+		if (old_iomap == NULL)
+			ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
+		else if (new_iomap == NULL)
+			ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
+		else
+			ret = drm_copy_io_page(new_iomap, old_iomap, page);
+		if (ret)
+			goto out1;
+	}
+	mb();
+      out2:
+	drm_bo_free_old_node(bo);
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+	old_mem->mask = save_mask;
+	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+
+	if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
+		drm_ttm_unbind(ttm);
+		drm_destroy_ttm(ttm);
+		bo->ttm = NULL;
+	}
+
+      out1:
+	drm_mem_reg_iounmap(dev, new_mem, new_iomap);
+      out:
+	drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_bo_move_memcpy);
+
+/*
+ * Transfer a buffer object's memory and LRU status to a newly
+ * created object. User-space references remains with the old
+ * object. Call bo->mutex locked.
+ */
+
+int drm_buffer_object_transfer(drm_buffer_object_t * bo,
+			       drm_buffer_object_t ** new_obj)
+{
+	drm_buffer_object_t *fbo;
+	drm_device_t *dev = bo->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
+	if (!fbo)
+		return -ENOMEM;
+
+	*fbo = *bo;
+	mutex_init(&fbo->mutex);
+	mutex_lock(&fbo->mutex);
+	mutex_lock(&dev->struct_mutex);
+
+	DRM_INIT_WAITQUEUE(&bo->event_queue);
+	INIT_LIST_HEAD(&fbo->ddestroy);
+	INIT_LIST_HEAD(&fbo->lru);
+	INIT_LIST_HEAD(&fbo->pinned_lru);
+#ifdef DRM_ODD_MM_COMPAT
+	INIT_LIST_HEAD(&fbo->vma_list);
+	INIT_LIST_HEAD(&fbo->p_mm_list);
+#endif
+
+	atomic_inc(&bo->fence->usage);
+	fbo->pinned_node = NULL;
+	fbo->mem.mm_node->private = (void *)fbo;
+	atomic_set(&fbo->usage, 1);
+	atomic_inc(&bm->count);
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&fbo->mutex);
+
+	*new_obj = fbo;
+	return 0;
+}
+
+/*
+ * Since move is underway, we need to block signals in this function.
+ * We cannot restart until it has finished.
+ */
+
+int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
+			      int evict,
+			      int no_wait,
+			      uint32_t fence_type,
+			      uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
+{
+	drm_device_t *dev = bo->dev;
+	drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	int ret;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_mask = old_mem->mask;
+	drm_buffer_object_t *old_obj;
+
+	if (bo->fence)
+		drm_fence_usage_deref_unlocked(dev, bo->fence);
+	ret = drm_fence_object_create(dev, fence_type,
+				      fence_flags | DRM_FENCE_FLAG_EMIT,
+				      &bo->fence);
+	if (ret)
+		return ret;
+
+#ifdef DRM_ODD_MM_COMPAT
+	/*
+	 * In this mode, we don't allow pipelining a copy blit,
+	 * since the buffer will be accessible from user space
+	 * the moment we return and rebuild the page tables.
+	 *
+	 * With normal vm operation, page tables are rebuilt
+	 * on demand using fault(), which waits for buffer idle.
+	 */
+	if (1)
+#else
+	if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
+		      bo->mem.mm_node != NULL))
+#endif
+	{
+		ret = drm_bo_wait(bo, 0, 1, 0);
+		if (ret)
+			return ret;
+
+		drm_bo_free_old_node(bo);
+
+		if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
+			drm_ttm_unbind(bo->ttm);
+			drm_destroy_ttm(bo->ttm);
+			bo->ttm = NULL;
+		}
+	} else {
+
+		/* This should help pipeline ordinary buffer moves.
+		 *
+		 * Hang old buffer memory on a new buffer object,
+		 * and leave it to be released when the GPU
+		 * operation has completed.
+		 */
+
+		ret = drm_buffer_object_transfer(bo, &old_obj);
+
+		if (ret)
+			return ret;
+
+		if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
+			old_obj->ttm = NULL;
+		else
+			bo->ttm = NULL;
+
+		mutex_lock(&dev->struct_mutex);
+		list_del_init(&old_obj->lru);
+		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+		drm_bo_add_to_lru(old_obj);
+
+		drm_bo_usage_deref_locked(old_obj);
+		mutex_unlock(&dev->struct_mutex);
+
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+	old_mem->mask = save_mask;
+	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
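
The three exported helpers in the new file above (drm_bo_move_ttm,
drm_bo_move_memcpy and drm_bo_move_accel_cleanup) are generic building
blocks for a driver's buffer-move path. A minimal sketch of how a driver
might combine them follows; the example_* names and the callback wiring
are illustrative assumptions and not part of this commit:

    /*
     * Hypothetical driver move handler (sketch only; the example_*
     * helpers are assumed to exist in the driver).
     */
    static int example_bo_move(drm_buffer_object_t * bo,
                               int evict, int no_wait,
                               drm_bo_mem_reg_t * new_mem)
    {
            if (example_can_blit(&bo->mem, new_mem)) {
                    uint32_t fence_type;
                    uint32_t fence_flags;

                    /* Queue a GPU blit; drm_bo_move_accel_cleanup()
                     * then fences the move so the old memory node is
                     * released (or handed to a transfer object) only
                     * when the copy has completed.
                     */
                    example_emit_blit(bo, new_mem, &fence_type,
                                      &fence_flags);
                    return drm_bo_move_accel_cleanup(bo, evict, no_wait,
                                                     fence_type,
                                                     fence_flags, new_mem);
            }

            if (bo->mem.mem_type == DRM_BO_MEM_TT ||
                new_mem->mem_type == DRM_BO_MEM_TT)
                    /* Bind or unbind the TTM; no aperture copy needed. */
                    return drm_bo_move_ttm(bo, evict, no_wait, new_mem);

            /* Fallback: synchronous CPU copy, page by page, through
             * the ioremapped apertures.
             */
            return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
    }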
@@ -79,54 +79,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 
 /*
- * vm code for kernels below 2,6,15 in which version a major vm write
+ * vm code for kernels below 2.6.15 in which version a major vm write
  * occured. This implement a simple straightforward
  * version similar to what's going to be
- * in kernel 2.6.20+?
+ * in kernel 2.6.19+
+ * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
+ * nopfn.
  */
 
-static int drm_pte_is_clear(struct vm_area_struct *vma,
-			    unsigned long addr)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int ret = 1;
-	pte_t *pte;
-	pmd_t *pmd;
-	pud_t *pud;
-	pgd_t *pgd;
-
-
-	spin_lock(&mm->page_table_lock);
-	pgd = pgd_offset(mm, addr);
-	if (pgd_none(*pgd))
-		goto unlock;
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud))
-		goto unlock;
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
-		goto unlock;
-	pte = pte_offset_map(pmd, addr);
-	if (!pte)
-		goto unlock;
-	ret = pte_none(*pte);
-	pte_unmap(pte);
-      unlock:
-	spin_unlock(&mm->page_table_lock);
-	return ret;
-}
-
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-		  unsigned long pfn, pgprot_t pgprot)
-{
-	int ret;
-	if (!drm_pte_is_clear(vma, addr))
-		return -EBUSY;
-
-	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
-	return ret;
-}
-
 static struct {
 	spinlock_t lock;
 	struct page *dummy_page;
@@ -160,7 +120,7 @@ void free_nopage_retry(void)
 	}
 }
 
-struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
 			       unsigned long address,
 			       int *type)
 {
@@ -171,7 +131,7 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
 
 	data.address = address;
 	data.vma = vma;
-	drm_vm_ttm_fault(vma, &data);
+	drm_bo_vm_fault(vma, &data);
 	switch (data.type) {
 	case VM_FAULT_OOM:
 		return NOPAGE_OOM;
@@ -186,10 +146,85 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
 
 #endif
 
+#if !defined(DRM_FULL_MM_COMPAT) && \
+  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
+   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
+
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+			    unsigned long addr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 1;
+	pte_t *pte;
+	pmd_t *pmd;
+	pud_t *pud;
+	pgd_t *pgd;
+
+	spin_lock(&mm->page_table_lock);
+	pgd = pgd_offset(mm, addr);
+	if (pgd_none(*pgd))
+		goto unlock;
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud))
+		goto unlock;
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd))
+		goto unlock;
+	pte = pte_offset_map(pmd, addr);
+	if (!pte)
+		goto unlock;
+	ret = pte_none(*pte);
+	pte_unmap(pte);
+      unlock:
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+		  unsigned long pfn)
+{
+	int ret;
+	if (!drm_pte_is_clear(vma, addr))
+		return -EBUSY;
+
+	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
+	return ret;
+}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))
+
+/**
+ * While waiting for the fault() handler to appear in
+ * we accomplish approximately
+ * the same wrapping it with nopfn.
+ */
+
+unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
+			      unsigned long address)
+{
+	struct fault_data data;
+	data.address = address;
+
+	(void) drm_bo_vm_fault(vma, &data);
+	if (data.type == VM_FAULT_OOM)
+		return NOPFN_OOM;
+	else if (data.type == VM_FAULT_SIGBUS)
+		return NOPFN_SIGBUS;
+
+	/*
+	 * pfn already set.
+	 */
+
+	return 0;
+}
+#endif
+
+
 #ifdef DRM_ODD_MM_COMPAT
 
 /*
- * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
+ * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
  * workaround for a single BUG statement in do_no_page in these versions. The
  * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
  * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
@@ -212,109 +247,100 @@ typedef struct vma_entry {
 } vma_entry_t;
 
 
-struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
 			       unsigned long address,
 			       int *type)
 {
-	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
 	unsigned long page_offset;
 	struct page *page;
 	drm_ttm_t *ttm;
-	drm_buffer_manager_t *bm;
 	drm_device_t *dev;
 
-	/*
-	 * FIXME: Check can't map aperture flag.
-	 */
+	mutex_lock(&bo->mutex);
 
 	if (type)
 		*type = VM_FAULT_MINOR;
 
-	if (!map)
-		return NOPAGE_OOM;
+	if (address > vma->vm_end) {
+		page = NOPAGE_SIGBUS;
+		goto out_unlock;
+	}
 
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;
+	dev = bo->dev;
 
-	ttm = (drm_ttm_t *) map->offset;
-	dev = ttm->dev;
-	mutex_lock(&dev->struct_mutex);
-	drm_fixup_ttm_caching(ttm);
-	BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
+	if (drm_mem_reg_is_pci(dev, &bo->mem)) {
+		DRM_ERROR("Invalid compat nopage.\n");
+		page = NOPAGE_SIGBUS;
+		goto out_unlock;
+	}
 
-	bm = &dev->bm;
+	ttm = bo->ttm;
+	drm_ttm_fixup_caching(ttm);
 	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
-	page = ttm->pages[page_offset];
+	page = drm_ttm_get_page(ttm, page_offset);
 
 	if (!page) {
-		if (drm_alloc_memctl(PAGE_SIZE)) {
-			page = NOPAGE_OOM;
-			goto out;
-		}
-		page = ttm->pages[page_offset] =
-		    alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-		if (!page) {
-			drm_free_memctl(PAGE_SIZE);
-			page = NOPAGE_OOM;
-			goto out;
-		}
-		++bm->cur_pages;
-		SetPageLocked(page);
+		page = NOPAGE_OOM;
+		goto out_unlock;
 	}
 
 	get_page(page);
-      out:
-	mutex_unlock(&dev->struct_mutex);
+      out_unlock:
+	mutex_unlock(&bo->mutex);
 	return page;
 }
 
 
-int drm_ttm_map_bound(struct vm_area_struct *vma)
+int drm_bo_map_bound(struct vm_area_struct *vma)
 {
-	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
-	drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+	drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
 	int ret = 0;
+	unsigned long bus_base;
+	unsigned long bus_offset;
+	unsigned long bus_size;
 
-	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
-		unsigned long pfn = ttm->aper_offset +
-			(ttm->be->aperture_base >> PAGE_SHIFT);
-		pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+				&bus_offset, &bus_size);
+	BUG_ON(ret);
 
+	if (bus_size) {
+		drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+		unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
+		pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
 		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
 					 vma->vm_end - vma->vm_start,
 					 pgprot);
 	}
 
 	return ret;
 }
 
 
-int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
 {
 	p_mm_entry_t *entry, *n_entry;
 	vma_entry_t *v_entry;
-	drm_local_map_t *map = (drm_local_map_t *)
-		vma->vm_private_data;
 	struct mm_struct *mm = vma->vm_mm;
 
-	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
 	if (!v_entry) {
 		DRM_ERROR("Allocation of vma pointer entry failed\n");
 		return -ENOMEM;
 	}
 	v_entry->vma = vma;
-	map->handle = (void *) v_entry;
-	list_add_tail(&v_entry->head, &ttm->vma_list);
 
-	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+	list_add_tail(&v_entry->head, &bo->vma_list);
+
+	list_for_each_entry(entry, &bo->p_mm_list, head) {
 		if (mm == entry->mm) {
 			atomic_inc(&entry->refcount);
 			return 0;
 		} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
 	}
 
-	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
 	if (!n_entry) {
 		DRM_ERROR("Allocation of process mm pointer entry failed\n");
 		return -ENOMEM;
@@ -328,29 +354,29 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
 	return 0;
 }
 
-void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
 {
 	p_mm_entry_t *entry, *n;
 	vma_entry_t *v_entry, *v_n;
 	int found = 0;
 	struct mm_struct *mm = vma->vm_mm;
 
-	list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
+	list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
 		if (v_entry->vma == vma) {
 			found = 1;
 			list_del(&v_entry->head);
-			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
 			break;
 		}
 	}
 	BUG_ON(!found);
 
-	list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
+	list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
 		if (mm == entry->mm) {
 			if (atomic_add_negative(-1, &entry->refcount)) {
 				list_del(&entry->head);
 				BUG_ON(entry->locked);
-				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
+				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
 			}
 			return;
 		}
@@ -360,12 +386,12 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
 
 
 
-int drm_ttm_lock_mm(drm_ttm_t * ttm)
+int drm_bo_lock_kmm(drm_buffer_object_t * bo)
 {
 	p_mm_entry_t *entry;
 	int lock_ok = 1;
 
-	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+	list_for_each_entry(entry, &bo->p_mm_list, head) {
 		BUG_ON(entry->locked);
 		if (!down_write_trylock(&entry->mm->mmap_sem)) {
 			lock_ok = 0;
@@ -377,7 +403,7 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
 	if (lock_ok)
 		return 0;
 
-	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+	list_for_each_entry(entry, &bo->p_mm_list, head) {
 		if (!entry->locked)
 			break;
 		up_write(&entry->mm->mmap_sem);
@@ -392,43 +418,40 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
 	return -EAGAIN;
 }
 
-void drm_ttm_unlock_mm(drm_ttm_t * ttm)
+void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
 {
 	p_mm_entry_t *entry;
 
-	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+	list_for_each_entry(entry, &bo->p_mm_list, head) {
 		BUG_ON(!entry->locked);
 		up_write(&entry->mm->mmap_sem);
 		entry->locked = 0;
 	}
 }
 
-int drm_ttm_remap_bound(drm_ttm_t *ttm)
+int drm_bo_remap_bound(drm_buffer_object_t *bo)
 {
 	vma_entry_t *v_entry;
 	int ret = 0;
 
-	list_for_each_entry(v_entry, &ttm->vma_list, head) {
-		ret = drm_ttm_map_bound(v_entry->vma);
-		if (ret)
-			break;
+	if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
+		list_for_each_entry(v_entry, &bo->vma_list, head) {
+			ret = drm_bo_map_bound(v_entry->vma);
+			if (ret)
+				break;
+		}
 	}
 
-	drm_ttm_unlock_mm(ttm);
 	return ret;
 }
 
-void drm_ttm_finish_unmap(drm_ttm_t *ttm)
+void drm_bo_finish_unmap(drm_buffer_object_t *bo)
 {
 	vma_entry_t *v_entry;
 
-	if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
-		return;
-
-	list_for_each_entry(v_entry, &ttm->vma_list, head) {
+	list_for_each_entry(v_entry, &bo->vma_list, head) {
 		v_entry->vma->vm_flags &= ~VM_PFNMAP;
 	}
-	drm_ttm_unlock_mm(ttm);
 }
 
 #endif
@@ -158,11 +158,14 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
 #include <linux/mm.h>
 #include <asm/page.h>
 
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && \
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
      (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
 #define DRM_ODD_MM_COMPAT
 #endif
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
+#define DRM_FULL_MM_COMPAT
+#endif
+
 
 /*
@@ -200,15 +203,20 @@ extern int drm_map_page_into_agp(struct page *page);
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 extern struct page *get_nopage_retry(void);
 extern void free_nopage_retry(void);
-struct fault_data;
-extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
-				     struct fault_data *data);
 
 #define NOPAGE_REFAULT get_nopage_retry()
 #endif
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
+#if !defined(DRM_FULL_MM_COMPAT) && \
+  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
+   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
+
+struct fault_data;
+extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+				    struct fault_data *data);
+
+#endif
+#ifndef DRM_FULL_MM_COMPAT
 
 /*
  * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
@@ -228,17 +236,21 @@ struct fault_data {
 
 
 extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-			 unsigned long pfn, pgprot_t pgprot);
+			 unsigned long pfn);
 
-extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int *type);
-#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
+				     unsigned long address,
+				     int *type);
+#else
+extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+				     unsigned long address);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
+#endif /* ndef DRM_FULL_MM_COMPAT */
 
 #ifdef DRM_ODD_MM_COMPAT
 
-struct drm_ttm;
+struct drm_buffer_object;
 
 
 /*
@@ -246,14 +258,14 @@ struct drm_ttm;
  * process mm pointer to the ttm mm list. Needs the ttm mutex.
  */
 
-extern int drm_ttm_add_vma(struct drm_ttm * ttm,
+extern int drm_bo_add_vma(struct drm_buffer_object * bo,
 			   struct vm_area_struct *vma);
 /*
  * Delete a vma and the corresponding mm pointer from the
  * ttm lists. Needs the ttm mutex.
  */
-extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
+extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
 			       struct vm_area_struct *vma);
 
 /*
 * Attempts to lock all relevant mmap_sems for a ttm, while
@@ -262,12 +274,12 @@ extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
  * schedule() and try again.
  */
 
-extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
+extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
 
 /*
  * Unlock all relevant mmap_sems for a ttm.
  */
-extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
+extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
 
 /*
  * If the ttm was bound to the aperture, this function shall be called
@@ -277,7 +289,7 @@ extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
  * releases the mmap_sems for this ttm.
  */
 
-extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
+extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
 
 /*
  * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
@@ -286,14 +298,14 @@ extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
  * releases the mmap_sems for this ttm.
  */
 
-extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
+extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
 
 
 /*
  * Remap a vma for a bound ttm. Call with the ttm mutex held and
  * the relevant mmap_sem locked.
  */
-extern int drm_ttm_map_bound(struct vm_area_struct *vma);
+extern int drm_bo_map_bound(struct vm_area_struct *vma);
 
 #endif
 #endif
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,6 +11,10 @@
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@@ -19,11 +23,6 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
 **************************************************************************/
 /*
  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@@ -35,7 +34,8 @@
  * Typically called by the IRQ handler.
  */
 
-void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
+void drm_fence_handler(drm_device_t * dev, uint32_t class,
+		       uint32_t sequence, uint32_t type)
 {
 	int wake = 0;
 	uint32_t diff;
@@ -147,7 +147,7 @@ static void drm_fence_object_destroy(drm_file_t * priv,
 	drm_fence_usage_deref_locked(dev, fence);
 }
 
-static int fence_signaled(drm_device_t * dev, volatile
+static int fence_signaled(drm_device_t * dev,
 			  drm_fence_object_t * fence,
 			  uint32_t mask, int poke_flush)
 {
@@ -157,7 +157,7 @@ static int fence_signaled(drm_device_t * dev, volatile
 	drm_fence_driver_t *driver = dev->driver->fence_driver;
 
 	if (poke_flush)
-		driver->poke_flush(dev);
+		driver->poke_flush(dev, fence->class);
 	read_lock_irqsave(&fm->lock, flags);
 	signaled =
 	    (fence->type & mask & fence->signaled) == (fence->type & mask);
@@ -172,13 +172,12 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm,
 	uint32_t diff;
 
 	if (!fm->pending_exe_flush) {
-		volatile struct list_head *list;
+		struct list_head *list;
 
 		/*
 		 * Last_exe_flush is invalid. Find oldest sequence.
 		 */
 
-/*		list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
 		list = &fm->ring;
 		if (list->next == &fm->ring) {
 			return;
@@ -202,14 +201,15 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm,
 	}
 }
 
-int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+int drm_fence_object_signaled(drm_fence_object_t * fence,
 			      uint32_t type)
 {
 	return ((fence->signaled & type) == type);
 }
 
 int drm_fence_object_flush(drm_device_t * dev,
-			   volatile drm_fence_object_t * fence, uint32_t type)
+			   drm_fence_object_t * fence,
+			   uint32_t type)
 {
 	drm_fence_manager_t *fm = &dev->fm;
 	drm_fence_driver_t *driver = dev->driver->fence_driver;
|
@ -235,7 +235,7 @@ int drm_fence_object_flush(drm_device_t * dev,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
write_unlock_irqrestore(&fm->lock, flags);
|
write_unlock_irqrestore(&fm->lock, flags);
|
||||||
driver->poke_flush(dev);
|
driver->poke_flush(dev, fence->class);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -274,11 +274,37 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
|
||||||
|
|
||||||
EXPORT_SYMBOL(drm_fence_flush_old);
|
EXPORT_SYMBOL(drm_fence_flush_old);
|
||||||
|
|
||||||
int drm_fence_object_wait(drm_device_t * dev,
|
static int drm_fence_lazy_wait(drm_device_t *dev,
|
||||||
volatile drm_fence_object_t * fence,
|
drm_fence_object_t *fence,
|
||||||
int lazy, int ignore_signals, uint32_t mask)
|
int ignore_signals, uint32_t mask)
|
||||||
{
|
{
|
||||||
drm_fence_manager_t *fm = &dev->fm;
|
drm_fence_manager_t *fm = &dev->fm;
|
||||||
|
unsigned long _end = jiffies + 3*DRM_HZ;
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
|
do {
|
||||||
|
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
|
||||||
|
fence_signaled(dev, fence, mask, 1));
|
||||||
|
if (time_after_eq(jiffies, _end))
|
||||||
|
break;
|
||||||
|
} while (ret == -EINTR && ignore_signals);
|
||||||
|
if (time_after_eq(jiffies, _end) && (ret != 0))
|
||||||
|
ret = -EBUSY;
|
||||||
|
if (ret) {
|
||||||
|
if (ret == -EBUSY) {
|
||||||
|
DRM_ERROR("Fence timeout. "
|
||||||
|
"GPU lockup or fence driver was "
|
||||||
|
"taken down.\n");
|
||||||
|
}
|
||||||
|
return ((ret == -EINTR) ? -EAGAIN : ret);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int drm_fence_object_wait(drm_device_t * dev,
|
||||||
|
drm_fence_object_t * fence,
|
||||||
|
int lazy, int ignore_signals, uint32_t mask)
|
||||||
|
{
|
||||||
drm_fence_driver_t *driver = dev->driver->fence_driver;
|
drm_fence_driver_t *driver = dev->driver->fence_driver;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
unsigned long _end;
|
unsigned long _end;
|
||||||
|
@ -299,46 +325,32 @@ int drm_fence_object_wait(drm_device_t * dev,
|
||||||
|
|
||||||
if (lazy && driver->lazy_capable) {
|
if (lazy && driver->lazy_capable) {
|
||||||
|
|
||||||
do {
|
ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
|
||||||
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
|
|
||||||
fence_signaled(dev, fence, mask, 1));
|
|
||||||
if (time_after_eq(jiffies, _end))
|
|
||||||
break;
|
|
||||||
} while (ret == -EINTR && ignore_signals);
|
|
||||||
if (time_after_eq(jiffies, _end) && (ret != 0))
|
|
||||||
ret = -EBUSY;
|
|
||||||
if (ret) {
|
|
||||||
if (ret == -EBUSY) {
|
|
||||||
DRM_ERROR("Fence timeout. "
|
|
||||||
"GPU lockup or fence driver was "
|
|
||||||
"taken down.\n");
|
|
||||||
}
|
|
||||||
return ((ret == -EINTR) ? -EAGAIN : ret);
|
|
||||||
}
|
|
||||||
} else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
|
|
||||||
driver->lazy_capable) {
|
|
||||||
|
|
||||||
/*
|
|
||||||
* We use IRQ wait for EXE fence if available to gain
|
|
||||||
* CPU in some cases.
|
|
||||||
*/
|
|
||||||
|
|
||||||
do {
|
|
||||||
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
|
|
||||||
fence_signaled(dev, fence,
|
|
||||||
DRM_FENCE_TYPE_EXE, 1));
|
|
||||||
if (time_after_eq(jiffies, _end))
|
|
||||||
break;
|
|
||||||
} while (ret == -EINTR && ignore_signals);
|
|
||||||
if (time_after_eq(jiffies, _end) && (ret != 0))
|
|
||||||
ret = -EBUSY;
|
|
||||||
if (ret)
|
if (ret)
|
||||||
return ((ret == -EINTR) ? -EAGAIN : ret);
|
return ret;
|
||||||
}
|
|
||||||
|
|
||||||
|
} else {
|
||||||
|
|
||||||
|
if (driver->has_irq(dev, fence->class,
|
||||||
|
DRM_FENCE_TYPE_EXE)) {
|
||||||
|
ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
|
||||||
|
DRM_FENCE_TYPE_EXE);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (driver->has_irq(dev, fence->class,
|
||||||
|
mask & ~DRM_FENCE_TYPE_EXE)) {
|
||||||
|
ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
|
||||||
|
mask);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
if (fence_signaled(dev, fence, mask, 0))
|
if (fence_signaled(dev, fence, mask, 0))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
DRM_ERROR("Busy wait\n");
|
||||||
/*
|
/*
|
||||||
* Avoid kernel-space busy-waits.
|
* Avoid kernel-space busy-waits.
|
||||||
*/
|
*/
|
||||||
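The hunk above collapses the two near-identical three-second wait loops into drm_fence_lazy_wait(), whose DRM_WAIT_ON sleep is re-armed after signals when the caller asked to ignore them, bounded by the wall-clock _end; only a wait that is both timed out and still unfinished is promoted to -EBUSY. A condensed sketch of the resulting control flow in drm_fence_object_wait() — a summary of the code above with the trailing busy-wait fallback elided, not a verbatim copy:

	/* Condensed flow of drm_fence_object_wait() after this change. */
	if (lazy && driver->lazy_capable) {
		/* One IRQ-driven wait covers all requested fence types. */
		ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
		if (ret)
			return ret;
	} else {
		/* Wait lazily only for types this class can raise IRQs for, */
		if (driver->has_irq(dev, fence->class, DRM_FENCE_TYPE_EXE)) {
			ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
						  DRM_FENCE_TYPE_EXE);
			if (ret)
				return ret;
		}
		if (driver->has_irq(dev, fence->class,
				    mask & ~DRM_FENCE_TYPE_EXE)) {
			ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
						  mask);
			if (ret)
				return ret;
		}
		/* ...anything still unsignaled is polled below. */
	}
	if (fence_signaled(dev, fence, mask, 0))
		return 0;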
@@ -368,7 +380,7 @@ int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
 	int ret;
 
 	drm_fence_unring(dev, &fence->ring);
-	ret = driver->emit(dev, fence_flags, &sequence, &native_type);
+	ret = driver->emit(dev, fence->class, fence_flags, &sequence, &native_type);
 	if (ret)
 		return ret;
 
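With drm_fence_handler(), poke_flush() and emit() all taking a class argument, a device with several command rings can keep an independent breadcrumb sequence per ring. A sketch of a driver-side emit hook against the new drm_fence_driver prototype; every mydrv_* name and the ring bookkeeping are hypothetical — only the signature comes from this merge:

	static int mydrv_fence_emit(struct drm_device *dev, uint32_t class,
				    uint32_t flags, uint32_t *breadcrumb,
				    uint32_t *native_type)
	{
		/* Hypothetical per-class ring state: one sequence per ring. */
		struct mydrv_ring *ring = &mydrv_priv(dev)->ring[class];

		*breadcrumb = ++ring->sequence;	/* value reported back later
						 * through drm_fence_handler() */
		*native_type = DRM_FENCE_TYPE_EXE; /* what the hw can signal */
		mydrv_ring_emit_breadcrumb(ring, *breadcrumb); /* hypothetical */
		return 0;
	}

The IRQ handler then reports completions with drm_fence_handler(dev, class, sequence, type), matching the first hunk of this file.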
@@ -217,6 +217,7 @@ void drm_mm_put_block(drm_mm_node_t * cur)
 			drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
 		}
 	}
 }
+EXPORT_SYMBOL(drm_mm_put_block);
 
 drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
 				  unsigned long size,
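drm_mm_put_block() is exported here so code outside this file — such as the drm_bo_move.c helpers declared further down in drm_objects.h — can return memory-manager blocks. For context, the allocator's usual cycle, pieced together from calls visible elsewhere in this diff and assuming an already-initialized drm_mm_t mm:

	/* Typical drm_mm usage: find a hole, claim part of it, return it. */
	drm_mm_node_t *node;

	node = drm_mm_search_free(&mm, num_pages, 0 /* alignment */,
				  0 /* best match */);
	if (!node)
		return -ENOMEM;			/* no hole large enough */
	node = drm_mm_get_block(node, num_pages, 0); /* split + claim hole */
	/* ... use [node->start, node->start + num_pages) ... */
	drm_mm_put_block(node);			/* free, coalescing holes */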
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,6 +11,10 @@
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@@ -19,12 +23,10 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
 **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
 
 #include "drmP.h"
 
@@ -0,0 +1,469 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_OBJECTS_H
+#define _DRM_OJBECTS_H
+#define DRM_HAS_TTM
+
+struct drm_device;
+
+/***************************************************
+ * User space objects. (drm_object.c)
+ */
+
+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+typedef enum {
+	drm_fence_type,
+	drm_buffer_type,
+	drm_ttm_type
+	/*
+	 * Add other user space object types here.
+	 */
+} drm_object_type_t;
+
+/*
+ * A user object is a structure that helps the drm give out user handles
+ * to kernel internal objects and to keep track of these objects so that
+ * they can be destroyed, for example when the user space process exits.
+ * Designed to be accessible using a user space 32-bit handle.
+ */
+
+typedef struct drm_user_object {
+	drm_hash_item_t hash;
+	struct list_head list;
+	drm_object_type_t type;
+	atomic_t refcount;
+	int shareable;
+	drm_file_t *owner;
+	void (*ref_struct_locked) (drm_file_t * priv,
+				   struct drm_user_object * obj,
+				   drm_ref_t ref_action);
+	void (*unref) (drm_file_t * priv, struct drm_user_object * obj,
+		       drm_ref_t unref_action);
+	void (*remove) (drm_file_t * priv, struct drm_user_object * obj);
+} drm_user_object_t;
+
+/*
+ * A ref object is a structure which is used to
+ * keep track of references to user objects and to keep track of these
+ * references so that they can be destroyed for example when the user space
+ * process exits. Designed to be accessible using a pointer to the _user_ object.
+ */
+
+typedef struct drm_ref_object {
+	drm_hash_item_t hash;
+	struct list_head list;
+	atomic_t refcount;
+	drm_ref_t unref_action;
+} drm_ref_object_t;
+
+/**
+ * Must be called with the struct_mutex held.
+ */
+
+extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+			       int shareable);
+/**
+ * Must be called with the struct_mutex held.
+ */
+
+extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
+						 uint32_t key);
+
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_user_object. You may not
+ * release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item);
+
+/*
+ * Must be called with the struct_mutex held. May temporarily release it.
+ */
+
+extern int drm_add_ref_object(drm_file_t * priv,
+			      drm_user_object_t * referenced_object,
+			      drm_ref_t ref_action);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
+					drm_user_object_t * referenced_object,
+					drm_ref_t ref_action);
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
+ * release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item);
+extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
+			       drm_object_type_t type,
+			       drm_user_object_t ** object);
+extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
+				 drm_object_type_t type);
+
+/***************************************************
+ * Fence objects. (drm_fence.c)
+ */
+
+typedef struct drm_fence_object {
+	drm_user_object_t base;
+	atomic_t usage;
+
+	/*
+	 * The below three fields are protected by the fence manager spinlock.
+	 */
+
+	struct list_head ring;
+	int class;
+	uint32_t native_type;
+	uint32_t type;
+	uint32_t signaled;
+	uint32_t sequence;
+	uint32_t flush_mask;
+	uint32_t submitted_flush;
+} drm_fence_object_t;
+
+#define _DRM_FENCE_TYPE_EXE 0x00
+
+typedef struct drm_fence_manager {
+	int initialized;
+	rwlock_t lock;
+
+	/*
+	 * The list below should be maintained in sequence order and
+	 * access is protected by the above spinlock.
+	 */
+
+	struct list_head ring;
+	struct list_head *fence_types[32];
+	volatile uint32_t pending_flush;
+	wait_queue_head_t fence_queue;
+	int pending_exe_flush;
+	uint32_t last_exe_flush;
+	uint32_t exe_flush_sequence;
+	atomic_t count;
+} drm_fence_manager_t;
+
+typedef struct drm_fence_driver {
+	int no_types;
+	uint32_t wrap_diff;
+	uint32_t flush_diff;
+	uint32_t sequence_mask;
+	int lazy_capable;
+	int (*has_irq) (struct drm_device * dev, uint32_t class,
+			uint32_t flags);
+	int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
+		     uint32_t * breadcrumb, uint32_t * native_type);
+	void (*poke_flush) (struct drm_device * dev, uint32_t class);
+} drm_fence_driver_t;
+
+extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
+			      uint32_t sequence, uint32_t type);
+extern void drm_fence_manager_init(struct drm_device *dev);
+extern void drm_fence_manager_takedown(struct drm_device *dev);
+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t sequence);
+extern int drm_fence_object_flush(struct drm_device *dev,
+				  drm_fence_object_t * fence, uint32_t type);
+extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
+extern void drm_fence_usage_deref_locked(struct drm_device *dev,
+					 drm_fence_object_t * fence);
+extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
+					   drm_fence_object_t * fence);
+extern int drm_fence_object_wait(struct drm_device *dev,
+				 drm_fence_object_t * fence,
+				 int lazy, int ignore_signals, uint32_t mask);
+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
+				   uint32_t fence_flags,
+				   drm_fence_object_t ** c_fence);
+extern int drm_fence_add_user_object(drm_file_t * priv,
+				     drm_fence_object_t * fence, int shareable);
+extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+
+/**************************************************
+ *TTMs
+ */
+
+/*
+ * The ttm backend GTT interface. (In our case AGP).
+ * Any similar type of device (PCIE?)
+ * needs only to implement these functions to be usable with the "TTM" interface.
+ * The AGP backend implementation lives in drm_agpsupport.c
+ * basically maps these calls to available functions in agpgart.
+ * Each drm device driver gets an
+ * additional function pointer that creates these types,
+ * so that the device can choose the correct aperture.
+ * (Multiple AGP apertures, etc.)
+ * Most device drivers will let this point to the standard AGP implementation.
+ */
+
+#define DRM_BE_FLAG_NEEDS_FREE   0x00000001
+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
+
+typedef struct drm_ttm_backend {
+	void *private;
+	uint32_t flags;
+	uint32_t drm_map_type;
+	int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
+	int (*populate) (struct drm_ttm_backend * backend,
+			 unsigned long num_pages, struct page ** pages);
+	void (*clear) (struct drm_ttm_backend * backend);
+	int (*bind) (struct drm_ttm_backend * backend,
+		     unsigned long offset, int cached);
+	int (*unbind) (struct drm_ttm_backend * backend);
+	void (*destroy) (struct drm_ttm_backend * backend);
+} drm_ttm_backend_t;
+
+typedef struct drm_ttm {
+	struct page **pages;
+	uint32_t page_flags;
+	unsigned long num_pages;
+	unsigned long aper_offset;
+	atomic_t vma_count;
+	struct drm_device *dev;
+	int destroy;
+	uint32_t mapping_offset;
+	drm_ttm_backend_t *be;
+	enum {
+		ttm_bound,
+		ttm_evicted,
+		ttm_unbound,
+		ttm_unpopulated,
+	} state;
+
+} drm_ttm_t;
+
+extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
+extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
+extern void drm_ttm_unbind(drm_ttm_t * ttm);
+extern void drm_ttm_evict(drm_ttm_t * ttm);
+extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
+extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
+
+/*
+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
+ * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
+ * when the last vma exits.
+ */
+
+extern int drm_destroy_ttm(drm_ttm_t * ttm);
+
+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
+	(_old) ^= (((_old) ^ (_new)) & (_mask)); \
+}
+
+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
+
+/*
+ * Page flags.
+ */
+
+#define DRM_TTM_PAGE_UNCACHED 0x01
+#define DRM_TTM_PAGE_USED     0x02
+#define DRM_TTM_PAGE_BOUND    0x04
+#define DRM_TTM_PAGE_PRESENT  0x08
+#define DRM_TTM_PAGE_VMALLOC  0x10
+
+/***************************************************
+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
+ */
+
+typedef struct drm_bo_mem_reg {
+	drm_mm_node_t *mm_node;
+	unsigned long size;
+	unsigned long num_pages;
+	uint32_t page_alignment;
+	uint32_t mem_type;
+	uint32_t flags;
+	uint32_t mask;
+} drm_bo_mem_reg_t;
+
+typedef struct drm_buffer_object {
+	struct drm_device *dev;
+	drm_user_object_t base;
+
+	/*
+	 * If there is a possibility that the usage variable is zero,
+	 * then dev->struct_mutext should be locked before incrementing it.
+	 */
+
+	atomic_t usage;
+	unsigned long buffer_start;
+	drm_bo_type_t type;
+	unsigned long offset;
+	atomic_t mapped;
+	drm_bo_mem_reg_t mem;
+
+	struct list_head lru;
+	struct list_head ddestroy;
+
+	uint32_t fence_type;
+	uint32_t fence_class;
+	drm_fence_object_t *fence;
+	uint32_t priv_flags;
+	wait_queue_head_t event_queue;
+	struct mutex mutex;
+
+	/* For pinned buffers */
+	drm_mm_node_t *pinned_node;
+	uint32_t pinned_mem_type;
+	struct list_head pinned_lru;
+
+	/* For vm */
+
+	drm_ttm_t *ttm;
+	drm_map_list_t map_list;
+	uint32_t memory_type;
+	unsigned long bus_offset;
+	uint32_t vm_flags;
+	void *iomap;
+
+#ifdef DRM_ODD_MM_COMPAT
+	/* dev->struct_mutex only protected. */
+	struct list_head vma_list;
+	struct list_head p_mm_list;
+#endif
+
+} drm_buffer_object_t;
+
+#define _DRM_BO_FLAG_UNFENCED 0x00000001
+#define _DRM_BO_FLAG_EVICTED  0x00000002
+
+typedef struct drm_mem_type_manager {
+	int has_type;
+	int use_type;
+	drm_mm_t manager;
+	struct list_head lru;
+	struct list_head pinned;
+	uint32_t flags;
+	uint32_t drm_bus_maptype;
+	unsigned long io_offset;
+	unsigned long io_size;
+	void *io_addr;
+} drm_mem_type_manager_t;
+
+#define _DRM_FLAG_MEMTYPE_FIXED    0x00000001	/* Fixed (on-card) PCI memory */
+#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002	/* Memory mappable */
+#define _DRM_FLAG_MEMTYPE_CACHED   0x00000004	/* Cached binding */
+#define _DRM_FLAG_NEEDS_IOREMAP    0x00000008	/* Fixed memory needs ioremap
+						   before kernel access. */
+#define _DRM_FLAG_MEMTYPE_CMA      0x00000010	/* Can't map aperture */
+#define _DRM_FLAG_MEMTYPE_CSELECT  0x00000020	/* Select caching */
+
+typedef struct drm_buffer_manager {
+	struct mutex init_mutex;
+	struct mutex evict_mutex;
+	int nice_mode;
+	int initialized;
+	drm_file_t *last_to_validate;
+	drm_mem_type_manager_t man[DRM_BO_MEM_TYPES];
+	struct list_head unfenced;
+	struct list_head ddestroy;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+	struct work_struct wq;
+#else
+	struct delayed_work wq;
+#endif
+	uint32_t fence_type;
+	unsigned long cur_pages;
+	atomic_t count;
+} drm_buffer_manager_t;
+
+typedef struct drm_bo_driver {
+	const uint32_t *mem_type_prio;
+	const uint32_t *mem_busy_prio;
+	uint32_t num_mem_type_prio;
+	uint32_t num_mem_busy_prio;
+	drm_ttm_backend_t *(*create_ttm_backend_entry)
+	 (struct drm_device * dev);
+	int (*fence_type) (uint32_t flags, uint32_t * class, uint32_t * type);
+	int (*invalidate_caches) (struct drm_device * dev, uint32_t flags);
+	int (*init_mem_type) (struct drm_device * dev, uint32_t type,
+			      drm_mem_type_manager_t * man);
+	 uint32_t(*evict_flags) (struct drm_device * dev, uint32_t type);
+	int (*move) (struct drm_buffer_object * bo,
+		     int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
+} drm_bo_driver_t;
+
+/*
+ * buffer objects (drm_bo.c)
+ */
+
+extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_driver_finish(struct drm_device *dev);
+extern int drm_bo_driver_init(struct drm_device *dev);
+extern int drm_bo_pci_offset(struct drm_device *dev,
+			     drm_bo_mem_reg_t * mem,
+			     unsigned long *bus_base,
+			     unsigned long *bus_offset,
+			     unsigned long *bus_size);
+extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
+
+extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
+extern int drm_fence_buffer_objects(drm_file_t * priv,
+				    struct list_head *list,
+				    uint32_t fence_flags,
+				    drm_fence_object_t * fence,
+				    drm_fence_object_t ** used_fence);
+extern void drm_bo_add_to_lru(drm_buffer_object_t * bo);
+extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+		       int no_wait);
+extern int drm_bo_mem_space(drm_buffer_object_t * bo,
+			    drm_bo_mem_reg_t * mem, int no_wait);
+extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
+			      int no_wait, int move_unfenced);
+
+/*
+ * Buffer object memory move helpers.
+ * drm_bo_move.c
+ */
+
+extern int drm_bo_move_ttm(drm_buffer_object_t * bo,
+			   int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
+extern int drm_bo_move_memcpy(drm_buffer_object_t * bo,
+			      int evict,
+			      int no_wait, drm_bo_mem_reg_t * new_mem);
+extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
+				     int evict,
+				     int no_wait,
+				     uint32_t fence_type,
+				     uint32_t fence_flags,
+				     drm_bo_mem_reg_t * new_mem);
+
+#endif
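The new header above is the whole of the driver-facing TTM API. Per its own comment, a driver only has to supply a drm_ttm_backend_t to expose its GART, handed out through the drm_bo_driver::create_ttm_backend_entry hook. A skeletal sketch of such a factory; every mydrv_* symbol is hypothetical, only the structure and flags come from the header:

	static drm_ttm_backend_t *mydrv_create_ttm_backend_entry(struct drm_device *dev)
	{
		drm_ttm_backend_t *be =
		    drm_ctl_calloc(1, sizeof(*be), DRM_MEM_TTM);

		if (!be)
			return NULL;
		be->flags = DRM_BE_FLAG_NEEDS_FREE; /* core frees on destroy */
		be->needs_ub_cache_adjust = mydrv_needs_cache_adjust;
		be->populate = mydrv_populate;	/* remember the page array */
		be->clear = mydrv_clear;
		be->bind = mydrv_bind;	/* program pages into the aperture */
		be->unbind = mydrv_unbind;
		be->destroy = mydrv_destroy;
		return be;
	}

Most drivers will simply return the stock AGP backend from drm_agpsupport.c here.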
@@ -68,6 +68,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
 	mutex_init(&dev->bm.init_mutex);
+	mutex_init(&dev->bm.evict_mutex);
 
 	dev->pdev = pdev;
 	dev->pci_device = pdev->device;
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
  * All Rights Reserved.
  *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,6 +11,10 @@
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@@ -19,12 +23,10 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
 **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
 
 #include "drmP.h"
 
@@ -39,12 +41,11 @@ static void drm_ttm_cache_flush(void)
 		DRM_ERROR("Timed out waiting for drm cache flush.\n");
 }
 
-
 /*
  * Use kmalloc if possible. Otherwise fall back to vmalloc.
  */
 
-static void ttm_alloc_pages(drm_ttm_t *ttm)
+static void ttm_alloc_pages(drm_ttm_t * ttm)
 {
 	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
 	ttm->pages = NULL;
@@ -65,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t *ttm)
 	}
 }
 
-static void ttm_free_pages(drm_ttm_t *ttm)
+static void ttm_free_pages(drm_ttm_t * ttm)
 {
 	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
 
@@ -79,27 +80,24 @@ static void ttm_free_pages(drm_ttm_t *ttm)
 	ttm->pages = NULL;
 }
 
-/*
- * Unmap all vma pages from vmas mapping this ttm.
- */
-
-static int unmap_vma_pages(drm_ttm_t * ttm)
+static struct page *drm_ttm_alloc_page(void)
 {
-	drm_device_t *dev = ttm->dev;
-	loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
-	loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
+	struct page *page;
 
-#ifdef DRM_ODD_MM_COMPAT
-	int ret;
-	ret = drm_ttm_lock_mm(ttm);
-	if (ret)
-		return ret;
+	if (drm_alloc_memctl(PAGE_SIZE)) {
+		return NULL;
+	}
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+	if (!page) {
+		drm_free_memctl(PAGE_SIZE);
+		return NULL;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+	SetPageLocked(page);
+#else
+	SetPageReserved(page);
 #endif
-	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
-#ifdef DRM_ODD_MM_COMPAT
-	drm_ttm_finish_unmap(ttm);
-#endif
-	return 0;
+	return page;
 }
 
 /*
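drm_ttm_alloc_page() above also centralizes the accounting discipline the old open-coded allocations followed: charge the drm memory budget before allocating, refund on failure, and refund again when the page is finally freed. The invariant, isolated from the code in this diff:

	/* memctl accounting pairing used throughout this commit. */
	if (drm_alloc_memctl(PAGE_SIZE))	/* charge the ttm budget */
		return NULL;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!page) {
		drm_free_memctl(PAGE_SIZE);	/* refund: nothing allocated */
		return NULL;
	}
	/* ... later, on teardown (see drm_destroy_ttm below): */
	__free_page(page);
	drm_free_memctl(PAGE_SIZE);		/* refund along with the page */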
@@ -135,7 +133,7 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached)
 	if (do_tlbflush)
 		flush_agp_mappings();
 
-	DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
+	DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
 
 	return 0;
 }
@@ -154,18 +152,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 	if (!ttm)
 		return 0;
 
-	if (atomic_read(&ttm->vma_count) > 0) {
-		ttm->destroy = 1;
-		DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
-		return -EBUSY;
-	}
-
-	DRM_DEBUG("Destroying a ttm\n");
-
-#ifdef DRM_TTM_ODD_COMPAT
-	BUG_ON(!list_empty(&ttm->vma_list));
-	BUG_ON(!list_empty(&ttm->p_mm_list));
-#endif
 	be = ttm->be;
 	if (be) {
 		be->destroy(be);
@@ -193,11 +179,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 					DRM_ERROR("Erroneous map count. "
 						  "Leaking page mappings.\n");
 				}
-
-				/*
-				 * End debugging.
-				 */
-
 				__free_page(*cur_page);
 				drm_free_memctl(PAGE_SIZE);
 				--bm->cur_pages;
@@ -210,37 +191,36 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 	return 0;
 }
 
+struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
+{
+	struct page *p;
+	drm_buffer_manager_t *bm = &ttm->dev->bm;
+
+	p = ttm->pages[index];
+	if (!p) {
+		p = drm_ttm_alloc_page();
+		if (!p)
+			return NULL;
+		ttm->pages[index] = p;
+		++bm->cur_pages;
+	}
+	return p;
+}
+
 static int drm_ttm_populate(drm_ttm_t * ttm)
 {
 	struct page *page;
 	unsigned long i;
-	drm_buffer_manager_t *bm;
 	drm_ttm_backend_t *be;
 
 	if (ttm->state != ttm_unpopulated)
 		return 0;
 
-	bm = &ttm->dev->bm;
 	be = ttm->be;
 	for (i = 0; i < ttm->num_pages; ++i) {
-		page = ttm->pages[i];
-		if (!page) {
-			if (drm_alloc_memctl(PAGE_SIZE)) {
-				return -ENOMEM;
-			}
-			page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-			if (!page) {
-				drm_free_memctl(PAGE_SIZE);
-				return -ENOMEM;
-			}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
-			SetPageLocked(page);
-#else
-			SetPageReserved(page);
-#endif
-			ttm->pages[i] = page;
-			++bm->cur_pages;
-		}
+		page = drm_ttm_get_page(ttm, i);
+		if (!page)
+			return -ENOMEM;
 	}
 	be->populate(be, ttm->num_pages, ttm->pages);
 	ttm->state = ttm_unbound;
@@ -251,7 +231,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
  * Initialize a ttm.
  */
 
-static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
+drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
 {
 	drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
 	drm_ttm_t *ttm;
@@ -263,11 +243,6 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
 	if (!ttm)
 		return NULL;
 
-#ifdef DRM_ODD_MM_COMPAT
-	INIT_LIST_HEAD(&ttm->p_mm_list);
-	INIT_LIST_HEAD(&ttm->vma_list);
-#endif
-
 	ttm->dev = dev;
 	atomic_set(&ttm->vma_count, 0);
 
@@ -300,29 +275,20 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
  * Unbind a ttm region from the aperture.
 */
 
-int drm_evict_ttm(drm_ttm_t * ttm)
+void drm_ttm_evict(drm_ttm_t * ttm)
 {
 	drm_ttm_backend_t *be = ttm->be;
 	int ret;
 
-	switch (ttm->state) {
-	case ttm_bound:
-		if (be->needs_ub_cache_adjust(be)) {
-			ret = unmap_vma_pages(ttm);
-			if (ret) {
-				return ret;
-			}
-		}
-		be->unbind(be);
-		break;
-	default:
-		break;
+	if (ttm->state == ttm_bound) {
+		ret = be->unbind(be);
+		BUG_ON(ret);
 	}
 
 	ttm->state = ttm_evicted;
-	return 0;
 }
 
-void drm_fixup_ttm_caching(drm_ttm_t * ttm)
+void drm_ttm_fixup_caching(drm_ttm_t * ttm)
 {
 
 	if (ttm->state == ttm_evicted) {
@@ -334,18 +300,12 @@ void drm_fixup_ttm_caching(drm_ttm_t * ttm)
 	}
 }
 
-int drm_unbind_ttm(drm_ttm_t * ttm)
+void drm_ttm_unbind(drm_ttm_t * ttm)
 {
-	int ret = 0;
-
 	if (ttm->state == ttm_bound)
-		ret = drm_evict_ttm(ttm);
+		drm_ttm_evict(ttm);
 
-	if (ret)
-		return ret;
-
-	drm_fixup_ttm_caching(ttm);
-	return 0;
+	drm_ttm_fixup_caching(ttm);
 }
 
 int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
@@ -364,26 +324,13 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
 	ret = drm_ttm_populate(ttm);
 	if (ret)
 		return ret;
-	if (ttm->state == ttm_unbound && !cached) {
-		ret = unmap_vma_pages(ttm);
-		if (ret)
-			return ret;
 
+	if (ttm->state == ttm_unbound && !cached) {
 		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
 	}
-#ifdef DRM_ODD_MM_COMPAT
-	else if (ttm->state == ttm_evicted && !cached) {
-		ret = drm_ttm_lock_mm(ttm);
-		if (ret)
-			return ret;
-	}
-#endif
 	if ((ret = be->bind(be, aper_offset, cached))) {
 		ttm->state = ttm_evicted;
-#ifdef DRM_ODD_MM_COMPAT
-		if (be->needs_ub_cache_adjust(be))
-			drm_ttm_unlock_mm(ttm);
-#endif
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;
 	}
@@ -391,130 +338,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
 	ttm->aper_offset = aper_offset;
 	ttm->state = ttm_bound;
 
-#ifdef DRM_ODD_MM_COMPAT
-	if (be->needs_ub_cache_adjust(be)) {
-		ret = drm_ttm_remap_bound(ttm);
-		if (ret)
-			return ret;
-	}
-#endif
-
 	return 0;
 }
 
-/*
- * dev->struct_mutex locked.
- */
-static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
-{
-	drm_map_list_t *list = &object->map_list;
-	drm_local_map_t *map;
-
-	if (list->user_token)
-		drm_ht_remove_item(&dev->map_hash, &list->hash);
-
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
-
-	map = list->map;
-
-	if (map) {
-		drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
-		if (ttm) {
-			if (drm_destroy_ttm(ttm) != -EBUSY) {
-				drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
-			}
-		} else {
-			drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
-		}
-	}
-
-	drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
-}
-
-void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
-{
-	if (atomic_dec_and_test(&to->usage)) {
-		drm_ttm_object_remove(dev, to);
-	}
-}
-
-void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
-{
-	if (atomic_dec_and_test(&to->usage)) {
-		mutex_lock(&dev->struct_mutex);
-		if (atomic_read(&to->usage) == 0)
-			drm_ttm_object_remove(dev, to);
-		mutex_unlock(&dev->struct_mutex);
-	}
-}
-
-/*
- * Create a ttm and add it to the drm book-keeping.
- * dev->struct_mutex locked.
- */
-
-int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
-			  uint32_t flags, drm_ttm_object_t ** ttm_object)
-{
-	drm_ttm_object_t *object;
-	drm_map_list_t *list;
-	drm_local_map_t *map;
-	drm_ttm_t *ttm;
-
-	object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
-	if (!object)
-		return -ENOMEM;
-	object->flags = flags;
-	list = &object->map_list;
-
-	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
-	if (!list->map) {
-		drm_ttm_object_remove(dev, object);
-		return -ENOMEM;
-	}
-	map = list->map;
-
-	ttm = drm_init_ttm(dev, size);
-	if (!ttm) {
-		DRM_ERROR("Could not create ttm\n");
-		drm_ttm_object_remove(dev, object);
-		return -ENOMEM;
-	}
-
-	map->offset = (unsigned long)ttm;
-	map->type = _DRM_TTM;
-	map->flags = _DRM_REMOVABLE;
-	map->size = ttm->num_pages * PAGE_SIZE;
-	map->handle = (void *)object;
-
-	/*
-	 * Add a one-page "hole" to the block size to avoid the mm subsystem
-	 * merging vmas.
-	 * FIXME: Is this really needed?
-	 */
-
-	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
-						    ttm->num_pages + 1, 0, 0);
-	if (!list->file_offset_node) {
-		drm_ttm_object_remove(dev, object);
-		return -ENOMEM;
-	}
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-						  ttm->num_pages + 1, 0);
-
-	list->hash.key = list->file_offset_node->start;
-
-	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
-		drm_ttm_object_remove(dev, object);
-		return -ENOMEM;
-	}
-
-	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
-	ttm->mapping_offset = list->hash.key;
-	atomic_set(&object->usage, 1);
-	*ttm_object = object;
-	return 0;
-}
+EXPORT_SYMBOL(drm_bind_ttm);
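Taken together, the changes to this file leave the ttm with a small, explicit state machine; the per-vma tracking and the DRM_ODD_MM_COMPAT remapping disappear because faulting now goes through the buffer-object code. A summary of the transitions implemented by the functions above (the evicted-to-unbound step is done by drm_ttm_fixup_caching(), whose full body is not shown in this diff):

	/*
	 * ttm->state transitions after this merge:
	 *
	 *   ttm_unpopulated --drm_ttm_populate()-------> ttm_unbound
	 *   ttm_unbound     --drm_bind_ttm()-----------> ttm_bound
	 *                     (drm_set_caching() first when binding uncached)
	 *   ttm_bound       --drm_ttm_evict()----------> ttm_evicted
	 *                     (backend unbind; page caching left untouched)
	 *   ttm_evicted     --drm_ttm_fixup_caching()--> ttm_unbound
	 *
	 * drm_ttm_unbind() is drm_ttm_evict() + drm_ttm_fixup_caching().
	 */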
@@ -1,146 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef _DRM_TTM_H
-#define _DRM_TTM_H
-#define DRM_HAS_TTM
-
-/*
- * The backend GART interface. (In our case AGP). Any similar type of device (PCIE?)
- * needs only to implement these functions to be usable with the "TTM" interface.
- * The AGP backend implementation lives in drm_agpsupport.c
- * basically maps these calls to available functions in agpgart. Each drm device driver gets an
- * additional function pointer that creates these types,
- * so that the device can choose the correct aperture.
- * (Multiple AGP apertures, etc.)
- * Most device drivers will let this point to the standard AGP implementation.
- */
-
-#define DRM_BE_FLAG_NEEDS_FREE   0x00000001
-#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
-#define DRM_BE_FLAG_CBA          0x00000004
-
-typedef struct drm_ttm_backend {
-	unsigned long aperture_base;
-	void *private;
-	uint32_t flags;
-	uint32_t drm_map_type;
-	int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
-	int (*populate) (struct drm_ttm_backend * backend,
-			 unsigned long num_pages, struct page ** pages);
-	void (*clear) (struct drm_ttm_backend * backend);
-	int (*bind) (struct drm_ttm_backend * backend,
-		     unsigned long offset, int cached);
-	int (*unbind) (struct drm_ttm_backend * backend);
-	void (*destroy) (struct drm_ttm_backend * backend);
-} drm_ttm_backend_t;
-
-typedef struct drm_ttm {
-	struct page **pages;
-	uint32_t page_flags;
-	unsigned long num_pages;
-	unsigned long aper_offset;
-	atomic_t vma_count;
-	struct drm_device *dev;
-	int destroy;
-	uint32_t mapping_offset;
-	drm_ttm_backend_t *be;
-	enum {
-		ttm_bound,
-		ttm_evicted,
-		ttm_unbound,
-		ttm_unpopulated,
-	} state;
-#ifdef DRM_ODD_MM_COMPAT
-	struct list_head vma_list;
-	struct list_head p_mm_list;
-#endif
-
-} drm_ttm_t;
-
-typedef struct drm_ttm_object {
-	atomic_t usage;
-	uint32_t flags;
-	drm_map_list_t map_list;
-} drm_ttm_object_t;
-
-extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
-				 uint32_t flags,
-				 drm_ttm_object_t ** ttm_object);
-extern void drm_ttm_object_deref_locked(struct drm_device *dev,
-					drm_ttm_object_t * to);
-extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
-					  drm_ttm_object_t * to);
-extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
-					       uint32_t handle,
-					       int check_owner);
-extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
-
-extern int drm_unbind_ttm(drm_ttm_t * ttm);
-
-/*
- * Evict a ttm region. Keeps Aperture caching policy.
- */
-
-extern int drm_evict_ttm(drm_ttm_t * ttm);
-extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
-
-/*
- * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
- * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
- * when the last vma exits.
- */
-
-extern int drm_destroy_ttm(drm_ttm_t * ttm);
-extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
-
-static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
-{
-	return (drm_ttm_t *) to->map_list.map->offset;
-}
-
-#define DRM_MASK_VAL(dest, mask, val) \
-	(dest) = ((dest) & ~(mask)) | ((val) & (mask));
-
-#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
-#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
-
-/*
- * Page flags.
- */
-
-#define DRM_TTM_PAGE_UNCACHED 0x01
-#define DRM_TTM_PAGE_USED     0x02
-#define DRM_TTM_PAGE_BOUND    0x04
-#define DRM_TTM_PAGE_PRESENT  0x08
-#define DRM_TTM_PAGE_VMALLOC  0x10
-
-#endif
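The deleted header's DRM_MASK_VAL and its drm_objects.h successor DRM_FLAG_MASKED (used in the drm_set_caching() hunk earlier) compute the same masked merge, one with and/or, the other with xor. A worked check:

	/*
	 * DRM_MASK_VAL(dest, mask, val):   dest = (dest & ~mask) | (val & mask)
	 * DRM_FLAG_MASKED(old, new, mask): old ^= ((old ^ new) & mask)
	 *
	 * Outside the mask, (old ^ new) & mask is 0, so those bits survive
	 * the xor; inside it, old ^ (old ^ new) == new.  Example:
	 */
	uint32_t flags = 0x0f;
	DRM_FLAG_MASKED(flags, 0x30, 0xf0);	/* flags == 0x3f: low nibble
						 * kept, high nibble = 0x30 */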
@@ -41,9 +41,9 @@
 
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);
-static void drm_vm_ttm_close(struct vm_area_struct *vma);
-static int drm_vm_ttm_open(struct vm_area_struct *vma);
-static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
+			      struct file *filp,
+			      drm_local_map_t *map);
 
 
 pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
@@ -159,96 +159,6 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 }
 #endif				/* __OS_HAS_AGP */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
-     LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
-static
-#endif
-struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
-			      struct fault_data *data)
-{
-	unsigned long address = data->address;
-	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
-	unsigned long page_offset;
-	struct page *page;
-	drm_ttm_t *ttm;
-	drm_buffer_manager_t *bm;
-	drm_device_t *dev;
-	unsigned long pfn;
-	int err;
-	pgprot_t pgprot;
-
-	if (!map) {
-		data->type = VM_FAULT_OOM;
-		return NULL;
-	}
-
-	if (address > vma->vm_end) {
-		data->type = VM_FAULT_SIGBUS;
-		return NULL;
-	}
-
-	ttm = (drm_ttm_t *) map->offset;
-
-	dev = ttm->dev;
-
-	/*
-	 * Perhaps retry here?
-	 */
-
-	mutex_lock(&dev->struct_mutex);
-	drm_fixup_ttm_caching(ttm);
-
-	bm = &dev->bm;
-	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
-	page = ttm->pages[page_offset];
-
-	if (!page) {
-		if (drm_alloc_memctl(PAGE_SIZE)) {
-			data->type = VM_FAULT_OOM;
-			goto out;
-		}
-		page = ttm->pages[page_offset] =
-		    alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-		if (!page) {
-			drm_free_memctl(PAGE_SIZE);
-			data->type = VM_FAULT_OOM;
-			goto out;
-		}
-		++bm->cur_pages;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
-		SetPageLocked(page);
-#else
-		SetPageReserved(page);
-#endif
-	}
-
-	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
-
-		/*
-		 * FIXME: Check can't map aperture flag.
-		 */
-
-		pfn = ttm->aper_offset + page_offset +
-		    (ttm->be->aperture_base >> PAGE_SHIFT);
-		pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
-	} else {
-		pfn = page_to_pfn(page);
-		pgprot = vma->vm_page_prot;
-	}
-
-	err = vm_insert_pfn(vma, address, pfn, pgprot);
-
-	if (!err || err == -EBUSY)
-		data->type = VM_FAULT_MINOR;
-	else
-		data->type = VM_FAULT_OOM;
-out:
-	mutex_unlock(&dev->struct_mutex);
-	return NULL;
-}
-#endif
-
 /**
  * \c nopage method for shared virtual memory.
  *
@@ -508,20 +418,6 @@ static struct vm_operations_struct drm_vm_sg_ops = {
 	.close = drm_vm_close,
 };
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
-static struct vm_operations_struct drm_vm_ttm_ops = {
-	.nopage = drm_vm_ttm_nopage,
-	.open = drm_vm_ttm_open_wrapper,
-	.close = drm_vm_ttm_close,
-};
-#else
-static struct vm_operations_struct drm_vm_ttm_ops = {
-	.fault = drm_vm_ttm_fault,
-	.open = drm_vm_ttm_open_wrapper,
-	.close = drm_vm_ttm_close,
-};
-#endif
-
 /**
  * \c open method for shared virtual memory.
  *
@@ -530,7 +426,7 @@ static struct vm_operations_struct drm_vm_ttm_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
 */
-static void drm_vm_open(struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct vm_area_struct *vma)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
@@ -542,36 +438,21 @@ static void drm_vm_open(struct vm_area_struct *vma)
 
 	vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 	if (vma_entry) {
-		mutex_lock(&dev->struct_mutex);
 		vma_entry->vma = vma;
 		vma_entry->next = dev->vmalist;
 		vma_entry->pid = current->pid;
 		dev->vmalist = vma_entry;
-		mutex_unlock(&dev->struct_mutex);
 	}
 }
 
-static int drm_vm_ttm_open(struct vm_area_struct *vma) {
-	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
-	drm_ttm_t *ttm;
+static void drm_vm_open(struct vm_area_struct *vma)
+{
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
 
-	drm_vm_open(vma);
 	mutex_lock(&dev->struct_mutex);
-	ttm = (drm_ttm_t *) map->offset;
-	atomic_inc(&ttm->vma_count);
-#ifdef DRM_ODD_MM_COMPAT
-	drm_ttm_add_vma(ttm, vma);
-#endif
+	drm_vm_open_locked(vma);
 	mutex_unlock(&dev->struct_mutex);
-	return 0;
-}
-
-static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
-{
-	drm_vm_ttm_open(vma);
 }
 
 /**
|
@ -608,34 +489,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static void drm_vm_ttm_close(struct vm_area_struct *vma)
|
|
||||||
{
|
|
||||||
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
|
|
||||||
drm_ttm_t *ttm;
|
|
||||||
drm_device_t *dev;
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
drm_vm_close(vma);
|
|
||||||
if (map) {
|
|
||||||
ttm = (drm_ttm_t *) map->offset;
|
|
||||||
dev = ttm->dev;
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
|
||||||
#ifdef DRM_ODD_MM_COMPAT
|
|
||||||
drm_ttm_delete_vma(ttm, vma);
|
|
||||||
#endif
|
|
||||||
if (atomic_dec_and_test(&ttm->vma_count)) {
|
|
||||||
if (ttm->destroy) {
|
|
||||||
ret = drm_destroy_ttm(ttm);
|
|
||||||
BUG_ON(ret);
|
|
||||||
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mutex_unlock(&dev->struct_mutex);
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mmap DMA memory.
|
* mmap DMA memory.
|
||||||
*
|
*
|
||||||
|
@ -653,7 +506,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
|
||||||
drm_device_dma_t *dma;
|
drm_device_dma_t *dma;
|
||||||
unsigned long length = vma->vm_end - vma->vm_start;
|
unsigned long length = vma->vm_end - vma->vm_start;
|
||||||
|
|
||||||
lock_kernel();
|
|
||||||
dev = priv->head->dev;
|
dev = priv->head->dev;
|
||||||
dma = dev->dma;
|
dma = dev->dma;
|
||||||
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
|
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
|
||||||
|
@ -661,10 +513,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
|
||||||
|
|
||||||
/* Length must match exact page count */
|
/* Length must match exact page count */
|
||||||
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
|
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
|
||||||
unlock_kernel();
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
unlock_kernel();
|
|
||||||
|
|
||||||
if (!capable(CAP_SYS_ADMIN) &&
|
if (!capable(CAP_SYS_ADMIN) &&
|
||||||
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
|
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
|
||||||
|
@ -686,7 +536,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
|
||||||
vma->vm_flags |= VM_RESERVED; /* Don't swap */
|
vma->vm_flags |= VM_RESERVED; /* Don't swap */
|
||||||
|
|
||||||
vma->vm_file = filp; /* Needed for drm_vm_open() */
|
vma->vm_file = filp; /* Needed for drm_vm_open() */
|
||||||
drm_vm_open(vma);
|
drm_vm_open_locked(vma);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -719,7 +569,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
|
||||||
* according to the mapping type and remaps the pages. Finally sets the file
|
* according to the mapping type and remaps the pages. Finally sets the file
|
||||||
* pointer and calls vm_open().
|
* pointer and calls vm_open().
|
||||||
*/
|
*/
|
||||||
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
|
||||||
{
|
{
|
||||||
drm_file_t *priv = filp->private_data;
|
drm_file_t *priv = filp->private_data;
|
||||||
drm_device_t *dev = priv->head->dev;
|
drm_device_t *dev = priv->head->dev;
|
||||||
|
@ -737,6 +587,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||||
* the AGP mapped at physical address 0
|
* the AGP mapped at physical address 0
|
||||||
* --BenH.
|
* --BenH.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (!vma->vm_pgoff
|
if (!vma->vm_pgoff
|
||||||
#if __OS_HAS_AGP
|
#if __OS_HAS_AGP
|
||||||
&& (!dev->agp
|
&& (!dev->agp
|
||||||
|
@ -833,27 +684,254 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||||
vma->vm_private_data = (void *)map;
|
vma->vm_private_data = (void *)map;
|
||||||
vma->vm_flags |= VM_RESERVED;
|
vma->vm_flags |= VM_RESERVED;
|
||||||
break;
|
break;
|
||||||
case _DRM_TTM: {
|
case _DRM_TTM:
|
||||||
vma->vm_ops = &drm_vm_ttm_ops;
|
return drm_bo_mmap_locked(vma, filp, map);
|
||||||
vma->vm_private_data = (void *) map;
|
|
||||||
vma->vm_file = filp;
|
|
||||||
vma->vm_flags |= VM_RESERVED | VM_IO;
|
|
||||||
#ifdef DRM_ODD_MM_COMPAT
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
|
||||||
drm_ttm_map_bound(vma);
|
|
||||||
mutex_unlock(&dev->struct_mutex);
|
|
||||||
#endif
|
|
||||||
if (drm_vm_ttm_open(vma))
|
|
||||||
return -EAGAIN;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
default:
|
default:
|
||||||
return -EINVAL; /* This should never happen. */
|
return -EINVAL; /* This should never happen. */
|
||||||
}
|
}
|
||||||
vma->vm_flags |= VM_RESERVED; /* Don't swap */
|
vma->vm_flags |= VM_RESERVED; /* Don't swap */
|
||||||
|
|
||||||
vma->vm_file = filp; /* Needed for drm_vm_open() */
|
vma->vm_file = filp; /* Needed for drm_vm_open() */
|
||||||
drm_vm_open(vma);
|
drm_vm_open_locked(vma);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
drm_file_t *priv = filp->private_data;
|
||||||
|
drm_device_t *dev = priv->head->dev;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
mutex_lock(&dev->struct_mutex);
|
||||||
|
ret = drm_mmap_locked(filp, vma);
|
||||||
|
mutex_unlock(&dev->struct_mutex);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
EXPORT_SYMBOL(drm_mmap);
|
EXPORT_SYMBOL(drm_mmap);
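
The mmap entry point is now split along the _locked suffix convention: drm_mmap() does nothing but take dev->struct_mutex and delegate to drm_mmap_locked(), so helpers such as drm_vm_open_locked() and drm_bo_mmap_locked() can assume the mutex is already held. A minimal user-space sketch of the same convention, using a pthread mutex in place of the kernel mutex API (all names here are illustrative, not part of the DRM code):

	#include <pthread.h>

	static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int vma_count;	/* state guarded by struct_mutex */

	/* Caller must hold struct_mutex; mirrors drm_vm_open_locked(). */
	static void track_vma_locked(void)
	{
		vma_count++;
	}

	/* Public entry point; mirrors drm_mmap()/drm_vm_open(). */
	void track_vma(void)
	{
		pthread_mutex_lock(&struct_mutex);
		track_vma_locked();
		pthread_mutex_unlock(&struct_mutex);
	}

The _locked variant is the one internal callers use when they already hold the lock, which removes the nested-locking problem the old drm_vm_ttm_open() path had.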
 
+/**
+ * buffer object vm functions.
+ */
+
+/**
+ * \c Pagefault method for buffer objects.
+ *
+ * \param vma Virtual memory area.
+ * \param data Fault data on failure or refault.
+ * \return Always NULL as we insert pfns directly.
+ *
+ * It's important that pfns are inserted while holding the bo->mutex lock.
+ * otherwise we might race with unmap_mapping_range() which is always
+ * called with the bo->mutex lock held.
+ *
+ * It's not pretty to modify the vma->vm_page_prot variable while not
+ * holding the mm semaphore in write mode. However, we have it in read mode,
+ * so we won't be racing with any other writers, and we only actually modify
+ * it when no ptes are present so it shouldn't be a big deal.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) || \
+     LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+#ifdef DRM_FULL_MM_COMPAT
+static
+#endif
+struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+			     struct fault_data *data)
+{
+	unsigned long address = data->address;
+	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+	unsigned long page_offset;
+	struct page *page = NULL;
+	drm_ttm_t *ttm;
+	drm_device_t *dev;
+	unsigned long pfn;
+	int err;
+	unsigned long bus_base;
+	unsigned long bus_offset;
+	unsigned long bus_size;
+
+	mutex_lock(&bo->mutex);
+
+	err = drm_bo_wait(bo, 0, 0, 0);
+	if (err) {
+		data->type = (err == -EAGAIN) ?
+			VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+		goto out_unlock;
+	}
+
+	/*
+	 * If buffer happens to be in a non-mappable location,
+	 * move it to a mappable.
+	 */
+
+#ifdef DRM_BO_FULL_COMPAT
+	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+		uint32_t new_mask = bo->mem.mask |
+			DRM_BO_FLAG_MAPPABLE |
+			DRM_BO_FLAG_FORCE_MAPPABLE;
+		err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+
+		if (err) {
+			data->type = (err == -EAGAIN) ?
+				VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+	}
+#else
+	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+		unsigned long _end = jiffies + 3*DRM_HZ;
+		uint32_t new_mask = bo->mem.mask |
+			DRM_BO_FLAG_MAPPABLE |
+			DRM_BO_FLAG_FORCE_MAPPABLE;
+
+		do {
+			err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+		} while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
+
+		if (err) {
+			DRM_ERROR("Timeout moving buffer to mappable location.\n");
+			data->type = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+	}
+#endif
+
+	if (address > vma->vm_end) {
+		data->type = VM_FAULT_SIGBUS;
+		goto out_unlock;
+	}
+
+	dev = bo->dev;
+	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+				&bus_size);
+
+	if (err) {
+		data->type = VM_FAULT_SIGBUS;
+		goto out_unlock;
+	}
+
+	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+	if (bus_size) {
+		drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+
+		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+	} else {
+		ttm = bo->ttm;
+
+		drm_ttm_fixup_caching(ttm);
+		page = drm_ttm_get_page(ttm, page_offset);
+		if (!page) {
+			data->type = VM_FAULT_OOM;
+			goto out_unlock;
+		}
+		pfn = page_to_pfn(page);
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	}
+
+	err = vm_insert_pfn(vma, address, pfn);
+
+	if (!err || err == -EBUSY)
+		data->type = VM_FAULT_MINOR;
+	else
+		data->type = VM_FAULT_OOM;
+out_unlock:
+	mutex_unlock(&bo->mutex);
+	return NULL;
+}
+#endif
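
When the buffer sits in unmappable memory, the fault handler above keeps retrying drm_bo_move_buffer() until it stops returning -EAGAIN or a three-second deadline (jiffies + 3*DRM_HZ) expires. A user-space sketch of that retry-until-deadline idiom, assuming only standard C; try_once is a hypothetical stand-in for the operation that may ask to be retried:

	#include <time.h>

	/* Retry an "-EAGAIN-style" operation until it succeeds or the
	 * deadline passes; mirrors the jiffies loop in drm_bo_vm_fault(). */
	int retry_with_deadline(int (*try_once)(void), int seconds)
	{
		time_t end = time(NULL) + seconds;
		int err;

		do {
			err = try_once();
		} while (err != 0 && time(NULL) < end);

		return err;
	}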
+
+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
+{
+	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+
+	drm_vm_open_locked(vma);
+	atomic_inc(&bo->usage);
+#ifdef DRM_ODD_MM_COMPAT
+	drm_bo_add_vma(bo, vma);
+#endif
+}
+
+/**
+ * \c vma open method for buffer objects.
+ *
+ * \param vma virtual memory area.
+ */
+
+static void drm_bo_vm_open(struct vm_area_struct *vma)
+{
+	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+	drm_device_t *dev = bo->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_bo_vm_open_locked(vma);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c vma close method for buffer objects.
+ *
+ * \param vma virtual memory area.
+ */
+
+static void drm_bo_vm_close(struct vm_area_struct *vma)
+{
+	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+	drm_device_t *dev = bo->dev;
+
+	drm_vm_close(vma);
+	if (bo) {
+		mutex_lock(&dev->struct_mutex);
+#ifdef DRM_ODD_MM_COMPAT
+		drm_bo_delete_vma(bo, vma);
+#endif
+		drm_bo_usage_deref_locked(bo);
+		mutex_unlock(&dev->struct_mutex);
+	}
+	return;
+}
+
+static struct vm_operations_struct drm_bo_vm_ops = {
+#ifdef DRM_FULL_MM_COMPAT
+	.fault = drm_bo_vm_fault,
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+	.nopfn = drm_bo_vm_nopfn,
+#else
+	.nopage = drm_bo_vm_nopage,
+#endif
+#endif
+	.open = drm_bo_vm_open,
+	.close = drm_bo_vm_close,
+};
+
+/**
+ * mmap buffer object memory.
+ *
+ * \param vma virtual memory area.
+ * \param filp file pointer.
+ * \param map The buffer object drm map.
+ * \return zero on success or a negative number on failure.
+ */
+
+int drm_bo_mmap_locked(struct vm_area_struct *vma,
+		       struct file *filp,
+		       drm_local_map_t *map)
+{
+	vma->vm_ops = &drm_bo_vm_ops;
+	vma->vm_private_data = map->handle;
+	vma->vm_file = filp;
+	vma->vm_flags |= VM_RESERVED | VM_IO;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+	vma->vm_flags |= VM_PFNMAP;
+#endif
+	drm_bo_vm_open_locked(vma);
+#ifdef DRM_ODD_MM_COMPAT
+	drm_bo_map_bound(vma);
+#endif
+	return 0;
+}
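
drm_bo_mmap_locked() installs drm_bo_vm_ops and deliberately populates no pages; everything is faulted in through drm_bo_vm_fault() on first touch. From user space this is just an mmap() of the DRM file descriptor at the buffer's map offset. A hedged sketch, where map_handle and size are assumed to come from the buffer-object ioctls:

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/types.h>
	#include <sys/mman.h>

	/* Map a buffer object given its fake mmap offset; handle and size
	 * are assumed to have been returned by the BO creation ioctl. */
	void *map_buffer_object(int drm_fd, uint64_t map_handle, size_t size)
	{
		void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
				 MAP_SHARED, drm_fd, (off_t) map_handle);
		if (ptr == MAP_FAILED) {
			perror("mmap");
			return NULL;
		}
		return ptr;	/* first access faults into drm_bo_vm_fault() */
	}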
@@ -33,7 +33,6 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
 drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
 {
 	return drm_agp_init_ttm(dev, NULL);
@@ -64,3 +63,173 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
 
 	return i915_emit_mi_flush(dev, flush_cmd);
 }
+
+int i915_init_mem_type(drm_device_t * dev, uint32_t type,
+		       drm_mem_type_manager_t * man)
+{
+	switch (type) {
+	case DRM_BO_MEM_LOCAL:
+		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+			_DRM_FLAG_MEMTYPE_CACHED;
+		man->drm_bus_maptype = 0;
+		break;
+	case DRM_BO_MEM_TT:
+		if (!(drm_core_has_AGP(dev) && dev->agp)) {
+			DRM_ERROR("AGP is not enabled for memory type %u\n",
+				  (unsigned)type);
+			return -EINVAL;
+		}
+		man->io_offset = dev->agp->agp_info.aper_base;
+		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+		man->io_addr = NULL;
+		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+			_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
+		man->drm_bus_maptype = _DRM_AGP;
+		break;
+	case DRM_BO_MEM_PRIV0:
+		if (!(drm_core_has_AGP(dev) && dev->agp)) {
+			DRM_ERROR("AGP is not enabled for memory type %u\n",
+				  (unsigned)type);
+			return -EINVAL;
+		}
+		man->io_offset = dev->agp->agp_info.aper_base;
+		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+		man->io_addr = NULL;
+		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+			_DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
+		man->drm_bus_maptype = _DRM_AGP;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+uint32_t i915_evict_flags(drm_device_t * dev, uint32_t type)
+{
+	switch (type) {
+	case DRM_BO_MEM_LOCAL:
+	case DRM_BO_MEM_TT:
+		return DRM_BO_FLAG_MEM_LOCAL;
+	default:
+		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+	}
+}
+
+static void i915_emit_copy_blit(drm_device_t * dev,
+				uint32_t src_offset,
+				uint32_t dst_offset,
+				uint32_t pages, int direction)
+{
+	uint32_t cur_pages;
+	uint32_t stride = PAGE_SIZE;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	if (!dev_priv)
+		return;
+
+	i915_kernel_lost_context(dev);
+	while (pages > 0) {
+		cur_pages = pages;
+		if (cur_pages > 2048)
+			cur_pages = 2048;
+		pages -= cur_pages;
+
+		BEGIN_LP_RING(6);
+		OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+			 XY_SRC_COPY_BLT_WRITE_RGB);
+		OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
+			 (1 << 25) | (direction ? (1 << 30) : 0));
+		OUT_RING((cur_pages << 16) | PAGE_SIZE);
+		OUT_RING(dst_offset);
+		OUT_RING(stride & 0xffff);
+		OUT_RING(src_offset);
+		ADVANCE_LP_RING();
+	}
+	return;
+}
+
+static int i915_move_blit(drm_buffer_object_t * bo,
+			  int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	int dir = 0;
+
+	if ((old_mem->mem_type == new_mem->mem_type) &&
+	    (new_mem->mm_node->start <
+	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+		dir = 1;
+	}
+
+	i915_emit_copy_blit(bo->dev,
+			    old_mem->mm_node->start << PAGE_SHIFT,
+			    new_mem->mm_node->start << PAGE_SHIFT,
+			    new_mem->num_pages, dir);
+
+	i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
+
+	return drm_bo_move_accel_cleanup(bo, evict, no_wait,
+					 DRM_FENCE_TYPE_EXE |
+					 DRM_I915_FENCE_TYPE_RW,
+					 DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
+}
+
+/*
+ * Flip destination ttm into cached-coherent AGP,
+ * then blit and subsequently move out again.
+ */
+
+static int i915_move_flip(drm_buffer_object_t * bo,
+			  int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+	drm_device_t *dev = bo->dev;
+	drm_bo_mem_reg_t tmp_mem;
+	int ret;
+
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
+		DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
+
+	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+	if (ret)
+		return ret;
+
+	ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
+	if (ret)
+		goto out_cleanup;
+
+	ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
+	if (ret)
+		goto out_cleanup;
+
+	ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+out_cleanup:
+	if (tmp_mem.mm_node) {
+		mutex_lock(&dev->struct_mutex);
+		if (tmp_mem.mm_node != bo->pinned_node)
+			drm_mm_put_block(tmp_mem.mm_node);
+		tmp_mem.mm_node = NULL;
+		mutex_unlock(&dev->struct_mutex);
+	}
+	return ret;
+}
+
+int i915_move(drm_buffer_object_t * bo,
+	      int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+
+	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+		if (i915_move_flip(bo, evict, no_wait, new_mem))
+			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	} else {
+		if (i915_move_blit(bo, evict, no_wait, new_mem))
+			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	}
+	return 0;
+}
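
i915_emit_copy_blit() expresses a buffer move as a PAGE_SIZE-wide, one-page-per-row blit, and a single SRC_COPY_BLT command is capped at 2048 rows, so a move of N pages needs ceil(N / 2048) ring commands. A small worked example of just that arithmetic, independent of any kernel API:

	#include <stdio.h>
	#include <stdint.h>

	/* Number of SRC_COPY_BLT commands needed for a given page count,
	 * mirroring the 2048-row cap in i915_emit_copy_blit(). */
	static uint32_t blit_commands(uint32_t pages)
	{
		return (pages + 2047) / 2048;
	}

	int main(void)
	{
		/* A 16 MiB buffer with 4 KiB pages is 4096 pages: 2 commands. */
		printf("%u\n", blit_commands(4096));
		return 0;
	}

Note also the fallback chain in i915_move(): blit where possible, flip through cached AGP when moving back to system memory, and plain memcpy whenever the accelerated path fails.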
@@ -47,15 +47,25 @@ static drm_fence_driver_t i915_fence_driver = {
 	.lazy_capable = 1,
 	.emit = i915_fence_emit_sequence,
 	.poke_flush = i915_poke_flush,
+	.has_irq = i915_fence_has_irq,
 };
 #endif
 #ifdef I915_HAVE_BUFFER
+
+static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
+static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
+
 static drm_bo_driver_t i915_bo_driver = {
-	.iomap = {NULL, NULL},
-	.cached = {1, 1},
+	.mem_type_prio = i915_mem_prios,
+	.mem_busy_prio = i915_busy_prios,
+	.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
+	.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
 	.create_ttm_backend_entry = i915_create_ttm_backend_entry,
 	.fence_type = i915_fence_types,
-	.invalidate_caches = i915_invalidate_caches
+	.invalidate_caches = i915_invalidate_caches,
+	.init_mem_type = i915_init_mem_type,
+	.evict_flags = i915_evict_flags,
+	.move = i915_move,
 };
 #endif
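
The mem_type_prio/mem_busy_prio arrays replace the fixed iomap/cached tables: the driver hands the generic buffer manager an ordered placement preference (PRIV0, then TT, then system memory, with TT tried first under memory pressure). A sketch of how such a priority walk might look; space_available() is hypothetical and stands in for the generic layer's real per-type bookkeeping:

	#include <stdint.h>

	/* Hypothetical stand-in for the generic manager's free-space check. */
	extern int space_available(uint32_t mem_type, uint32_t num_pages);

	/* Return the first memory type in prio[] with room, or -1. */
	int pick_mem_type(const uint32_t *prio, uint32_t n, uint32_t num_pages)
	{
		uint32_t i;

		for (i = 0; i < n; ++i)
			if (space_available(prio[i], num_pages))
				return (int) prio[i];
		return -1;
	}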
@@ -61,7 +61,7 @@ static void i915_perform_flush(drm_device_t * dev)
 
 		diff = sequence - fm->last_exe_flush;
 		if (diff < driver->wrap_diff && diff != 0) {
-			drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
+			drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
 		}
 
 		diff = sequence - fm->exe_flush_sequence;
@@ -84,7 +84,7 @@ static void i915_perform_flush(drm_device_t * dev)
 			flush_flags = dev_priv->flush_flags;
 			flush_sequence = dev_priv->flush_sequence;
 			dev_priv->flush_pending = 0;
-			drm_fence_handler(dev, flush_sequence, flush_flags);
+			drm_fence_handler(dev, 0, flush_sequence, flush_flags);
 		}
 	}
 
@@ -104,13 +104,13 @@ static void i915_perform_flush(drm_device_t * dev)
 			flush_flags = dev_priv->flush_flags;
 			flush_sequence = dev_priv->flush_sequence;
 			dev_priv->flush_pending = 0;
-			drm_fence_handler(dev, flush_sequence, flush_flags);
+			drm_fence_handler(dev, 0, flush_sequence, flush_flags);
 		}
 	}
 
 }
 
-void i915_poke_flush(drm_device_t * dev)
+void i915_poke_flush(drm_device_t * dev, uint32_t class)
 {
 	drm_fence_manager_t *fm = &dev->fm;
 	unsigned long flags;
@@ -120,7 +120,7 @@ void i915_poke_flush(drm_device_t * dev)
 	write_unlock_irqrestore(&fm->lock, flags);
 }
 
-int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
+int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
 			     uint32_t * sequence, uint32_t * native_type)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -144,3 +144,15 @@ void i915_fence_handler(drm_device_t * dev)
 	i915_perform_flush(dev);
 	write_unlock(&fm->lock);
 }
+
+int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags)
+{
+	/*
+	 * We have an irq that tells us when we have a new breadcrumb.
+	 */
+
+	if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
+		return 1;
+
+	return 0;
+}
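
The flush paths above depend on wrap-safe breadcrumb arithmetic: sequence - last is evaluated modulo 2^32 and compared against wrap_diff, so ordering survives the hardware counter wrapping past 0xffffffff. A self-contained illustration of the idiom:

	#include <stdio.h>
	#include <stdint.h>

	/* True if 'seq' is newer than 'last', assuming the counter never
	 * runs more than wrap_diff values ahead; this is the same test as
	 * in i915_perform_flush(). */
	static int seq_newer(uint32_t seq, uint32_t last, uint32_t wrap_diff)
	{
		uint32_t diff = seq - last;	/* well-defined modulo 2^32 */

		return diff != 0 && diff < wrap_diff;
	}

	int main(void)
	{
		/* 3 is "newer" than 0xfffffffe despite being numerically smaller. */
		printf("%d\n", seq_newer(3, 0xfffffffeu, 1u << 30));
		return 0;
	}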
@@ -697,9 +697,10 @@ typedef struct drm_fence_arg {
 } drm_fence_arg_t;
 
 /* Buffer permissions, referring to how the GPU uses the buffers.
- these translate to fence types used for the buffers.
- Typically a texture buffer is read, A destination buffer is write and
- a command (batch-) buffer is exe. Can be or-ed together. */
+ * these translate to fence types used for the buffers.
+ * Typically a texture buffer is read, A destination buffer is write and
+ * a command (batch-) buffer is exe. Can be or-ed together.
+ */
 
 #define DRM_BO_FLAG_READ        0x00000001
 #define DRM_BO_FLAG_WRITE       0x00000002
@@ -707,47 +708,82 @@ typedef struct drm_fence_arg {
 
 /*
  * Status flags. Can be read to determine the actual state of a buffer.
+ * Can also be set in the buffer mask before validation.
  */
 
 /*
- * Cannot evict this buffer. Not even with force. This type of buffer should
- * only be available for root, and must be manually removed before buffer
- * manager shutdown or swapout.
+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
+ * available to root and must be manually removed before buffer manager shutdown
+ * or lock.
+ * Flags: Acknowledge
 */
 #define DRM_BO_FLAG_NO_EVICT    0x00000010
-/* Always keep a system memory shadow to a vram buffer */
-#define DRM_BO_FLAG_SHADOW_VRAM 0x00000020
-/* The buffer is shareable with other processes */
+
+/*
+ * Mask: Require that the buffer is placed in mappable memory when validated.
+ *       If not set the buffer may or may not be in mappable memory when validated.
+ * Flags: If set, the buffer is in mappable memory.
+ */
+#define DRM_BO_FLAG_MAPPABLE    0x00000020
+
+/* Mask: The buffer should be shareable with other processes.
+ * Flags: The buffer is shareable with other processes.
+ */
 #define DRM_BO_FLAG_SHAREABLE   0x00000040
-/* The buffer is currently cached */
+
+/* Mask: If set, place the buffer in cache-coherent memory if available.
+ *       If clear, never place the buffer in cache coherent memory if validated.
+ * Flags: The buffer is currently in cache-coherent memory.
+ */
 #define DRM_BO_FLAG_CACHED      0x00000080
-/* Make sure that every time this buffer is validated, it ends up on the same
- * location. The buffer will also not be evicted when claiming space for
- * other buffers. Basically a pinned buffer but it may be thrown out as
- * part of buffer manager shutdown or swapout. Not supported yet.*/
+
+/* Mask: Make sure that every time this buffer is validated,
+ *       it ends up on the same location provided that the memory mask is the same.
+ *       The buffer will also not be evicted when claiming space for
+ *       other buffers. Basically a pinned buffer but it may be thrown out as
+ *       part of buffer manager shutdown or locking.
+ * Flags: Acknowledge.
+ */
 #define DRM_BO_FLAG_NO_MOVE     0x00000100
 
-/* Make sure the buffer is in cached memory when mapped for reading */
-#define DRM_BO_FLAG_READ_CACHED 0x00080000
-/* When there is a choice between VRAM and TT, prefer VRAM.
-   The default behaviour is to prefer TT. */
-#define DRM_BO_FLAG_PREFER_VRAM 0x00040000
-/* Bind this buffer cached if the hardware supports it. */
-#define DRM_BO_FLAG_BIND_CACHED 0x0002000
+/* Mask: Make sure the buffer is in cached memory when mapped for reading.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_READ_CACHED 0x00080000
 
-/* System Memory */
+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_FORCE_CACHING 0x00002000
+
+/*
+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000
+
+/*
+ * Memory type flags that can be or'ed together in the mask, but only
+ * one appears in flags.
+ */
+
+/* System memory */
 #define DRM_BO_FLAG_MEM_LOCAL  0x01000000
 /* Translation table memory */
 #define DRM_BO_FLAG_MEM_TT     0x02000000
 /* Vram memory */
 #define DRM_BO_FLAG_MEM_VRAM   0x04000000
-/* Unmappable Vram memory */
-#define DRM_BO_FLAG_MEM_VRAM_NM 0x08000000
+/* Up to the driver to define. */
+#define DRM_BO_FLAG_MEM_PRIV0  0x08000000
+#define DRM_BO_FLAG_MEM_PRIV1  0x10000000
+#define DRM_BO_FLAG_MEM_PRIV2  0x20000000
+#define DRM_BO_FLAG_MEM_PRIV3  0x40000000
+#define DRM_BO_FLAG_MEM_PRIV4  0x80000000
 
 /* Memory flag mask */
 #define DRM_BO_MASK_MEM        0xFF000000
+#define DRM_BO_MASK_MEMTYPE    0xFF0000A0
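
Each flag comment now spells out two roles: what the bit demands when set in the validation mask, and what it reports when read back in flags. A hedged user-space sketch of that mask/flags pairing, modelled on the drmBOValidate() call in the benchmark further down this diff (the header names and exact signature are assumed from that code):

	#include "xf86drm.h"
	#include "xf86mm.h"	/* assumed libdrm headers of this era */

	/* Request cache-coherent TT placement and make the caching bit strict. */
	int validate_cached_tt(int fd, drmBO *buf)
	{
		unsigned flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED |
				 DRM_BO_FLAG_FORCE_CACHING;
		unsigned mask  = DRM_BO_MASK_MEMTYPE | DRM_BO_FLAG_FORCE_CACHING;

		/* On success the buffer's flags report the placement chosen. */
		return drmBOValidate(fd, buf, flags, mask, DRM_BO_HINT_DONT_FENCE);
	}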
 
-/* When creating a buffer, Avoid system storage even if allowed */
-#define DRM_BO_HINT_AVOID_LOCAL 0x00000001
 /* Don't block on validate and map */
 #define DRM_BO_HINT_DONT_BLOCK  0x00000002
 /* Don't place this buffer on the unfenced list.*/
@@ -756,9 +792,6 @@ typedef struct drm_fence_arg {
 #define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010
 
-
-/* Driver specific flags. Could be for example rendering engine */
-#define DRM_BO_MASK_DRIVER	0x00F00000
-
 typedef enum {
 	drm_bo_type_dc,
 	drm_bo_type_user,
@@ -826,8 +859,13 @@ typedef struct drm_bo_arg{
 #define DRM_BO_MEM_LOCAL 0
 #define DRM_BO_MEM_TT 1
 #define DRM_BO_MEM_VRAM 2
-#define DRM_BO_MEM_VRAM_NM 3
-#define DRM_BO_MEM_TYPES 2 /* For now. */
+#define DRM_BO_MEM_PRIV0 3
+#define DRM_BO_MEM_PRIV1 4
+#define DRM_BO_MEM_PRIV2 5
+#define DRM_BO_MEM_PRIV3 6
+#define DRM_BO_MEM_PRIV4 7
+
+#define DRM_BO_MEM_TYPES 8 /* For now. */
 
 typedef union drm_mm_init_arg{
 	struct {
@@ -126,7 +126,9 @@ typedef struct drm_i915_private {
 	uint32_t flush_pending;
 	uint32_t saved_flush_status;
 #endif
+#ifdef I915_HAVE_BUFFER
+	void *agp_iomap;
+#endif
 	spinlock_t swaps_lock;
 	drm_i915_vbl_swap_t vbl_swaps;
 	unsigned int swaps_pending;
@@ -183,10 +185,12 @@ extern void i915_mem_release(drm_device_t * dev,
 
 extern void i915_fence_handler(drm_device_t *dev);
-extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags,
+extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t class,
+				    uint32_t flags,
 				    uint32_t *sequence,
 				    uint32_t *native_type);
-extern void i915_poke_flush(drm_device_t *dev);
+extern void i915_poke_flush(drm_device_t *dev, uint32_t class);
+extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags);
 #endif
 
 #ifdef I915_HAVE_BUFFER
@@ -194,6 +198,12 @@ extern void i915_poke_flush(drm_device_t *dev);
 extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
 extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type);
 extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
+extern int i915_init_mem_type(drm_device_t *dev, uint32_t type,
+			      drm_mem_type_manager_t *man);
+extern uint32_t i915_evict_flags(drm_device_t *dev, uint32_t type);
+extern int i915_move(drm_buffer_object_t *bo, int evict,
+		     int no_wait, drm_bo_mem_reg_t *new_mem);
+
 #endif
 
 #define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
@@ -331,6 +341,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
 
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
 
+#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
 #define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
@@ -182,7 +182,7 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
 			  drm_bo_type_dc,
 			  DRM_BO_FLAG_READ |
 			  DRM_BO_FLAG_WRITE |
-			  DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_NO_MOVE, 0, &buf));
+			  DRM_BO_FLAG_MEM_LOCAL /*| DRM_BO_FLAG_NO_MOVE*/, 0, &buf));
 	curTime = fastrdtsc();
 	*ticks++ = time_diff(oldTime, curTime);
 
@@ -260,8 +260,8 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
 
 	oldTime = fastrdtsc();
 	ret = drmBOValidate(ctx->drmFD, &buf,
-			    DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_BIND_CACHED,
-			    DRM_BO_MASK_MEM | DRM_BO_FLAG_BIND_CACHED, DRM_BO_HINT_DONT_FENCE);
+			    DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING,
+			    DRM_BO_MASK_MEMTYPE | DRM_BO_FLAG_FORCE_CACHING, DRM_BO_HINT_DONT_FENCE);
 	curTime = fastrdtsc();
 	drmUnlock(ctx->drmFD, ctx->hwContext);
 
@@ -304,7 +304,7 @@ static void
 testAGP(TinyDRIContext * ctx)
 {
 	unsigned long ticks[128], *pTicks;
-	unsigned long size = 4096 * 1024;
+	unsigned long size = 8 * 1024;
 	int ret;
 
 	ret = benchmarkBuffer(ctx, size, ticks);