Merge git://proxy01.pd.intel.com:9419/git/mesa/drm into crestline

Nian Wu 2007-02-25 17:06:13 -08:00
commit df2fc3ec62
38 changed files with 3517 additions and 1958 deletions


@ -585,9 +585,9 @@ int drmOpen(const char *name, const char *busid)
void drmFreeVersion(drmVersionPtr v)
{
if (!v) return;
if (v->name) drmFree(v->name);
if (v->date) drmFree(v->date);
if (v->desc) drmFree(v->desc);
drmFree(v->name);
drmFree(v->date);
drmFree(v->desc);
drmFree(v);
}
@ -604,9 +604,9 @@ void drmFreeVersion(drmVersionPtr v)
static void drmFreeKernelVersion(drm_version_t *v)
{
if (!v) return;
if (v->name) drmFree(v->name);
if (v->date) drmFree(v->date);
if (v->desc) drmFree(v->desc);
drmFree(v->name);
drmFree(v->date);
drmFree(v->desc);
drmFree(v);
}
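The two hunks above drop the per-member NULL checks because drmFree(), like C's free(), is a no-op on a NULL pointer, so guarding each call is redundant. A minimal standalone sketch of the pattern, with my_free() standing in for drmFree() and an illustrative struct rather than the real drmVersion:

#include <stdlib.h>

/* Stand-in for drmFree(): like free(), it must accept NULL, which is
 * exactly what makes the guard-free style above safe. */
static void my_free(void *p)
{
    free(p);            /* C guarantees free(NULL) does nothing */
}

struct version {
    char *name;
    char *date;
    char *desc;
};

static void free_version(struct version *v)
{
    if (!v)
        return;         /* only the top-level pointer needs a check */
    my_free(v->name);   /* members may be NULL; no per-member guard */
    my_free(v->date);
    my_free(v->desc);
    my_free(v);
}

int main(void)
{
    struct version *v = calloc(1, sizeof(*v));  /* members all NULL */
    free_version(v);    /* safe even though nothing was ever assigned */
    return 0;
}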
@ -2267,12 +2267,13 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
* DRM_FENCE_MASK_DRIVER
*/
int drmFenceCreate(int fd, unsigned flags, int class,unsigned type,
int drmFenceCreate(int fd, unsigned flags, int class, unsigned type,
drmFence *fence)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.flags = flags;
arg.type = type;
arg.class = class;
arg.op = drm_fence_create;
@ -2410,8 +2411,9 @@ int drmFenceSignaled(int fd, drmFence *fence, unsigned fenceType,
int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.class = fence->class;
arg.flags = flags;
arg.handle = fence->handle;
arg.type = emit_type;
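Both drmFenceCreate() and drmFenceEmit() now clear the argument block with memset() before filling it in, so fields this code never assigns, and any padding bytes, reach the ioctl as zeros rather than stack garbage. A standalone sketch of the idea; the struct layout is illustrative, not the real drm_fence_arg_t:

#include <string.h>

/* Illustrative layout only -- not the real drm_fence_arg_t. */
struct fence_arg {
    unsigned handle;
    unsigned flags;
    unsigned type;
    int      class;
    unsigned op;
};

static void fill_create_arg(struct fence_arg *arg,
                            unsigned flags, int class, unsigned type)
{
    /* Zero everything first: fields this path never assigns (handle,
     * op) and any padding must not carry stack garbage to the kernel. */
    memset(arg, 0, sizeof(*arg));
    arg->flags = flags;
    arg->type  = type;
    arg->class = class;
}

int main(void)
{
    struct fence_arg arg;
    fill_create_arg(&arg, 0, 0, 1);
    return arg.handle;  /* guaranteed 0 thanks to the memset */
}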


@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
@ -29,8 +29,8 @@ sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
via_video.o via_dmablit.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
via_video.o via_dmablit.o via_fence.o via_buffer.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
ifeq ($(CONFIG_COMPAT),y)


@ -458,6 +458,10 @@ typedef struct drm_lock_data {
struct file *filp; /**< File descr of lock holder (0=kernel) */
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
spinlock_t spinlock;
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
} drm_lock_data_t;
/**
@ -591,78 +595,8 @@ typedef struct ati_pcigart_info {
drm_local_map_t mapping;
} drm_ati_pcigart_info;
/*
* User space objects and their references.
*/
#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
typedef enum {
drm_fence_type,
drm_buffer_type,
drm_ttm_type
/*
* Add other user space object types here.
*/
} drm_object_type_t;
/*
* A user object is a structure that helps the drm give out user handles
* to kernel internal objects and to keep track of these objects so that
* they can be destroyed, for example when the user space process exits.
* Designed to be accessible using a user space 32-bit handle.
*/
typedef struct drm_user_object{
drm_hash_item_t hash;
struct list_head list;
drm_object_type_t type;
atomic_t refcount;
int shareable;
drm_file_t *owner;
void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,
drm_ref_t ref_action);
void (*unref)(drm_file_t *priv, struct drm_user_object *obj,
drm_ref_t unref_action);
void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
} drm_user_object_t;
/*
* A ref object is a structure which is used to keep track of
* references to user objects, so that the references can be
* destroyed, for example, when the user space process exits.
* Designed to be accessible using a pointer to the _user_ object.
*/
typedef struct drm_ref_object {
drm_hash_item_t hash;
struct list_head list;
atomic_t refcount;
drm_ref_t unref_action;
} drm_ref_object_t;
#include "drm_ttm.h"
/*
* buffer object driver
*/
typedef struct drm_bo_driver{
int cached[DRM_BO_MEM_TYPES];
drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device *dev);
int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
} drm_bo_driver_t;
#include "drm_objects.h"
/**
* DRM driver structure. This structure represents the common code for
@ -712,6 +646,8 @@ struct drm_driver {
void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct file * filp);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
struct file * filp);
unsigned long (*get_map_ofs) (drm_map_t * map);
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
@ -749,63 +685,6 @@ typedef struct drm_head {
} drm_head_t;
typedef struct drm_fence_driver{
int no_types;
uint32_t wrap_diff;
uint32_t flush_diff;
uint32_t sequence_mask;
int lazy_capable;
int (*emit) (struct drm_device *dev, uint32_t flags,
uint32_t *breadcrumb,
uint32_t *native_type);
void (*poke_flush) (struct drm_device *dev);
} drm_fence_driver_t;
#define _DRM_FENCE_TYPE_EXE 0x00
typedef struct drm_fence_manager{
int initialized;
rwlock_t lock;
/*
* The list below should be maintained in sequence order and
* access is protected by the above spinlock.
*/
struct list_head ring;
struct list_head *fence_types[32];
volatile uint32_t pending_flush;
wait_queue_head_t fence_queue;
int pending_exe_flush;
uint32_t last_exe_flush;
uint32_t exe_flush_sequence;
atomic_t count;
} drm_fence_manager_t;
typedef struct drm_buffer_manager{
struct mutex init_mutex;
int nice_mode;
int initialized;
drm_file_t *last_to_validate;
int has_type[DRM_BO_MEM_TYPES];
int use_type[DRM_BO_MEM_TYPES];
drm_mm_t manager[DRM_BO_MEM_TYPES];
struct list_head lru[DRM_BO_MEM_TYPES];
struct list_head pinned[DRM_BO_MEM_TYPES];
struct list_head unfenced;
struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
struct work_struct wq;
#else
struct delayed_work wq;
#endif
uint32_t fence_type;
unsigned long cur_pages;
atomic_t count;
} drm_buffer_manager_t;
/**
* DRM device structure. This structure represents a complete card that
* may contain multiple heads.
@ -960,62 +839,6 @@ typedef struct drm_agp_ttm_priv {
} drm_agp_ttm_priv;
#endif
typedef struct drm_fence_object{
drm_user_object_t base;
atomic_t usage;
/*
* The below three fields are protected by the fence manager spinlock.
*/
struct list_head ring;
int class;
uint32_t native_type;
uint32_t type;
uint32_t signaled;
uint32_t sequence;
uint32_t flush_mask;
uint32_t submitted_flush;
} drm_fence_object_t;
typedef struct drm_buffer_object{
drm_device_t *dev;
drm_user_object_t base;
/*
* If there is a possibility that the usage variable is zero,
* then dev->struct_mutex should be locked before incrementing it.
*/
atomic_t usage;
drm_ttm_object_t *ttm_object;
drm_ttm_t *ttm;
unsigned long num_pages;
unsigned long buffer_start;
drm_bo_type_t type;
unsigned long offset;
uint32_t page_alignment;
atomic_t mapped;
uint32_t flags;
uint32_t mask;
uint32_t mem_type;
drm_mm_node_t *mm_node; /* MM node for on-card RAM */
struct list_head lru;
struct list_head ddestroy;
uint32_t fence_type;
uint32_t fence_class;
drm_fence_object_t *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
struct mutex mutex;
} drm_buffer_object_t;
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
@ -1193,12 +1016,14 @@ extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
extern int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context);
extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
extern void drm_idlelock_take(drm_lock_data_t *lock_data);
extern void drm_idlelock_release(drm_lock_data_t *lock_data);
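drm_idlelock_take() registers the kernel as a waiter and acquires the hardware lock as soon as it falls idle, without stealing it from the userspace holder; drm_idlelock_release() hands it back. A sketch of the calling pattern, in-tree style rather than standalone, with my_quiescent_work() as a hypothetical helper:

static void my_reclaim_path(drm_device_t *dev, struct file *filp)
{
    drm_idlelock_take(&dev->lock);    /* register a kernel waiter; the lock
                                       * is handed over once it falls idle */
    my_quiescent_work(dev, filp);     /* e.g. reclaim_buffers_idlelocked() */
    drm_idlelock_release(&dev->lock); /* give the lock back to userspace */
}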
/*
* These are exported to drivers so that they can implement fencing using
* DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
*/
extern int drm_i_have_hw_lock(struct file *filp);
@ -1357,105 +1182,9 @@ static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
}
/*
* User space object bookkeeping (drm_object.c)
*/
/*
* Must be called with the struct_mutex held.
*/
extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
int shareable);
/*
* Must be called with the struct_mutex held.
*/
extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_user_object, you may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
/*
* Must be called with the struct_mutex held. May temporarily release it.
*/
extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
*/
drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,
drm_user_object_t *referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_ref_object, you may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
drm_user_object_t **object);
extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
/*
* fence objects (drm_fence.c)
*/
extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
extern void drm_fence_manager_init(drm_device_t *dev);
extern void drm_fence_manager_takedown(drm_device_t *dev);
extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
extern int drm_fence_object_flush(drm_device_t * dev,
volatile drm_fence_object_t * fence,
uint32_t type);
extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
uint32_t type);
extern void drm_fence_usage_deref_locked(drm_device_t * dev,
drm_fence_object_t * fence);
extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
drm_fence_object_t * fence);
extern int drm_fence_object_wait(drm_device_t * dev,
volatile drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
uint32_t fence_flags,
drm_fence_object_t **c_fence);
extern int drm_fence_add_user_object(drm_file_t *priv,
drm_fence_object_t *fence,
int shareable);
extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
/*
* buffer objects (drm_bo.c)
*/
extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(drm_device_t *dev);
extern int drm_bo_driver_init(drm_device_t *dev);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
drm_fence_object_t *fence,
drm_fence_object_t **used_fence);
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);


@ -606,8 +606,8 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
int ret;
DRM_DEBUG("drm_agp_bind_ttm\n");
DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
(cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
DRM_BE_FLAG_BOUND_CACHED);
mem->is_flushed = TRUE;
mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
ret = drm_agp_bind_memory(mem, offset);
@ -620,7 +620,7 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_priv->mem->is_bound)
return drm_agp_unbind_memory(agp_priv->mem);
@ -710,7 +710,6 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
agp_priv->uncached_type = AGP_USER_MEMORY;
agp_priv->bridge = dev->agp->bridge;
agp_priv->populated = FALSE;
agp_be->aperture_base = dev->agp->agp_info.aper_base;
agp_be->private = (void *) agp_priv;
agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
agp_be->populate = drm_agp_populate;
@ -718,10 +717,8 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
agp_be->bind = drm_agp_bind_ttm;
agp_be->unbind = drm_agp_unbind_ttm;
agp_be->destroy = drm_agp_destroy_ttm;
DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
(backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
(dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
DRM_FLAG_MASKED(agp_be->flags, (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0,
DRM_BE_FLAG_NEEDS_FREE);
agp_be->drm_map_type = _DRM_AGP;
return agp_be;
}
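The replacement macro DRM_FLAG_MASKED(old, new, mask) reorders the arguments of DRM_MASK_VAL but has the same net effect: update only the bits of old selected by mask, taking them from new. A standalone sketch with an illustrative definition that is equivalent in effect (the in-tree macro may be written differently):

#include <assert.h>
#include <stdint.h>

/* Illustrative definition: copy the bits selected by _mask from _new
 * into _old, leaving all other bits of _old untouched. */
#define FLAG_MASKED(_old, _new, _mask) \
    ((_old) = (((_old) & ~(_mask)) | ((_new) & (_mask))))

#define BE_FLAG_BOUND_CACHED 0x4u

int main(void)
{
    uint32_t flags = 0x3u;    /* two unrelated bits already set */

    /* Equivalent of the drm_agp_bind_ttm() call above with cached = 1:
     * set BE_FLAG_BOUND_CACHED without disturbing the other bits. */
    FLAG_MASKED(flags, BE_FLAG_BOUND_CACHED, BE_FLAG_BOUND_CACHED);
    assert(flags == 0x7u);

    /* And with cached = 0: clear only the masked bit. */
    FLAG_MASKED(flags, 0, BE_FLAG_BOUND_CACHED);
    assert(flags == 0x3u);
    return 0;
}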

File diff suppressed because it is too large.

linux-core/drm_bo_move.c (new file, 411 lines)

@ -0,0 +1,411 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
/**
* Free the old memory node unless it's a pinned region and we
* have not been requested to free pinned regions as well.
*/
static void drm_bo_free_old_node(drm_buffer_object_t * bo)
{
drm_bo_mem_reg_t *old_mem = &bo->mem;
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
drm_mm_put_block(old_mem->mm_node);
old_mem->mm_node = NULL;
mutex_unlock(&bo->dev->struct_mutex);
}
old_mem->mm_node = NULL;
}
int drm_bo_move_ttm(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_ttm_t *ttm = bo->ttm;
drm_bo_mem_reg_t *old_mem = &bo->mem;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
int ret;
if (old_mem->mem_type == DRM_BO_MEM_TT) {
if (evict)
drm_ttm_evict(ttm);
else
drm_ttm_unbind(ttm);
drm_bo_free_old_node(bo);
DRM_FLAG_MASKED(old_mem->flags,
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
old_mem->mem_type = DRM_BO_MEM_LOCAL;
save_flags = old_mem->flags;
}
if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
ret = drm_bind_ttm(ttm,
new_mem->flags & DRM_BO_FLAG_CACHED,
new_mem->mm_node->start);
if (ret)
return ret;
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->mask = save_mask;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
EXPORT_SYMBOL(drm_bo_move_ttm);
/**
* \c Return a kernel virtual address to the buffer object PCI memory.
*
* \param bo The buffer object.
* \return Failure indication.
*
* Returns -EINVAL if the buffer object is currently not mappable.
* Returns -ENOMEM if the ioremap operation failed.
* Otherwise returns zero.
*
* After a successful call, bo->iomap contains the virtual address, or NULL
* if the buffer object content is not accessible through PCI space.
* Call bo->mutex locked.
*/
int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
void **virtual)
{
drm_buffer_manager_t *bm = &dev->bm;
drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
unsigned long bus_offset;
unsigned long bus_size;
unsigned long bus_base;
int ret;
void *addr;
*virtual = NULL;
ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
if (ret || bus_size == 0)
return ret;
if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
addr = (void *)(((u8 *) man->io_addr) + bus_offset);
else {
addr = ioremap_nocache(bus_base + bus_offset, bus_size);
if (!addr)
return -ENOMEM;
}
*virtual = addr;
return 0;
}
/**
* \c Unmap mapping obtained using drm_bo_ioremap
*
* \param bo The buffer object.
*
* Call bo->mutex locked.
*/
void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
void *virtual)
{
drm_buffer_manager_t *bm;
drm_mem_type_manager_t *man;
bm = &dev->bm;
man = &bm->man[mem->mem_type];
if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
iounmap(virtual);
}
}
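The two helpers above are meant to be used strictly as a pair, and a successful drm_mem_reg_ioremap() can still leave *virtual NULL when the region has no PCI-visible backing, so the caller must test the pointer before using it. An illustrative caller, in-tree style rather than standalone (my_touch_bo_memory() is hypothetical):

static int my_touch_bo_memory(drm_device_t *dev, drm_bo_mem_reg_t *mem)
{
    void *virtual;
    int ret;

    ret = drm_mem_reg_ioremap(dev, mem, &virtual);
    if (ret)
        return ret;
    if (virtual) {
        /* ... read or write the buffer contents through 'virtual' ... */
        drm_mem_reg_iounmap(dev, mem, virtual);    /* always undo the map */
    }
    return 0;
}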
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
uint32_t *dstP =
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
uint32_t *srcP =
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
int i;
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
iowrite32(ioread32(srcP++), dstP++);
return 0;
}
static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
{
struct page *d = drm_ttm_get_page(ttm, page);
void *dst;
if (!d)
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
dst = kmap(d);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
kunmap(d);
return 0;
}
static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
{
struct page *s = drm_ttm_get_page(ttm, page);
void *src;
if (!s)
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
src = kmap(s);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
kunmap(s);
return 0;
}
int drm_bo_move_memcpy(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_device_t *dev = bo->dev;
drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
drm_ttm_t *ttm = bo->ttm;
drm_bo_mem_reg_t *old_mem = &bo->mem;
drm_bo_mem_reg_t old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
unsigned long i;
unsigned long page;
unsigned long add = 0;
int dir;
ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
if (ret)
return ret;
ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
if (ret)
goto out;
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
if (old_iomap == NULL && ttm == NULL)
goto out2;
add = 0;
dir = 1;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->mm_node->start <
old_mem->mm_node->start + old_mem->mm_node->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
for (i = 0; i < new_mem->num_pages; ++i) {
page = i * dir + add;
if (old_iomap == NULL)
ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
else if (new_iomap == NULL)
ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
else
ret = drm_copy_io_page(new_iomap, old_iomap, page);
if (ret)
goto out1;
}
mb();
out2:
drm_bo_free_old_node(bo);
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->mask = save_mask;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
drm_ttm_unbind(ttm);
drm_destroy_ttm(ttm);
bo->ttm = NULL;
}
out1:
drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
return ret;
}
EXPORT_SYMBOL(drm_bo_move_memcpy);
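When source and destination live in the same memory type and the ranges overlap, the loop above copies pages in reverse order (dir = -1, starting from the last page), the same trick memmove() uses, so no page is overwritten before it has been read. A standalone analogue of the direction choice; the overlap test here is the tight memmove-style one, while the in-tree test is slightly coarser:

#include <assert.h>
#include <string.h>

#define PAGE 4096UL

/* Move n_pages pages from src_idx to dst_idx inside one flat buffer,
 * picking the copy direction so an overlapping move never reads a page
 * it has already overwritten. */
static void move_pages(unsigned char *base, unsigned long dst_idx,
                       unsigned long src_idx, unsigned long n_pages)
{
    unsigned long dir = 1;    /* like 'dir' in drm_bo_move_memcpy() */
    unsigned long add = 0;    /* like 'add' */
    unsigned long i, page;

    if (dst_idx > src_idx && dst_idx < src_idx + n_pages) {
        dir = (unsigned long)-1;  /* destination overlaps from above: */
        add = n_pages - 1;        /* copy the highest page first */
    }
    for (i = 0; i < n_pages; ++i) {
        page = i * dir + add;     /* counts 0..n-1 or n-1..0 */
        memcpy(base + (dst_idx + page) * PAGE,
               base + (src_idx + page) * PAGE, PAGE);
    }
}

int main(void)
{
    static unsigned char buf[8 * PAGE];
    unsigned long i;

    for (i = 0; i < 4 * PAGE; ++i)
        buf[i] = (unsigned char)(i / PAGE + 1);   /* pages hold 1,2,3,4 */
    move_pages(buf, 2, 0, 4);                     /* overlapping move up */
    assert(buf[2 * PAGE] == 1 && buf[5 * PAGE] == 4);
    return 0;
}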
/*
* Transfer a buffer object's memory and LRU status to a newly
* created object. User-space references remain with the old
* object. Call bo->mutex locked.
*/
int drm_buffer_object_transfer(drm_buffer_object_t * bo,
drm_buffer_object_t ** new_obj)
{
drm_buffer_object_t *fbo;
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
if (!fbo)
return -ENOMEM;
*fbo = *bo;
mutex_init(&fbo->mutex);
mutex_lock(&fbo->mutex);
mutex_lock(&dev->struct_mutex);
DRM_INIT_WAITQUEUE(&bo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
INIT_LIST_HEAD(&fbo->vma_list);
INIT_LIST_HEAD(&fbo->p_mm_list);
#endif
atomic_inc(&bo->fence->usage);
fbo->pinned_node = NULL;
fbo->mem.mm_node->private = (void *)fbo;
atomic_set(&fbo->usage, 1);
atomic_inc(&bm->count);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&fbo->mutex);
*new_obj = fbo;
return 0;
}
/*
* Since a move is underway, we need to block signals in this function.
* We cannot restart until it has finished.
*/
int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
int evict,
int no_wait,
uint32_t fence_class,
uint32_t fence_type,
uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
{
drm_device_t *dev = bo->dev;
drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
drm_bo_mem_reg_t *old_mem = &bo->mem;
int ret;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
drm_buffer_object_t *old_obj;
if (bo->fence)
drm_fence_usage_deref_unlocked(dev, bo->fence);
ret = drm_fence_object_create(dev, fence_class, fence_type,
fence_flags | DRM_FENCE_FLAG_EMIT,
&bo->fence);
if (ret)
return ret;
#ifdef DRM_ODD_MM_COMPAT
/*
* In this mode, we don't allow pipelining a copy blit,
* since the buffer will be accessible from user space
* the moment we return and rebuild the page tables.
*
* With normal vm operation, page tables are rebuilt
* on demand using fault(), which waits for buffer idle.
*/
if (1)
#else
if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
bo->mem.mm_node != NULL))
#endif
{
ret = drm_bo_wait(bo, 0, 1, 0);
if (ret)
return ret;
drm_bo_free_old_node(bo);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
drm_ttm_unbind(bo->ttm);
drm_destroy_ttm(bo->ttm);
bo->ttm = NULL;
}
} else {
/* This should help pipeline ordinary buffer moves.
*
* Hang old buffer memory on a new buffer object,
* and leave it to be released when the GPU
* operation has completed.
*/
ret = drm_buffer_object_transfer(bo, &old_obj);
if (ret)
return ret;
if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
old_obj->ttm = NULL;
else
bo->ttm = NULL;
mutex_lock(&dev->struct_mutex);
list_del_init(&old_obj->lru);
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
drm_bo_add_to_lru(old_obj);
drm_bo_usage_deref_locked(old_obj);
mutex_unlock(&dev->struct_mutex);
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->mask = save_mask;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
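drm_bo_move_accel_cleanup() is intended to be called from a driver's move hook right after a hardware copy has been queued: it fences the transfer and, outside the compat case, parks the old memory on a throwaway buffer object so it is only freed once the fence signals. A sketch of such a hook, in-tree style rather than standalone (my_emit_blit() and MY_FENCE_TYPE are hypothetical):

static int my_move_blit(drm_buffer_object_t *bo, int evict,
                        int no_wait, drm_bo_mem_reg_t *new_mem)
{
    int ret;

    ret = my_emit_blit(bo, new_mem);   /* queue the GPU copy, do not wait */
    if (ret)
        return ret;

    /* Fence the copy; the old memory is freed (or parked on a ghost
     * object) only once that fence signals. */
    return drm_bo_move_accel_cleanup(bo, evict, no_wait,
                                     0 /* fence_class */, MY_FENCE_TYPE,
                                     0 /* fence_flags */, new_mem);
}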


@ -79,54 +79,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
* vm code for kernels below 2,6,15 in which version a major vm write
* vm code for kernels below 2.6.15 in which version a major vm write
* occurred. This implements a simple, straightforward
* version similar to what's going to be
* in kernel 2.6.20+?
* in kernel 2.6.19+
* Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
* nopfn.
*/
static int drm_pte_is_clear(struct vm_area_struct *vma,
unsigned long addr)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 1;
pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
goto unlock;
pud = pud_offset(pgd, addr);
if (pud_none(*pud))
goto unlock;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
goto unlock;
pte = pte_offset_map(pmd, addr);
if (!pte)
goto unlock;
ret = pte_none(*pte);
pte_unmap(pte);
unlock:
spin_unlock(&mm->page_table_lock);
return ret;
}
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot)
{
int ret;
if (!drm_pte_is_clear(vma, addr))
return -EBUSY;
ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
return ret;
}
static struct {
spinlock_t lock;
struct page *dummy_page;
@ -160,7 +120,7 @@ void free_nopage_retry(void)
}
}
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
@ -171,7 +131,7 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
data.address = address;
data.vma = vma;
drm_vm_ttm_fault(vma, &data);
drm_bo_vm_fault(vma, &data);
switch (data.type) {
case VM_FAULT_OOM:
return NOPAGE_OOM;
@ -186,10 +146,85 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
#endif
#if !defined(DRM_FULL_MM_COMPAT) && \
((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
static int drm_pte_is_clear(struct vm_area_struct *vma,
unsigned long addr)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 1;
pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
goto unlock;
pud = pud_offset(pgd, addr);
if (pud_none(*pud))
goto unlock;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
goto unlock;
pte = pte_offset_map(pmd, addr);
if (!pte)
goto unlock;
ret = pte_none(*pte);
pte_unmap(pte);
unlock:
spin_unlock(&mm->page_table_lock);
return ret;
}
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
int ret;
if (!drm_pte_is_clear(vma, addr))
return -EBUSY;
ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
return ret;
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))
/**
* While waiting for the fault() handler to appear in newer kernels,
* we accomplish approximately the same thing by wrapping drm_bo_vm_fault()
* with nopfn.
*/
unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
unsigned long address)
{
struct fault_data data;
data.address = address;
(void) drm_bo_vm_fault(vma, &data);
if (data.type == VM_FAULT_OOM)
return NOPFN_OOM;
else if (data.type == VM_FAULT_SIGBUS)
return NOPFN_SIGBUS;
/*
* pfn already set.
*/
return 0;
}
#endif
#ifdef DRM_ODD_MM_COMPAT
/*
* VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
* VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
* workaround for a single BUG statement in do_no_page in these versions. The
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
* vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
@ -212,109 +247,100 @@ typedef struct vma_entry {
} vma_entry_t;
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
/*
* FIXME: Check can't map aperture flag.
*/
mutex_lock(&bo->mutex);
if (type)
*type = VM_FAULT_MINOR;
if (!map)
return NOPAGE_OOM;
if (address > vma->vm_end) {
page = NOPAGE_SIGBUS;
goto out_unlock;
}
dev = bo->dev;
if (address > vma->vm_end)
return NOPAGE_SIGBUS;
if (drm_mem_reg_is_pci(dev, &bo->mem)) {
DRM_ERROR("Invalid compat nopage.\n");
page = NOPAGE_SIGBUS;
goto out_unlock;
}
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
drm_fixup_ttm_caching(ttm);
BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
bm = &dev->bm;
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
page = NOPAGE_OOM;
goto out;
}
page = ttm->pages[page_offset] =
alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
page = NOPAGE_OOM;
goto out;
}
++bm->cur_pages;
SetPageLocked(page);
page = NOPAGE_OOM;
goto out_unlock;
}
get_page(page);
out:
mutex_unlock(&dev->struct_mutex);
out_unlock:
mutex_unlock(&bo->mutex);
return page;
}
int drm_ttm_map_bound(struct vm_area_struct *vma)
int drm_bo_map_bound(struct vm_area_struct *vma)
{
drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
int ret = 0;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
&bus_offset, &bus_size);
BUG_ON(ret);
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
unsigned long pfn = ttm->aper_offset +
(ttm->be->aperture_base >> PAGE_SHIFT);
pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
if (bus_size) {
drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start,
pgprot);
}
return ret;
}
int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n_entry;
vma_entry_t *v_entry;
drm_local_map_t *map = (drm_local_map_t *)
vma->vm_private_data;
struct mm_struct *mm = vma->vm_mm;
v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
if (!v_entry) {
DRM_ERROR("Allocation of vma pointer entry failed\n");
return -ENOMEM;
}
v_entry->vma = vma;
map->handle = (void *) v_entry;
list_add_tail(&v_entry->head, &ttm->vma_list);
list_for_each_entry(entry, &ttm->p_mm_list, head) {
list_add_tail(&v_entry->head, &bo->vma_list);
list_for_each_entry(entry, &bo->p_mm_list, head) {
if (mm == entry->mm) {
atomic_inc(&entry->refcount);
return 0;
} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
}
n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
if (!n_entry) {
DRM_ERROR("Allocation of process mm pointer entry failed\n");
return -ENOMEM;
@ -328,29 +354,29 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
return 0;
}
void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n;
vma_entry_t *v_entry, *v_n;
int found = 0;
struct mm_struct *mm = vma->vm_mm;
list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
if (v_entry->vma == vma) {
found = 1;
list_del(&v_entry->head);
drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
break;
}
}
BUG_ON(!found);
list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
if (mm == entry->mm) {
if (atomic_add_negative(-1, &entry->refcount)) {
list_del(&entry->head);
BUG_ON(entry->locked);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
}
return;
}
@ -360,12 +386,12 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
int drm_ttm_lock_mm(drm_ttm_t * ttm)
int drm_bo_lock_kmm(drm_buffer_object_t * bo)
{
p_mm_entry_t *entry;
int lock_ok = 1;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(entry->locked);
if (!down_write_trylock(&entry->mm->mmap_sem)) {
lock_ok = 0;
@ -377,7 +403,7 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
if (lock_ok)
return 0;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
list_for_each_entry(entry, &bo->p_mm_list, head) {
if (!entry->locked)
break;
up_write(&entry->mm->mmap_sem);
@ -392,43 +418,40 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
return -EAGAIN;
}
void drm_ttm_unlock_mm(drm_ttm_t * ttm)
void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
{
p_mm_entry_t *entry;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(!entry->locked);
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
}
}
int drm_ttm_remap_bound(drm_ttm_t *ttm)
int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
vma_entry_t *v_entry;
int ret = 0;
list_for_each_entry(v_entry, &ttm->vma_list, head) {
ret = drm_ttm_map_bound(v_entry->vma);
if (ret)
break;
if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
list_for_each_entry(v_entry, &bo->vma_list, head) {
ret = drm_bo_map_bound(v_entry->vma);
if (ret)
break;
}
}
drm_ttm_unlock_mm(ttm);
return ret;
}
void drm_ttm_finish_unmap(drm_ttm_t *ttm)
void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
vma_entry_t *v_entry;
if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
return;
list_for_each_entry(v_entry, &ttm->vma_list, head) {
list_for_each_entry(v_entry, &bo->vma_list, head) {
v_entry->vma->vm_flags &= ~VM_PFNMAP;
}
drm_ttm_unlock_mm(ttm);
}
#endif


@ -97,8 +97,6 @@
#define __GFP_COMP 0
#endif
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
@ -154,15 +152,25 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
(tmp);})
#endif
#ifndef list_for_each_entry_safe_reverse
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
#endif
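This compat macro is what the reworked drm_fence_handler() further down uses to walk a fence ring from newest to oldest while unlinking signaled entries; caching the previous node in n is what makes deletion during iteration safe. A standalone demonstration (GNU C, since the macro relies on typeof):

#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Same shape as the compat macro above. */
#define list_for_each_entry_safe_reverse(pos, n, head, member)      \
    for (pos = list_entry((head)->prev, typeof(*pos), member),      \
         n = list_entry(pos->member.prev, typeof(*pos), member);    \
         &pos->member != (head);                                    \
         pos = n, n = list_entry(n->member.prev, typeof(*n), member))

struct item { int v; struct list_head link; };

int main(void)
{
    struct list_head head = { &head, &head };
    struct item a = { 1, { 0, 0 } }, b = { 2, { 0, 0 } }, c = { 3, { 0, 0 } };
    struct item *arr[3] = { &a, &b, &c };
    struct item *pos, *n;
    int i, expect = 3;

    for (i = 0; i < 3; ++i) {            /* push back: list is 1, 2, 3 */
        arr[i]->link.prev = head.prev;
        arr[i]->link.next = &head;
        head.prev->next = &arr[i]->link;
        head.prev = &arr[i]->link;
    }

    list_for_each_entry_safe_reverse(pos, n, &head, link) {
        assert(pos->v == expect);        /* visits 3, then 2, then 1 */
        expect--;
    }
    assert(expect == 0);
    return 0;
}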
#include <linux/mm.h>
#include <asm/page.h>
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
#define DRM_FULL_MM_COMPAT
#endif
/*
@ -200,18 +208,23 @@ extern int drm_map_page_into_agp(struct page *page);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);
struct fault_data;
extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data);
#define NOPAGE_REFAULT get_nopage_retry()
#endif
#if !defined(DRM_FULL_MM_COMPAT) && \
((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
struct fault_data;
extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data);
#endif
#ifndef DRM_FULL_MM_COMPAT
/*
* Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
* For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
@ -228,17 +241,21 @@ struct fault_data {
extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
unsigned long pfn);
extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
#else
extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long address);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
#endif /* ndef DRM_FULL_MM_COMPAT */
#ifdef DRM_ODD_MM_COMPAT
struct drm_ttm;
struct drm_buffer_object;
/*
@ -246,14 +263,14 @@ struct drm_ttm;
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
extern int drm_ttm_add_vma(struct drm_ttm * ttm,
extern int drm_bo_add_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
struct vm_area_struct *vma);
extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Attempts to lock all relevant mmap_sems for a ttm, while
@ -262,12 +279,12 @@ extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
* schedule() and try again.
*/
extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
/*
* Unlock all relevant mmap_sems for a ttm.
*/
extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
/*
* If the ttm was bound to the aperture, this function shall be called
@ -277,7 +294,7 @@ extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
* releases the mmap_sems for this ttm.
*/
extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
/*
* Remap all vmas of this ttm using io_remap_pfn_range. We cannot
@ -286,14 +303,14 @@ extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
* releases the mmap_sems for this ttm.
*/
extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
/*
* Remap a vma for a bound ttm. Call with the ttm mutex held and
* the relevant mmap_sem locked.
*/
extern int drm_ttm_map_bound(struct vm_area_struct *vma);
extern int drm_bo_map_bound(struct vm_area_struct *vma);
#endif
#endif


@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -11,6 +11,10 @@
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@ -19,11 +23,6 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@ -35,21 +34,42 @@
* Typically called by the IRQ handler.
*/
void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
void drm_fence_handler(drm_device_t * dev, uint32_t class,
uint32_t sequence, uint32_t type)
{
int wake = 0;
uint32_t diff;
uint32_t relevant;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
struct list_head *list, *prev;
drm_fence_object_t *fence;
struct list_head *head;
drm_fence_object_t *fence, *next;
int found = 0;
int is_exe = (type & DRM_FENCE_TYPE_EXE);
int ge_last_exe;
if (list_empty(&fm->ring))
diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
fc->pending_exe_flush = 0;
diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
ge_last_exe = diff < driver->wrap_diff;
if (ge_last_exe)
fc->pending_flush &= ~type;
if (is_exe && ge_last_exe) {
fc->last_exe_flush = sequence;
}
if (list_empty(&fc->ring))
return;
list_for_each_entry(fence, &fm->ring, ring) {
list_for_each_entry(fence, &fc->ring, ring) {
diff = (sequence - fence->sequence) & driver->sequence_mask;
if (diff > driver->wrap_diff) {
found = 1;
@ -57,11 +77,11 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
}
}
list = (found) ? fence->ring.prev : fm->ring.prev;
prev = list->prev;
head = (found) ? &fence->ring : &fc->ring;
for (; list != &fm->ring; list = prev, prev = list->prev) {
fence = list_entry(list, drm_fence_object_t, ring);
list_for_each_entry_safe_reverse(fence, next, head, ring) {
if (&fence->ring == &fc->ring)
break;
type |= fence->native_type;
relevant = type & fence->type;
@ -78,7 +98,7 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
~(fence->signaled | fence->submitted_flush);
if (relevant) {
fm->pending_flush |= relevant;
fc->pending_flush |= relevant;
fence->submitted_flush = fence->flush_mask;
}
@ -89,9 +109,9 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
}
}
if (wake) {
DRM_WAKEUP(&fm->fence_queue);
DRM_WAKEUP(&fc->fence_queue);
}
}
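Every ordering test in the handler uses the same idiom: subtract two sequence numbers, mask to the width of the hardware counter, and compare against wrap_diff. That keeps breadcrumb comparisons correct across counter wrap-around. A standalone sketch; the mask and wrap constants are illustrative stand-ins for driver->sequence_mask and driver->wrap_diff:

#include <assert.h>
#include <stdint.h>

#define SEQUENCE_MASK 0xFFFFFFFFu    /* counter width */
#define WRAP_DIFF     (1u << 30)     /* "recent past" window */

/* True when 'current' is at or past 'emitted' on the modular number
 * line: a small masked difference means emitted lies behind current. */
static int seq_reached(uint32_t current, uint32_t emitted)
{
    uint32_t diff = (current - emitted) & SEQUENCE_MASK;
    return diff < WRAP_DIFF;
}

int main(void)
{
    assert(seq_reached(100, 90));           /* plainly reached */
    assert(!seq_reached(90, 100));          /* not reached yet */
    assert(seq_reached(5, 0xFFFFFFF0u));    /* reached across the wrap */
    return 0;
}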
@ -147,7 +167,7 @@ static void drm_fence_object_destroy(drm_file_t * priv,
drm_fence_usage_deref_locked(dev, fence);
}
static int fence_signaled(drm_device_t * dev, volatile
static int fence_signaled(drm_device_t * dev,
drm_fence_object_t * fence,
uint32_t mask, int poke_flush)
{
@ -157,7 +177,7 @@ static int fence_signaled(drm_device_t * dev, volatile
drm_fence_driver_t *driver = dev->driver->fence_driver;
if (poke_flush)
driver->poke_flush(dev);
driver->poke_flush(dev, fence->class);
read_lock_irqsave(&fm->lock, flags);
signaled =
(fence->type & mask & fence->signaled) == (fence->type & mask);
@ -166,52 +186,35 @@ static int fence_signaled(drm_device_t * dev, volatile
return signaled;
}
static void drm_fence_flush_exe(drm_fence_manager_t * fm,
static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
drm_fence_driver_t * driver, uint32_t sequence)
{
uint32_t diff;
if (!fm->pending_exe_flush) {
volatile struct list_head *list;
/*
* Last_exe_flush is invalid. Find oldest sequence.
*/
/* list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
list = &fm->ring;
if (list->next == &fm->ring) {
return;
} else {
drm_fence_object_t *fence =
list_entry(list->next, drm_fence_object_t, ring);
fm->last_exe_flush = (fence->sequence - 1) &
driver->sequence_mask;
}
diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
if (diff >= driver->wrap_diff)
return;
fm->exe_flush_sequence = sequence;
fm->pending_exe_flush = 1;
if (!fc->pending_exe_flush) {
fc->exe_flush_sequence = sequence;
fc->pending_exe_flush = 1;
} else {
diff =
(sequence - fm->exe_flush_sequence) & driver->sequence_mask;
(sequence - fc->exe_flush_sequence) & driver->sequence_mask;
if (diff < driver->wrap_diff) {
fm->exe_flush_sequence = sequence;
fc->exe_flush_sequence = sequence;
}
}
}
int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
int drm_fence_object_signaled(drm_fence_object_t * fence,
uint32_t type)
{
return ((fence->signaled & type) == type);
}
int drm_fence_object_flush(drm_device_t * dev,
volatile drm_fence_object_t * fence, uint32_t type)
drm_fence_object_t * fence,
uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
unsigned long flags;
@ -226,16 +229,16 @@ int drm_fence_object_flush(drm_device_t * dev,
if (fence->submitted_flush == fence->signaled) {
if ((fence->type & DRM_FENCE_TYPE_EXE) &&
!(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
drm_fence_flush_exe(fm, driver, fence->sequence);
drm_fence_flush_exe(fc, driver, fence->sequence);
fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
} else {
fm->pending_flush |= (fence->flush_mask &
fc->pending_flush |= (fence->flush_mask &
~fence->submitted_flush);
fence->submitted_flush = fence->flush_mask;
}
}
write_unlock_irqrestore(&fm->lock, flags);
driver->poke_flush(dev);
driver->poke_flush(dev, fence->class);
return 0;
}
@ -244,24 +247,35 @@ int drm_fence_object_flush(drm_device_t * dev,
* wrapped around and reused.
*/
void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t old_sequence;
unsigned long flags;
drm_fence_object_t *fence;
uint32_t diff;
write_lock_irqsave(&fm->lock, flags);
old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
fc->pending_exe_flush = 1;
fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
}
write_unlock_irqrestore(&fm->lock, flags);
mutex_lock(&dev->struct_mutex);
read_lock_irqsave(&fm->lock, flags);
if (fm->ring.next == &fm->ring) {
if (list_empty(&fc->ring)) {
read_unlock_irqrestore(&fm->lock, flags);
mutex_unlock(&dev->struct_mutex);
return;
}
old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
atomic_inc(&fence->usage);
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
@ -274,11 +288,40 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
EXPORT_SYMBOL(drm_fence_flush_old);
int drm_fence_object_wait(drm_device_t * dev,
volatile drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
static int drm_fence_lazy_wait(drm_device_t *dev,
drm_fence_object_t *fence,
int ignore_signals,
uint32_t mask)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
unsigned long _end = jiffies + 3*DRM_HZ;
int ret = 0;
do {
DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
fence_signaled(dev, fence, mask, 0));
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
ret = -EBUSY;
if (ret) {
if (ret == -EBUSY) {
DRM_ERROR("Fence timeout. "
"GPU lockup or fence driver was "
"taken down.\n");
}
return ((ret == -EINTR) ? -EAGAIN : ret);
}
return 0;
}
int drm_fence_object_wait(drm_device_t * dev,
drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
{
drm_fence_driver_t *driver = dev->driver->fence_driver;
int ret = 0;
unsigned long _end;
@ -299,44 +342,29 @@ int drm_fence_object_wait(drm_device_t * dev,
if (lazy && driver->lazy_capable) {
do {
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
fence_signaled(dev, fence, mask, 1));
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
ret = -EBUSY;
if (ret) {
if (ret == -EBUSY) {
DRM_ERROR("Fence timeout. "
"GPU lockup or fence driver was "
"taken down.\n");
}
return ((ret == -EINTR) ? -EAGAIN : ret);
}
} else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
driver->lazy_capable) {
/*
* We use IRQ wait for EXE fence if available to gain
* CPU in some cases.
*/
do {
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
fence_signaled(dev, fence,
DRM_FENCE_TYPE_EXE, 1));
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
ret = -EBUSY;
ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
if (ret)
return ((ret == -EINTR) ? -EAGAIN : ret);
}
return ret;
if (fence_signaled(dev, fence, mask, 0))
} else {
if (driver->has_irq(dev, fence->class,
DRM_FENCE_TYPE_EXE)) {
ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
DRM_FENCE_TYPE_EXE);
if (ret)
return ret;
}
if (driver->has_irq(dev, fence->class,
mask & ~DRM_FENCE_TYPE_EXE)) {
ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
mask);
if (ret)
return ret;
}
}
if (drm_fence_object_signaled(fence, mask))
return 0;
/*
@ -358,33 +386,38 @@ int drm_fence_object_wait(drm_device_t * dev,
}
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
uint32_t fence_flags, uint32_t type)
uint32_t fence_flags, uint32_t class, uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
unsigned long flags;
uint32_t sequence;
uint32_t native_type;
int ret;
drm_fence_unring(dev, &fence->ring);
ret = driver->emit(dev, fence_flags, &sequence, &native_type);
ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
if (ret)
return ret;
write_lock_irqsave(&fm->lock, flags);
fence->class = class;
fence->type = type;
fence->flush_mask = 0x00;
fence->submitted_flush = 0x00;
fence->signaled = 0x00;
fence->sequence = sequence;
fence->native_type = native_type;
list_add_tail(&fence->ring, &fm->ring);
if (list_empty(&fc->ring))
fc->last_exe_flush = sequence - 1;
list_add_tail(&fence->ring, &fc->ring);
write_unlock_irqrestore(&fm->lock, flags);
return 0;
}
static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
uint32_t type,
uint32_t fence_flags,
drm_fence_object_t * fence)
{
@ -398,7 +431,7 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
fence->class = 0;
fence->class = class;
fence->type = type;
fence->flush_mask = 0;
fence->submitted_flush = 0;
@ -406,7 +439,8 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
fence->sequence = 0;
write_unlock_irqrestore(&fm->lock, flags);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
ret = drm_fence_object_emit(dev, fence, fence_flags, type);
ret = drm_fence_object_emit(dev, fence, fence_flags,
fence->class, type);
}
return ret;
}
@ -430,7 +464,7 @@ int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(drm_device_t * dev, uint32_t type,
int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
unsigned flags, drm_fence_object_t ** c_fence)
{
drm_fence_object_t *fence;
@ -440,7 +474,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, type, flags, fence);
ret = drm_fence_object_init(dev, class, type, flags, fence);
if (ret) {
drm_fence_usage_deref_unlocked(dev, fence);
return ret;
@ -456,22 +490,31 @@ EXPORT_SYMBOL(drm_fence_object_create);
void drm_fence_manager_init(drm_device_t * dev)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *class;
drm_fence_driver_t *fed = dev->driver->fence_driver;
int i;
fm->lock = RW_LOCK_UNLOCKED;
write_lock(&fm->lock);
INIT_LIST_HEAD(&fm->ring);
fm->pending_flush = 0;
DRM_INIT_WAITQUEUE(&fm->fence_queue);
fm->initialized = 0;
if (fed) {
fm->initialized = 1;
atomic_set(&fm->count, 0);
for (i = 0; i < fed->no_types; ++i) {
fm->fence_types[i] = &fm->ring;
}
if (!fed)
goto out_unlock;
fm->initialized = 1;
fm->num_classes = fed->num_classes;
BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
for (i=0; i<fm->num_classes; ++i) {
class = &fm->class[i];
INIT_LIST_HEAD(&class->ring);
class->pending_flush = 0;
DRM_INIT_WAITQUEUE(&class->fence_queue);
}
atomic_set(&fm->count, 0);
out_unlock:
write_unlock(&fm->lock);
}
@ -518,7 +561,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
case drm_fence_create:
if (arg.flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, filp);
ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
ret = drm_fence_object_create(dev, arg.class,
arg.type, arg.flags, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(priv, fence,
@ -581,7 +625,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
arg.type);
break;
case drm_fence_buffers:
if (!dev->bm.initialized) {


@ -427,38 +427,51 @@ int drm_release(struct inode *inode, struct file *filp)
dev->open_count);
if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
unsigned long _end = jiffies + DRM_HZ*3;
do {
retcode = drm_kernel_take_hw_lock(filp);
} while(retcode && !time_after_eq(jiffies,_end));
if (!retcode) {
if (drm_i_have_hw_lock(filp)) {
dev->driver->reclaim_buffers_locked(dev, filp);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
} else {
unsigned long _end=jiffies + 3*DRM_HZ;
int locked = 0;
drm_idlelock_take(&dev->lock);
/*
* FIXME: This is not a good solution. We should perhaps associate the
* DRM lock with a process context, and check whether the current process
* holds the lock. Then we can run reclaim buffers locked anyway.
* Wait for a while.
*/
DRM_ERROR("Reclaim buffers locked deadlock.\n"
"\tThis is probably a single thread having multiple\n"
"\tDRM file descriptors open either dying or"
" closing file descriptors\n"
"\twhile having the lock. I will not reclaim buffers.\n"
"\tLocking context is 0x%08x\n",
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
do{
spin_lock(&dev->lock.spinlock);
locked = dev->lock.idle_has_lock;
spin_unlock(&dev->lock.spinlock);
if (locked)
break;
schedule();
} while (!time_after_eq(jiffies, _end));
if (!locked) {
DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
"\tdriver to use reclaim_buffers_idlelocked() instead.\n"
"\tI will go on reclaiming the buffers anyway.\n");
}
dev->driver->reclaim_buffers_locked(dev, filp);
drm_idlelock_release(&dev->lock);
}
} else if (drm_i_have_hw_lock(filp)) {
}
if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
drm_idlelock_take(&dev->lock);
dev->driver->reclaim_buffers_idlelocked(dev, filp);
drm_idlelock_release(&dev->lock);
}
if (drm_i_have_hw_lock(filp)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(dev, &dev->lock.hw_lock->lock,
drm_lock_free(&dev->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
}


@ -422,7 +422,7 @@ static void drm_locked_tasklet_func(unsigned long data)
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
if (!dev->locked_tasklet_func ||
!drm_lock_take(&dev->lock.hw_lock->lock,
!drm_lock_take(&dev->lock,
DRM_KERNEL_CONTEXT)) {
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
return;
@ -433,7 +433,7 @@ static void drm_locked_tasklet_func(unsigned long data)
dev->locked_tasklet_func(dev);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
drm_lock_free(&dev->lock,
DRM_KERNEL_CONTEXT);
dev->locked_tasklet_func = NULL;


@ -35,12 +35,6 @@
#include "drmP.h"
#if 0
static int drm_lock_transfer(drm_device_t * dev,
__volatile__ unsigned int *lock,
unsigned int context);
#endif
static int drm_notifier(void *priv);
/**
@ -83,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
return -EINVAL;
add_wait_queue(&dev->lock.lock_queue, &entry);
spin_lock(&dev->lock.spinlock);
dev->lock.user_waiters++;
spin_unlock(&dev->lock.spinlock);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
@ -90,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
if (drm_lock_take(&dev->lock, lock.context)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@ -104,6 +101,9 @@ int drm_lock(struct inode *inode, struct file *filp,
break;
}
}
spin_lock(&dev->lock.spinlock);
dev->lock.user_waiters--;
spin_unlock(&dev->lock.spinlock);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
@ -184,8 +184,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
lock.context)) {
if (drm_lock_free(&dev->lock,lock.context)) {
/* FIXME: Should really bail out here. */
}
}
@ -203,18 +202,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
int drm_lock_take(drm_lock_data_t *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock(&lock_data->spinlock);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;
else {
new = context | _DRM_LOCK_HELD |
((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
_DRM_LOCK_CONT : 0);
}
prev = cmpxchg(lock, old, new);
} while (prev != old);
spin_unlock(&lock_data->spinlock);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
@ -224,14 +231,15 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {
if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
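/*
 * Editor's sketch, not part of this change: the hardware lock is a single
 * 32-bit word combining state and owner. The flag values are assumed here
 * from drm.h for illustration:
 *
 *   _DRM_LOCK_HELD  0x80000000  -- lock is held
 *   _DRM_LOCK_CONT  0x40000000  -- contended: another waiter wants the lock
 *   remaining bits              -- context (owner) id
 *
 * So an uncontended drm_lock_take(lock_data, ctx) leaves
 * *lock == (ctx | _DRM_LOCK_HELD), and _DRM_LOCKING_CONTEXT() strips the
 * two flag bits to recover ctx:
 */
static inline void example_lock_word(unsigned int context)
{
	unsigned int word = context | _DRM_LOCK_HELD;	/* uncontended take */
	BUG_ON(_DRM_LOCKING_CONTEXT(word) != context);	/* flag bits stripped */
	BUG_ON(!_DRM_LOCK_IS_HELD(word));
}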
#if 0
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
@ -244,13 +252,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
static int drm_lock_transfer(drm_device_t * dev,
__volatile__ unsigned int *lock,
static int drm_lock_transfer(drm_lock_data_t *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
dev->lock.filp = NULL;
lock_data->filp = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
@ -258,7 +266,6 @@ static int drm_lock_transfer(drm_device_t * dev,
} while (prev != old);
return 1;
}
#endif
/**
* Free lock.
@ -271,10 +278,19 @@ static int drm_lock_transfer(drm_device_t * dev,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context)
int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock(&lock_data->spinlock);
if (lock_data->kernel_waiters != 0) {
drm_lock_transfer(lock_data, 0);
lock_data->idle_has_lock = 1;
spin_unlock(&lock_data->spinlock);
return 1;
}
spin_unlock(&lock_data->spinlock);
do {
old = *lock;
@ -287,7 +303,7 @@ int drm_lock_free(drm_device_t * dev,
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
wake_up_interruptible(&dev->lock.lock_queue);
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
@ -322,11 +338,59 @@ static int drm_notifier(void *priv)
return 0;
}
/*
* Can be used by drivers to take the hardware lock if necessary.
* (Waiting for idle before reclaiming buffers etc.)
/**
* This function returns immediately. It takes the hw lock with the kernel
* context if the lock is free; otherwise the kernel context is given the
* highest priority to acquire it when and if the lock is eventually released.
*
* This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
* by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
* a deadlock, which is why the "idlelock" was invented).
*
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
void drm_idlelock_take(drm_lock_data_t *lock_data)
{
int ret = 0;
spin_lock(&lock_data->spinlock);
lock_data->kernel_waiters++;
if (!lock_data->idle_has_lock) {
spin_unlock(&lock_data->spinlock);
ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
spin_lock(&lock_data->spinlock);
if (ret == 1)
lock_data->idle_has_lock = 1;
}
spin_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(drm_lock_data_t *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock(&lock_data->spinlock);
if (--lock_data->kernel_waiters == 0) {
if (lock_data->idle_has_lock) {
do {
old = *lock;
prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
} while (prev != old);
wake_up_interruptible(&lock_data->lock_queue);
lock_data->idle_has_lock = 0;
}
}
spin_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);
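/*
 * Editor's sketch, not part of this change: how a driver is expected to use
 * the idlelock pair around idle-time work, mirroring the drm_release() hunk
 * above. "example_run_idlelocked" and its "work" callback are hypothetical
 * names.
 */
static void example_run_idlelocked(drm_device_t *dev,
				   void (*work) (drm_device_t * dev))
{
	drm_idlelock_take(&dev->lock);
	/*
	 * Here we either took the hw lock directly with DRM_KERNEL_CONTEXT,
	 * or the next drm_lock_free() will hand it to the kernel context
	 * and set idle_has_lock before any other client can grab it.
	 */
	work(dev);
	drm_idlelock_release(&dev->lock);	/* wake user-space waiters */
}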
int drm_i_have_hw_lock(struct file *filp)
{
DRM_DEVICE;
@ -337,50 +401,3 @@ int drm_i_have_hw_lock(struct file *filp)
}
EXPORT_SYMBOL(drm_i_have_hw_lock);
int drm_kernel_take_hw_lock(struct file *filp)
{
DRM_DEVICE;
int ret = 0;
unsigned long _end = jiffies + 3*DRM_HZ;
if (!drm_i_have_hw_lock(filp)) {
DECLARE_WAITQUEUE(entry, current);
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
if (time_after_eq(jiffies,_end)) {
ret = -EBUSY;
break;
}
schedule_timeout(1);
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
return ret;
}
EXPORT_SYMBOL(drm_kernel_take_hw_lock);

View File

@ -217,6 +217,7 @@ void drm_mm_put_block(drm_mm_node_t * cur)
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
EXPORT_SYMBOL(drm_mm_put_block);
drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
unsigned long size,

View File

@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -10,6 +10,10 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@ -18,13 +22,11 @@
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"

linux-core/drm_objects.h (new file, 470 lines)
View File

@ -0,0 +1,470 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRM_OBJECTS_H
#define _DRM_OBJECTS_H
#define DRM_HAS_TTM
struct drm_device;
/***************************************************
* User space objects. (drm_object.c)
*/
#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
typedef enum {
drm_fence_type,
drm_buffer_type,
drm_ttm_type
/*
* Add other user space object types here.
*/
} drm_object_type_t;
/*
* A user object is a structure that helps the drm give out user handles
* to kernel internal objects and to keep track of these objects so that
* they can be destroyed, for example when the user space process exits.
* Designed to be accessible using a user space 32-bit handle.
*/
typedef struct drm_user_object {
drm_hash_item_t hash;
struct list_head list;
drm_object_type_t type;
atomic_t refcount;
int shareable;
drm_file_t *owner;
void (*ref_struct_locked) (drm_file_t * priv,
struct drm_user_object * obj,
drm_ref_t ref_action);
void (*unref) (drm_file_t * priv, struct drm_user_object * obj,
drm_ref_t unref_action);
void (*remove) (drm_file_t * priv, struct drm_user_object * obj);
} drm_user_object_t;
/*
* A ref object is a structure used to keep track of references to user
* objects, so that these references can be destroyed, for example when the
* user space process exits. Designed to be accessible using a pointer to
* the _user_ object.
*/
typedef struct drm_ref_object {
drm_hash_item_t hash;
struct list_head list;
atomic_t refcount;
drm_ref_t unref_action;
} drm_ref_object_t;
/**
* Must be called with the struct_mutex held.
*/
extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
int shareable);
/**
* Must be called with the struct_mutex held.
*/
extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
uint32_t key);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_user_object. You may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item);
/*
* Must be called with the struct_mutex held. May temporarily release it.
*/
extern int drm_add_ref_object(drm_file_t * priv,
drm_user_object_t * referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
*/
drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
drm_user_object_t * referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_ref_object. You may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item);
extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
drm_object_type_t type,
drm_user_object_t ** object);
extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
drm_object_type_t type);
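/*
 * Editor's sketch, an assumption rather than part of this header: the
 * intended lifecycle of a user object under struct_mutex. "example_obj" and
 * "example_expose" are hypothetical names; the drm_user_object_t is embedded
 * so drm_user_object_entry() can recover the wrapper.
 */
struct example_obj {
	drm_user_object_t base;
	int payload;
};

static int example_expose(drm_file_t * priv, struct example_obj *obj)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &obj->base, 1 /* shareable */);
	if (!ret) {
		/* The handle handed to user space is the hash key. */
		drm_user_object_t *uo =
		    drm_lookup_user_object(priv, obj->base.hash.key);
		BUG_ON(uo != &obj->base);
	}
	mutex_unlock(&dev->struct_mutex);
	return ret;
}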
/***************************************************
* Fence objects. (drm_fence.c)
*/
typedef struct drm_fence_object {
drm_user_object_t base;
atomic_t usage;
/*
* The below three fields are protected by the fence manager spinlock.
*/
struct list_head ring;
int class;
uint32_t native_type;
uint32_t type;
uint32_t signaled;
uint32_t sequence;
uint32_t flush_mask;
uint32_t submitted_flush;
} drm_fence_object_t;
#define _DRM_FENCE_CLASSES 8
#define _DRM_FENCE_TYPE_EXE 0x00
typedef struct drm_fence_class_manager {
struct list_head ring;
uint32_t pending_flush;
wait_queue_head_t fence_queue;
int pending_exe_flush;
uint32_t last_exe_flush;
uint32_t exe_flush_sequence;
} drm_fence_class_manager_t;
typedef struct drm_fence_manager {
int initialized;
rwlock_t lock;
drm_fence_class_manager_t class[_DRM_FENCE_CLASSES];
uint32_t num_classes;
atomic_t count;
} drm_fence_manager_t;
typedef struct drm_fence_driver {
uint32_t num_classes;
uint32_t wrap_diff;
uint32_t flush_diff;
uint32_t sequence_mask;
int lazy_capable;
int (*has_irq) (struct drm_device * dev, uint32_t class,
uint32_t flags);
int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
uint32_t * breadcrumb, uint32_t * native_type);
void (*poke_flush) (struct drm_device * dev, uint32_t class);
} drm_fence_driver_t;
extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
uint32_t sequence, uint32_t type);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
uint32_t sequence);
extern int drm_fence_object_flush(struct drm_device *dev,
drm_fence_object_t * fence, uint32_t type);
extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
extern void drm_fence_usage_deref_locked(struct drm_device *dev,
drm_fence_object_t * fence);
extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
drm_fence_object_t * fence);
extern int drm_fence_object_wait(struct drm_device *dev,
drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
uint32_t fence_flags, uint32_t class,
drm_fence_object_t ** c_fence);
extern int drm_fence_add_user_object(drm_file_t * priv,
drm_fence_object_t * fence, int shareable);
extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
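/*
 * Editor's sketch, not part of this header: a kernel-side create-and-wait
 * round trip using the declarations above. The DRM_FENCE_FLAG_EMIT and
 * DRM_FENCE_TYPE_EXE values are assumed from drm.h for illustration;
 * drivers would pass their own fence class and type.
 */
static int example_fence_sync(struct drm_device *dev)
{
	drm_fence_object_t *fence;
	int ret;

	ret = drm_fence_object_create(dev, DRM_FENCE_TYPE_EXE,
				      DRM_FENCE_FLAG_EMIT, 0 /* class */,
				      &fence);
	if (ret)
		return ret;

	/* Lazy wait: rely on the IRQ handler to signal completion. */
	ret = drm_fence_object_wait(dev, fence, 1, 0, fence->type);
	drm_fence_usage_deref_unlocked(dev, fence);
	return ret;
}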
/**************************************************
*TTMs
*/
/*
* The ttm backend GTT interface. (In our case AGP).
* Any similar type of device (PCIE?)
* needs only to implement these functions to be usable with the "TTM" interface.
* The AGP backend implementation lives in drm_agpsupport.c and
* basically maps these calls to available functions in agpgart.
* Each drm device driver gets an
* additional function pointer that creates these types,
* so that the device can choose the correct aperture.
* (Multiple AGP apertures, etc.)
* Most device drivers will let this point to the standard AGP implementation.
*/
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
typedef struct drm_ttm_backend {
void *private;
uint32_t flags;
uint32_t drm_map_type;
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
int (*bind) (struct drm_ttm_backend * backend,
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_t;
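/*
 * Editor's sketch, an assumption: the minimal shape of a custom backend.
 * Real drivers normally just return drm_agp_init_ttm() from their
 * create_ttm_backend_entry hook; these no-op stubs only illustrate which
 * functions a new aperture type must provide.
 */
static int ex_needs_ub_cache_adjust(drm_ttm_backend_t * be)
{
	return 0;		/* pages stay cached while unbound */
}
static int ex_populate(drm_ttm_backend_t * be, unsigned long num_pages,
		       struct page **pages)
{
	return 0;		/* remember the page array for bind() */
}
static void ex_clear(drm_ttm_backend_t * be)
{
}
static int ex_bind(drm_ttm_backend_t * be, unsigned long offset, int cached)
{
	return 0;		/* insert the pages into the GART at offset */
}
static int ex_unbind(drm_ttm_backend_t * be)
{
	return 0;
}
static void ex_destroy(drm_ttm_backend_t * be)
{
}

static drm_ttm_backend_t example_backend = {
	.flags = 0,
	.drm_map_type = _DRM_TTM,
	.needs_ub_cache_adjust = ex_needs_ub_cache_adjust,
	.populate = ex_populate,
	.clear = ex_clear,
	.bind = ex_bind,
	.unbind = ex_unbind,
	.destroy = ex_destroy,
};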
typedef struct drm_ttm {
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
unsigned long aper_offset;
atomic_t vma_count;
struct drm_device *dev;
int destroy;
uint32_t mapping_offset;
drm_ttm_backend_t *be;
enum {
ttm_bound,
ttm_evicted,
ttm_unbound,
ttm_unpopulated,
} state;
} drm_ttm_t;
extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern void drm_ttm_unbind(drm_ttm_t * ttm);
extern void drm_ttm_evict(drm_ttm_t * ttm);
extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
* which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
* when the last vma exits.
*/
extern int drm_destroy_ttm(drm_ttm_t * ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
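/*
 * Editor's note: DRM_FLAG_MASKED copies only the bits selected by _mask
 * from _new into _old, leaving all other bits untouched. A worked example:
 *
 *   old = 0x0101, new = 0x0010, mask = 0x0011
 *   (old ^ new) = 0x0111; & mask = 0x0011; old ^= 0x0011 -> 0x0110
 *
 * i.e. the result equals (old & ~mask) | (new & mask).
 */
static inline void example_flag_masked(void)
{
	uint32_t flags = 0x0101;
	DRM_FLAG_MASKED(flags, 0x0010, 0x0011);
	BUG_ON(flags != 0x0110);
}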
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
/*
* Page flags.
*/
#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED 0x02
#define DRM_TTM_PAGE_BOUND 0x04
#define DRM_TTM_PAGE_PRESENT 0x08
#define DRM_TTM_PAGE_VMALLOC 0x10
/***************************************************
* Buffer objects. (drm_bo.c, drm_bo_move.c)
*/
typedef struct drm_bo_mem_reg {
drm_mm_node_t *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t flags;
uint32_t mask;
} drm_bo_mem_reg_t;
typedef struct drm_buffer_object {
struct drm_device *dev;
drm_user_object_t base;
/*
* If there is a possibility that the usage variable is zero,
* then dev->struct_mutex should be locked before incrementing it.
*/
atomic_t usage;
unsigned long buffer_start;
drm_bo_type_t type;
unsigned long offset;
atomic_t mapped;
drm_bo_mem_reg_t mem;
struct list_head lru;
struct list_head ddestroy;
uint32_t fence_type;
uint32_t fence_class;
drm_fence_object_t *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
struct mutex mutex;
/* For pinned buffers */
drm_mm_node_t *pinned_node;
uint32_t pinned_mem_type;
struct list_head pinned_lru;
/* For vm */
drm_ttm_t *ttm;
drm_map_list_t map_list;
uint32_t memory_type;
unsigned long bus_offset;
uint32_t vm_flags;
void *iomap;
#ifdef DRM_ODD_MM_COMPAT
/* dev->struct_mutex only protected. */
struct list_head vma_list;
struct list_head p_mm_list;
#endif
} drm_buffer_object_t;
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
typedef struct drm_mem_type_manager {
int has_type;
int use_type;
drm_mm_t manager;
struct list_head lru;
struct list_head pinned;
uint32_t flags;
uint32_t drm_bus_maptype;
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
} drm_mem_type_manager_t;
#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
before kernel access. */
#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
typedef struct drm_buffer_manager {
struct mutex init_mutex;
struct mutex evict_mutex;
int nice_mode;
int initialized;
drm_file_t *last_to_validate;
drm_mem_type_manager_t man[DRM_BO_MEM_TYPES];
struct list_head unfenced;
struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
struct work_struct wq;
#else
struct delayed_work wq;
#endif
uint32_t fence_type;
unsigned long cur_pages;
atomic_t count;
} drm_buffer_manager_t;
typedef struct drm_bo_driver {
const uint32_t *mem_type_prio;
const uint32_t *mem_busy_prio;
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device * dev);
int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type);
int (*invalidate_caches) (struct drm_device * dev, uint32_t flags);
int (*init_mem_type) (struct drm_device * dev, uint32_t type,
drm_mem_type_manager_t * man);
uint32_t(*evict_mask) (struct drm_buffer_object *bo);
int (*move) (struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
} drm_bo_driver_t;
/*
* buffer objects (drm_bo.c)
*/
extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
drm_bo_mem_reg_t * mem,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
drm_fence_object_t * fence,
drm_fence_object_t ** used_fence);
extern void drm_bo_add_to_lru(drm_buffer_object_t * bo);
extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
int no_wait);
extern int drm_bo_mem_space(drm_buffer_object_t * bo,
drm_bo_mem_reg_t * mem, int no_wait);
extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
int no_wait, int move_unfenced);
/*
* Buffer object memory move helpers.
* drm_bo_move.c
*/
extern int drm_bo_move_ttm(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
extern int drm_bo_move_memcpy(drm_buffer_object_t * bo,
int evict,
int no_wait, drm_bo_mem_reg_t * new_mem);
extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
int evict,
int no_wait,
uint32_t fence_class,
uint32_t fence_type,
uint32_t fence_flags,
drm_bo_mem_reg_t * new_mem);
#endif

View File

@ -63,10 +63,12 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
spin_lock_init(&dev->lock.spinlock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->bm.init_mutex);
mutex_init(&dev->bm.evict_mutex);
dev->pdev = pdev;
dev->pci_device = pdev->device;

View File

@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -11,6 +11,10 @@
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@ -18,13 +22,11 @@
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
@ -33,18 +35,17 @@ static void drm_ttm_ipi_handler(void *null)
flush_agp_cache();
}
static void drm_ttm_cache_flush(void)
static void drm_ttm_cache_flush(void)
{
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
/*
* Use kmalloc if possible. Otherwise fall back to vmalloc.
*/
static void ttm_alloc_pages(drm_ttm_t *ttm)
static void ttm_alloc_pages(drm_ttm_t * ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
ttm->pages = NULL;
@ -65,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t *ttm)
}
}
static void ttm_free_pages(drm_ttm_t *ttm)
static void ttm_free_pages(drm_ttm_t * ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
@ -79,27 +80,24 @@ static void ttm_free_pages(drm_ttm_t *ttm)
ttm->pages = NULL;
}
/*
* Unmap all vma pages from vmas mapping this ttm.
*/
static int unmap_vma_pages(drm_ttm_t * ttm)
static struct page *drm_ttm_alloc_page(void)
{
drm_device_t *dev = ttm->dev;
loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
struct page *page;
#ifdef DRM_ODD_MM_COMPAT
int ret;
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
if (drm_alloc_memctl(PAGE_SIZE)) {
return NULL;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return NULL;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_finish_unmap(ttm);
#endif
return 0;
return page;
}
/*
@ -116,7 +114,7 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached)
if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
return 0;
if (noncached)
if (noncached)
drm_ttm_cache_flush();
for (i = 0; i < ttm->num_pages; ++i) {
@ -135,7 +133,7 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached)
if (do_tlbflush)
flush_agp_mappings();
DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
return 0;
}
@ -154,18 +152,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
if (!ttm)
return 0;
if (atomic_read(&ttm->vma_count) > 0) {
ttm->destroy = 1;
DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
return -EBUSY;
}
DRM_DEBUG("Destroying a ttm\n");
#ifdef DRM_TTM_ODD_COMPAT
BUG_ON(!list_empty(&ttm->vma_list));
BUG_ON(!list_empty(&ttm->p_mm_list));
#endif
be = ttm->be;
if (be) {
be->destroy(be);
@ -193,11 +179,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
DRM_ERROR("Erroneous map count. "
"Leaking page mappings.\n");
}
/*
* End debugging.
*/
__free_page(*cur_page);
drm_free_memctl(PAGE_SIZE);
--bm->cur_pages;
@ -210,37 +191,36 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
return 0;
}
struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
{
struct page *p;
drm_buffer_manager_t *bm = &ttm->dev->bm;
p = ttm->pages[index];
if (!p) {
p = drm_ttm_alloc_page();
if (!p)
return NULL;
ttm->pages[index] = p;
++bm->cur_pages;
}
return p;
}
static int drm_ttm_populate(drm_ttm_t * ttm)
{
struct page *page;
unsigned long i;
drm_buffer_manager_t *bm;
drm_ttm_backend_t *be;
if (ttm->state != ttm_unpopulated)
return 0;
bm = &ttm->dev->bm;
be = ttm->be;
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
return -ENOMEM;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return -ENOMEM;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
ttm->pages[i] = page;
++bm->cur_pages;
}
page = drm_ttm_get_page(ttm, i);
if (!page)
return -ENOMEM;
}
be->populate(be, ttm->num_pages, ttm->pages);
ttm->state = ttm_unbound;
@ -251,7 +231,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
* Initialize a ttm.
*/
static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
{
drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
drm_ttm_t *ttm;
@ -263,11 +243,6 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
if (!ttm)
return NULL;
#ifdef DRM_ODD_MM_COMPAT
INIT_LIST_HEAD(&ttm->p_mm_list);
INIT_LIST_HEAD(&ttm->vma_list);
#endif
ttm->dev = dev;
atomic_set(&ttm->vma_count, 0);
@ -300,29 +275,20 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
* Unbind a ttm region from the aperture.
*/
int drm_evict_ttm(drm_ttm_t * ttm)
void drm_ttm_evict(drm_ttm_t * ttm)
{
drm_ttm_backend_t *be = ttm->be;
int ret;
switch (ttm->state) {
case ttm_bound:
if (be->needs_ub_cache_adjust(be)) {
ret = unmap_vma_pages(ttm);
if (ret) {
return ret;
}
}
be->unbind(be);
break;
default:
break;
if (ttm->state == ttm_bound) {
ret = be->unbind(be);
BUG_ON(ret);
}
ttm->state = ttm_evicted;
return 0;
}
void drm_fixup_ttm_caching(drm_ttm_t * ttm)
void drm_ttm_fixup_caching(drm_ttm_t * ttm)
{
if (ttm->state == ttm_evicted) {
@ -334,18 +300,12 @@ void drm_fixup_ttm_caching(drm_ttm_t * ttm)
}
}
int drm_unbind_ttm(drm_ttm_t * ttm)
void drm_ttm_unbind(drm_ttm_t * ttm)
{
int ret = 0;
if (ttm->state == ttm_bound)
ret = drm_evict_ttm(ttm);
drm_ttm_evict(ttm);
if (ret)
return ret;
drm_fixup_ttm_caching(ttm);
return 0;
drm_ttm_fixup_caching(ttm);
}
int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
@ -364,26 +324,13 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
ret = drm_ttm_populate(ttm);
if (ret)
return ret;
if (ttm->state == ttm_unbound && !cached) {
ret = unmap_vma_pages(ttm);
if (ret)
return ret;
if (ttm->state == ttm_unbound && !cached) {
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
#ifdef DRM_ODD_MM_COMPAT
else if (ttm->state == ttm_evicted && !cached) {
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
}
#endif
if ((ret = be->bind(be, aper_offset, cached))) {
ttm->state = ttm_evicted;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_ub_cache_adjust(be))
drm_ttm_unlock_mm(ttm);
#endif
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
@ -391,130 +338,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
ttm->aper_offset = aper_offset;
ttm->state = ttm_bound;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_ub_cache_adjust(be)) {
ret = drm_ttm_remap_bound(ttm);
if (ret)
return ret;
}
#endif
return 0;
}
/*
* dev->struct_mutex locked.
*/
static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
{
drm_map_list_t *list = &object->map_list;
drm_local_map_t *map;
if (list->user_token)
drm_ht_remove_item(&dev->map_hash, &list->hash);
if (list->file_offset_node) {
drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
map = list->map;
if (map) {
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
if (ttm) {
if (drm_destroy_ttm(ttm) != -EBUSY) {
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
} else {
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
}
void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
{
if (atomic_dec_and_test(&to->usage)) {
drm_ttm_object_remove(dev, to);
}
}
void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
{
if (atomic_dec_and_test(&to->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&to->usage) == 0)
drm_ttm_object_remove(dev, to);
mutex_unlock(&dev->struct_mutex);
}
}
/*
* Create a ttm and add it to the drm book-keeping.
* dev->struct_mutex locked.
*/
int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
uint32_t flags, drm_ttm_object_t ** ttm_object)
{
drm_ttm_object_t *object;
drm_map_list_t *list;
drm_local_map_t *map;
drm_ttm_t *ttm;
object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
if (!object)
return -ENOMEM;
object->flags = flags;
list = &object->map_list;
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
if (!list->map) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
map = list->map;
ttm = drm_init_ttm(dev, size);
if (!ttm) {
DRM_ERROR("Could not create ttm\n");
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
map->offset = (unsigned long)ttm;
map->type = _DRM_TTM;
map->flags = _DRM_REMOVABLE;
map->size = ttm->num_pages * PAGE_SIZE;
map->handle = (void *)object;
/*
* Add a one-page "hole" to the block size to avoid the mm subsystem
* merging vmas.
* FIXME: Is this really needed?
*/
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
ttm->num_pages + 1, 0, 0);
if (!list->file_offset_node) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
ttm->num_pages + 1, 0);
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
ttm->mapping_offset = list->hash.key;
atomic_set(&object->usage, 1);
*ttm_object = object;
return 0;
}
EXPORT_SYMBOL(drm_bind_ttm);

View File

@ -1,146 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRM_TTM_H
#define _DRM_TTM_H
#define DRM_HAS_TTM
/*
* The backend GART interface. (In our case AGP). Any similar type of device (PCIE?)
* needs only to implement these functions to be usable with the "TTM" interface.
* The AGP backend implementation lives in drm_agpsupport.c
* basically maps these calls to available functions in agpgart. Each drm device driver gets an
* additional function pointer that creates these types,
* so that the device can choose the correct aperture.
* (Multiple AGP apertures, etc.)
* Most device drivers will let this point to the standard AGP implementation.
*/
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
#define DRM_BE_FLAG_CBA 0x00000004
typedef struct drm_ttm_backend {
unsigned long aperture_base;
void *private;
uint32_t flags;
uint32_t drm_map_type;
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
int (*bind) (struct drm_ttm_backend * backend,
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_t;
typedef struct drm_ttm {
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
unsigned long aper_offset;
atomic_t vma_count;
struct drm_device *dev;
int destroy;
uint32_t mapping_offset;
drm_ttm_backend_t *be;
enum {
ttm_bound,
ttm_evicted,
ttm_unbound,
ttm_unpopulated,
} state;
#ifdef DRM_ODD_MM_COMPAT
struct list_head vma_list;
struct list_head p_mm_list;
#endif
} drm_ttm_t;
typedef struct drm_ttm_object {
atomic_t usage;
uint32_t flags;
drm_map_list_t map_list;
} drm_ttm_object_t;
extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
uint32_t flags,
drm_ttm_object_t ** ttm_object);
extern void drm_ttm_object_deref_locked(struct drm_device *dev,
drm_ttm_object_t * to);
extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
drm_ttm_object_t * to);
extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
uint32_t handle,
int check_owner);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern int drm_unbind_ttm(drm_ttm_t * ttm);
/*
* Evict a ttm region. Keeps Aperture caching policy.
*/
extern int drm_evict_ttm(drm_ttm_t * ttm);
extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
* which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
* when the last vma exits.
*/
extern int drm_destroy_ttm(drm_ttm_t * ttm);
extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
{
return (drm_ttm_t *) to->map_list.map->offset;
}
#define DRM_MASK_VAL(dest, mask, val) \
(dest) = ((dest) & ~(mask)) | ((val) & (mask));
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
/*
* Page flags.
*/
#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED 0x02
#define DRM_TTM_PAGE_BOUND 0x04
#define DRM_TTM_PAGE_PRESENT 0x08
#define DRM_TTM_PAGE_VMALLOC 0x10
#endif

View File

@ -41,9 +41,9 @@
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static void drm_vm_ttm_close(struct vm_area_struct *vma);
static int drm_vm_ttm_open(struct vm_area_struct *vma);
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
static int drm_bo_mmap_locked(struct vm_area_struct *vma,
struct file *filp,
drm_local_map_t *map);
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
@ -159,96 +159,6 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
static
#endif
struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
unsigned long pfn;
int err;
pgprot_t pgprot;
if (!map) {
data->type = VM_FAULT_OOM;
return NULL;
}
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
return NULL;
}
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
/*
* Perhaps retry here?
*/
mutex_lock(&dev->struct_mutex);
drm_fixup_ttm_caching(ttm);
bm = &dev->bm;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
data->type = VM_FAULT_OOM;
goto out;
}
page = ttm->pages[page_offset] =
alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
data->type = VM_FAULT_OOM;
goto out;
}
++bm->cur_pages;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
}
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
/*
* FIXME: Check can't map aperture flag.
*/
pfn = ttm->aper_offset + page_offset +
(ttm->be->aperture_base >> PAGE_SHIFT);
pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
} else {
pfn = page_to_pfn(page);
pgprot = vma->vm_page_prot;
}
err = vm_insert_pfn(vma, address, pfn, pgprot);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out:
mutex_unlock(&dev->struct_mutex);
return NULL;
}
#endif
/**
* \c nopage method for shared virtual memory.
*
@ -508,20 +418,6 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
static struct vm_operations_struct drm_vm_ttm_ops = {
.nopage = drm_vm_ttm_nopage,
.open = drm_vm_ttm_open_wrapper,
.close = drm_vm_ttm_close,
};
#else
static struct vm_operations_struct drm_vm_ttm_ops = {
.fault = drm_vm_ttm_fault,
.open = drm_vm_ttm_open_wrapper,
.close = drm_vm_ttm_close,
};
#endif
/**
* \c open method for shared virtual memory.
*
@ -530,7 +426,7 @@ static struct vm_operations_struct drm_vm_ttm_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
static void drm_vm_open(struct vm_area_struct *vma)
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
@ -542,36 +438,21 @@ static void drm_vm_open(struct vm_area_struct *vma)
vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
mutex_lock(&dev->struct_mutex);
vma_entry->vma = vma;
vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
mutex_unlock(&dev->struct_mutex);
}
}
static int drm_vm_ttm_open(struct vm_area_struct *vma) {
drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
drm_ttm_t *ttm;
static void drm_vm_open(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vm_open(vma);
mutex_lock(&dev->struct_mutex);
ttm = (drm_ttm_t *) map->offset;
atomic_inc(&ttm->vma_count);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_add_vma(ttm, vma);
#endif
drm_vm_open_locked(vma);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
{
drm_vm_ttm_open(vma);
}
/**
@ -608,34 +489,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
}
static void drm_vm_ttm_close(struct vm_area_struct *vma)
{
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
drm_ttm_t *ttm;
drm_device_t *dev;
int ret;
drm_vm_close(vma);
if (map) {
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_delete_vma(ttm, vma);
#endif
if (atomic_dec_and_test(&ttm->vma_count)) {
if (ttm->destroy) {
ret = drm_destroy_ttm(ttm);
BUG_ON(ret);
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
mutex_unlock(&dev->struct_mutex);
}
return;
}
/**
* mmap DMA memory.
*
@ -653,7 +506,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
drm_device_dma_t *dma;
unsigned long length = vma->vm_end - vma->vm_start;
lock_kernel();
dev = priv->head->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@ -661,10 +513,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
unlock_kernel();
return -EINVAL;
}
unlock_kernel();
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
@ -686,7 +536,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open(vma);
drm_vm_open_locked(vma);
return 0;
}
@ -719,7 +569,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
* according to the mapping type and remaps the pages. Finally sets the file
* pointer and calls vm_open().
*/
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
@ -737,6 +587,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
* the AGP mapped at physical address 0
* --BenH.
*/
if (!vma->vm_pgoff
#if __OS_HAS_AGP
&& (!dev->agp
@ -833,27 +684,254 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = (void *)map;
vma->vm_flags |= VM_RESERVED;
break;
case _DRM_TTM: {
vma->vm_ops = &drm_vm_ttm_ops;
vma->vm_private_data = (void *) map;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
#ifdef DRM_ODD_MM_COMPAT
mutex_lock(&dev->struct_mutex);
drm_ttm_map_bound(vma);
mutex_unlock(&dev->struct_mutex);
#endif
if (drm_vm_ttm_open(vma))
return -EAGAIN;
return 0;
}
case _DRM_TTM:
return drm_bo_mmap_locked(vma, filp, map);
default:
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open(vma);
drm_vm_open_locked(vma);
return 0;
}
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_mmap_locked(filp, vma);
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_mmap);
/**
* buffer object vm functions.
*/
/**
* \c Pagefault method for buffer objects.
*
* \param vma Virtual memory area.
* \param data Fault data on failure or refault.
* \return Always NULL as we insert pfns directly.
*
* It's important that pfns are inserted while holding the bo->mutex lock;
* otherwise we might race with unmap_mapping_range(), which is always
* called with the bo->mutex lock held.
*
* It's not pretty to modify the vma->vm_page_prot variable while not
* holding the mm semaphore in write mode. However, we have it in read mode,
* so we won't be racing with any other writers, and we only actually modify
* it when no ptes are present so it shouldn't be a big deal.
*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) || \
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#ifdef DRM_FULL_MM_COMPAT
static
#endif
struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
drm_ttm_t *ttm;
drm_device_t *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
mutex_lock(&bo->mutex);
err = drm_bo_wait(bo, 0, 0, 0);
if (err) {
data->type = (err == -EAGAIN) ?
VM_FAULT_MINOR : VM_FAULT_SIGBUS;
goto out_unlock;
}
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable.
*/
#ifdef DRM_BO_FULL_COMPAT
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
uint32_t new_mask = bo->mem.mask |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
if (err) {
data->type = (err == -EAGAIN) ?
VM_FAULT_MINOR : VM_FAULT_SIGBUS;
goto out_unlock;
}
}
#else
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
unsigned long _end = jiffies + 3*DRM_HZ;
uint32_t new_mask = bo->mem.mask |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
do {
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
} while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
if (err) {
DRM_ERROR("Timeout moving buffer to mappable location.\n");
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
}
#endif
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
dev = bo->dev;
err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
} else {
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
data->type = VM_FAULT_OOM;
goto out_unlock;
}
pfn = page_to_pfn(page);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
err = vm_insert_pfn(vma, address, pfn);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out_unlock:
mutex_unlock(&bo->mutex);
return NULL;
}
#endif
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_vm_open_locked(vma);
atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_add_vma(bo, vma);
#endif
}
/**
* \c vma open method for buffer objects.
*
* \param vma virtual memory area.
*/
static void drm_bo_vm_open(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_device_t *dev = bo->dev;
mutex_lock(&dev->struct_mutex);
drm_bo_vm_open_locked(vma);
mutex_unlock(&dev->struct_mutex);
}
/**
* \c vma close method for buffer objects.
*
* \param vma virtual memory area.
*/
static void drm_bo_vm_close(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_device_t *dev = bo->dev;
drm_vm_close(vma);
if (bo) {
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_delete_vma(bo, vma);
#endif
drm_bo_usage_deref_locked(bo);
mutex_unlock(&dev->struct_mutex);
}
return;
}
static struct vm_operations_struct drm_bo_vm_ops = {
#ifdef DRM_FULL_MM_COMPAT
.fault = drm_bo_vm_fault,
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
.nopfn = drm_bo_vm_nopfn,
#else
.nopage = drm_bo_vm_nopage,
#endif
#endif
.open = drm_bo_vm_open,
.close = drm_bo_vm_close,
};
/**
* mmap buffer object memory.
*
* \param vma virtual memory area.
* \param filp file pointer.
* \param map The buffer object drm map.
* \return zero on success or a negative number on failure.
*/
int drm_bo_mmap_locked(struct vm_area_struct *vma,
struct file *filp,
drm_local_map_t *map)
{
vma->vm_ops = &drm_bo_vm_ops;
vma->vm_private_data = map->handle;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
vma->vm_flags |= VM_PFNMAP;
#endif
drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_map_bound(vma);
#endif
return 0;
}

View File

@ -125,8 +125,8 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
VM_OFFSET(vma) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}

View File

@ -110,7 +110,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
unlock_kernel();
if (remap_pfn_range(vma, vma->vm_start,
VM_OFFSET(vma) >> PAGE_SHIFT,
vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;

View File

@ -33,16 +33,15 @@
#include "i915_drm.h"
#include "i915_drv.h"
drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
{
return drm_agp_init_ttm(dev, NULL);
}
int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
{
*class = 0;
if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
@ -64,3 +63,173 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
return i915_emit_mi_flush(dev, flush_cmd);
}
int i915_init_mem_type(drm_device_t * dev, uint32_t type,
drm_mem_type_manager_t * man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CACHED;
man->drm_bus_maptype = 0;
break;
case DRM_BO_MEM_TT:
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
break;
case DRM_BO_MEM_PRIV0:
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
uint32_t i915_evict_mask(drm_buffer_object_t *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return DRM_BO_FLAG_MEM_LOCAL;
default:
return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
}
}
static void i915_emit_copy_blit(drm_device_t * dev,
uint32_t src_offset,
uint32_t dst_offset,
uint32_t pages, int direction)
{
uint32_t cur_pages;
uint32_t stride = PAGE_SIZE;
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
if (!dev_priv)
return;
i915_kernel_lost_context(dev);
while (pages > 0) {
cur_pages = pages;
if (cur_pages > 2048)
cur_pages = 2048;
pages -= cur_pages;
BEGIN_LP_RING(6);
OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB);
OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
(1 << 25) | (direction ? (1 << 30) : 0));
OUT_RING((cur_pages << 16) | PAGE_SIZE);
OUT_RING(dst_offset);
OUT_RING(stride & 0xffff);
OUT_RING(src_offset);
ADVANCE_LP_RING();
}
return;
}
static int i915_move_blit(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_bo_mem_reg_t *old_mem = &bo->mem;
int dir = 0;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->mm_node->start <
old_mem->mm_node->start + old_mem->mm_node->size)) {
dir = 1;
}
i915_emit_copy_blit(bo->dev,
old_mem->mm_node->start << PAGE_SHIFT,
new_mem->mm_node->start << PAGE_SHIFT,
new_mem->num_pages, dir);
i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
DRM_FENCE_TYPE_EXE |
DRM_I915_FENCE_TYPE_RW,
DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
}
/*
* Flip destination ttm into cached-coherent AGP,
* then blit and subsequently move out again.
*/
static int i915_move_flip(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_device_t *dev = bo->dev;
drm_bo_mem_reg_t tmp_mem;
int ret;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
if (ret)
return ret;
ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
if (ret)
goto out_cleanup;
ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
if (ret)
goto out_cleanup;
ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
drm_mm_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
return ret;
}
int i915_move(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_bo_mem_reg_t *old_mem = &bo->mem;
if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
if (i915_move_flip(bo, evict, no_wait, new_mem))
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else {
if (i915_move_blit(bo, evict, no_wait, new_mem))
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
return 0;
}

View File

@ -40,22 +40,32 @@ static struct pci_device_id pciidlist[] = {
#ifdef I915_HAVE_FENCE
static drm_fence_driver_t i915_fence_driver = {
.no_types = 2,
.num_classes = 1,
.wrap_diff = (1 << 30),
.flush_diff = (1 << 29),
.sequence_mask = 0xffffffffU,
.lazy_capable = 1,
.emit = i915_fence_emit_sequence,
.poke_flush = i915_poke_flush,
.has_irq = i915_fence_has_irq,
};
#endif
#ifdef I915_HAVE_BUFFER
static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
static drm_bo_driver_t i915_bo_driver = {
.iomap = {NULL, NULL},
.cached = {1, 1},
.mem_type_prio = i915_mem_prios,
.mem_busy_prio = i915_busy_prios,
.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
.fence_type = i915_fence_types,
.invalidate_caches = i915_invalidate_caches
.invalidate_caches = i915_invalidate_caches,
.init_mem_type = i915_init_mem_type,
.evict_mask = i915_evict_mask,
.move = i915_move,
};
#endif

View File

@ -42,36 +42,34 @@ static void i915_perform_flush(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[0];
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t flush_flags = 0;
uint32_t flush_sequence = 0;
uint32_t i_status;
uint32_t diff;
uint32_t sequence;
int rwflush;
if (!dev_priv)
return;
if (fm->pending_exe_flush) {
if (fc->pending_exe_flush) {
sequence = READ_BREADCRUMB(dev_priv);
/*
* First update fences with the current breadcrumb.
*/
diff = sequence - fm->last_exe_flush;
diff = sequence - fc->last_exe_flush;
if (diff < driver->wrap_diff && diff != 0) {
drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
}
diff = sequence - fm->exe_flush_sequence;
if (diff < driver->wrap_diff) {
fm->pending_exe_flush = 0;
if (dev_priv->fence_irq_on) {
i915_user_irq_off(dev_priv);
dev_priv->fence_irq_on = 0;
}
} else if (!dev_priv->fence_irq_on) {
if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
i915_user_irq_off(dev_priv);
dev_priv->fence_irq_on = 0;
} else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
i915_user_irq_on(dev_priv);
dev_priv->fence_irq_on = 1;
}
@ -84,17 +82,18 @@ static void i915_perform_flush(drm_device_t * dev)
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, flush_sequence, flush_flags);
drm_fence_handler(dev, 0, flush_sequence, flush_flags);
}
}
if (fm->pending_flush && !dev_priv->flush_pending) {
rwflush = fc->pending_flush & DRM_I915_FENCE_TYPE_RW;
if (rwflush && !dev_priv->flush_pending) {
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
dev_priv->flush_flags = fm->pending_flush;
dev_priv->flush_flags = fc->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
dev_priv->flush_pending = 1;
fm->pending_flush = 0;
fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
}
if (dev_priv->flush_pending) {
@ -104,13 +103,13 @@ static void i915_perform_flush(drm_device_t * dev)
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, flush_sequence, flush_flags);
drm_fence_handler(dev, 0, flush_sequence, flush_flags);
}
}
}
void i915_poke_flush(drm_device_t * dev)
void i915_poke_flush(drm_device_t * dev, uint32_t class)
{
drm_fence_manager_t *fm = &dev->fm;
unsigned long flags;
@ -120,7 +119,7 @@ void i915_poke_flush(drm_device_t * dev)
write_unlock_irqrestore(&fm->lock, flags);
}
int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
uint32_t * sequence, uint32_t * native_type)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -144,3 +143,15 @@ void i915_fence_handler(drm_device_t * dev)
i915_perform_flush(dev);
write_unlock(&fm->lock);
}
int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags)
{
/*
* We have an irq that tells us when we have a new breadcrumb.
*/
if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
return 1;
return 0;
}

View File

@ -74,7 +74,7 @@ static struct drm_driver driver = {
.context_dtor = NULL,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_locked = sis_reclaim_buffers_locked,
.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,

linux-core/via_buffer.c Normal file
View File

@ -0,0 +1,163 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev)
{
return drm_agp_init_ttm(dev, NULL);
}
int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
{
*class = 0;
*type = 3;
return 0;
}
int via_invalidate_caches(drm_device_t * dev, uint32_t flags)
{
/*
* FIXME: Invalidate texture caches here.
*/
return 0;
}
static int via_vram_info(drm_device_t *dev,
unsigned long *offset,
unsigned long *size)
{
struct pci_dev *pdev = dev->pdev;
unsigned long flags;
int ret = DRM_ERR(EINVAL);
int i;
for (i=0; i<6; ++i) {
flags = pci_resource_flags(pdev, i);
if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) ==
(IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
ret = 0;
break;
}
}
if (ret) {
DRM_ERROR("Could not find VRAM PCI resource\n");
return ret;
}
*offset = pci_resource_start(pdev, i);
*size = pci_resource_end(pdev, i) - *offset + 1;
return 0;
}
int via_init_mem_type(drm_device_t * dev, uint32_t type,
drm_mem_type_manager_t * man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
/* System memory */
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CACHED;
man->drm_bus_maptype = 0;
break;
case DRM_BO_MEM_TT:
/* Dynamic agpgart memory */
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
/* Only to get pte protection right. */
man->drm_bus_maptype = _DRM_AGP;
break;
case DRM_BO_MEM_VRAM:
/* "On-card" video ram */
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_FRAME_BUFFER;
man->io_addr = NULL;
return via_vram_info(dev, &man->io_offset, &man->io_size);
break;
case DRM_BO_MEM_PRIV0:
/* Pre-bound agpgart memory */
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
uint32_t via_evict_mask(drm_buffer_object_t *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return DRM_BO_FLAG_MEM_LOCAL; /* Evict TT to local */
case DRM_BO_MEM_PRIV0: /* Evict pre-bound AGP to TT */
return DRM_BO_FLAG_MEM_TT;
case DRM_BO_MEM_VRAM:
if (bo->mem.num_pages > 128)
return DRM_BO_FLAG_MEM_TT;
else
return DRM_BO_FLAG_MEM_LOCAL;
default:
return DRM_BO_FLAG_MEM_LOCAL;
}
}

linux-core/via_fence.c Normal file
View File

@ -0,0 +1,230 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
/*
* DRM_FENCE_TYPE_EXE guarantees that all command buffers can be evicted.
* DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete.
*/
static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_fence_class_manager_t *fc = &dev->fm.class[class];
uint32_t pending_flush_types = 0;
uint32_t signaled_flush_types = 0;
uint32_t status;
if (class != 0)
return 0;
if (!dev_priv)
return 0;
spin_lock(&dev_priv->fence_lock);
pending_flush_types = fc->pending_flush |
((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
if (pending_flush_types) {
/*
* Take the idlelock. This guarantees that the next time a client tries
* to grab the lock, it will stall until the idlelock is released. That
* in turn guarantees that the GPU engines will eventually go idle, but
* nothing more; it cannot be used to protect the hardware.
*/
if (!dev_priv->have_idlelock) {
drm_idlelock_take(&dev->lock);
dev_priv->have_idlelock = 1;
}
/*
* Check if AGP command reader is idle.
*/
if (pending_flush_types & DRM_FENCE_TYPE_EXE)
if (VIA_READ(0x41C) & 0x80000000)
signaled_flush_types |= DRM_FENCE_TYPE_EXE;
/*
* Check VRAM command queue empty and 2D + 3D engines idle.
*/
if (pending_flush_types & DRM_VIA_FENCE_TYPE_ACCEL) {
status = VIA_READ(VIA_REG_STATUS);
if ((status & VIA_VR_QUEUE_BUSY) &&
!(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
signaled_flush_types |= DRM_VIA_FENCE_TYPE_ACCEL;
}
if (signaled_flush_types) {
pending_flush_types &= ~signaled_flush_types;
if (!pending_flush_types && dev_priv->have_idlelock) {
drm_idlelock_release(&dev->lock);
dev_priv->have_idlelock = 0;
}
drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types);
}
}
spin_unlock(&dev_priv->fence_lock);
return fc->pending_flush |
((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
}
/**
* Emit a fence sequence.
*/
int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
uint32_t * sequence, uint32_t * native_type)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
int ret = 0;
if (!dev_priv)
return -EINVAL;
switch(class) {
case 0: /* AGP command stream */
/*
* The sequence number isn't really used by the hardware yet.
*/
spin_lock(&dev_priv->fence_lock);
*sequence = ++dev_priv->emit_0_sequence;
spin_unlock(&dev_priv->fence_lock);
/*
* When drm_fence_handler() is called with flush type 0x01 and a
* sequence number, it means only that the EXE flag has expired.
* There is no implicit flushing, and no other engines are guaranteed idle.
*/
*native_type = DRM_FENCE_TYPE_EXE;
break;
default:
ret = DRM_ERR(EINVAL);
break;
}
return ret;
}
/**
* Manual poll (from the fence manager).
*/
void via_poke_flush(drm_device_t * dev, uint32_t class)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_fence_manager_t *fm = &dev->fm;
unsigned long flags;
uint32_t pending_flush;
if (!dev_priv)
return ;
write_lock_irqsave(&fm->lock, flags);
pending_flush = via_perform_flush(dev, class);
if (pending_flush)
pending_flush = via_perform_flush(dev, class);
write_unlock_irqrestore(&fm->lock, flags);
/*
* Kick the timer if there are more flushes pending.
*/
if (pending_flush && !timer_pending(&dev_priv->fence_timer)) {
dev_priv->fence_timer.expires = jiffies + 1;
add_timer(&dev_priv->fence_timer);
}
}
/**
* No irq fence expirations are implemented yet.
* Although both the HQV engines and the PCI dmablit engines signal
* idle with an IRQ, we don't make use of that yet.
* This means the drm fence manager will always poll for engine idle,
* unless the caller waiting for a fence object has indicated a lazy wait.
*/
int via_fence_has_irq(struct drm_device * dev, uint32_t class,
uint32_t flags)
{
return 0;
}
/**
* Regularly poll the flush function. This enables lazy waits, so we can
* set lazy_capable. A lazy wait doesn't care exactly when the fence expires,
* so a delay of one timer tick is fine.
*/
void via_fence_timer(unsigned long data)
{
drm_device_t *dev = (drm_device_t *) data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_fence_manager_t *fm = &dev->fm;
uint32_t pending_flush;
drm_fence_class_manager_t *fc = &dev->fm.class[0];
if (!dev_priv)
return;
if (!fm->initialized)
goto out_unlock;
via_poke_flush(dev, 0);
pending_flush = fc->pending_flush |
((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
/*
* Disable the timer if there are no more flushes pending.
*/
if (!pending_flush && timer_pending(&dev_priv->fence_timer)) {
BUG_ON(dev_priv->have_idlelock);
del_timer(&dev_priv->fence_timer);
}
return;
out_unlock:
return;
}

View File

@ -678,7 +678,7 @@ typedef struct drm_set_version {
typedef struct drm_fence_arg {
unsigned handle;
int class;
int class;
unsigned type;
unsigned flags;
unsigned signaled;
@ -697,9 +697,10 @@ typedef struct drm_fence_arg {
} drm_fence_arg_t;
/* Buffer permissions, referring to how the GPU uses the buffers.
these translate to fence types used for the buffers.
Typically a texture buffer is read, A destination buffer is write and
a command (batch-) buffer is exe. Can be or-ed together. */
* These translate to fence types used for the buffers.
* Typically a texture buffer is read, a destination buffer is write, and
* a command (batch) buffer is exe. They can be or-ed together.
*/
#define DRM_BO_FLAG_READ 0x00000001
#define DRM_BO_FLAG_WRITE 0x00000002
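As a hedged illustration of the comment above, a caller picks permission bits per buffer role and ors them together. DRM_BO_FLAG_EXE is assumed to be the execute permission defined alongside these two; it falls in this hunk's elided lines:
/*
 * Sketch: choose GPU-usage permission bits for a buffer.
 */
static uint32_t bo_usage_flags(int gpu_writes, int gpu_executes)
{
	uint32_t flags = DRM_BO_FLAG_READ; /* e.g. a sampled texture */
	if (gpu_writes)
		flags |= DRM_BO_FLAG_WRITE; /* e.g. a render target */
	if (gpu_executes)
		flags |= DRM_BO_FLAG_EXE; /* e.g. a command (batch) buffer */
	return flags;
}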
@ -707,47 +708,82 @@ typedef struct drm_fence_arg {
/*
* Status flags. Can be read to determine the actual state of a buffer.
* Can also be set in the buffer mask before validation.
*/
/*
* Cannot evict this buffer. Not even with force. This type of buffer should
* only be available for root, and must be manually removed before buffer
* manager shutdown or swapout.
/*
* Mask: Never evict this buffer, not even with force. This type of buffer is only
* available to root and must be manually removed before buffer manager shutdown
* or locking.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_NO_EVICT 0x00000010
/* Always keep a system memory shadow to a vram buffer */
#define DRM_BO_FLAG_SHADOW_VRAM 0x00000020
/* The buffer is shareable with other processes */
/*
* Mask: Require that the buffer is placed in mappable memory when validated.
* If not set the buffer may or may not be in mappable memory when validated.
* Flags: If set, the buffer is in mappable memory.
*/
#define DRM_BO_FLAG_MAPPABLE 0x00000020
/* Mask: The buffer should be shareable with other processes.
* Flags: The buffer is shareable with other processes.
*/
#define DRM_BO_FLAG_SHAREABLE 0x00000040
/* The buffer is currently cached */
/* Mask: If set, place the buffer in cache-coherent memory if available.
* If clear, never place the buffer in cache-coherent memory when validated.
* Flags: The buffer is currently in cache-coherent memory.
*/
#define DRM_BO_FLAG_CACHED 0x00000080
/* Make sure that every time this buffer is validated, it ends up on the same
* location. The buffer will also not be evicted when claiming space for
* other buffers. Basically a pinned buffer but it may be thrown out as
* part of buffer manager shutdown or swapout. Not supported yet.*/
/* Mask: Make sure that every time this buffer is validated,
* it ends up in the same location, provided that the memory mask is the same.
* The buffer will also not be evicted when claiming space for
* other buffers. Basically a pinned buffer but it may be thrown out as
* part of buffer manager shutdown or locking.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_NO_MOVE 0x00000100
/* Make sure the buffer is in cached memory when mapped for reading */
#define DRM_BO_FLAG_READ_CACHED 0x00080000
/* When there is a choice between VRAM and TT, prefer VRAM.
The default behaviour is to prefer TT. */
#define DRM_BO_FLAG_PREFER_VRAM 0x00040000
/* Bind this buffer cached if the hardware supports it. */
#define DRM_BO_FLAG_BIND_CACHED 0x0002000
/* Mask: Make sure the buffer is in cached memory when mapped for reading.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_READ_CACHED 0x00080000
/* System Memory */
/* Mask: Enforce DRM_BO_FLAG_CACHED strictly even when it is set.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_CACHING 0x00002000
/*
* Mask: Enforce DRM_BO_FLAG_MAPPABLE strictly even when it is clear.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000
/*
* Memory type flags that can be or'ed together in the mask, but only
* one appears in flags.
*/
/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL 0x01000000
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT 0x02000000
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM 0x04000000
/* Unmappable Vram memory */
#define DRM_BO_FLAG_MEM_VRAM_NM 0x08000000
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0 0x08000000
#define DRM_BO_FLAG_MEM_PRIV1 0x10000000
#define DRM_BO_FLAG_MEM_PRIV2 0x20000000
#define DRM_BO_FLAG_MEM_PRIV3 0x40000000
#define DRM_BO_FLAG_MEM_PRIV4 0x80000000
/* Memory flag mask */
#define DRM_BO_MASK_MEM 0xFF000000
#define DRM_BO_MASK_MEMTYPE 0xFF0000A0
/* When creating a buffer, avoid system storage even if allowed */
#define DRM_BO_HINT_AVOID_LOCAL 0x00000001
/* Don't block on validate and map */
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
/* Don't place this buffer on the unfenced list.*/
@ -756,9 +792,6 @@ typedef struct drm_fence_arg {
#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010
/* Driver specific flags. Could be for example rendering engine */
#define DRM_BO_MASK_DRIVER 0x00F00000
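The mask/flags split documented above is easiest to see at a validate call: the mask names the bits the caller cares about, and the flags give the desired values for those bits. A user-space sketch mirroring the drmBOValidate call that appears in the benchmark at the end of this diff (fd and buf are assumed to be an open DRM file descriptor and an already-created buffer; error handling elided):
/*
 * Ask for TT placement in cache-coherent memory, with the caching
 * attribute enforced (FORCE_CACHING) rather than treated as a hint.
 */
static int validate_tt_cached(int fd, drmBO *buf)
{
	return drmBOValidate(fd, buf,
			     DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED |
			     DRM_BO_FLAG_FORCE_CACHING,
			     DRM_BO_MASK_MEMTYPE | DRM_BO_FLAG_FORCE_CACHING,
			     DRM_BO_HINT_DONT_FENCE);
}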
typedef enum {
drm_bo_type_dc,
drm_bo_type_user,
@ -774,7 +807,7 @@ typedef struct drm_bo_arg_request {
drm_bo_type_t type;
unsigned arg_handle;
drm_u64_t buffer_start;
unsigned page_alignment;
unsigned page_alignment;
unsigned expand_pad[4]; /*Future expansion */
enum {
drm_bo_create,
@ -805,11 +838,11 @@ typedef struct drm_bo_arg_reply {
drm_u64_t size;
drm_u64_t offset;
drm_u64_t arg_handle;
unsigned mask;
drm_u64_t buffer_start;
unsigned fence_flags;
unsigned rep_flags;
unsigned page_alignment;
unsigned mask;
drm_u64_t buffer_start;
unsigned fence_flags;
unsigned rep_flags;
unsigned page_alignment;
unsigned expand_pad[4]; /*Future expansion */
}drm_bo_arg_reply_t;
@ -826,8 +859,13 @@ typedef struct drm_bo_arg{
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_VRAM_NM 3
#define DRM_BO_MEM_TYPES 2 /* For now. */
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7
#define DRM_BO_MEM_TYPES 8 /* For now. */
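Note that each DRM_BO_FLAG_MEM_* value above is simply the corresponding DRM_BO_MEM_* index shifted into the top byte (LOCAL is bit 24, TT is bit 25, and so on), so converting an index to its flag is a one-line shift. A sketch of that mapping, with a hypothetical helper name:
static inline uint32_t bo_mem_type_to_flag(unsigned int mem_type)
{
	/* DRM_BO_MEM_TT (1) -> 0x02000000, DRM_BO_MEM_PRIV4 (7) -> 0x80000000 */
	return 1U << (24 + mem_type);
}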
typedef union drm_mm_init_arg{
struct {

View File

@ -444,7 +444,7 @@ static void i915_emit_breadcrumb(drm_device_t *dev)
OUT_RING(0);
ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
drm_fence_flush_old(dev, dev_priv->counter);
drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
@ -591,7 +591,7 @@ static int i915_dispatch_flip(drm_device_t * dev)
OUT_RING(0);
ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
drm_fence_flush_old(dev, dev_priv->counter);
drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;

View File

@ -126,7 +126,9 @@ typedef struct drm_i915_private {
uint32_t flush_pending;
uint32_t saved_flush_status;
#endif
#ifdef I915_HAVE_BUFFER
void *agp_iomap;
#endif
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
unsigned int swaps_pending;
@ -183,17 +185,25 @@ extern void i915_mem_release(drm_device_t * dev,
extern void i915_fence_handler(drm_device_t *dev);
extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags,
extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t class,
uint32_t flags,
uint32_t *sequence,
uint32_t *native_type);
extern void i915_poke_flush(drm_device_t *dev);
extern void i915_poke_flush(drm_device_t *dev, uint32_t class);
extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags);
#endif
#ifdef I915_HAVE_BUFFER
/* i915_buffer.c */
extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type);
extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type);
extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
extern int i915_init_mem_type(drm_device_t *dev, uint32_t type,
drm_mem_type_manager_t *man);
extern uint32_t i915_evict_mask(drm_buffer_object_t *bo);
extern int i915_move(drm_buffer_object_t *bo, int evict,
int no_wait, drm_bo_mem_reg_t *new_mem);
#endif
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
@ -331,6 +341,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)

View File

@ -187,7 +187,7 @@ int nouveau_fifo_init(drm_device_t *dev)
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACH1_BIG_ENDIAN |
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
@ -293,7 +293,7 @@ static void nouveau_nv04_context_init(drm_device_t *dev,
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACH1_BIG_ENDIAN |
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
}
@ -326,7 +326,7 @@ static void nouveau_nv10_context_init(drm_device_t *dev,
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACH1_BIG_ENDIAN |
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
}
@ -357,7 +357,7 @@ static void nouveau_nv30_context_init(drm_device_t *dev,
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACH1_BIG_ENDIAN |
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
@ -421,7 +421,7 @@ static void nouveau_nv40_context_init(drm_device_t *dev,
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACH1_BIG_ENDIAN |
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
RAMFC_WR(DMA_SUBROUTINE, init->put_base);
@ -493,7 +493,7 @@ nouveau_fifo_context_restore(drm_device_t *dev, int channel)
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACH1_BIG_ENDIAN |
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
}

View File

@ -35,7 +35,7 @@
#include "drm_sarea.h"
#include "nouveau_drv.h"
static int meminit_ok=0;
//static int meminit_ok=0;
static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size,
DRMFILE filp)
@ -373,10 +373,9 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6
/*
* Init memory if needed
*/
if (meminit_ok==0)
if (dev_priv->fb_phys == 0)
{
nouveau_mem_init(dev);
meminit_ok=1;
}
/*
@ -446,8 +445,10 @@ alloc_ok:
void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
DRM_INFO("freeing 0x%llx\n", block->start);
if (meminit_ok==0)
if (dev_priv->fb_phys == 0)
{
DRM_ERROR("%s called without init\n", __FUNCTION__);
return;

View File

@ -40,20 +40,6 @@
#include "via_drv.h"
#include "via_3d_reg.h"
#define CMDBUF_ALIGNMENT_SIZE (0x100)
#define CMDBUF_ALIGNMENT_MASK (0x0ff)
/* defines for VIA 3D registers */
#define VIA_REG_STATUS 0x400
#define VIA_REG_TRANSET 0x43C
#define VIA_REG_TRANSPACE 0x440
/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
#define SetReg2DAGP(nReg, nData) { \
*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
*((uint32_t *)(vb) + 1) = (nData); \

View File

@ -58,21 +58,11 @@
(VIA_MAX_CACHELINE_SIZE - 1)) & \
~(VIA_MAX_CACHELINE_SIZE - 1)) + \
VIA_MAX_CACHELINE_SIZE*(lockNo)))
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
#define VIA_NR_TEX_REGIONS 64
#define VIA_LOG_MIN_TEX_REGION_SIZE 16
#endif
#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define VIA_UPLOAD_CTX 0x4
#define VIA_UPLOAD_BUFFERS 0x8
#define VIA_UPLOAD_TEX0 0x10
#define VIA_UPLOAD_TEX1 0x20
#define VIA_UPLOAD_CLIPRECTS 0x40
#define VIA_UPLOAD_ALL 0xff
#define DRM_VIA_FENCE_TYPE_ACCEL 0x00000002
/* VIA specific ioctls */
#define DRM_VIA_ALLOCMEM 0x00

View File

@ -38,6 +38,47 @@ static struct pci_device_id pciidlist[] = {
viadrv_PCI_IDS
};
#ifdef VIA_HAVE_FENCE
static drm_fence_driver_t via_fence_driver = {
.num_classes = 1,
.wrap_diff = (1 << 30),
.flush_diff = (1 << 20),
.sequence_mask = 0xffffffffU,
.lazy_capable = 1,
.emit = via_fence_emit_sequence,
.poke_flush = via_poke_flush,
.has_irq = via_fence_has_irq,
};
#endif
#ifdef VIA_HAVE_BUFFER
/**
* If there's no thrashing, this is the preferred memory type order.
*/
static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
/**
* If we have thrashing, most memory will be evicted to TT anyway, so we might as well
* just move the new buffer into TT from the start.
*/
static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
static drm_bo_driver_t via_bo_driver = {
.mem_type_prio = via_mem_prios,
.mem_busy_prio = via_busy_prios,
.num_mem_type_prio = ARRAY_SIZE(via_mem_prios),
.num_mem_busy_prio = ARRAY_SIZE(via_busy_prios),
.create_ttm_backend_entry = via_create_ttm_backend_entry,
.fence_type = via_fence_types,
.invalidate_caches = via_invalidate_caches,
.init_mem_type = via_init_mem_type,
.evict_mask = via_evict_mask,
.move = NULL,
};
#endif
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
@ -57,8 +98,9 @@ static struct drm_driver driver = {
.dma_quiescent = via_driver_dma_quiescent,
.dri_library_name = dri_library_name,
.reclaim_buffers = drm_core_reclaim_buffers,
.reclaim_buffers_locked = NULL,
#ifdef VIA_HAVE_CORE_MM
.reclaim_buffers_locked = via_reclaim_buffers_locked,
.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
#endif
.get_map_ofs = drm_core_get_map_ofs,
@ -79,7 +121,12 @@ static struct drm_driver driver = {
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
},
#ifdef VIA_HAVE_FENCE
.fence_driver = &via_fence_driver,
#endif
#ifdef VIA_HAVE_BUFFER
.bo_driver = &via_bo_driver,
#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = VIA_DRM_DRIVER_DATE,

View File

@ -32,6 +32,27 @@
#include "via_verifier.h"
/*
* Registers go here.
*/
#define CMDBUF_ALIGNMENT_SIZE (0x100)
#define CMDBUF_ALIGNMENT_MASK (0x0ff)
/* defines for VIA 3D registers */
#define VIA_REG_STATUS 0x400
#define VIA_REG_TRANSET 0x43C
#define VIA_REG_TRANSPACE 0x440
/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
#if defined(__linux__)
#include "via_dmablit.h"
@ -41,6 +62,8 @@
*/
#define VIA_HAVE_DMABLIT 1
#define VIA_HAVE_CORE_MM 1
#define VIA_HAVE_FENCE 1
#define VIA_HAVE_BUFFER 1
#endif
#define VIA_PCI_BUF_SIZE 60000
@ -103,6 +126,12 @@ typedef struct drm_via_private {
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
#endif
uint32_t dma_diff;
#ifdef VIA_HAVE_FENCE
spinlock_t fence_lock;
uint32_t emit_0_sequence;
int have_idlelock;
struct timer_list fence_timer;
#endif
} drm_via_private_t;
enum via_family {
@ -163,4 +192,26 @@ extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
extern void via_init_dmablit(drm_device_t *dev);
#endif
#ifdef VIA_HAVE_FENCE
extern void via_fence_timer(unsigned long data);
extern void via_poke_flush(drm_device_t * dev, uint32_t class);
extern int via_fence_emit_sequence(drm_device_t * dev, uint32_t class,
uint32_t flags,
uint32_t * sequence,
uint32_t * native_type);
extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,
uint32_t flags);
#endif
#ifdef VIA_HAVE_BUFFER
extern drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t *dev);
extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type);
extern int via_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
extern int via_init_mem_type(drm_device_t *dev, uint32_t type,
drm_mem_type_manager_t *man);
extern uint32_t via_evict_mask(drm_buffer_object_t *bo);
extern int via_move(drm_buffer_object_t *bo, int evict,
int no_wait, drm_bo_mem_reg_t *new_mem);
#endif
#endif

View File

@ -28,6 +28,7 @@
static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv = dev->dev_private;
int ret = 0;
DRM_DEBUG("%s\n", __FUNCTION__);
@ -64,8 +65,22 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
#ifdef VIA_HAVE_DMABLIT
via_init_dmablit( dev );
#endif
#ifdef VIA_HAVE_FENCE
dev_priv->emit_0_sequence = 0;
dev_priv->have_idlelock = 0;
spin_lock_init(&dev_priv->fence_lock);
init_timer(&dev_priv->fence_timer);
dev_priv->fence_timer.function = &via_fence_timer;
dev_priv->fence_timer.data = (unsigned long) dev;
#endif /* VIA_HAVE_FENCE */
dev->dev_private = (void *)dev_priv;
return 0;
#ifdef VIA_HAVE_BUFFER
ret = drm_bo_driver_init(dev);
if (ret)
DRM_ERROR("Could not initialize buffer object driver.\n");
#endif
return ret;
}
int via_do_cleanup_map(drm_device_t * dev)

View File

@ -182,7 +182,7 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
drm_bo_type_dc,
DRM_BO_FLAG_READ |
DRM_BO_FLAG_WRITE |
DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_NO_MOVE, 0, &buf));
DRM_BO_FLAG_MEM_LOCAL /*| DRM_BO_FLAG_NO_MOVE*/, 0, &buf));
curTime = fastrdtsc();
*ticks++ = time_diff(oldTime, curTime);
@ -260,8 +260,8 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
oldTime = fastrdtsc();
ret = drmBOValidate(ctx->drmFD, &buf,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_BIND_CACHED,
DRM_BO_MASK_MEM | DRM_BO_FLAG_BIND_CACHED, DRM_BO_HINT_DONT_FENCE);
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING,
DRM_BO_MASK_MEMTYPE | DRM_BO_FLAG_FORCE_CACHING, DRM_BO_HINT_DONT_FENCE);
curTime = fastrdtsc();
drmUnlock(ctx->drmFD, ctx->hwContext);
@ -304,7 +304,7 @@ static void
testAGP(TinyDRIContext * ctx)
{
unsigned long ticks[128], *pTicks;
unsigned long size = 4096 * 1024;
unsigned long size = 8 * 1024;
int ret;
ret = benchmarkBuffer(ctx, size, ticks);