Make vm handle buffer objects instead of ttm objects.

Remove ttm objects.
Make vm aware of PCI memory type buffer objects.
(Only works for pre 2.6.16 kernels for now).
main
Thomas Hellstrom 2007-02-02 14:47:44 +01:00
parent dd733dea38
commit c269d560e4
9 changed files with 466 additions and 422 deletions

View File

@@ -1014,7 +1014,6 @@ typedef struct drm_buffer_object{
*/
atomic_t usage;
drm_ttm_object_t *ttm_object;
unsigned long buffer_start;
drm_bo_type_t type;
unsigned long offset;
@@ -1037,6 +1036,7 @@ typedef struct drm_buffer_object{
/* For vm */
drm_map_list_t map_list;
drm_mm_node_t *node;
uint32_t memory_type;
drm_ttm_t *ttm;
@@ -1485,6 +1485,11 @@ extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(drm_device_t *dev);
extern int drm_bo_driver_init(drm_device_t *dev);
extern int drm_bo_pci_offset(const drm_buffer_object_t *bo,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size);
extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,

View File

@@ -32,30 +32,30 @@
#include "drmP.h"
/*
* Buffer object locking policy:
* Lock dev->struct_mutex;
* Increase usage
* Unlock dev->struct_mutex;
* Lock buffer->mutex;
* Do whatever you want;
* Unlock buffer->mutex;
* Decrease usage. Call destruction if zero.
* Locking may look a bit complicated but isn't really:
*
* User object visibility ups usage just once, since it has its own
* refcounting.
* The buffer usage atomic_t needs to be protected by dev->struct_mutex
* when there is a chance that it can be zero before or after the operation.
*
* dev->struct_mutex also protects all lists and list heads, as well as hash tables
* and hash heads.
*
* Destruction:
* lock dev->struct_mutex;
* Verify that usage is zero. Otherwise unlock and continue.
* Destroy object.
* unlock dev->struct_mutex;
* bo->mutex protects the buffer object itself excluding the usage field.
* bo->mutex also protects the buffer list heads, so to manipulate those, we need
* both the bo->mutex and the dev->struct_mutex.
*
* Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
* complicated. When dev->struct_mutex is released to grab bo->mutex, the list
* traversal will, in general, need to be restarted.
*
* Mutex and spinlock locking orders:
* 1.) Buffer mutex
* 2.) Refer to ttm locking orders.
*/
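A minimal sketch of the locking order described above (illustration only, not part of the patch; the helper name is made up), for code that needs to touch both the buffer object and its list heads:

static void example_update_bo_lists(drm_buffer_object_t *bo)
{
        drm_device_t *dev = bo->dev;

        mutex_lock(&bo->mutex);             /* 1.) buffer mutex */
        mutex_lock(&dev->struct_mutex);     /* 2.) struct mutex */

        /* The buffer object and its list heads may both be manipulated here. */

        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&bo->mutex);
}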
static void drm_bo_destroy_locked(drm_buffer_object_t *bo);
static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo);
static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo);
static void drm_bo_unmap_virtual(drm_buffer_object_t *bo);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
@@ -110,6 +110,7 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict,
int ret;
if (bo->mm_node) {
drm_bo_unmap_virtual(bo);
mutex_lock(&dev->struct_mutex);
if (evict)
ret = drm_evict_ttm(bo->ttm);
@@ -278,12 +279,9 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. "
"Bad. Continuing anyway\n");
}
drm_destroy_ttm(bo->ttm);
bo->ttm = NULL;
}
if (bo->ttm_object) {
drm_ttm_object_deref_locked(dev, bo->ttm_object);
}
atomic_dec(&bm->count);
drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
@@ -362,7 +360,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
if (atomic_dec_and_test(&bo->usage)) {
drm_bo_destroy_locked(bo);
@@ -371,8 +369,11 @@ static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
drm_bo_usage_deref_locked(drm_user_object_entry(uo, drm_buffer_object_t,
base));
drm_buffer_object_t *bo =
drm_user_object_entry(uo, drm_buffer_object_t, base);
drm_bo_takedown_vm_locked(bo);
drm_bo_usage_deref_locked(bo);
}
static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
@@ -608,6 +609,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start);
drm_bo_unmap_virtual(bo);
mutex_lock(&dev->struct_mutex);
ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
bo->mm_node->start);
@@ -927,13 +929,7 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
rep->flags = bo->flags;
rep->size = bo->num_pages * PAGE_SIZE;
rep->offset = bo->offset;
if (bo->ttm_object) {
rep->arg_handle = bo->ttm_object->map_list.user_token;
} else {
rep->arg_handle = 0;
}
rep->arg_handle = bo->map_list.user_token;
rep->mask = bo->mask;
rep->buffer_start = bo->buffer_start;
rep->fence_flags = bo->fence_type;
@@ -1322,19 +1318,21 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
{
drm_device_t *dev = bo->dev;
drm_ttm_object_t *to = NULL;
int ret = 0;
uint32_t ttm_flags = 0;
bo->ttm_object = NULL;
bo->ttm = NULL;
bo->map_list.user_token = 0ULL;
switch (bo->type) {
case drm_bo_type_dc:
mutex_lock(&dev->struct_mutex);
ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
ttm_flags, &to);
ret = drm_bo_setup_vm_locked(bo);
mutex_unlock(&dev->struct_mutex);
if (ret)
break;
bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
if (!bo->ttm)
ret = -ENOMEM;
break;
case drm_bo_type_user:
case drm_bo_type_fake:
@@ -1345,14 +1343,6 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
break;
}
if (ret) {
return ret;
}
if (to) {
bo->ttm_object = to;
bo->ttm = drm_ttm_from_object(to);
}
return ret;
}
@@ -1384,7 +1374,6 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo,
bo->mm_node = NULL;
bo->ttm = NULL;
bo->ttm_object = NULL;
bo->fence = NULL;
bo->flags = 0;
@@ -2023,3 +2012,211 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return 0;
}
/*
* buffer object vm functions.
*/
/**
* \c Get the PCI offset for the buffer object memory.
*
* \param bo The buffer object.
* \param bus_base On return the base of the PCI region
* \param bus_offset On return the byte offset into the PCI region
* \param bus_size On return the byte size of the buffer object or zero if
* the buffer object memory is not accessible through a PCI region.
* \return Failure indication.
*
* Returns -EINVAL if the buffer object is currently not mappable.
* Otherwise returns zero. Call with bo->mutex held.
*/
int drm_bo_pci_offset(const drm_buffer_object_t *bo,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
drm_mem_type_manager_t *man = &bm->man[bo->mem_type];
*bus_size = 0;
if (bo->type != drm_bo_type_dc)
return -EINVAL;
if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
return -EINVAL;
if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
drm_ttm_t *ttm = bo->ttm;
if (!bo->ttm) {
return -EINVAL;
}
drm_ttm_fixup_caching(ttm);
if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
return 0;
if (ttm->be->flags & DRM_BE_FLAG_CMA)
return 0;
*bus_base = ttm->be->aperture_base;
} else {
*bus_base = man->io_offset;
}
*bus_offset = bo->mm_node->start << PAGE_SHIFT;
*bus_size = bo->num_pages << PAGE_SHIFT;
return 0;
}
/**
* \c Return a kernel virtual address to the buffer object PCI memory.
*
* \param bo The buffer object.
* \return Failure indication.
*
* Returns -EINVAL if the buffer object is currently not mappable.
* Returns -ENOMEM if the ioremap operation failed.
* Otherwise returns zero.
*
* After a successful call, bo->iomap contains the virtual address, or NULL
* if the buffer object content is not accessible through PCI space.
* Call with bo->mutex held.
*/
int drm_bo_ioremap(drm_buffer_object_t *bo)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
drm_mem_type_manager_t *man = &bm->man[bo->mem_type];
unsigned long bus_offset;
unsigned long bus_size;
unsigned long bus_base;
int ret;
BUG_ON(bo->iomap);
ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
if (ret || bus_size == 0)
return ret;
if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
bo->iomap = (void *) (((u8 *)man->io_addr) + bus_offset);
else {
bo->iomap = ioremap_nocache(bus_base + bus_offset, bus_size);
if (!bo->iomap)
return -ENOMEM;
}
return 0;
}
/**
* \c Unmap mapping obtained using drm_bo_ioremap
*
* \param bo The buffer object.
*
* Call with bo->mutex held.
*/
void drm_bo_iounmap(drm_buffer_object_t *bo)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm;
drm_mem_type_manager_t *man;
bm = &dev->bm;
man = &bm->man[bo->mem_type];
if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
iounmap(bo->iomap);
bo->iomap = NULL;
}
/**
* \c Kill all user-space virtual mappings of this buffer object.
*
* \param bo The buffer object.
*
* Call with bo->mutex held.
*/
void drm_bo_unmap_virtual(drm_buffer_object_t *bo)
{
drm_device_t *dev = bo->dev;
loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
loff_t holelen = ((loff_t) bo->num_pages) << PAGE_SHIFT;
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo)
{
drm_map_list_t *list = &bo->map_list;
drm_local_map_t *map;
drm_device_t *dev = bo->dev;
if (list->user_token) {
drm_ht_remove_item(&dev->map_hash, &list->hash);
list->user_token = 0;
}
if (list->file_offset_node) {
drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
map = list->map;
if (!map)
return;
drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
list->map = NULL;
list->user_token = 0ULL;
drm_bo_usage_deref_locked(bo);
}
static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo)
{
drm_map_list_t *list = &bo->map_list;
drm_local_map_t *map;
drm_device_t *dev = bo->dev;
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
if (!list->map)
return -ENOMEM;
map = list->map;
map->offset = 0;
map->type = _DRM_TTM;
map->flags = _DRM_REMOVABLE;
map->size = bo->num_pages * PAGE_SIZE;
atomic_inc(&bo->usage);
map->handle = (void *) bo;
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
bo->num_pages, 0, 0);
if (!list->file_offset_node) {
drm_bo_takedown_vm_locked(bo);
return -ENOMEM;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
bo->num_pages, 0);
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
drm_bo_takedown_vm_locked(bo);
return -ENOMEM;
}
list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
return 0;
}
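The user_token set up here is what drm_bo_fill_rep_arg() now reports back as rep->arg_handle (see the drm_bo.c hunk above), and user space is expected to pass it as the mmap offset on the DRM file descriptor so that drm_mmap() finds the object through dev->map_hash. A rough user-space sketch, assuming nothing beyond standard mmap(2) (the wrapper name is made up):

#include <stddef.h>
#include <sys/mman.h>

static void *example_map_bo(int drm_fd, unsigned long long arg_handle,
                            unsigned long size)
{
        /* arg_handle is the rep->arg_handle value returned by the kernel. */
        void *virt = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                          drm_fd, (off_t) arg_handle);

        return (virt == MAP_FAILED) ? NULL : virt;
}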

View File

@@ -160,7 +160,7 @@ void free_nopage_retry(void)
}
}
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
@@ -171,7 +171,7 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
data.address = address;
data.vma = vma;
drm_vm_ttm_fault(vma, &data);
drm_bo_vm_fault(vma, &data);
switch (data.type) {
case VM_FAULT_OOM:
return NOPAGE_OOM;

View File

@@ -201,8 +201,8 @@ extern int drm_map_page_into_agp(struct page *page);
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);
struct fault_data;
extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data);
extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data);
#define NOPAGE_REFAULT get_nopage_retry()
#endif
@@ -230,9 +230,9 @@ struct fault_data {
extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
#endif

View File

@@ -79,29 +79,28 @@ static void ttm_free_pages(drm_ttm_t *ttm)
ttm->pages = NULL;
}
/*
* Unmap all vma pages from vmas mapping this ttm.
*/
static int unmap_vma_pages(drm_ttm_t * ttm)
struct page *drm_ttm_alloc_page(void)
{
drm_device_t *dev = ttm->dev;
loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
struct page *page;
#ifdef DRM_ODD_MM_COMPAT
int ret;
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
if (drm_alloc_memctl(PAGE_SIZE)) {
return NULL;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return NULL;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_finish_unmap(ttm);
#endif
return 0;
return page;
}
/*
* Change caching policy for the linear kernel map
* for range of pages in a ttm.
@@ -154,13 +153,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
if (!ttm)
return 0;
if (atomic_read(&ttm->vma_count) > 0) {
ttm->destroy = 1;
DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
return -EBUSY;
}
DRM_DEBUG("Destroying a ttm\n");
DRM_ERROR("Drm destroy ttm\n");
#ifdef DRM_ODD_MM_COMPAT
BUG_ON(!list_empty(&ttm->vma_list));
@@ -193,11 +186,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
DRM_ERROR("Erroneous map count. "
"Leaking page mappings.\n");
}
/*
* End debugging.
*/
__free_page(*cur_page);
drm_free_memctl(PAGE_SIZE);
--bm->cur_pages;
@@ -225,19 +213,9 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
page = drm_ttm_alloc_page();
if (!page)
return -ENOMEM;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return -ENOMEM;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
ttm->pages[i] = page;
++bm->cur_pages;
}
@@ -251,7 +229,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
* Initialize a ttm.
*/
static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size)
{
drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
drm_ttm_t *ttm;
@@ -303,26 +281,15 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
int drm_evict_ttm(drm_ttm_t * ttm)
{
drm_ttm_backend_t *be = ttm->be;
int ret;
switch (ttm->state) {
case ttm_bound:
if (be->needs_ub_cache_adjust(be)) {
ret = unmap_vma_pages(ttm);
if (ret) {
return ret;
}
}
if (ttm->state == ttm_bound)
be->unbind(be);
break;
default:
break;
}
ttm->state = ttm_evicted;
return 0;
}
void drm_fixup_ttm_caching(drm_ttm_t * ttm)
void drm_ttm_fixup_caching(drm_ttm_t * ttm)
{
if (ttm->state == ttm_evicted) {
@@ -344,7 +311,7 @@ int drm_unbind_ttm(drm_ttm_t * ttm)
if (ret)
return ret;
drm_fixup_ttm_caching(ttm);
drm_ttm_fixup_caching(ttm);
return 0;
}
@@ -366,10 +333,6 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
return ret;
if (ttm->state == ttm_unbound && !cached) {
ret = unmap_vma_pages(ttm);
if (ret)
return ret;
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
#ifdef DRM_ODD_MM_COMPAT
@@ -402,120 +365,3 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
return 0;
}
/*
* dev->struct_mutex locked.
*/
static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
{
drm_map_list_t *list = &object->map_list;
drm_local_map_t *map;
if (list->user_token)
drm_ht_remove_item(&dev->map_hash, &list->hash);
if (list->file_offset_node) {
drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
map = list->map;
if (map) {
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
if (ttm) {
if (drm_destroy_ttm(ttm) != -EBUSY) {
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
} else {
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
}
void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
{
if (atomic_dec_and_test(&to->usage)) {
drm_ttm_object_remove(dev, to);
}
}
void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
{
if (atomic_dec_and_test(&to->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&to->usage) == 0)
drm_ttm_object_remove(dev, to);
mutex_unlock(&dev->struct_mutex);
}
}
/*
* Create a ttm and add it to the drm book-keeping.
* dev->struct_mutex locked.
*/
int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
uint32_t flags, drm_ttm_object_t ** ttm_object)
{
drm_ttm_object_t *object;
drm_map_list_t *list;
drm_local_map_t *map;
drm_ttm_t *ttm;
object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
if (!object)
return -ENOMEM;
object->flags = flags;
list = &object->map_list;
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
if (!list->map) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
map = list->map;
ttm = drm_init_ttm(dev, size);
if (!ttm) {
DRM_ERROR("Could not create ttm\n");
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
map->offset = (unsigned long)ttm;
map->type = _DRM_TTM;
map->flags = _DRM_REMOVABLE;
map->size = ttm->num_pages * PAGE_SIZE;
map->handle = (void *)object;
/*
* Add a one-page "hole" to the block size to avoid the mm subsystem
* merging vmas.
* FIXME: Is this really needed?
*/
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
ttm->num_pages + 1, 0, 0);
if (!list->file_offset_node) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
ttm->num_pages + 1, 0);
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
ttm->mapping_offset = list->hash.key;
atomic_set(&object->usage, 1);
*ttm_object = object;
return 0;
}

View File

@@ -86,24 +86,10 @@ typedef struct drm_ttm {
} drm_ttm_t;
typedef struct drm_ttm_object {
atomic_t usage;
uint32_t flags;
drm_map_list_t map_list;
} drm_ttm_object_t;
extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
uint32_t flags,
drm_ttm_object_t ** ttm_object);
extern void drm_ttm_object_deref_locked(struct drm_device *dev,
drm_ttm_object_t * to);
extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
drm_ttm_object_t * to);
extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
uint32_t handle,
int check_owner);
extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
extern struct page *drm_ttm_alloc_page(void);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern int drm_unbind_ttm(drm_ttm_t * ttm);
/*
@@ -111,7 +97,7 @@ extern int drm_unbind_ttm(drm_ttm_t * ttm);
*/
extern int drm_evict_ttm(drm_ttm_t * ttm);
extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
@@ -120,12 +106,6 @@ extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
*/
extern int drm_destroy_ttm(drm_ttm_t * ttm);
extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
{
return (drm_ttm_t *) to->map_list.map->offset;
}
#define DRM_MASK_VAL(dest, mask, val) \
(dest) = ((dest) & ~(mask)) | ((val) & (mask));

View File

@@ -41,8 +41,9 @@
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static void drm_vm_ttm_close(struct vm_area_struct *vma);
static void drm_vm_ttm_open(struct vm_area_struct *vma);
static int drm_bo_mmap_locked(struct vm_area_struct *vma,
struct file *filp,
drm_local_map_t *map);
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
@@ -158,93 +159,6 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
static
#endif
struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
unsigned long pfn;
int err;
pgprot_t pgprot;
if (!map) {
data->type = VM_FAULT_OOM;
return NULL;
}
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
return NULL;
}
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
/*
* Perhaps retry here?
*/
mutex_lock(&dev->struct_mutex);
drm_fixup_ttm_caching(ttm);
bm = &dev->bm;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
data->type = VM_FAULT_OOM;
goto out;
}
page = ttm->pages[page_offset] =
alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
data->type = VM_FAULT_OOM;
goto out;
}
++bm->cur_pages;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
}
if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) &&
!(ttm->be->flags & DRM_BE_FLAG_CMA)) {
pfn = ttm->aper_offset + page_offset +
(ttm->be->aperture_base >> PAGE_SHIFT);
pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
} else {
pfn = page_to_pfn(page);
pgprot = vma->vm_page_prot;
}
err = vm_insert_pfn(vma, address, pfn, pgprot);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out:
mutex_unlock(&dev->struct_mutex);
return NULL;
}
#endif
/**
* \c nopage method for shared virtual memory.
*
@@ -504,20 +418,6 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
static struct vm_operations_struct drm_vm_ttm_ops = {
.nopage = drm_vm_ttm_nopage,
.open = drm_vm_ttm_open,
.close = drm_vm_ttm_close,
};
#else
static struct vm_operations_struct drm_vm_ttm_ops = {
.fault = drm_vm_ttm_fault,
.open = drm_vm_ttm_open,
.close = drm_vm_ttm_close,
};
#endif
/**
* \c open method for shared virtual memory.
*
@@ -555,28 +455,6 @@ static void drm_vm_open(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
static void drm_vm_ttm_open_locked(struct vm_area_struct *vma) {
drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
drm_ttm_t *ttm;
drm_vm_open_locked(vma);
ttm = (drm_ttm_t *) map->offset;
atomic_inc(&ttm->vma_count);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_add_vma(ttm, vma);
#endif
}
static void drm_vm_ttm_open(struct vm_area_struct *vma) {
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_ttm_open_locked(vma);
mutex_unlock(&dev->struct_mutex);
}
/**
* \c close method for all virtual memory types.
*
@@ -611,34 +489,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
}
static void drm_vm_ttm_close(struct vm_area_struct *vma)
{
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
drm_ttm_t *ttm;
drm_device_t *dev;
int ret;
drm_vm_close(vma);
if (map) {
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_delete_vma(ttm, vma);
#endif
if (atomic_dec_and_test(&ttm->vma_count)) {
if (ttm->destroy) {
ret = drm_destroy_ttm(ttm);
BUG_ON(ret);
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
mutex_unlock(&dev->struct_mutex);
}
return;
}
/**
* mmap DMA memory.
*
@@ -834,17 +684,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = (void *)map;
vma->vm_flags |= VM_RESERVED;
break;
case _DRM_TTM: {
vma->vm_ops = &drm_vm_ttm_ops;
vma->vm_private_data = (void *) map;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_map_bound(vma);
#endif
drm_vm_ttm_open_locked(vma);
return 0;
}
case _DRM_TTM:
return drm_bo_mmap_locked(vma, filp, map);
default:
return -EINVAL; /* This should never happen. */
}
@@ -868,3 +709,179 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
EXPORT_SYMBOL(drm_mmap);
/**
* buffer object vm functions.
*/
/**
* \c Pagefault method for buffer objects.
*
* \param vma Virtual memory area.
* \param data Fault data on failure or refault.
* \return Always NULL as we insert pfns directly.
*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
static
#endif
struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_local_map_t *map;
unsigned long page_offset;
struct page *page = NULL;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
unsigned long pfn;
int err;
pgprot_t pgprot;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
mutex_lock(&bo->mutex);
map = bo->map_list.map;
if (!map) {
data->type = VM_FAULT_OOM;
goto out_unlock;
}
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
dev = bo->dev;
err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
if (err) {
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
pgprot = drm_io_prot(_DRM_AGP, vma);
} else {
bm = &dev->bm;
ttm = bo->ttm;
page = ttm->pages[page_offset];
if (!page) {
page = drm_ttm_alloc_page();
if (!page) {
data->type = VM_FAULT_OOM;
goto out_unlock;
}
ttm->pages[page_offset] = page;
++bm->cur_pages;
}
pfn = page_to_pfn(page);
pgprot = vma->vm_page_prot;
}
err = vm_insert_pfn(vma, address, pfn, pgprot);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out_unlock:
mutex_unlock(&bo->mutex);
return NULL;
}
#endif
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_vm_open_locked(vma);
atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_vm_add_vma(bo, vma);
#endif
}
/**
* \c vma open method for buffer objects.
*
* \param vma virtual memory area.
*/
static void drm_bo_vm_open(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_device_t *dev = bo->dev;
mutex_lock(&dev->struct_mutex);
drm_bo_vm_open_locked(vma);
mutex_unlock(&dev->struct_mutex);
}
/**
* \c vma close method for buffer objects.
*
* \param vma virtual memory area.
*/
static void drm_bo_vm_close(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_device_t *dev = bo->dev;
drm_vm_close(vma);
if (bo) {
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_vm_delete_vma(bo, vma);
#endif
drm_bo_usage_deref_locked(bo);
mutex_unlock(&dev->struct_mutex);
}
return;
}
static struct vm_operations_struct drm_bo_vm_ops = {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
.nopage = drm_bo_vm_nopage,
#else
.fault = drm_bo_vm_fault,
#endif
.open = drm_bo_vm_open,
.close = drm_bo_vm_close,
};
/**
* mmap buffer object memory.
*
* \param vma virtual memory area.
* \param filp file pointer.
* \param map The buffer object drm map.
* \return zero on success or a negative number on failure.
*/
int drm_bo_mmap_locked(struct vm_area_struct *vma,
struct file *filp,
drm_local_map_t *map)
{
vma->vm_ops = &drm_bo_vm_ops;
vma->vm_private_data = map->handle;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_map_bound(vma);
#endif
return 0;
}

View File

@@ -70,7 +70,6 @@ int i915_init_mem_type(drm_device_t *dev, uint32_t type,
{
switch(type) {
case DRM_BO_MEM_LOCAL:
break;
case DRM_BO_MEM_TT:
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CACHED;

View File

@@ -304,7 +304,7 @@ static void
testAGP(TinyDRIContext * ctx)
{
unsigned long ticks[128], *pTicks;
unsigned long size = 4096 * 1024;
unsigned long size = 8 * 1024;
int ret;
ret = benchmarkBuffer(ctx, size, ticks);