From 3024f23c6551e219b0236041a8205bf1bc60ed94 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 31 Jan 2007 14:50:57 +0100 Subject: [PATCH 01/34] memory manager: Make device driver aware of different memory types. Memory types are either fixed (on-card or pre-bound AGP) or not fixed (dynamically bound) to an aperture. They also carry information about: 1) Whether they can be mapped cached. 2) Whether they are at all mappable. 3) Whether they need an ioremap to be accessible from kernel space. In this way VRAM memory and, for example, pre-bound AGP appear identical to the memory manager. This also makes support for unmappable VRAM simple to implement. --- linux-core/drmP.h | 29 ++++++++++---- linux-core/drm_bo.c | 87 ++++++++++++++++++++++++---------------- linux-core/i915_buffer.c | 33 +++++++++++++++ linux-core/i915_drv.c | 5 +-- shared-core/drm.h | 17 ++++++-- shared-core/i915_drv.h | 6 ++- 6 files changed, 127 insertions(+), 50 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 9c748e6e..c0064bb7 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -650,17 +650,30 @@ typedef struct drm_ref_object { #include "drm_ttm.h" + +typedef struct drm_mem_type_manager { + int has_type; + int use_type; + drm_mm_t manager; + struct list_head lru; + struct list_head pinned; + uint32_t flags; + unsigned long io_offset; + unsigned long io_size; + void *io_addr; +} drm_mem_type_manager_t; + /* * buffer object driver */ typedef struct drm_bo_driver{ - int cached[DRM_BO_MEM_TYPES]; - drm_local_map_t *iomap[DRM_BO_MEM_TYPES]; drm_ttm_backend_t *(*create_ttm_backend_entry) (struct drm_device *dev); int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type); int (*invalidate_caches)(struct drm_device *dev, uint32_t flags); + int (*init_mem_type)(struct drm_device *dev, uint32_t type, + drm_mem_type_manager_t *man); } drm_bo_driver_t; @@ -782,16 +795,18 @@ typedef struct drm_fence_manager{ atomic_t count; } drm_fence_manager_t; +#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ +#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ +#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Supports cached binding */ +#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap + before kernel access. */ + typedef struct drm_buffer_manager{ struct mutex init_mutex; int nice_mode; int initialized; drm_file_t *last_to_validate; - int has_type[DRM_BO_MEM_TYPES]; - int use_type[DRM_BO_MEM_TYPES]; - drm_mm_t manager[DRM_BO_MEM_TYPES]; - struct list_head lru[DRM_BO_MEM_TYPES]; - struct list_head pinned[DRM_BO_MEM_TYPES]; + drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; struct list_head unfenced; struct list_head ddestroy; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 2b960c75..b72e9912 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -74,8 +74,10 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, drm_buffer_manager_t * bm) { struct list_head *list; - bo->mem_type = 0; + drm_mem_type_manager_t *man; + bo->mem_type = 0; + switch(bo->flags & DRM_BO_MASK_MEM) { case DRM_BO_FLAG_MEM_TT: bo->mem_type = DRM_BO_MEM_TT; @@ -89,8 +91,10 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, default: BUG_ON(1); } + + man = &bm->man[bo->mem_type]; list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? 
- &bm->pinned[bo->mem_type] : &bm->lru[bo->mem_type]; + &man->pinned : &man->lru; list_add_tail(&bo->lru, list); return; } @@ -543,7 +547,8 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, drm_mm_node_t *node; drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *entry; - drm_mm_t *mm = &bm->manager[mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem_type]; + drm_mm_t *mm = &man->manager; struct list_head *lru; unsigned long size = bo->num_pages; int ret; @@ -554,7 +559,7 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, if (node) break; - lru = &bm->lru[mem_type]; + lru = &man->lru; if (lru->next == lru) break; @@ -638,7 +643,6 @@ static int drm_bo_new_flags(drm_device_t * dev, { uint32_t new_flags = 0; uint32_t new_props; - drm_bo_driver_t *driver = dev->driver->bo_driver; drm_buffer_manager_t *bm = &dev->bm; unsigned i; @@ -647,7 +651,7 @@ static int drm_bo_new_flags(drm_device_t * dev, */ for (i = 0; i < DRM_BO_MEM_TYPES; ++i) { - if (!bm->use_type[i]) + if (!bm->man[i].use_type) new_mask &= ~drm_bo_type_flags(i); } @@ -659,14 +663,18 @@ static int drm_bo_new_flags(drm_device_t * dev, } if (new_mask & DRM_BO_FLAG_BIND_CACHED) { if (((new_mask & DRM_BO_FLAG_MEM_TT) && - !driver->cached[DRM_BO_MEM_TT]) && - ((new_mask & DRM_BO_FLAG_MEM_VRAM) - && !driver->cached[DRM_BO_MEM_VRAM])) { + !(bm->man[DRM_BO_MEM_TT].flags & + _DRM_FLAG_MEMTYPE_CACHED) && + ((new_mask & DRM_BO_FLAG_MEM_VRAM) + && !(bm->man[DRM_BO_MEM_VRAM].flags & + _DRM_FLAG_MEMTYPE_CACHED)))) { new_mask &= ~DRM_BO_FLAG_BIND_CACHED; } else { - if (!driver->cached[DRM_BO_MEM_TT]) + if (!(bm->man[DRM_BO_MEM_TT].flags & + _DRM_FLAG_MEMTYPE_CACHED)) new_flags &= DRM_BO_FLAG_MEM_TT; - if (!driver->cached[DRM_BO_MEM_VRAM]) + if (!(bm->man[DRM_BO_MEM_VRAM].flags & + _DRM_FLAG_MEMTYPE_CACHED)) new_flags &= DRM_BO_FLAG_MEM_VRAM; } } @@ -1735,6 +1743,8 @@ static int drm_bo_force_list_clean(drm_device_t * dev, int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) { drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem_type]; + drm_mem_type_manager_t *local_man = &bm->man[DRM_BO_MEM_LOCAL]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -1742,13 +1752,13 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) return ret; } - if (!bm->has_type[mem_type]) { + if (!man->has_type) { DRM_ERROR("Trying to take down uninitialized " "memory manager type\n"); return ret; } - bm->use_type[mem_type] = 0; - bm->has_type[mem_type] = 0; + man->use_type = 0; + man->has_type = 0; ret = 0; if (mem_type > 0) { @@ -1763,15 +1773,12 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) * Throw out evicted no-move buffers. 
*/ - drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL], - mem_type, 1, 0); - drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1, - 0); - drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1, - 0); + drm_bo_force_list_clean(dev, &local_man->pinned, mem_type, 1, 0); + drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0); - if (drm_mm_clean(&bm->manager[mem_type])) { - drm_mm_takedown(&bm->manager[mem_type]); + if (drm_mm_clean(&man->manager)) { + drm_mm_takedown(&man->manager); } else { ret = -EBUSY; } @@ -1784,6 +1791,7 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) { int ret; drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem_type]; if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type); @@ -1793,11 +1801,11 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1); if (ret) return ret; ret = - drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1); return ret; } @@ -1807,32 +1815,39 @@ static int drm_bo_init_mm(drm_device_t * dev, { drm_buffer_manager_t *bm = &dev->bm; int ret = -EINVAL; + drm_mem_type_manager_t *man; if (type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory type %d\n", type); return ret; } - if (bm->has_type[type]) { + + man = &bm->man[type]; + if (man->has_type) { DRM_ERROR("Memory manager already initialized for type %d\n", type); return ret; } + ret = dev->driver->bo_driver->init_mem_type(dev, type, man); + if (ret) + return ret; + ret = 0; if (type != DRM_BO_MEM_LOCAL) { if (!p_size) { DRM_ERROR("Zero size memory manager type %d\n", type); return ret; } - ret = drm_mm_init(&bm->manager[type], p_offset, p_size); + ret = drm_mm_init(&man->manager, p_offset, p_size); if (ret) return ret; } - bm->has_type[type] = 1; - bm->use_type[type] = 1; + man->has_type = 1; + man->use_type = 1; - INIT_LIST_HEAD(&bm->lru[type]); - INIT_LIST_HEAD(&bm->pinned[type]); + INIT_LIST_HEAD(&man->lru); + INIT_LIST_HEAD(&man->pinned); return 0; } @@ -1847,6 +1862,7 @@ int drm_bo_driver_finish(drm_device_t * dev) drm_buffer_manager_t *bm = &dev->bm; int ret = 0; unsigned i = DRM_BO_MEM_TYPES; + drm_mem_type_manager_t *man; mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -1856,14 +1872,15 @@ int drm_bo_driver_finish(drm_device_t * dev) bm->initialized = 0; while (i--) { - if (bm->has_type[i]) { - bm->use_type[i] = 0; + man = &bm->man[i]; + if (man->has_type) { + man->use_type = 0; if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) { ret = -EBUSY; DRM_ERROR("DRM memory manager type %d " "is not clean.\n", i); } - bm->has_type[i] = 0; + man->has_type = 0; } } mutex_unlock(&dev->struct_mutex); @@ -1875,10 +1892,10 @@ int drm_bo_driver_finish(drm_device_t * dev) if (list_empty(&bm->ddestroy)) { DRM_DEBUG("Delayed destroy list was clean\n"); } - if (list_empty(&bm->lru[0])) { + if (list_empty(&bm->man[0].lru)) { DRM_DEBUG("Swap list was clean\n"); } - if (list_empty(&bm->pinned[0])) { + if (list_empty(&bm->man[0].pinned)) { DRM_DEBUG("NO_MOVE list was clean\n"); } if (list_empty(&bm->unfenced)) { diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 
c3e54468..53002301 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -64,3 +64,36 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) return i915_emit_mi_flush(dev, flush_cmd); } + +int i915_init_mem_type(drm_device_t *dev, uint32_t type, + drm_mem_type_manager_t *man) +{ + switch(type) { + case DRM_BO_MEM_LOCAL: + break; + case DRM_BO_MEM_TT: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + break; + case DRM_BO_MEM_PRIV0: + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned) type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED | + _DRM_FLAG_MEMTYPE_FIXED | + _DRM_FLAG_NEEDS_IOREMAP; + + man->io_addr = NULL; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned) type); + return -EINVAL; + } + return 0; +} diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 2c5b43d0..64ce3c15 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -51,11 +51,10 @@ static drm_fence_driver_t i915_fence_driver = { #endif #ifdef I915_HAVE_BUFFER static drm_bo_driver_t i915_bo_driver = { - .iomap = {NULL, NULL}, - .cached = {1, 1}, .create_ttm_backend_entry = i915_create_ttm_backend_entry, .fence_type = i915_fence_types, - .invalidate_caches = i915_invalidate_caches + .invalidate_caches = i915_invalidate_caches, + .init_mem_type = i915_init_mem_type, }; #endif diff --git a/shared-core/drm.h b/shared-core/drm.h index 9efb1dc4..38cca882 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -738,8 +738,12 @@ typedef struct drm_fence_arg { #define DRM_BO_FLAG_MEM_TT 0x02000000 /* Vram memory */ #define DRM_BO_FLAG_MEM_VRAM 0x04000000 -/* Unmappable Vram memory */ -#define DRM_BO_FLAG_MEM_VRAM_NM 0x08000000 +/* Up to the driver to define. */ +#define DRM_BO_FLAG_MEM_PRIV0 0x10000000 +#define DRM_BO_FLAG_MEM_PRIV1 0x20000000 +#define DRM_BO_FLAG_MEM_PRIV2 0x40000000 +#define DRM_BO_FLAG_MEM_PRIV3 0x80000000 + /* Memory flag mask */ #define DRM_BO_MASK_MEM 0xFF000000 @@ -823,8 +827,13 @@ typedef struct drm_bo_arg{ #define DRM_BO_MEM_LOCAL 0 #define DRM_BO_MEM_TT 1 #define DRM_BO_MEM_VRAM 2 -#define DRM_BO_MEM_VRAM_NM 3 -#define DRM_BO_MEM_TYPES 2 /* For now. */ +#define DRM_BO_MEM_PRIV0 4 +#define DRM_BO_MEM_PRIV1 5 +#define DRM_BO_MEM_PRIV2 6 +#define DRM_BO_MEM_PRIV3 7 + + +#define DRM_BO_MEM_TYPES 8 /* For now. 
*/ typedef union drm_mm_init_arg{ struct { diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index ef9f3638..be7dd76a 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -126,7 +126,9 @@ typedef struct drm_i915_private { uint32_t flush_pending; uint32_t saved_flush_status; #endif - +#ifdef I915_HAVE_BUFFER + void *agp_iomap; +#endif spinlock_t swaps_lock; drm_i915_vbl_swap_t vbl_swaps; unsigned int swaps_pending; @@ -187,6 +189,8 @@ extern void i915_poke_flush(drm_device_t *dev); extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev); extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type); extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); +extern int i915_init_mem_type(drm_device_t *dev, uint32_t type, + drm_mem_type_manager_t *man); #endif #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) From 333c6af47a906461678b5a8b2af415936d30babc Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 1 Feb 2007 00:38:57 +0100 Subject: [PATCH 02/34] Protect drm_mmap against disappearing maps. The map lists and hash tables are protected using dev->struct_mutex, but drm_mmap strangely never locked this mutex. --- linux-core/drm_vm.c | 65 +++++++++++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 827a7bdb..b11e09f8 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -42,8 +42,7 @@ static void drm_vm_open(struct vm_area_struct *vma); static void drm_vm_close(struct vm_area_struct *vma); static void drm_vm_ttm_close(struct vm_area_struct *vma); -static int drm_vm_ttm_open(struct vm_area_struct *vma); -static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma); +static void drm_vm_ttm_open(struct vm_area_struct *vma); pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) @@ -511,13 +510,13 @@ static struct vm_operations_struct drm_vm_sg_ops = { #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) static struct vm_operations_struct drm_vm_ttm_ops = { .nopage = drm_vm_ttm_nopage, - .open = drm_vm_ttm_open_wrapper, + .open = drm_vm_ttm_open, .close = drm_vm_ttm_close, }; #else static struct vm_operations_struct drm_vm_ttm_ops = { .fault = drm_vm_ttm_fault, - .open = drm_vm_ttm_open_wrapper, + .open = drm_vm_ttm_open, .close = drm_vm_ttm_close, }; #endif @@ -530,7 +529,7 @@ static struct vm_operations_struct drm_vm_ttm_ops = { * Create a new drm_vma_entry structure as the \p vma private data entry and * add it to drm_device::vmalist. 
*/ -static void drm_vm_open(struct vm_area_struct *vma) +static void drm_vm_open_locked(struct vm_area_struct *vma) { drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; @@ -542,36 +541,43 @@ static void drm_vm_open(struct vm_area_struct *vma) vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); if (vma_entry) { - mutex_lock(&dev->struct_mutex); vma_entry->vma = vma; vma_entry->next = dev->vmalist; vma_entry->pid = current->pid; dev->vmalist = vma_entry; - mutex_unlock(&dev->struct_mutex); } } -static int drm_vm_ttm_open(struct vm_area_struct *vma) { - - drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; - drm_ttm_t *ttm; +static void drm_vm_open(struct vm_area_struct *vma) +{ drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; - drm_vm_open(vma); mutex_lock(&dev->struct_mutex); + drm_vm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); +} + +static void drm_vm_ttm_open_locked(struct vm_area_struct *vma) { + + drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; + drm_ttm_t *ttm; + + drm_vm_open_locked(vma); ttm = (drm_ttm_t *) map->offset; atomic_inc(&ttm->vma_count); #ifdef DRM_ODD_MM_COMPAT drm_ttm_add_vma(ttm, vma); #endif - mutex_unlock(&dev->struct_mutex); - return 0; } -static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma) -{ - drm_vm_ttm_open(vma); +static void drm_vm_ttm_open(struct vm_area_struct *vma) { + drm_file_t *priv = vma->vm_file->private_data; + drm_device_t *dev = priv->head->dev; + + mutex_lock(&dev->struct_mutex); + drm_vm_ttm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); } /** @@ -653,7 +659,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) drm_device_dma_t *dma; unsigned long length = vma->vm_end - vma->vm_start; - lock_kernel(); dev = priv->head->dev; dma = dev->dma; DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", @@ -664,7 +669,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) unlock_kernel(); return -EINVAL; } - unlock_kernel(); if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) { @@ -686,7 +690,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) vma->vm_flags |= VM_RESERVED; /* Don't swap */ vma->vm_file = filp; /* Needed for drm_vm_open() */ - drm_vm_open(vma); + drm_vm_open_locked(vma); return 0; } @@ -719,7 +723,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); * according to the mapping type and remaps the pages. Finally sets the file * pointer and calls vm_open(). 
*/ -int drm_mmap(struct file *filp, struct vm_area_struct *vma) +static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; @@ -839,12 +843,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_file = filp; vma->vm_flags |= VM_RESERVED | VM_IO; #ifdef DRM_ODD_MM_COMPAT - mutex_lock(&dev->struct_mutex); drm_ttm_map_bound(vma); - mutex_unlock(&dev->struct_mutex); #endif - if (drm_vm_ttm_open(vma)) - return -EAGAIN; return 0; } default: @@ -853,7 +853,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_flags |= VM_RESERVED; /* Don't swap */ vma->vm_file = filp; /* Needed for drm_vm_open() */ - drm_vm_open(vma); + drm_vm_open_locked(vma); return 0; } + +int drm_mmap(struct file *filp, struct vm_area_struct *vma) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_mmap_locked(filp, vma); + mutex_unlock(&dev->struct_mutex); + + return ret; +} EXPORT_SYMBOL(drm_mmap); From 9677c5ecc6b97ef75b3141b671fb5cfbbf8a3fa8 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 1 Feb 2007 10:53:07 +0100 Subject: [PATCH 03/34] Prepare for removal of the ttm_object type. --- linux-core/drmP.h | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index c0064bb7..2453c756 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -650,6 +650,11 @@ typedef struct drm_ref_object { #include "drm_ttm.h" +#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ +#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ +#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Supports cached binding */ +#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap + before kernel access. */ typedef struct drm_mem_type_manager { int has_type; @@ -795,11 +800,16 @@ typedef struct drm_fence_manager{ atomic_t count; } drm_fence_manager_t; -#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ -#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ -#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Supports cached binding */ -#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap - before kernel access. 
*/ + +typedef struct drm_bo_mem_region { + drm_mm_node_t *node; + uint32_t memory_type; + drm_ttm_t *ttm; + unsigned long bus_offset; + unsigned long num_pages; + uint32_t vm_flags; +} drm_bo_mem_region_t; + typedef struct drm_buffer_manager{ struct mutex init_mutex; @@ -1005,8 +1015,6 @@ typedef struct drm_buffer_object{ atomic_t usage; drm_ttm_object_t *ttm_object; - drm_ttm_t *ttm; - unsigned long num_pages; unsigned long buffer_start; drm_bo_type_t type; unsigned long offset; @@ -1016,7 +1024,7 @@ typedef struct drm_buffer_object{ uint32_t mask; uint32_t mem_type; - drm_mm_node_t *mm_node; /* MM node for on-card RAM */ + drm_mm_node_t *mm_node; struct list_head lru; struct list_head ddestroy; @@ -1026,6 +1034,16 @@ typedef struct drm_buffer_object{ uint32_t priv_flags; wait_queue_head_t event_queue; struct mutex mutex; + + /* For vm */ + + drm_mm_node_t *node; + uint32_t memory_type; + drm_ttm_t *ttm; + unsigned long bus_offset; + unsigned long num_pages; + uint32_t vm_flags; + } drm_buffer_object_t; #define _DRM_BO_FLAG_UNFENCED 0x00000001 From dd733dea3856e7ddbba7c4c3928ccaba909b4535 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 1 Feb 2007 13:19:05 +0100 Subject: [PATCH 04/34] Fix missing ttm_open_vma call from previous commit. Honour the ttm backend cant-use-aperture flag. --- linux-core/drmP.h | 1 + linux-core/drm_agpsupport.c | 4 ++-- linux-core/drm_compat.c | 17 ++++++++++++----- linux-core/drm_ttm.c | 3 ++- linux-core/drm_ttm.h | 2 +- linux-core/drm_vm.c | 8 +++----- 6 files changed, 21 insertions(+), 14 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2453c756..090bd124 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1043,6 +1043,7 @@ typedef struct drm_buffer_object{ unsigned long bus_offset; unsigned long num_pages; uint32_t vm_flags; + void *iomap; } drm_buffer_object_t; diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 9cdbdaf0..7a692af1 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -720,8 +720,8 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, agp_be->destroy = drm_agp_destroy_ttm; DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE, (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0); - DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA, - (dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0); + DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CMA, + (dev->agp->cant_use_aperture) ? 
DRM_BE_FLAG_CMA : 0); agp_be->drm_map_type = _DRM_AGP; return agp_be; } diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 6bb58424..93d9b95d 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -277,7 +277,9 @@ int drm_ttm_map_bound(struct vm_area_struct *vma) drm_ttm_t *ttm = (drm_ttm_t *) map->offset; int ret = 0; - if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) { + if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && + !(ttm->be->flags & DRM_BE_FLAG_CMA)) { + unsigned long pfn = ttm->aper_offset + (ttm->be->aperture_base >> PAGE_SHIFT); pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma); @@ -286,6 +288,7 @@ int drm_ttm_map_bound(struct vm_area_struct *vma) vma->vm_end - vma->vm_start, pgprot); } + return ret; } @@ -408,10 +411,14 @@ int drm_ttm_remap_bound(drm_ttm_t *ttm) vma_entry_t *v_entry; int ret = 0; - list_for_each_entry(v_entry, &ttm->vma_list, head) { - ret = drm_ttm_map_bound(v_entry->vma); - if (ret) - break; + if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && + !(ttm->be->flags & DRM_BE_FLAG_CMA)) { + + list_for_each_entry(v_entry, &ttm->vma_list, head) { + ret = drm_ttm_map_bound(v_entry->vma); + if (ret) + break; + } } drm_ttm_unlock_mm(ttm); diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index c17c41cb..f1e3ea40 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -162,7 +162,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) DRM_DEBUG("Destroying a ttm\n"); -#ifdef DRM_TTM_ODD_COMPAT +#ifdef DRM_ODD_MM_COMPAT BUG_ON(!list_empty(&ttm->vma_list)); BUG_ON(!list_empty(&ttm->p_mm_list)); #endif @@ -364,6 +364,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) ret = drm_ttm_populate(ttm); if (ret) return ret; + if (ttm->state == ttm_unbound && !cached) { ret = unmap_vma_pages(ttm); if (ret) diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index 796f2317..a7858549 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -46,7 +46,7 @@ #define DRM_BE_FLAG_NEEDS_FREE 0x00000001 #define DRM_BE_FLAG_BOUND_CACHED 0x00000002 -#define DRM_BE_FLAG_CBA 0x00000004 +#define DRM_BE_FLAG_CMA 0x00000004 /* Don't map through aperture */ typedef struct drm_ttm_backend { unsigned long aperture_base; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index b11e09f8..63cf6f56 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -222,11 +222,8 @@ struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, #endif } - if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) { - - /* - * FIXME: Check can't map aperture flag. - */ + if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && + !(ttm->be->flags & DRM_BE_FLAG_CMA)) { pfn = ttm->aper_offset + page_offset + (ttm->be->aperture_base >> PAGE_SHIFT); @@ -845,6 +842,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) #ifdef DRM_ODD_MM_COMPAT drm_ttm_map_bound(vma); #endif + drm_vm_ttm_open_locked(vma); return 0; } default: From c269d560e4d71448cfc9c2ea51eee3d5feafaad4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 2 Feb 2007 14:47:44 +0100 Subject: [PATCH 05/34] Make vm handle buffer objects instead of ttm objects. Remove ttm objects. Make vm aware of PCI memory type buffer objects. (Only works for pre 2.6.16 kernels for now). 
--- linux-core/drmP.h | 7 +- linux-core/drm_bo.c | 291 +++++++++++++++++++++++++----- linux-core/drm_compat.c | 4 +- linux-core/drm_compat.h | 10 +- linux-core/drm_ttm.c | 202 +++------------------ linux-core/drm_ttm.h | 26 +-- linux-core/drm_vm.c | 345 +++++++++++++++++++----------------- linux-core/i915_buffer.c | 1 - tests/ttmtest/src/ttmtest.c | 2 +- 9 files changed, 466 insertions(+), 422 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 090bd124..84a06470 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1014,7 +1014,6 @@ typedef struct drm_buffer_object{ */ atomic_t usage; - drm_ttm_object_t *ttm_object; unsigned long buffer_start; drm_bo_type_t type; unsigned long offset; @@ -1037,6 +1036,7 @@ typedef struct drm_buffer_object{ /* For vm */ + drm_map_list_t map_list; drm_mm_node_t *node; uint32_t memory_type; drm_ttm_t *ttm; @@ -1485,6 +1485,11 @@ extern int drm_bo_ioctl(DRM_IOCTL_ARGS); extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); extern int drm_bo_driver_finish(drm_device_t *dev); extern int drm_bo_driver_init(drm_device_t *dev); +extern int drm_bo_pci_offset(const drm_buffer_object_t *bo, + unsigned long *bus_base, + unsigned long *bus_offset, + unsigned long *bus_size); +extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo); extern int drm_fence_buffer_objects(drm_file_t * priv, struct list_head *list, uint32_t fence_flags, diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b72e9912..8fe5e8ef 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -32,30 +32,30 @@ #include "drmP.h" /* - * Buffer object locking policy: - * Lock dev->struct_mutex; - * Increase usage - * Unlock dev->struct_mutex; - * Lock buffer->mutex; - * Do whatever you want; - * Unlock buffer->mutex; - * Decrease usage. Call destruction if zero. + * Locking may look a bit complicated but isn't really: * - * User object visibility ups usage just once, since it has its own - * refcounting. + * The buffer usage atomic_t needs to be protected by dev->struct_mutex + * when there is a chance that it can be zero before or after the operation. + * + * dev->struct_mutex also protects all lists and list heads. Hash tables and hash + * heads. * - * Destruction: - * lock dev->struct_mutex; - * Verify that usage is zero. Otherwise unlock and continue. - * Destroy object. - * unlock dev->struct_mutex; + * bo->mutex protects the buffer object itself excluding the usage field. + * bo->mutex does also protect the buffer list heads, so to manipulate those, we need + * both the bo->mutex and the dev->struct_mutex. + * + * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit + * complicated. When dev->struct_mutex is released to grab bo->mutex, the list + * traversal will, in general, need to be restarted. * - * Mutex and spinlock locking orders: - * 1.) Buffer mutex - * 2.) Refer to ttm locking orders. 
*/ + + static void drm_bo_destroy_locked(drm_buffer_object_t *bo); +static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo); +static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo); +static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -110,6 +110,7 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, int ret; if (bo->mm_node) { + drm_bo_unmap_virtual(bo); mutex_lock(&dev->struct_mutex); if (evict) ret = drm_evict_ttm(bo->ttm); @@ -278,12 +279,9 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. " "Bad. Continuing anyway\n"); } + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; } - - if (bo->ttm_object) { - drm_ttm_object_deref_locked(dev, bo->ttm_object); - } - atomic_dec(&bm->count); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); @@ -362,7 +360,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) +void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) { if (atomic_dec_and_test(&bo->usage)) { drm_bo_destroy_locked(bo); @@ -371,8 +369,11 @@ static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) { - drm_bo_usage_deref_locked(drm_user_object_entry(uo, drm_buffer_object_t, - base)); + drm_buffer_object_t *bo = + drm_user_object_entry(uo, drm_buffer_object_t, base); + + drm_bo_takedown_vm_locked(bo); + drm_bo_usage_deref_locked(bo); } static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo) @@ -608,6 +609,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start); + drm_bo_unmap_virtual(bo); mutex_lock(&dev->struct_mutex); ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED, bo->mm_node->start); @@ -927,13 +929,7 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, rep->flags = bo->flags; rep->size = bo->num_pages * PAGE_SIZE; rep->offset = bo->offset; - - if (bo->ttm_object) { - rep->arg_handle = bo->ttm_object->map_list.user_token; - } else { - rep->arg_handle = 0; - } - + rep->arg_handle = bo->map_list.user_token; rep->mask = bo->mask; rep->buffer_start = bo->buffer_start; rep->fence_flags = bo->fence_type; @@ -1322,19 +1318,21 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; - drm_ttm_object_t *to = NULL; int ret = 0; - uint32_t ttm_flags = 0; - bo->ttm_object = NULL; bo->ttm = NULL; + bo->map_list.user_token = 0ULL; switch (bo->type) { case drm_bo_type_dc: mutex_lock(&dev->struct_mutex); - ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE, - ttm_flags, &to); + ret = drm_bo_setup_vm_locked(bo); mutex_unlock(&dev->struct_mutex); + if (ret) + break; + bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); + if (!bo->ttm) + ret = -ENOMEM; break; case drm_bo_type_user: case drm_bo_type_fake: @@ -1345,14 +1343,6 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) break; } - if (ret) { - return ret; - } - - if (to) { - bo->ttm_object = to; - bo->ttm = drm_ttm_from_object(to); - } return ret; } @@ -1384,7 +1374,6 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, bo->mm_node = NULL; bo->ttm = NULL; - bo->ttm_object = NULL; 
bo->fence = NULL; bo->flags = 0; @@ -2023,3 +2012,211 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } + +/* + * buffer object vm functions. + */ + +/** + * \c Get the PCI offset for the buffer object memory. + * + * \param bo The buffer object. + * \param bus_base On return the base of the PCI region + * \param bus_offset On return the byte offset into the PCI region + * \param bus_size On return the byte size of the buffer object or zero if + * the buffer object memory is not accessible through a PCI region. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Otherwise returns zero. Call bo->mutex locked. + */ + +int drm_bo_pci_offset(const drm_buffer_object_t *bo, + unsigned long *bus_base, + unsigned long *bus_offset, + unsigned long *bus_size) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; + + *bus_size = 0; + + if (bo->type != drm_bo_type_dc) + return -EINVAL; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) + return -EINVAL; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + drm_ttm_t *ttm = bo->ttm; + + if (!bo->ttm) { + return -EINVAL; + } + + drm_ttm_fixup_caching(ttm); + + if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) + return 0; + if (ttm->be->flags & DRM_BE_FLAG_CMA) + return 0; + + *bus_base = ttm->be->aperture_base; + } else { + *bus_base = man->io_offset; + } + + *bus_offset = bo->mm_node->start << PAGE_SHIFT; + *bus_size = bo->num_pages << PAGE_SHIFT; + + return 0; +} + +/** + * \c Return a kernel virtual address to the buffer object PCI memory. + * + * \param bo The buffer object. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Returns -ENOMEM if the ioremap operation failed. + * Otherwise returns zero. + * + * After a successfull call, bo->iomap contains the virtual address, or NULL + * if the buffer object content is not accessible through PCI space. + * Call bo->mutex locked. + */ + +int drm_bo_ioremap(drm_buffer_object_t *bo) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; + unsigned long bus_offset; + unsigned long bus_size; + unsigned long bus_base; + int ret; + + BUG_ON(bo->iomap); + + ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + if (ret || bus_size == 0) + return ret; + + if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + bo->iomap = (void *) (((u8 *)man->io_addr) + bus_offset); + else { + bo->iomap = ioremap_nocache(bus_base + bus_offset, bus_size); + if (bo->iomap) + return -ENOMEM; + } + + return 0; +} + +/** + * \c Unmap mapping obtained using drm_bo_ioremap + * + * \param bo The buffer object. + * + * Call bo->mutex locked. + */ + +void drm_bo_iounmap(drm_buffer_object_t *bo) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm; + drm_mem_type_manager_t *man; + + + bm = &dev->bm; + man = &bm->man[bo->mem_type]; + + if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + iounmap(bo->iomap); + + bo->iomap = NULL; +} + +/** + * \c Kill all user-space virtual mappings of this buffer object. + * + * \param bo The buffer object. + * + * Call bo->mutex locked. 
+ */ + +void drm_bo_unmap_virtual(drm_buffer_object_t *bo) +{ + drm_device_t *dev = bo->dev; + loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; + loff_t holelen = ((loff_t) bo->num_pages) << PAGE_SHIFT; + + unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); +} + +static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo) +{ + drm_map_list_t *list = &bo->map_list; + drm_local_map_t *map; + drm_device_t *dev = bo->dev; + + if (list->user_token) { + drm_ht_remove_item(&dev->map_hash, &list->hash); + list->user_token = 0; + } + if (list->file_offset_node) { + drm_mm_put_block(list->file_offset_node); + list->file_offset_node = NULL; + } + + map = list->map; + if (!map) + return; + + drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); + list->map = NULL; + list->user_token = 0ULL; + drm_bo_usage_deref_locked(bo); +} + +static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) +{ + drm_map_list_t *list = &bo->map_list; + drm_local_map_t *map; + drm_device_t *dev = bo->dev; + + list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); + if (!list->map) + return -ENOMEM; + + map = list->map; + map->offset = 0; + map->type = _DRM_TTM; + map->flags = _DRM_REMOVABLE; + map->size = bo->num_pages * PAGE_SIZE; + atomic_inc(&bo->usage); + map->handle = (void *) bo; + + list->file_offset_node = drm_mm_search_free(&dev->offset_manager, + bo->num_pages, 0, 0); + + if (!list->file_offset_node) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->file_offset_node = drm_mm_get_block(list->file_offset_node, + bo->num_pages, 0); + + list->hash.key = list->file_offset_node->start; + if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; + + return 0; +} diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 93d9b95d..3639ea4f 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -160,7 +160,7 @@ void free_nopage_retry(void) } } -struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, +struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { @@ -171,7 +171,7 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, data.address = address; data.vma = vma; - drm_vm_ttm_fault(vma, &data); + drm_bo_vm_fault(vma, &data); switch (data.type) { case VM_FAULT_OOM: return NOPAGE_OOM; diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 3cb5d202..9048f021 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -201,8 +201,8 @@ extern int drm_map_page_into_agp(struct page *page); extern struct page *get_nopage_retry(void); extern void free_nopage_retry(void); struct fault_data; -extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, - struct fault_data *data); +extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma, + struct fault_data *data); #define NOPAGE_REFAULT get_nopage_retry() #endif @@ -230,9 +230,9 @@ struct fault_data { extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot); -extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, - unsigned long address, - int *type); +extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, + unsigned long address, + int *type); #endif diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index f1e3ea40..6699a0dd 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -79,29 +79,28 @@ static void 
ttm_free_pages(drm_ttm_t *ttm) ttm->pages = NULL; } -/* - * Unmap all vma pages from vmas mapping this ttm. - */ -static int unmap_vma_pages(drm_ttm_t * ttm) +struct page *drm_ttm_alloc_page(void) { - drm_device_t *dev = ttm->dev; - loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT; - loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT; + struct page *page; -#ifdef DRM_ODD_MM_COMPAT - int ret; - ret = drm_ttm_lock_mm(ttm); - if (ret) - return ret; + if (drm_alloc_memctl(PAGE_SIZE)) { + return NULL; + } + page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); + if (!page) { + drm_free_memctl(PAGE_SIZE); + return NULL; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) + SetPageLocked(page); +#else + SetPageReserved(page); #endif - unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); -#ifdef DRM_ODD_MM_COMPAT - drm_ttm_finish_unmap(ttm); -#endif - return 0; + return page; } + /* * Change caching policy for the linear kernel map * for range of pages in a ttm. @@ -154,13 +153,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) if (!ttm) return 0; - if (atomic_read(&ttm->vma_count) > 0) { - ttm->destroy = 1; - DRM_ERROR("VMAs are still alive. Skipping destruction.\n"); - return -EBUSY; - } - - DRM_DEBUG("Destroying a ttm\n"); + DRM_ERROR("Drm destroy ttm\n"); #ifdef DRM_ODD_MM_COMPAT BUG_ON(!list_empty(&ttm->vma_list)); @@ -193,11 +186,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm) DRM_ERROR("Erroneous map count. " "Leaking page mappings.\n"); } - - /* - * End debugging. - */ - __free_page(*cur_page); drm_free_memctl(PAGE_SIZE); --bm->cur_pages; @@ -225,19 +213,9 @@ static int drm_ttm_populate(drm_ttm_t * ttm) for (i = 0; i < ttm->num_pages; ++i) { page = ttm->pages[i]; if (!page) { - if (drm_alloc_memctl(PAGE_SIZE)) { + page = drm_ttm_alloc_page(); + if (!page) return -ENOMEM; - } - page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); - if (!page) { - drm_free_memctl(PAGE_SIZE); - return -ENOMEM; - } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) - SetPageLocked(page); -#else - SetPageReserved(page); -#endif ttm->pages[i] = page; ++bm->cur_pages; } @@ -251,7 +229,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm) * Initialize a ttm. */ -static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size) +drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size) { drm_bo_driver_t *bo_driver = dev->driver->bo_driver; drm_ttm_t *ttm; @@ -303,26 +281,15 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size) int drm_evict_ttm(drm_ttm_t * ttm) { drm_ttm_backend_t *be = ttm->be; - int ret; - switch (ttm->state) { - case ttm_bound: - if (be->needs_ub_cache_adjust(be)) { - ret = unmap_vma_pages(ttm); - if (ret) { - return ret; - } - } + if (ttm->state == ttm_bound) be->unbind(be); - break; - default: - break; - } + ttm->state = ttm_evicted; return 0; } -void drm_fixup_ttm_caching(drm_ttm_t * ttm) +void drm_ttm_fixup_caching(drm_ttm_t * ttm) { if (ttm->state == ttm_evicted) { @@ -344,7 +311,7 @@ int drm_unbind_ttm(drm_ttm_t * ttm) if (ret) return ret; - drm_fixup_ttm_caching(ttm); + drm_ttm_fixup_caching(ttm); return 0; } @@ -366,10 +333,6 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) return ret; if (ttm->state == ttm_unbound && !cached) { - ret = unmap_vma_pages(ttm); - if (ret) - return ret; - drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); } #ifdef DRM_ODD_MM_COMPAT @@ -402,120 +365,3 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) return 0; } - -/* - * dev->struct_mutex locked. 
- */ -static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object) -{ - drm_map_list_t *list = &object->map_list; - drm_local_map_t *map; - - if (list->user_token) - drm_ht_remove_item(&dev->map_hash, &list->hash); - - if (list->file_offset_node) { - drm_mm_put_block(list->file_offset_node); - list->file_offset_node = NULL; - } - - map = list->map; - - if (map) { - drm_ttm_t *ttm = (drm_ttm_t *) map->offset; - if (ttm) { - if (drm_destroy_ttm(ttm) != -EBUSY) { - drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM); - } - } else { - drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM); - } - } - - drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM); -} - -void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to) -{ - if (atomic_dec_and_test(&to->usage)) { - drm_ttm_object_remove(dev, to); - } -} - -void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to) -{ - if (atomic_dec_and_test(&to->usage)) { - mutex_lock(&dev->struct_mutex); - if (atomic_read(&to->usage) == 0) - drm_ttm_object_remove(dev, to); - mutex_unlock(&dev->struct_mutex); - } -} - -/* - * Create a ttm and add it to the drm book-keeping. - * dev->struct_mutex locked. - */ - -int drm_ttm_object_create(drm_device_t * dev, unsigned long size, - uint32_t flags, drm_ttm_object_t ** ttm_object) -{ - drm_ttm_object_t *object; - drm_map_list_t *list; - drm_local_map_t *map; - drm_ttm_t *ttm; - - object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM); - if (!object) - return -ENOMEM; - object->flags = flags; - list = &object->map_list; - - list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM); - if (!list->map) { - drm_ttm_object_remove(dev, object); - return -ENOMEM; - } - map = list->map; - - ttm = drm_init_ttm(dev, size); - if (!ttm) { - DRM_ERROR("Could not create ttm\n"); - drm_ttm_object_remove(dev, object); - return -ENOMEM; - } - - map->offset = (unsigned long)ttm; - map->type = _DRM_TTM; - map->flags = _DRM_REMOVABLE; - map->size = ttm->num_pages * PAGE_SIZE; - map->handle = (void *)object; - - /* - * Add a one-page "hole" to the block size to avoid the mm subsystem - * merging vmas. - * FIXME: Is this really needed? 
- */ - - list->file_offset_node = drm_mm_search_free(&dev->offset_manager, - ttm->num_pages + 1, 0, 0); - if (!list->file_offset_node) { - drm_ttm_object_remove(dev, object); - return -ENOMEM; - } - list->file_offset_node = drm_mm_get_block(list->file_offset_node, - ttm->num_pages + 1, 0); - - list->hash.key = list->file_offset_node->start; - - if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { - drm_ttm_object_remove(dev, object); - return -ENOMEM; - } - - list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; - ttm->mapping_offset = list->hash.key; - atomic_set(&object->usage, 1); - *ttm_object = object; - return 0; -} diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index a7858549..a6dc30ae 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -86,24 +86,10 @@ typedef struct drm_ttm { } drm_ttm_t; -typedef struct drm_ttm_object { - atomic_t usage; - uint32_t flags; - drm_map_list_t map_list; -} drm_ttm_object_t; -extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size, - uint32_t flags, - drm_ttm_object_t ** ttm_object); -extern void drm_ttm_object_deref_locked(struct drm_device *dev, - drm_ttm_object_t * to); -extern void drm_ttm_object_deref_unlocked(struct drm_device *dev, - drm_ttm_object_t * to); -extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv, - uint32_t handle, - int check_owner); +extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size); +extern struct page *drm_ttm_alloc_page(void); extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); - extern int drm_unbind_ttm(drm_ttm_t * ttm); /* @@ -111,7 +97,7 @@ extern int drm_unbind_ttm(drm_ttm_t * ttm); */ extern int drm_evict_ttm(drm_ttm_t * ttm); -extern void drm_fixup_ttm_caching(drm_ttm_t * ttm); +extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); /* * Destroy a ttm. 
The user normally calls drmRmMap or a similar IOCTL to do this, @@ -120,12 +106,6 @@ extern void drm_fixup_ttm_caching(drm_ttm_t * ttm); */ extern int drm_destroy_ttm(drm_ttm_t * ttm); -extern int drm_ttm_ioctl(DRM_IOCTL_ARGS); - -static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to) -{ - return (drm_ttm_t *) to->map_list.map->offset; -} #define DRM_MASK_VAL(dest, mask, val) \ (dest) = ((dest) & ~(mask)) | ((val) & (mask)); diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 63cf6f56..93d1c0b8 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -41,8 +41,9 @@ static void drm_vm_open(struct vm_area_struct *vma); static void drm_vm_close(struct vm_area_struct *vma); -static void drm_vm_ttm_close(struct vm_area_struct *vma); -static void drm_vm_ttm_open(struct vm_area_struct *vma); +static int drm_bo_mmap_locked(struct vm_area_struct *vma, + struct file *filp, + drm_local_map_t *map); pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) @@ -158,93 +159,6 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, } #endif /* __OS_HAS_AGP */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \ - LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) -static -#endif -struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, - struct fault_data *data) -{ - unsigned long address = data->address; - drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; - unsigned long page_offset; - struct page *page; - drm_ttm_t *ttm; - drm_buffer_manager_t *bm; - drm_device_t *dev; - unsigned long pfn; - int err; - pgprot_t pgprot; - - if (!map) { - data->type = VM_FAULT_OOM; - return NULL; - } - - if (address > vma->vm_end) { - data->type = VM_FAULT_SIGBUS; - return NULL; - } - - ttm = (drm_ttm_t *) map->offset; - - dev = ttm->dev; - - /* - * Perhaps retry here? - */ - - mutex_lock(&dev->struct_mutex); - drm_fixup_ttm_caching(ttm); - - bm = &dev->bm; - page_offset = (address - vma->vm_start) >> PAGE_SHIFT; - page = ttm->pages[page_offset]; - - if (!page) { - if (drm_alloc_memctl(PAGE_SIZE)) { - data->type = VM_FAULT_OOM; - goto out; - } - page = ttm->pages[page_offset] = - alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); - if (!page) { - drm_free_memctl(PAGE_SIZE); - data->type = VM_FAULT_OOM; - goto out; - } - ++bm->cur_pages; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) - SetPageLocked(page); -#else - SetPageReserved(page); -#endif - } - - if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && - !(ttm->be->flags & DRM_BE_FLAG_CMA)) { - - pfn = ttm->aper_offset + page_offset + - (ttm->be->aperture_base >> PAGE_SHIFT); - pgprot = drm_io_prot(ttm->be->drm_map_type, vma); - } else { - pfn = page_to_pfn(page); - pgprot = vma->vm_page_prot; - } - - err = vm_insert_pfn(vma, address, pfn, pgprot); - - if (!err || err == -EBUSY) - data->type = VM_FAULT_MINOR; - else - data->type = VM_FAULT_OOM; - out: - mutex_unlock(&dev->struct_mutex); - return NULL; -} -#endif - /** * \c nopage method for shared virtual memory. 
* @@ -504,20 +418,6 @@ static struct vm_operations_struct drm_vm_sg_ops = { .close = drm_vm_close, }; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) -static struct vm_operations_struct drm_vm_ttm_ops = { - .nopage = drm_vm_ttm_nopage, - .open = drm_vm_ttm_open, - .close = drm_vm_ttm_close, -}; -#else -static struct vm_operations_struct drm_vm_ttm_ops = { - .fault = drm_vm_ttm_fault, - .open = drm_vm_ttm_open, - .close = drm_vm_ttm_close, -}; -#endif - /** * \c open method for shared virtual memory. * @@ -555,28 +455,6 @@ static void drm_vm_open(struct vm_area_struct *vma) mutex_unlock(&dev->struct_mutex); } -static void drm_vm_ttm_open_locked(struct vm_area_struct *vma) { - - drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; - drm_ttm_t *ttm; - - drm_vm_open_locked(vma); - ttm = (drm_ttm_t *) map->offset; - atomic_inc(&ttm->vma_count); -#ifdef DRM_ODD_MM_COMPAT - drm_ttm_add_vma(ttm, vma); -#endif -} - -static void drm_vm_ttm_open(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - - mutex_lock(&dev->struct_mutex); - drm_vm_ttm_open_locked(vma); - mutex_unlock(&dev->struct_mutex); -} - /** * \c close method for all virtual memory types. * @@ -611,34 +489,6 @@ static void drm_vm_close(struct vm_area_struct *vma) } -static void drm_vm_ttm_close(struct vm_area_struct *vma) -{ - drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; - drm_ttm_t *ttm; - drm_device_t *dev; - int ret; - - drm_vm_close(vma); - if (map) { - ttm = (drm_ttm_t *) map->offset; - dev = ttm->dev; - mutex_lock(&dev->struct_mutex); -#ifdef DRM_ODD_MM_COMPAT - drm_ttm_delete_vma(ttm, vma); -#endif - if (atomic_dec_and_test(&ttm->vma_count)) { - if (ttm->destroy) { - ret = drm_destroy_ttm(ttm); - BUG_ON(ret); - drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM); - } - } - mutex_unlock(&dev->struct_mutex); - } - return; -} - - /** * mmap DMA memory. * @@ -834,17 +684,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_private_data = (void *)map; vma->vm_flags |= VM_RESERVED; break; - case _DRM_TTM: { - vma->vm_ops = &drm_vm_ttm_ops; - vma->vm_private_data = (void *) map; - vma->vm_file = filp; - vma->vm_flags |= VM_RESERVED | VM_IO; -#ifdef DRM_ODD_MM_COMPAT - drm_ttm_map_bound(vma); -#endif - drm_vm_ttm_open_locked(vma); - return 0; - } + case _DRM_TTM: + return drm_bo_mmap_locked(vma, filp, map); default: return -EINVAL; /* This should never happen. */ } @@ -868,3 +709,179 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) return ret; } EXPORT_SYMBOL(drm_mmap); + +/** + * buffer object vm functions. + */ + +/** + * \c Pagefault method for buffer objects. + * + * \param vma Virtual memory area. + * \param data Fault data on failure or refault. + * \return Always NULL as we insert pfns directly. 
+ */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) +static +#endif +struct page *drm_bo_vm_fault(struct vm_area_struct *vma, + struct fault_data *data) +{ + unsigned long address = data->address; + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + drm_local_map_t *map; + unsigned long page_offset; + struct page *page = NULL; + drm_ttm_t *ttm; + drm_buffer_manager_t *bm; + drm_device_t *dev; + unsigned long pfn; + int err; + pgprot_t pgprot; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + + + mutex_lock(&bo->mutex); + map = bo->map_list.map; + + if (!map) { + data->type = VM_FAULT_OOM; + goto out_unlock; + } + + if (address > vma->vm_end) { + data->type = VM_FAULT_SIGBUS; + goto out_unlock; + } + + dev = bo->dev; + err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + + if (err) { + data->type = VM_FAULT_SIGBUS; + goto out_unlock; + } + + page_offset = (address - vma->vm_start) >> PAGE_SHIFT; + + if (bus_size) { + pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; + pgprot = drm_io_prot(_DRM_AGP, vma); + } else { + bm = &dev->bm; + ttm = bo->ttm; + + page = ttm->pages[page_offset]; + if (!page) { + page = drm_ttm_alloc_page(); + if (!page) { + data->type = VM_FAULT_OOM; + goto out_unlock; + } + ttm->pages[page_offset] = page; + ++bm->cur_pages; + } + pfn = page_to_pfn(page); + pgprot = vma->vm_page_prot; + } + + err = vm_insert_pfn(vma, address, pfn, pgprot); + + if (!err || err == -EBUSY) + data->type = VM_FAULT_MINOR; + else + data->type = VM_FAULT_OOM; +out_unlock: + mutex_unlock(&bo->mutex); + return NULL; +} +#endif + +static void drm_bo_vm_open_locked(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + + drm_vm_open_locked(vma); + atomic_inc(&bo->usage); +#ifdef DRM_MM_ODD_COMPAT + drm_bo_vm_add_vma(bo, vma); +#endif +} + +/** + * \c vma open method for buffer objects. + * + * \param vma virtual memory area. + */ + +static void drm_bo_vm_open(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + drm_device_t *dev = bo->dev; + + mutex_lock(&dev->struct_mutex); + drm_bo_vm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); +} + +/** + * \c vma close method for buffer objects. + * + * \param vma virtual memory area. + */ + +static void drm_bo_vm_close(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + drm_device_t *dev = bo->dev; + + drm_vm_close(vma); + if (bo) { + mutex_lock(&dev->struct_mutex); +#ifdef DRM_MM_ODD_COMPAT + drm_bo_vm_delete_vma(bo, vma); +#endif + drm_bo_usage_deref_locked(bo); + mutex_unlock(&dev->struct_mutex); + } + return; +} + +static struct vm_operations_struct drm_bo_vm_ops = { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) + .nopage = drm_bo_vm_nopage, +#else + .fault = drm_bo_vm_fault, +#endif + .open = drm_bo_vm_open, + .close = drm_bo_vm_close, +}; + +/** + * mmap buffer object memory. + * + * \param vma virtual memory area. + * \param filp file pointer. + * \param map The buffer object drm map. + * \return zero on success or a negative number on failure. 
+ */ + +int drm_bo_mmap_locked(struct vm_area_struct *vma, + struct file *filp, + drm_local_map_t *map) +{ + vma->vm_ops = &drm_bo_vm_ops; + vma->vm_private_data = map->handle; + vma->vm_file = filp; + vma->vm_flags |= VM_RESERVED | VM_IO; + drm_bo_vm_open_locked(vma); +#ifdef DRM_ODD_MM_COMPAT + drm_ttm_map_bound(vma); +#endif + return 0; +} diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 53002301..dc2cca33 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -70,7 +70,6 @@ int i915_init_mem_type(drm_device_t *dev, uint32_t type, { switch(type) { case DRM_BO_MEM_LOCAL: - break; case DRM_BO_MEM_TT: man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CACHED; diff --git a/tests/ttmtest/src/ttmtest.c b/tests/ttmtest/src/ttmtest.c index ae261e28..085a0746 100644 --- a/tests/ttmtest/src/ttmtest.c +++ b/tests/ttmtest/src/ttmtest.c @@ -304,7 +304,7 @@ static void testAGP(TinyDRIContext * ctx) { unsigned long ticks[128], *pTicks; - unsigned long size = 4096 * 1024; + unsigned long size = 8 * 1024; int ret; ret = benchmarkBuffer(ctx, size, ticks); From 63f2abd721c40f1cddae555c79b4ab4c55aae006 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 2 Feb 2007 19:49:11 +0100 Subject: [PATCH 06/34] Make also later kernels work with buffer object vm and clean up some function names. --- linux-core/drmP.h | 7 ++ linux-core/drm_agpsupport.c | 2 +- linux-core/drm_bo.c | 108 ++++++++++++++++------------ linux-core/drm_compat.c | 138 +++++++++++++++++++----------------- linux-core/drm_compat.h | 18 ++--- linux-core/drm_ttm.c | 51 +++---------- linux-core/drm_ttm.h | 13 +--- linux-core/drm_vm.c | 16 ++--- 8 files changed, 167 insertions(+), 186 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 84a06470..dd07a603 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1044,6 +1044,13 @@ typedef struct drm_buffer_object{ unsigned long num_pages; uint32_t vm_flags; void *iomap; + + +#ifdef DRM_ODD_MM_COMPAT + /* dev->struct_mutex only protected. 
*/ + struct list_head vma_list; + struct list_head p_mm_list; +#endif } drm_buffer_object_t; diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 7a692af1..177180f9 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -620,7 +620,7 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; - + DRM_DEBUG("drm_agp_unbind_ttm\n"); if (agp_priv->mem->is_bound) return drm_agp_unbind_memory(agp_priv->mem); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 8fe5e8ef..9a27a4b5 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -107,23 +107,31 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, int force_no_move) { drm_device_t *dev = bo->dev; - int ret; + int ret = 0; if (bo->mm_node) { - drm_bo_unmap_virtual(bo); +#ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); - if (evict) - ret = drm_evict_ttm(bo->ttm); - else - ret = drm_unbind_ttm(bo->ttm); - + ret = drm_bo_lock_kmm(bo); if (ret) { mutex_unlock(&dev->struct_mutex); if (ret == -EAGAIN) schedule(); return ret; } + drm_bo_unmap_virtual(bo); + drm_bo_finish_unmap(bo); + drm_bo_unlock_kmm(bo); +#else + drm_bo_unmap_virtual(bo); + mutex_lock(&dev->struct_mutex); +#endif + if (evict) + drm_ttm_evict(bo->ttm); + else + drm_ttm_unbind(bo->ttm); + bo->mem_type = DRM_BO_MEM_LOCAL; if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { drm_mm_put_block(bo->mm_node); bo->mm_node = NULL; @@ -262,23 +270,13 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) if (list_empty(&bo->lru) && bo->mm_node == NULL && atomic_read(&bo->usage) == 0) { BUG_ON(bo->fence != NULL); +#ifdef DRM_ODD_MM_COMPAT + BUG_ON(!list_empty(&bo->vma_list)); + BUG_ON(!list_empty(&bo->p_mm_list)); +#endif + if (bo->ttm) { - unsigned long _end = jiffies + DRM_HZ; - int ret; - - do { - ret = drm_unbind_ttm(bo->ttm); - if (ret == -EAGAIN) { - mutex_unlock(&dev->struct_mutex); - schedule(); - mutex_lock(&dev->struct_mutex); - } - } while (ret == -EAGAIN && !time_after_eq(jiffies, _end)); - - if (ret) { - DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. " - "Bad. 
Continuing anyway\n"); - } + drm_ttm_unbind(bo->ttm); drm_destroy_ttm(bo->ttm); bo->ttm = NULL; } @@ -597,8 +595,7 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) { drm_device_t *dev = bo->dev; - drm_ttm_backend_t *be; - int ret; + int ret = 0; if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) { BUG_ON(bo->mm_node); @@ -608,26 +605,41 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) } DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start); - - drm_bo_unmap_virtual(bo); + +#ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); + ret = drm_bo_lock_kmm(bo); + if (ret) { + mutex_unlock(&dev->struct_mutex); + goto out_put_unlock; + } +#endif + drm_bo_unmap_virtual(bo); ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED, bo->mm_node->start); + if (ret) { - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; +#ifdef DRM_ODD_MM_COMPAT + drm_bo_unlock_kmm(bo); + mutex_unlock(&dev->struct_mutex); +#endif + goto out_put_unlock; } - mutex_unlock(&dev->struct_mutex); - - if (ret) { - return ret; - } - - be = bo->ttm->be; - if (be->needs_ub_cache_adjust(be)) - bo->flags &= ~DRM_BO_FLAG_CACHED; + + if (!(bo->flags & DRM_BO_FLAG_BIND_CACHED)) + bo->flags &= DRM_BO_FLAG_CACHED; bo->flags &= ~DRM_BO_MASK_MEM; bo->flags |= DRM_BO_FLAG_MEM_TT; + bo->mem_type = DRM_BO_MEM_TT; + +#ifdef DRM_ODD_MM_COMPAT + ret = drm_bo_remap_bound(bo); + if (ret) { + /* FIXME */ + } + drm_bo_unlock_kmm(bo); + mutex_unlock(&dev->struct_mutex); +#endif if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags); @@ -637,6 +649,13 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED); return 0; + +out_put_unlock: + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(bo->mm_node); + bo->mm_node = NULL; + mutex_unlock(&dev->struct_mutex); + return ret; } static int drm_bo_new_flags(drm_device_t * dev, @@ -1120,7 +1139,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags, } else { drm_move_tt_to_local(bo, 0, force_no_move); } - return 0; } @@ -1213,13 +1231,12 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, list_add_tail(&bo->lru, &bm->unfenced); mutex_unlock(&dev->struct_mutex); } else { - mutex_lock(&dev->struct_mutex); list_del_init(&bo->lru); drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); } - + bo->flags = new_flags; return 0; } @@ -1427,6 +1444,10 @@ int drm_buffer_object_create(drm_file_t * priv, DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); +#ifdef DRM_ODD_MM_COMPAT + INIT_LIST_HEAD(&bo->p_mm_list); + INIT_LIST_HEAD(&bo->vma_list); +#endif bo->dev = dev; bo->type = type; bo->num_pages = num_pages; @@ -2041,7 +2062,6 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; *bus_size = 0; - if (bo->type != drm_bo_type_dc) return -EINVAL; @@ -2057,11 +2077,10 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, drm_ttm_fixup_caching(ttm); - if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) + if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) return 0; if (ttm->be->flags & DRM_BE_FLAG_CMA) return 0; - *bus_base = ttm->be->aperture_base; } else { *bus_base = man->io_offset; @@ -2069,7 +2088,6 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, *bus_offset = bo->mm_node->start << PAGE_SHIFT; *bus_size = 
bo->num_pages << PAGE_SHIFT; - return 0; } diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 3639ea4f..48d598e8 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -212,78 +212,85 @@ typedef struct vma_entry { } vma_entry_t; -struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, +struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { - drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; unsigned long page_offset; struct page *page; drm_ttm_t *ttm; drm_buffer_manager_t *bm; drm_device_t *dev; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + int err; - /* - * FIXME: Check can't map aperture flag. - */ + mutex_lock(&bo->mutex); if (type) *type = VM_FAULT_MINOR; - if (!map) - return NOPAGE_OOM; + if (address > vma->vm_end) { + page = NOPAGE_SIGBUS; + goto out_unlock; + } + + dev = bo->dev; + err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + + if (err) { + page = NOPAGE_SIGBUS; + goto out_unlock; + } - if (address > vma->vm_end) - return NOPAGE_SIGBUS; - - ttm = (drm_ttm_t *) map->offset; - dev = ttm->dev; - mutex_lock(&dev->struct_mutex); - drm_fixup_ttm_caching(ttm); - BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED); + if (bus_size != 0) { + DRM_ERROR("Invalid compat nopage.\n"); + page = NOPAGE_SIGBUS; + goto out_unlock; + } bm = &dev->bm; + ttm = bo->ttm; page_offset = (address - vma->vm_start) >> PAGE_SHIFT; page = ttm->pages[page_offset]; if (!page) { - if (drm_alloc_memctl(PAGE_SIZE)) { - page = NOPAGE_OOM; - goto out; - } - page = ttm->pages[page_offset] = - alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); + page = drm_ttm_alloc_page(); if (!page) { - drm_free_memctl(PAGE_SIZE); page = NOPAGE_OOM; - goto out; + goto out_unlock; } - ++bm->cur_pages; - SetPageLocked(page); + ttm->pages[page_offset] = page; + ++bm->cur_pages; } get_page(page); - out: - mutex_unlock(&dev->struct_mutex); + +out_unlock: + mutex_unlock(&bo->mutex); return page; } -int drm_ttm_map_bound(struct vm_area_struct *vma) +int drm_bo_map_bound(struct vm_area_struct *vma) { - drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; - drm_ttm_t *ttm = (drm_ttm_t *) map->offset; + drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data; int ret = 0; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + + ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + BUG_ON(ret); - if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && - !(ttm->be->flags & DRM_BE_FLAG_CMA)) { - - unsigned long pfn = ttm->aper_offset + - (ttm->be->aperture_base >> PAGE_SHIFT); - pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma); - + if (bus_size) { + unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; + pgprot_t pgprot = drm_io_prot(_DRM_AGP, vma); + ret = io_remap_pfn_range(vma, vma->vm_start, pfn, vma->vm_end - vma->vm_start, pgprot); @@ -293,31 +300,29 @@ int drm_ttm_map_bound(struct vm_area_struct *vma) } -int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) +int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n_entry; vma_entry_t *v_entry; - drm_local_map_t *map = (drm_local_map_t *) - vma->vm_private_data; struct mm_struct *mm = vma->vm_mm; - v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM); + v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ); if (!v_entry) { DRM_ERROR("Allocation of vma 
pointer entry failed\n"); return -ENOMEM; } v_entry->vma = vma; - map->handle = (void *) v_entry; - list_add_tail(&v_entry->head, &ttm->vma_list); - list_for_each_entry(entry, &ttm->p_mm_list, head) { + list_add_tail(&v_entry->head, &bo->vma_list); + + list_for_each_entry(entry, &bo->p_mm_list, head) { if (mm == entry->mm) { atomic_inc(&entry->refcount); return 0; } else if ((unsigned long)mm < (unsigned long)entry->mm) ; } - n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM); + n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ); if (!n_entry) { DRM_ERROR("Allocation of process mm pointer entry failed\n"); return -ENOMEM; @@ -331,29 +336,29 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) return 0; } -void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) +void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n; vma_entry_t *v_entry, *v_n; int found = 0; struct mm_struct *mm = vma->vm_mm; - list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) { + list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) { if (v_entry->vma == vma) { found = 1; list_del(&v_entry->head); - drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM); + drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ); break; } } BUG_ON(!found); - list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) { + list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) { if (mm == entry->mm) { if (atomic_add_negative(-1, &entry->refcount)) { list_del(&entry->head); BUG_ON(entry->locked); - drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM); + drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ); } return; } @@ -363,12 +368,12 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) -int drm_ttm_lock_mm(drm_ttm_t * ttm) +int drm_bo_lock_kmm(drm_buffer_object_t * bo) { p_mm_entry_t *entry; int lock_ok = 1; - list_for_each_entry(entry, &ttm->p_mm_list, head) { + list_for_each_entry(entry, &bo->p_mm_list, head) { BUG_ON(entry->locked); if (!down_write_trylock(&entry->mm->mmap_sem)) { lock_ok = 0; @@ -380,7 +385,7 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm) if (lock_ok) return 0; - list_for_each_entry(entry, &ttm->p_mm_list, head) { + list_for_each_entry(entry, &bo->p_mm_list, head) { if (!entry->locked) break; up_write(&entry->mm->mmap_sem); @@ -395,47 +400,46 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm) return -EAGAIN; } -void drm_ttm_unlock_mm(drm_ttm_t * ttm) +void drm_bo_unlock_kmm(drm_buffer_object_t * bo) { p_mm_entry_t *entry; - list_for_each_entry(entry, &ttm->p_mm_list, head) { + list_for_each_entry(entry, &bo->p_mm_list, head) { BUG_ON(!entry->locked); up_write(&entry->mm->mmap_sem); entry->locked = 0; } } -int drm_ttm_remap_bound(drm_ttm_t *ttm) +int drm_bo_remap_bound(drm_buffer_object_t *bo) { vma_entry_t *v_entry; int ret = 0; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; - if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && - !(ttm->be->flags & DRM_BE_FLAG_CMA)) { + ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + BUG_ON(ret); - list_for_each_entry(v_entry, &ttm->vma_list, head) { - ret = drm_ttm_map_bound(v_entry->vma); + if (bus_size) { + list_for_each_entry(v_entry, &bo->vma_list, head) { + ret = drm_bo_map_bound(v_entry->vma); if (ret) break; } } - drm_ttm_unlock_mm(ttm); return ret; } -void drm_ttm_finish_unmap(drm_ttm_t *ttm) +void drm_bo_finish_unmap(drm_buffer_object_t *bo) { vma_entry_t *v_entry; - - if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) - return; - 
list_for_each_entry(v_entry, &ttm->vma_list, head) { + list_for_each_entry(v_entry, &bo->vma_list, head) { v_entry->vma->vm_flags &= ~VM_PFNMAP; } - drm_ttm_unlock_mm(ttm); } #endif diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 9048f021..313aab85 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -238,7 +238,7 @@ extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, #ifdef DRM_ODD_MM_COMPAT -struct drm_ttm; +struct drm_buffer_object; /* @@ -246,14 +246,14 @@ struct drm_ttm; * process mm pointer to the ttm mm list. Needs the ttm mutex. */ -extern int drm_ttm_add_vma(struct drm_ttm * ttm, +extern int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma); /* * Delete a vma and the corresponding mm pointer from the * ttm lists. Needs the ttm mutex. */ -extern void drm_ttm_delete_vma(struct drm_ttm * ttm, - struct vm_area_struct *vma); +extern void drm_bo_delete_vma(struct drm_buffer_object * bo, + struct vm_area_struct *vma); /* * Attempts to lock all relevant mmap_sems for a ttm, while @@ -262,12 +262,12 @@ extern void drm_ttm_delete_vma(struct drm_ttm * ttm, * schedule() and try again. */ -extern int drm_ttm_lock_mm(struct drm_ttm * ttm); +extern int drm_bo_lock_kmm(struct drm_buffer_object * bo); /* * Unlock all relevant mmap_sems for a ttm. */ -extern void drm_ttm_unlock_mm(struct drm_ttm * ttm); +extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo); /* * If the ttm was bound to the aperture, this function shall be called @@ -277,7 +277,7 @@ extern void drm_ttm_unlock_mm(struct drm_ttm * ttm); * releases the mmap_sems for this ttm. */ -extern void drm_ttm_finish_unmap(struct drm_ttm *ttm); +extern void drm_bo_finish_unmap(struct drm_buffer_object *bo); /* * Remap all vmas of this ttm using io_remap_pfn_range. We cannot @@ -286,14 +286,14 @@ extern void drm_ttm_finish_unmap(struct drm_ttm *ttm); * releases the mmap_sems for this ttm. */ -extern int drm_ttm_remap_bound(struct drm_ttm *ttm); +extern int drm_bo_remap_bound(struct drm_buffer_object *bo); /* * Remap a vma for a bound ttm. Call with the ttm mutex held and * the relevant mmap_sem locked. */ -extern int drm_ttm_map_bound(struct vm_area_struct *vma); +extern int drm_bo_map_bound(struct vm_area_struct *vma); #endif #endif diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 6699a0dd..9a2ce5cd 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -153,12 +153,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm) if (!ttm) return 0; - DRM_ERROR("Drm destroy ttm\n"); - -#ifdef DRM_ODD_MM_COMPAT - BUG_ON(!list_empty(&ttm->vma_list)); - BUG_ON(!list_empty(&ttm->p_mm_list)); -#endif be = ttm->be; if (be) { be->destroy(be); @@ -241,11 +235,6 @@ drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size) if (!ttm) return NULL; -#ifdef DRM_ODD_MM_COMPAT - INIT_LIST_HEAD(&ttm->p_mm_list); - INIT_LIST_HEAD(&ttm->vma_list); -#endif - ttm->dev = dev; atomic_set(&ttm->vma_count, 0); @@ -278,15 +267,17 @@ drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size) * Unbind a ttm region from the aperture. 
*/ -int drm_evict_ttm(drm_ttm_t * ttm) +void drm_ttm_evict(drm_ttm_t * ttm) { drm_ttm_backend_t *be = ttm->be; + int ret; - if (ttm->state == ttm_bound) - be->unbind(be); + if (ttm->state == ttm_bound) { + ret = be->unbind(be); + BUG_ON(ret); + } ttm->state = ttm_evicted; - return 0; } void drm_ttm_fixup_caching(drm_ttm_t * ttm) @@ -301,18 +292,12 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm) } } -int drm_unbind_ttm(drm_ttm_t * ttm) +void drm_ttm_unbind(drm_ttm_t * ttm) { - int ret = 0; - if (ttm->state == ttm_bound) - ret = drm_evict_ttm(ttm); - - if (ret) - return ret; + drm_ttm_evict(ttm); drm_ttm_fixup_caching(ttm); - return 0; } int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) @@ -335,19 +320,9 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) if (ttm->state == ttm_unbound && !cached) { drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); } -#ifdef DRM_ODD_MM_COMPAT - else if (ttm->state == ttm_evicted && !cached) { - ret = drm_ttm_lock_mm(ttm); - if (ret) - return ret; - } -#endif + if ((ret = be->bind(be, aper_offset, cached))) { ttm->state = ttm_evicted; -#ifdef DRM_ODD_MM_COMPAT - if (be->needs_ub_cache_adjust(be)) - drm_ttm_unlock_mm(ttm); -#endif DRM_ERROR("Couldn't bind backend.\n"); return ret; } @@ -355,13 +330,5 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) ttm->aper_offset = aper_offset; ttm->state = ttm_bound; -#ifdef DRM_ODD_MM_COMPAT - if (be->needs_ub_cache_adjust(be)) { - ret = drm_ttm_remap_bound(ttm); - if (ret) - return ret; - } -#endif - return 0; } diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index a6dc30ae..6aa1c5ad 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -79,10 +79,6 @@ typedef struct drm_ttm { ttm_unbound, ttm_unpopulated, } state; -#ifdef DRM_ODD_MM_COMPAT - struct list_head vma_list; - struct list_head p_mm_list; -#endif } drm_ttm_t; @@ -90,13 +86,8 @@ typedef struct drm_ttm { extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size); extern struct page *drm_ttm_alloc_page(void); extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); -extern int drm_unbind_ttm(drm_ttm_t * ttm); - -/* - * Evict a ttm region. Keeps Aperture caching policy. 
- */ - -extern int drm_evict_ttm(drm_ttm_t * ttm); +extern void drm_ttm_unbind(drm_ttm_t * ttm); +extern void drm_ttm_evict(drm_ttm_t * ttm); extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); /* diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 93d1c0b8..a4a9b09d 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -747,12 +747,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, mutex_lock(&bo->mutex); - map = bo->map_list.map; - - if (!map) { - data->type = VM_FAULT_OOM; - goto out_unlock; - } if (address > vma->vm_end) { data->type = VM_FAULT_SIGBUS; @@ -808,8 +802,8 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma) drm_vm_open_locked(vma); atomic_inc(&bo->usage); -#ifdef DRM_MM_ODD_COMPAT - drm_bo_vm_add_vma(bo, vma); +#ifdef DRM_ODD_MM_COMPAT + drm_bo_add_vma(bo, vma); #endif } @@ -843,8 +837,8 @@ static void drm_bo_vm_close(struct vm_area_struct *vma) drm_vm_close(vma); if (bo) { mutex_lock(&dev->struct_mutex); -#ifdef DRM_MM_ODD_COMPAT - drm_bo_vm_delete_vma(bo, vma); +#ifdef DRM_ODD_MM_COMPAT + drm_bo_delete_vma(bo, vma); #endif drm_bo_usage_deref_locked(bo); mutex_unlock(&dev->struct_mutex); @@ -881,7 +875,7 @@ int drm_bo_mmap_locked(struct vm_area_struct *vma, vma->vm_flags |= VM_RESERVED | VM_IO; drm_bo_vm_open_locked(vma); #ifdef DRM_ODD_MM_COMPAT - drm_ttm_map_bound(vma); + drm_bo_map_bound(vma); #endif return 0; } From 2d962332dea5ed328ae45c6ef7298ea15216b635 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 5 Feb 2007 16:13:32 +0100 Subject: [PATCH 07/34] i915: Add copy-blit operation. --- linux-core/i915_buffer.c | 44 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index dc2cca33..cdbe579a 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -96,3 +96,47 @@ int i915_init_mem_type(drm_device_t *dev, uint32_t type, } return 0; } + +void i915_emit_copy_blit(drm_device_t *dev, + uint32_t src_offset, + uint32_t dst_offset, + uint32_t pages, + int direction) +{ + uint32_t cur_pages; + uint32_t stride = PAGE_SIZE; + drm_i915_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + if (!dev_priv) + return; + + if (direction) { + stride = -stride; + src_offset += (pages - 1) << PAGE_SHIFT; + dst_offset += (pages - 1) << PAGE_SHIFT; + } + + while(pages > 0) { + cur_pages = pages; + if (cur_pages > 2048) + cur_pages = 2048; + pages -= cur_pages; + + BEGIN_LP_RING(8); + OUT_RING(XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | + XY_SRC_COPY_BLT_WRITE_RGB); + OUT_RING((stride & 0xffff) | ( 0xcc << 16) | (1 << 24) | + (1 << 25)); + OUT_RING(0); + OUT_RING((cur_pages << 16) | (PAGE_SIZE >> 2)); + OUT_RING(dst_offset); + OUT_RING(0); + OUT_RING(stride & 0xffff); + OUT_RING(src_offset); + ADVANCE_LP_RING(); + dst_offset += (cur_pages << PAGE_SHIFT)*(direction ? -1 : 1); + src_offset += (cur_pages << PAGE_SHIFT)*(direction ? -1 : 1); + } + return; +} From 609e3b037526021d20c7cc18b7fed1152206dc68 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 6 Feb 2007 14:20:33 +0100 Subject: [PATCH 08/34] Implement a policy for selecting memory types. 
--- linux-core/drmP.h | 27 +++-- linux-core/drm_bo.c | 214 ++++++++++++++++++++++++++++++--------- linux-core/drm_stub.c | 1 + linux-core/i915_buffer.c | 11 ++ linux-core/i915_drv.c | 9 ++ shared-core/i915_drv.h | 1 + 6 files changed, 206 insertions(+), 57 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index dd07a603..5834c9dc 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -673,12 +673,17 @@ typedef struct drm_mem_type_manager { */ typedef struct drm_bo_driver{ + const uint32_t *mem_type_prio; + const uint32_t *mem_busy_prio; + uint32_t num_mem_type_prio; + uint32_t num_mem_busy_prio; drm_ttm_backend_t *(*create_ttm_backend_entry) (struct drm_device *dev); int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type); int (*invalidate_caches)(struct drm_device *dev, uint32_t flags); int (*init_mem_type)(struct drm_device *dev, uint32_t type, drm_mem_type_manager_t *man); + uint32_t (*evict_flags) (struct drm_device *dev, uint32_t type); } drm_bo_driver_t; @@ -800,19 +805,9 @@ typedef struct drm_fence_manager{ atomic_t count; } drm_fence_manager_t; - -typedef struct drm_bo_mem_region { - drm_mm_node_t *node; - uint32_t memory_type; - drm_ttm_t *ttm; - unsigned long bus_offset; - unsigned long num_pages; - uint32_t vm_flags; -} drm_bo_mem_region_t; - - typedef struct drm_buffer_manager{ struct mutex init_mutex; + struct mutex evict_mutex; int nice_mode; int initialized; drm_file_t *last_to_validate; @@ -1003,6 +998,16 @@ typedef struct drm_fence_object{ uint32_t submitted_flush; } drm_fence_object_t; +typedef struct drm_bo_mem_reg { + drm_mm_node_t *mm_node; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint32_t flags; + uint32_t mask; +} drm_bo_mem_reg_t; + typedef struct drm_buffer_object{ drm_device_t *dev; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 9a27a4b5..fa659d04 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -56,6 +56,8 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo); static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo); static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo); static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); +static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, + int no_wait); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -497,6 +499,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, int ret = 0; drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; + drm_bo_mem_reg_t evict_mem; /* * Someone might have modified the buffer before we took the buffer mutex. 
@@ -509,22 +512,39 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, ret = drm_bo_wait(bo, 0, 0, no_wait); - if (ret) { - if (ret != -EAGAIN) - DRM_ERROR("Failed to expire fence before " - "buffer eviction.\n"); + if (ret && ret != -EAGAIN) { + DRM_ERROR("Failed to expire fence before " + "buffer eviction.\n"); goto out; } - if (mem_type == DRM_BO_MEM_TT) { - ret = drm_move_tt_to_local(bo, 1, force_no_move); - if (ret) - goto out; - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); - drm_bo_add_to_lru(bo, bm); - mutex_unlock(&dev->struct_mutex); + evict_mem.num_pages = bo->num_pages; + evict_mem.page_alignment = bo->page_alignment; + evict_mem.size = evict_mem.num_pages << PAGE_SHIFT; + evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); + + ret = drm_bo_mem_space(dev, &evict_mem, no_wait); + + if (ret && ret != -EAGAIN) { + DRM_ERROR("Failed to find memory space for " + "buffer eviction.\n"); + goto out; } + + if ((mem_type != DRM_BO_MEM_TT) && + (evict_mem.mem_type != DRM_BO_MEM_LOCAL)) { + ret = -EINVAL; + DRM_ERROR("Unsupported memory types for eviction.\n"); + goto out; + } + + ret = drm_move_tt_to_local(bo, 1, force_no_move); + if (ret) + goto out; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); + drm_bo_add_to_lru(bo, bm); + mutex_unlock(&dev->struct_mutex); if (ret) goto out; @@ -535,26 +555,25 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, return ret; } -/* - * bo->mutex locked. - */ -int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, - int no_wait) + +static int drm_bo_mem_force_space(drm_device_t *dev, + drm_bo_mem_reg_t *mem, + uint32_t mem_type, + int no_wait) { - drm_device_t *dev = bo->dev; drm_mm_node_t *node; drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *entry; drm_mem_type_manager_t *man = &bm->man[mem_type]; - drm_mm_t *mm = &man->manager; struct list_head *lru; - unsigned long size = bo->num_pages; + unsigned long num_pages = mem->num_pages; int ret; mutex_lock(&dev->struct_mutex); do { - node = drm_mm_search_free(mm, size, bo->page_alignment, 1); + node = drm_mm_search_free(&man->manager, num_pages, + mem->page_alignment, 1); if (node) break; @@ -563,11 +582,11 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, break; entry = list_entry(lru->next, drm_buffer_object_t, lru); - atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE); + BUG_ON(entry->flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + ret = drm_bo_evict(entry, mem_type, no_wait, 0); mutex_unlock(&entry->mutex); drm_bo_usage_deref_unlocked(entry); @@ -577,34 +596,108 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, } while (1); if (!node) { - DRM_ERROR("Out of videoram / aperture space\n"); mutex_unlock(&dev->struct_mutex); return -ENOMEM; } - node = drm_mm_get_block(node, size, bo->page_alignment); + node = drm_mm_get_block(node, num_pages, mem->page_alignment); mutex_unlock(&dev->struct_mutex); - BUG_ON(!node); - node->private = (void *)bo; - - bo->mm_node = node; - bo->offset = node->start * PAGE_SIZE; + mem->mm_node = node; + mem->mem_type = mem_type; + mem->flags = drm_bo_type_flags(mem_type); return 0; } -static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) + +static int drm_bo_mem_space(drm_device_t *dev, + drm_bo_mem_reg_t *mem, + int no_wait) +{ + drm_buffer_manager_t *bm= &dev->bm; + drm_mem_type_manager_t *man; + + uint32_t num_prios = 
dev->driver->bo_driver->num_mem_type_prio; + const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; + uint32_t i; + uint32_t mem_type = DRM_BO_MEM_LOCAL; + int type_found = 0; + int type_ok = 0; + int has_eagain = 0; + drm_mm_node_t *node = NULL; + int ret; + + for (i=0; imask ; + if (!type_ok) + continue; + + if (mem_type == DRM_BO_MEM_LOCAL) + break; + + man = &bm->man[mem_type]; + mutex_lock(&dev->struct_mutex); + if (man->has_type && man->use_type) { + type_found = 1; + node = drm_mm_search_free(&man->manager, mem->num_pages, + mem->page_alignment, 1); + if (node) + node = drm_mm_get_block(node, mem->num_pages, + mem->page_alignment); + } + mutex_unlock(&dev->struct_mutex); + if (node) + break; + } + + if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { + mem->mm_node = node; + mem->mem_type = mem_type; + mem->flags = drm_bo_type_flags(mem_type); + return 0; + } + + if (!type_found) { + DRM_ERROR("Requested memory types are not supported\n"); + return -EINVAL; + } + + num_prios = dev->driver->bo_driver->num_mem_busy_prio; + prios = dev->driver->bo_driver->mem_busy_prio; + + for (i=0; imask)) + continue; + + man = &bm->man[mem_type]; + ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); + + if (ret == 0) + return 0; + + if (ret == -EAGAIN) + has_eagain = 1; + } + + ret = (has_eagain) ? -EAGAIN : -ENOMEM; + return ret; +} + + + + +static int drm_move_local_to_tt(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, + int no_wait) { drm_device_t *dev = bo->dev; int ret = 0; + + bo->mm_node = mem->mm_node; - if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) { - BUG_ON(bo->mm_node); - ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait); - if (ret) - return ret; - } - - DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start); + DRM_DEBUG("Flipping in to AGP 0x%08lx 0x%08lx\n", + bo->mm_node->start, bo->mm_node->size); #ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); @@ -631,6 +724,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) bo->flags &= ~DRM_BO_MASK_MEM; bo->flags |= DRM_BO_FLAG_MEM_TT; bo->mem_type = DRM_BO_MEM_TT; + bo->offset = bo->mm_node->start << PAGE_SHIFT; #ifdef DRM_ODD_MM_COMPAT ret = drm_bo_remap_bound(bo); @@ -1103,14 +1197,18 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, * bo->mutex locked. */ -static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags, +static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, int no_wait, int force_no_move) { + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; int ret = 0; + drm_bo_mem_reg_t mem; /* * Flush outstanding fences. 
*/ + drm_bo_busy(bo); /* @@ -1126,16 +1224,38 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags, */ ret = drm_bo_wait(bo, 0, 0, no_wait); - - if (ret == -EINTR) - return -EAGAIN; if (ret) return ret; - if (new_flags & DRM_BO_FLAG_MEM_TT) { - ret = drm_move_local_to_tt(bo, no_wait); - if (ret) + + mem.num_pages = bo->num_pages; + mem.size = mem.num_pages << PAGE_SHIFT; + mem.mask = new_mem_flags; + mem.page_alignment = bo->page_alignment; + + mutex_lock(&bm->evict_mutex); + mutex_lock(&dev->struct_mutex); + list_del(&bo->lru); + list_add_tail(&bo->lru,&bm->unfenced); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); + mutex_unlock(&dev->struct_mutex); + + ret = drm_bo_mem_space(dev, &mem, no_wait); + mutex_unlock(&bm->evict_mutex); + + if (ret) + return ret; + + if (mem.mem_type == DRM_BO_MEM_TT) { + ret = drm_move_local_to_tt(bo, &mem, no_wait); + if (ret) { + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); + drm_bo_add_to_lru(bo, bm); + mutex_unlock(&dev->struct_mutex); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); return ret; + } } else { drm_move_tt_to_local(bo, 0, force_no_move); } @@ -1231,6 +1351,8 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, list_add_tail(&bo->lru, &bm->unfenced); mutex_unlock(&dev->struct_mutex); } else { + DRM_FLAG_MASKED(bo->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); mutex_lock(&dev->struct_mutex); list_del_init(&bo->lru); drm_bo_add_to_lru(bo, bm); diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 60123cdc..22592324 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -67,6 +67,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev, mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); mutex_init(&dev->bm.init_mutex); + mutex_init(&dev->bm.evict_mutex); dev->pdev = pdev; dev->pci_device = pdev->device; diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index cdbe579a..13a3e9bb 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -97,6 +97,17 @@ int i915_init_mem_type(drm_device_t *dev, uint32_t type, return 0; } +uint32_t i915_evict_flags(drm_device_t *dev, uint32_t type) +{ + switch(type) { + case DRM_BO_MEM_LOCAL: + case DRM_BO_MEM_TT: + return DRM_BO_FLAG_MEM_LOCAL; + default: + return DRM_BO_FLAG_MEM_TT; + } +} + void i915_emit_copy_blit(drm_device_t *dev, uint32_t src_offset, uint32_t dst_offset, diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 64ce3c15..8c39c249 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -50,11 +50,20 @@ static drm_fence_driver_t i915_fence_driver = { }; #endif #ifdef I915_HAVE_BUFFER + +static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; +static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL}; + static drm_bo_driver_t i915_bo_driver = { + .mem_type_prio = i915_mem_prios, + .mem_busy_prio = i915_busy_prios, + .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t), + .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t), .create_ttm_backend_entry = i915_create_ttm_backend_entry, .fence_type = i915_fence_types, .invalidate_caches = i915_invalidate_caches, .init_mem_type = i915_init_mem_type, + .evict_flags = i915_evict_flags, }; #endif diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index be7dd76a..55c8cf57 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -191,6 +191,7 @@ extern int 
i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *ty extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); extern int i915_init_mem_type(drm_device_t *dev, uint32_t type, drm_mem_type_manager_t *man); +extern uint32_t i915_evict_flags(drm_device_t *dev, uint32_t type); #endif #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) From 40ce53dfde11f84d7bf8db5db93fb73715b2e96e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 6 Feb 2007 15:56:43 +0100 Subject: [PATCH 09/34] Implement a drm_mem_reg_t substructure in the buffer object type. --- linux-core/drmP.h | 8 +- linux-core/drm_bo.c | 173 ++++++++++++++++++++++---------------------- 2 files changed, 89 insertions(+), 92 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 5834c9dc..a8f5e3e2 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1022,13 +1022,9 @@ typedef struct drm_buffer_object{ unsigned long buffer_start; drm_bo_type_t type; unsigned long offset; - uint32_t page_alignment; atomic_t mapped; - uint32_t flags; - uint32_t mask; - uint32_t mem_type; + drm_bo_mem_reg_t mem; - drm_mm_node_t *mm_node; struct list_head lru; struct list_head ddestroy; @@ -1042,11 +1038,9 @@ typedef struct drm_buffer_object{ /* For vm */ drm_map_list_t map_list; - drm_mm_node_t *node; uint32_t memory_type; drm_ttm_t *ttm; unsigned long bus_offset; - unsigned long num_pages; uint32_t vm_flags; void *iomap; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index fa659d04..8a6b49dc 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -78,24 +78,24 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, struct list_head *list; drm_mem_type_manager_t *man; - bo->mem_type = 0; + bo->mem.mem_type = 0; - switch(bo->flags & DRM_BO_MASK_MEM) { + switch(bo->mem.flags & DRM_BO_MASK_MEM) { case DRM_BO_FLAG_MEM_TT: - bo->mem_type = DRM_BO_MEM_TT; + bo->mem.mem_type = DRM_BO_MEM_TT; break; case DRM_BO_FLAG_MEM_VRAM: - bo->mem_type = DRM_BO_MEM_VRAM; + bo->mem.mem_type = DRM_BO_MEM_VRAM; break; case DRM_BO_FLAG_MEM_LOCAL: - bo->mem_type = DRM_BO_MEM_LOCAL; + bo->mem.mem_type = DRM_BO_MEM_LOCAL; break; default: BUG_ON(1); } - man = &bm->man[bo->mem_type]; - list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? + man = &bm->man[bo->mem.mem_type]; + list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? 
&man->pinned : &man->lru; list_add_tail(&bo->lru, list); return; @@ -111,7 +111,7 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, drm_device_t *dev = bo->dev; int ret = 0; - if (bo->mm_node) { + if (bo->mem.mm_node) { #ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); ret = drm_bo_lock_kmm(bo); @@ -133,16 +133,16 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, else drm_ttm_unbind(bo->ttm); - bo->mem_type = DRM_BO_MEM_LOCAL; - if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; + bo->mem.mem_type = DRM_BO_MEM_LOCAL; + if (!(bo->mem.flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; } mutex_unlock(&dev->struct_mutex); } - bo->flags &= ~DRM_BO_FLAG_MEM_TT; - bo->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->mem.flags &= ~DRM_BO_FLAG_MEM_TT; + bo->mem.flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; return 0; } @@ -235,9 +235,9 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) if (!bo->fence) { list_del_init(&bo->lru); - if (bo->mm_node) { - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; + if (bo->mem.mm_node) { + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; } list_del_init(&bo->ddestroy); mutex_unlock(&bo->mutex); @@ -269,7 +269,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - if (list_empty(&bo->lru) && bo->mm_node == NULL && atomic_read(&bo->usage) == 0) { + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && atomic_read(&bo->usage) == 0) { BUG_ON(bo->fence != NULL); #ifdef DRM_ODD_MM_COMPAT @@ -507,7 +507,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) goto out; - if (!(bo->flags & drm_bo_type_flags(mem_type))) + if (!(bo->mem.flags & drm_bo_type_flags(mem_type))) goto out; ret = drm_bo_wait(bo, 0, 0, no_wait); @@ -518,9 +518,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } - evict_mem.num_pages = bo->num_pages; - evict_mem.page_alignment = bo->page_alignment; - evict_mem.size = evict_mem.num_pages << PAGE_SHIFT; + evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); ret = drm_bo_mem_space(dev, &evict_mem, no_wait); @@ -585,7 +583,7 @@ static int drm_bo_mem_force_space(drm_device_t *dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); ret = drm_bo_evict(entry, mem_type, no_wait, 0); mutex_unlock(&entry->mutex); @@ -694,10 +692,10 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, drm_device_t *dev = bo->dev; int ret = 0; - bo->mm_node = mem->mm_node; + bo->mem.mm_node = mem->mm_node; DRM_DEBUG("Flipping in to AGP 0x%08lx 0x%08lx\n", - bo->mm_node->start, bo->mm_node->size); + bo->mem.mm_node->start, bo->mem.mm_node->size); #ifdef DRM_ODD_MM_COMPAT mutex_lock(&dev->struct_mutex); @@ -708,8 +706,8 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, } #endif drm_bo_unmap_virtual(bo); - ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED, - bo->mm_node->start); + ret = drm_bind_ttm(bo->ttm, bo->mem.flags & DRM_BO_FLAG_BIND_CACHED, + bo->mem.mm_node->start); if (ret) { #ifdef DRM_ODD_MM_COMPAT @@ -719,12 +717,12 @@ 
static int drm_move_local_to_tt(drm_buffer_object_t * bo, goto out_put_unlock; } - if (!(bo->flags & DRM_BO_FLAG_BIND_CACHED)) - bo->flags &= DRM_BO_FLAG_CACHED; - bo->flags &= ~DRM_BO_MASK_MEM; - bo->flags |= DRM_BO_FLAG_MEM_TT; - bo->mem_type = DRM_BO_MEM_TT; - bo->offset = bo->mm_node->start << PAGE_SHIFT; + if (!(bo->mem.flags & DRM_BO_FLAG_BIND_CACHED)) + bo->mem.flags &= DRM_BO_FLAG_CACHED; + bo->mem.flags &= ~DRM_BO_MASK_MEM; + bo->mem.flags |= DRM_BO_FLAG_MEM_TT; + bo->mem.mem_type = DRM_BO_MEM_TT; + bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; #ifdef DRM_ODD_MM_COMPAT ret = drm_bo_remap_bound(bo); @@ -736,7 +734,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, #endif if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { - ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags); + ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); if (ret) DRM_ERROR("Could not flush read caches\n"); } @@ -746,8 +744,8 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, out_put_unlock: mutex_lock(&dev->struct_mutex); - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; mutex_unlock(&dev->struct_mutex); return ret; } @@ -948,7 +946,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) int ret = 0; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); - if (bo->mm_node) + if (bo->mem.mm_node) ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0); return ret; } @@ -1039,15 +1037,15 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, drm_bo_arg_reply_t * rep) { rep->handle = bo->base.hash.key; - rep->flags = bo->flags; - rep->size = bo->num_pages * PAGE_SIZE; + rep->flags = bo->mem.flags; + rep->size = bo->mem.num_pages * PAGE_SIZE; rep->offset = bo->offset; rep->arg_handle = bo->map_list.user_token; - rep->mask = bo->mask; + rep->mask = bo->mem.mask; rep->buffer_start = bo->buffer_start; rep->fence_flags = bo->fence_type; rep->rep_flags = 0; - rep->page_alignment = bo->page_alignment; + rep->page_alignment = bo->mem.page_alignment; if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) { DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY, @@ -1105,14 +1103,14 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, } if ((map_flags & DRM_BO_FLAG_READ) && - (bo->flags & DRM_BO_FLAG_READ_CACHED) && - (!(bo->flags & DRM_BO_FLAG_CACHED))) { + (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) && + (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) { drm_bo_read_cached(bo); } break; } else if ((map_flags & DRM_BO_FLAG_READ) && - (bo->flags & DRM_BO_FLAG_READ_CACHED) && - (!(bo->flags & DRM_BO_FLAG_CACHED))) { + (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) && + (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) { /* * We are already mapped with different flags. @@ -1228,18 +1226,23 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, return ret; - mem.num_pages = bo->num_pages; + mem.num_pages = bo->mem.num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.mask = new_mem_flags; - mem.page_alignment = bo->page_alignment; + mem.page_alignment = bo->mem.page_alignment; mutex_lock(&bm->evict_mutex); mutex_lock(&dev->struct_mutex); list_del(&bo->lru); list_add_tail(&bo->lru,&bm->unfenced); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + _DRM_BO_FLAG_UNFENCED); mutex_unlock(&dev->struct_mutex); + /* + * Determine where to move the buffer. 
+ */ + ret = drm_bo_mem_space(dev, &mem, no_wait); mutex_unlock(&bm->evict_mutex); @@ -1272,7 +1275,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - uint32_t flag_diff = (new_flags ^ bo->flags); + uint32_t flag_diff = (new_flags ^ bo->mem.flags); drm_bo_driver_t *driver = dev->driver->bo_driver; int ret; @@ -1282,7 +1285,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return -EINVAL; } - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags); + DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->mem.flags); ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); @@ -1294,8 +1297,8 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, */ if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) && - !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) { - if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { + !(bo->mem.flags & DRM_BO_FLAG_MEM_LOCAL)) { + if (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { DRM_ERROR("Cannot change caching policy of " "pinned buffer.\n"); return -EINVAL; @@ -1307,8 +1310,8 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } } - DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags); - flag_diff = (new_flags ^ bo->flags); + DRM_MASK_VAL(bo->mem.flags, DRM_BO_FLAG_BIND_CACHED, new_flags); + flag_diff = (new_flags ^ bo->mem.flags); /* * Check whether we dropped no_move policy, and in that case, @@ -1318,9 +1321,9 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && !(new_flags & DRM_BO_FLAG_NO_MOVE)) { mutex_lock(&dev->struct_mutex); - if (bo->mm_node) { - drm_mm_put_block(bo->mm_node); - bo->mm_node = NULL; + if (bo->mem.mm_node) { + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; } mutex_unlock(&dev->struct_mutex); } @@ -1359,7 +1362,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, mutex_unlock(&dev->struct_mutex); } - bo->flags = new_flags; + bo->mem.flags = new_flags; return 0; } @@ -1384,9 +1387,9 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, if (ret) goto out; - ret = drm_bo_new_flags(dev, bo->flags, - (flags & mask) | (bo->mask & ~mask), hint, - 0, &new_flags, &bo->mask); + ret = drm_bo_new_flags(dev, bo->mem.flags, + (flags & mask) | (bo->mem.mask & ~mask), hint, + 0, &new_flags, &bo->mem.mask); if (ret) goto out; @@ -1469,7 +1472,7 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) mutex_unlock(&dev->struct_mutex); if (ret) break; - bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); + bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); if (!bo->ttm) ret = -ENOMEM; break; @@ -1511,12 +1514,12 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, INIT_LIST_HEAD(&fbo->lru); list_splice_init(&bo->lru, &fbo->lru); - bo->mm_node = NULL; + bo->mem.mm_node = NULL; bo->ttm = NULL; bo->fence = NULL; - bo->flags = 0; + bo->mem.flags = 0; - fbo->mm_node->private = (void *)fbo; + fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); atomic_inc(&bm->count); mutex_unlock(&dev->struct_mutex); @@ -1572,9 +1575,9 @@ int drm_buffer_object_create(drm_file_t * priv, #endif bo->dev = dev; bo->type = type; - bo->num_pages = num_pages; - bo->mm_node = NULL; - bo->page_alignment = page_alignment; + bo->mem.num_pages = num_pages; + 
bo->mem.mm_node = NULL; + bo->mem.page_alignment = page_alignment; if (bo->type == drm_bo_type_fake) { bo->offset = buffer_start; bo->buffer_start = 0; @@ -1582,10 +1585,10 @@ int drm_buffer_object_create(drm_file_t * priv, bo->buffer_start = buffer_start; } bo->priv_flags = 0; - bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; atomic_inc(&bm->count); - ret = drm_bo_new_flags(dev, bo->flags, mask, hint, - 1, &new_flags, &bo->mask); + ret = drm_bo_new_flags(dev, bo->mem.flags, mask, hint, + 1, &new_flags, &bo->mem.mask); if (ret) goto out_err; ret = drm_bo_add_ttm(priv, bo); @@ -1800,7 +1803,7 @@ static int drm_bo_force_list_clean(drm_device_t * dev, drm_bo_usage_deref_locked(entry); goto retry; } - if (entry->mm_node) { + if (entry->mem.mm_node) { clean = 0; /* @@ -1836,14 +1839,14 @@ static int drm_bo_force_list_clean(drm_device_t * dev, 0); if (force_no_move) { - DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE, + DRM_MASK_VAL(entry->mem.flags, DRM_BO_FLAG_NO_MOVE, 0); } - if (entry->flags & DRM_BO_FLAG_NO_EVICT) { + if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) { DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " "cleanup. Removing flag and evicting.\n"); - entry->flags &= ~DRM_BO_FLAG_NO_EVICT; - entry->mask &= ~DRM_BO_FLAG_NO_EVICT; + entry->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; + entry->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; } ret = drm_bo_evict(entry, mem_type, 1, force_no_move); @@ -2181,7 +2184,7 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; + drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; *bus_size = 0; if (bo->type != drm_bo_type_dc) @@ -2208,8 +2211,8 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, *bus_base = man->io_offset; } - *bus_offset = bo->mm_node->start << PAGE_SHIFT; - *bus_size = bo->num_pages << PAGE_SHIFT; + *bus_offset = bo->mem.mm_node->start << PAGE_SHIFT; + *bus_size = bo->mem.num_pages << PAGE_SHIFT; return 0; } @@ -2232,7 +2235,7 @@ int drm_bo_ioremap(drm_buffer_object_t *bo) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[bo->mem_type]; + drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -2271,7 +2274,7 @@ void drm_bo_iounmap(drm_buffer_object_t *bo) bm = &dev->bm; - man = &bm->man[bo->mem_type]; + man = &bm->man[bo->mem.mem_type]; if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) iounmap(bo->iomap); @@ -2291,7 +2294,7 @@ void drm_bo_unmap_virtual(drm_buffer_object_t *bo) { drm_device_t *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; - loff_t holelen = ((loff_t) bo->num_pages) << PAGE_SHIFT; + loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } @@ -2335,12 +2338,12 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) map->offset = 0; map->type = _DRM_TTM; map->flags = _DRM_REMOVABLE; - map->size = bo->num_pages * PAGE_SIZE; + map->size = bo->mem.num_pages * PAGE_SIZE; atomic_inc(&bo->usage); map->handle = (void *) bo; list->file_offset_node = drm_mm_search_free(&dev->offset_manager, - bo->num_pages, 0, 0); + bo->mem.num_pages, 0, 0); if (!list->file_offset_node) { drm_bo_takedown_vm_locked(bo); @@ -2348,7 +2351,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) } 
list->file_offset_node = drm_mm_get_block(list->file_offset_node, - bo->num_pages, 0); + bo->mem.num_pages, 0); list->hash.key = list->file_offset_node->start; if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { From 71b9e876f99db219fcbf4e3ab977b64b068cc2b4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 6 Feb 2007 16:59:45 +0100 Subject: [PATCH 10/34] Simplify pci map vs no pci map choice. --- linux-core/drmP.h | 9 ++++-- linux-core/drm_agpsupport.c | 3 -- linux-core/drm_bo.c | 58 ++++++++++++++++++++----------------- linux-core/drm_compat.c | 24 ++++----------- linux-core/drm_ttm.h | 2 -- linux-core/drm_vm.c | 4 ++- linux-core/i915_buffer.c | 15 +++++++--- 7 files changed, 58 insertions(+), 57 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index a8f5e3e2..62efddd9 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -655,6 +655,7 @@ typedef struct drm_ref_object { #define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Supports cached binding */ #define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap before kernel access. */ +#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ typedef struct drm_mem_type_manager { int has_type; @@ -1037,9 +1038,9 @@ typedef struct drm_buffer_object{ /* For vm */ + drm_ttm_t *ttm; drm_map_list_t map_list; uint32_t memory_type; - drm_ttm_t *ttm; unsigned long bus_offset; uint32_t vm_flags; void *iomap; @@ -1491,10 +1492,14 @@ extern int drm_bo_ioctl(DRM_IOCTL_ARGS); extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); extern int drm_bo_driver_finish(drm_device_t *dev); extern int drm_bo_driver_init(drm_device_t *dev); -extern int drm_bo_pci_offset(const drm_buffer_object_t *bo, +extern int drm_bo_pci_offset(drm_device_t *dev, + drm_bo_mem_reg_t *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size); +extern int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem); + + extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo); extern int drm_fence_buffer_objects(drm_file_t * priv, struct list_head *list, diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 177180f9..e28ff0c1 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -710,7 +710,6 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, agp_priv->uncached_type = AGP_USER_MEMORY; agp_priv->bridge = dev->agp->bridge; agp_priv->populated = FALSE; - agp_be->aperture_base = dev->agp->agp_info.aper_base; agp_be->private = (void *) agp_priv; agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust; agp_be->populate = drm_agp_populate; @@ -720,8 +719,6 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, agp_be->destroy = drm_agp_destroy_ttm; DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE, (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0); - DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CMA, - (dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CMA : 0); agp_be->drm_map_type = _DRM_AGP; return agp_be; } diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 8a6b49dc..16c89f61 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -718,7 +718,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, } if (!(bo->mem.flags & DRM_BO_FLAG_BIND_CACHED)) - bo->mem.flags &= DRM_BO_FLAG_CACHED; + bo->mem.flags &= ~DRM_BO_FLAG_CACHED; bo->mem.flags &= ~DRM_BO_MASK_MEM; bo->mem.flags |= DRM_BO_FLAG_MEM_TT; bo->mem.mem_type = DRM_BO_MEM_TT; @@ -2163,6 +2163,26 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. 
*/ +int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + if (mem->mem_type == DRM_BO_MEM_LOCAL) + return 0; + + if (man->flags & _DRM_FLAG_MEMTYPE_CMA) + return 0; + + if ((mem->mask & DRM_BO_FLAG_BIND_CACHED) && + (man->flags & _DRM_FLAG_MEMTYPE_CACHED)) + return 0; + } + return 1; +} +EXPORT_SYMBOL(drm_mem_reg_is_pci); + /** * \c Get the PCI offset for the buffer object memory. * @@ -2174,48 +2194,32 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) * \return Failure indication. * * Returns -EINVAL if the buffer object is currently not mappable. - * Otherwise returns zero. Call bo->mutex locked. + * Otherwise returns zero. */ -int drm_bo_pci_offset(const drm_buffer_object_t *bo, +int drm_bo_pci_offset(drm_device_t *dev, + drm_bo_mem_reg_t *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) { - drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; *bus_size = 0; - if (bo->type != drm_bo_type_dc) - return -EINVAL; - if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) return -EINVAL; - - if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { - drm_ttm_t *ttm = bo->ttm; - if (!bo->ttm) { - return -EINVAL; - } - - drm_ttm_fixup_caching(ttm); - - if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) - return 0; - if (ttm->be->flags & DRM_BE_FLAG_CMA) - return 0; - *bus_base = ttm->be->aperture_base; - } else { + if (drm_mem_reg_is_pci(dev, mem)) { + *bus_offset = mem->mm_node->start << PAGE_SHIFT; + *bus_size = mem->num_pages << PAGE_SHIFT; *bus_base = man->io_offset; } - *bus_offset = bo->mem.mm_node->start << PAGE_SHIFT; - *bus_size = bo->mem.num_pages << PAGE_SHIFT; return 0; } + /** * \c Return a kernel virtual address to the buffer object PCI memory. * @@ -2231,7 +2235,8 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo, * Call bo->mutex locked. */ -int drm_bo_ioremap(drm_buffer_object_t *bo) +#if 0 +int drm_mem_reg_ioremap(drm_bo_mem_reg_t *mem) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -2281,6 +2286,7 @@ void drm_bo_iounmap(drm_buffer_object_t *bo) bo->iomap = NULL; } +#endif /** * \c Kill all user-space virtual mappings of this buffer object. 
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 48d598e8..044cf4a4 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -222,10 +222,6 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, drm_ttm_t *ttm; drm_buffer_manager_t *bm; drm_device_t *dev; - unsigned long bus_base; - unsigned long bus_offset; - unsigned long bus_size; - int err; mutex_lock(&bo->mutex); @@ -238,14 +234,8 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, } dev = bo->dev; - err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); - - if (err) { - page = NOPAGE_SIGBUS; - goto out_unlock; - } - if (bus_size != 0) { + if (drm_mem_reg_is_pci(dev, &bo->mem)) { DRM_ERROR("Invalid compat nopage.\n"); page = NOPAGE_SIGBUS; goto out_unlock; @@ -253,6 +243,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, bm = &dev->bm; ttm = bo->ttm; + drm_ttm_fixup_caching(ttm); page_offset = (address - vma->vm_start) >> PAGE_SHIFT; page = ttm->pages[page_offset]; @@ -284,7 +275,8 @@ int drm_bo_map_bound(struct vm_area_struct *vma) unsigned long bus_offset; unsigned long bus_size; - ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, + &bus_offset, &bus_size); BUG_ON(ret); if (bus_size) { @@ -415,14 +407,8 @@ int drm_bo_remap_bound(drm_buffer_object_t *bo) { vma_entry_t *v_entry; int ret = 0; - unsigned long bus_base; - unsigned long bus_offset; - unsigned long bus_size; - - ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); - BUG_ON(ret); - if (bus_size) { + if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) { list_for_each_entry(v_entry, &bo->vma_list, head) { ret = drm_bo_map_bound(v_entry->vma); if (ret) diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index 6aa1c5ad..3f649281 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -46,10 +46,8 @@ #define DRM_BE_FLAG_NEEDS_FREE 0x00000001 #define DRM_BE_FLAG_BOUND_CACHED 0x00000002 -#define DRM_BE_FLAG_CMA 0x00000004 /* Don't map through aperture */ typedef struct drm_ttm_backend { - unsigned long aperture_base; void *private; uint32_t flags; uint32_t drm_map_type; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index a4a9b09d..843fc362 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -754,7 +754,8 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, } dev = bo->dev; - err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, + &bus_size); if (err) { data->type = VM_FAULT_SIGBUS; @@ -770,6 +771,7 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, bm = &dev->bm; ttm = bo->ttm; + drm_ttm_fixup_caching(ttm); page = ttm->pages[page_offset]; if (!page) { page = drm_ttm_alloc_page(); diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 13a3e9bb..c1cdd112 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -71,8 +71,17 @@ int i915_init_mem_type(drm_device_t *dev, uint32_t type, switch(type) { case DRM_BO_MEM_LOCAL: case DRM_BO_MEM_TT: + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned) type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + man->io_addr = NULL; man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CACHED; + _DRM_FLAG_MEMTYPE_CACHED | + _DRM_FLAG_NEEDS_IOREMAP; break; case DRM_BO_MEM_PRIV0: if (!(drm_core_has_AGP(dev) && 
dev->agp)) { @@ -82,13 +91,11 @@ int i915_init_mem_type(drm_device_t *dev, uint32_t type, } man->io_offset = dev->agp->agp_info.aper_base; man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; - + man->io_addr = NULL; man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CACHED | _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; - man->io_addr = NULL; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned) type); From af24465b2eddfcc5296edc830ea5ed86065a4abd Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 7 Feb 2007 12:52:23 +0100 Subject: [PATCH 11/34] Fix a stray unlock_kernel() in drm_vm.c Add a file for memory move helpers, drm_bo_move.c Implement generic memory move. Cached, no_move and unmapped memory temporarily broken. --- linux-core/Makefile.kernel | 2 +- linux-core/drmP.h | 34 ++-- linux-core/drm_bo.c | 355 +++++++++++++++++-------------------- linux-core/drm_bo_move.c | 75 ++++++++ linux-core/drm_vm.c | 2 +- linux-core/i915_drv.c | 1 + 6 files changed, 265 insertions(+), 204 deletions(-) create mode 100644 linux-core/drm_bo_move.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index b531a70f..a6910d73 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ drm_memory_debug.o ati_pcigart.o drm_sman.o \ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ - drm_fence.o drm_ttm.o drm_bo.o + drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 62efddd9..cdab1cb1 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -669,6 +669,16 @@ typedef struct drm_mem_type_manager { void *io_addr; } drm_mem_type_manager_t; +typedef struct drm_bo_mem_reg { + drm_mm_node_t *mm_node; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint32_t flags; + uint32_t mask; +} drm_bo_mem_reg_t; + /* * buffer object driver */ @@ -685,6 +695,10 @@ typedef struct drm_bo_driver{ int (*init_mem_type)(struct drm_device *dev, uint32_t type, drm_mem_type_manager_t *man); uint32_t (*evict_flags) (struct drm_device *dev, uint32_t type); + int (*move)(struct drm_device *dev, + struct drm_ttm *ttm, int evict, int no_wait, + struct drm_bo_mem_reg *old_mem, + struct drm_bo_mem_reg *new_mem); } drm_bo_driver_t; @@ -999,16 +1013,6 @@ typedef struct drm_fence_object{ uint32_t submitted_flush; } drm_fence_object_t; -typedef struct drm_bo_mem_reg { - drm_mm_node_t *mm_node; - unsigned long size; - unsigned long num_pages; - uint32_t page_alignment; - uint32_t mem_type; - uint32_t flags; - uint32_t mask; -} drm_bo_mem_reg_t; - typedef struct drm_buffer_object{ drm_device_t *dev; @@ -1506,6 +1510,16 @@ extern int drm_fence_buffer_objects(drm_file_t * priv, uint32_t fence_flags, drm_fence_object_t *fence, drm_fence_object_t **used_fence); +/* + * Buffer object memory move helpers. 
+ * drm_bo_move.c + */ + +extern int drm_bo_move_ttm(drm_device_t *dev, + drm_ttm_t *ttm, int evict, + int no_wait, + drm_bo_mem_reg_t *old_mem, + drm_bo_mem_reg_t *new_mem); extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 16c89f61..3f1e891d 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -78,22 +78,6 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, struct list_head *list; drm_mem_type_manager_t *man; - bo->mem.mem_type = 0; - - switch(bo->mem.flags & DRM_BO_MASK_MEM) { - case DRM_BO_FLAG_MEM_TT: - bo->mem.mem_type = DRM_BO_MEM_TT; - break; - case DRM_BO_FLAG_MEM_VRAM: - bo->mem.mem_type = DRM_BO_MEM_VRAM; - break; - case DRM_BO_FLAG_MEM_LOCAL: - bo->mem.mem_type = DRM_BO_MEM_LOCAL; - break; - default: - BUG_ON(1); - } - man = &bm->man[bo->mem.mem_type]; list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &man->pinned : &man->lru; @@ -101,53 +85,136 @@ static void drm_bo_add_to_lru(drm_buffer_object_t * bo, return; } +static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, + int old_is_pci) +{ +#ifdef DRM_ODD_MM_COMPAT + int ret; + + ret = drm_bo_lock_kmm(bo); + if (ret) { + if (ret == -EAGAIN) + schedule(); + return ret; + } + drm_bo_unmap_virtual(bo); + if (old_is_pci) + drm_bo_finish_unmap(bo); +#else + drm_bo_unmap_virtual(bo); +#endif + return 0; +} + +static void drm_bo_vm_post_move(drm_buffer_object_t *bo) +{ +#ifdef DRM_ODD_MM_COMPAT + int ret; + + ret = drm_bo_remap_bound(bo); + if (ret) { + DRM_ERROR("Failed to remap a bound buffer object.\n" + "\tThis might cause a sigbus later.\n"); + } + drm_bo_unlock_kmm(bo); +#endif +} + /* - * bo locked. + * Call bo->mutex locked. 
*/ -static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, - int force_no_move) +static int drm_bo_add_ttm(drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; int ret = 0; - if (bo->mem.mm_node) { -#ifdef DRM_ODD_MM_COMPAT - mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_kmm(bo); - if (ret) { - mutex_unlock(&dev->struct_mutex); - if (ret == -EAGAIN) - schedule(); - return ret; - } - drm_bo_unmap_virtual(bo); - drm_bo_finish_unmap(bo); - drm_bo_unlock_kmm(bo); -#else - drm_bo_unmap_virtual(bo); - mutex_lock(&dev->struct_mutex); -#endif - if (evict) - drm_ttm_evict(bo->ttm); - else - drm_ttm_unbind(bo->ttm); + bo->ttm = NULL; - bo->mem.mem_type = DRM_BO_MEM_LOCAL; - if (!(bo->mem.flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - } - mutex_unlock(&dev->struct_mutex); + switch (bo->type) { + case drm_bo_type_dc: + bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + if (!bo->ttm) + ret = -ENOMEM; + break; + case drm_bo_type_user: + case drm_bo_type_fake: + break; + default: + DRM_ERROR("Illegal buffer object type\n"); + ret = -EINVAL; + break; } - bo->mem.flags &= ~DRM_BO_FLAG_MEM_TT; - bo->mem.flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + return ret; +} + + +static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, + drm_bo_mem_reg_t *mem, + int evict, + int no_wait) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); + int new_is_pci = drm_mem_reg_is_pci(dev, mem); + drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type]; + drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; + int ret = 0; + + + if (old_is_pci || new_is_pci) + ret = drm_bo_vm_pre_move(bo, old_is_pci); + if (ret) + return ret; + + if ((!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) || + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) && + (bo->ttm == NULL)) + ret = drm_bo_add_ttm(bo); + if (ret) + return ret; + + if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + ret = drm_bo_move_ttm(dev, bo->ttm, evict, no_wait, + &bo->mem, mem); + } else if (dev->driver->bo_driver->move) { + ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, + no_wait, &bo->mem, mem); + } else { + ret = -EINVAL; + DRM_ERROR("Unsupported function\n"); +#if 0 + ret = drm_bo_move_memcpy(dev, bo->ttm, evict, no_wait, + &bo->mem, mem); + ret = 0; +#endif + } + + if (old_is_pci || new_is_pci) + drm_bo_vm_post_move(bo); + + if (ret) + return ret; + + if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { + ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); + if (ret) + DRM_ERROR("Can not flush read caches\n"); + } + + DRM_FLAG_MASKED(bo->priv_flags, + (evict) ? _DRM_BO_FLAG_EVICTED : 0, + _DRM_BO_FLAG_EVICTED); + + if (bo->mem.mm_node) + bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; return 0; } - /* * Call bo->mutex locked. * Wait until the buffer is idle. @@ -503,12 +570,11 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, /* * Someone might have modified the buffer before we took the buffer mutex. 
- */ +< */ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) goto out; - if (!(bo->mem.flags & drm_bo_type_flags(mem_type))) - goto out; + if (bo->mem.mem_type != mem_type) ret = drm_bo_wait(bo, 0, 0, no_wait); @@ -520,36 +586,36 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); - ret = drm_bo_mem_space(dev, &evict_mem, no_wait); - if (ret && ret != -EAGAIN) { - DRM_ERROR("Failed to find memory space for " - "buffer eviction.\n"); + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Failed to find memory space for " + "buffer eviction.\n"); goto out; } - if ((mem_type != DRM_BO_MEM_TT) && - (evict_mem.mem_type != DRM_BO_MEM_LOCAL)) { - ret = -EINVAL; - DRM_ERROR("Unsupported memory types for eviction.\n"); + ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); + + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Buffer eviction failed\n"); goto out; } - - ret = drm_move_tt_to_local(bo, 1, force_no_move); - if (ret) - goto out; + mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); + if (evict_mem.mm_node) { + drm_mm_put_block(evict_mem.mm_node); + evict_mem.mm_node = NULL; + } + list_del(&bo->lru); drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); - if (ret) - goto out; - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, _DRM_BO_FLAG_EVICTED); - out: + +out: return ret; } @@ -682,74 +748,6 @@ static int drm_bo_mem_space(drm_device_t *dev, return ret; } - - - -static int drm_move_local_to_tt(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, - int no_wait) -{ - drm_device_t *dev = bo->dev; - int ret = 0; - - bo->mem.mm_node = mem->mm_node; - - DRM_DEBUG("Flipping in to AGP 0x%08lx 0x%08lx\n", - bo->mem.mm_node->start, bo->mem.mm_node->size); - -#ifdef DRM_ODD_MM_COMPAT - mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_kmm(bo); - if (ret) { - mutex_unlock(&dev->struct_mutex); - goto out_put_unlock; - } -#endif - drm_bo_unmap_virtual(bo); - ret = drm_bind_ttm(bo->ttm, bo->mem.flags & DRM_BO_FLAG_BIND_CACHED, - bo->mem.mm_node->start); - - if (ret) { -#ifdef DRM_ODD_MM_COMPAT - drm_bo_unlock_kmm(bo); - mutex_unlock(&dev->struct_mutex); -#endif - goto out_put_unlock; - } - - if (!(bo->mem.flags & DRM_BO_FLAG_BIND_CACHED)) - bo->mem.flags &= ~DRM_BO_FLAG_CACHED; - bo->mem.flags &= ~DRM_BO_MASK_MEM; - bo->mem.flags |= DRM_BO_FLAG_MEM_TT; - bo->mem.mem_type = DRM_BO_MEM_TT; - bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; - -#ifdef DRM_ODD_MM_COMPAT - ret = drm_bo_remap_bound(bo); - if (ret) { - /* FIXME */ - } - drm_bo_unlock_kmm(bo); - mutex_unlock(&dev->struct_mutex); -#endif - - if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { - ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); - if (ret) - DRM_ERROR("Could not flush read caches\n"); - } - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED); - - return 0; - -out_put_unlock: - mutex_lock(&dev->struct_mutex); - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - mutex_unlock(&dev->struct_mutex); - return ret; -} - static int drm_bo_new_flags(drm_device_t * dev, uint32_t flags, uint32_t new_mask, uint32_t hint, int init, uint32_t * n_flags, uint32_t * n_mask) @@ -1249,19 +1247,21 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, if (ret) return ret; - if (mem.mem_type == DRM_BO_MEM_TT) { - ret = drm_move_local_to_tt(bo, &mem, no_wait); - if (ret) { - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); - drm_bo_add_to_lru(bo, bm); - 
mutex_unlock(&dev->struct_mutex); - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - return ret; + ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); + + if (ret) { + mutex_lock(&dev->struct_mutex); + if (mem.mm_node) { + drm_mm_put_block(mem.mm_node); + mem.mm_node = NULL; } - } else { - drm_move_tt_to_local(bo, 0, force_no_move); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + list_del_init(&bo->lru); + drm_bo_add_to_lru(bo, bm); + mutex_unlock(&dev->struct_mutex); + return ret; } + return 0; } @@ -1280,11 +1280,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, int ret; - if (new_flags & DRM_BO_FLAG_MEM_VRAM) { - DRM_ERROR("Vram support not implemented yet\n"); - return -EINVAL; - } - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->mem.flags); ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type); if (ret) { @@ -1341,6 +1336,13 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } } + + if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { + ret = drm_bo_add_ttm(bo); + if (ret) + return ret; + } + if (move_unfenced) { /* @@ -1453,41 +1455,6 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, return ret; } -/* - * Call bo->mutex locked. - */ - -static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) -{ - drm_device_t *dev = bo->dev; - int ret = 0; - - bo->ttm = NULL; - bo->map_list.user_token = 0ULL; - - switch (bo->type) { - case drm_bo_type_dc: - mutex_lock(&dev->struct_mutex); - ret = drm_bo_setup_vm_locked(bo); - mutex_unlock(&dev->struct_mutex); - if (ret) - break; - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); - if (!bo->ttm) - ret = -ENOMEM; - break; - case drm_bo_type_user: - case drm_bo_type_fake: - break; - default: - DRM_ERROR("Illegal buffer object type\n"); - ret = -EINVAL; - break; - } - - return ret; -} - /* * Transfer a buffer object's memory and LRU status to a newly * created object. User-space references remains with the old @@ -1591,10 +1558,14 @@ int drm_buffer_object_create(drm_file_t * priv, 1, &new_flags, &bo->mem.mask); if (ret) goto out_err; - ret = drm_bo_add_ttm(priv, bo); - if (ret) - goto out_err; - + + if (bo->type == drm_bo_type_dc) { + mutex_lock(&dev->struct_mutex); + ret = drm_bo_setup_vm_locked(bo); + mutex_unlock(&dev->struct_mutex); + if (ret) + goto out_err; + } ret = drm_buffer_object_validate(bo, new_flags, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c new file mode 100644 index 00000000..abfa8f80 --- /dev/null +++ b/linux-core/drm_bo_move.c @@ -0,0 +1,75 @@ +/************************************************************************** + * + * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" + +int drm_bo_move_ttm(drm_device_t *dev, + drm_ttm_t *ttm, + int evict, + int no_wait, + drm_bo_mem_reg_t *old_mem, + drm_bo_mem_reg_t *new_mem) +{ + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + int ret; + + if (old_mem->mem_type == DRM_BO_MEM_TT) { + + if (evict) + drm_ttm_evict(ttm); + else + drm_ttm_unbind(ttm); + + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + mutex_unlock(&dev->struct_mutex); + save_flags |= DRM_BO_FLAG_CACHED; + + } else { + + ret = drm_bind_ttm(ttm, + new_mem->flags & DRM_BO_FLAG_BIND_CACHED, + new_mem->mm_node->start); + if (ret) + return ret; + + if (!(new_mem->flags & DRM_BO_FLAG_BIND_CACHED)) { + save_flags &= ~DRM_BO_FLAG_CACHED; + } + + } + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_MASK_VAL(save_flags, new_mem->flags, DRM_BO_MASK_MEM); + return 0; +} diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 843fc362..416ac4ae 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -513,7 +513,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) /* Length must match exact page count */ if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { - unlock_kernel(); return -EINVAL; } @@ -588,6 +587,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) * the AGP mapped at physical address 0 * --BenH. */ + if (!vma->vm_pgoff #if __OS_HAS_AGP && (!dev->agp diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 8c39c249..19b63b7f 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -64,6 +64,7 @@ static drm_bo_driver_t i915_bo_driver = { .invalidate_caches = i915_invalidate_caches, .init_mem_type = i915_init_mem_type, .evict_flags = i915_evict_flags, + .move = NULL, }; #endif From c1fbd8a56653b91af57a408bbcf20a760a2bd8c8 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 7 Feb 2007 17:25:13 +0100 Subject: [PATCH 12/34] Checkpoint commit. Flag handling and memory type selection cleanup. glxgears won't start. 
--- linux-core/drmP.h | 4 +- linux-core/drm_agpsupport.c | 8 +- linux-core/drm_bo.c | 282 +++++++++++++----------------------- linux-core/drm_bo_move.c | 21 ++- linux-core/drm_ttm.c | 2 +- linux-core/drm_ttm.h | 5 +- linux-core/i915_buffer.c | 5 +- shared-core/drm.h | 16 +- tests/ttmtest/src/ttmtest.c | 6 +- 9 files changed, 142 insertions(+), 207 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index cdab1cb1..d3a9a2a5 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -652,10 +652,12 @@ typedef struct drm_ref_object { #define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ #define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ -#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Supports cached binding */ +#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ #define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap before kernel access. */ #define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ +#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ + typedef struct drm_mem_type_manager { int has_type; diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index e28ff0c1..6b93d249 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -606,8 +606,8 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, int ret; DRM_DEBUG("drm_agp_bind_ttm\n"); - DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED, - (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0); + DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0, + DRM_BE_FLAG_BOUND_CACHED); mem->is_flushed = TRUE; mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type; ret = drm_agp_bind_memory(mem, offset); @@ -717,8 +717,8 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, agp_be->bind = drm_agp_bind_ttm; agp_be->unbind = drm_agp_unbind_ttm; agp_be->destroy = drm_agp_destroy_ttm; - DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE, - (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0); + DRM_FLAG_MASKED(agp_be->flags, (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0, + DRM_BE_FLAG_NEEDS_FREE); agp_be->drm_map_type = _DRM_AGP; return agp_be; } diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 3f1e891d..64abb118 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -59,10 +59,6 @@ static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, int no_wait); -#define DRM_FLAG_MASKED(_old, _new, _mask) {\ -(_old) ^= (((_old) ^ (_new)) & (_mask)); \ -} - static inline uint32_t drm_bo_type_flags(unsigned type) { return (1 << (24 + type)); @@ -570,7 +566,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, /* * Someone might have modified the buffer before we took the buffer mutex. 
-< */ + */ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) goto out; @@ -668,11 +664,34 @@ static int drm_bo_mem_force_space(drm_device_t *dev, mutex_unlock(&dev->struct_mutex); mem->mm_node = node; mem->mem_type = mem_type; - mem->flags = drm_bo_type_flags(mem_type); return 0; } +static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, + uint32_t mem_type, + uint32_t mask, + uint32_t *res_mask) +{ + uint32_t cur_flags = drm_bo_type_flags(mem_type); + + if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) + cur_flags |= DRM_BO_FLAG_CACHED; + if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) + cur_flags |= DRM_BO_FLAG_MAPPABLE; + if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) + DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); + + if (!(mask & DRM_BO_FLAG_FORCE_CACHING)) + DRM_FLAG_MASKED(mask, cur_flags, DRM_BO_FLAG_CACHED); + if (!(mask & DRM_BO_FLAG_FORCE_MAPPABLE)) + DRM_FLAG_MASKED(mask, cur_flags, DRM_BO_FLAG_MAPPABLE); + + *res_mask = mask; + return ((cur_flags & mask & DRM_BO_MASK_MEMTYPE) == cur_flags); +} + + static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, int no_wait) @@ -684,6 +703,7 @@ static int drm_bo_mem_space(drm_device_t *dev, const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; uint32_t mem_type = DRM_BO_MEM_LOCAL; + uint32_t cur_flags; int type_found = 0; int type_ok = 0; int has_eagain = 0; @@ -692,14 +712,17 @@ static int drm_bo_mem_space(drm_device_t *dev, for (i=0; imask ; + man = &bm->man[mem_type]; + + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); + if (!type_ok) continue; if (mem_type == DRM_BO_MEM_LOCAL) break; - man = &bm->man[mem_type]; mutex_lock(&dev->struct_mutex); if (man->has_type && man->use_type) { type_found = 1; @@ -717,28 +740,30 @@ static int drm_bo_mem_space(drm_device_t *dev, if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { mem->mm_node = node; mem->mem_type = mem_type; - mem->flags = drm_bo_type_flags(mem_type); + mem->flags = cur_flags; return 0; } - if (!type_found) { - DRM_ERROR("Requested memory types are not supported\n"); + if (!type_found) return -EINVAL; - } - + num_prios = dev->driver->bo_driver->num_mem_busy_prio; prios = dev->driver->bo_driver->mem_busy_prio; for (i=0; imask)) + man = &bm->man[mem_type]; + + if (!drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags)) continue; - man = &bm->man[mem_type]; ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); - if (ret == 0) + if (ret == 0) { + mem->flags = cur_flags; return 0; + } if (ret == -EAGAIN) has_eagain = 1; @@ -748,23 +773,10 @@ static int drm_bo_mem_space(drm_device_t *dev, return ret; } -static int drm_bo_new_flags(drm_device_t * dev, - uint32_t flags, uint32_t new_mask, uint32_t hint, - int init, uint32_t * n_flags, uint32_t * n_mask) +static int drm_bo_new_mask(drm_buffer_object_t *bo, + uint32_t new_mask, uint32_t hint) { - uint32_t new_flags = 0; uint32_t new_props; - drm_buffer_manager_t *bm = &dev->bm; - unsigned i; - - /* - * First adjust the mask to take away nonexistant memory types. 
- */ - - for (i = 0; i < DRM_BO_MEM_TYPES; ++i) { - if (!bm->man[i].use_type) - new_mask &= ~drm_bo_type_flags(i); - } if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { DRM_ERROR @@ -772,67 +784,7 @@ static int drm_bo_new_flags(drm_device_t * dev, "processes\n"); return -EPERM; } - if (new_mask & DRM_BO_FLAG_BIND_CACHED) { - if (((new_mask & DRM_BO_FLAG_MEM_TT) && - !(bm->man[DRM_BO_MEM_TT].flags & - _DRM_FLAG_MEMTYPE_CACHED) && - ((new_mask & DRM_BO_FLAG_MEM_VRAM) - && !(bm->man[DRM_BO_MEM_VRAM].flags & - _DRM_FLAG_MEMTYPE_CACHED)))) { - new_mask &= ~DRM_BO_FLAG_BIND_CACHED; - } else { - if (!(bm->man[DRM_BO_MEM_TT].flags & - _DRM_FLAG_MEMTYPE_CACHED)) - new_flags &= DRM_BO_FLAG_MEM_TT; - if (!(bm->man[DRM_BO_MEM_VRAM].flags & - _DRM_FLAG_MEMTYPE_CACHED)) - new_flags &= DRM_BO_FLAG_MEM_VRAM; - } - } - if ((new_mask & DRM_BO_FLAG_READ_CACHED) && - !(new_mask & DRM_BO_FLAG_BIND_CACHED)) { - if ((new_mask & DRM_BO_FLAG_NO_EVICT) && - !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) { - DRM_ERROR - ("Cannot read cached from a pinned VRAM / TT buffer\n"); - return -EINVAL; - } - } - - /* - * Determine new memory location: - */ - - if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) { - - new_flags = new_mask & DRM_BO_MASK_MEM; - - if (!new_flags) { - DRM_ERROR("Invalid buffer object memory flags\n"); - return -EINVAL; - } - - if (new_flags & DRM_BO_FLAG_MEM_LOCAL) { - if ((hint & DRM_BO_HINT_AVOID_LOCAL) && - new_flags & (DRM_BO_FLAG_MEM_VRAM | - DRM_BO_FLAG_MEM_TT)) { - new_flags &= ~DRM_BO_FLAG_MEM_LOCAL; - } else { - new_flags = DRM_BO_FLAG_MEM_LOCAL; - } - } - if (new_flags & DRM_BO_FLAG_MEM_TT) { - if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) && - new_flags & DRM_BO_FLAG_MEM_VRAM) { - new_flags = DRM_BO_FLAG_MEM_VRAM; - } else { - new_flags = DRM_BO_FLAG_MEM_TT; - } - } - } else { - new_flags = flags & DRM_BO_MASK_MEM; - } new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -842,22 +794,11 @@ static int drm_bo_new_flags(drm_device_t * dev, return -EINVAL; } - new_flags |= new_mask & ~DRM_BO_MASK_MEM; + /* + * FIXME: Check what can be done about pinned buffers here. + */ - if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) && - (new_flags & DRM_BO_FLAG_NO_EVICT) && - (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) { - if (!(flags & DRM_BO_FLAG_CACHED)) { - DRM_ERROR - ("Cannot change caching policy of pinned buffer\n"); - return -EINVAL; - } else { - new_flags &= ~DRM_BO_FLAG_CACHED; - } - } - - *n_flags = new_flags; - *n_mask = new_mask; + bo->mem.mask = new_mask; return 0; } @@ -1200,7 +1141,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, drm_buffer_manager_t *bm = &dev->bm; int ret = 0; drm_bo_mem_reg_t mem; - /* * Flush outstanding fences. */ @@ -1232,7 +1172,7 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, mutex_lock(&bm->evict_mutex); mutex_lock(&dev->struct_mutex); list_del(&bo->lru); - list_add_tail(&bo->lru,&bm->unfenced); + list_add_tail(&bo->lru, &bm->unfenced); DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); mutex_unlock(&dev->struct_mutex); @@ -1240,7 +1180,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, /* * Determine where to move the buffer. 
*/ - ret = drm_bo_mem_space(dev, &mem, no_wait); mutex_unlock(&bm->evict_mutex); @@ -1250,85 +1189,76 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); if (ret) { - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev->struct_mutex); if (mem.mm_node) { drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; } DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - list_del_init(&bo->lru); + list_del(&bo->lru); drm_bo_add_to_lru(bo, bm); - mutex_unlock(&dev->struct_mutex); - return ret; + mutex_unlock(&dev->struct_mutex); } - return 0; + return ret; } + +static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem) +{ + uint32_t + flag_diff = (mem->mask ^ mem->flags); + + if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) + return 0; + if ((flag_diff & DRM_BO_FLAG_CACHED) && + (mem->mask & DRM_BO_FLAG_FORCE_CACHING)) + return 0; + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && + (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)) + return 0; + return 1; +} + /* * bo locked. */ static int drm_buffer_object_validate(drm_buffer_object_t * bo, - uint32_t new_flags, int move_unfenced, int no_wait) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - uint32_t flag_diff = (new_flags ^ bo->mem.flags); + uint32_t flag_diff = (bo->mem.mask ^ bo->mem.flags); drm_bo_driver_t *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->mem.flags); - ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type); + DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, + bo->mem.flags); + ret = driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); return ret; } - /* - * Move out if we need to change caching policy. - */ - - if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) && - !(bo->mem.flags & DRM_BO_FLAG_MEM_LOCAL)) { - if (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { - DRM_ERROR("Cannot change caching policy of " - "pinned buffer.\n"); - return -EINVAL; - } - ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0); - if (ret) { - if (ret != -EAGAIN) - DRM_ERROR("Failed moving buffer.\n"); - return ret; - } - } - DRM_MASK_VAL(bo->mem.flags, DRM_BO_FLAG_BIND_CACHED, new_flags); - flag_diff = (new_flags ^ bo->mem.flags); - /* * Check whether we dropped no_move policy, and in that case, - * release reserved manager regions. + * release reserved manager regions, if we're evicted. */ if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && - !(new_flags & DRM_BO_FLAG_NO_MOVE)) { - mutex_lock(&dev->struct_mutex); - if (bo->mem.mm_node) { - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - } - mutex_unlock(&dev->struct_mutex); + !(bo->mem.mask & DRM_BO_FLAG_NO_MOVE)) { + /* FIXME */ } /* * Check whether we need to move buffer. */ - if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) { - ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1); + if (!drm_bo_mem_compat(&bo->mem)) { + ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, + no_wait, 1); if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed moving buffer.\n"); @@ -1336,6 +1266,9 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } } + /* + * We might need to add a TTM. 
+ */ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { ret = drm_bo_add_ttm(bo); @@ -1344,11 +1277,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } if (move_unfenced) { - - /* - * Place on unfenced list. - */ - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); mutex_lock(&dev->struct_mutex); @@ -1356,15 +1284,19 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, list_add_tail(&bo->lru, &bm->unfenced); mutex_unlock(&dev->struct_mutex); } else { - DRM_FLAG_MASKED(bo->priv_flags, 0, - _DRM_BO_FLAG_UNFENCED); mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); + list_del(&bo->lru); + if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { + DRM_FLAG_MASKED(bo->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(& bo->event_queue); + } drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); - } - - bo->mem.flags = new_flags; + } + + DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); + return 0; } @@ -1373,10 +1305,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, drm_bo_arg_reply_t * rep) { drm_buffer_object_t *bo; - drm_device_t *dev = priv->head->dev; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; - uint32_t new_flags; bo = drm_lookup_buffer_object(priv, handle, 1); if (!bo) { @@ -1389,16 +1319,13 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, if (ret) goto out; - ret = drm_bo_new_flags(dev, bo->mem.flags, - (flags & mask) | (bo->mem.mask & ~mask), hint, - 0, &new_flags, &bo->mem.mask); - + DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); + ret = drm_bo_new_mask(bo, flags, hint); if (ret) goto out; ret = - drm_buffer_object_validate(bo, new_flags, - !(hint & DRM_BO_HINT_DONT_FENCE), + drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE), no_wait); drm_bo_fill_rep_arg(bo, rep); @@ -1495,6 +1422,8 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, *new_obj = fbo; return 0; } + + int drm_buffer_object_create(drm_file_t * priv, @@ -1510,7 +1439,6 @@ int drm_buffer_object_create(drm_file_t * priv, drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *bo; int ret = 0; - uint32_t new_flags; unsigned long num_pages; if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) { @@ -1553,9 +1481,10 @@ int drm_buffer_object_create(drm_file_t * priv, } bo->priv_flags = 0; bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; atomic_inc(&bm->count); - ret = drm_bo_new_flags(dev, bo->mem.flags, mask, hint, - 1, &new_flags, &bo->mem.mask); + ret = drm_bo_new_mask(bo, mask, hint); + if (ret) goto out_err; @@ -1566,7 +1495,7 @@ int drm_buffer_object_create(drm_file_t * priv, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, new_flags, 0, + ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) goto out_err; @@ -1806,12 +1735,10 @@ static int drm_bo_force_list_clean(drm_device_t * dev, entry->fence = NULL; } - DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED, - 0); + DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); if (force_no_move) { - DRM_MASK_VAL(entry->mem.flags, DRM_BO_FLAG_NO_MOVE, - 0); + DRM_FLAG_MASKED(entry->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); } if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) { DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " @@ -2146,8 +2073,7 @@ int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem) if (man->flags & _DRM_FLAG_MEMTYPE_CMA) return 0; - if ((mem->mask & 
DRM_BO_FLAG_BIND_CACHED) && - (man->flags & _DRM_FLAG_MEMTYPE_CACHED)) + if (mem->flags & DRM_BO_FLAG_CACHED) return 0; } return 1; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index abfa8f80..b4486bfe 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -50,26 +50,25 @@ int drm_bo_move_ttm(drm_device_t *dev, mutex_lock(&dev->struct_mutex); drm_mm_put_block(old_mem->mm_node); + old_mem->mm_node = NULL; mutex_unlock(&dev->struct_mutex); - save_flags |= DRM_BO_FLAG_CACHED; - - } else { - + DRM_FLAG_MASKED(old_mem->flags, + DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); + old_mem->mem_type = DRM_BO_MEM_LOCAL; + save_flags = old_mem->flags; + } + if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { ret = drm_bind_ttm(ttm, - new_mem->flags & DRM_BO_FLAG_BIND_CACHED, + new_mem->flags & DRM_BO_FLAG_CACHED, new_mem->mm_node->start); if (ret) return ret; - - if (!(new_mem->flags & DRM_BO_FLAG_BIND_CACHED)) { - save_flags &= ~DRM_BO_FLAG_CACHED; - } - } *old_mem = *new_mem; new_mem->mm_node = NULL; old_mem->mask = save_mask; - DRM_MASK_VAL(save_flags, new_mem->flags, DRM_BO_MASK_MEM); + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 9a2ce5cd..34282292 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -134,7 +134,7 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached) if (do_tlbflush) flush_agp_mappings(); - DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached); + DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED); return 0; } diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index 3f649281..6f62712d 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -96,8 +96,9 @@ extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); extern int drm_destroy_ttm(drm_ttm_t * ttm); -#define DRM_MASK_VAL(dest, mask, val) \ - (dest) = ((dest) & ~(mask)) | ((val) & (mask)); +#define DRM_FLAG_MASKED(_old, _new, _mask) {\ +(_old) ^= (((_old) ^ (_new)) & (_mask)); \ +} #define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1) #define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS) diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index c1cdd112..5d1c39be 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -70,6 +70,9 @@ int i915_init_mem_type(drm_device_t *dev, uint32_t type, { switch(type) { case DRM_BO_MEM_LOCAL: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + break; case DRM_BO_MEM_TT: if (!(drm_core_has_AGP(dev) && dev->agp)) { DRM_ERROR("AGP is not enabled for memory type %u\n", @@ -80,7 +83,7 @@ int i915_init_mem_type(drm_device_t *dev, uint32_t type, man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; man->io_addr = NULL; man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CACHED | + _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; break; case DRM_BO_MEM_PRIV0: diff --git a/shared-core/drm.h b/shared-core/drm.h index 38cca882..71189559 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -713,7 +713,7 @@ typedef struct drm_fence_arg { */ #define DRM_BO_FLAG_NO_EVICT 0x00000010 /* Always keep a system memory shadow to a vram buffer */ -#define DRM_BO_FLAG_SHADOW_VRAM 0x00000020 +#define DRM_BO_FLAG_MAPPABLE 0x00000020 /* The buffer is shareable with other processes */ #define DRM_BO_FLAG_SHAREABLE 0x00000040 /* The buffer is currently cached */ @@ -724,13 +724,16 @@ typedef struct drm_fence_arg { * part of 
buffer manager shutdown or swapout. Not supported yet.*/ #define DRM_BO_FLAG_NO_MOVE 0x00000100 + +/* + * Request flags. + */ + /* Make sure the buffer is in cached memory when mapped for reading */ -#define DRM_BO_FLAG_READ_CACHED 0x00080000 -/* When there is a choice between VRAM and TT, prefer VRAM. - The default behaviour is to prefer TT. */ -#define DRM_BO_FLAG_PREFER_VRAM 0x00040000 +#define DRM_BO_FLAG_READ_CACHED 0x00080000 /* Bind this buffer cached if the hardware supports it. */ -#define DRM_BO_FLAG_BIND_CACHED 0x0002000 +#define DRM_BO_FLAG_FORCE_CACHING 0x00002000 +#define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000 /* System Memory */ #define DRM_BO_FLAG_MEM_LOCAL 0x01000000 @@ -746,6 +749,7 @@ typedef struct drm_fence_arg { /* Memory flag mask */ #define DRM_BO_MASK_MEM 0xFF000000 +#define DRM_BO_MASK_MEMTYPE 0xFF0000A0 /* When creating a buffer, Avoid system storage even if allowed */ #define DRM_BO_HINT_AVOID_LOCAL 0x00000001 diff --git a/tests/ttmtest/src/ttmtest.c b/tests/ttmtest/src/ttmtest.c index 085a0746..606fb0cb 100644 --- a/tests/ttmtest/src/ttmtest.c +++ b/tests/ttmtest/src/ttmtest.c @@ -182,7 +182,7 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size, drm_bo_type_dc, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | - DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_NO_MOVE, 0, &buf)); + DRM_BO_FLAG_MEM_LOCAL /*| DRM_BO_FLAG_NO_MOVE*/, 0, &buf)); curTime = fastrdtsc(); *ticks++ = time_diff(oldTime, curTime); @@ -260,8 +260,8 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size, oldTime = fastrdtsc(); ret = drmBOValidate(ctx->drmFD, &buf, - DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_BIND_CACHED, - DRM_BO_MASK_MEM | DRM_BO_FLAG_BIND_CACHED, DRM_BO_HINT_DONT_FENCE); + DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING, + DRM_BO_MASK_MEMTYPE | DRM_BO_FLAG_FORCE_CACHING, DRM_BO_HINT_DONT_FENCE); curTime = fastrdtsc(); drmUnlock(ctx->drmFD, ctx->hwContext); From 09984ad77bdeca0e9d87b1fe2be1489205fda297 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 11:55:24 +0100 Subject: [PATCH 13/34] Update memory compatibility tests. Now only pinned buffers are broken. 
--- linux-core/drm_bo.c | 89 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 78 insertions(+), 11 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 64abb118..f4147be2 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -129,7 +129,7 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) switch (bo->type) { case drm_bo_type_dc: - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); if (!bo->ttm) ret = -ENOMEM; break; @@ -173,10 +173,10 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, return ret; if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { ret = drm_bo_move_ttm(dev, bo->ttm, evict, no_wait, &bo->mem, mem); - } else if (dev->driver->bo_driver->move) { + } else if (dev->driver->bo_driver->move) { ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, no_wait, &bo->mem, mem); } else { @@ -345,6 +345,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) drm_destroy_ttm(bo->ttm); bo->ttm = NULL; } + atomic_dec(&bm->count); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); @@ -571,6 +572,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) goto out; if (bo->mem.mem_type != mem_type) + goto out; ret = drm_bo_wait(bo, 0, 0, no_wait); @@ -580,6 +582,9 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } + if (bo->type != drm_bo_type_dc) + goto out1; + evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); ret = drm_bo_mem_space(dev, &evict_mem, no_wait); @@ -599,6 +604,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } +out1: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { drm_mm_put_block(evict_mem.mm_node); @@ -674,6 +680,7 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, uint32_t *res_mask) { uint32_t cur_flags = drm_bo_type_flags(mem_type); + uint32_t flag_diff; if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) cur_flags |= DRM_BO_FLAG_CACHED; @@ -682,13 +689,21 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); - if (!(mask & DRM_BO_FLAG_FORCE_CACHING)) - DRM_FLAG_MASKED(mask, cur_flags, DRM_BO_FLAG_CACHED); - if (!(mask & DRM_BO_FLAG_FORCE_MAPPABLE)) - DRM_FLAG_MASKED(mask, cur_flags, DRM_BO_FLAG_MAPPABLE); - - *res_mask = mask; - return ((cur_flags & mask & DRM_BO_MASK_MEMTYPE) == cur_flags); + if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) { + return 0; + } + flag_diff = (mask ^ cur_flags); + if ((flag_diff & DRM_BO_FLAG_CACHED) && + (mask & DRM_BO_FLAG_FORCE_CACHING)) { + return 0; + } + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && + (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) { + return 0; + } + + *res_mask = cur_flags; + return 1; } @@ -778,6 +793,16 @@ static int drm_bo_new_mask(drm_buffer_object_t *bo, { uint32_t new_props; + if (bo->type == drm_bo_type_user) { + DRM_ERROR("User buffers are not supported yet\n"); + return -EINVAL; + } + if (bo->type == drm_bo_type_fake && + !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { + DRM_ERROR("Fake buffers must be pinned.\n"); + return -EINVAL; + } + if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { DRM_ERROR ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " @@ -785,7 +810,6 @@ static int 
drm_bo_new_mask(drm_buffer_object_t *bo, return -EPERM; } - new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -1220,6 +1244,43 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem) return 1; } +static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man; + uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; + const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; + uint32_t i; + int type_ok = 0; + uint32_t mem_type = 0; + uint32_t cur_flags; + + if (drm_bo_mem_compat(mem)) + return 0; + + BUG_ON(mem->mm_node); + + for (i=0; iman[mem_type]; + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); + if (type_ok) + break; + } + + if (type_ok) { + mem->mm_node = NULL; + mem->mem_type = mem_type; + mem->flags = cur_flags; + DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE); + return 0; + } + + DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); + return -EINVAL; +} + /* * bo locked. */ @@ -1242,6 +1303,12 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } + if (bo->type == drm_bo_type_fake) { + ret = drm_bo_check_fake(dev, &bo->mem); + if (ret) + return ret; + } + /* * Check whether we dropped no_move policy, and in that case, * release reserved manager regions, if we're evicted. From 1257907fa9a24de7aa95485e1b3ab509fdc4d4e6 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 13:29:08 +0100 Subject: [PATCH 14/34] Simplify external ttm page allocation. Implement a memcpy fallback for copying between buffers. --- linux-core/drmP.h | 6 +- linux-core/drm_bo.c | 73 ---------------- linux-core/drm_bo_move.c | 177 +++++++++++++++++++++++++++++++++++++++ linux-core/drm_compat.c | 14 +--- linux-core/drm_ttm.c | 32 ++++--- linux-core/drm_ttm.h | 2 +- linux-core/drm_vm.c | 13 +-- 7 files changed, 210 insertions(+), 107 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index d3a9a2a5..aff10b62 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1522,7 +1522,11 @@ extern int drm_bo_move_ttm(drm_device_t *dev, int no_wait, drm_bo_mem_reg_t *old_mem, drm_bo_mem_reg_t *new_mem); - +extern int drm_bo_move_memcpy(drm_device_t *dev, + drm_ttm_t *ttm, int evict, + int no_wait, + drm_bo_mem_reg_t *old_mem, + drm_bo_mem_reg_t *new_mem); extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index f4147be2..845db3fe 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -180,13 +180,8 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, no_wait, &bo->mem, mem); } else { - ret = -EINVAL; - DRM_ERROR("Unsupported function\n"); -#if 0 ret = drm_bo_move_memcpy(dev, bo->ttm, evict, no_wait, &bo->mem, mem); - ret = 0; -#endif } if (old_is_pci || new_is_pci) @@ -2184,74 +2179,6 @@ int drm_bo_pci_offset(drm_device_t *dev, } -/** - * \c Return a kernel virtual address to the buffer object PCI memory. - * - * \param bo The buffer object. - * \return Failure indication. - * - * Returns -EINVAL if the buffer object is currently not mappable. - * Returns -ENOMEM if the ioremap operation failed. - * Otherwise returns zero. 
- * - * After a successfull call, bo->iomap contains the virtual address, or NULL - * if the buffer object content is not accessible through PCI space. - * Call bo->mutex locked. - */ - -#if 0 -int drm_mem_reg_ioremap(drm_bo_mem_reg_t *mem) -{ - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; - unsigned long bus_offset; - unsigned long bus_size; - unsigned long bus_base; - int ret; - - BUG_ON(bo->iomap); - - ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); - if (ret || bus_size == 0) - return ret; - - if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) - bo->iomap = (void *) (((u8 *)man->io_addr) + bus_offset); - else { - bo->iomap = ioremap_nocache(bus_base + bus_offset, bus_size); - if (bo->iomap) - return -ENOMEM; - } - - return 0; -} - -/** - * \c Unmap mapping obtained using drm_bo_ioremap - * - * \param bo The buffer object. - * - * Call bo->mutex locked. - */ - -void drm_bo_iounmap(drm_buffer_object_t *bo) -{ - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; - - - bm = &dev->bm; - man = &bm->man[bo->mem.mem_type]; - - if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) - iounmap(bo->iomap); - - bo->iomap = NULL; -} -#endif - /** * \c Kill all user-space virtual mappings of this buffer object. * diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index b4486bfe..23e8c0f2 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -72,3 +72,180 @@ int drm_bo_move_ttm(drm_device_t *dev, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } + + +/** + * \c Return a kernel virtual address to the buffer object PCI memory. + * + * \param bo The buffer object. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Returns -ENOMEM if the ioremap operation failed. + * Otherwise returns zero. + * + * After a successfull call, bo->iomap contains the virtual address, or NULL + * if the buffer object content is not accessible through PCI space. + * Call bo->mutex locked. + */ + + +int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + unsigned long bus_offset; + unsigned long bus_size; + unsigned long bus_base; + int ret; + void *addr; + + *virtual = NULL; + ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size); + if (ret || bus_size == 0) + return ret; + + if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + addr = (void *) (((u8 *)man->io_addr) + bus_offset); + else { + addr = ioremap_nocache(bus_base + bus_offset, bus_size); + if (!addr) + return -ENOMEM; + } + *virtual = addr; + return 0; +} + + +/** + * \c Unmap mapping obtained using drm_bo_ioremap + * + * \param bo The buffer object. + * + * Call bo->mutex locked. 
+ */ + +void drm_mem_reg_iounmap(drm_device_t *dev, drm_bo_mem_reg_t *mem, + void *virtual) +{ + drm_buffer_manager_t *bm; + drm_mem_type_manager_t *man; + + + bm = &dev->bm; + man = &bm->man[mem->mem_type]; + + if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + iounmap(virtual); +} + + +static int drm_copy_io_page(void *dst, void *src, unsigned long page) +{ + uint32_t *dstP = (uint32_t *)((unsigned long) dst + (page << PAGE_SHIFT)); + uint32_t *srcP = (uint32_t *)((unsigned long) src + (page << PAGE_SHIFT)); + + int i; + for (i=0; i < PAGE_SIZE / sizeof(uint32_t); ++i) + iowrite32(ioread32(srcP++), dstP++); + return 0; +} + +static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) +{ + struct page *d = drm_ttm_get_page(ttm, page); + void *dst; + + if (!d) + return -ENOMEM; + + src = (void *)((unsigned long) src + (page << PAGE_SHIFT)); + dst = kmap(d); + if (!dst) + return -ENOMEM; + + memcpy_fromio(dst, src, PAGE_SIZE); + kunmap(dst); + return 0; +} + +static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) +{ + struct page *s = drm_ttm_get_page(ttm, page); + void *src; + + if (!s) + return -ENOMEM; + + dst = (void *)((unsigned long) dst + (page << PAGE_SHIFT)); + src = kmap(s); + if (!src) + return -ENOMEM; + + memcpy_toio(dst, src, PAGE_SIZE); + kunmap(src); + return 0; +} + + +int drm_bo_move_memcpy(drm_device_t *dev, + drm_ttm_t *ttm, + int evict, + int no_wait, + drm_bo_mem_reg_t *old_mem, + drm_bo_mem_reg_t *new_mem) +{ + void *old_iomap; + void *new_iomap; + int ret; + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + unsigned long i; + unsigned long page; + unsigned long add = 0; + int dir; + + + ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); + if (ret) + return ret; + ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); + if (ret) + goto out; + + if (old_iomap == NULL && new_iomap == NULL) + goto out2; + + add = 0; + dir = 1; + + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { + dir = -1; + add = new_mem->num_pages - 1; + } + + for (i=0; i < new_mem->num_pages; ++i) { + page = i*dir + add; + if (old_iomap == NULL) + ret = drm_copy_ttm_io_page(ttm, new_iomap, page); + else if (new_iomap == NULL) + ret = drm_copy_io_ttm_page(ttm, old_iomap, page); + else + ret = drm_copy_io_page(new_iomap, old_iomap, page); + if (ret) + goto out1; + } + +out2: + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); +out1: + drm_mem_reg_iounmap(dev, new_mem, &new_iomap); +out: + drm_mem_reg_iounmap(dev, old_mem, old_iomap); + return ret; +} diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 044cf4a4..d0bca672 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -220,7 +220,6 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long page_offset; struct page *page; drm_ttm_t *ttm; - drm_buffer_manager_t *bm; drm_device_t *dev; mutex_lock(&bo->mutex); @@ -241,20 +240,13 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, goto out_unlock; } - bm = &dev->bm; ttm = bo->ttm; drm_ttm_fixup_caching(ttm); page_offset = (address - vma->vm_start) >> PAGE_SHIFT; - page = ttm->pages[page_offset]; - + page = drm_ttm_get_page(ttm, page_offset); if (!page) { - page = drm_ttm_alloc_page(); - if (!page) { - page = NOPAGE_OOM; - goto out_unlock; - } - ttm->pages[page_offset] = page; - ++bm->cur_pages; 
+ page = NOPAGE_OOM; + goto out_unlock; } get_page(page); diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 34282292..5c270bee 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -80,7 +80,7 @@ static void ttm_free_pages(drm_ttm_t *ttm) } -struct page *drm_ttm_alloc_page(void) +static struct page *drm_ttm_alloc_page(void) { struct page *page; @@ -192,27 +192,37 @@ int drm_destroy_ttm(drm_ttm_t * ttm) return 0; } +struct page *drm_ttm_get_page(drm_ttm_t *ttm, int index) +{ + struct page *p; + drm_buffer_manager_t *bm = &ttm->dev->bm; + + p = ttm->pages[index]; + if (!p) { + p = drm_ttm_alloc_page(); + if (!p) + return NULL; + ttm->pages[index] = p; + ++bm->cur_pages; + } + return p; +} + + static int drm_ttm_populate(drm_ttm_t * ttm) { struct page *page; unsigned long i; - drm_buffer_manager_t *bm; drm_ttm_backend_t *be; if (ttm->state != ttm_unpopulated) return 0; - bm = &ttm->dev->bm; be = ttm->be; for (i = 0; i < ttm->num_pages; ++i) { - page = ttm->pages[i]; - if (!page) { - page = drm_ttm_alloc_page(); - if (!page) - return -ENOMEM; - ttm->pages[i] = page; - ++bm->cur_pages; - } + page = drm_ttm_get_page(ttm, i); + if (!page) + return -ENOMEM; } be->populate(be, ttm->num_pages, ttm->pages); ttm->state = ttm_unbound; diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index 6f62712d..37003c43 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -82,11 +82,11 @@ typedef struct drm_ttm { extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern struct page *drm_ttm_alloc_page(void); extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); extern void drm_ttm_unbind(drm_ttm_t * ttm); extern void drm_ttm_evict(drm_ttm_t * ttm); extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); +extern struct page *drm_ttm_get_page(drm_ttm_t *ttm, int index); /* * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 416ac4ae..25779eca 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -736,7 +736,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; - drm_buffer_manager_t *bm; drm_device_t *dev; unsigned long pfn; int err; @@ -768,19 +767,13 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; pgprot = drm_io_prot(_DRM_AGP, vma); } else { - bm = &dev->bm; ttm = bo->ttm; drm_ttm_fixup_caching(ttm); - page = ttm->pages[page_offset]; + page = drm_ttm_get_page(ttm, page_offset); if (!page) { - page = drm_ttm_alloc_page(); - if (!page) { - data->type = VM_FAULT_OOM; - goto out_unlock; - } - ttm->pages[page_offset] = page; - ++bm->cur_pages; + data->type = VM_FAULT_OOM; + goto out_unlock; } pfn = page_to_pfn(page); pgprot = vma->vm_page_prot; From e4b2da440699f581a8779ea8cb9e99e4c903e6a7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 16:21:38 +0100 Subject: [PATCH 15/34] A minor function interface change and some memcpy bugfixing. Hooray!! it sort of works with a fixed AGP area as faked VRAM. 
--- linux-core/drmP.h | 15 ++++++--------- linux-core/drm_bo.c | 10 +++------- linux-core/drm_bo_move.c | 33 ++++++++++++++++++++++----------- linux-core/drm_compat.c | 2 -- 4 files changed, 31 insertions(+), 29 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index aff10b62..7b8f2c66 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -697,9 +697,8 @@ typedef struct drm_bo_driver{ int (*init_mem_type)(struct drm_device *dev, uint32_t type, drm_mem_type_manager_t *man); uint32_t (*evict_flags) (struct drm_device *dev, uint32_t type); - int (*move)(struct drm_device *dev, - struct drm_ttm *ttm, int evict, int no_wait, - struct drm_bo_mem_reg *old_mem, + int (*move)(struct drm_buffer_object *bo, + int evict, int no_wait, struct drm_bo_mem_reg *new_mem); } drm_bo_driver_t; @@ -1517,15 +1516,13 @@ extern int drm_fence_buffer_objects(drm_file_t * priv, * drm_bo_move.c */ -extern int drm_bo_move_ttm(drm_device_t *dev, - drm_ttm_t *ttm, int evict, +extern int drm_bo_move_ttm(drm_buffer_object_t *bo, + int evict, int no_wait, - drm_bo_mem_reg_t *old_mem, drm_bo_mem_reg_t *new_mem); -extern int drm_bo_move_memcpy(drm_device_t *dev, - drm_ttm_t *ttm, int evict, +extern int drm_bo_move_memcpy(drm_buffer_object_t *bo, + int evict, int no_wait, - drm_bo_mem_reg_t *old_mem, drm_bo_mem_reg_t *new_mem); extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 845db3fe..728afb41 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -124,7 +124,6 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; int ret = 0; - bo->ttm = NULL; switch (bo->type) { @@ -174,14 +173,11 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { - ret = drm_bo_move_ttm(dev, bo->ttm, evict, no_wait, - &bo->mem, mem); + ret = drm_bo_move_ttm(bo, evict, no_wait, mem); } else if (dev->driver->bo_driver->move) { - ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, - no_wait, &bo->mem, mem); + ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); } else { - ret = drm_bo_move_memcpy(dev, bo->ttm, evict, no_wait, - &bo->mem, mem); + ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); } if (old_is_pci || new_is_pci) diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 23e8c0f2..b7a49299 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -30,13 +30,14 @@ #include "drmP.h" -int drm_bo_move_ttm(drm_device_t *dev, - drm_ttm_t *ttm, +int drm_bo_move_ttm(drm_buffer_object_t *bo, int evict, int no_wait, - drm_bo_mem_reg_t *old_mem, drm_bo_mem_reg_t *new_mem) { + drm_device_t *dev = bo->dev; + drm_ttm_t *ttm = bo->ttm; + drm_bo_mem_reg_t *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; int ret; @@ -135,8 +136,9 @@ void drm_mem_reg_iounmap(drm_device_t *dev, drm_bo_mem_reg_t *mem, bm = &dev->bm; man = &bm->man[mem->mem_type]; - if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { iounmap(virtual); + } } @@ -188,13 +190,16 @@ static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) } -int drm_bo_move_memcpy(drm_device_t *dev, - drm_ttm_t *ttm, +int drm_bo_move_memcpy(drm_buffer_object_t *bo, int evict, int no_wait, - drm_bo_mem_reg_t *old_mem, 
drm_bo_mem_reg_t *new_mem) { + drm_device_t *dev = bo->dev; + drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; + drm_ttm_t *ttm = bo->ttm; + drm_bo_mem_reg_t *old_mem = &bo->mem; + drm_bo_mem_reg_t old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -205,7 +210,6 @@ int drm_bo_move_memcpy(drm_device_t *dev, unsigned long add = 0; int dir; - ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); if (ret) return ret; @@ -237,15 +241,22 @@ int drm_bo_move_memcpy(drm_device_t *dev, if (ret) goto out1; } - + mb(); out2: *old_mem = *new_mem; new_mem->mm_node = NULL; old_mem->mask = save_mask; DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); + + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) { + drm_ttm_unbind(ttm); + drm_destroy_ttm(ttm); + bo->ttm = NULL; + } + out1: - drm_mem_reg_iounmap(dev, new_mem, &new_iomap); + drm_mem_reg_iounmap(dev, new_mem, new_iomap); out: - drm_mem_reg_iounmap(dev, old_mem, old_iomap); + drm_mem_reg_iounmap(dev, &old_copy, old_iomap); return ret; } diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index d0bca672..8dd15ded 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -250,7 +250,6 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, } get_page(page); - out_unlock: mutex_unlock(&bo->mutex); return page; @@ -274,7 +273,6 @@ int drm_bo_map_bound(struct vm_area_struct *vma) if (bus_size) { unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; pgprot_t pgprot = drm_io_prot(_DRM_AGP, vma); - ret = io_remap_pfn_range(vma, vma->vm_start, pfn, vma->vm_end - vma->vm_start, pgprot); From bf8f46d4c64eb5b66814223f7e5ddb8d8e7a555e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 18:59:02 +0100 Subject: [PATCH 16/34] Fix mm_block leak. Some other minor fixes. --- linux-core/drmP.h | 4 +--- linux-core/drm_bo_move.c | 5 +++++ linux-core/drm_vm.c | 1 - 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 7b8f2c66..55035210 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -608,9 +608,6 @@ typedef enum { } drm_object_type_t; - - - /* * A user object is a structure that helps the drm give out user handles * to kernel internal objects and to keep track of these objects so that @@ -647,6 +644,7 @@ typedef struct drm_ref_object { drm_ref_t unref_action; } drm_ref_object_t; +struct drm_buffer_object; #include "drm_ttm.h" diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index b7a49299..4ed3392d 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -243,6 +243,11 @@ int drm_bo_move_memcpy(drm_buffer_object_t *bo, } mb(); out2: + if (old_mem->mm_node) { + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + mutex_unlock(&dev->struct_mutex); + } *old_mem = *new_mem; new_mem->mm_node = NULL; old_mem->mask = save_mask; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 25779eca..5afa9800 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -732,7 +732,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, { unsigned long address = data->address; drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; - drm_local_map_t *map; unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; From a0ed808d05a7965366e329a6e8f4e538350b9c23 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 19:06:39 +0100 Subject: [PATCH 17/34] Don't create a ttm just to copy from. 
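Abridged, the point of the change: a ttm is only allocated when the buffer
will actually end up in non-fixed (page-backed) memory, and the memcpy path
learns to tolerate a NULL source ttm, since copies out of fixed memory go
through the ioremap instead:

    /* drm_bo_handle_move_mem(): allocate only for a non-fixed destination. */
    if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL))
            ret = drm_bo_add_ttm(bo);

    /* drm_bo_move_memcpy(): with neither an ioremap of the old region
     * nor a ttm there is nothing to copy, so the move degenerates to a
     * plain region switch. */
    if (old_iomap == NULL && ttm == NULL)
            goto out2;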
--- linux-core/drm_bo.c | 3 +-- linux-core/drm_bo_move.c | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 728afb41..e3ecaf45 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -164,8 +164,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, if (ret) return ret; - if ((!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) || - !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) && + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) ret = drm_bo_add_ttm(bo); if (ret) diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 4ed3392d..9bfb3ef1 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -219,6 +219,8 @@ int drm_bo_move_memcpy(drm_buffer_object_t *bo, if (old_iomap == NULL && new_iomap == NULL) goto out2; + if (old_iomap == NULL && ttm == NULL) + goto out2; add = 0; dir = 1; From b2bcbf874b0f26ca0c490fb0453bef64ce6d9dd7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 21:28:33 +0100 Subject: [PATCH 18/34] Add an accelerated buffer copy cleanup helper. Export helper functions and make some important buffer-object functions non-static. Add an i915 accelerated blit buffer move for pci memory buffers. --- linux-core/drmP.h | 16 ++++++ linux-core/drm_bo.c | 62 +++----------------- linux-core/drm_bo_move.c | 119 +++++++++++++++++++++++++++++++++++++++ linux-core/i915_buffer.c | 34 ++++++++++- 4 files changed, 177 insertions(+), 54 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 55035210..86dcd79f 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1509,6 +1509,15 @@ extern int drm_fence_buffer_objects(drm_file_t * priv, uint32_t fence_flags, drm_fence_object_t *fence, drm_fence_object_t **used_fence); +extern void drm_bo_add_to_lru(drm_buffer_object_t * bo, + drm_buffer_manager_t * bm); +extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + int no_wait); +extern int drm_bo_mem_space(drm_device_t *dev, + drm_bo_mem_reg_t *mem, + int no_wait); + + /* * Buffer object memory move helpers. * drm_bo_move.c @@ -1522,6 +1531,13 @@ extern int drm_bo_move_memcpy(drm_buffer_object_t *bo, int evict, int no_wait, drm_bo_mem_reg_t *new_mem); +extern int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, + int evict, + int no_wait, + uint32_t fence_type, + uint32_t fence_flags, + drm_bo_mem_reg_t *new_mem); + extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index e3ecaf45..67e7d37f 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -56,8 +56,6 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo); static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo); static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo); static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); -static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, - int no_wait); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -68,8 +66,8 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. 
*/ -static void drm_bo_add_to_lru(drm_buffer_object_t * bo, - drm_buffer_manager_t * bm) +void drm_bo_add_to_lru(drm_buffer_object_t * bo, + drm_buffer_manager_t * bm) { struct list_head *list; drm_mem_type_manager_t *man; @@ -206,8 +204,8 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, * Wait until the buffer is idle. */ -static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, - int no_wait) +int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + int no_wait) { drm_fence_object_t *fence = bo->fence; @@ -697,9 +695,9 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, } -static int drm_bo_mem_space(drm_device_t *dev, - drm_bo_mem_reg_t *mem, - int no_wait) +int drm_bo_mem_space(drm_device_t *dev, + drm_bo_mem_reg_t *mem, + int no_wait) { drm_buffer_manager_t *bm= &dev->bm; drm_mem_type_manager_t *man; @@ -777,6 +775,8 @@ static int drm_bo_mem_space(drm_device_t *dev, ret = (has_eagain) ? -EAGAIN : -ENOMEM; return ret; } +EXPORT_SYMBOL(drm_bo_mem_space); + static int drm_bo_new_mask(drm_buffer_object_t *bo, uint32_t new_mask, uint32_t hint) @@ -1439,50 +1439,6 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, return ret; } -/* - * Transfer a buffer object's memory and LRU status to a newly - * created object. User-space references remains with the old - * object. Call bo->mutex locked. - */ - -int drm_buffer_object_transfer(drm_buffer_object_t *bo, - drm_buffer_object_t **new_obj) -{ - drm_buffer_object_t *fbo; - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - - fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); - if (!fbo) - return -ENOMEM; - - *fbo = *bo; - mutex_init(&fbo->mutex); - mutex_lock(&fbo->mutex); - mutex_lock(&dev->struct_mutex); - - INIT_LIST_HEAD(&fbo->ddestroy); - INIT_LIST_HEAD(&fbo->lru); - list_splice_init(&bo->lru, &fbo->lru); - - bo->mem.mm_node = NULL; - bo->ttm = NULL; - bo->fence = NULL; - bo->mem.flags = 0; - - fbo->mem.mm_node->private = (void *)fbo; - atomic_set(&fbo->usage, 1); - atomic_inc(&bm->count); - mutex_unlock(&dev->struct_mutex); - mutex_unlock(&fbo->mutex); - - *new_obj = fbo; - return 0; -} - - - - int drm_buffer_object_create(drm_file_t * priv, unsigned long size, drm_bo_type_t type, diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 9bfb3ef1..d2c44501 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -73,6 +73,7 @@ int drm_bo_move_ttm(drm_buffer_object_t *bo, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } +EXPORT_SYMBOL(drm_bo_move_ttm); /** @@ -267,3 +268,121 @@ out: drm_mem_reg_iounmap(dev, &old_copy, old_iomap); return ret; } +EXPORT_SYMBOL(drm_bo_move_memcpy); + +/* + * Transfer a buffer object's memory and LRU status to a newly + * created object. User-space references remains with the old + * object. Call bo->mutex locked. 
+ */ + +int drm_buffer_object_transfer(drm_buffer_object_t *bo, + drm_buffer_object_t **new_obj) +{ + drm_buffer_object_t *fbo; + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + + fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); + if (!fbo) + return -ENOMEM; + + *fbo = *bo; + mutex_init(&fbo->mutex); + mutex_lock(&fbo->mutex); + mutex_lock(&dev->struct_mutex); + + INIT_LIST_HEAD(&fbo->ddestroy); + INIT_LIST_HEAD(&fbo->lru); + + bo->mem.mm_node = NULL; + bo->ttm = NULL; + atomic_inc(&bo->fence->usage); + bo->mem.flags = 0; + + fbo->mem.mm_node->private = (void *)fbo; + atomic_set(&fbo->usage, 1); + atomic_inc(&bm->count); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&fbo->mutex); + + *new_obj = fbo; + return 0; +} + +int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, + int evict, + int no_wait, + uint32_t fence_type, + uint32_t fence_flags, + drm_bo_mem_reg_t *new_mem) +{ + drm_device_t *dev = bo->dev; + drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; + drm_bo_mem_reg_t *old_mem = &bo->mem; + int ret; + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + drm_buffer_object_t *old_obj; + + if (bo->fence) + drm_fence_usage_deref_unlocked(dev, bo->fence); + + ret = drm_fence_object_create(dev, fence_type, + fence_flags | DRM_FENCE_FLAG_EMIT, + &bo->fence); + if (ret) + return ret; + + if (evict) { + ret = drm_bo_wait(bo, 0, 1, 0); + if (ret) + return ret; + if (old_mem->mm_node) { + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + old_mem->mm_node = NULL; + mutex_unlock(&dev->struct_mutex); + } + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + (bo->ttm != NULL)) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + } else { + + /* This should help pipeline ordinary buffer moves. + * + * Hang old buffer memory on a new buffer object, + * and leave it to be released when the blit + * operation has completed. 
+ */ + + ret = drm_buffer_object_transfer(bo, &old_obj); + if (ret) + return ret; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) + old_obj->ttm = NULL; + else + bo->ttm = NULL; + + atomic_inc(&old_obj->fence->usage); + mutex_lock(&dev->struct_mutex); + list_del(&old_obj->lru); + drm_bo_add_to_lru(old_obj, &old_obj->dev->bm); + drm_bo_usage_deref_locked(old_obj); + mutex_unlock(&dev->struct_mutex); + + } + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); + return 0; +} +EXPORT_SYMBOL(drm_bo_move_accel_cleanup); + + diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 5d1c39be..41f05b78 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -118,7 +118,7 @@ uint32_t i915_evict_flags(drm_device_t *dev, uint32_t type) } } -void i915_emit_copy_blit(drm_device_t *dev, +static void i915_emit_copy_blit(drm_device_t *dev, uint32_t src_offset, uint32_t dst_offset, uint32_t pages, @@ -161,3 +161,35 @@ void i915_emit_copy_blit(drm_device_t *dev, } return; } + +static int drm_bo_move_blit(drm_buffer_object_t *bo, + int evict, + int no_wait, + drm_bo_mem_reg_t *new_mem) +{ + drm_bo_mem_reg_t *old_mem = &bo->mem; + int dir = 0; + + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { + dir = 1; + } + + i915_emit_copy_blit(bo->dev, + old_mem->mm_node->start << PAGE_SHIFT, + new_mem->mm_node->start << PAGE_SHIFT, + new_mem->num_pages, + dir); + + i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH); + + return drm_bo_move_accel_cleanup(bo, evict, no_wait, + DRM_FENCE_TYPE_EXE | + DRM_I915_FENCE_TYPE_RW, + DRM_I915_FENCE_FLAG_FLUSHED, + new_mem); +} + + + From 6a49d9a8abd9f168211017c2d585d0d64e89c530 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:02:02 +0100 Subject: [PATCH 19/34] Fix evict_mutex locking range. Implement unmappable buffers. (fault moves them to mappable when needed). Various bugfixes. --- linux-core/drm_bo.c | 31 +++++++++++++------------------ linux-core/drm_bo_move.c | 5 +---- linux-core/drm_vm.c | 24 ++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 22 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 67e7d37f..3e0d05d2 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1149,7 +1149,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, */ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, - int no_wait, int force_no_move) + int no_wait, int force_no_move, int move_unfenced) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -1161,14 +1161,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, drm_bo_busy(bo); - /* - * Make sure we're not mapped. - */ - - ret = drm_bo_wait_unmapped(bo, no_wait); - if (ret) - return ret; - /* * Wait for outstanding fences. */ @@ -1195,15 +1187,15 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * Determine where to move the buffer. 
*/ ret = drm_bo_mem_space(dev, &mem, no_wait); - mutex_unlock(&bm->evict_mutex); if (ret) - return ret; + goto out_unlock; ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); - if (ret) { - mutex_lock(&dev->struct_mutex); + out_unlock: + if (ret || !move_unfenced) { + mutex_lock(&dev->struct_mutex); if (mem.mm_node) { drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; @@ -1214,6 +1206,7 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, mutex_unlock(&dev->struct_mutex); } + mutex_unlock(&bm->evict_mutex); return ret; } @@ -1293,6 +1286,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } + ret = drm_bo_wait_unmapped(bo, no_wait); + if (ret) + return ret; + if (bo->type == drm_bo_type_fake) { ret = drm_bo_check_fake(dev, &bo->mem); if (ret) @@ -1315,7 +1312,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, if (!drm_bo_mem_compat(&bo->mem)) { ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, - no_wait, 1); + no_wait, 1, move_unfenced); if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed moving buffer.\n"); @@ -1728,11 +1725,9 @@ static int drm_bo_force_list_clean(drm_device_t * dev, unsigned long _end = jiffies + 3 * DRM_HZ; do { ret = drm_bo_wait(entry, 0, 1, 0); - if (ret && allow_errors) { - if (ret == -EINTR) - ret = -EAGAIN; + if (ret && allow_errors) goto out_err; - } + } while (ret && !time_after_eq(jiffies, _end)); if (entry->fence) { diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index d2c44501..53f7fea8 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -295,10 +295,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); - bo->mem.mm_node = NULL; - bo->ttm = NULL; atomic_inc(&bo->fence->usage); - bo->mem.flags = 0; fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); @@ -355,7 +352,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, /* This should help pipeline ordinary buffer moves. * * Hang old buffer memory on a new buffer object, - * and leave it to be released when the blit + * and leave it to be released when the GPU * operation has completed. */ diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 5afa9800..4a41e761 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -746,6 +746,30 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, mutex_lock(&bo->mutex); + /* + * If buffer happens to be in a non-mappable location, + * move it to a mappable. + */ + + if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { + uint32_t mask_save = bo->mem.mask; + uint32_t new_mask = bo->mem.mask | + DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_FORCE_MAPPABLE; + + err = drm_bo_move_buffer(bo, new_mask, 0, 0); + bo->mem.mask = mask_save; + + if (!err) + err = drm_bo_wait(bo, 0, 0, 0); + + if (err) { + data->type = (err == -EAGAIN) ? + VM_FAULT_MINOR : VM_FAULT_SIGBUS; + goto out_unlock; + } + } + if (address > vma->vm_end) { data->type = VM_FAULT_SIGBUS; goto out_unlock; From 99acdaee482fc8a2fc6718317e2f546401e93739 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:07:29 +0100 Subject: [PATCH 20/34] Fix copyright statements. 
--- linux-core/drm_bo.c | 11 +++++------ linux-core/drm_bo_move.c | 2 +- linux-core/drm_fence.c | 11 +++++------ linux-core/drm_object.c | 14 ++++++++------ linux-core/drm_ttm.c | 14 ++++++++------ linux-core/drm_ttm.h | 11 +++++------ 6 files changed, 32 insertions(+), 31 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 3e0d05d2..c8e1e2b6 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -10,6 +10,10 @@ * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, @@ -18,11 +22,6 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * * **************************************************************************/ /* diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 53f7fea8..e1340205 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 06d48255..b4125c6e 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -11,6 +11,10 @@ * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL @@ -19,11 +23,6 @@ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - * **************************************************************************/ /* * Authors: Thomas Hellström diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 0157329c..939cf0d7 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -10,6 +10,10 @@ * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, @@ -18,13 +22,11 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * * **************************************************************************/ +/* + * Authors: Thomas Hellström + */ #include "drmP.h" diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 5c270bee..fa55a8b4 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -11,6 +11,10 @@ * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL @@ -18,13 +22,11 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * * **************************************************************************/ +/* + * Authors: Thomas Hellström + */ #include "drmP.h" diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index 37003c43..e10db8c1 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -11,6 +11,10 @@ * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL @@ -19,11 +23,6 @@ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * **************************************************************************/ /* * Authors: Thomas Hellström From d32b21e016c371b8676f42da5fc3aeded039a6c8 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:11:53 +0100 Subject: [PATCH 21/34] Remove some code that should have gone in commit 6a49d9a8abd9f168211017c2d585d0d64e89c530 --- linux-core/drm_bo.c | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index c8e1e2b6..3339d5a8 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1329,25 +1329,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } - if (move_unfenced) { - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, - _DRM_BO_FLAG_UNFENCED); - mutex_lock(&dev->struct_mutex); - list_del(&bo->lru); - list_add_tail(&bo->lru, &bm->unfenced); - mutex_unlock(&dev->struct_mutex); - } else { - mutex_lock(&dev->struct_mutex); - list_del(&bo->lru); - if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { - DRM_FLAG_MASKED(bo->priv_flags, 0, - _DRM_BO_FLAG_UNFENCED); - DRM_WAKEUP(& bo->event_queue); - } - drm_bo_add_to_lru(bo, bm); - mutex_unlock(&dev->struct_mutex); - } - DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); return 0; From 57df3980724d3da446c4576b3fadcd89c5da414e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 12:43:18 +0100 Subject: [PATCH 22/34] Reinstate some LRU handling. --- linux-core/drm_bo.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 3339d5a8..8d2b544e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1145,6 +1145,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, /* * bo->mutex locked. + * Note that new_mem_flags are NOT transferred to the bo->mem.mask. */ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, @@ -1200,6 +1201,7 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, mem.mm_node = NULL; } DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(&bo->event_queue); list_del(&bo->lru); drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); @@ -1329,6 +1331,25 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } + /* + * Adjust lru to be sure. 
+ */ + + mutex_lock(&dev->struct_mutex); + list_del(&bo->lru); + if (move_unfenced) { + list_add_tail(&bo->lru, &bm->unfenced); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + _DRM_BO_FLAG_UNFENCED); + } else { + drm_bo_add_to_lru(bo, bm); + if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { + DRM_WAKEUP(&bo->event_queue); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + } + } + mutex_unlock(&dev->struct_mutex); + DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); return 0; From 53aee3122a1821b8ca24ed2bc5c1940cb0f2ff8e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 16:36:53 +0100 Subject: [PATCH 23/34] I915 accelerated blit copy functional. Fixed - to System memory copies are implemented by flipping in a cache-coherent TTM, blitting to it, and then flipping it out. --- linux-core/drm_bo_move.c | 4 +- linux-core/drm_mm.c | 1 + linux-core/drm_ttm.c | 1 + linux-core/i915_buffer.c | 84 ++++++++++++++++++++++++++++++++-------- linux-core/i915_drv.c | 2 +- shared-core/i915_drv.h | 4 ++ 6 files changed, 78 insertions(+), 18 deletions(-) diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index e1340205..d712a70f 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -333,8 +333,10 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, if (evict) { ret = drm_bo_wait(bo, 0, 1, 0); - if (ret) + if (ret) { + DRM_ERROR("Wait failure\n"); return ret; + } if (old_mem->mm_node) { mutex_lock(&dev->struct_mutex); drm_mm_put_block(old_mem->mm_node); diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c index 5889ee4d..634a1782 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_mm.c @@ -217,6 +217,7 @@ void drm_mm_put_block(drm_mm_node_t * cur) drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM); } } +EXPORT_SYMBOL(drm_mm_put_block); drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, unsigned long size, diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index fa55a8b4..8cd961d7 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -344,3 +344,4 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) return 0; } +EXPORT_SYMBOL(drm_bind_ttm); diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 41f05b78..70ba9a67 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -132,37 +132,28 @@ static void i915_emit_copy_blit(drm_device_t *dev, if (!dev_priv) return; - if (direction) { - stride = -stride; - src_offset += (pages - 1) << PAGE_SHIFT; - dst_offset += (pages - 1) << PAGE_SHIFT; - } - + i915_kernel_lost_context(dev); while(pages > 0) { cur_pages = pages; if (cur_pages > 2048) cur_pages = 2048; pages -= cur_pages; - BEGIN_LP_RING(8); - OUT_RING(XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | + BEGIN_LP_RING(6); + OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB); OUT_RING((stride & 0xffff) | ( 0xcc << 16) | (1 << 24) | - (1 << 25)); - OUT_RING(0); - OUT_RING((cur_pages << 16) | (PAGE_SIZE >> 2)); + (1 << 25) | (direction ? (1 << 30) : 0)); + OUT_RING((cur_pages << 16) | PAGE_SIZE); OUT_RING(dst_offset); - OUT_RING(0); OUT_RING(stride & 0xffff); OUT_RING(src_offset); ADVANCE_LP_RING(); - dst_offset += (cur_pages << PAGE_SHIFT)*(direction ? -1 : 1); - src_offset += (cur_pages << PAGE_SHIFT)*(direction ? 
-1 : 1); } return; } -static int drm_bo_move_blit(drm_buffer_object_t *bo, +static int i915_move_blit(drm_buffer_object_t *bo, int evict, int no_wait, drm_bo_mem_reg_t *new_mem) @@ -191,5 +182,66 @@ static int drm_bo_move_blit(drm_buffer_object_t *bo, new_mem); } - +/* + * Flip destination ttm into cached-coherent AGP, + * then blit and subsequently move out again. + */ + +static int i915_move_flip(drm_buffer_object_t *bo, + int evict, + int no_wait, + drm_bo_mem_reg_t *new_mem) +{ + drm_device_t *dev = bo->dev; + drm_bo_mem_reg_t tmp_mem; + int ret; + + tmp_mem = *new_mem; + tmp_mem.mm_node = NULL; + tmp_mem.mask = DRM_BO_FLAG_MEM_TT | + DRM_BO_FLAG_CACHED | + DRM_BO_FLAG_FORCE_CACHING; + + ret = drm_bo_mem_space(dev, &tmp_mem, no_wait); + if (ret) + return ret; + + ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start); + if (ret) + goto out_cleanup; + + ret = i915_move_blit(bo, 1, no_wait, &tmp_mem); + if (ret) + goto out_cleanup; + + ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); +out_cleanup: + if (tmp_mem.mm_node) { + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(tmp_mem.mm_node); + tmp_mem.mm_node = NULL; + mutex_unlock(&dev->struct_mutex); + } + return ret; +} + + +int i915_move(drm_buffer_object_t *bo, + int evict, + int no_wait, + drm_bo_mem_reg_t *new_mem) +{ + drm_bo_mem_reg_t *old_mem = &bo->mem; + + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { + if (i915_move_flip(bo, evict, no_wait, new_mem)) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } else { + if (i915_move_blit(bo, evict, no_wait, new_mem)) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + return 0; +} diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 19b63b7f..848ffa78 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -64,7 +64,7 @@ static drm_bo_driver_t i915_bo_driver = { .invalidate_caches = i915_invalidate_caches, .init_mem_type = i915_init_mem_type, .evict_flags = i915_evict_flags, - .move = NULL, + .move = i915_move, }; #endif diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 55c8cf57..ffc9d431 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -192,6 +192,9 @@ extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); extern int i915_init_mem_type(drm_device_t *dev, uint32_t type, drm_mem_type_manager_t *man); extern uint32_t i915_evict_flags(drm_device_t *dev, uint32_t type); +extern int i915_move(drm_buffer_object_t *bo, int evict, + int no_wait, drm_bo_mem_reg_t *new_mem); + #endif #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) @@ -329,6 +332,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) +#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) From 85ee2a8d044cd4d8de4894a794151af9471648e3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 10 Feb 2007 12:06:36 +0100 Subject: [PATCH 24/34] Various bugfixes. 
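The biggest of these is a rework of drm_bo_handle_move_mem() so that a ttm
is created and bound before the move is dispatched, and so that failures
unwind properly. The dispatch now reads roughly like this (sketch; locking
and error unwinding elided):

    if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm == NULL) {
            ret = drm_bo_add_ttm(bo);            /* create on demand */
            if (!ret && mem->mem_type != DRM_BO_MEM_LOCAL)
                    ret = drm_bind_ttm(bo->ttm,
                                       new_man->flags & DRM_BO_FLAG_CACHED,
                                       mem->mm_node->start);
    }

    if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
            /* No backing pages yet: simply adopt the new region. */
    } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
               !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
            ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
    } else if (dev->driver->bo_driver->move) {
            ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
    } else {
            ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
    }

On failure, out_err unbinds and destroys the ttm when the buffer has ended
up in fixed memory instead of leaking it, and the fault path now waits for
buffer idle before doing anything else, replacing the wait that used to
follow the move-to-mappable.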
--- linux-core/drm_bo.c | 83 +++++++++++++++++++++++++++++++--------- linux-core/drm_bo_move.c | 40 ++++++++++++++----- linux-core/drm_vm.c | 13 +++++-- linux-core/i915_buffer.c | 4 +- 4 files changed, 105 insertions(+), 35 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 8d2b544e..41f4e002 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -85,11 +85,8 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, int ret; ret = drm_bo_lock_kmm(bo); - if (ret) { - if (ret == -EAGAIN) - schedule(); + if (ret) return ret; - } drm_bo_unmap_virtual(bo); if (old_is_pci) drm_bo_finish_unmap(bo); @@ -142,6 +139,8 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) } + + static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, drm_bo_mem_reg_t *mem, int evict, @@ -155,33 +154,63 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; int ret = 0; - + if (old_is_pci || new_is_pci) ret = drm_bo_vm_pre_move(bo, old_is_pci); if (ret) return ret; - if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - (bo->ttm == NULL)) - ret = drm_bo_add_ttm(bo); - if (ret) - return ret; + /* + * Create and bind a ttm if required. + */ - if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + (bo->ttm == NULL)) { + ret = drm_bo_add_ttm(bo); + if (ret) + goto out_err; + + if (mem->mem_type != DRM_BO_MEM_LOCAL) { + ret = drm_bind_ttm(bo->ttm, new_man->flags & + DRM_BO_FLAG_CACHED, + mem->mm_node->start); + if (ret) + goto out_err; + } + } + + if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { + + drm_bo_mem_reg_t *old_mem = &bo->mem; + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + + *old_mem = *mem; + mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, mem->flags, + DRM_BO_MASK_MEMTYPE); + + } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + ret = drm_bo_move_ttm(bo, evict, no_wait, mem); + } else if (dev->driver->bo_driver->move) { ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); + } else { + ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); + } + if (ret) + goto out_err; + if (old_is_pci || new_is_pci) drm_bo_vm_post_move(bo); - if (ret) - return ret; - if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); if (ret) @@ -196,6 +225,19 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; return 0; + +out_err: + if (old_is_pci || new_is_pci) + drm_bo_vm_post_move(bo); + + new_man = &bm->man[bo->mem.mem_type]; + if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + + return ret; } /* @@ -269,7 +311,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) bm->nice_mode = 0; DRM_ERROR("Detected GPU lockup or " "fence driver was taken down. " - "Evicting waiting buffers.\n"); + "Evicting buffer.\n"); } if (bo->fence) { drm_fence_usage_deref_unlocked(dev, bo->fence); @@ -1148,8 +1190,8 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. 
*/ -static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, - int no_wait, int force_no_move, int move_unfenced) +int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, + int no_wait, int force_no_move, int move_unfenced) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -1387,6 +1429,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, out: mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(bo); return ret; } @@ -1481,6 +1524,7 @@ int drm_buffer_object_create(drm_file_t * priv, #endif bo->dev = dev; bo->type = type; + bo->mem.mem_type = DRM_BO_MEM_LOCAL; bo->mem.num_pages = num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; @@ -1491,8 +1535,8 @@ int drm_buffer_object_create(drm_file_t * priv, bo->buffer_start = buffer_start; } bo->priv_flags = 0; - bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; - bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->mem.flags = 0; + bo->mem.mask = 0; atomic_inc(&bm->count); ret = drm_bo_new_mask(bo, mask, hint); @@ -1517,6 +1561,7 @@ int drm_buffer_object_create(drm_file_t * priv, out_err: mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(bo); return ret; } diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index d712a70f..3347f945 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -168,7 +168,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) return -ENOMEM; memcpy_fromio(dst, src, PAGE_SIZE); - kunmap(dst); + kunmap(d); return 0; } @@ -186,7 +186,7 @@ static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) return -ENOMEM; memcpy_toio(dst, src, PAGE_SIZE); - kunmap(src); + kunmap(s); return 0; } @@ -283,7 +283,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); + fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) return -ENOMEM; @@ -292,11 +292,15 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, mutex_lock(&fbo->mutex); mutex_lock(&dev->struct_mutex); + DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); +#ifdef DRM_ODD_MM_COMPAT + INIT_LIST_HEAD(&fbo->vma_list); + INIT_LIST_HEAD(&fbo->p_mm_list); +#endif atomic_inc(&bo->fence->usage); - fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); atomic_inc(&bm->count); @@ -307,6 +311,11 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, return 0; } +/* + * Since move is underway, we need to block signals in this function. + * We cannot restart until it has finished. + */ + int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, int evict, int no_wait, @@ -324,19 +333,29 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, if (bo->fence) drm_fence_usage_deref_unlocked(dev, bo->fence); - ret = drm_fence_object_create(dev, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &bo->fence); if (ret) return ret; - if (evict) { +#ifdef DRM_ODD_MM_COMPAT + /* + * In this mode, we don't allow pipelining a copy blit, + * since the buffer will be accessible from user space + * the moment we return and rebuild the page tables. + * + * With normal vm operation, page tables are rebuilt + * on demand using fault(), which waits for buffer idle. 
+ */ + if (1) +#else + if (evict) +#endif + { ret = drm_bo_wait(bo, 0, 1, 0); - if (ret) { - DRM_ERROR("Wait failure\n"); + if (ret) return ret; - } if (old_mem->mm_node) { mutex_lock(&dev->struct_mutex); drm_mm_put_block(old_mem->mm_node); @@ -359,6 +378,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, */ ret = drm_buffer_object_transfer(bo, &old_obj); + if (ret) return ret; @@ -367,9 +387,9 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, else bo->ttm = NULL; - atomic_inc(&old_obj->fence->usage); mutex_lock(&dev->struct_mutex); list_del(&old_obj->lru); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); drm_bo_add_to_lru(old_obj, &old_obj->dev->bm); drm_bo_usage_deref_locked(old_obj); mutex_unlock(&dev->struct_mutex); diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 4a41e761..17778c26 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -746,6 +746,14 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, mutex_lock(&bo->mutex); + err = drm_bo_wait(bo, 0, 0, 0); + if (err) { + data->type = (err == -EAGAIN) ? + VM_FAULT_MINOR : VM_FAULT_SIGBUS; + goto out_unlock; + } + + /* * If buffer happens to be in a non-mappable location, * move it to a mappable. @@ -760,16 +768,13 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, err = drm_bo_move_buffer(bo, new_mask, 0, 0); bo->mem.mask = mask_save; - if (!err) - err = drm_bo_wait(bo, 0, 0, 0); - if (err) { data->type = (err == -EAGAIN) ? VM_FAULT_MINOR : VM_FAULT_SIGBUS; goto out_unlock; } } - + if (address > vma->vm_end) { data->type = VM_FAULT_SIGBUS; goto out_unlock; diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 70ba9a67..a357a53e 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -234,9 +234,9 @@ int i915_move(drm_buffer_object_t *bo, { drm_bo_mem_reg_t *old_mem = &bo->mem; - if (old_mem->mem_type == DRM_BO_MEM_LOCAL) + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { + } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { if (i915_move_flip(bo, evict, no_wait, new_mem)) return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else { From f02f83ee08a2bb87700544a9b67f475532e84af4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 17:47:57 +0100 Subject: [PATCH 25/34] Cleanup and fix support for pinned buffers. 
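The central idea: a pinned buffer keeps its memory region in a separate
bo->pinned_node (with bo->pinned_mem_type and a per-type pinned_lru), so the
movable bo->mem.mm_node and the pinned region can be managed independently.
Two small helpers carry most of the new bookkeeping; abridged from the diff:

    void drm_bo_add_to_lru(drm_buffer_object_t *bo)
    {
            drm_mem_type_manager_t *man;

            if (bo->mem.mm_node != bo->pinned_node) {
                    man = &bo->dev->bm.man[bo->mem.mem_type];
                    list_add_tail(&bo->lru, &man->lru);
            } else
                    INIT_LIST_HEAD(&bo->lru); /* pinned: never on an eviction lru */
    }

    static void drm_bo_free_old_node(drm_buffer_object_t *bo)
    {
            drm_bo_mem_reg_t *old_mem = &bo->mem;

            /* Never free the node backing a pinned region. */
            if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
                    mutex_lock(&bo->dev->struct_mutex);
                    drm_mm_put_block(old_mem->mm_node);
                    mutex_unlock(&bo->dev->struct_mutex);
            }
            old_mem->mm_node = NULL;
    }

Validation takes the NO_EVICT/NO_MOVE mask as the signal to transfer
mm_node ownership into pinned_node, and releases it again once those
flags are dropped.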
--- linux-core/drmP.h | 10 +- linux-core/drm_bo.c | 369 ++++++++++++++++++++++----------------- linux-core/drm_bo_move.c | 53 ++++-- linux-core/i915_buffer.c | 16 +- 4 files changed, 263 insertions(+), 185 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 86dcd79f..b2ce724d 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1039,6 +1039,11 @@ typedef struct drm_buffer_object{ wait_queue_head_t event_queue; struct mutex mutex; + /* For pinned buffers */ + drm_mm_node_t *pinned_node; + uint32_t pinned_mem_type; + struct list_head pinned_lru; + /* For vm */ drm_ttm_t *ttm; @@ -1509,11 +1514,10 @@ extern int drm_fence_buffer_objects(drm_file_t * priv, uint32_t fence_flags, drm_fence_object_t *fence, drm_fence_object_t **used_fence); -extern void drm_bo_add_to_lru(drm_buffer_object_t * bo, - drm_buffer_manager_t * bm); +extern void drm_bo_add_to_lru(drm_buffer_object_t * bo); extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, int no_wait); -extern int drm_bo_mem_space(drm_device_t *dev, +extern int drm_bo_mem_space(drm_buffer_object_t *bo, drm_bo_mem_reg_t *mem, int no_wait); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 41f4e002..814175cd 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -65,19 +65,26 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. */ -void drm_bo_add_to_lru(drm_buffer_object_t * bo, - drm_buffer_manager_t * bm) +void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) { - struct list_head *list; drm_mem_type_manager_t *man; - man = &bm->man[bo->mem.mem_type]; - list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? - &man->pinned : &man->lru; - list_add_tail(&bo->lru, list); - return; + man = &bo->dev->bm.man[bo->pinned_mem_type]; + list_add_tail(&bo->pinned_lru, &man->pinned); } +void drm_bo_add_to_lru(drm_buffer_object_t * bo) +{ + drm_mem_type_manager_t *man; + + if (bo->mem.mm_node != bo->pinned_node) { + man = &bo->dev->bm.man[bo->mem.mem_type]; + list_add_tail(&bo->lru, &man->lru); + } else + INIT_LIST_HEAD(&bo->lru); +} + + static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, int old_is_pci) { @@ -275,6 +282,39 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } +static int drm_bo_expire_fence(drm_buffer_object_t *bo, + int allow_errors) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + + if (bo->fence) { + if (bm->nice_mode) { + unsigned long _end = jiffies + 3 * DRM_HZ; + int ret; + do { + ret = drm_bo_wait(bo, 0, 1, 0); + if (ret && allow_errors) + return ret; + + } while (ret && !time_after_eq(jiffies, _end)); + + if (bo->fence) { + bm->nice_mode = 0; + DRM_ERROR("Detected GPU lockup or " + "fence driver was taken down. " + "Evicting buffer.\n"); + } + } + if (bo->fence) { + drm_fence_usage_deref_unlocked(dev, bo->fence); + bo->fence = NULL; + } + } + return 0; +} + + /* * Call dev->struct_mutex locked. 
* Attempts to remove all private references to a buffer by expiring its @@ -299,26 +339,9 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) bo->fence = NULL; } - if (bo->fence && remove_all) { - if (bm->nice_mode) { - unsigned long _end = jiffies + 3 * DRM_HZ; - int ret; - do { - ret = drm_bo_wait(bo, 0, 1, 0); - } while (ret && !time_after_eq(jiffies, _end)); + if (bo->fence && remove_all) + (void) drm_bo_expire_fence(bo, 0); - if (bo->fence) { - bm->nice_mode = 0; - DRM_ERROR("Detected GPU lockup or " - "fence driver was taken down. " - "Evicting buffer.\n"); - } - if (bo->fence) { - drm_fence_usage_deref_unlocked(dev, bo->fence); - bo->fence = NULL; - } - } - } mutex_lock(&dev->struct_mutex); if (!atomic_dec_and_test(&bo->usage)) { @@ -331,6 +354,11 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) drm_mm_put_block(bo->mem.mm_node); bo->mem.mm_node = NULL; } + list_del_init(&bo->pinned_lru); + if (bo->pinned_node) { + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = NULL; + } list_del_init(&bo->ddestroy); mutex_unlock(&bo->mutex); drm_bo_destroy_locked(bo); @@ -361,7 +389,10 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && atomic_read(&bo->usage) == 0) { + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && + list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && + list_empty(&bo->ddestroy) && + atomic_read(&bo->usage) == 0) { BUG_ON(bo->fence != NULL); #ifdef DRM_ODD_MM_COMPAT @@ -404,9 +435,10 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) drm_buffer_object_t *entry, *nentry; struct list_head *list, *next; + list_for_each_safe(list, next, &bm->ddestroy) { entry = list_entry(list, drm_buffer_object_t, ddestroy); - + nentry = NULL; if (next != &bm->ddestroy) { nentry = list_entry(next, drm_buffer_object_t, @@ -420,7 +452,6 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) atomic_dec(&nentry->usage); } } - } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) @@ -566,7 +597,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); - drm_bo_add_to_lru(entry, bm); + drm_bo_add_to_lru(entry); } mutex_unlock(&entry->mutex); drm_bo_usage_deref_locked(entry); @@ -587,11 +618,10 @@ EXPORT_SYMBOL(drm_fence_buffer_objects); */ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, - int no_wait, int force_no_move) + int no_wait) { int ret = 0; drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; drm_bo_mem_reg_t evict_mem; /* @@ -611,12 +641,21 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } - if (bo->type != drm_bo_type_dc) + + evict_mem = bo->mem; + evict_mem.mm_node = NULL; + + if (bo->type == drm_bo_type_fake) { + bo->mem.mem_type = DRM_BO_MEM_LOCAL; + bo->mem.mm_node = NULL; + bo->pinned_mem_type = DRM_BO_MEM_LOCAL; + bo->pinned_node = NULL; goto out1; + } evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type); - ret = drm_bo_mem_space(dev, &evict_mem, no_wait); + ret = drm_bo_mem_space(bo, &evict_mem, no_wait); if (ret) { if (ret != -EAGAIN) @@ -625,6 +664,9 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } + if (bo->pinned_node) + DRM_ERROR("Evicting pinned buffer\n"); + ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); if 
(ret) { @@ -640,7 +682,7 @@ out1: evict_mem.mm_node = NULL; } list_del(&bo->lru); - drm_bo_add_to_lru(bo, bm); + drm_bo_add_to_lru(bo); mutex_unlock(&dev->struct_mutex); DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, @@ -682,7 +724,7 @@ static int drm_bo_mem_force_space(drm_device_t *dev, mutex_lock(&entry->mutex); BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); - ret = drm_bo_evict(entry, mem_type, no_wait, 0); + ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); drm_bo_usage_deref_unlocked(entry); if (ret) @@ -736,10 +778,11 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, } -int drm_bo_mem_space(drm_device_t *dev, +int drm_bo_mem_space(drm_buffer_object_t *bo, drm_bo_mem_reg_t *mem, int no_wait) { + drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm= &dev->bm; drm_mem_type_manager_t *man; @@ -767,6 +810,13 @@ int drm_bo_mem_space(drm_device_t *dev, if (mem_type == DRM_BO_MEM_LOCAL) break; + if ((mem_type == bo->pinned_mem_type) && + (bo->pinned_node != NULL)) { + DRM_ERROR("Choosing pinned region\n"); + node = bo->pinned_node; + break; + } + mutex_lock(&dev->struct_mutex); if (man->has_type && man->use_type) { type_found = 1; @@ -849,10 +899,6 @@ static int drm_bo_new_mask(drm_buffer_object_t *bo, return -EINVAL; } - /* - * FIXME: Check what can be done about pinned buffers here. - */ - bo->mem.mask = new_mask; return 0; } @@ -941,7 +987,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (bo->mem.mm_node) - ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0); + ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1); return ret; } @@ -1191,7 +1237,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, */ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, - int no_wait, int force_no_move, int move_unfenced) + int no_wait, int move_unfenced) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -1228,7 +1274,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, /* * Determine where to move the buffer. */ - ret = drm_bo_mem_space(dev, &mem, no_wait); + ret = drm_bo_mem_space(bo, &mem, no_wait); if (ret) goto out_unlock; @@ -1245,7 +1291,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&bo->event_queue); list_del(&bo->lru); - drm_bo_add_to_lru(bo, bm); + drm_bo_add_to_lru(bo); mutex_unlock(&dev->struct_mutex); } @@ -1316,9 +1362,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - uint32_t flag_diff = (bo->mem.mask ^ bo->mem.flags); drm_bo_driver_t *driver = dev->driver->bo_driver; - int ret; DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, @@ -1339,23 +1383,13 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return ret; } - /* - * Check whether we dropped no_move policy, and in that case, - * release reserved manager regions, if we're evicted. - */ - - if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && - !(bo->mem.mask & DRM_BO_FLAG_NO_MOVE)) { - /* FIXME */ - } - /* * Check whether we need to move buffer. 
*/ if (!drm_bo_mem_compat(&bo->mem)) { ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, - no_wait, 1, move_unfenced); + no_wait, move_unfenced); if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed moving buffer.\n"); @@ -1363,6 +1397,34 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } } + /* + * Pinned buffers. + */ + + if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { + + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + mutex_unlock(&dev->struct_mutex); + + } else if (bo->pinned_node != NULL) { + + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(bo->pinned_node); + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + + } + /* * We might need to add a TTM. */ @@ -1372,9 +1434,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, if (ret) return ret; } + DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); /* - * Adjust lru to be sure. + * Finally, adjust lru to be sure. */ mutex_lock(&dev->struct_mutex); @@ -1384,7 +1447,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); } else { - drm_bo_add_to_lru(bo, bm); + drm_bo_add_to_lru(bo); if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { DRM_WAKEUP(&bo->event_queue); DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); @@ -1392,7 +1455,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } mutex_unlock(&dev->struct_mutex); - DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); return 0; } @@ -1517,6 +1579,7 @@ int drm_buffer_object_create(drm_file_t * priv, atomic_set(&bo->mapped, -1); DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&bo->lru); + INIT_LIST_HEAD(&bo->pinned_lru); INIT_LIST_HEAD(&bo->ddestroy); #ifdef DRM_ODD_MM_COMPAT INIT_LIST_HEAD(&bo->p_mm_list); @@ -1729,6 +1792,65 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return 0; } +static int drm_bo_leave_list(drm_buffer_object_t *bo, + uint32_t mem_type, + int free_pinned, + int allow_errors) +{ + drm_device_t *dev = bo->dev; + int ret = 0; + + atomic_inc(&bo->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&bo->mutex); + + ret = drm_bo_expire_fence(bo, allow_errors); + if (ret) + goto out; + + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + + if (free_pinned) { + DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + if (bo->pinned_node == bo->mem.mm_node) + bo->pinned_node = NULL; + if (bo->pinned_node != NULL) { + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + } + mutex_unlock(&dev->struct_mutex); + } + + if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { + DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " + "cleanup. Removing flag and evicting.\n"); + bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; + bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; + } + + ret = drm_bo_evict(bo, mem_type, 0); + + if (ret){ + if (allow_errors){ + goto out; + } else { + ret = 0; + DRM_ERROR("Cleanup eviction failed\n"); + } + } + +out: + mutex_unlock(&bo->mutex); + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(bo); + return ret; +} + + /* * dev->struct_sem locked. 
*/ @@ -1736,102 +1858,39 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) static int drm_bo_force_list_clean(drm_device_t * dev, struct list_head *head, unsigned mem_type, - int force_no_move, int allow_errors) + int free_pinned, + int allow_errors, + int pinned_list) { - drm_buffer_manager_t *bm = &dev->bm; - struct list_head *list, *next, *prev; + struct list_head *list; drm_buffer_object_t *entry; int ret; - int clean; - retry: - clean = 1; - list_for_each_safe(list, next, head) { - prev = list->prev; - entry = list_entry(list, drm_buffer_object_t, lru); - atomic_inc(&entry->usage); - mutex_unlock(&dev->struct_mutex); - mutex_lock(&entry->mutex); - mutex_lock(&dev->struct_mutex); - - if (prev != list->prev || next != list->next) { - mutex_unlock(&entry->mutex); - drm_bo_usage_deref_locked(entry); - goto retry; - } - if (entry->mem.mm_node) { - clean = 0; - - /* - * Expire the fence. - */ - - mutex_unlock(&dev->struct_mutex); - if (entry->fence && bm->nice_mode) { - unsigned long _end = jiffies + 3 * DRM_HZ; - do { - ret = drm_bo_wait(entry, 0, 1, 0); - if (ret && allow_errors) - goto out_err; - - } while (ret && !time_after_eq(jiffies, _end)); - - if (entry->fence) { - bm->nice_mode = 0; - DRM_ERROR("Detected GPU hang or " - "fence manager was taken down. " - "Evicting waiting buffers\n"); - } - } - if (entry->fence) { - drm_fence_usage_deref_unlocked(dev, - entry->fence); - entry->fence = NULL; - } - - DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - - if (force_no_move) { - DRM_FLAG_MASKED(entry->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); - } - if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) { - DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " - "cleanup. Removing flag and evicting.\n"); - entry->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; - entry->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; - } - - ret = drm_bo_evict(entry, mem_type, 1, force_no_move); - if (ret) { - if (allow_errors) { - goto out_err; - } else { - DRM_ERROR("Aargh. Eviction failed.\n"); - } - } - mutex_lock(&dev->struct_mutex); - } - mutex_unlock(&entry->mutex); - drm_bo_usage_deref_locked(entry); - if (prev != list->prev || next != list->next) { - goto retry; - } + list = head->next; + while(list != head) { + if (pinned_list) + entry = list_entry(list, drm_buffer_object_t, + pinned_lru); + else + entry = list_entry(list, drm_buffer_object_t, + lru); + + ret = drm_bo_leave_list(entry, mem_type, free_pinned, + allow_errors); + + if (ret) + return ret; + + list = head->next; } - if (!clean) - goto retry; return 0; - out_err: - mutex_unlock(&entry->mutex); - drm_bo_usage_deref_unlocked(entry); - mutex_lock(&dev->struct_mutex); - return ret; + } int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) { drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man = &bm->man[mem_type]; - drm_mem_type_manager_t *local_man = &bm->man[DRM_BO_MEM_LOCAL]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -1854,15 +1913,10 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) * Throw out unfenced buffers. */ - drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0); + drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0, 0); - /* - * Throw out evicted no-move buffers. 
- */ - - drm_bo_force_list_clean(dev, &local_man->pinned, mem_type, 1, 0); - drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0); - drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0); + drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); if (drm_mm_clean(&man->manager)) { drm_mm_takedown(&man->manager); @@ -1885,14 +1939,14 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) return -EINVAL; } - ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1); + ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1, 0); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 1); if (ret) return ret; ret = - drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); return ret; } @@ -1971,6 +2025,7 @@ int drm_bo_driver_finish(drm_device_t * dev) } } mutex_unlock(&dev->struct_mutex); + if (!cancel_delayed_work(&bm->wq)) { flush_scheduled_work(); } diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 3347f945..c6fe4ec2 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -30,12 +30,31 @@ #include "drmP.h" + +/** + * Free the old memory node unless it's a pinned region and we + * have not been requested to free also pinned regions. + */ + +static void drm_bo_free_old_node(drm_buffer_object_t *bo) +{ + drm_bo_mem_reg_t *old_mem = &bo->mem; + + if (old_mem->mm_node && + (old_mem->mm_node != bo->pinned_node)) { + mutex_lock(&bo->dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + old_mem->mm_node = NULL; + mutex_unlock(&bo->dev->struct_mutex); + } + old_mem->mm_node = NULL; +} + int drm_bo_move_ttm(drm_buffer_object_t *bo, int evict, int no_wait, drm_bo_mem_reg_t *new_mem) { - drm_device_t *dev = bo->dev; drm_ttm_t *ttm = bo->ttm; drm_bo_mem_reg_t *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; @@ -49,10 +68,7 @@ int drm_bo_move_ttm(drm_buffer_object_t *bo, else drm_ttm_unbind(ttm); - mutex_lock(&dev->struct_mutex); - drm_mm_put_block(old_mem->mm_node); - old_mem->mm_node = NULL; - mutex_unlock(&dev->struct_mutex); + drm_bo_free_old_node(bo); DRM_FLAG_MASKED(old_mem->flags, DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); @@ -246,11 +262,8 @@ int drm_bo_move_memcpy(drm_buffer_object_t *bo, } mb(); out2: - if (old_mem->mm_node) { - mutex_lock(&dev->struct_mutex); - drm_mm_put_block(old_mem->mm_node); - mutex_unlock(&dev->struct_mutex); - } + drm_bo_free_old_node(bo); + *old_mem = *new_mem; new_mem->mm_node = NULL; old_mem->mask = save_mask; @@ -295,12 +308,14 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); + INIT_LIST_HEAD(&fbo->pinned_lru); #ifdef DRM_ODD_MM_COMPAT INIT_LIST_HEAD(&fbo->vma_list); INIT_LIST_HEAD(&fbo->p_mm_list); #endif atomic_inc(&bo->fence->usage); + fbo->pinned_node = NULL; fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); atomic_inc(&bm->count); @@ -356,12 +371,9 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, ret = drm_bo_wait(bo, 0, 1, 0); if (ret) return ret; - if (old_mem->mm_node) { - mutex_lock(&dev->struct_mutex); - drm_mm_put_block(old_mem->mm_node); - old_mem->mm_node = NULL; - mutex_unlock(&dev->struct_mutex); - } + + drm_bo_free_old_node(bo); + if ((man->flags & 
_DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { drm_ttm_unbind(bo->ttm); @@ -388,9 +400,14 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, bo->ttm = NULL; mutex_lock(&dev->struct_mutex); - list_del(&old_obj->lru); + list_del_init(&old_obj->lru); DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - drm_bo_add_to_lru(old_obj, &old_obj->dev->bm); + + if (old_obj->mem.mm_node == bo->pinned_node) + old_obj->mem.mm_node = NULL; + else + drm_bo_add_to_lru(old_obj); + drm_bo_usage_deref_locked(old_obj); mutex_unlock(&dev->struct_mutex); diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index a357a53e..7655902f 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -154,9 +154,9 @@ static void i915_emit_copy_blit(drm_device_t *dev, } static int i915_move_blit(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem) + int evict, + int no_wait, + drm_bo_mem_reg_t *new_mem) { drm_bo_mem_reg_t *old_mem = &bo->mem; int dir = 0; @@ -203,7 +203,7 @@ static int i915_move_flip(drm_buffer_object_t *bo, DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING; - ret = drm_bo_mem_space(dev, &tmp_mem, no_wait); + ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); if (ret) return ret; @@ -238,10 +238,12 @@ int i915_move(drm_buffer_object_t *bo, return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { if (i915_move_flip(bo, evict, no_wait, new_mem)) - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + return drm_bo_move_memcpy(bo, evict, no_wait, + new_mem); } else { - if (i915_move_blit(bo, evict, no_wait, new_mem)) - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + if (i915_move_blit(bo, evict, no_wait, new_mem)) + return drm_bo_move_memcpy(bo, evict, no_wait, + new_mem); } return 0; } From b0c5339ed69c6ff08b7817f870e895aae2ef04c7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 20:32:03 +0100 Subject: [PATCH 26/34] More bugfixes. 
--- linux-core/drm_bo.c | 58 +++++++++++++++++++++------------------- linux-core/drm_bo_move.c | 9 +++---- 2 files changed, 34 insertions(+), 33 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 814175cd..48cb5ef4 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -692,8 +692,6 @@ out: return ret; } - - static int drm_bo_mem_force_space(drm_device_t *dev, drm_bo_mem_reg_t *mem, uint32_t mem_type, @@ -744,7 +742,6 @@ static int drm_bo_mem_force_space(drm_device_t *dev, return 0; } - static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, uint32_t mem_type, uint32_t mask, @@ -777,7 +774,6 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, return 1; } - int drm_bo_mem_space(drm_buffer_object_t *bo, drm_bo_mem_reg_t *mem, int no_wait) @@ -1817,10 +1813,8 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, if (bo->pinned_node == bo->mem.mm_node) bo->pinned_node = NULL; if (bo->pinned_node != NULL) { - mutex_lock(&dev->struct_mutex); drm_mm_put_block(bo->pinned_node); bo->pinned_node = NULL; - mutex_unlock(&dev->struct_mutex); } mutex_unlock(&dev->struct_mutex); } @@ -1831,8 +1825,9 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; } - - ret = drm_bo_evict(bo, mem_type, 0); + + if (bo->mem.mem_type == mem_type) + ret = drm_bo_evict(bo, mem_type, 0); if (ret){ if (allow_errors){ @@ -1862,29 +1857,40 @@ static int drm_bo_force_list_clean(drm_device_t * dev, int allow_errors, int pinned_list) { - struct list_head *list; + struct list_head *list, *next; drm_buffer_object_t *entry; int ret; + int do_retry; - list = head->next; - while(list != head) { + /* + * We need to + * restart if a node disappears from under us. + * Nodes cannot be added since the hardware lock is needed + * For this operation. + */ + +retry: + list_for_each_safe(list, next, head) { if (pinned_list) entry = list_entry(list, drm_buffer_object_t, pinned_lru); else entry = list_entry(list, drm_buffer_object_t, lru); - + atomic_inc(&entry->usage); ret = drm_bo_leave_list(entry, mem_type, free_pinned, allow_errors); - + + do_retry = list->next != next; + drm_bo_usage_deref_locked(entry); + if (ret) return ret; - - list = head->next; + + if (do_retry) + goto retry; } - return 0; - + return 0; } int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) @@ -1909,12 +1915,7 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) ret = 0; if (mem_type > 0) { - /* - * Throw out unfenced buffers. - */ - drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0, 0); - drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); @@ -1928,6 +1929,12 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) return ret; } +/** + *Evict all buffers of a particular mem_type, but leave memory manager + *regions for NO_MOVE buffers intact. New buffers cannot be added at this + *point since we have the hardware lock. 
+ */ + static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) { int ret; @@ -1942,11 +1949,8 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1, 0); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 1); - if (ret) - return ret; - ret = - drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); + return ret; } diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index c6fe4ec2..1d142087 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -365,7 +365,8 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, */ if (1) #else - if (evict) + if (evict || ((bo->mem.mm_node == bo->pinned_node) && + bo->mem.mm_node != NULL)) #endif { ret = drm_bo_wait(bo, 0, 1, 0); @@ -402,11 +403,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, mutex_lock(&dev->struct_mutex); list_del_init(&old_obj->lru); DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - - if (old_obj->mem.mm_node == bo->pinned_node) - old_obj->mem.mm_node = NULL; - else - drm_bo_add_to_lru(old_obj); + drm_bo_add_to_lru(old_obj); drm_bo_usage_deref_locked(old_obj); mutex_unlock(&dev->struct_mutex); From 398913dc0e632c71e3095a7d50dae911aed18884 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 20:34:50 +0100 Subject: [PATCH 27/34] Lindent. --- linux-core/drm_bo.c | 304 ++++++++++++++++++--------------------- linux-core/drm_bo_move.c | 122 ++++++++-------- linux-core/drm_ttm.c | 17 +-- linux-core/drm_ttm.h | 3 +- linux-core/i915_buffer.c | 103 ++++++------- 5 files changed, 244 insertions(+), 305 deletions(-) diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 48cb5ef4..ed089096 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -49,12 +49,10 @@ * */ - - -static void drm_bo_destroy_locked(drm_buffer_object_t *bo); -static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo); -static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo); -static void drm_bo_unmap_virtual(drm_buffer_object_t *bo); +static void drm_bo_destroy_locked(drm_buffer_object_t * bo); +static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo); +static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo); +static void drm_bo_unmap_virtual(drm_buffer_object_t * bo); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -80,19 +78,17 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) if (bo->mem.mm_node != bo->pinned_node) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); - } else + } else INIT_LIST_HEAD(&bo->lru); } - -static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, - int old_is_pci) +static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) { #ifdef DRM_ODD_MM_COMPAT int ret; ret = drm_bo_lock_kmm(bo); - if (ret) + if (ret) return ret; drm_bo_unmap_virtual(bo); if (old_is_pci) @@ -103,11 +99,11 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t *bo, return 0; } -static void drm_bo_vm_post_move(drm_buffer_object_t *bo) +static void drm_bo_vm_post_move(drm_buffer_object_t * bo) { #ifdef DRM_ODD_MM_COMPAT int ret; - + ret = drm_bo_remap_bound(bo); if (ret) { DRM_ERROR("Failed to remap a bound buffer object.\n" @@ -129,7 +125,7 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) switch (bo->type) { case drm_bo_type_dc: - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + bo->ttm = drm_ttm_init(dev, 
bo->mem.num_pages << PAGE_SHIFT); if (!bo->ttm) ret = -ENOMEM; break; @@ -145,13 +141,9 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) return ret; } - - - -static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, - drm_bo_mem_reg_t *mem, - int evict, - int no_wait) +static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, + int evict, int no_wait) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -161,7 +153,6 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; int ret = 0; - if (old_is_pci || new_is_pci) ret = drm_bo_vm_pre_move(bo, old_is_pci); if (ret) @@ -171,8 +162,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, * Create and bind a ttm if required. */ - if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - (bo->ttm == NULL)) { + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) { ret = drm_bo_add_ttm(bo); if (ret) goto out_err; @@ -185,7 +175,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, goto out_err; } } - + if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { drm_bo_mem_reg_t *old_mem = &bo->mem; @@ -195,15 +185,14 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, *old_mem = *mem; mem->mm_node = NULL; old_mem->mask = save_mask; - DRM_FLAG_MASKED(save_flags, mem->flags, - DRM_BO_MASK_MEMTYPE); + DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE); } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { ret = drm_bo_move_ttm(bo, evict, no_wait, mem); - } else if (dev->driver->bo_driver->move) { + } else if (dev->driver->bo_driver->move) { ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); } else { @@ -217,13 +206,15 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, if (old_is_pci || new_is_pci) drm_bo_vm_post_move(bo); - + if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { - ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags); + ret = + dev->driver->bo_driver->invalidate_caches(dev, + bo->mem.flags); if (ret) DRM_ERROR("Can not flush read caches\n"); } - + DRM_FLAG_MASKED(bo->priv_flags, (evict) ? _DRM_BO_FLAG_EVICTED : 0, _DRM_BO_FLAG_EVICTED); @@ -233,10 +224,10 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, return 0; -out_err: + out_err: if (old_is_pci || new_is_pci) drm_bo_vm_post_move(bo); - + new_man = &bm->man[bo->mem.mem_type]; if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { drm_ttm_unbind(bo->ttm); @@ -282,8 +273,7 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } -static int drm_bo_expire_fence(drm_buffer_object_t *bo, - int allow_errors) +static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -312,8 +302,7 @@ static int drm_bo_expire_fence(drm_buffer_object_t *bo, } } return 0; -} - +} /* * Call dev->struct_mutex locked. @@ -321,8 +310,7 @@ static int drm_bo_expire_fence(drm_buffer_object_t *bo, * fence object and removing from lru lists and memory managers. 
*/ - -static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) +static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; @@ -333,14 +321,13 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - if (bo->fence && drm_fence_object_signaled(bo->fence, - bo->fence_type)) { + if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) { drm_fence_usage_deref_locked(dev, bo->fence); bo->fence = NULL; } - if (bo->fence && remove_all) - (void) drm_bo_expire_fence(bo, 0); + if (bo->fence && remove_all) + (void)drm_bo_expire_fence(bo, 0); mutex_lock(&dev->struct_mutex); @@ -369,30 +356,27 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all) drm_fence_object_flush(dev, bo->fence, bo->fence_type); list_add_tail(&bo->ddestroy, &bm->ddestroy); schedule_delayed_work(&bm->wq, - ((DRM_HZ / 100) < - 1) ? 1 : DRM_HZ / 100); + ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); } -out: + out: mutex_unlock(&bo->mutex); return; } - /* * Verify that refcount is 0 and that there are no internal references * to the buffer object. Then destroy it. */ -static void drm_bo_destroy_locked(drm_buffer_object_t *bo) +static void drm_bo_destroy_locked(drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && - list_empty(&bo->ddestroy) && - atomic_read(&bo->usage) == 0) { + list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { BUG_ON(bo->fence != NULL); #ifdef DRM_ODD_MM_COMPAT @@ -423,7 +407,6 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo) return; } - /* * Call dev->struct_mutex locked. 
*/ @@ -435,10 +418,9 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) drm_buffer_object_t *entry, *nentry; struct list_head *list, *next; - list_for_each_safe(list, next, &bm->ddestroy) { entry = list_entry(list, drm_buffer_object_t, ddestroy); - + nentry = NULL; if (next != &bm->ddestroy) { nentry = list_entry(next, drm_buffer_object_t, @@ -464,11 +446,11 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) drm_device_t *dev = (drm_device_t *) data; drm_buffer_manager_t *bm = &dev->bm; #else - drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work); + drm_buffer_manager_t *bm = + container_of(work, drm_buffer_manager_t, wq.work); drm_device_t *dev = container_of(bm, drm_device_t, bm); #endif - DRM_DEBUG("Delayed delete Worker\n"); mutex_lock(&dev->struct_mutex); @@ -494,10 +476,10 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) { drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + drm_user_object_entry(uo, drm_buffer_object_t, base); drm_bo_takedown_vm_locked(bo); - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked(bo); } static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo) @@ -641,7 +623,6 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, goto out; } - evict_mem = bo->mem; evict_mem.mm_node = NULL; @@ -663,19 +644,19 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, "buffer eviction.\n"); goto out; } - + if (bo->pinned_node) - DRM_ERROR("Evicting pinned buffer\n"); + DRM_ERROR("Evicting pinned buffer\n"); ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); - + if (ret) { if (ret != -EAGAIN) DRM_ERROR("Buffer eviction failed\n"); goto out; } - -out1: + + out1: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { drm_mm_put_block(evict_mem.mm_node); @@ -687,15 +668,14 @@ out1: DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, _DRM_BO_FLAG_EVICTED); - -out: + + out: return ret; } -static int drm_bo_mem_force_space(drm_device_t *dev, - drm_bo_mem_reg_t *mem, - uint32_t mem_type, - int no_wait) +static int drm_bo_mem_force_space(drm_device_t * dev, + drm_bo_mem_reg_t * mem, + uint32_t mem_type, int no_wait) { drm_mm_node_t *node; drm_buffer_manager_t *bm = &dev->bm; @@ -707,7 +687,7 @@ static int drm_bo_mem_force_space(drm_device_t *dev, mutex_lock(&dev->struct_mutex); do { - node = drm_mm_search_free(&man->manager, num_pages, + node = drm_mm_search_free(&man->manager, num_pages, mem->page_alignment, 1); if (node) break; @@ -720,7 +700,8 @@ static int drm_bo_mem_force_space(drm_device_t *dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->mem. 
+ flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -742,10 +723,9 @@ static int drm_bo_mem_force_space(drm_device_t *dev, return 0; } -static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, +static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, uint32_t mem_type, - uint32_t mask, - uint32_t *res_mask) + uint32_t mask, uint32_t * res_mask) { uint32_t cur_flags = drm_bo_type_flags(mem_type); uint32_t flag_diff; @@ -773,14 +753,13 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t *man, *res_mask = cur_flags; return 1; } - -int drm_bo_mem_space(drm_buffer_object_t *bo, - drm_bo_mem_reg_t *mem, - int no_wait) + +int drm_bo_mem_space(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, int no_wait) { drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm= &dev->bm; - drm_mem_type_manager_t *man; + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; @@ -793,12 +772,12 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, drm_mm_node_t *node = NULL; int ret; - for (i=0; iman[mem_type]; - type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, - &cur_flags); + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); if (!type_ok) continue; @@ -808,7 +787,7 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, if ((mem_type == bo->pinned_mem_type) && (bo->pinned_node != NULL)) { - DRM_ERROR("Choosing pinned region\n"); + DRM_ERROR("Choosing pinned region\n"); node = bo->pinned_node; break; } @@ -816,17 +795,17 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, mutex_lock(&dev->struct_mutex); if (man->has_type && man->use_type) { type_found = 1; - node = drm_mm_search_free(&man->manager, mem->num_pages, + node = drm_mm_search_free(&man->manager, mem->num_pages, mem->page_alignment, 1); - if (node) - node = drm_mm_get_block(node, mem->num_pages, + if (node) + node = drm_mm_get_block(node, mem->num_pages, mem->page_alignment); } mutex_unlock(&dev->struct_mutex); if (node) break; } - + if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { mem->mm_node = node; mem->mem_type = mem_type; @@ -834,27 +813,26 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, return 0; } - if (!type_found) + if (!type_found) return -EINVAL; - + num_prios = dev->driver->bo_driver->num_mem_busy_prio; prios = dev->driver->bo_driver->mem_busy_prio; - for (i=0; iman[mem_type]; - if (!drm_bo_mt_compatible(man, mem_type, mem->mask, - &cur_flags)) + if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags)) continue; - + ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); - + if (ret == 0) { mem->flags = cur_flags; return 0; } - + if (ret == -EAGAIN) has_eagain = 1; } @@ -862,10 +840,10 @@ int drm_bo_mem_space(drm_buffer_object_t *bo, ret = (has_eagain) ? 
-EAGAIN : -ENOMEM; return ret; } + EXPORT_SYMBOL(drm_bo_mem_space); - -static int drm_bo_new_mask(drm_buffer_object_t *bo, +static int drm_bo_new_mask(drm_buffer_object_t * bo, uint32_t new_mask, uint32_t hint) { uint32_t new_props; @@ -1253,7 +1231,6 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, if (ret) return ret; - mem.num_pages = bo->mem.num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.mask = new_mem_flags; @@ -1263,7 +1240,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, mutex_lock(&dev->struct_mutex); list_del(&bo->lru); list_add_tail(&bo->lru, &bm->unfenced); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); mutex_unlock(&dev->struct_mutex); @@ -1271,13 +1248,13 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * Determine where to move the buffer. */ ret = drm_bo_mem_space(bo, &mem, no_wait); - + if (ret) goto out_unlock; ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); - out_unlock: + out_unlock: if (ret || !move_unfenced) { mutex_lock(&dev->struct_mutex); if (mem.mm_node) { @@ -1288,18 +1265,16 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, DRM_WAKEUP(&bo->event_queue); list_del(&bo->lru); drm_bo_add_to_lru(bo); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); } mutex_unlock(&bm->evict_mutex); return ret; } - -static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem) +static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) { - uint32_t - flag_diff = (mem->mask ^ mem->flags); + uint32_t flag_diff = (mem->mask ^ mem->flags); if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) return 0; @@ -1311,11 +1286,11 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem) return 0; return 1; } - -static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem) + +static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) { drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + drm_mem_type_manager_t *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; @@ -1328,11 +1303,11 @@ static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem) BUG_ON(mem->mm_node); - for (i=0; iman[mem_type]; - type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, - &cur_flags); + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); if (type_ok) break; } @@ -1348,7 +1323,7 @@ static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem) DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); return -EINVAL; } - + /* * bo locked. 
*/ @@ -1361,9 +1336,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, drm_bo_driver_t *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, + DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, bo->mem.flags); - ret = driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type); + ret = + driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); return ret; @@ -1384,7 +1360,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, */ if (!drm_bo_mem_compat(&bo->mem)) { - ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, + ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, no_wait, move_unfenced); if (ret) { if (ret != -EAGAIN) @@ -1427,7 +1403,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { ret = drm_bo_add_ttm(bo); - if (ret) + if (ret) return ret; } DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); @@ -1435,23 +1411,23 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, /* * Finally, adjust lru to be sure. */ - + mutex_lock(&dev->struct_mutex); list_del(&bo->lru); if (move_unfenced) { list_add_tail(&bo->lru, &bm->unfenced); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); } else { drm_bo_add_to_lru(bo); if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { DRM_WAKEUP(&bo->event_queue); - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + DRM_FLAG_MASKED(bo->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); } } mutex_unlock(&dev->struct_mutex); - return 0; } @@ -1601,7 +1577,7 @@ int drm_buffer_object_create(drm_file_t * priv, if (ret) goto out_err; - + if (bo->type == drm_bo_type_dc) { mutex_lock(&dev->struct_mutex); ret = drm_bo_setup_vm_locked(bo); @@ -1609,8 +1585,7 @@ int drm_buffer_object_create(drm_file_t * priv, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, 0, - hint & DRM_BO_HINT_DONT_BLOCK); + ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) goto out_err; @@ -1788,10 +1763,9 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return 0; } -static int drm_bo_leave_list(drm_buffer_object_t *bo, +static int drm_bo_leave_list(drm_buffer_object_t * bo, uint32_t mem_type, - int free_pinned, - int allow_errors) + int free_pinned, int allow_errors) { drm_device_t *dev = bo->dev; int ret = 0; @@ -1805,7 +1779,7 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, goto out; DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - + if (free_pinned) { DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); mutex_lock(&dev->struct_mutex); @@ -1815,7 +1789,7 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, if (bo->pinned_node != NULL) { drm_mm_put_block(bo->pinned_node); bo->pinned_node = NULL; - } + } mutex_unlock(&dev->struct_mutex); } @@ -1825,26 +1799,25 @@ static int drm_bo_leave_list(drm_buffer_object_t *bo, bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; } - + if (bo->mem.mem_type == mem_type) ret = drm_bo_evict(bo, mem_type, 0); - if (ret){ - if (allow_errors){ + if (ret) { + if (allow_errors) { goto out; } else { ret = 0; DRM_ERROR("Cleanup eviction failed\n"); } } - -out: + + out: mutex_unlock(&bo->mutex); mutex_lock(&dev->struct_mutex); drm_bo_usage_deref_locked(bo); return ret; } - /* * 
dev->struct_sem locked. @@ -1853,9 +1826,8 @@ out: static int drm_bo_force_list_clean(drm_device_t * dev, struct list_head *head, unsigned mem_type, - int free_pinned, - int allow_errors, - int pinned_list) + int free_pinned, + int allow_errors, int pinned_list) { struct list_head *list, *next; drm_buffer_object_t *entry; @@ -1869,16 +1841,15 @@ static int drm_bo_force_list_clean(drm_device_t * dev, * For this operation. */ -retry: + retry: list_for_each_safe(list, next, head) { if (pinned_list) - entry = list_entry(list, drm_buffer_object_t, + entry = list_entry(list, drm_buffer_object_t, pinned_lru); else - entry = list_entry(list, drm_buffer_object_t, - lru); + entry = list_entry(list, drm_buffer_object_t, lru); atomic_inc(&entry->usage); - ret = drm_bo_leave_list(entry, mem_type, free_pinned, + ret = drm_bo_leave_list(entry, mem_type, free_pinned, allow_errors); do_retry = list->next != next; @@ -1890,7 +1861,7 @@ retry: if (do_retry) goto retry; } - return 0; + return 0; } int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) @@ -1966,7 +1937,7 @@ static int drm_bo_init_mm(drm_device_t * dev, DRM_ERROR("Illegal memory type %d\n", type); return ret; } - + man = &bm->man[type]; if (man->has_type) { DRM_ERROR("Memory manager already initialized for type %d\n", @@ -1975,7 +1946,7 @@ static int drm_bo_init_mm(drm_device_t * dev, } ret = dev->driver->bo_driver->init_mem_type(dev, type, man); - if (ret) + if (ret) return ret; ret = 0; @@ -2174,15 +2145,15 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. */ -int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem) +int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem) { drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { if (mem->mem_type == DRM_BO_MEM_LOCAL) return 0; - + if (man->flags & _DRM_FLAG_MEMTYPE_CMA) return 0; @@ -2191,6 +2162,7 @@ int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem) } return 1; } + EXPORT_SYMBOL(drm_mem_reg_is_pci); /** @@ -2207,17 +2179,16 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. */ -int drm_bo_pci_offset(drm_device_t *dev, - drm_bo_mem_reg_t *mem, +int drm_bo_pci_offset(drm_device_t * dev, + drm_bo_mem_reg_t * mem, unsigned long *bus_base, - unsigned long *bus_offset, - unsigned long *bus_size) + unsigned long *bus_offset, unsigned long *bus_size) { drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; *bus_size = 0; - if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) + if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) return -EINVAL; if (drm_mem_reg_is_pci(dev, mem)) { @@ -2229,7 +2200,6 @@ int drm_bo_pci_offset(drm_device_t *dev, return 0; } - /** * \c Kill all user-space virtual mappings of this buffer object. * @@ -2238,7 +2208,7 @@ int drm_bo_pci_offset(drm_device_t *dev, * Call bo->mutex locked. 
*/ -void drm_bo_unmap_virtual(drm_buffer_object_t *bo) +void drm_bo_unmap_virtual(drm_buffer_object_t * bo) { drm_device_t *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; @@ -2247,12 +2217,12 @@ void drm_bo_unmap_virtual(drm_buffer_object_t *bo) unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } -static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo) +static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) { drm_map_list_t *list = &bo->map_list; drm_local_map_t *map; drm_device_t *dev = bo->dev; - + if (list->user_token) { drm_ht_remove_item(&dev->map_hash, &list->hash); list->user_token = 0; @@ -2272,12 +2242,12 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo) drm_bo_usage_deref_locked(bo); } -static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) +static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) { drm_map_list_t *list = &bo->map_list; drm_local_map_t *map; drm_device_t *dev = bo->dev; - + list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); if (!list->map) return -ENOMEM; @@ -2288,8 +2258,8 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) map->flags = _DRM_REMOVABLE; map->size = bo->mem.num_pages * PAGE_SIZE; atomic_inc(&bo->usage); - map->handle = (void *) bo; - + map->handle = (void *)bo; + list->file_offset_node = drm_mm_search_free(&dev->offset_manager, bo->mem.num_pages, 0, 0); @@ -2306,7 +2276,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo) drm_bo_takedown_vm_locked(bo); return -ENOMEM; } - + list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; return 0; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 1d142087..7e195125 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -30,18 +30,16 @@ #include "drmP.h" - /** * Free the old memory node unless it's a pinned region and we * have not been requested to free also pinned regions. 
*/ -static void drm_bo_free_old_node(drm_buffer_object_t *bo) +static void drm_bo_free_old_node(drm_buffer_object_t * bo) { drm_bo_mem_reg_t *old_mem = &bo->mem; - if (old_mem->mm_node && - (old_mem->mm_node != bo->pinned_node)) { + if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); drm_mm_put_block(old_mem->mm_node); old_mem->mm_node = NULL; @@ -50,10 +48,8 @@ static void drm_bo_free_old_node(drm_buffer_object_t *bo) old_mem->mm_node = NULL; } -int drm_bo_move_ttm(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem) +int drm_bo_move_ttm(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { drm_ttm_t *ttm = bo->ttm; drm_bo_mem_reg_t *old_mem = &bo->mem; @@ -69,15 +65,15 @@ int drm_bo_move_ttm(drm_buffer_object_t *bo, drm_ttm_unbind(ttm); drm_bo_free_old_node(bo); - DRM_FLAG_MASKED(old_mem->flags, - DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | - DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); + DRM_FLAG_MASKED(old_mem->flags, + DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); old_mem->mem_type = DRM_BO_MEM_LOCAL; save_flags = old_mem->flags; - } + } if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { - ret = drm_bind_ttm(ttm, - new_mem->flags & DRM_BO_FLAG_CACHED, + ret = drm_bind_ttm(ttm, + new_mem->flags & DRM_BO_FLAG_CACHED, new_mem->mm_node->start); if (ret) return ret; @@ -89,8 +85,8 @@ int drm_bo_move_ttm(drm_buffer_object_t *bo, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } -EXPORT_SYMBOL(drm_bo_move_ttm); +EXPORT_SYMBOL(drm_bo_move_ttm); /** * \c Return a kernel virtual address to the buffer object PCI memory. @@ -107,11 +103,11 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ - -int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual) +int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, + void **virtual) { drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -120,11 +116,11 @@ int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual *virtual = NULL; ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size); - if (ret || bus_size == 0) + if (ret || bus_size == 0) return ret; if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) - addr = (void *) (((u8 *)man->io_addr) + bus_offset); + addr = (void *)(((u8 *) man->io_addr) + bus_offset); else { addr = ioremap_nocache(bus_base + bus_offset, bus_size); if (!addr) @@ -134,7 +130,6 @@ int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual return 0; } - /** * \c Unmap mapping obtained using drm_bo_ioremap * @@ -143,34 +138,34 @@ int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual * Call bo->mutex locked. 
*/ -void drm_mem_reg_iounmap(drm_device_t *dev, drm_bo_mem_reg_t *mem, +void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem, void *virtual) { - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; - + drm_buffer_manager_t *bm; + drm_mem_type_manager_t *man; bm = &dev->bm; man = &bm->man[mem->mem_type]; - + if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { iounmap(virtual); } } - static int drm_copy_io_page(void *dst, void *src, unsigned long page) { - uint32_t *dstP = (uint32_t *)((unsigned long) dst + (page << PAGE_SHIFT)); - uint32_t *srcP = (uint32_t *)((unsigned long) src + (page << PAGE_SHIFT)); + uint32_t *dstP = + (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); + uint32_t *srcP = + (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT)); int i; - for (i=0; i < PAGE_SIZE / sizeof(uint32_t); ++i) + for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i) iowrite32(ioread32(srcP++), dstP++); return 0; } -static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) { struct page *d = drm_ttm_get_page(ttm, page); void *dst; @@ -178,7 +173,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) if (!d) return -ENOMEM; - src = (void *)((unsigned long) src + (page << PAGE_SHIFT)); + src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); dst = kmap(d); if (!dst) return -ENOMEM; @@ -188,7 +183,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) return 0; } -static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) { struct page *s = drm_ttm_get_page(ttm, page); void *src; @@ -196,7 +191,7 @@ static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) if (!s) return -ENOMEM; - dst = (void *)((unsigned long) dst + (page << PAGE_SHIFT)); + dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); src = kmap(s); if (!src) return -ENOMEM; @@ -206,11 +201,8 @@ static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) return 0; } - -int drm_bo_move_memcpy(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem) +int drm_bo_move_memcpy(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { drm_device_t *dev = bo->dev; drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; @@ -226,42 +218,42 @@ int drm_bo_move_memcpy(drm_buffer_object_t *bo, unsigned long page; unsigned long add = 0; int dir; - + ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); if (ret) return ret; ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); - if (ret) + if (ret) goto out; if (old_iomap == NULL && new_iomap == NULL) goto out2; if (old_iomap == NULL && ttm == NULL) goto out2; - + add = 0; dir = 1; - if ((old_mem->mem_type == new_mem->mem_type) && - (new_mem->mm_node->start < - old_mem->mm_node->start + old_mem->mm_node->size)) { + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { dir = -1; add = new_mem->num_pages - 1; } - for (i=0; i < new_mem->num_pages; ++i) { - page = i*dir + add; - if (old_iomap == NULL) + for (i = 0; i < new_mem->num_pages; ++i) { + page = i * dir + add; + if (old_iomap == NULL) ret = drm_copy_ttm_io_page(ttm, new_iomap, page); else if (new_iomap == NULL) ret = drm_copy_io_ttm_page(ttm, old_iomap, page); - else + else ret = 
drm_copy_io_page(new_iomap, old_iomap, page); if (ret) goto out1; } mb(); -out2: + out2: drm_bo_free_old_node(bo); *old_mem = *new_mem; @@ -275,12 +267,13 @@ out2: bo->ttm = NULL; } -out1: + out1: drm_mem_reg_iounmap(dev, new_mem, new_iomap); -out: + out: drm_mem_reg_iounmap(dev, &old_copy, old_iomap); return ret; } + EXPORT_SYMBOL(drm_bo_move_memcpy); /* @@ -289,8 +282,8 @@ EXPORT_SYMBOL(drm_bo_move_memcpy); * object. Call bo->mutex locked. */ -int drm_buffer_object_transfer(drm_buffer_object_t *bo, - drm_buffer_object_t **new_obj) +int drm_buffer_object_transfer(drm_buffer_object_t * bo, + drm_buffer_object_t ** new_obj) { drm_buffer_object_t *fbo; drm_device_t *dev = bo->dev; @@ -299,7 +292,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) return -ENOMEM; - + *fbo = *bo; mutex_init(&fbo->mutex); mutex_lock(&fbo->mutex); @@ -331,12 +324,11 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, * We cannot restart until it has finished. */ -int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, +int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, int evict, int no_wait, uint32_t fence_type, - uint32_t fence_flags, - drm_bo_mem_reg_t *new_mem) + uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) { drm_device_t *dev = bo->dev; drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; @@ -345,7 +337,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; drm_buffer_object_t *old_obj; - + if (bo->fence) drm_fence_usage_deref_unlocked(dev, bo->fence); ret = drm_fence_object_create(dev, fence_type, @@ -365,18 +357,17 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, */ if (1) #else - if (evict || ((bo->mem.mm_node == bo->pinned_node) && + if (evict || ((bo->mem.mm_node == bo->pinned_node) && bo->mem.mm_node != NULL)) #endif { ret = drm_bo_wait(bo, 0, 1, 0); - if (ret) + if (ret) return ret; drm_bo_free_old_node(bo); - if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - (bo->ttm != NULL)) { + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { drm_ttm_unbind(bo->ttm); drm_destroy_ttm(bo->ttm); bo->ttm = NULL; @@ -404,7 +395,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, list_del_init(&old_obj->lru); DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); drm_bo_add_to_lru(old_obj); - + drm_bo_usage_deref_locked(old_obj); mutex_unlock(&dev->struct_mutex); @@ -416,6 +407,5 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } -EXPORT_SYMBOL(drm_bo_move_accel_cleanup); - +EXPORT_SYMBOL(drm_bo_move_accel_cleanup); diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 8cd961d7..e67719e2 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -35,18 +35,17 @@ static void drm_ttm_ipi_handler(void *null) flush_agp_cache(); } -static void drm_ttm_cache_flush(void) +static void drm_ttm_cache_flush(void) { if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) DRM_ERROR("Timed out waiting for drm cache flush.\n"); } - /* * Use kmalloc if possible. Otherwise fall back to vmalloc. 
*/ -static void ttm_alloc_pages(drm_ttm_t *ttm) +static void ttm_alloc_pages(drm_ttm_t * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ttm->pages = NULL; @@ -67,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t *ttm) } } -static void ttm_free_pages(drm_ttm_t *ttm) +static void ttm_free_pages(drm_ttm_t * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); @@ -81,7 +80,6 @@ static void ttm_free_pages(drm_ttm_t *ttm) ttm->pages = NULL; } - static struct page *drm_ttm_alloc_page(void) { struct page *page; @@ -102,7 +100,6 @@ static struct page *drm_ttm_alloc_page(void) return page; } - /* * Change caching policy for the linear kernel map * for range of pages in a ttm. @@ -117,7 +114,7 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached) if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached) return 0; - if (noncached) + if (noncached) drm_ttm_cache_flush(); for (i = 0; i < ttm->num_pages; ++i) { @@ -194,7 +191,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) return 0; } -struct page *drm_ttm_get_page(drm_ttm_t *ttm, int index) +struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) { struct page *p; drm_buffer_manager_t *bm = &ttm->dev->bm; @@ -210,7 +207,6 @@ struct page *drm_ttm_get_page(drm_ttm_t *ttm, int index) return p; } - static int drm_ttm_populate(drm_ttm_t * ttm) { struct page *page; @@ -235,7 +231,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm) * Initialize a ttm. */ -drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size) +drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) { drm_bo_driver_t *bo_driver = dev->driver->bo_driver; drm_ttm_t *ttm; @@ -344,4 +340,5 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) return 0; } + EXPORT_SYMBOL(drm_bind_ttm); diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index e10db8c1..b96f5cee 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -79,13 +79,12 @@ typedef struct drm_ttm { } drm_ttm_t; - extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size); extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); extern void drm_ttm_unbind(drm_ttm_t * ttm); extern void drm_ttm_evict(drm_ttm_t * ttm); extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); -extern struct page *drm_ttm_get_page(drm_ttm_t *ttm, int index); +extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); /* * Destroy a ttm. 
The user normally calls drmRmMap or a similar IOCTL to do this, diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 7655902f..4fab0bef 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -33,7 +33,6 @@ #include "i915_drm.h" #include "i915_drv.h" - drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev) { return drm_agp_init_ttm(dev, NULL); @@ -65,51 +64,49 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) return i915_emit_mi_flush(dev, flush_cmd); } -int i915_init_mem_type(drm_device_t *dev, uint32_t type, - drm_mem_type_manager_t *man) +int i915_init_mem_type(drm_device_t * dev, uint32_t type, + drm_mem_type_manager_t * man) { - switch(type) { + switch (type) { case DRM_BO_MEM_LOCAL: man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CACHED; + _DRM_FLAG_MEMTYPE_CACHED; break; case DRM_BO_MEM_TT: if (!(drm_core_has_AGP(dev) && dev->agp)) { - DRM_ERROR("AGP is not enabled for memory type %u\n", - (unsigned) type); + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); return -EINVAL; } man->io_offset = dev->agp->agp_info.aper_base; man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; man->io_addr = NULL; man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CSELECT | - _DRM_FLAG_NEEDS_IOREMAP; + _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; break; case DRM_BO_MEM_PRIV0: if (!(drm_core_has_AGP(dev) && dev->agp)) { - DRM_ERROR("AGP is not enabled for memory type %u\n", - (unsigned) type); + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); return -EINVAL; } man->io_offset = dev->agp->agp_info.aper_base; man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; man->io_addr = NULL; man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_FIXED | - _DRM_FLAG_NEEDS_IOREMAP; + _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; break; default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned) type); + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); return -EINVAL; } return 0; } -uint32_t i915_evict_flags(drm_device_t *dev, uint32_t type) +uint32_t i915_evict_flags(drm_device_t * dev, uint32_t type) { - switch(type) { + switch (type) { case DRM_BO_MEM_LOCAL: case DRM_BO_MEM_TT: return DRM_BO_FLAG_MEM_LOCAL; @@ -118,11 +115,10 @@ uint32_t i915_evict_flags(drm_device_t *dev, uint32_t type) } } -static void i915_emit_copy_blit(drm_device_t *dev, - uint32_t src_offset, - uint32_t dst_offset, - uint32_t pages, - int direction) +static void i915_emit_copy_blit(drm_device_t * dev, + uint32_t src_offset, + uint32_t dst_offset, + uint32_t pages, int direction) { uint32_t cur_pages; uint32_t stride = PAGE_SIZE; @@ -131,9 +127,9 @@ static void i915_emit_copy_blit(drm_device_t *dev, if (!dev_priv) return; - + i915_kernel_lost_context(dev); - while(pages > 0) { + while (pages > 0) { cur_pages = pages; if (cur_pages > 2048) cur_pages = 2048; @@ -142,7 +138,7 @@ static void i915_emit_copy_blit(drm_device_t *dev, BEGIN_LP_RING(6); OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB); - OUT_RING((stride & 0xffff) | ( 0xcc << 16) | (1 << 24) | + OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) | (1 << 25) | (direction ? 
(1 << 30) : 0)); OUT_RING((cur_pages << 16) | PAGE_SIZE); OUT_RING(dst_offset); @@ -153,33 +149,29 @@ static void i915_emit_copy_blit(drm_device_t *dev, return; } -static int i915_move_blit(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem) +static int i915_move_blit(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { drm_bo_mem_reg_t *old_mem = &bo->mem; int dir = 0; - if ((old_mem->mem_type == new_mem->mem_type) && - (new_mem->mm_node->start < - old_mem->mm_node->start + old_mem->mm_node->size)) { + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { dir = 1; } i915_emit_copy_blit(bo->dev, old_mem->mm_node->start << PAGE_SHIFT, new_mem->mm_node->start << PAGE_SHIFT, - new_mem->num_pages, - dir); + new_mem->num_pages, dir); i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH); return drm_bo_move_accel_cleanup(bo, evict, no_wait, DRM_FENCE_TYPE_EXE | - DRM_I915_FENCE_TYPE_RW, - DRM_I915_FENCE_FLAG_FLUSHED, - new_mem); + DRM_I915_FENCE_TYPE_RW, + DRM_I915_FENCE_FLAG_FLUSHED, new_mem); } /* @@ -187,11 +179,8 @@ static int i915_move_blit(drm_buffer_object_t *bo, * then blit and subsequently move out again. */ - -static int i915_move_flip(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem) +static int i915_move_flip(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { drm_device_t *dev = bo->dev; drm_bo_mem_reg_t tmp_mem; @@ -200,23 +189,22 @@ static int i915_move_flip(drm_buffer_object_t *bo, tmp_mem = *new_mem; tmp_mem.mm_node = NULL; tmp_mem.mask = DRM_BO_FLAG_MEM_TT | - DRM_BO_FLAG_CACHED | - DRM_BO_FLAG_FORCE_CACHING; - + DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING; + ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); - if (ret) + if (ret) return ret; - + ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start); - if (ret) + if (ret) goto out_cleanup; ret = i915_move_blit(bo, 1, no_wait, &tmp_mem); - if (ret) + if (ret) goto out_cleanup; - + ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); -out_cleanup: + out_cleanup: if (tmp_mem.mm_node) { mutex_lock(&dev->struct_mutex); drm_mm_put_block(tmp_mem.mm_node); @@ -226,24 +214,19 @@ out_cleanup: return ret; } - -int i915_move(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem) +int i915_move(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { drm_bo_mem_reg_t *old_mem = &bo->mem; if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { - if (i915_move_flip(bo, evict, no_wait, new_mem)) - return drm_bo_move_memcpy(bo, evict, no_wait, - new_mem); + if (i915_move_flip(bo, evict, no_wait, new_mem)) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else { if (i915_move_blit(bo, evict, no_wait, new_mem)) - return drm_bo_move_memcpy(bo, evict, no_wait, - new_mem); + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } return 0; } From abc14ddfb5ad85bf2a5094597d829e6614e6c359 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 21:40:42 +0100 Subject: [PATCH 28/34] Update flags and comments. 
--- shared-core/drm.h | 105 ++++++++++++++++++++++++++++------------------ 1 file changed, 65 insertions(+), 40 deletions(-) diff --git a/shared-core/drm.h b/shared-core/drm.h index 71189559..479a1a65 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -694,9 +694,10 @@ typedef struct drm_fence_arg { } drm_fence_arg_t; /* Buffer permissions, referring to how the GPU uses the buffers. - these translate to fence types used for the buffers. - Typically a texture buffer is read, A destination buffer is write and - a command (batch-) buffer is exe. Can be or-ed together. */ + * these translate to fence types used for the buffers. + * Typically a texture buffer is read, A destination buffer is write and + * a command (batch-) buffer is exe. Can be or-ed together. + */ #define DRM_BO_FLAG_READ 0x00000001 #define DRM_BO_FLAG_WRITE 0x00000002 @@ -704,55 +705,82 @@ typedef struct drm_fence_arg { /* * Status flags. Can be read to determine the actual state of a buffer. + * Can also be set in the buffer mask before validation. */ -/* - * Cannot evict this buffer. Not even with force. This type of buffer should - * only be available for root, and must be manually removed before buffer - * manager shutdown or swapout. - */ -#define DRM_BO_FLAG_NO_EVICT 0x00000010 -/* Always keep a system memory shadow to a vram buffer */ -#define DRM_BO_FLAG_MAPPABLE 0x00000020 -/* The buffer is shareable with other processes */ -#define DRM_BO_FLAG_SHAREABLE 0x00000040 -/* The buffer is currently cached */ -#define DRM_BO_FLAG_CACHED 0x00000080 -/* Make sure that every time this buffer is validated, it ends up on the same - * location. The buffer will also not be evicted when claiming space for - * other buffers. Basically a pinned buffer but it may be thrown out as - * part of buffer manager shutdown or swapout. Not supported yet.*/ -#define DRM_BO_FLAG_NO_MOVE 0x00000100 - - /* - * Request flags. + * Mask: Never evict this buffer. Not even with force. This type of buffer is only + * available to root and must be manually removed before buffer manager shutdown + * or lock. + * Flags: Acknowledge */ +#define DRM_BO_FLAG_NO_EVICT 0x00000010 -/* Make sure the buffer is in cached memory when mapped for reading */ +/* + * Mask: Require that the buffer is placed in mappable memory when validated. + * If not set the buffer may or may not be in mappable memory when validated. + * Flags: If set, the buffer is in mappable memory. + */ +#define DRM_BO_FLAG_MAPPABLE 0x00000020 + +/* Mask: The buffer should be shareable with other processes. + * Flags: The buffer is shareable with other processes. + */ +#define DRM_BO_FLAG_SHAREABLE 0x00000040 + +/* Mask: If set, place the buffer in cache-coherent memory if available. + * If clear, never place the buffer in cache coherent memory if validated. + * Flags: The buffer is currently in cache-coherent memory. + */ +#define DRM_BO_FLAG_CACHED 0x00000080 + +/* Mask: Make sure that every time this buffer is validated, + * it ends up on the same location provided that the memory mask is the same. + * The buffer will also not be evicted when claiming space for + * other buffers. Basically a pinned buffer but it may be thrown out as + * part of buffer manager shutdown or locking. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_NO_MOVE 0x00000100 + +/* Mask: Make sure the buffer is in cached memory when mapped for reading. + * Flags: Acknowledge. + */ #define DRM_BO_FLAG_READ_CACHED 0x00080000 -/* Bind this buffer cached if the hardware supports it. 
*/ + +/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. + * Flags: Acknowledge. + */ #define DRM_BO_FLAG_FORCE_CACHING 0x00002000 + +/* + * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. + * Flags: Acknowledge. + */ #define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000 -/* System Memory */ +/* + * Memory type flags that can be or'ed together in the mask, but only + * one appears in flags. + */ + +/* System memory */ #define DRM_BO_FLAG_MEM_LOCAL 0x01000000 /* Translation table memory */ #define DRM_BO_FLAG_MEM_TT 0x02000000 /* Vram memory */ #define DRM_BO_FLAG_MEM_VRAM 0x04000000 /* Up to the driver to define. */ -#define DRM_BO_FLAG_MEM_PRIV0 0x10000000 -#define DRM_BO_FLAG_MEM_PRIV1 0x20000000 -#define DRM_BO_FLAG_MEM_PRIV2 0x40000000 -#define DRM_BO_FLAG_MEM_PRIV3 0x80000000 +#define DRM_BO_FLAG_MEM_PRIV0 0x08000000 +#define DRM_BO_FLAG_MEM_PRIV1 0x10000000 +#define DRM_BO_FLAG_MEM_PRIV2 0x20000000 +#define DRM_BO_FLAG_MEM_PRIV3 0x40000000 +#define DRM_BO_FLAG_MEM_PRIV4 0x80000000 /* Memory flag mask */ #define DRM_BO_MASK_MEM 0xFF000000 #define DRM_BO_MASK_MEMTYPE 0xFF0000A0 -/* When creating a buffer, Avoid system storage even if allowed */ -#define DRM_BO_HINT_AVOID_LOCAL 0x00000001 /* Don't block on validate and map */ #define DRM_BO_HINT_DONT_BLOCK 0x00000002 /* Don't place this buffer on the unfenced list.*/ @@ -761,9 +789,6 @@ typedef struct drm_fence_arg { #define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010 -/* Driver specific flags. Could be for example rendering engine */ -#define DRM_BO_MASK_DRIVER 0x00F00000 - typedef enum { drm_bo_type_dc, drm_bo_type_user, @@ -831,11 +856,11 @@ typedef struct drm_bo_arg{ #define DRM_BO_MEM_LOCAL 0 #define DRM_BO_MEM_TT 1 #define DRM_BO_MEM_VRAM 2 -#define DRM_BO_MEM_PRIV0 4 -#define DRM_BO_MEM_PRIV1 5 -#define DRM_BO_MEM_PRIV2 6 -#define DRM_BO_MEM_PRIV3 7 - +#define DRM_BO_MEM_PRIV0 3 +#define DRM_BO_MEM_PRIV1 4 +#define DRM_BO_MEM_PRIV2 5 +#define DRM_BO_MEM_PRIV3 6 +#define DRM_BO_MEM_PRIV4 7 #define DRM_BO_MEM_TYPES 8 /* For now. */ From 9efdae317ce01cea95f75855b175243ae858fde4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 13 Feb 2007 20:05:32 +0100 Subject: [PATCH 29/34] More bugfixes. Fixed memory, pinned buffers and unmappable memory now seems fully functional. 
--- linux-core/drmP.h | 2 + linux-core/drm_bo.c | 188 +++++++++++++++++++++++++++------------ linux-core/drm_bo_move.c | 1 - linux-core/i915_buffer.c | 9 +- 4 files changed, 139 insertions(+), 61 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index b2ce724d..c472689b 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1520,6 +1520,8 @@ extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, extern int drm_bo_mem_space(drm_buffer_object_t *bo, drm_bo_mem_reg_t *mem, int no_wait); +extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, + int no_wait, int move_unfenced); /* diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index ed089096..e593258b 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -75,11 +75,12 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) { drm_mem_type_manager_t *man; - if (bo->mem.mm_node != bo->pinned_node) { + if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); - } else + } else { INIT_LIST_HEAD(&bo->lru); + } } static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) @@ -339,6 +340,8 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) list_del_init(&bo->lru); if (bo->mem.mm_node) { drm_mm_put_block(bo->mem.mm_node); + if (bo->pinned_node == bo->mem.mm_node) + bo->pinned_node = NULL; bo->mem.mm_node = NULL; } list_del_init(&bo->pinned_lru); @@ -377,7 +380,11 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { - BUG_ON(bo->fence != NULL); + if (bo->fence != NULL) { + DRM_ERROR("Fence was non-zero.\n"); + drm_bo_cleanup_refs(bo, 0); + return; + } #ifdef DRM_ODD_MM_COMPAT BUG_ON(!list_empty(&bo->vma_list)); @@ -565,6 +572,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, count = 0; l = f_list.next; while (l != &f_list) { + prefetch(l->next); entry = list_entry(l, drm_buffer_object_t, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); @@ -629,8 +637,6 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, if (bo->type == drm_bo_type_fake) { bo->mem.mem_type = DRM_BO_MEM_LOCAL; bo->mem.mm_node = NULL; - bo->pinned_mem_type = DRM_BO_MEM_LOCAL; - bo->pinned_node = NULL; goto out1; } @@ -641,13 +647,10 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed to find memory space for " - "buffer eviction.\n"); + "buffer 0x%p eviction.\n", bo); goto out; } - if (bo->pinned_node) - DRM_ERROR("Evicting pinned buffer\n"); - ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); if (ret) { @@ -659,7 +662,8 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, out1: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { - drm_mm_put_block(evict_mem.mm_node); + if (evict_mem.mm_node != bo->pinned_node) + drm_mm_put_block(evict_mem.mm_node); evict_mem.mm_node = NULL; } list_del(&bo->lru); @@ -700,8 +704,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->mem. 
- flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -737,18 +740,24 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); - if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) { + if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) return 0; + + if (mem_type == DRM_BO_MEM_LOCAL) { + *res_mask = cur_flags; + return 1; } + flag_diff = (mask ^ cur_flags); if ((flag_diff & DRM_BO_FLAG_CACHED) && - (mask & DRM_BO_FLAG_FORCE_CACHING)) { + (!(mask & DRM_BO_FLAG_CACHED) || + (mask & DRM_BO_FLAG_FORCE_CACHING))) return 0; - } + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && - (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) { + ((mask & DRM_BO_FLAG_MAPPABLE) || + (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) ) return 0; - } *res_mask = cur_flags; return 1; @@ -772,6 +781,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, drm_mm_node_t *node = NULL; int ret; + mem->mm_node = NULL; for (i = 0; i < num_prios; ++i) { mem_type = prios[i]; man = &bm->man[mem_type]; @@ -787,7 +797,6 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, if ((mem_type == bo->pinned_mem_type) && (bo->pinned_node != NULL)) { - DRM_ERROR("Choosing pinned region\n"); node = bo->pinned_node; break; } @@ -1248,17 +1257,17 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * Determine where to move the buffer. */ ret = drm_bo_mem_space(bo, &mem, no_wait); - if (ret) goto out_unlock; ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); - out_unlock: + out_unlock: if (ret || !move_unfenced) { mutex_lock(&dev->struct_mutex); if (mem.mm_node) { - drm_mm_put_block(mem.mm_node); + if (mem.mm_node != bo->pinned_node) + drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; } DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); @@ -1279,10 +1288,13 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) return 0; if ((flag_diff & DRM_BO_FLAG_CACHED) && - (mem->mask & DRM_BO_FLAG_FORCE_CACHING)) - return 0; + (!(mem->mask & DRM_BO_FLAG_CACHED) || + (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) { + return 0; + } if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && - (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)) + ((mem->mask & DRM_BO_FLAG_MAPPABLE) || + (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))) return 0; return 1; } @@ -1360,8 +1372,8 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, */ if (!drm_bo_mem_compat(&bo->mem)) { - ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, - no_wait, move_unfenced); + ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait, + move_unfenced); if (ret) { if (ret != -EAGAIN) DRM_ERROR("Failed moving buffer.\n"); @@ -1374,14 +1386,14 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, */ if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { - bo->pinned_mem_type = bo->mem.mem_type; mutex_lock(&dev->struct_mutex); list_del_init(&bo->pinned_lru); drm_bo_add_to_pinned_lru(bo); if (bo->pinned_node != bo->mem.mm_node) { - drm_mm_put_block(bo->pinned_node); + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); bo->pinned_node = bo->mem.mm_node; } @@ -1763,6 +1775,39 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return 0; } +/** + *Clean the unfenced list and put on regular LRU. + *This is part of the memory manager cleanup and should only be + *called with the DRI lock held. 
+ *Call dev->struct_sem locked. + */ + +static void drm_bo_clean_unfenced(drm_device_t *dev) +{ + drm_buffer_manager_t *bm = &dev->bm; + struct list_head *head, *list; + drm_buffer_object_t *entry; + + head = &bm->unfenced; + + list = head->next; + while(list != head) { + prefetch(list->next); + entry = list_entry(list, drm_buffer_object_t, lru); + + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&entry->mutex); + mutex_lock(&dev->struct_mutex); + + list_del(&entry->lru); + DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + drm_bo_add_to_lru(entry); + mutex_unlock(&entry->mutex); + list = head->next; + } +} + static int drm_bo_leave_list(drm_buffer_object_t * bo, uint32_t mem_type, int free_pinned, int allow_errors) @@ -1770,16 +1815,12 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, drm_device_t *dev = bo->dev; int ret = 0; - atomic_inc(&bo->usage); - mutex_unlock(&dev->struct_mutex); mutex_lock(&bo->mutex); ret = drm_bo_expire_fence(bo, allow_errors); if (ret) goto out; - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - if (free_pinned) { DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); mutex_lock(&dev->struct_mutex); @@ -1814,52 +1855,86 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, out: mutex_unlock(&bo->mutex); - mutex_lock(&dev->struct_mutex); - drm_bo_usage_deref_locked(bo); return ret; } + +static drm_buffer_object_t *drm_bo_entry(struct list_head *list, + int pinned_list) +{ + if (pinned_list) + return list_entry(list, drm_buffer_object_t, pinned_lru); + else + return list_entry(list, drm_buffer_object_t, lru); +} + /* - * dev->struct_sem locked. + * dev->struct_mutex locked. */ static int drm_bo_force_list_clean(drm_device_t * dev, struct list_head *head, unsigned mem_type, int free_pinned, - int allow_errors, int pinned_list) + int allow_errors, + int pinned_list) { - struct list_head *list, *next; - drm_buffer_object_t *entry; + struct list_head *list, *next, *prev; + drm_buffer_object_t *entry, *nentry; int ret; - int do_retry; + int do_restart; /* - * We need to - * restart if a node disappears from under us. - * Nodes cannot be added since the hardware lock is needed - * For this operation. + * The list traversal is a bit odd here, because an item may + * disappear from the list when we release the struct_mutex or + * when we decrease the usage count. Also we're not guaranteed + * to drain pinned lists, so we can't always restart. */ - retry: +restart: + nentry = NULL; list_for_each_safe(list, next, head) { - if (pinned_list) - entry = list_entry(list, drm_buffer_object_t, - pinned_lru); - else - entry = list_entry(list, drm_buffer_object_t, lru); + prev = list->prev; + + entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list); atomic_inc(&entry->usage); + if (nentry) { + atomic_dec(&nentry->usage); + nentry = NULL; + } + + /* + * Protect the next item from destruction, so we can check + * its list pointers later on. + */ + + if (next != head) { + nentry = drm_bo_entry(next, pinned_list); + atomic_inc(&nentry->usage); + } + mutex_unlock(&dev->struct_mutex); + ret = drm_bo_leave_list(entry, mem_type, free_pinned, allow_errors); + mutex_lock(&dev->struct_mutex); - do_retry = list->next != next; drm_bo_usage_deref_locked(entry); - if (ret) return ret; - if (do_retry) - goto retry; + /* + * Has the next item disappeared from the list? 
+ */ + + do_restart = ((next->prev != list) && (next->prev != prev)); + + if (nentry != NULL && do_restart) { + drm_bo_usage_deref_locked(nentry); + nentry = NULL; + } + + if (do_restart) + goto restart; } return 0; } @@ -1886,7 +1961,7 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) ret = 0; if (mem_type > 0) { - drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0, 0); + drm_bo_clean_unfenced(dev); drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); @@ -1917,10 +1992,11 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) return -EINVAL; } - ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1, 0); + drm_bo_clean_unfenced(dev); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); + ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); return ret; } diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 7e195125..21f5f6cc 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -58,7 +58,6 @@ int drm_bo_move_ttm(drm_buffer_object_t * bo, int ret; if (old_mem->mem_type == DRM_BO_MEM_TT) { - if (evict) drm_ttm_evict(ttm); else diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 4fab0bef..3ccfab38 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -93,7 +93,7 @@ int i915_init_mem_type(drm_device_t * dev, uint32_t type, man->io_offset = dev->agp->agp_info.aper_base; man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; man->io_addr = NULL; - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; break; @@ -111,7 +111,7 @@ uint32_t i915_evict_flags(drm_device_t * dev, uint32_t type) case DRM_BO_MEM_TT: return DRM_BO_FLAG_MEM_LOCAL; default: - return DRM_BO_FLAG_MEM_TT; + return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; } } @@ -204,10 +204,11 @@ static int i915_move_flip(drm_buffer_object_t * bo, goto out_cleanup; ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); - out_cleanup: +out_cleanup: if (tmp_mem.mm_node) { mutex_lock(&dev->struct_mutex); - drm_mm_put_block(tmp_mem.mm_node); + if (tmp_mem.mm_node != bo->pinned_node) + drm_mm_put_block(tmp_mem.mm_node); tmp_mem.mm_node = NULL; mutex_unlock(&dev->struct_mutex); } From 7bcb62b45d18ab7b48ad3cb5d13aec3bc577678e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 14 Feb 2007 10:49:37 +0100 Subject: [PATCH 30/34] Rework buffer object vm code to use nopfn() for kernels >= 2.6.19. --- linux-core/drm_compat.c | 125 +++++++++++++++++++++++++--------------- linux-core/drm_compat.h | 32 ++++++---- linux-core/drm_vm.c | 55 ++++++++++++++---- 3 files changed, 145 insertions(+), 67 deletions(-) diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 8dd15ded..eeda4e4a 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -79,54 +79,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) /* - * vm code for kernels below 2,6,15 in which version a major vm write + * vm code for kernels below 2.6.15 in which version a major vm write * occured. This implement a simple straightforward * version similar to what's going to be - * in kernel 2.6.20+? + * in kernel 2.6.19+ + * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use + * nopfn. 
*/ -static int drm_pte_is_clear(struct vm_area_struct *vma, - unsigned long addr) -{ - struct mm_struct *mm = vma->vm_mm; - int ret = 1; - pte_t *pte; - pmd_t *pmd; - pud_t *pud; - pgd_t *pgd; - - - spin_lock(&mm->page_table_lock); - pgd = pgd_offset(mm, addr); - if (pgd_none(*pgd)) - goto unlock; - pud = pud_offset(pgd, addr); - if (pud_none(*pud)) - goto unlock; - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) - goto unlock; - pte = pte_offset_map(pmd, addr); - if (!pte) - goto unlock; - ret = pte_none(*pte); - pte_unmap(pte); - unlock: - spin_unlock(&mm->page_table_lock); - return ret; -} - -int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, - unsigned long pfn, pgprot_t pgprot) -{ - int ret; - if (!drm_pte_is_clear(vma, addr)) - return -EBUSY; - - ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot); - return ret; -} - static struct { spinlock_t lock; struct page *dummy_page; @@ -186,10 +146,85 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, #endif +#if !defined(DRM_FULL_MM_COMPAT) && \ + ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))) + +static int drm_pte_is_clear(struct vm_area_struct *vma, + unsigned long addr) +{ + struct mm_struct *mm = vma->vm_mm; + int ret = 1; + pte_t *pte; + pmd_t *pmd; + pud_t *pud; + pgd_t *pgd; + + spin_lock(&mm->page_table_lock); + pgd = pgd_offset(mm, addr); + if (pgd_none(*pgd)) + goto unlock; + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + goto unlock; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + goto unlock; + pte = pte_offset_map(pmd, addr); + if (!pte) + goto unlock; + ret = pte_none(*pte); + pte_unmap(pte); + unlock: + spin_unlock(&mm->page_table_lock); + return ret; +} + +int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn) +{ + int ret; + if (!drm_pte_is_clear(vma, addr)) + return -EBUSY; + + ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot); + return ret; +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT)) + +/** + * While waiting for the fault() handler to appear in + * we accomplish approximately + * the same wrapping it with nopfn. + */ + +unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma, + unsigned long address) +{ + struct fault_data data; + data.address = address; + + (void) drm_bo_vm_fault(vma, &data); + if (data.type == VM_FAULT_OOM) + return NOPFN_OOM; + else if (data.type == VM_FAULT_SIGBUS) + return NOPFN_SIGBUS; + + /* + * pfn already set. + */ + + return 0; +} +#endif + + #ifdef DRM_ODD_MM_COMPAT /* - * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated + * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated * workaround for a single BUG statement in do_no_page in these versions. The * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_ * vmas mapping the ttm, before dev->struct_mutex is taken. 
The way we do this is to diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 313aab85..0dee3564 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -158,11 +158,14 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags) #include #include -#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && \ - (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))) #define DRM_ODD_MM_COMPAT #endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) +#define DRM_FULL_MM_COMPAT +#endif /* @@ -200,18 +203,23 @@ extern int drm_map_page_into_agp(struct page *page); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) extern struct page *get_nopage_retry(void); extern void free_nopage_retry(void); -struct fault_data; -extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma, - struct fault_data *data); #define NOPAGE_REFAULT get_nopage_retry() #endif +#if !defined(DRM_FULL_MM_COMPAT) && \ + ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))) -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) +struct fault_data; +extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma, + struct fault_data *data); + +#endif +#ifndef DRM_FULL_MM_COMPAT /* - * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19. + * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19. * For now, just return a dummy page that we've allocated out of * static space. The page will be put by do_nopage() since we've already * filled out the pte. @@ -228,13 +236,17 @@ struct fault_data { extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, - unsigned long pfn, pgprot_t pgprot); + unsigned long pfn); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type); - -#endif +#else +extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, + unsigned long address); +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */ +#endif /* ndef DRM_FULL_MM_COMPAT */ #ifdef DRM_ODD_MM_COMPAT diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 17778c26..4a340b57 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -720,11 +720,20 @@ EXPORT_SYMBOL(drm_mmap); * \param vma Virtual memory area. * \param data Fault data on failure or refault. * \return Always NULL as we insert pfns directly. + * + * It's important that pfns are inserted while holding the bo->mutex lock. + * otherwise we might race with unmap_mapping_range() which is always + * called with the bo->mutex lock held. + * + * It's not pretty to modify the vma->vm_page_prot variable while not + * holding the mm semaphore in write mode. However, we have it i read mode, + * so we won't be racing with any other writers, and we only actually modify + * it when no ptes are present so it shouldn't be a big deal. 
*/ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) || \ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) +#ifdef DRM_FULL_MM_COMPAT static #endif struct page *drm_bo_vm_fault(struct vm_area_struct *vma, @@ -738,7 +747,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, drm_device_t *dev; unsigned long pfn; int err; - pgprot_t pgprot; unsigned long bus_base; unsigned long bus_offset; unsigned long bus_size; @@ -759,14 +767,12 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, * move it to a mappable. */ +#ifdef DRM_BO_FULL_COMPAT if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { - uint32_t mask_save = bo->mem.mask; uint32_t new_mask = bo->mem.mask | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_FORCE_MAPPABLE; - err = drm_bo_move_buffer(bo, new_mask, 0, 0); - bo->mem.mask = mask_save; if (err) { data->type = (err == -EAGAIN) ? @@ -774,6 +780,24 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, goto out_unlock; } } +#else + if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { + unsigned long _end = jiffies + 3*DRM_HZ; + uint32_t new_mask = bo->mem.mask | + DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_FORCE_MAPPABLE; + + do { + err = drm_bo_move_buffer(bo, new_mask, 0, 0); + } while((err == -EAGAIN) && !time_after_eq(jiffies, _end)); + + if (err) { + DRM_ERROR("Timeout moving buffer to mappable location.\n"); + data->type = VM_FAULT_SIGBUS; + goto out_unlock; + } + } +#endif if (address > vma->vm_end) { data->type = VM_FAULT_SIGBUS; @@ -793,7 +817,7 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, if (bus_size) { pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; - pgprot = drm_io_prot(_DRM_AGP, vma); + vma->vm_page_prot = drm_io_prot(_DRM_AGP, vma); } else { ttm = bo->ttm; @@ -804,10 +828,10 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, goto out_unlock; } pfn = page_to_pfn(page); - pgprot = vma->vm_page_prot; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); } - err = vm_insert_pfn(vma, address, pfn, pgprot); + err = vm_insert_pfn(vma, address, pfn); if (!err || err == -EBUSY) data->type = VM_FAULT_MINOR; @@ -870,10 +894,14 @@ static void drm_bo_vm_close(struct vm_area_struct *vma) } static struct vm_operations_struct drm_bo_vm_ops = { -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) - .nopage = drm_bo_vm_nopage, -#else +#ifdef DRM_FULL_MM_COMPAT .fault = drm_bo_vm_fault, +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) + .nopfn = drm_bo_vm_nopfn, +#else + .nopage = drm_bo_vm_nopage, +#endif #endif .open = drm_bo_vm_open, .close = drm_bo_vm_close, @@ -896,6 +924,9 @@ int drm_bo_mmap_locked(struct vm_area_struct *vma, vma->vm_private_data = map->handle; vma->vm_file = filp; vma->vm_flags |= VM_RESERVED | VM_IO; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) + vma->vm_flags |= VM_PFNMAP; +#endif drm_bo_vm_open_locked(vma); #ifdef DRM_ODD_MM_COMPAT drm_bo_map_bound(vma); From 04760563b88c8e94f3ae448710d1ab8b350c2e5f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 14 Feb 2007 12:39:02 +0100 Subject: [PATCH 31/34] Set the drm bus map type for each buffer object memory type. 
--- linux-core/drmP.h | 1 + linux-core/drm_compat.c | 3 ++- linux-core/drm_vm.c | 4 +++- linux-core/i915_buffer.c | 4 +++- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index c472689b..e070c073 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -664,6 +664,7 @@ typedef struct drm_mem_type_manager { struct list_head lru; struct list_head pinned; uint32_t flags; + uint32_t drm_bus_maptype; unsigned long io_offset; unsigned long io_size; void *io_addr; diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index eeda4e4a..3bb35997 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -306,8 +306,9 @@ int drm_bo_map_bound(struct vm_area_struct *vma) BUG_ON(ret); if (bus_size) { + drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; - pgprot_t pgprot = drm_io_prot(_DRM_AGP, vma); + pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma); ret = io_remap_pfn_range(vma, vma->vm_start, pfn, vma->vm_end - vma->vm_start, pgprot); diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 4a340b57..f3b1088f 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -816,8 +816,10 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { + drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; - vma->vm_page_prot = drm_io_prot(_DRM_AGP, vma); + vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); } else { ttm = bo->ttm; diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 3ccfab38..374b28df 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -71,6 +71,7 @@ int i915_init_mem_type(drm_device_t * dev, uint32_t type, case DRM_BO_MEM_LOCAL: man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CACHED; + man->drm_bus_maptype = 0; break; case DRM_BO_MEM_TT: if (!(drm_core_has_AGP(dev) && dev->agp)) { @@ -83,6 +84,7 @@ int i915_init_mem_type(drm_device_t * dev, uint32_t type, man->io_addr = NULL; man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; break; case DRM_BO_MEM_PRIV0: if (!(drm_core_has_AGP(dev) && dev->agp)) { @@ -95,7 +97,7 @@ int i915_init_mem_type(drm_device_t * dev, uint32_t type, man->io_addr = NULL; man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; - + man->drm_bus_maptype = _DRM_AGP; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); From 5c9a7b0f9499b94856916facd110059223d243dc Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 14 Feb 2007 13:31:35 +0100 Subject: [PATCH 32/34] Remove an intel-specific hack and replace it with a fence driver callback. 
--- linux-core/drmP.h | 8 ++-- linux-core/drm_fence.c | 94 +++++++++++++++++++++++------------------ linux-core/i915_drv.c | 1 + linux-core/i915_fence.c | 22 +++++++--- shared-core/i915_drv.h | 6 ++- 5 files changed, 80 insertions(+), 51 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index e070c073..e0afc508 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -793,10 +793,11 @@ typedef struct drm_fence_driver{ uint32_t flush_diff; uint32_t sequence_mask; int lazy_capable; - int (*emit) (struct drm_device *dev, uint32_t flags, + int (*has_irq) (struct drm_device *dev, uint32_t class, uint32_t flags); + int (*emit) (struct drm_device *dev, uint32_t class, uint32_t flags, uint32_t *breadcrumb, uint32_t *native_type); - void (*poke_flush) (struct drm_device *dev); + void (*poke_flush) (struct drm_device *dev, uint32_t class); } drm_fence_driver_t; #define _DRM_FENCE_TYPE_EXE 0x00 @@ -1464,7 +1465,8 @@ extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_obje * fence objects (drm_fence.c) */ -extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type); +extern void drm_fence_handler(drm_device_t *dev, uint32_t class, + uint32_t sequence, uint32_t type); extern void drm_fence_manager_init(drm_device_t *dev); extern void drm_fence_manager_takedown(drm_device_t *dev); extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index b4125c6e..dc428949 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -34,7 +34,8 @@ * Typically called by the IRQ handler. */ -void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type) +void drm_fence_handler(drm_device_t * dev, uint32_t class, + uint32_t sequence, uint32_t type) { int wake = 0; uint32_t diff; @@ -156,7 +157,7 @@ static int fence_signaled(drm_device_t * dev, volatile drm_fence_driver_t *driver = dev->driver->fence_driver; if (poke_flush) - driver->poke_flush(dev); + driver->poke_flush(dev, fence->class); read_lock_irqsave(&fm->lock, flags); signaled = (fence->type & mask & fence->signaled) == (fence->type & mask); @@ -177,7 +178,6 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm, * Last_exe_flush is invalid. Find oldest sequence. */ -/* list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/ list = &fm->ring; if (list->next == &fm->ring) { return; @@ -234,7 +234,7 @@ int drm_fence_object_flush(drm_device_t * dev, } } write_unlock_irqrestore(&fm->lock, flags); - driver->poke_flush(dev); + driver->poke_flush(dev, fence->class); return 0; } @@ -273,11 +273,37 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence) EXPORT_SYMBOL(drm_fence_flush_old); +static int drm_fence_lazy_wait(drm_device_t *dev, + volatile drm_fence_object_t *fence, + int ignore_signals, uint32_t mask) +{ + drm_fence_manager_t *fm = &dev->fm; + unsigned long _end = jiffies + 3*DRM_HZ; + int ret = 0; + + do { + DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ, + fence_signaled(dev, fence, mask, 1)); + if (time_after_eq(jiffies, _end)) + break; + } while (ret == -EINTR && ignore_signals); + if (time_after_eq(jiffies, _end) && (ret != 0)) + ret = -EBUSY; + if (ret) { + if (ret == -EBUSY) { + DRM_ERROR("Fence timeout. " + "GPU lockup or fence driver was " + "taken down.\n"); + } + return ((ret == -EINTR) ? 
-EAGAIN : ret); + } + return 0; +} + int drm_fence_object_wait(drm_device_t * dev, volatile drm_fence_object_t * fence, int lazy, int ignore_signals, uint32_t mask) { - drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; int ret = 0; unsigned long _end; @@ -298,46 +324,32 @@ int drm_fence_object_wait(drm_device_t * dev, if (lazy && driver->lazy_capable) { - do { - DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ, - fence_signaled(dev, fence, mask, 1)); - if (time_after_eq(jiffies, _end)) - break; - } while (ret == -EINTR && ignore_signals); - if (time_after_eq(jiffies, _end) && (ret != 0)) - ret = -EBUSY; - if (ret) { - if (ret == -EBUSY) { - DRM_ERROR("Fence timeout. " - "GPU lockup or fence driver was " - "taken down.\n"); - } - return ((ret == -EINTR) ? -EAGAIN : ret); - } - } else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) && - driver->lazy_capable) { - - /* - * We use IRQ wait for EXE fence if available to gain - * CPU in some cases. - */ - - do { - DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ, - fence_signaled(dev, fence, - DRM_FENCE_TYPE_EXE, 1)); - if (time_after_eq(jiffies, _end)) - break; - } while (ret == -EINTR && ignore_signals); - if (time_after_eq(jiffies, _end) && (ret != 0)) - ret = -EBUSY; + ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask); if (ret) - return ((ret == -EINTR) ? -EAGAIN : ret); - } + return ret; + } else { + + if (driver->has_irq(dev, fence->class, + DRM_FENCE_TYPE_EXE)) { + ret = drm_fence_lazy_wait(dev, fence, ignore_signals, + DRM_FENCE_TYPE_EXE); + if (ret) + return ret; + } + + if (driver->has_irq(dev, fence->class, + mask & ~DRM_FENCE_TYPE_EXE)) { + ret = drm_fence_lazy_wait(dev, fence, ignore_signals, + mask); + if (ret) + return ret; + } + } if (fence_signaled(dev, fence, mask, 0)) return 0; + DRM_ERROR("Busy wait\n"); /* * Avoid kernel-space busy-waits. 
*/ @@ -367,7 +379,7 @@ int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence, int ret; drm_fence_unring(dev, &fence->ring); - ret = driver->emit(dev, fence_flags, &sequence, &native_type); + ret = driver->emit(dev, fence->class, fence_flags, &sequence, &native_type); if (ret) return ret; diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 848ffa78..b7bf883c 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -47,6 +47,7 @@ static drm_fence_driver_t i915_fence_driver = { .lazy_capable = 1, .emit = i915_fence_emit_sequence, .poke_flush = i915_poke_flush, + .has_irq = i915_fence_has_irq, }; #endif #ifdef I915_HAVE_BUFFER diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 2182604c..fa2a3d1f 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -61,7 +61,7 @@ static void i915_perform_flush(drm_device_t * dev) diff = sequence - fm->last_exe_flush; if (diff < driver->wrap_diff && diff != 0) { - drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE); + drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE); } diff = sequence - fm->exe_flush_sequence; @@ -84,7 +84,7 @@ static void i915_perform_flush(drm_device_t * dev) flush_flags = dev_priv->flush_flags; flush_sequence = dev_priv->flush_sequence; dev_priv->flush_pending = 0; - drm_fence_handler(dev, flush_sequence, flush_flags); + drm_fence_handler(dev, 0, flush_sequence, flush_flags); } } @@ -104,13 +104,13 @@ static void i915_perform_flush(drm_device_t * dev) flush_flags = dev_priv->flush_flags; flush_sequence = dev_priv->flush_sequence; dev_priv->flush_pending = 0; - drm_fence_handler(dev, flush_sequence, flush_flags); + drm_fence_handler(dev, 0, flush_sequence, flush_flags); } } } -void i915_poke_flush(drm_device_t * dev) +void i915_poke_flush(drm_device_t * dev, uint32_t class) { drm_fence_manager_t *fm = &dev->fm; unsigned long flags; @@ -120,7 +120,7 @@ void i915_poke_flush(drm_device_t * dev) write_unlock_irqrestore(&fm->lock, flags); } -int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags, +int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -144,3 +144,15 @@ void i915_fence_handler(drm_device_t * dev) i915_perform_flush(dev); write_unlock(&fm->lock); } + +int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags) +{ + /* + * We have an irq that tells us when we have a new breadcrumb. 
+ */ + + if (class == 0 && flags == DRM_FENCE_TYPE_EXE) + return 1; + + return 0; +} diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index ffc9d431..675fd168 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -178,10 +178,12 @@ extern void i915_mem_release(drm_device_t * dev, extern void i915_fence_handler(drm_device_t *dev); -extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags, +extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t class, + uint32_t flags, uint32_t *sequence, uint32_t *native_type); -extern void i915_poke_flush(drm_device_t *dev); +extern void i915_poke_flush(drm_device_t *dev, uint32_t class); +extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags); #endif #ifdef I915_HAVE_BUFFER From 8ffc1844b083e36266ebc4d1a47f6e8fe619fd05 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 14 Feb 2007 14:05:40 +0100 Subject: [PATCH 33/34] Move fence- and buffer-object related header stuff to drm_ttm.h --- linux-core/drmP.h | 369 ------------------------------------------- linux-core/drm_ttm.h | 359 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 357 insertions(+), 371 deletions(-) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index e0afc508..c3607c3f 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -591,117 +591,9 @@ typedef struct ati_pcigart_info { drm_local_map_t mapping; } drm_ati_pcigart_info; -/* - * User space objects and their references. - */ - -#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) - -typedef enum { - drm_fence_type, - drm_buffer_type, - drm_ttm_type - - /* - * Add other user space object types here. - */ - -} drm_object_type_t; - -/* - * A user object is a structure that helps the drm give out user handles - * to kernel internal objects and to keep track of these objects so that - * they can be destroyed, for example when the user space process exits. - * Designed to be accessible using a user space 32-bit handle. - */ - -typedef struct drm_user_object{ - drm_hash_item_t hash; - struct list_head list; - drm_object_type_t type; - atomic_t refcount; - int shareable; - drm_file_t *owner; - void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj, - drm_ref_t ref_action); - void (*unref)(drm_file_t *priv, struct drm_user_object *obj, - drm_ref_t unref_action); - void (*remove)(drm_file_t *priv, struct drm_user_object *obj); -} drm_user_object_t; - -/* - * A ref object is a structure which is used to - * keep track of references to user objects and to keep track of these - * references so that they can be destroyed for example when the user space - * process exits. Designed to be accessible using a pointer to the _user_ object. - */ - - -typedef struct drm_ref_object { - drm_hash_item_t hash; - struct list_head list; - atomic_t refcount; - drm_ref_t unref_action; -} drm_ref_object_t; - -struct drm_buffer_object; #include "drm_ttm.h" -#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ -#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ -#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ -#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap - before kernel access. 
*/ -#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ -#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ - - -typedef struct drm_mem_type_manager { - int has_type; - int use_type; - drm_mm_t manager; - struct list_head lru; - struct list_head pinned; - uint32_t flags; - uint32_t drm_bus_maptype; - unsigned long io_offset; - unsigned long io_size; - void *io_addr; -} drm_mem_type_manager_t; - -typedef struct drm_bo_mem_reg { - drm_mm_node_t *mm_node; - unsigned long size; - unsigned long num_pages; - uint32_t page_alignment; - uint32_t mem_type; - uint32_t flags; - uint32_t mask; -} drm_bo_mem_reg_t; - -/* - * buffer object driver - */ - -typedef struct drm_bo_driver{ - const uint32_t *mem_type_prio; - const uint32_t *mem_busy_prio; - uint32_t num_mem_type_prio; - uint32_t num_mem_busy_prio; - drm_ttm_backend_t *(*create_ttm_backend_entry) - (struct drm_device *dev); - int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type); - int (*invalidate_caches)(struct drm_device *dev, uint32_t flags); - int (*init_mem_type)(struct drm_device *dev, uint32_t type, - drm_mem_type_manager_t *man); - uint32_t (*evict_flags) (struct drm_device *dev, uint32_t type); - int (*move)(struct drm_buffer_object *bo, - int evict, int no_wait, - struct drm_bo_mem_reg *new_mem); -} drm_bo_driver_t; - - /** * DRM driver structure. This structure represent the common code for * a family of cards. There will one drm_device for each card present @@ -787,61 +679,6 @@ typedef struct drm_head { } drm_head_t; -typedef struct drm_fence_driver{ - int no_types; - uint32_t wrap_diff; - uint32_t flush_diff; - uint32_t sequence_mask; - int lazy_capable; - int (*has_irq) (struct drm_device *dev, uint32_t class, uint32_t flags); - int (*emit) (struct drm_device *dev, uint32_t class, uint32_t flags, - uint32_t *breadcrumb, - uint32_t *native_type); - void (*poke_flush) (struct drm_device *dev, uint32_t class); -} drm_fence_driver_t; - -#define _DRM_FENCE_TYPE_EXE 0x00 - -typedef struct drm_fence_manager{ - int initialized; - rwlock_t lock; - - /* - * The list below should be maintained in sequence order and - * access is protected by the above spinlock. - */ - - struct list_head ring; - struct list_head *fence_types[32]; - volatile uint32_t pending_flush; - wait_queue_head_t fence_queue; - int pending_exe_flush; - uint32_t last_exe_flush; - uint32_t exe_flush_sequence; - atomic_t count; -} drm_fence_manager_t; - -typedef struct drm_buffer_manager{ - struct mutex init_mutex; - struct mutex evict_mutex; - int nice_mode; - int initialized; - drm_file_t *last_to_validate; - drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; - struct list_head unfenced; - struct list_head ddestroy; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) - struct work_struct wq; -#else - struct delayed_work wq; -#endif - uint32_t fence_type; - unsigned long cur_pages; - atomic_t count; -} drm_buffer_manager_t; - - - /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. @@ -996,77 +833,6 @@ typedef struct drm_agp_ttm_priv { } drm_agp_ttm_priv; #endif -typedef struct drm_fence_object{ - drm_user_object_t base; - atomic_t usage; - - /* - * The below three fields are protected by the fence manager spinlock. 
- */ - - struct list_head ring; - int class; - uint32_t native_type; - uint32_t type; - uint32_t signaled; - uint32_t sequence; - uint32_t flush_mask; - uint32_t submitted_flush; -} drm_fence_object_t; - - -typedef struct drm_buffer_object{ - drm_device_t *dev; - drm_user_object_t base; - - /* - * If there is a possibility that the usage variable is zero, - * then dev->struct_mutext should be locked before incrementing it. - */ - - atomic_t usage; - unsigned long buffer_start; - drm_bo_type_t type; - unsigned long offset; - atomic_t mapped; - drm_bo_mem_reg_t mem; - - struct list_head lru; - struct list_head ddestroy; - - uint32_t fence_type; - uint32_t fence_class; - drm_fence_object_t *fence; - uint32_t priv_flags; - wait_queue_head_t event_queue; - struct mutex mutex; - - /* For pinned buffers */ - drm_mm_node_t *pinned_node; - uint32_t pinned_mem_type; - struct list_head pinned_lru; - - /* For vm */ - - drm_ttm_t *ttm; - drm_map_list_t map_list; - uint32_t memory_type; - unsigned long bus_offset; - uint32_t vm_flags; - void *iomap; - - -#ifdef DRM_ODD_MM_COMPAT - /* dev->struct_mutex only protected. */ - struct list_head vma_list; - struct list_head p_mm_list; -#endif - -} drm_buffer_object_t; - -#define _DRM_BO_FLAG_UNFENCED 0x00000001 -#define _DRM_BO_FLAG_EVICTED 0x00000002 - static __inline__ int drm_core_check_feature(struct drm_device *dev, int feature) @@ -1408,144 +1174,9 @@ static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block) } -/* - * User space object bookkeeping (drm_object.c) - */ - -/* - * Must be called with the struct_mutex held. - */ - -extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item, - -/* - * Must be called with the struct_mutex held. - */ - int shareable); -extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key); - -/* - * Must be called with the struct_mutex held. - * If "item" has been obtained by a call to drm_lookup_user_object. You may not - * release the struct_mutex before calling drm_remove_ref_object. - * This function may temporarily release the struct_mutex. - */ - -extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item); - -/* - * Must be called with the struct_mutex held. May temporarily release it. - */ - -extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object, - drm_ref_t ref_action); - -/* - * Must be called with the struct_mutex held. - */ - -drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv, - drm_user_object_t *referenced_object, - drm_ref_t ref_action); -/* - * Must be called with the struct_mutex held. - * If "item" has been obtained by a call to drm_lookup_ref_object. You may not - * release the struct_mutex before calling drm_remove_ref_object. - * This function may temporarily release the struct_mutex. 
- */ - -extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item); -extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type, - drm_user_object_t **object); -extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type); -/* - * fence objects (drm_fence.c) - */ - -extern void drm_fence_handler(drm_device_t *dev, uint32_t class, - uint32_t sequence, uint32_t type); -extern void drm_fence_manager_init(drm_device_t *dev); -extern void drm_fence_manager_takedown(drm_device_t *dev); -extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence); -extern int drm_fence_object_flush(drm_device_t * dev, - volatile drm_fence_object_t * fence, - uint32_t type); -extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence, - uint32_t type); -extern void drm_fence_usage_deref_locked(drm_device_t * dev, - drm_fence_object_t * fence); -extern void drm_fence_usage_deref_unlocked(drm_device_t * dev, - drm_fence_object_t * fence); -extern int drm_fence_object_wait(drm_device_t * dev, - volatile drm_fence_object_t * fence, - int lazy, int ignore_signals, uint32_t mask); -extern int drm_fence_object_create(drm_device_t *dev, uint32_t type, - uint32_t fence_flags, - drm_fence_object_t **c_fence); -extern int drm_fence_add_user_object(drm_file_t *priv, - drm_fence_object_t *fence, - int shareable); - - - - - -extern int drm_fence_ioctl(DRM_IOCTL_ARGS); - -/* - * buffer objects (drm_bo.c) - */ - -extern int drm_bo_ioctl(DRM_IOCTL_ARGS); -extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_driver_finish(drm_device_t *dev); -extern int drm_bo_driver_init(drm_device_t *dev); -extern int drm_bo_pci_offset(drm_device_t *dev, - drm_bo_mem_reg_t *mem, - unsigned long *bus_base, - unsigned long *bus_offset, - unsigned long *bus_size); -extern int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem); - - -extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo); -extern int drm_fence_buffer_objects(drm_file_t * priv, - struct list_head *list, - uint32_t fence_flags, - drm_fence_object_t *fence, - drm_fence_object_t **used_fence); -extern void drm_bo_add_to_lru(drm_buffer_object_t * bo); -extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, - int no_wait); -extern int drm_bo_mem_space(drm_buffer_object_t *bo, - drm_bo_mem_reg_t *mem, - int no_wait); -extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, - int no_wait, int move_unfenced); - - -/* - * Buffer object memory move helpers. - * drm_bo_move.c - */ - -extern int drm_bo_move_ttm(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem); -extern int drm_bo_move_memcpy(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem); -extern int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, - int evict, - int no_wait, - uint32_t fence_type, - uint32_t fence_flags, - drm_bo_mem_reg_t *new_mem); extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h index b96f5cee..04fb1707 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_ttm.h @@ -32,11 +32,199 @@ #define _DRM_TTM_H #define DRM_HAS_TTM +struct drm_device; + +/*************************************************** + * User space objects. 
(drm_object.c) + */ + +#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) + +typedef enum { + drm_fence_type, + drm_buffer_type, + drm_ttm_type + /* + * Add other user space object types here. + */ +} drm_object_type_t; + /* - * The backend GART interface. (In our case AGP). Any similar type of device (PCIE?) + * A user object is a structure that helps the drm give out user handles + * to kernel internal objects and to keep track of these objects so that + * they can be destroyed, for example when the user space process exits. + * Designed to be accessible using a user space 32-bit handle. + */ + +typedef struct drm_user_object { + drm_hash_item_t hash; + struct list_head list; + drm_object_type_t type; + atomic_t refcount; + int shareable; + drm_file_t *owner; + void (*ref_struct_locked) (drm_file_t * priv, + struct drm_user_object * obj, + drm_ref_t ref_action); + void (*unref) (drm_file_t * priv, struct drm_user_object * obj, + drm_ref_t unref_action); + void (*remove) (drm_file_t * priv, struct drm_user_object * obj); +} drm_user_object_t; + +/* + * A ref object is a structure which is used to + * keep track of references to user objects and to keep track of these + * references so that they can be destroyed for example when the user space + * process exits. Designed to be accessible using a pointer to the _user_ object. + */ + +typedef struct drm_ref_object { + drm_hash_item_t hash; + struct list_head list; + atomic_t refcount; + drm_ref_t unref_action; +} drm_ref_object_t; + +/** + * Must be called with the struct_mutex held. + */ + +extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, + int shareable); +/** + * Must be called with the struct_mutex held. + */ + +extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, + uint32_t key); + +/* + * Must be called with the struct_mutex held. + * If "item" has been obtained by a call to drm_lookup_user_object. You may not + * release the struct_mutex before calling drm_remove_ref_object. + * This function may temporarily release the struct_mutex. + */ + +extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item); + +/* + * Must be called with the struct_mutex held. May temporarily release it. + */ + +extern int drm_add_ref_object(drm_file_t * priv, + drm_user_object_t * referenced_object, + drm_ref_t ref_action); + +/* + * Must be called with the struct_mutex held. + */ + +drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, + drm_user_object_t * referenced_object, + drm_ref_t ref_action); +/* + * Must be called with the struct_mutex held. + * If "item" has been obtained by a call to drm_lookup_ref_object. You may not + * release the struct_mutex before calling drm_remove_ref_object. + * This function may temporarily release the struct_mutex. + */ + +extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item); +extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, + drm_object_type_t type, + drm_user_object_t ** object); +extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, + drm_object_type_t type); + +/*************************************************** + * Fence objects. (drm_fence.c) + */ + +typedef struct drm_fence_object { + drm_user_object_t base; + atomic_t usage; + + /* + * The below three fields are protected by the fence manager spinlock. 
+ */ + + struct list_head ring; + int class; + uint32_t native_type; + uint32_t type; + uint32_t signaled; + uint32_t sequence; + uint32_t flush_mask; + uint32_t submitted_flush; +} drm_fence_object_t; + +#define _DRM_FENCE_TYPE_EXE 0x00 + +typedef struct drm_fence_manager { + int initialized; + rwlock_t lock; + + /* + * The list below should be maintained in sequence order and + * access is protected by the above spinlock. + */ + + struct list_head ring; + struct list_head *fence_types[32]; + volatile uint32_t pending_flush; + wait_queue_head_t fence_queue; + int pending_exe_flush; + uint32_t last_exe_flush; + uint32_t exe_flush_sequence; + atomic_t count; +} drm_fence_manager_t; + +typedef struct drm_fence_driver { + int no_types; + uint32_t wrap_diff; + uint32_t flush_diff; + uint32_t sequence_mask; + int lazy_capable; + int (*has_irq) (struct drm_device * dev, uint32_t class, + uint32_t flags); + int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, + uint32_t * breadcrumb, uint32_t * native_type); + void (*poke_flush) (struct drm_device * dev, uint32_t class); +} drm_fence_driver_t; + +extern void drm_fence_handler(struct drm_device *dev, uint32_t class, + uint32_t sequence, uint32_t type); +extern void drm_fence_manager_init(struct drm_device *dev); +extern void drm_fence_manager_takedown(struct drm_device *dev); +extern void drm_fence_flush_old(struct drm_device *dev, uint32_t sequence); +extern int drm_fence_object_flush(struct drm_device *dev, + drm_fence_object_t * fence, uint32_t type); +extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type); +extern void drm_fence_usage_deref_locked(struct drm_device *dev, + drm_fence_object_t * fence); +extern void drm_fence_usage_deref_unlocked(struct drm_device *dev, + drm_fence_object_t * fence); +extern int drm_fence_object_wait(struct drm_device *dev, + drm_fence_object_t * fence, + int lazy, int ignore_signals, uint32_t mask); +extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, + uint32_t fence_flags, + drm_fence_object_t ** c_fence); +extern int drm_fence_add_user_object(drm_file_t * priv, + drm_fence_object_t * fence, int shareable); +extern int drm_fence_ioctl(DRM_IOCTL_ARGS); + +/************************************************** + *TTMs + */ + +/* + * The ttm backend GTT interface. (In our case AGP). + * Any similar type of device (PCIE?) * needs only to implement these functions to be usable with the "TTM" interface. * The AGP backend implementation lives in drm_agpsupport.c - * basically maps these calls to available functions in agpgart. Each drm device driver gets an + * basically maps these calls to available functions in agpgart. + * Each drm device driver gets an * additional function pointer that creates these types, * so that the device can choose the correct aperture. * (Multiple AGP apertures, etc.) @@ -111,4 +299,171 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm); #define DRM_TTM_PAGE_PRESENT 0x08 #define DRM_TTM_PAGE_VMALLOC 0x10 +/*************************************************** + * Buffer objects. 
(drm_bo.c, drm_bo_move.c) + */ + +typedef struct drm_bo_mem_reg { + drm_mm_node_t *mm_node; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint32_t flags; + uint32_t mask; +} drm_bo_mem_reg_t; + +typedef struct drm_buffer_object { + struct drm_device *dev; + drm_user_object_t base; + + /* + * If there is a possibility that the usage variable is zero, + * then dev->struct_mutext should be locked before incrementing it. + */ + + atomic_t usage; + unsigned long buffer_start; + drm_bo_type_t type; + unsigned long offset; + atomic_t mapped; + drm_bo_mem_reg_t mem; + + struct list_head lru; + struct list_head ddestroy; + + uint32_t fence_type; + uint32_t fence_class; + drm_fence_object_t *fence; + uint32_t priv_flags; + wait_queue_head_t event_queue; + struct mutex mutex; + + /* For pinned buffers */ + drm_mm_node_t *pinned_node; + uint32_t pinned_mem_type; + struct list_head pinned_lru; + + /* For vm */ + + drm_ttm_t *ttm; + drm_map_list_t map_list; + uint32_t memory_type; + unsigned long bus_offset; + uint32_t vm_flags; + void *iomap; + +#ifdef DRM_ODD_MM_COMPAT + /* dev->struct_mutex only protected. */ + struct list_head vma_list; + struct list_head p_mm_list; +#endif + +} drm_buffer_object_t; + +#define _DRM_BO_FLAG_UNFENCED 0x00000001 +#define _DRM_BO_FLAG_EVICTED 0x00000002 + +typedef struct drm_mem_type_manager { + int has_type; + int use_type; + drm_mm_t manager; + struct list_head lru; + struct list_head pinned; + uint32_t flags; + uint32_t drm_bus_maptype; + unsigned long io_offset; + unsigned long io_size; + void *io_addr; +} drm_mem_type_manager_t; + +#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ +#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ +#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ +#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap + before kernel access. 
*/ +#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ +#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ + +typedef struct drm_buffer_manager { + struct mutex init_mutex; + struct mutex evict_mutex; + int nice_mode; + int initialized; + drm_file_t *last_to_validate; + drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; + struct list_head unfenced; + struct list_head ddestroy; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + struct work_struct wq; +#else + struct delayed_work wq; +#endif + uint32_t fence_type; + unsigned long cur_pages; + atomic_t count; +} drm_buffer_manager_t; + +typedef struct drm_bo_driver { + const uint32_t *mem_type_prio; + const uint32_t *mem_busy_prio; + uint32_t num_mem_type_prio; + uint32_t num_mem_busy_prio; + drm_ttm_backend_t *(*create_ttm_backend_entry) + (struct drm_device * dev); + int (*fence_type) (uint32_t flags, uint32_t * class, uint32_t * type); + int (*invalidate_caches) (struct drm_device * dev, uint32_t flags); + int (*init_mem_type) (struct drm_device * dev, uint32_t type, + drm_mem_type_manager_t * man); + uint32_t(*evict_flags) (struct drm_device * dev, uint32_t type); + int (*move) (struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem); +} drm_bo_driver_t; + +/* + * buffer objects (drm_bo.c) + */ + +extern int drm_bo_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_driver_finish(struct drm_device *dev); +extern int drm_bo_driver_init(struct drm_device *dev); +extern int drm_bo_pci_offset(struct drm_device *dev, + drm_bo_mem_reg_t * mem, + unsigned long *bus_base, + unsigned long *bus_offset, + unsigned long *bus_size); +extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); + +extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo); +extern int drm_fence_buffer_objects(drm_file_t * priv, + struct list_head *list, + uint32_t fence_flags, + drm_fence_object_t * fence, + drm_fence_object_t ** used_fence); +extern void drm_bo_add_to_lru(drm_buffer_object_t * bo); +extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + int no_wait); +extern int drm_bo_mem_space(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, int no_wait); +extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, + int no_wait, int move_unfenced); + +/* + * Buffer object memory move helpers. + * drm_bo_move.c + */ + +extern int drm_bo_move_ttm(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem); +extern int drm_bo_move_memcpy(drm_buffer_object_t * bo, + int evict, + int no_wait, drm_bo_mem_reg_t * new_mem); +extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, + int evict, + int no_wait, + uint32_t fence_type, + uint32_t fence_flags, + drm_bo_mem_reg_t * new_mem); + #endif From 1345076c8f93936563cd5c15588b1d76d87969d3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 14 Feb 2007 14:10:10 +0100 Subject: [PATCH 34/34] Rename drm_ttm.h to drm_objects.h Fix up some header incompatibilities in drm_fence.c caused by the previous commit. 
--- linux-core/drmP.h | 2 +- linux-core/drm_fence.c | 13 +++++++------ linux-core/{drm_ttm.h => drm_objects.h} | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) rename linux-core/{drm_ttm.h => drm_objects.h} (99%) diff --git a/linux-core/drmP.h b/linux-core/drmP.h index c3607c3f..0bf71c49 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -592,7 +592,7 @@ typedef struct ati_pcigart_info { } drm_ati_pcigart_info; -#include "drm_ttm.h" +#include "drm_objects.h" /** * DRM driver structure. This structure represent the common code for diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index dc428949..3ccfdcb8 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -147,7 +147,7 @@ static void drm_fence_object_destroy(drm_file_t * priv, drm_fence_usage_deref_locked(dev, fence); } -static int fence_signaled(drm_device_t * dev, volatile +static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence, uint32_t mask, int poke_flush) { @@ -172,7 +172,7 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm, uint32_t diff; if (!fm->pending_exe_flush) { - volatile struct list_head *list; + struct list_head *list; /* * Last_exe_flush is invalid. Find oldest sequence. @@ -201,14 +201,15 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm, } } -int drm_fence_object_signaled(volatile drm_fence_object_t * fence, +int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type) { return ((fence->signaled & type) == type); } int drm_fence_object_flush(drm_device_t * dev, - volatile drm_fence_object_t * fence, uint32_t type) + drm_fence_object_t * fence, + uint32_t type) { drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; @@ -274,7 +275,7 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence) EXPORT_SYMBOL(drm_fence_flush_old); static int drm_fence_lazy_wait(drm_device_t *dev, - volatile drm_fence_object_t *fence, + drm_fence_object_t *fence, int ignore_signals, uint32_t mask) { drm_fence_manager_t *fm = &dev->fm; @@ -301,7 +302,7 @@ static int drm_fence_lazy_wait(drm_device_t *dev, } int drm_fence_object_wait(drm_device_t * dev, - volatile drm_fence_object_t * fence, + drm_fence_object_t * fence, int lazy, int ignore_signals, uint32_t mask) { drm_fence_driver_t *driver = dev->driver->fence_driver; diff --git a/linux-core/drm_ttm.h b/linux-core/drm_objects.h similarity index 99% rename from linux-core/drm_ttm.h rename to linux-core/drm_objects.h index 04fb1707..f9b8ebd9 100644 --- a/linux-core/drm_ttm.h +++ b/linux-core/drm_objects.h @@ -28,8 +28,8 @@ * Authors: Thomas Hellström */ -#ifndef _DRM_TTM_H -#define _DRM_TTM_H +#ifndef _DRM_OBJECTS_H +#define _DRM_OJBECTS_H #define DRM_HAS_TTM struct drm_device;