drm/ttm: apply linux kernel coding style to bo_lock/move/object/ttm.c

Dave Airlie 2007-11-05 19:09:18 +10:00
parent 7ad3890707
commit 6ee5412da0
4 changed files with 76 additions and 91 deletions
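
The hunks below are mechanical CodingStyle cleanups with no functional change: braces are dropped from single-statement bodies, an assignment is moved out of an if condition, user-visible strings are joined onto one line, over-long parameter lists are re-wrapped, and EXPORT_SYMBOL() moves directly under the closing brace of the function it exports. A minimal before/after sketch of the main patterns follows; bind_backend(), bind_old_style(), bind_new_style() and the stubbed DRM_ERROR() are illustrative stand-ins written for this note only, not code taken from the diff.

#include <stdio.h>

/* Stub of the kernel's DRM_ERROR() so this sketch builds on its own. */
#define DRM_ERROR(fmt, ...) \
        fprintf(stderr, "[drm] *ERROR* " fmt, ##__VA_ARGS__)

/* Stand-in for be->func->bind(be, bo_mem); fails on a negative handle. */
static int bind_backend(int be)
{
        return (be < 0) ? -22 : 0;      /* -EINVAL */
}

/* Before the cleanup: assignment buried in the if condition, braces around
 * a single-statement body, and the error string split across two lines.
 */
static int bind_old_style(int be)
{
        int ret;

        if ((ret = bind_backend(be))) {
                DRM_ERROR("Couldn't bind "
                          "backend.\n");
        }
        return ret;
}

/* After the cleanup: the assignment is a statement of its own, single
 * statements lose their braces, and the message stays on one line so it
 * remains greppable.
 */
static int bind_new_style(int be)
{
        int ret;

        ret = bind_backend(be);
        if (ret)
                DRM_ERROR("Couldn't bind backend.\n");
        return ret;
}

int main(void)
{
        printf("old style: %d, new style: %d\n",
               bind_old_style(-1), bind_new_style(-1));
        return 0;
}

The only hunk that goes beyond brace and whitespace shuffling is the drm_bind_ttm() one, where if ((ret = be->func->bind(be, bo_mem))) is split into a plain assignment followed by if (ret); the behaviour is identical, but the assignment no longer hides inside the condition.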

drm_bo_lock.c

@@ -73,7 +73,6 @@ void drm_bo_read_unlock(struct drm_bo_lock *lock)
         if (atomic_read(&lock->readers) == 0)
                 wake_up_interruptible(&lock->queue);
 }
-
 EXPORT_SYMBOL(drm_bo_read_unlock);
 
 int drm_bo_read_lock(struct drm_bo_lock *lock)
@@ -95,7 +94,6 @@ int drm_bo_read_lock(struct drm_bo_lock *lock)
         }
         return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_read_lock);
 
 static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
@@ -123,9 +121,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
         int ret = 0;
         struct drm_device *dev;
 
-        if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) {
+        if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
                 return -EINVAL;
-        }
 
         while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
                 ret = wait_event_interruptible
@@ -149,9 +146,9 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
         ret = drm_add_user_object(file_priv, &lock->base, 0);
         lock->base.remove = &drm_bo_write_lock_remove;
         lock->base.type = drm_lock_type;
-        if (ret) {
+        if (ret)
                 (void)__drm_bo_write_unlock(lock);
-        }
+
         mutex_unlock(&dev->struct_mutex);
 
         return ret;

drm_bo_move.c

@@ -82,7 +82,6 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
         DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
         return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_move_ttm);
 
 /**
@@ -145,10 +144,9 @@ void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
         bm = &dev->bm;
         man = &bm->man[mem->mem_type];
 
-        if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+        if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                 iounmap(virtual);
-        }
 }
 
 static int drm_copy_io_page(void *dst, void *src, unsigned long page)
 {
@@ -163,7 +161,8 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page)
         return 0;
 }
 
-static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page)
+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
+                                unsigned long page)
 {
         struct page *d = drm_ttm_get_page(ttm, page);
         void *dst;
@@ -271,7 +270,6 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo,
         drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
         return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_move_memcpy);
 
 /*
@@ -323,11 +321,9 @@ int drm_buffer_object_transfer(struct drm_buffer_object * bo,
  */
 
 int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
-                              int evict,
-                              int no_wait,
-                              uint32_t fence_class,
-                              uint32_t fence_type,
-                              uint32_t fence_flags, struct drm_bo_mem_reg * new_mem)
+                              int evict, int no_wait, uint32_t fence_class,
+                              uint32_t fence_type, uint32_t fence_flags,
+                              struct drm_bo_mem_reg *new_mem)
 {
         struct drm_device *dev = bo->dev;
         struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
@@ -407,7 +403,6 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
         DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
         return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
 
 int drm_bo_same_page(unsigned long offset,
@@ -420,13 +415,11 @@ EXPORT_SYMBOL(drm_bo_same_page);
 unsigned long drm_bo_offset_end(unsigned long offset,
                                 unsigned long end)
 {
-
         offset = (offset + PAGE_SIZE) & PAGE_MASK;
         return (end < offset) ? end : offset;
 }
-
 EXPORT_SYMBOL(drm_bo_offset_end);
 
 static pgprot_t drm_kernel_io_prot(uint32_t map_type)
 {
         pgprot_t tmp = PAGE_KERNEL;
@@ -475,8 +468,9 @@ static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
         return (!map->virtual) ? -ENOMEM : 0;
 }
 
-static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
-                           unsigned long num_pages, struct drm_bo_kmap_obj *map)
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
+                           unsigned long start_page, unsigned long num_pages,
+                           struct drm_bo_kmap_obj *map)
 {
         struct drm_device *dev = bo->dev;
         struct drm_bo_mem_reg *mem = &bo->mem;
@@ -530,7 +524,8 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag
  * and caching policy the buffer currently has.
  * Mapping multiple pages or buffers that live in io memory is a bit slow and
  * consumes vmalloc space. Be restrictive with such mappings.
- * Mapping single pages usually returns the logical kernel address, (which is fast)
+ * Mapping single pages usually returns the logical kernel address,
+ * (which is fast)
  * BUG may use slower temporary mappings for high memory pages or
  * uncached / write-combined pages.
  *

drm_object.c

@@ -66,9 +66,9 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
         DRM_ASSERT_LOCKED(&dev->struct_mutex);
 
         ret = drm_ht_find_item(&dev->object_hash, key, &hash);
-        if (ret) {
+        if (ret)
                 return NULL;
-        }
 
         item = drm_hash_entry(hash, struct drm_user_object, hash);
         if (priv != item->owner) {

drm_ttm.c

@@ -54,18 +54,17 @@ static void ttm_alloc_pages(struct drm_ttm * ttm)
         if (drm_alloc_memctl(size))
                 return;
 
-        if (size <= PAGE_SIZE) {
+        if (size <= PAGE_SIZE)
                 ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
-        }
+
         if (!ttm->pages) {
                 ttm->pages = vmalloc_user(size);
                 if (ttm->pages)
                         ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
         }
-        if (!ttm->pages) {
+        if (!ttm->pages)
                 drm_free_memctl(size);
-        }
 }
 
 static void ttm_free_pages(struct drm_ttm *ttm)
 {
@@ -85,9 +84,9 @@ static struct page *drm_ttm_alloc_page(void)
 {
         struct page *page;
 
-        if (drm_alloc_memctl(PAGE_SIZE)) {
+        if (drm_alloc_memctl(PAGE_SIZE))
                 return NULL;
-        }
+
         page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
         if (!page) {
                 drm_free_memctl(PAGE_SIZE);
@@ -186,14 +185,10 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
 #else
                         ClearPageReserved(*cur_page);
 #endif
-                        if (page_count(*cur_page) != 1) {
-                                DRM_ERROR("Erroneous page count. "
-                                          "Leaking pages.\n");
-                        }
-                        if (page_mapped(*cur_page)) {
-                                DRM_ERROR("Erroneous map count. "
-                                          "Leaking page mappings.\n");
-                        }
+                        if (page_count(*cur_page) != 1)
+                                DRM_ERROR("Erroneous page count. Leaking pages.\n");
+                        if (page_mapped(*cur_page))
+                                DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
                         __free_page(*cur_page);
                         drm_free_memctl(PAGE_SIZE);
                         --bm->cur_pages;
@@ -284,10 +279,9 @@ int drm_ttm_set_user(struct drm_ttm *ttm,
         }
 
         for (i = 0; i < num_pages; ++i) {
-                if (ttm->pages[i] == NULL) {
+                if (ttm->pages[i] == NULL)
                         ttm->pages[i] = ttm->dummy_read_page;
-                }
         }
 
         return 0;
 }
@@ -380,9 +374,8 @@ void drm_ttm_fixup_caching(struct drm_ttm * ttm)
         if (ttm->state == ttm_evicted) {
                 struct drm_ttm_backend *be = ttm->be;
 
-                if (be->func->needs_ub_cache_adjust(be)) {
+                if (be->func->needs_ub_cache_adjust(be))
                         drm_set_caching(ttm, 0);
-                }
                 ttm->state = ttm_unbound;
         }
 }
@@ -412,13 +405,14 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
         if (ret)
                 return ret;
 
-        if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
+        if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
                 drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
-        } else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
+        else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
                    bo_driver->ttm_cache_flush)
                 bo_driver->ttm_cache_flush(ttm);
 
-        if ((ret = be->func->bind(be, bo_mem))) {
+        ret = be->func->bind(be, bo_mem);
+        if (ret) {
                 ttm->state = ttm_evicted;
                 DRM_ERROR("Couldn't bind backend.\n");
                 return ret;
@@ -429,5 +423,4 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
                 ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
         return 0;
 }
-
 EXPORT_SYMBOL(drm_bind_ttm);