Add memory usage accounting to avoid DOS problems.
commit d515936ea7
parent 5443dbe35f
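This commit wires a global memory-usage budget into the TTM/buffer-object paths so an unprivileged client cannot pin unbounded kernel memory. Every accounted allocation first reserves its size against the budget, gives the reservation back if the real allocation fails, and releases it again when the memory is freed. The pattern, shown here as a minimal standalone sketch (the drm_ctl_* helpers added to drmP.h below are the real implementation; the example_* names are only for illustration):

/*
 * Sketch of the accounting pattern; drm_alloc(), drm_free() and the
 * memctl calls are the ones declared in this patch.
 */
static void *example_accounted_alloc(size_t size, int area)
{
	void *ret;

	if (drm_alloc_memctl(size))	/* would exceed the hard threshold */
		return NULL;
	ret = drm_alloc(size, area);
	if (!ret)
		drm_free_memctl(size);	/* allocation failed, undo the reservation */
	return ret;
}

static void example_accounted_free(void *pt, size_t size, int area)
{
	drm_free(pt, size, area);
	drm_free_memctl(size);		/* release the reservation */
}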
@@ -1129,6 +1129,14 @@ extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
+extern void drm_free_memctl(size_t size);
+extern int drm_alloc_memctl(size_t size);
+extern void drm_query_memctl(drm_u64_t *cur_used,
+			     drm_u64_t *low_threshold,
+			     drm_u64_t *high_threshold);
+extern void drm_init_memctl(size_t low_threshold,
+			    size_t high_threshold);
+
 /* Misc. IOCTL support (drm_ioctl.h) */
 extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
 			    unsigned int cmd, unsigned long arg);
@@ -1527,6 +1535,58 @@ extern void *drm_alloc(size_t size, int area);
 extern void drm_free(void *pt, size_t size, int area);
 #endif
 
+/*
+ * Accounting variants of standard calls.
+ */
+
+static inline void *drm_ctl_alloc(size_t size, int area)
+{
+	void *ret;
+	if (drm_alloc_memctl(size))
+		return NULL;
+	ret = drm_alloc(size, area);
+	if (!ret)
+		drm_free_memctl(size);
+	return ret;
+}
+
+static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
+{
+	void *ret;
+
+	if (drm_alloc_memctl(nmemb*size))
+		return NULL;
+	ret = drm_calloc(nmemb, size, area);
+	if (!ret)
+		drm_free_memctl(nmemb*size);
+	return ret;
+}
+
+static inline void drm_ctl_free(void *pt, size_t size, int area)
+{
+	drm_free(pt, size, area);
+	drm_free_memctl(size);
+}
+
+static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,
+					int flags)
+{
+	void *ret;
+	if (drm_alloc_memctl(size))
+		return NULL;
+	ret = kmem_cache_alloc(cache, flags);
+	if (!ret)
+		drm_free_memctl(size);
+	return ret;
+}
+
+static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
+				      void *obj)
+{
+	kmem_cache_free(cache, obj);
+	drm_free_memctl(size);
+}
+
 /*@}*/
 
 #endif /* __KERNEL__ */
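Note that the accounting in these helpers is tracked by size rather than by pointer, so every accounted free must pass the same size that was charged at allocation time; this is also why the slab variants drm_ctl_cache_alloc()/drm_ctl_cache_free() take an explicit size argument that kmem_cache_alloc()/kmem_cache_free() do not need. A hypothetical caller conversion, in the same style as the ones later in this patch (drm_foo_t is a made-up type for the example):

typedef struct drm_foo {
	int placeholder;
} drm_foo_t;

static drm_foo_t *foo_create(void)
{
	/* charged as sizeof(drm_foo_t) against the memctl budget */
	return drm_ctl_calloc(1, sizeof(drm_foo_t), DRM_MEM_TTM);
}

static void foo_destroy(drm_foo_t *foo)
{
	/* must release exactly what was charged */
	drm_ctl_free(foo, sizeof(*foo), DRM_MEM_TTM);
}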
@@ -570,14 +570,19 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
 	struct page **cur_page, **last_page = pages + num_pages;
 	DRM_AGP_MEM *mem;
 
+	if (drm_alloc_memctl(num_pages * sizeof(void *)))
+		return -1;
+
 	DRM_DEBUG("drm_agp_populate_ttm\n");
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
 	mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
 #else
 	mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
 #endif
-	if (!mem)
+	if (!mem) {
+		drm_free_memctl(num_pages *sizeof(void *));
 		return -1;
+	}
+
 	DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
 	mem->page_count = 0;
@@ -626,8 +631,10 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
 
 	DRM_DEBUG("drm_agp_clear_ttm\n");
 	if (mem) {
+		unsigned long num_pages = mem->page_count;
 		backend->unbind(backend);
 		agp_free_memory(mem);
+		drm_free_memctl(num_pages *sizeof(void *));
 	}
 
 	agp_priv->mem = NULL;
@@ -644,10 +651,12 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
 		if (agp_priv->mem) {
 			backend->clear(backend);
 		}
-		drm_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+		drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+		backend->private = NULL;
+	}
+	if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
+		drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
 	}
-	if (backend->flags & DRM_BE_FLAG_NEEDS_FREE)
-		drm_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
 	}
 }
 
@@ -662,15 +671,15 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
 	drm_agp_ttm_priv *agp_priv;
 
 	agp_be = (backend != NULL) ? backend:
-		drm_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+		drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
 
 	if (!agp_be)
 		return NULL;
 
-	agp_priv = drm_calloc(1, sizeof(agp_priv), DRM_MEM_MAPPINGS);
+	agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
 
 	if (!agp_priv) {
-		drm_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+		drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
 		return NULL;
 	}
 
@@ -237,7 +237,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
 		drm_ttm_object_deref_locked(dev, bo->ttm_object);
 	}
 	atomic_dec(&bm->count);
-	drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
+	drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
 }
 
 static void drm_bo_delayed_delete(drm_device_t * dev)
@@ -1390,7 +1390,7 @@ int drm_buffer_object_create(drm_file_t * priv,
 		return -EINVAL;
 	}
 
-	bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
+	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
 
 	if (!bo)
 		return -ENOMEM;
@@ -1752,6 +1752,12 @@ static int drm_bo_lock_mm(drm_device_t *dev, unsigned mem_type)
 {
 	int ret;
 	drm_buffer_manager_t *bm = &dev->bm;
 
+	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
+		DRM_ERROR("Illegal memory manager memory type %u,\n",
+			  mem_type);
+		return -EINVAL;
+	}
+
 	ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
 	if (ret)
@@ -239,8 +239,13 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
 		page = NOPAGE_OOM;
 		goto out;
 	}
+	if (drm_alloc_memctl(PAGE_SIZE)) {
+		page = NOPAGE_OOM;
+		goto out;
+	}
 	page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
 	if (!page) {
+		drm_free_memctl(PAGE_SIZE);
 		page = NOPAGE_OOM;
 		goto out;
 	}
@@ -284,7 +289,7 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
 		vma->vm_private_data;
 	struct mm_struct *mm = vma->vm_mm;
 
-	v_entry = drm_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
 	if (!v_entry) {
 		DRM_ERROR("Allocation of vma pointer entry failed\n");
 		return -ENOMEM;
@@ -300,7 +305,7 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
 		} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
 	}
 
-	n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
 	if (!n_entry) {
 		DRM_ERROR("Allocation of process mm pointer entry failed\n");
 		return -ENOMEM;
@@ -325,7 +330,7 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
 		if (v_entry->vma == vma) {
 			found = 1;
 			list_del(&v_entry->head);
-			drm_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
 			break;
 		}
 	}
@@ -336,7 +341,7 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
 		if (atomic_add_negative(-1, &entry->refcount)) {
 			list_del(&entry->head);
 			BUG_ON(entry->locked);
-			drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+			drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
 		}
 		return;
 	}
@@ -130,7 +130,6 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 #define DRIVER_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
-
 
 /**
  * Take down the DRM device.
  *
@@ -502,7 +501,10 @@ static void drm_free_memory_caches(void )
 static int __init drm_core_init(void)
 {
 	int ret;
+	struct sysinfo si;
 
+	si_meminfo(&si);
+	drm_init_memctl(si.totalram/2, si.totalram*3/4);
 	ret = drm_create_memory_caches();
 	if (ret)
 		goto err_p1;
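drm_core_init() sizes the budget from si_meminfo(): si.totalram is a page count, so the soft (low) threshold becomes half of system RAM and the hard (high) threshold three quarters of it; drm_init_memctl() converts both to bytes with << PAGE_SHIFT (see the drm_memory.c hunk below). A rough worked example, assuming a hypothetical 1 GiB machine with 4 KiB pages:

/* Hypothetical numbers, only to illustrate the unit conversion. */
unsigned long totalram   = 262144;		/* 1 GiB / 4 KiB = 262144 pages */
unsigned long low_pages  = totalram / 2;	/* 131072 pages */
unsigned long high_pages = totalram * 3 / 4;	/* 196608 pages */
/* drm_init_memctl() stores them shifted by PAGE_SHIFT (12):
 *   low_threshold  = 131072 << 12 = 512 MiB
 *   high_threshold = 196608 << 12 = 768 MiB
 */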
@@ -118,7 +118,8 @@ void drm_fence_usage_deref_locked(drm_device_t * dev,
 		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
 			  fence->base.hash.key);
 		atomic_dec(&fm->count);
-		kmem_cache_free(drm_cache.fence_object, fence);
+		drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
+				   fence);
 	}
 }
@@ -132,7 +133,8 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev,
 	if (atomic_read(&fence->usage) == 0) {
 		drm_fence_unring(dev, &fence->ring);
 		atomic_dec(&fm->count);
-		kmem_cache_free(drm_cache.fence_object, fence);
+		drm_ctl_cache_free(drm_cache.fence_object,
+				   sizeof(*fence), fence);
 	}
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -439,7 +441,8 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
 	int ret;
 	drm_fence_manager_t *fm = &dev->fm;
 
-	fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
+	fence = drm_ctl_cache_alloc(drm_cache.fence_object,
+				    sizeof(*fence), GFP_KERNEL);
 	if (!fence)
 		return -ENOMEM;
 	ret = drm_fence_object_init(dev, type, flags, fence);
@@ -44,7 +44,7 @@ int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
 	ht->order = order;
 	ht->fill = 0;
 	ht->table = NULL;
-	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > 4*PAGE_SIZE);
+	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
 	if (!ht->use_vmalloc) {
 		ht->table = drm_calloc(ht->size, sizeof(*ht->table),
 				       DRM_MEM_HASHTAB);
@@ -37,6 +37,75 @@
 #include <linux/highmem.h>
 #include "drmP.h"
 
+static struct {
+	spinlock_t lock;
+	drm_u64_t cur_used;
+	drm_u64_t low_threshold;
+	drm_u64_t high_threshold;
+} drm_memctl = {
+	.lock = SPIN_LOCK_UNLOCKED
+};
+
+static inline size_t drm_size_align(size_t size) {
+
+	register size_t tmpSize = 4;
+	if (size > PAGE_SIZE)
+		return PAGE_ALIGN(size);
+
+	while(tmpSize < size)
+		tmpSize <<= 1;
+
+	return (size_t) tmpSize;
+}
+
+int drm_alloc_memctl(size_t size)
+{
+	int ret;
+	unsigned long a_size = drm_size_align(size);
+
+	spin_lock(&drm_memctl.lock);
+	ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
+		-ENOMEM : 0;
+	if (!ret)
+		drm_memctl.cur_used += a_size;
+	spin_unlock(&drm_memctl.lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_alloc_memctl);
+
+void drm_free_memctl(size_t size)
+{
+	unsigned long a_size = drm_size_align(size);
+
+	spin_lock(&drm_memctl.lock);
+	drm_memctl.cur_used -= a_size;
+	spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_free_memctl);
+
+void drm_query_memctl(drm_u64_t *cur_used,
+		      drm_u64_t *low_threshold,
+		      drm_u64_t *high_threshold)
+{
+	spin_lock(&drm_memctl.lock);
+	*cur_used = drm_memctl.cur_used;
+	*low_threshold = drm_memctl.low_threshold;
+	*high_threshold = drm_memctl.high_threshold;
+	spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_query_memctl);
+
+void drm_init_memctl(size_t p_low_threshold,
+		     size_t p_high_threshold)
+{
+	spin_lock(&drm_memctl.lock);
+	drm_memctl.cur_used = 0;
+	drm_memctl.low_threshold = p_low_threshold << PAGE_SHIFT;
+	drm_memctl.high_threshold = p_high_threshold << PAGE_SHIFT;
+	spin_unlock(&drm_memctl.lock);
+}
+
+
 #ifndef DEBUG_MEMORY
 
 /** No-op. */
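drm_size_align() above charges allocations at a rounded size that roughly matches what the slab and page allocators actually hand out: requests up to a page round up to the next power of two (minimum 4 bytes), larger requests round up to whole pages. Also note that drm_alloc_memctl() only enforces the high threshold; the low threshold is recorded and reported through /proc (see the drm_proc change below) but not checked here. A standalone sketch of the rounding with example values, assuming 4 KiB pages:

#define EXAMPLE_PAGE_SIZE 4096UL	/* assumed page size for the example */

/* Mirrors drm_size_align(): power-of-two rounding below a page,
 * page-granular rounding above it.
 */
static unsigned long example_size_align(unsigned long size)
{
	unsigned long tmp = 4;

	if (size > EXAMPLE_PAGE_SIZE)
		return (size + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1);
	while (tmp < size)
		tmp <<= 1;
	return tmp;
}

/* example_size_align(100)  == 128
 * example_size_align(4096) == 4096
 * example_size_align(5000) == 8192
 */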
@@ -59,8 +59,9 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
 		return parent;
 	} else {
 
-		child = (drm_mm_node_t *) kmem_cache_alloc(drm_cache.mm,
-							   GFP_KERNEL);
+		child = (drm_mm_node_t *)
+			drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+					    GFP_KERNEL);
 		if (!child)
 			return NULL;
 
@@ -110,8 +111,9 @@ void drm_mm_put_block(drm_mm_node_t * cur)
 			prev_node->size += next_node->size;
 			list_del(&next_node->ml_entry);
 			list_del(&next_node->fl_entry);
-			kmem_cache_free(drm_cache.mm, next_node);
+			drm_ctl_cache_free(drm_cache.mm,
+					   sizeof(*next_node),
+					   next_node);
 		} else {
 			next_node->size += cur->size;
 			next_node->start = cur->start;
@@ -124,7 +126,7 @@ void drm_mm_put_block(drm_mm_node_t * cur)
 		list_add(&cur->fl_entry, &list_root->fl_entry);
 	} else {
 		list_del(&cur->ml_entry);
-		kmem_cache_free(drm_cache.mm, cur);
+		drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
 	}
 }
 
@@ -174,7 +176,8 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
 	INIT_LIST_HEAD(&mm->root_node.fl_entry);
 
 
-	child = (drm_mm_node_t *) kmem_cache_alloc(drm_cache.mm, GFP_KERNEL);
+	child = (drm_mm_node_t *)
+		drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), GFP_KERNEL);
 
 	if (!child)
 		return -ENOMEM;
@@ -210,7 +213,7 @@ void drm_mm_takedown(drm_mm_t * mm)
 
 		list_del(&entry->fl_entry);
 		list_del(&entry->ml_entry);
-		kmem_cache_free(drm_cache.mm, entry);
+		drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
 	}
 
 EXPORT_SYMBOL(drm_mm_takedown);
@@ -152,7 +152,7 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
 					   ref_action);
 	}
 
-	item = drm_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
+	item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
 	if (item == NULL) {
 		DRM_ERROR("Could not allocate reference object\n");
 		return -ENOMEM;
@@ -218,7 +218,7 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
 	list_del_init(&item->list);
 	if (unref_action == _DRM_REF_USE)
 		drm_remove_other_references(priv, user_object);
-	drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+	drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
 	}
 
 	switch (unref_action) {
@@ -439,6 +439,10 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
 	int len = 0;
 	drm_buffer_manager_t *bm = &dev->bm;
 	drm_fence_manager_t *fm = &dev->fm;
+	drm_u64_t used_mem;
+	drm_u64_t low_mem;
+	drm_u64_t high_mem;
+
 
 	if (offset > DRM_PROC_LIMIT) {
 		*eof = 1;
@@ -459,12 +463,18 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
 		DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
 			       atomic_read(&bm->count));
 		DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
-		DRM_PROC_PRINT("Max allowed number of locked GATT pages %lu\n",
-			       bm->max_pages);
 	} else {
 		DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
 	}
 
+	drm_query_memctl(&used_mem, &low_mem, &high_mem);
+
+	DRM_PROC_PRINT("Used object memory is %lu pages.\n",
+		       (unsigned long) (used_mem >> PAGE_SHIFT));
+	DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
+		       (unsigned long) (low_mem >> PAGE_SHIFT));
+	DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
+		       (unsigned long) (high_mem >> PAGE_SHIFT));
 
 	DRM_PROC_PRINT("\n");
 
@@ -37,12 +37,17 @@ static void *ttm_alloc(unsigned long size, int type)
 {
 	void *ret = NULL;
 
-	if (size <= 4*PAGE_SIZE) {
+	if (drm_alloc_memctl(size))
+		return NULL;
+	if (size <= PAGE_SIZE) {
 		ret = drm_alloc(size, type);
 	}
 	if (!ret) {
 		ret = vmalloc(size);
 	}
+	if (!ret) {
+		drm_free_memctl(size);
+	}
 	return ret;
 }
 
@@ -55,6 +60,7 @@ static void ttm_free(void *pointer, unsigned long size, int type)
 	} else {
 		drm_free(pointer, size, type);
 	}
+	drm_free_memctl(size);
 }
 
 /*
@@ -174,6 +180,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 			 */
 
 			drm_free_gatt_pages(*cur_page, 0);
+			drm_free_memctl(PAGE_SIZE);
 			--bm->cur_pages;
 		}
 	}
@@ -182,8 +189,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 		ttm->pages = NULL;
 	}
 
-	drm_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
-
+	drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
 	return 0;
 }
 
@@ -203,13 +209,14 @@ static int drm_ttm_populate(drm_ttm_t *ttm)
 	for (i=0; i<ttm->num_pages; ++i) {
 		page = ttm->pages[i];
 		if (!page) {
-			if (bm->cur_pages >= bm->max_pages) {
-				DRM_ERROR("Maximum locked page count exceeded\n");
+			if (drm_alloc_memctl(PAGE_SIZE)) {
 				return -ENOMEM;
 			}
 			page = drm_alloc_gatt_pages(0);
-			if (!page)
+			if (!page) {
+				drm_free_memctl(PAGE_SIZE);
 				return -ENOMEM;
+			}
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
 			SetPageLocked(page);
 #else
@@ -238,7 +245,7 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
 	if (!bo_driver)
 		return NULL;
 
-	ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
+	ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
 	if (!ttm)
 		return NULL;
 
@@ -254,6 +261,11 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	ttm->page_flags = 0;
+
+	/*
+	 * Account also for AGP module memory usage.
+	 */
+
 	ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
 			       DRM_MEM_TTM);
 	if (!ttm->pages) {
@@ -403,14 +415,14 @@ static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
 		drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
 		if (ttm) {
 			if (drm_destroy_ttm(ttm) != -EBUSY) {
-				drm_free(map, sizeof(*map), DRM_MEM_TTM);
+				drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
 			}
 		} else {
-			drm_free(map, sizeof(*map), DRM_MEM_TTM);
+			drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
 		}
 	}
 
-	drm_free(object, sizeof(*object), DRM_MEM_TTM);
+	drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
 }
 
 void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
@@ -444,13 +456,13 @@ int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
 	drm_local_map_t *map;
 	drm_ttm_t *ttm;
 
-	object = drm_calloc(1, sizeof(*object), DRM_MEM_TTM);
+	object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
 	if (!object)
 		return -ENOMEM;
 	object->flags = flags;
 	list = &object->map_list;
 
-	list->map = drm_calloc(1, sizeof(*map), DRM_MEM_TTM);
+	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
 	if (!list->map) {
 		drm_ttm_object_remove(dev, object);
 		return -ENOMEM;
@@ -202,13 +202,13 @@ struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
 	page = ttm->pages[page_offset];
 
 	if (!page) {
-		if (bm->cur_pages >= bm->max_pages) {
-			DRM_ERROR("Maximum locked page count exceeded\n");
+		if (drm_alloc_memctl(PAGE_SIZE)) {
 			data->type = VM_FAULT_OOM;
 			goto out;
 		}
 		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
 		if (!page) {
+			drm_free_memctl(PAGE_SIZE);
 			data->type = VM_FAULT_OOM;
 			goto out;
 		}
@@ -654,7 +654,7 @@ static void drm_vm_ttm_close(struct vm_area_struct *vma)
 			if (ttm->destroy) {
 				ret = drm_destroy_ttm(ttm);
 				BUG_ON(ret);
-				drm_free(map, sizeof(*map), DRM_MEM_TTM);
+				drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
 			}
 		}
 		mutex_unlock(&dev->struct_mutex);