Remove the memory caches for fence objects and memory manager nodes,
since support for these memory caches is gone as of kernel 2.6.20.
main
Thomas Hellstrom 2006-12-15 12:37:24 +01:00
parent aefc7a3443
commit 38ed67196f
5 changed files with 9 additions and 102 deletions

View File

@@ -755,17 +755,6 @@ typedef struct drm_head {
struct class_device *dev_class;
} drm_head_t;
typedef struct drm_cache {
/*
* Memory caches
*/
kmem_cache_t *mm;
kmem_cache_t *fence_object;
} drm_cache_t;
typedef struct drm_fence_driver{
int no_types;
@@ -1318,7 +1307,6 @@ extern int drm_put_head(drm_head_t * head);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern unsigned int drm_cards_limit;
extern drm_head_t **drm_heads;
extern drm_cache_t drm_cache;
extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
@@ -1581,25 +1569,6 @@ static inline void drm_ctl_free(void *pt, size_t size, int area)
drm_free_memctl(size);
}
static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,
int flags)
{
void *ret;
if (drm_alloc_memctl(size))
return NULL;
ret = kmem_cache_alloc(cache, flags);
if (!ret)
drm_free_memctl(size);
return ret;
}
static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
void *obj)
{
kmem_cache_free(cache, obj);
drm_free_memctl(size);
}
/*@}*/
#endif /* __KERNEL__ */

View File

@@ -446,52 +446,6 @@ static struct file_operations drm_stub_fops = {
.open = drm_stub_open
};
static int drm_create_memory_caches(void)
{
drm_cache.mm = kmem_cache_create("drm_mm_node_t",
sizeof(drm_mm_node_t),
0,
SLAB_HWCACHE_ALIGN,
NULL,NULL);
if (!drm_cache.mm)
return -ENOMEM;
drm_cache.fence_object= kmem_cache_create("drm_fence_object_t",
sizeof(drm_fence_object_t),
0,
SLAB_HWCACHE_ALIGN,
NULL,NULL);
if (!drm_cache.fence_object)
return -ENOMEM;
return 0;
}
static void drm_free_mem_cache(kmem_cache_t *cache,
const char *name)
{
if (!cache)
return;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
if (kmem_cache_destroy(cache)) {
DRM_ERROR("Warning! DRM is leaking %s memory.\n",
name);
}
#else
kmem_cache_destroy(cache);
#endif
}
static void drm_free_memory_caches(void )
{
drm_free_mem_cache(drm_cache.fence_object, "fence object");
drm_cache.fence_object = NULL;
drm_free_mem_cache(drm_cache.mm, "memory manager block");
drm_cache.mm = NULL;
}
static int __init drm_core_init(void)
{
int ret;
@@ -499,9 +453,6 @@ static int __init drm_core_init(void)
si_meminfo(&si);
drm_init_memctl(si.totalram/2, si.totalram*3/4);
ret = drm_create_memory_caches();
if (ret)
goto err_p1;
ret = -ENOMEM;
drm_cards_limit =
@@ -539,13 +490,11 @@ err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
err_p1:
drm_free_memory_caches();
return ret;
}
static void __exit drm_core_exit(void)
{
drm_free_memory_caches();
remove_proc_entry("dri", NULL);
drm_sysfs_destroy(drm_class);

View File

@@ -117,8 +117,7 @@ void drm_fence_usage_deref_locked(drm_device_t * dev,
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
fence->base.hash.key);
atomic_dec(&fm->count);
drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
fence);
drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
}
}
@@ -132,8 +131,7 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev,
if (atomic_read(&fence->usage) == 0) {
drm_fence_unring(dev, &fence->ring);
atomic_dec(&fm->count);
drm_ctl_cache_free(drm_cache.fence_object,
sizeof(*fence), fence);
drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
}
mutex_unlock(&dev->struct_mutex);
}
@@ -439,8 +437,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
int ret;
drm_fence_manager_t *fm = &dev->fm;
fence = drm_ctl_cache_alloc(drm_cache.fence_object,
sizeof(*fence), GFP_KERNEL);
fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, type, flags, fence);

View File

@@ -82,8 +82,7 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
drm_mm_node_t *child;
child = (drm_mm_node_t *)
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
GFP_KERNEL);
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -119,8 +118,7 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
drm_mm_node_t *child;
child = (drm_mm_node_t *)
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
GFP_KERNEL);
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -207,9 +205,8 @@ void drm_mm_put_block(drm_mm_node_t * cur)
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
drm_ctl_cache_free(drm_cache.mm,
sizeof(*next_node),
next_node);
drm_ctl_free(next_node, sizeof(*next_node),
DRM_MEM_MM);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@@ -222,7 +219,7 @@ void drm_mm_put_block(drm_mm_node_t * cur)
list_add(&cur->fl_entry, &list_root->fl_entry);
} else {
list_del(&cur->ml_entry);
drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
@@ -296,7 +293,7 @@ void drm_mm_takedown(drm_mm_t * mm)
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}
EXPORT_SYMBOL(drm_mm_takedown);

View File

@@ -54,11 +54,6 @@ drm_head_t **drm_heads;
struct drm_sysfs_class *drm_class;
struct proc_dir_entry *drm_proc_root;
drm_cache_t drm_cache =
{ .mm = NULL,
.fence_object = NULL
};
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)