Add improved alignment functionality to the core memory manager.

This makes an allocated block actually align itself and returns any
wasted space to the manager.

Also add some functions to grow and shrink the managed area.
This will be used in the future to manage the buffer object swap cache.
main
Thomas Hellstrom 2006-10-26 21:17:43 +02:00
parent b4fba1679b
commit 47dbfc4e4a
1 changed file with 126 additions and 45 deletions

View File

@ -44,20 +44,79 @@
#include "drmP.h" #include "drmP.h"
#include <linux/slab.h> #include <linux/slab.h>
/*
 * Report how much free space sits at the tail of the managed area.
 *
 * Looks at the last node on the memory-ordered list; if it is a free
 * hole its size is returned, otherwise 0 (the tail is allocated and
 * nothing can be trimmed without moving buffers).
 */
unsigned long tail_space(drm_mm_t *mm)
{
	struct list_head *tail_node;
	drm_mm_node_t *entry;

	tail_node = mm->root_node.ml_entry.prev;
	entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}
/*
 * Shrink the managed area by taking 'size' bytes off its tail.
 *
 * Succeeds only when the tail node is a free hole strictly larger
 * than 'size' (the hole must not vanish completely).  Returns 0 on
 * success, -ENOMEM when the space cannot be reclaimed.
 */
int remove_space_from_tail(drm_mm_t *mm, unsigned long size)
{
	drm_mm_node_t *entry =
	    list_entry(mm->root_node.ml_entry.prev, drm_mm_node_t, ml_entry);

	/* Tail must be free and must keep a nonzero size after the cut. */
	if (!entry->free || entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}
/*
 * Append a new free node covering [start, start + size) to the end of
 * both the memory-ordered list and the free list.
 *
 * Returns 0 on success, -ENOMEM if the node cache allocation fails.
 */
static int drm_mm_create_tail_node(drm_mm_t *mm,
				   unsigned long start,
				   unsigned long size)
{
	drm_mm_node_t *child;

	child = (drm_mm_node_t *)
	    drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), GFP_KERNEL);
	if (!child)
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->root_node.ml_entry);
	list_add_tail(&child->fl_entry, &mm->root_node.fl_entry);

	return 0;
}
/*
 * Grow the managed area by 'size' bytes at its tail.
 *
 * If the current tail node is already a free hole it is simply
 * enlarged; otherwise a fresh free node is appended directly after
 * it.  Returns 0 on success, -ENOMEM if a needed node allocation
 * fails.
 */
int add_space_to_tail(drm_mm_t *mm, unsigned long size)
{
	drm_mm_node_t *entry =
	    list_entry(mm->root_node.ml_entry.prev, drm_mm_node_t, ml_entry);

	if (entry->free) {
		entry->size += size;
		return 0;
	}

	return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
}
static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
unsigned long size)
{
drm_mm_node_t *child;
child = (drm_mm_node_t *) child = (drm_mm_node_t *)
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
@ -65,7 +124,6 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
if (!child) if (!child)
return NULL; return NULL;
INIT_LIST_HEAD(&child->ml_entry);
INIT_LIST_HEAD(&child->fl_entry); INIT_LIST_HEAD(&child->fl_entry);
child->free = 0; child->free = 0;
@ -74,9 +132,44 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
child->mm = parent->mm; child->mm = parent->mm;
list_add_tail(&child->ml_entry, &parent->ml_entry); list_add_tail(&child->ml_entry, &parent->ml_entry);
INIT_LIST_HEAD(&child->fl_entry);
parent->size -= size; parent->size -= size;
parent->start += size; parent->start += size;
return child;
} }
/*
 * Carve an allocated block of 'size' bytes out of the free node
 * 'parent', honoring 'alignment' (0 or 1 means unconstrained).
 *
 * Alignment is implemented by first splitting off the wasted bytes at
 * the front of 'parent'; that split-off is handed back to the manager
 * via drm_mm_put_block() on every successful path, so no space leaks.
 * Returns the allocated (non-free) node, or NULL if a node allocation
 * fails (in which case 'parent' is left usable).
 */
drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
				unsigned long size, unsigned alignment)
{
	drm_mm_node_t *align_splitoff = NULL;
	drm_mm_node_t *child;
	unsigned tmp = 0;

	/* Original computed size % alignment unconditionally, which is
	 * undefined behavior for alignment == 0; guard like the
	 * matching code in drm_mm_search_free() does. */
	if (alignment)
		tmp = size % alignment;

	/* NOTE(review): the waste is derived from 'size', not from
	 * parent->start -- confirm this matches the intended alignment
	 * semantics before relying on the resulting start address. */
	if (tmp) {
		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
		if (!align_splitoff)
			return NULL;
	}

	if (parent->size == size) {
		/* Exact fit: convert the whole free node in place. */
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		child = parent;
	} else {
		child = drm_mm_split_at_start(parent, size);
		if (!child) {
			if (align_splitoff)
				drm_mm_put_block(align_splitoff);
			return NULL;
		}
	}

	/* Return the alignment waste to the manager.  The original
	 * early-returned on the exact-fit path above and leaked
	 * align_splitoff there. */
	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}
@ -139,16 +232,23 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
drm_mm_node_t *entry; drm_mm_node_t *entry;
drm_mm_node_t *best; drm_mm_node_t *best;
unsigned long best_size; unsigned long best_size;
unsigned wasted;
best = NULL; best = NULL;
best_size = ~0UL; best_size = ~0UL;
if (alignment)
size += alignment - 1;
list_for_each(list, free_stack) { list_for_each(list, free_stack) {
entry = list_entry(list, drm_mm_node_t, fl_entry); entry = list_entry(list, drm_mm_node_t, fl_entry);
if (entry->size >= size) { wasted = 0;
if (alignment) {
register unsigned tmp = size % alignment;
if (tmp)
wasted += alignment - tmp;
}
if (entry->size >= size + wasted) {
if (!best_match) if (!best_match)
return entry; return entry;
if (size < best_size) { if (size < best_size) {
@ -170,29 +270,10 @@ int drm_mm_clean(drm_mm_t * mm)
/*
 * Initialize a memory manager covering [start, start + size) as a
 * single free hole.
 *
 * Returns 0 on success, -ENOMEM if the initial tail node cannot be
 * allocated.
 */
int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->root_node.ml_entry);
	INIT_LIST_HEAD(&mm->root_node.fl_entry);

	return drm_mm_create_tail_node(mm, start, size);
}

EXPORT_SYMBOL(drm_mm_init);