drm/shared-core/i915_mem.c

/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
 */
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 **************************************************************************/

#include "i915.h"
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* This memory manager is integrated into the global/local lru
 * mechanisms used by the clients.  Specifically, it operates by
 * setting the 'in_use' fields of the global LRU to indicate whether
 * this region is privately allocated to a client.
 *
 * This does require the client to actually respect that field.
 *
 * Currently no effort is made to allocate 'private' memory in any
 * clever way - the LRU information isn't used to determine which
 * block to allocate, and the ring is drained prior to allocations --
 * in other words allocation is expensive.
 */
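
/* Mark every texture LRU region covered by block 'p' as in_use (or
 * free), bump its age and move it to the head of the shared LRU list
 * so clients notice the change in ownership.
 */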
static void mark_block( drm_device_t *dev, struct mem_block *p,
			int in_use )
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_tex_region_t *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i) */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i) */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}

/* Very simple allocator for agp memory, working on a static range
 * already mapped into each client's address space.
 */
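
/* Carve the range [start, start + size) out of free block 'p',
 * creating new blocks for any leftover space before and after it, and
 * hand the middle piece to 'filp'.  On allocation failure the block is
 * handed over unsplit.
 */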
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp )
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock = DRM_MALLOC(sizeof(*newblock));
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = DRM_MALLOC(sizeof(*newblock));
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->filp = filp;
	return p;
}
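
/* First-fit allocation: walk the heap for a free block that can hold
 * 'size' bytes at a (1 << align2)-byte aligned offset and split it.
 */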
static struct mem_block *alloc_block( struct mem_block *heap, int size,
				      int align2, DRMFILE filp )
{
	struct mem_block *p;
	int mask = (1 << align2) - 1;

	for (p = heap->next; p != heap; p = p->next) {
		int start = (p->start + mask) & ~mask;
		if (p->filp == NULL && start + size <= p->start + p->size)
			return split_block( p, start, size, filp );
	}

	return NULL;
}
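
/* Look up the block whose start offset matches 'start'.
 */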
static struct mem_block *find_block( struct mem_block *heap, int start )
{
	struct mem_block *p;

	for (p = heap->next; p != heap; p = p->next)
		if (p->start == start)
			return p;

	return NULL;
}
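
/* Release a block and merge it with any free neighbours.
 */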
static void free_block( struct mem_block *p )
{
	p->filp = NULL;

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->filp == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		DRM_FREE(q, sizeof(*q));
	}

	if (p->prev->filp == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		DRM_FREE(p, sizeof(*q));
	}
}
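
/* A heap is a circular, doubly linked list anchored by a sentinel node
 * whose filp is (DRMFILE) -1 so that it can never be merged away.  It
 * starts out holding a single free block covering [start, start + size).
 */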

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = DRM_MALLOC(sizeof(*blocks));

	if (!blocks)
		return -ENOMEM;

	*heap = DRM_MALLOC(sizeof(**heap));
	if (!*heap) {
		DRM_FREE( blocks, sizeof(*blocks) );
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->filp = NULL;
	blocks->next = blocks->prev = *heap;

	memset( *heap, 0, sizeof(**heap) );
	(*heap)->filp = (DRMFILE) -1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

/* Free all blocks associated with the releasing file.
 */
void i915_mem_release( drm_device_t *dev,
		       DRMFILE filp, struct mem_block *heap )
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == filp) {
			p->filp = NULL;
			mark_block( dev, p, 0 );
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			DRM_FREE(q, sizeof(*q));
		}
	}
}

/* Shutdown.
 */
void i915_mem_takedown( struct mem_block **heap )
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		DRM_FREE(q, sizeof(*q));
	}

	DRM_FREE( *heap, sizeof(**heap) );
	*heap = 0;
}
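
/* Map an ioctl region id onto the corresponding heap pointer; only the
 * AGP heap exists at present.
 */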
static struct mem_block **get_heap( drm_i915_private_t *dev_priv,
				    int region )
{
	switch( region ) {
	case I915_MEM_REGION_AGP:
		return &dev_priv->agp_heap;
	default:
		return 0;
	}
}

/* IOCTL HANDLERS */
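/* Each handler below first checks that the driver has been initialized,
 * copies its argument block in from user space, looks up the heap for
 * the requested region and then operates on it.
 */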

int i915_mem_alloc( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_alloc_t alloc;
	struct mem_block *block, **heap;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( alloc, (drm_i915_mem_alloc_t *)data,
				  sizeof(alloc) );

	heap = get_heap( dev_priv, alloc.region );
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc.alignment < 12)
		alloc.alignment = 12;

	block = alloc_block( *heap, alloc.size, alloc.alignment, filp );
	if (!block)
		return DRM_ERR(ENOMEM);

	mark_block( dev, block, 1 );

	if ( DRM_COPY_TO_USER( alloc.region_offset, &block->start,
			       sizeof(int) ) ) {
		DRM_ERROR( "copy_to_user\n" );
		return DRM_ERR(EFAULT);
	}

	return 0;
}
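
/* Illustrative userspace sketch only -- it assumes the libdrm
 * drmCommandWriteRead() wrapper and the DRM_I915_ALLOC command number
 * from i915_drm.h, neither of which is defined in this file:
 *
 *	drm_i915_mem_alloc_t alloc;
 *	int offset;
 *
 *	alloc.region = I915_MEM_REGION_AGP;
 *	alloc.alignment = 0;      (log2; anything below 12 is raised to 4k)
 *	alloc.size = 65536;
 *	alloc.region_offset = &offset;
 *	ret = drmCommandWriteRead(fd, DRM_I915_ALLOC, &alloc, sizeof(alloc));
 *	(on success, 'offset' holds the block's start within the heap)
 */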

int i915_mem_free( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_free_t memfree;
	struct mem_block *block, **heap;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( memfree, (drm_i915_mem_free_t *)data,
				  sizeof(memfree) );

	heap = get_heap( dev_priv, memfree.region );
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	block = find_block( *heap, memfree.region_offset );
	if (!block)
		return DRM_ERR(EFAULT);

	if (block->filp != filp)
		return DRM_ERR(EPERM);

	mark_block( dev, block, 0 );
	free_block( block );
	return 0;
}

int i915_mem_init_heap( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_init_heap_t initheap;
	struct mem_block **heap;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( initheap, (drm_i915_mem_init_heap_t *)data,
				  sizeof(initheap) );

	heap = get_heap( dev_priv, initheap.region );
	if (!heap)
		return DRM_ERR(EFAULT);

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return DRM_ERR(EFAULT);
	}

	return init_heap( heap, initheap.start, initheap.size );
}