intel: Reformat to the kernel coding style. Welcome to the 8-space future.

This is done with:

    Lindent *.[ch]
    perl -pi -e 's|drm_intel_bo \* |drm_intel_bo *|g' *.[ch]
    perl -pi -e 's|drm_intel_bufmgr \* |drm_intel_bufmgr *|g' *.[ch]
    perl -pi -e 's|drm_intel_bo_gem \* |drm_intel_bo_gem *|g' *.[ch]
    perl -pi -e 's|drm_intel_bufmgr_gem \* |drm_intel_bufmgr_gem *|g' *.[ch]
    perl -pi -e 's|_fake \* |_fake *|g' *.[ch]

plus hand-editing to whack indented comments into line and other touchups.
parent 3c9bd068e0
commit d70d60529f
@@ -42,7 +42,9 @@
#define HAS_ATOMIC_OPS 1

typedef struct {
        int atomic;
} atomic_t;

# define atomic_read(x) ((x)->atomic)
# define atomic_set(x, val) ((x)->atomic = (val))
@@ -44,124 +44,114 @@
 * Convenience functions for buffer management methods.
 */

drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
                                 unsigned long size, unsigned int alignment)
{
        return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}

drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
                                            const char *name,
                                            unsigned long size,
                                            unsigned int alignment)
{
        return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}

void drm_intel_bo_reference(drm_intel_bo *bo)
{
        bo->bufmgr->bo_reference(bo);
}

void drm_intel_bo_unreference(drm_intel_bo *bo)
{
        if (bo == NULL)
                return;

        bo->bufmgr->bo_unreference(bo);
}

int drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
        return buf->bufmgr->bo_map(buf, write_enable);
}

int drm_intel_bo_unmap(drm_intel_bo *buf)
{
        return buf->bufmgr->bo_unmap(buf);
}

int
drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
                     unsigned long size, const void *data)
{
        int ret;

        if (bo->bufmgr->bo_subdata)
                return bo->bufmgr->bo_subdata(bo, offset, size, data);
        if (size == 0 || data == NULL)
                return 0;

        ret = drm_intel_bo_map(bo, 1);
        if (ret)
                return ret;
        memcpy((unsigned char *)bo->virtual + offset, data, size);
        drm_intel_bo_unmap(bo);
        return 0;
}

int
drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
                         unsigned long size, void *data)
{
        int ret;
        if (bo->bufmgr->bo_subdata)
                return bo->bufmgr->bo_get_subdata(bo, offset, size, data);

        if (size == 0 || data == NULL)
                return 0;

        ret = drm_intel_bo_map(bo, 0);
        if (ret)
                return ret;
        memcpy(data, (unsigned char *)bo->virtual + offset, size);
        drm_intel_bo_unmap(bo);
        return 0;
}

void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
        bo->bufmgr->bo_wait_rendering(bo);
}

void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
        bufmgr->destroy(bufmgr);
}

int
drm_intel_bo_exec(drm_intel_bo *bo, int used,
                  drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
        return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}

void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
        bufmgr->debug = enable_debug;
}

int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
        return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}

int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
        if (bo->bufmgr->bo_flink)
                return bo->bufmgr->bo_flink(bo, name);

        return -ENODEV;
}

int
@@ -174,43 +164,41 @@ drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                                         read_domains, write_domain);
}

int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
        if (bo->bufmgr->bo_pin)
                return bo->bufmgr->bo_pin(bo, alignment);

        return -ENODEV;
}

int drm_intel_bo_unpin(drm_intel_bo *bo)
{
        if (bo->bufmgr->bo_unpin)
                return bo->bufmgr->bo_unpin(bo);

        return -ENODEV;
}

int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t stride)
{
        if (bo->bufmgr->bo_set_tiling)
                return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);

        *tiling_mode = I915_TILING_NONE;
        return 0;
}

int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t * swizzle_mode)
{
        if (bo->bufmgr->bo_get_tiling)
                return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);

        *tiling_mode = I915_TILING_NONE;
        *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
        return 0;
}

int drm_intel_bo_disable_reuse(drm_intel_bo *bo)
@@ -227,17 +215,14 @@ int drm_intel_bo_busy(drm_intel_bo *bo)
        return 0;
}

int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
        return bo->bufmgr->bo_references(bo, target_bo);
}

int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
        if (bufmgr->get_pipe_from_crtc_id)
                return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
        return -1;
}
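The functions above are thin dispatchers into the bufmgr vtable, so a caller only ever deals with the public entry points. Below is a minimal usage sketch, assuming a drm_intel_bufmgr created elsewhere (for example by the GEM or fake backend whose diffs are suppressed further down); the buffer name "example" and the 4096-byte alignment are arbitrary choices for illustration.

#include <errno.h>
#include <string.h>
#include "intel_bufmgr.h"

/* Illustrative only: create a buffer and upload data through the
 * convenience wrappers shown above. */
static int upload_example(drm_intel_bufmgr *bufmgr, const void *data,
                          unsigned long size)
{
        drm_intel_bo *bo;
        int ret;

        bo = drm_intel_bo_alloc(bufmgr, "example", size, 4096);
        if (bo == NULL)
                return -ENOMEM;

        /* bo_subdata falls back to map/memcpy/unmap when the backend
         * does not provide a specialized path. */
        ret = drm_intel_bo_subdata(bo, 0, size, data);
        if (ret != 0) {
                drm_intel_bo_unreference(bo);
                return ret;
        }

        /* The mapping path is equivalent: map for write, copy into
         * bo->virtual, then unmap. */
        ret = drm_intel_bo_map(bo, 1);
        if (ret == 0) {
                memcpy(bo->virtual, data, size);
                drm_intel_bo_unmap(bo);
        }

        drm_intel_bo_unreference(bo);
        return ret;
}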
@@ -40,37 +40,40 @@ typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
typedef struct _drm_intel_bo drm_intel_bo;

struct _drm_intel_bo {
        /**
         * Size in bytes of the buffer object.
         *
         * The size may be larger than the size originally requested for the
         * allocation, such as being aligned to page size.
         */
        unsigned long size;

        /**
         * Alignment requirement for object
         *
         * Used for GTT mapping & pinning the object.
         */
        unsigned long align;

        /**
         * Card virtual address (offset from the beginning of the aperture)
         * for the object.  Only valid while validated.
         */
        unsigned long offset;

        /**
         * Virtual address for accessing the buffer data.  Only valid while
         * mapped.
         */
        void *virtual;

        /** Buffer manager context associated with this buffer object */
        drm_intel_bufmgr *bufmgr;

        /**
         * MM-specific handle for accessing object
         */
        int handle;
};

drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
@@ -85,28 +88,27 @@ int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
int drm_intel_bo_unmap(drm_intel_bo *bo);

int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
                         unsigned long size, const void *data);
int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
                             unsigned long size, void *data);
void drm_intel_bo_wait_rendering(drm_intel_bo *bo);

void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
int drm_intel_bo_exec(drm_intel_bo *bo, int used,
                      drm_clip_rect_t * cliprects, int num_cliprects, int DR4);
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);

int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                            drm_intel_bo *target_bo, uint32_t target_offset,
                            uint32_t read_domains, uint32_t write_domain);
int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
int drm_intel_bo_unpin(drm_intel_bo *bo);
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t stride);
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t * swizzle_mode);
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
int drm_intel_bo_busy(drm_intel_bo *bo);

int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
@@ -129,26 +131,29 @@ drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
                                             unsigned long low_offset,
                                             void *low_virtual,
                                             unsigned long size,
                                             volatile unsigned int
                                             *last_dispatch);
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
                                             volatile unsigned int
                                             *last_dispatch);
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
                                             int (*exec) (drm_intel_bo *bo,
                                                          unsigned int used,
                                                          void *priv),
                                             void *priv);
void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
                                              unsigned int (*emit) (void *priv),
                                              void (*wait) (unsigned int fence,
                                                            void *priv),
                                              void *priv);
drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
                                             const char *name,
                                             unsigned long offset,
                                             unsigned long size, void *virtual);
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
                                             void (*invalidate_cb) (drm_intel_bo
                                                                    * bo,
                                                                    void *ptr),
                                             void *ptr);

void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
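The fake-bufmgr setters above take function-pointer arguments whose Lindent-wrapped declarations are hard to read at a glance. A small sketch of registering an exec callback with the signature declared above; the callback body and the priv pointer are hypothetical.

#include "intel_bufmgr.h"

/* Hypothetical callback: invoked by the fake bufmgr to submit a batch. */
static int my_exec(drm_intel_bo *bo, unsigned int used, void *priv)
{
        (void)bo;
        (void)used;
        (void)priv;
        return 0;       /* report success back to the fake bufmgr */
}

static void register_exec(drm_intel_bufmgr *bufmgr, void *driver_priv)
{
        /* Matches the declaration above: callback first, then its priv. */
        drm_intel_bufmgr_fake_set_exec_callback(bufmgr, my_exec, driver_priv);
}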
@@ -174,8 +179,8 @@ void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset,        \
                          reloc_offset, target_bo)                      \
        drm_intel_bo_emit_reloc(reloc_bo, reloc_offset,                 \
                                target_bo, target_offset,               \
                                read, write);
#define dri_bo_pin drm_intel_bo_pin
#define dri_bo_unpin drm_intel_bo_unpin
#define dri_bo_get_tiling drm_intel_bo_get_tiling
@@ -196,4 +201,3 @@ void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
/** @{ */

#endif /* INTEL_BUFMGR_H */
Two file diffs suppressed because they are too large.
@@ -40,178 +40,190 @@
 * Contains public methods followed by private storage for the buffer manager.
 */
struct _drm_intel_bufmgr {
        /**
         * Allocate a buffer object.
         *
         * Buffer objects are not necessarily initially mapped into CPU virtual
         * address space or graphics device aperture.  They must be mapped
         * using bo_map() to be used by the CPU, and validated for use using
         * bo_validate() to be used from the graphics device.
         */
        drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
                                   unsigned long size, unsigned int alignment);

        /**
         * Allocate a buffer object, hinting that it will be used as a
         * render target.
         *
         * This is otherwise the same as bo_alloc.
         */
        drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
                                              const char *name,
                                              unsigned long size,
                                              unsigned int alignment);

        /** Takes a reference on a buffer object */
        void (*bo_reference) (drm_intel_bo *bo);

        /**
         * Releases a reference on a buffer object, freeing the data if
         * rerefences remain.
         */
        void (*bo_unreference) (drm_intel_bo *bo);

        /**
         * Maps the buffer into userspace.
         *
         * This function will block waiting for any existing execution on the
         * buffer to complete, first.  The resulting mapping is available at
         * buf->virtual.
         */
        int (*bo_map) (drm_intel_bo *bo, int write_enable);

        /**
         * Reduces the refcount on the userspace mapping of the buffer
         * object.
         */
        int (*bo_unmap) (drm_intel_bo *bo);

        /**
         * Write data into an object.
         *
         * This is an optional function, if missing,
         * drm_intel_bo will map/memcpy/unmap.
         */
        int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset,
                           unsigned long size, const void *data);

        /**
         * Read data from an object
         *
         * This is an optional function, if missing,
         * drm_intel_bo will map/memcpy/unmap.
         */
        int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
                               unsigned long size, void *data);

        /**
         * Waits for rendering to an object by the GPU to have completed.
         *
         * This is not required for any access to the BO by bo_map,
         * bo_subdata, etc.  It is merely a way for the driver to implement
         * glFinish.
         */
        void (*bo_wait_rendering) (drm_intel_bo *bo);

        /**
         * Tears down the buffer manager instance.
         */
        void (*destroy) (drm_intel_bufmgr *bufmgr);

        /**
         * Add relocation entry in reloc_buf, which will be updated with the
         * target buffer's real offset on on command submission.
         *
         * Relocations remain in place for the lifetime of the buffer object.
         *
         * \param bo Buffer to write the relocation into.
         * \param offset Byte offset within reloc_bo of the pointer to
         *                      target_bo.
         * \param target_bo Buffer whose offset should be written into the
         *                      relocation entry.
         * \param target_offset Constant value to be added to target_bo's
         *                      offset in relocation entry.
         * \param read_domains GEM read domains which the buffer will be
         *                      read into by the command that this relocation
         *                      is part of.
         * \param write_domains GEM read domains which the buffer will be
         *                      dirtied in by the command that this
         *                      relocation is part of.
         */
        int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
                              drm_intel_bo *target_bo, uint32_t target_offset,
                              uint32_t read_domains, uint32_t write_domain);

        /** Executes the command buffer pointed to by bo. */
        int (*bo_exec) (drm_intel_bo *bo, int used,
                        drm_clip_rect_t *cliprects, int num_cliprects,
                        int DR4);

        /**
         * Pin a buffer to the aperture and fix the offset until unpinned
         *
         * \param buf Buffer to pin
         * \param alignment Required alignment for aperture, in bytes
         */
        int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);

        /**
         * Unpin a buffer from the aperture, allowing it to be removed
         *
         * \param buf Buffer to unpin
         */
        int (*bo_unpin) (drm_intel_bo *bo);

        /**
         * Ask that the buffer be placed in tiling mode
         *
         * \param buf Buffer to set tiling mode for
         * \param tiling_mode desired, and returned tiling mode
         */
        int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
                              uint32_t stride);

        /**
         * Get the current tiling (and resulting swizzling) mode for the bo.
         *
         * \param buf Buffer to get tiling mode for
         * \param tiling_mode returned tiling mode
         * \param swizzle_mode returned swizzling mode
         */
        int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
                              uint32_t * swizzle_mode);

        /**
         * Create a visible name for a buffer which can be used by other apps
         *
         * \param buf Buffer to create a name for
         * \param name Returned name
         */
        int (*bo_flink) (drm_intel_bo *bo, uint32_t * name);

        /**
         * Returns 1 if mapping the buffer for write could cause the process
         * to block, due to the object being active in the GPU.
         */
        int (*bo_busy) (drm_intel_bo *bo);

        int (*check_aperture_space) (drm_intel_bo ** bo_array, int count);

        /**
         * Disable buffer reuse for buffers which will be shared in some way,
         * as with scanout buffers. When the buffer reference count goes to
         * zero, it will be freed and not placed in the reuse list.
         *
         * \param bo Buffer to disable reuse for
         */
        int (*bo_disable_reuse) (drm_intel_bo *bo);

        /**
         *
         * Return the pipe associated with a crtc_id so that vblank
         * synchronization can use the correct data in the request.
         * This is only supported for KMS and gem at this point, when
         * unsupported, this function returns -1 and leaves the decision
         * of what to do in that case to the caller
         *
         * \param bufmgr the associated buffer manager
         * \param crtc_id the crtc identifier
         */
        int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id);

        /** Returns true if target_bo is in the relocation tree rooted at bo. */
        int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);

        /**< Enables verbose debugging printouts */
        int debug;
};

#endif /* INTEL_BUFMGR_PRIV_H */
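Since _drm_intel_bufmgr is just a table of methods plus the debug flag, a backend allocates its own structure, fills in the function pointers, and hands the embedded public struct back to callers. The skeleton below is a hypothetical illustration of that pattern using only members shown above; the real GEM and fake backends live in the suppressed diffs, and the embedding layout here is an assumption of this sketch.

#include <stdlib.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"

/* Hypothetical backend object embedding the public vtable first, so the
 * public pointer can be cast back to the private object. */
typedef struct {
        drm_intel_bufmgr bufmgr;
        int fd;
} my_bufmgr;

static drm_intel_bo *my_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
                                 unsigned long size, unsigned int alignment)
{
        (void)bufmgr; (void)name; (void)size; (void)alignment;
        return NULL;    /* allocation strategy omitted in this sketch */
}

drm_intel_bufmgr *my_bufmgr_init(int fd)
{
        my_bufmgr *b = calloc(1, sizeof(*b));

        if (b == NULL)
                return NULL;
        b->fd = fd;
        b->bufmgr.bo_alloc = my_bo_alloc;
        /* ...remaining methods (bo_map, bo_exec, destroy, ...) go here. */
        b->bufmgr.debug = 0;
        return &b->bufmgr;
}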
@@ -28,254 +28,244 @@
#include "xf86drm.h"
#include "mm.h"

void mmDumpMemInfo(const struct mem_block *heap)
{
        drmMsg("Memory heap %p:\n", (void *)heap);
        if (heap == 0) {
                drmMsg("  heap == 0\n");
        } else {
                const struct mem_block *p;

                for (p = heap->next; p != heap; p = p->next) {
                        drmMsg("  Offset:%08x, Size:%08x, %c%c\n", p->ofs,
                               p->size, p->free ? 'F' : '.',
                               p->reserved ? 'R' : '.');
                }

                drmMsg("\nFree list:\n");

                for (p = heap->next_free; p != heap; p = p->next_free) {
                        drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n", p->ofs,
                               p->size, p->free ? 'F' : '.',
                               p->reserved ? 'R' : '.');
                }

        }
        drmMsg("End of memory blocks\n");
}

struct mem_block *mmInit(int ofs, int size)
{
        struct mem_block *heap, *block;

        if (size <= 0)
                return NULL;

        heap = (struct mem_block *)calloc(1, sizeof(struct mem_block));
        if (!heap)
                return NULL;

        block = (struct mem_block *)calloc(1, sizeof(struct mem_block));
        if (!block) {
                free(heap);
                return NULL;
        }

        heap->next = block;
        heap->prev = block;
        heap->next_free = block;
        heap->prev_free = block;

        block->heap = heap;
        block->next = heap;
        block->prev = heap;
        block->next_free = heap;
        block->prev_free = heap;

        block->ofs = ofs;
        block->size = size;
        block->free = 1;

        return heap;
}

static struct mem_block *SliceBlock(struct mem_block *p,
                                    int startofs, int size,
                                    int reserved, int alignment)
{
        struct mem_block *newblock;

        /* break left [p, newblock, p->next], then p = newblock */
        if (startofs > p->ofs) {
                newblock =
                    (struct mem_block *)calloc(1, sizeof(struct mem_block));
                if (!newblock)
                        return NULL;
                newblock->ofs = startofs;
                newblock->size = p->size - (startofs - p->ofs);
                newblock->free = 1;
                newblock->heap = p->heap;

                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;

                newblock->next_free = p->next_free;
                newblock->prev_free = p;
                p->next_free->prev_free = newblock;
                p->next_free = newblock;

                p->size -= newblock->size;
                p = newblock;
        }

        /* break right, also [p, newblock, p->next] */
        if (size < p->size) {
                newblock =
                    (struct mem_block *)calloc(1, sizeof(struct mem_block));
                if (!newblock)
                        return NULL;
                newblock->ofs = startofs + size;
                newblock->size = p->size - size;
                newblock->free = 1;
                newblock->heap = p->heap;

                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;

                newblock->next_free = p->next_free;
                newblock->prev_free = p;
                p->next_free->prev_free = newblock;
                p->next_free = newblock;

                p->size = size;
        }

        /* p = middle block */
        p->free = 0;

        /* Remove p from the free list:
         */
        p->next_free->prev_free = p->prev_free;
        p->prev_free->next_free = p->next_free;

        p->next_free = 0;
        p->prev_free = 0;

        p->reserved = reserved;
        return p;
}

struct mem_block *mmAllocMem(struct mem_block *heap, int size, int align2,
                             int startSearch)
{
        struct mem_block *p;
        const int mask = (1 << align2) - 1;
        int startofs = 0;
        int endofs;

        if (!heap || align2 < 0 || size <= 0)
                return NULL;

        for (p = heap->next_free; p != heap; p = p->next_free) {
                assert(p->free);

                startofs = (p->ofs + mask) & ~mask;
                if (startofs < startSearch) {
                        startofs = startSearch;
                }
                endofs = startofs + size;
                if (endofs <= (p->ofs + p->size))
                        break;
        }

        if (p == heap)
                return NULL;

        assert(p->free);
        p = SliceBlock(p, startofs, size, 0, mask + 1);

        return p;
}

struct mem_block *mmFindBlock(struct mem_block *heap, int start)
{
        struct mem_block *p;

        for (p = heap->next; p != heap; p = p->next) {
                if (p->ofs == start)
                        return p;
        }

        return NULL;
}

static int Join2Blocks(struct mem_block *p)
{
        /* XXX there should be some assertions here */

        /* NOTE: heap->free == 0 */

        if (p->free && p->next->free) {
                struct mem_block *q = p->next;

                assert(p->ofs + p->size == q->ofs);
                p->size += q->size;

                p->next = q->next;
                q->next->prev = p;

                q->next_free->prev_free = q->prev_free;
                q->prev_free->next_free = q->next_free;

                free(q);
                return 1;
        }
        return 0;
}

int mmFreeMem(struct mem_block *b)
{
        if (!b)
                return 0;

        if (b->free) {
                drmMsg("block already free\n");
                return -1;
        }
        if (b->reserved) {
                drmMsg("block is reserved\n");
                return -1;
        }

        b->free = 1;
        b->next_free = b->heap->next_free;
        b->prev_free = b->heap;
        b->next_free->prev_free = b;
        b->prev_free->next_free = b;

        Join2Blocks(b);
        if (b->prev != b->heap)
                Join2Blocks(b->prev);

        return 0;
}

void mmDestroy(struct mem_block *heap)
{
        struct mem_block *p;

        if (!heap)
                return;

        for (p = heap->next; p != heap;) {
                struct mem_block *next = p->next;
                free(p);
                p = next;
        }

        free(heap);
}
@@ -21,23 +21,21 @@
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * Memory manager code.  Primarily used by device drivers to manage texture
 * heaps, etc.
 */

#ifndef MM_H
#define MM_H

struct mem_block {
        struct mem_block *next, *prev;
        struct mem_block *next_free, *prev_free;
        struct mem_block *heap;
        int ofs, size;
        unsigned int free:1;
        unsigned int reserved:1;
};

/* Rename the variables in the drm copy of this code so that it doesn't
@@ -67,7 +65,7 @@ extern struct mem_block *mmInit(int ofs, int size);
 * return: pointer to the allocated block, 0 if error
 */
extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
                                    int align2, int startSearch);

/**
 * Free block starts at offset
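Per the comments above, mmAllocMem returns a block pointer or 0 on error, and align2 is a power-of-two exponent (the implementation builds the mask as (1 << align2) - 1). A short, illustrative driver of the allocator follows; the heap range and alignment are arbitrary values chosen for this sketch.

#include <stddef.h>
#include "mm.h"

static void mm_example(void)
{
        /* Manage a 1 MiB range starting at offset 0. */
        struct mem_block *heap = mmInit(0, 1024 * 1024);
        struct mem_block *block;

        if (heap == NULL)
                return;

        /* Ask for 4096 bytes aligned to 2^12, searching from offset 0. */
        block = mmAllocMem(heap, 4096, 12, 0);
        if (block != NULL)
                mmFreeMem(block);       /* return the block to the free list */

        mmDestroy(heap);
}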