intel: Reformat to the kernel coding style. Welcome to the 8-space future.

This is done with:
Lindent *.[ch]
perl -pi -e 's|drm_intel_bo \* |drm_intel_bo *|g' *.[ch]
perl -pi -e 's|drm_intel_bufmgr \* |drm_intel_bufmgr *|g' *.[ch]
perl -pi -e 's|drm_intel_bo_gem \* |drm_intel_bo_gem *|g' *.[ch]
perl -pi -e 's|drm_intel_bufmgr_gem \* |drm_intel_bufmgr_gem *|g' *.[ch]
perl -pi -e 's|_fake \* |_fake *|g' *.[ch]
hand-editing to whack indented comments into line and other touchups.
Branch: main
Author: Eric Anholt
Date:   2009-10-06 12:40:42 -07:00
Parent: 3c9bd068e0
Commit: d70d60529f
8 changed files with 2572 additions and 2485 deletions


@@ -42,7 +42,9 @@
#define HAS_ATOMIC_OPS 1
typedef struct { int atomic; } atomic_t;
typedef struct {
int atomic;
} atomic_t;
# define atomic_read(x) ((x)->atomic)
# define atomic_set(x, val) ((x)->atomic = (val))
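As a point of reference, and not part of the diff itself, the accessor macros above are used along these lines; the snippet below is a hypothetical, self-contained sketch that repeats the typedef and macros so it compiles on its own:

/* Hypothetical sketch of the atomic_t accessors shown above. */
typedef struct {
        int atomic;
} atomic_t;
#define atomic_read(x)          ((x)->atomic)
#define atomic_set(x, val)      ((x)->atomic = (val))

static int example_refcount(void)
{
        atomic_t refcount;

        atomic_set(&refcount, 1);       /* take the initial reference */
        return atomic_read(&refcount);  /* read it back: 1 */
}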


@@ -44,124 +44,114 @@
* Convenience functions for buffer management methods.
*/
drm_intel_bo *
drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
drm_intel_bo *
drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
void
drm_intel_bo_reference(drm_intel_bo *bo)
void drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
bo->bufmgr->bo_reference(bo);
}
void
drm_intel_bo_unreference(drm_intel_bo *bo)
void drm_intel_bo_unreference(drm_intel_bo *bo)
{
if (bo == NULL)
return;
if (bo == NULL)
return;
bo->bufmgr->bo_unreference(bo);
bo->bufmgr->bo_unreference(bo);
}
int
drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
int drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
return buf->bufmgr->bo_map(buf, write_enable);
}
int
drm_intel_bo_unmap(drm_intel_bo *buf)
int drm_intel_bo_unmap(drm_intel_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
return buf->bufmgr->bo_unmap(buf);
}
int
drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
int ret;
int ret;
if (bo->bufmgr->bo_subdata)
return bo->bufmgr->bo_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
return 0;
if (bo->bufmgr->bo_subdata)
return bo->bufmgr->bo_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
return 0;
ret = drm_intel_bo_map(bo, 1);
if (ret)
return ret;
memcpy((unsigned char *)bo->virtual + offset, data, size);
drm_intel_bo_unmap(bo);
return 0;
ret = drm_intel_bo_map(bo, 1);
if (ret)
return ret;
memcpy((unsigned char *)bo->virtual + offset, data, size);
drm_intel_bo_unmap(bo);
return 0;
}
int
drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
int ret;
if (bo->bufmgr->bo_subdata)
return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
int ret;
if (bo->bufmgr->bo_subdata)
return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
return 0;
if (size == 0 || data == NULL)
return 0;
ret = drm_intel_bo_map(bo, 0);
if (ret)
return ret;
memcpy(data, (unsigned char *)bo->virtual + offset, size);
drm_intel_bo_unmap(bo);
return 0;
ret = drm_intel_bo_map(bo, 0);
if (ret)
return ret;
memcpy(data, (unsigned char *)bo->virtual + offset, size);
drm_intel_bo_unmap(bo);
return 0;
}
void
drm_intel_bo_wait_rendering(drm_intel_bo *bo)
void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
bo->bufmgr->bo_wait_rendering(bo);
}
void
drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
bufmgr->destroy(bufmgr);
}
int
drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4)
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
void
drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
bufmgr->debug = enable_debug;
}
int
drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count)
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
int
drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name)
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);
return -ENODEV;
return -ENODEV;
}
int
@@ -174,43 +164,41 @@ drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
read_domains, write_domain);
}
int
drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
return bo->bufmgr->bo_pin(bo, alignment);
if (bo->bufmgr->bo_pin)
return bo->bufmgr->bo_pin(bo, alignment);
return -ENODEV;
return -ENODEV;
}
int
drm_intel_bo_unpin(drm_intel_bo *bo)
int drm_intel_bo_unpin(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_unpin)
return bo->bufmgr->bo_unpin(bo);
if (bo->bufmgr->bo_unpin)
return bo->bufmgr->bo_unpin(bo);
return -ENODEV;
return -ENODEV;
}
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
if (bo->bufmgr->bo_set_tiling)
return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
if (bo->bufmgr->bo_set_tiling)
return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
*tiling_mode = I915_TILING_NONE;
return 0;
*tiling_mode = I915_TILING_NONE;
return 0;
}
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
uint32_t *swizzle_mode)
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode)
{
if (bo->bufmgr->bo_get_tiling)
return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
if (bo->bufmgr->bo_get_tiling)
return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
*tiling_mode = I915_TILING_NONE;
*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
return 0;
*tiling_mode = I915_TILING_NONE;
*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
return 0;
}
int drm_intel_bo_disable_reuse(drm_intel_bo *bo)
@@ -227,17 +215,14 @@ int drm_intel_bo_busy(drm_intel_bo *bo)
return 0;
}
int
drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
return bo->bufmgr->bo_references(bo, target_bo);
}
int
drm_intel_get_pipe_from_crtc_id (drm_intel_bufmgr *bufmgr, int crtc_id)
int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
if (bufmgr->get_pipe_from_crtc_id)
return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
return -1;
}
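The file above is the thin public wrapper layer: each drm_intel_bo_* call forwards to the backend through function pointers on the bufmgr, and drm_intel_bo_subdata falls back to map/memcpy/unmap when the backend supplies no bo_subdata hook. A hypothetical caller-side sketch (function and variable names invented here; bufmgr creation not shown) looks like:

#include "intel_bufmgr.h"

/* Hypothetical usage sketch of the wrappers above; "bufmgr" is assumed
 * to come from one of the real init paths (GEM or fake), which are not
 * shown in this hunk. */
static int upload_pixels(drm_intel_bufmgr *bufmgr,
                         const void *pixels, unsigned long len)
{
        drm_intel_bo *bo;
        int ret;

        bo = drm_intel_bo_alloc(bufmgr, "upload", len, 4096);
        if (bo == NULL)
                return -1;

        /* Uses the backend bo_subdata hook if present, otherwise the
         * map/memcpy/unmap fallback implemented above. */
        ret = drm_intel_bo_subdata(bo, 0, len, pixels);

        drm_intel_bo_unreference(bo);
        return ret;
}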


@@ -40,37 +40,40 @@ typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
typedef struct _drm_intel_bo drm_intel_bo;
struct _drm_intel_bo {
/**
* Size in bytes of the buffer object.
*
* The size may be larger than the size originally requested for the
* allocation, such as being aligned to page size.
*/
unsigned long size;
/**
* Alignment requirement for object
*
* Used for GTT mapping & pinning the object.
*/
unsigned long align;
/**
* Size in bytes of the buffer object.
*
* The size may be larger than the size originally requested for the
* allocation, such as being aligned to page size.
*/
unsigned long size;
/**
* Card virtual address (offset from the beginning of the aperture) for the
* object. Only valid while validated.
*/
unsigned long offset;
/**
* Virtual address for accessing the buffer data. Only valid while mapped.
*/
void *virtual;
/**
* Alignment requirement for object
*
* Used for GTT mapping & pinning the object.
*/
unsigned long align;
/** Buffer manager context associated with this buffer object */
drm_intel_bufmgr *bufmgr;
/**
* Card virtual address (offset from the beginning of the aperture)
* for the object. Only valid while validated.
*/
unsigned long offset;
/**
* MM-specific handle for accessing object
*/
int handle;
/**
* Virtual address for accessing the buffer data. Only valid while
* mapped.
*/
void *virtual;
/** Buffer manager context associated with this buffer object */
drm_intel_bufmgr *bufmgr;
/**
* MM-specific handle for accessing object
*/
int handle;
};
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
@@ -85,28 +88,27 @@ int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
int drm_intel_bo_unmap(drm_intel_bo *bo);
int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data);
unsigned long size, const void *data);
int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data);
unsigned long size, void *data);
void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
int drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count);
drm_clip_rect_t * cliprects, int num_cliprects, int DR4);
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);
int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
int drm_intel_bo_unpin(drm_intel_bo *bo);
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
uint32_t *swizzle_mode);
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name);
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
int drm_intel_bo_busy(drm_intel_bo *bo);
int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
@@ -129,26 +131,29 @@ drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
unsigned long low_offset,
void *low_virtual,
unsigned long size,
volatile unsigned int *last_dispatch);
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
volatile unsigned int *last_dispatch);
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
int (*exec)(drm_intel_bo *bo,
unsigned int used,
void *priv),
int (*exec) (drm_intel_bo *bo,
unsigned int used,
void *priv),
void *priv);
void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
unsigned int (*emit)(void *priv),
void (*wait)(unsigned int fence,
void *priv),
unsigned int (*emit) (void *priv),
void (*wait) (unsigned int fence,
void *priv),
void *priv);
drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long offset, unsigned long size,
void *virtual);
unsigned long offset,
unsigned long size, void *virtual);
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
void (*invalidate_cb)(drm_intel_bo *bo,
void *ptr),
void (*invalidate_cb) (drm_intel_bo
* bo,
void *ptr),
void *ptr);
void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
@@ -174,8 +179,8 @@ void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \
reloc_offset, target_bo) \
drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \
target_bo, target_offset, \
read, write);
target_bo, target_offset, \
read, write);
#define dri_bo_pin drm_intel_bo_pin
#define dri_bo_unpin drm_intel_bo_unpin
#define dri_bo_get_tiling drm_intel_bo_get_tiling
@@ -196,4 +201,3 @@ void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
/** @{ */
#endif /* INTEL_BUFMGR_H */
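The fake bufmgr declared in the header above drives submission and fencing through caller-supplied hooks. A hypothetical sketch of registering an exec callback (the callback and its priv structure are invented names, not libdrm API) might look like:

#include "intel_bufmgr.h"

/* Hypothetical: my_device and my_exec stand in for a driver's own
 * submission path; only the signature matches the declaration above. */
struct my_device {
        int fd;
};

static int my_exec(drm_intel_bo *bo, unsigned int used, void *priv)
{
        struct my_device *dev = priv;

        /* A real driver would submit the batchbuffer "bo", of which
         * "used" bytes are valid, to the hardware via dev->fd. */
        (void)bo;
        (void)used;
        (void)dev;
        return 0;
}

static void install_exec_hook(drm_intel_bufmgr *bufmgr, struct my_device *dev)
{
        drm_intel_bufmgr_fake_set_exec_callback(bufmgr, my_exec, dev);
}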

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -40,178 +40,190 @@
* Contains public methods followed by private storage for the buffer manager.
*/
struct _drm_intel_bufmgr {
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped using
* bo_map() to be used by the CPU, and validated for use using bo_validate()
* to be used from the graphics device.
*/
drm_intel_bo *(*bo_alloc)(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped
* using bo_map() to be used by the CPU, and validated for use using
* bo_validate() to be used from the graphics device.
*/
drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a render target.
*
* This is otherwise the same as bo_alloc.
*/
drm_intel_bo *(*bo_alloc_for_render)(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a
* render target.
*
* This is otherwise the same as bo_alloc.
*/
drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
/** Takes a reference on a buffer object */
void (*bo_reference)(drm_intel_bo *bo);
/** Takes a reference on a buffer object */
void (*bo_reference) (drm_intel_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* rerefences remain.
*/
void (*bo_unreference)(drm_intel_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* rerefences remain.
*/
void (*bo_unreference) (drm_intel_bo *bo);
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
int (*bo_map)(drm_intel_bo *bo, int write_enable);
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
int (*bo_map) (drm_intel_bo *bo, int write_enable);
/** Reduces the refcount on the userspace mapping of the buffer object. */
int (*bo_unmap)(drm_intel_bo *bo);
/**
* Reduces the refcount on the userspace mapping of the buffer
* object.
*/
int (*bo_unmap) (drm_intel_bo *bo);
/**
* Write data into an object.
*
* This is an optional function, if missing,
* drm_intel_bo will map/memcpy/unmap.
*/
int (*bo_subdata)(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data);
/**
* Write data into an object.
*
* This is an optional function, if missing,
* drm_intel_bo will map/memcpy/unmap.
*/
int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data);
/**
* Read data from an object
*
* This is an optional function, if missing,
* drm_intel_bo will map/memcpy/unmap.
*/
int (*bo_get_subdata)(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Read data from an object
*
* This is an optional function, if missing,
* drm_intel_bo will map/memcpy/unmap.
*/
int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map, bo_subdata, etc.
* It is merely a way for the driver to implement glFinish.
*/
void (*bo_wait_rendering)(drm_intel_bo *bo);
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map,
* bo_subdata, etc. It is merely a way for the driver to implement
* glFinish.
*/
void (*bo_wait_rendering) (drm_intel_bo *bo);
/**
* Tears down the buffer manager instance.
*/
void (*destroy)(drm_intel_bufmgr *bufmgr);
/**
* Tears down the buffer manager instance.
*/
void (*destroy) (drm_intel_bufmgr *bufmgr);
/**
* Add relocation entry in reloc_buf, which will be updated with the
* target buffer's real offset on on command submission.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param bo Buffer to write the relocation into.
* \param offset Byte offset within reloc_bo of the pointer to target_bo.
* \param target_bo Buffer whose offset should be written into the
* relocation entry.
* \param target_offset Constant value to be added to target_bo's offset in
* relocation entry.
* \param read_domains GEM read domains which the buffer will be read into
* by the command that this relocation is part of.
* \param write_domains GEM read domains which the buffer will be dirtied
* in by the command that this relocation is part of.
*/
int (*bo_emit_reloc)(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
/**
* Add relocation entry in reloc_buf, which will be updated with the
* target buffer's real offset on on command submission.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param bo Buffer to write the relocation into.
* \param offset Byte offset within reloc_bo of the pointer to
* target_bo.
* \param target_bo Buffer whose offset should be written into the
* relocation entry.
* \param target_offset Constant value to be added to target_bo's
* offset in relocation entry.
* \param read_domains GEM read domains which the buffer will be
* read into by the command that this relocation
* is part of.
* \param write_domains GEM read domains which the buffer will be
* dirtied in by the command that this
* relocation is part of.
*/
int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
/** Executes the command buffer pointed to by bo. */
int (*bo_exec)(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
/** Executes the command buffer pointed to by bo. */
int (*bo_exec) (drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
/**
* Pin a buffer to the aperture and fix the offset until unpinned
*
* \param buf Buffer to pin
* \param alignment Required alignment for aperture, in bytes
*/
int (*bo_pin)(drm_intel_bo *bo, uint32_t alignment);
/**
* Unpin a buffer from the aperture, allowing it to be removed
*
* \param buf Buffer to unpin
*/
int (*bo_unpin)(drm_intel_bo *bo);
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
int (*bo_set_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode,
uint32_t stride);
/**
* Get the current tiling (and resulting swizzling) mode for the bo.
*
* \param buf Buffer to get tiling mode for
* \param tiling_mode returned tiling mode
* \param swizzle_mode returned swizzling mode
*/
int (*bo_get_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode,
uint32_t *swizzle_mode);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int (*bo_flink)(drm_intel_bo *bo, uint32_t *name);
/**
* Pin a buffer to the aperture and fix the offset until unpinned
*
* \param buf Buffer to pin
* \param alignment Required alignment for aperture, in bytes
*/
int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);
/**
* Returns 1 if mapping the buffer for write could cause the process
* to block, due to the object being active in the GPU.
*/
int (*bo_busy)(drm_intel_bo *bo);
/**
* Unpin a buffer from the aperture, allowing it to be removed
*
* \param buf Buffer to unpin
*/
int (*bo_unpin) (drm_intel_bo *bo);
int (*check_aperture_space)(drm_intel_bo **bo_array, int count);
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
/**
* Disable buffer reuse for buffers which will be shared in some way,
* as with scanout buffers. When the buffer reference count goes to zero,
* it will be freed and not placed in the reuse list.
*
* \param bo Buffer to disable reuse for
*/
int (*bo_disable_reuse)(drm_intel_bo *bo);
/**
* Get the current tiling (and resulting swizzling) mode for the bo.
*
* \param buf Buffer to get tiling mode for
* \param tiling_mode returned tiling mode
* \param swizzle_mode returned swizzling mode
*/
int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
/**
*
* Return the pipe associated with a crtc_id so that vblank
* synchronization can use the correct data in the request.
* This is only supported for KMS and gem at this point, when
* unsupported, this function returns -1 and leaves the decision
* of what to do in that case to the caller
*
* \param bufmgr the associated buffer manager
* \param crtc_id the crtc identifier
*/
int (*get_pipe_from_crtc_id)(drm_intel_bufmgr *bufmgr, int crtc_id);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int (*bo_flink) (drm_intel_bo *bo, uint32_t * name);
/** Returns true if target_bo is in the relocation tree rooted at bo. */
int (*bo_references)(drm_intel_bo *bo, drm_intel_bo *target_bo);
/**
* Returns 1 if mapping the buffer for write could cause the process
* to block, due to the object being active in the GPU.
*/
int (*bo_busy) (drm_intel_bo *bo);
int debug; /**< Enables verbose debugging printouts */
int (*check_aperture_space) (drm_intel_bo ** bo_array, int count);
/**
* Disable buffer reuse for buffers which will be shared in some way,
* as with scanout buffers. When the buffer reference count goes to
* zero, it will be freed and not placed in the reuse list.
*
* \param bo Buffer to disable reuse for
*/
int (*bo_disable_reuse) (drm_intel_bo *bo);
/**
*
* Return the pipe associated with a crtc_id so that vblank
* synchronization can use the correct data in the request.
* This is only supported for KMS and gem at this point, when
* unsupported, this function returns -1 and leaves the decision
* of what to do in that case to the caller
*
* \param bufmgr the associated buffer manager
* \param crtc_id the crtc identifier
*/
int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id);
/** Returns true if target_bo is in the relocation tree rooted at bo. */
int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);
/**< Enables verbose debugging printouts */
int debug;
};
#endif /* INTEL_BUFMGR_PRIV_H */
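This private header is the backend contract: the GEM and fake bufmgrs each embed a drm_intel_bufmgr as their first member and fill in its function pointers. A hypothetical backend skeleton (names invented, almost all hooks omitted) follows that pattern:

#include <stdlib.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"

/* Hypothetical backend skeleton: embed the vtable struct first so the
 * public drm_intel_bufmgr pointer can be cast back to the private type,
 * then assign the hooks. Only destroy is sketched here. */
typedef struct {
        drm_intel_bufmgr bufmgr;        /* must be first */
        int fd;
} my_bufmgr;

static void my_destroy(drm_intel_bufmgr *bufmgr)
{
        free(bufmgr);
}

drm_intel_bufmgr *my_bufmgr_init(int fd)
{
        my_bufmgr *mgr = calloc(1, sizeof(*mgr));

        if (mgr == NULL)
                return NULL;
        mgr->fd = fd;
        mgr->bufmgr.debug = 0;
        mgr->bufmgr.destroy = my_destroy;
        /* bo_alloc, bo_reference, bo_map, bo_exec, ... go here too. */
        return &mgr->bufmgr;
}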


@@ -28,254 +28,244 @@
#include "xf86drm.h"
#include "mm.h"
void
mmDumpMemInfo(const struct mem_block *heap)
void mmDumpMemInfo(const struct mem_block *heap)
{
drmMsg("Memory heap %p:\n", (void *)heap);
if (heap == 0) {
drmMsg(" heap == 0\n");
} else {
const struct mem_block *p;
drmMsg("Memory heap %p:\n", (void *)heap);
if (heap == 0) {
drmMsg(" heap == 0\n");
} else {
const struct mem_block *p;
for(p = heap->next; p != heap; p = p->next) {
drmMsg(" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
p->free ? 'F':'.',
p->reserved ? 'R':'.');
}
for (p = heap->next; p != heap; p = p->next) {
drmMsg(" Offset:%08x, Size:%08x, %c%c\n", p->ofs,
p->size, p->free ? 'F' : '.',
p->reserved ? 'R' : '.');
}
drmMsg("\nFree list:\n");
drmMsg("\nFree list:\n");
for(p = heap->next_free; p != heap; p = p->next_free) {
drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
p->free ? 'F':'.',
p->reserved ? 'R':'.');
}
for (p = heap->next_free; p != heap; p = p->next_free) {
drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n", p->ofs,
p->size, p->free ? 'F' : '.',
p->reserved ? 'R' : '.');
}
}
drmMsg("End of memory blocks\n");
}
drmMsg("End of memory blocks\n");
}
struct mem_block *
mmInit(int ofs, int size)
struct mem_block *mmInit(int ofs, int size)
{
struct mem_block *heap, *block;
if (size <= 0)
return NULL;
struct mem_block *heap, *block;
heap = (struct mem_block *) calloc(1, sizeof(struct mem_block));
if (!heap)
return NULL;
block = (struct mem_block *) calloc(1, sizeof(struct mem_block));
if (!block) {
free(heap);
return NULL;
}
if (size <= 0)
return NULL;
heap->next = block;
heap->prev = block;
heap->next_free = block;
heap->prev_free = block;
heap = (struct mem_block *)calloc(1, sizeof(struct mem_block));
if (!heap)
return NULL;
block->heap = heap;
block->next = heap;
block->prev = heap;
block->next_free = heap;
block->prev_free = heap;
block = (struct mem_block *)calloc(1, sizeof(struct mem_block));
if (!block) {
free(heap);
return NULL;
}
block->ofs = ofs;
block->size = size;
block->free = 1;
heap->next = block;
heap->prev = block;
heap->next_free = block;
heap->prev_free = block;
return heap;
block->heap = heap;
block->next = heap;
block->prev = heap;
block->next_free = heap;
block->prev_free = heap;
block->ofs = ofs;
block->size = size;
block->free = 1;
return heap;
}
static struct mem_block *
SliceBlock(struct mem_block *p,
int startofs, int size,
int reserved, int alignment)
static struct mem_block *SliceBlock(struct mem_block *p,
int startofs, int size,
int reserved, int alignment)
{
struct mem_block *newblock;
struct mem_block *newblock;
/* break left [p, newblock, p->next], then p = newblock */
if (startofs > p->ofs) {
newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
if (!newblock)
return NULL;
newblock->ofs = startofs;
newblock->size = p->size - (startofs - p->ofs);
newblock->free = 1;
newblock->heap = p->heap;
/* break left [p, newblock, p->next], then p = newblock */
if (startofs > p->ofs) {
newblock =
(struct mem_block *)calloc(1, sizeof(struct mem_block));
if (!newblock)
return NULL;
newblock->ofs = startofs;
newblock->size = p->size - (startofs - p->ofs);
newblock->free = 1;
newblock->heap = p->heap;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
p->next = newblock;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
p->next = newblock;
newblock->next_free = p->next_free;
newblock->prev_free = p;
p->next_free->prev_free = newblock;
p->next_free = newblock;
newblock->next_free = p->next_free;
newblock->prev_free = p;
p->next_free->prev_free = newblock;
p->next_free = newblock;
p->size -= newblock->size;
p = newblock;
}
p->size -= newblock->size;
p = newblock;
}
/* break right, also [p, newblock, p->next] */
if (size < p->size) {
newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
if (!newblock)
return NULL;
newblock->ofs = startofs + size;
newblock->size = p->size - size;
newblock->free = 1;
newblock->heap = p->heap;
/* break right, also [p, newblock, p->next] */
if (size < p->size) {
newblock =
(struct mem_block *)calloc(1, sizeof(struct mem_block));
if (!newblock)
return NULL;
newblock->ofs = startofs + size;
newblock->size = p->size - size;
newblock->free = 1;
newblock->heap = p->heap;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
p->next = newblock;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
p->next = newblock;
newblock->next_free = p->next_free;
newblock->prev_free = p;
p->next_free->prev_free = newblock;
p->next_free = newblock;
p->size = size;
}
newblock->next_free = p->next_free;
newblock->prev_free = p;
p->next_free->prev_free = newblock;
p->next_free = newblock;
/* p = middle block */
p->free = 0;
p->size = size;
}
/* Remove p from the free list:
*/
p->next_free->prev_free = p->prev_free;
p->prev_free->next_free = p->next_free;
/* p = middle block */
p->free = 0;
p->next_free = 0;
p->prev_free = 0;
/* Remove p from the free list:
*/
p->next_free->prev_free = p->prev_free;
p->prev_free->next_free = p->next_free;
p->reserved = reserved;
return p;
p->next_free = 0;
p->prev_free = 0;
p->reserved = reserved;
return p;
}
struct mem_block *
mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
struct mem_block *mmAllocMem(struct mem_block *heap, int size, int align2,
int startSearch)
{
struct mem_block *p;
const int mask = (1 << align2)-1;
int startofs = 0;
int endofs;
struct mem_block *p;
const int mask = (1 << align2) - 1;
int startofs = 0;
int endofs;
if (!heap || align2 < 0 || size <= 0)
return NULL;
if (!heap || align2 < 0 || size <= 0)
return NULL;
for (p = heap->next_free; p != heap; p = p->next_free) {
assert(p->free);
for (p = heap->next_free; p != heap; p = p->next_free) {
assert(p->free);
startofs = (p->ofs + mask) & ~mask;
if ( startofs < startSearch ) {
startofs = startSearch;
}
endofs = startofs+size;
if (endofs <= (p->ofs+p->size))
break;
}
startofs = (p->ofs + mask) & ~mask;
if (startofs < startSearch) {
startofs = startSearch;
}
endofs = startofs + size;
if (endofs <= (p->ofs + p->size))
break;
}
if (p == heap)
return NULL;
if (p == heap)
return NULL;
assert(p->free);
p = SliceBlock(p,startofs,size,0,mask+1);
assert(p->free);
p = SliceBlock(p, startofs, size, 0, mask + 1);
return p;
return p;
}
struct mem_block *
mmFindBlock(struct mem_block *heap, int start)
struct mem_block *mmFindBlock(struct mem_block *heap, int start)
{
struct mem_block *p;
struct mem_block *p;
for (p = heap->next; p != heap; p = p->next) {
if (p->ofs == start)
return p;
}
for (p = heap->next; p != heap; p = p->next) {
if (p->ofs == start)
return p;
}
return NULL;
return NULL;
}
static int
Join2Blocks(struct mem_block *p)
static int Join2Blocks(struct mem_block *p)
{
/* XXX there should be some assertions here */
/* XXX there should be some assertions here */
/* NOTE: heap->free == 0 */
/* NOTE: heap->free == 0 */
if (p->free && p->next->free) {
struct mem_block *q = p->next;
if (p->free && p->next->free) {
struct mem_block *q = p->next;
assert(p->ofs + p->size == q->ofs);
p->size += q->size;
assert(p->ofs + p->size == q->ofs);
p->size += q->size;
p->next = q->next;
q->next->prev = p;
p->next = q->next;
q->next->prev = p;
q->next_free->prev_free = q->prev_free;
q->prev_free->next_free = q->next_free;
free(q);
return 1;
}
return 0;
q->next_free->prev_free = q->prev_free;
q->prev_free->next_free = q->next_free;
free(q);
return 1;
}
return 0;
}
int
mmFreeMem(struct mem_block *b)
int mmFreeMem(struct mem_block *b)
{
if (!b)
return 0;
if (!b)
return 0;
if (b->free) {
drmMsg("block already free\n");
return -1;
}
if (b->reserved) {
drmMsg("block is reserved\n");
return -1;
}
if (b->free) {
drmMsg("block already free\n");
return -1;
}
if (b->reserved) {
drmMsg("block is reserved\n");
return -1;
}
b->free = 1;
b->next_free = b->heap->next_free;
b->prev_free = b->heap;
b->next_free->prev_free = b;
b->prev_free->next_free = b;
b->free = 1;
b->next_free = b->heap->next_free;
b->prev_free = b->heap;
b->next_free->prev_free = b;
b->prev_free->next_free = b;
Join2Blocks(b);
if (b->prev != b->heap)
Join2Blocks(b->prev);
Join2Blocks(b);
if (b->prev != b->heap)
Join2Blocks(b->prev);
return 0;
return 0;
}
void
mmDestroy(struct mem_block *heap)
void mmDestroy(struct mem_block *heap)
{
struct mem_block *p;
struct mem_block *p;
if (!heap)
return;
if (!heap)
return;
for (p = heap->next; p != heap; ) {
struct mem_block *next = p->next;
free(p);
p = next;
}
for (p = heap->next; p != heap;) {
struct mem_block *next = p->next;
free(p);
p = next;
}
free(heap);
free(heap);
}
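The file above is a small range allocator over offsets [ofs, ofs + size); note that mmAllocMem takes its alignment as a power-of-two exponent, since it builds the mask as (1 << align2) - 1. A hypothetical usage sketch:

#include <stdio.h>
#include "mm.h"

/* Hypothetical sketch of the heap API above, managing a 1 MB range of
 * offsets starting at 0. */
int main(void)
{
        struct mem_block *heap = mmInit(0, 1024 * 1024);
        struct mem_block *blk;

        if (heap == NULL)
                return 1;

        /* 64 KB block aligned to 4096 bytes: align2 = 12, i.e. 1 << 12. */
        blk = mmAllocMem(heap, 64 * 1024, 12, 0);
        if (blk != NULL) {
                printf("allocated at offset 0x%08x\n", blk->ofs);
                mmFreeMem(blk);         /* back onto the free list */
        }

        mmDestroy(heap);                /* frees every block and the heap */
        return 0;
}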


@@ -21,23 +21,21 @@
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* Memory manager code. Primarily used by device drivers to manage texture
* heaps, etc.
*/
#ifndef MM_H
#define MM_H
struct mem_block {
struct mem_block *next, *prev;
struct mem_block *next_free, *prev_free;
struct mem_block *heap;
int ofs,size;
unsigned int free:1;
unsigned int reserved:1;
struct mem_block *next, *prev;
struct mem_block *next_free, *prev_free;
struct mem_block *heap;
int ofs, size;
unsigned int free:1;
unsigned int reserved:1;
};
/* Rename the variables in the drm copy of this code so that it doesn't
@@ -67,7 +65,7 @@ extern struct mem_block *mmInit(int ofs, int size);
* return: pointer to the allocated block, 0 if error
*/
extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
int align2, int startSearch);
int align2, int startSearch);
/**
* Free block starts at offset