intel: Reformat to the kernel coding style. Welcome to the 8-space future.

This is done with:

    Lindent *.[ch]
    perl -pi -e 's|drm_intel_bo \* |drm_intel_bo *|g' *.[ch]
    perl -pi -e 's|drm_intel_bufmgr \* |drm_intel_bufmgr *|g' *.[ch]
    perl -pi -e 's|drm_intel_bo_gem \* |drm_intel_bo_gem *|g' *.[ch]
    perl -pi -e 's|drm_intel_bufmgr_gem \* |drm_intel_bufmgr_gem *|g' *.[ch]
    perl -pi -e 's|_fake \* |_fake *|g' *.[ch]

plus hand-editing to whack indented comments into line and other touchups.
parent 3c9bd068e0
commit d70d60529f
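
For reference, the commands quoted in the commit message can be collected into a small script so the pass is easy to rerun. The sketch below is only a convenience wrapper inferred from that message: the script itself, the loop over type names, and the `set -e` guard are assumptions and not part of the commit, and the manual touch-ups the message mentions are not automated here.

    #!/bin/sh
    # Sketch of the reformatting pass described in the commit message.
    # Assumes the Linux kernel's Lindent wrapper (scripts/Lindent) and perl
    # are on PATH; run from the directory holding the intel bufmgr sources.
    set -e

    # Kernel-style indentation for all C sources and headers.
    Lindent *.[ch]

    # Lindent leaves a space between the type name and '*'; squash
    # "type * foo" back to "type *foo" for the driver's own types.
    for t in drm_intel_bo drm_intel_bufmgr drm_intel_bo_gem drm_intel_bufmgr_gem _fake; do
        perl -pi -e "s|$t \\* |$t *|g" *.[ch]
    done

    # The hand-edits mentioned in the message (re-wrapping indented
    # comments and other touchups) are not automated here.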

@@ -42,7 +42,9 @@
#define HAS_ATOMIC_OPS 1

typedef struct { int atomic; } atomic_t;
typedef struct {
int atomic;
} atomic_t;

# define atomic_read(x) ((x)->atomic)
# define atomic_set(x, val) ((x)->atomic = (val))

@@ -44,28 +44,26 @@
* Convenience functions for buffer management methods.
*/

drm_intel_bo *
drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}

drm_intel_bo *
drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}

void
drm_intel_bo_reference(drm_intel_bo *bo)
void drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}

void
drm_intel_bo_unreference(drm_intel_bo *bo)
void drm_intel_bo_unreference(drm_intel_bo *bo)
{
if (bo == NULL)
return;

@@ -73,14 +71,12 @@ drm_intel_bo_unreference(drm_intel_bo *bo)
bo->bufmgr->bo_unreference(bo);
}

int
drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
int drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}

int
drm_intel_bo_unmap(drm_intel_bo *buf)
int drm_intel_bo_unmap(drm_intel_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}

@@ -123,40 +119,34 @@ drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
return 0;
}

void
drm_intel_bo_wait_rendering(drm_intel_bo *bo)
void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}

void
drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}

int
drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4)
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}

void
drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
}

int
drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count)
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}

int
drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name)
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);

@@ -174,8 +164,7 @@ drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
read_domains, write_domain);
}

int
drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
return bo->bufmgr->bo_pin(bo, alignment);

@@ -183,8 +172,7 @@ drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
return -ENODEV;
}

int
drm_intel_bo_unpin(drm_intel_bo *bo)
int drm_intel_bo_unpin(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_unpin)
return bo->bufmgr->bo_unpin(bo);

@@ -227,17 +215,14 @@ int drm_intel_bo_busy(drm_intel_bo *bo)
return 0;
}

int
drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
return bo->bufmgr->bo_references(bo, target_bo);
}

int
drm_intel_get_pipe_from_crtc_id (drm_intel_bufmgr *bufmgr, int crtc_id)
int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
if (bufmgr->get_pipe_from_crtc_id)
return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
return -1;
}

@@ -47,6 +47,7 @@ struct _drm_intel_bo {
* allocation, such as being aligned to page size.
*/
unsigned long size;

/**
* Alignment requirement for object
*

@@ -55,12 +56,14 @@ struct _drm_intel_bo {
unsigned long align;

/**
* Card virtual address (offset from the beginning of the aperture) for the
* object. Only valid while validated.
* Card virtual address (offset from the beginning of the aperture)
* for the object. Only valid while validated.
*/
unsigned long offset;

/**
* Virtual address for accessing the buffer data. Only valid while mapped.
* Virtual address for accessing the buffer data. Only valid while
* mapped.
*/
void *virtual;

@@ -93,8 +96,7 @@ void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
int drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
drm_clip_rect_t * cliprects, int num_cliprects, int DR4);
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);

int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,

@@ -129,9 +131,11 @@ drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
unsigned long low_offset,
void *low_virtual,
unsigned long size,
volatile unsigned int *last_dispatch);
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
volatile unsigned int *last_dispatch);
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
int (*exec) (drm_intel_bo *bo,
unsigned int used,

@@ -144,10 +148,11 @@ void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
void *priv);
drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long offset, unsigned long size,
void *virtual);
unsigned long offset,
unsigned long size, void *virtual);
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
void (*invalidate_cb)(drm_intel_bo *bo,
void (*invalidate_cb) (drm_intel_bo
* bo,
void *ptr),
void *ptr);

@@ -196,4 +201,3 @@ void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
/** @{ */

#endif /* INTEL_BUFMGR_H */

@ -72,13 +72,14 @@
|
|||
*/
|
||||
#define MAX_RELOCS 4096
|
||||
|
||||
struct fake_buffer_reloc
|
||||
{
|
||||
struct fake_buffer_reloc {
|
||||
/** Buffer object that the relocation points at. */
|
||||
drm_intel_bo *target_buf;
|
||||
/** Offset of the relocation entry within reloc_buf. */
|
||||
uint32_t offset;
|
||||
/** Cached value of the offset when we last performed this relocation. */
|
||||
/**
|
||||
* Cached value of the offset when we last performed this relocation.
|
||||
*/
|
||||
uint32_t last_target_offset;
|
||||
/** Value added to target_buf's offset to get the relocation entry. */
|
||||
uint32_t delta;
|
||||
|
@ -98,8 +99,8 @@ struct block {
|
|||
*/
|
||||
unsigned on_hardware:1;
|
||||
/**
|
||||
* Marks that the block is currently fenced (being used by rendering) and
|
||||
* can't be freed until @fence is passed.
|
||||
* Marks that the block is currently fenced (being used by rendering)
|
||||
* and can't be freed until @fence is passed.
|
||||
*/
|
||||
unsigned fenced:1;
|
||||
|
||||
|
@ -129,11 +130,13 @@ typedef struct _bufmgr_fake {
|
|||
*/
|
||||
struct block on_hardware;
|
||||
/**
|
||||
* List of blocks which are in the GART and have an active fence on them.
|
||||
* List of blocks which are in the GART and have an active fence on
|
||||
* them.
|
||||
*/
|
||||
struct block fenced;
|
||||
/**
|
||||
* List of blocks which have an expired fence and are ready to be evicted.
|
||||
* List of blocks which have an expired fence and are ready to be
|
||||
* evicted.
|
||||
*/
|
||||
struct block lru;
|
||||
|
||||
|
@ -168,7 +171,9 @@ typedef struct _bufmgr_fake {
|
|||
|
||||
/** Driver-supplied argument to driver callbacks */
|
||||
void *driver_priv;
|
||||
/* Pointer to kernel-updated sarea data for the last completed user irq */
|
||||
/**
|
||||
* Pointer to kernel-updated sarea data for the last completed user irq
|
||||
*/
|
||||
volatile int *last_dispatch;
|
||||
|
||||
int fd;
|
||||
|
@ -185,12 +190,14 @@ typedef struct _drm_intel_bo_fake {
|
|||
const char *name;
|
||||
|
||||
unsigned dirty:1;
|
||||
/** has the card written to this buffer - we may need to copy it back */
|
||||
/**
|
||||
* has the card written to this buffer - we may need to copy it back
|
||||
*/
|
||||
unsigned card_dirty:1;
|
||||
unsigned int refcount;
|
||||
/* Flags may consist of any of the DRM_BO flags, plus
|
||||
* DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
|
||||
* driver private flags.
|
||||
* DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
|
||||
* first two driver private flags.
|
||||
*/
|
||||
uint64_t flags;
|
||||
/** Cache domains the target buffer is read into. */
|
||||
|
@ -223,7 +230,8 @@ static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
|
|||
|
||||
#define MAXFENCE 0x7fffffff
|
||||
|
||||
static int FENCE_LTE( unsigned a, unsigned b )
|
||||
static int
|
||||
FENCE_LTE(unsigned a, unsigned b)
|
||||
{
|
||||
if (a == b)
|
||||
return 1;
|
||||
|
@ -237,7 +245,8 @@ static int FENCE_LTE( unsigned a, unsigned b )
|
|||
return 0;
|
||||
}
|
||||
|
||||
void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
|
||||
void
|
||||
drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
|
||||
unsigned int (*emit) (void *priv),
|
||||
void (*wait) (unsigned int fence,
|
||||
void *priv),
|
||||
|
@ -292,8 +301,8 @@ _fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
|
|||
iw.irq_seq = seq;
|
||||
|
||||
/* The kernel IRQ_WAIT implementation is all sorts of broken.
|
||||
* 1) It returns 1 to 0x7fffffff instead of using the full 32-bit unsigned
|
||||
* range.
|
||||
* 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
|
||||
* unsigned range.
|
||||
* 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
|
||||
* signed range.
|
||||
* 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
|
||||
|
@ -301,68 +310,70 @@ _fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
|
|||
* 4) It returns -EBUSY in 3 seconds even if the hardware is still
|
||||
* successfully chewing through buffers.
|
||||
*
|
||||
* Assume that in userland we treat sequence numbers as ints, which makes
|
||||
* some of the comparisons convenient, since the sequence numbers are
|
||||
* all positive signed integers.
|
||||
* Assume that in userland we treat sequence numbers as ints, which
|
||||
* makes some of the comparisons convenient, since the sequence
|
||||
* numbers are all positive signed integers.
|
||||
*
|
||||
* From this we get several cases we need to handle. Here's a timeline.
|
||||
* 0x2 0x7 0x7ffffff8 0x7ffffffd
|
||||
* | | | |
|
||||
* -------------------------------------------------------------------
|
||||
* ------------------------------------------------------------
|
||||
*
|
||||
* A) Normal wait for hw to catch up
|
||||
* hw_seq seq
|
||||
* | |
|
||||
* -------------------------------------------------------------------
|
||||
* seq - hw_seq = 5. If we call IRQ_WAIT, it will wait for hw to catch up.
|
||||
* ------------------------------------------------------------
|
||||
* seq - hw_seq = 5. If we call IRQ_WAIT, it will wait for hw to
|
||||
* catch up.
|
||||
*
|
||||
* B) Normal wait for a sequence number that's already passed.
|
||||
* seq hw_seq
|
||||
* | |
|
||||
* -------------------------------------------------------------------
|
||||
* ------------------------------------------------------------
|
||||
* seq - hw_seq = -5. If we call IRQ_WAIT, it returns 0 quickly.
|
||||
*
|
||||
* C) Hardware has already wrapped around ahead of us
|
||||
* hw_seq seq
|
||||
* | |
|
||||
* -------------------------------------------------------------------
|
||||
* ------------------------------------------------------------
|
||||
* seq - hw_seq = 0x80000000 - 5. If we called IRQ_WAIT, it would wait
|
||||
* for hw_seq >= seq, which may never occur. Thus, we want to catch this
|
||||
* in userland and return 0.
|
||||
* for hw_seq >= seq, which may never occur. Thus, we want to catch
|
||||
* this in userland and return 0.
|
||||
*
|
||||
* D) We've wrapped around ahead of the hardware.
|
||||
* seq hw_seq
|
||||
* | |
|
||||
* -------------------------------------------------------------------
|
||||
* seq - hw_seq = -(0x80000000 - 5). If we called IRQ_WAIT, it would return
|
||||
* 0 quickly because hw_seq >= seq, even though the hardware isn't caught up.
|
||||
* Thus, we need to catch this early return in userland and bother the
|
||||
* kernel until the hardware really does catch up.
|
||||
* ------------------------------------------------------------
|
||||
* seq - hw_seq = -(0x80000000 - 5). If we called IRQ_WAIT, it would
|
||||
* return 0 quickly because hw_seq >= seq, even though the hardware
|
||||
* isn't caught up. Thus, we need to catch this early return in
|
||||
* userland and bother the kernel until the hardware really does
|
||||
* catch up.
|
||||
*
|
||||
* E) Hardware might wrap after we test in userland.
|
||||
* hw_seq seq
|
||||
* | |
|
||||
* -------------------------------------------------------------------
|
||||
* seq - hw_seq = 5. If we call IRQ_WAIT, it will likely see seq >= hw_seq
|
||||
* and wait. However, suppose hw_seq wraps before we make it into the
|
||||
* kernel. The kernel sees hw_seq >= seq and waits for 3 seconds then
|
||||
* returns -EBUSY. This is case C). We should catch this and then return
|
||||
* successfully.
|
||||
* ------------------------------------------------------------
|
||||
* seq - hw_seq = 5. If we call IRQ_WAIT, it will likely see seq >=
|
||||
* hw_seq and wait. However, suppose hw_seq wraps before we make it
|
||||
* into the kernel. The kernel sees hw_seq >= seq and waits for 3
|
||||
* seconds then returns -EBUSY. This is case C). We should catch
|
||||
* this and then return successfully.
|
||||
*
|
||||
* F) Hardware might take a long time on a buffer.
|
||||
* hw_seq seq
|
||||
* | |
|
||||
* -------------------------------------------------------------------
|
||||
* seq - hw_seq = 5. If we call IRQ_WAIT, if sequence 2 through 5 take too
|
||||
* long, it will return -EBUSY. Batchbuffers in the gltestperf demo were
|
||||
* seen to take up to 7 seconds. We should catch early -EBUSY return
|
||||
* and keep trying.
|
||||
* seq - hw_seq = 5. If we call IRQ_WAIT, if sequence 2 through 5
|
||||
* take too long, it will return -EBUSY. Batchbuffers in the
|
||||
* gltestperf demo were seen to take up to 7 seconds. We should
|
||||
* catch early -EBUSY return and keep trying.
|
||||
*/
|
||||
|
||||
do {
|
||||
/* Keep a copy of last_dispatch so that if the wait -EBUSYs because the
|
||||
* hardware didn't catch up in 3 seconds, we can see if it at least made
|
||||
* progress and retry.
|
||||
/* Keep a copy of last_dispatch so that if the wait -EBUSYs
|
||||
* because the hardware didn't catch up in 3 seconds, we can
|
||||
* see if it at least made progress and retry.
|
||||
*/
|
||||
hw_seq = *bufmgr_fake->last_dispatch;
|
||||
|
||||
|
@ -377,7 +388,8 @@ _fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
|
|||
-0x40000000);
|
||||
|
||||
/* Catch case E */
|
||||
if (ret == -EBUSY && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
|
||||
if (ret == -EBUSY
|
||||
&& (seq - *bufmgr_fake->last_dispatch > 0x40000000))
|
||||
ret = 0;
|
||||
|
||||
/* Catch case F: Allow up to 15 seconds chewing on one buffer. */
|
||||
|
@ -389,8 +401,8 @@ _fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
|
|||
(ret == -EBUSY && busy_count < 5));
|
||||
|
||||
if (ret != 0) {
|
||||
drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__, __LINE__,
|
||||
strerror(-ret));
|
||||
drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
|
||||
__LINE__, strerror(-ret));
|
||||
abort();
|
||||
}
|
||||
clear_fenced(bufmgr_fake, seq);
|
||||
|
@ -411,7 +423,8 @@ static int
|
|||
alloc_block(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake= (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
struct block *block = (struct block *)calloc(sizeof *block, 1);
|
||||
unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
|
||||
unsigned int sz;
|
||||
|
@ -429,8 +442,7 @@ alloc_block(drm_intel_bo *bo)
|
|||
|
||||
DRMINITLISTHEAD(block);
|
||||
|
||||
/* Insert at head or at tail???
|
||||
*/
|
||||
/* Insert at head or at tail??? */
|
||||
DRMLISTADDTAIL(block, &bufmgr_fake->lru);
|
||||
|
||||
block->virtual = (uint8_t *) bufmgr_fake->virtual +
|
||||
|
@ -444,11 +456,13 @@ alloc_block(drm_intel_bo *bo)
|
|||
|
||||
/* Release the card storage associated with buf:
|
||||
*/
|
||||
static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
|
||||
static void
|
||||
free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
|
||||
int skip_dirty_copy)
|
||||
{
|
||||
drm_intel_bo_fake *bo_fake;
|
||||
DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
|
||||
DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
|
||||
block->on_hardware, block->fenced);
|
||||
|
||||
if (!block)
|
||||
return;
|
||||
|
@ -466,11 +480,9 @@ static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
|
|||
|
||||
if (block->on_hardware) {
|
||||
block->bo = NULL;
|
||||
}
|
||||
else if (block->fenced) {
|
||||
} else if (block->fenced) {
|
||||
block->bo = NULL;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
DBG(" - free immediately\n");
|
||||
DRMLISTDEL(block);
|
||||
|
||||
|
@ -482,14 +494,16 @@ static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
|
|||
static void
|
||||
alloc_backing_store(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
assert(!bo_fake->backing_store);
|
||||
assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
|
||||
|
||||
bo_fake->backing_store = malloc(bo->size);
|
||||
|
||||
DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size);
|
||||
DBG("alloc_backing - buf %d %p %d\n", bo_fake->id,
|
||||
bo_fake->backing_store, bo->size);
|
||||
assert(bo_fake->backing_store);
|
||||
}
|
||||
|
||||
|
@ -508,10 +522,12 @@ free_backing_store(drm_intel_bo *bo)
|
|||
static void
|
||||
set_dirty(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
|
||||
if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
|
||||
if (bo_fake->flags & BM_NO_BACKING_STORE
|
||||
&& bo_fake->invalidate_cb != NULL)
|
||||
bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
|
||||
|
||||
assert(!(bo_fake->flags & BM_PINNED));
|
||||
|
@ -533,7 +549,8 @@ evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
|
|||
if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
|
||||
continue;
|
||||
|
||||
if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence))
|
||||
if (block->fence && max_fence && !FENCE_LTE(block->fence,
|
||||
max_fence))
|
||||
return 0;
|
||||
|
||||
set_dirty(&bo_fake->bo);
|
||||
|
@ -572,8 +589,8 @@ evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
|
|||
/**
|
||||
* Removes all objects from the fenced list older than the given fence.
|
||||
*/
|
||||
static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
|
||||
unsigned int fence_cookie)
|
||||
static int
|
||||
clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
|
||||
{
|
||||
struct block *block, *tmp;
|
||||
int ret = 0;
|
||||
|
@ -592,8 +609,7 @@ static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
|
|||
DRMLISTDEL(block);
|
||||
mmFreeMem(block->mem);
|
||||
free(block);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
DBG("return to lru: offset %x sz %x\n",
|
||||
block->mem->ofs, block->mem->size);
|
||||
DRMLISTDEL(block);
|
||||
|
@ -601,13 +617,13 @@ static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
|
|||
}
|
||||
|
||||
ret = 1;
|
||||
}
|
||||
else {
|
||||
/* Blocks are ordered by fence, so if one fails, all from
|
||||
* here will fail also:
|
||||
} else {
|
||||
/* Blocks are ordered by fence, so if one fails, all
|
||||
* from here will fail also:
|
||||
*/
|
||||
DBG("fence not passed: offset %x sz %x %d %d \n",
|
||||
block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence);
|
||||
block->mem->ofs, block->mem->size, block->fence,
|
||||
bufmgr_fake->last_fence);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -616,13 +632,14 @@ static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
|
||||
static void
|
||||
fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
|
||||
{
|
||||
struct block *block, *tmp;
|
||||
|
||||
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
|
||||
DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block,
|
||||
block->mem->size, block->mem->ofs, block->bo, fence);
|
||||
DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
|
||||
block, block->mem->size, block->mem->ofs, block->bo, fence);
|
||||
block->fence = fence;
|
||||
|
||||
block->on_hardware = 0;
|
||||
|
@ -637,9 +654,11 @@ static void fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
|
|||
assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
|
||||
}
|
||||
|
||||
static int evict_and_alloc_block(drm_intel_bo *bo)
|
||||
static int
|
||||
evict_and_alloc_block(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
|
||||
assert(bo_fake->block == NULL);
|
||||
|
@ -724,7 +743,8 @@ drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
|
|||
static void
|
||||
drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
|
||||
if (bo_fake->block == NULL || !bo_fake->block->fenced)
|
||||
|
@ -736,7 +756,8 @@ drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
|
|||
static void
|
||||
drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
|
||||
pthread_mutex_lock(&bufmgr_fake->lock);
|
||||
drm_intel_fake_bo_wait_rendering_locked(bo);
|
||||
|
@ -779,8 +800,10 @@ drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
|
|||
}
|
||||
|
||||
static drm_intel_bo *
|
||||
drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
|
||||
unsigned long size, unsigned int alignment)
|
||||
drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
|
||||
const char *name,
|
||||
unsigned long size,
|
||||
unsigned int alignment)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake;
|
||||
drm_intel_bo_fake *bo_fake;
|
||||
|
@ -816,9 +839,10 @@ drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
|
|||
}
|
||||
|
||||
drm_intel_bo *
|
||||
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
|
||||
unsigned long offset, unsigned long size,
|
||||
void *virtual)
|
||||
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
|
||||
const char *name,
|
||||
unsigned long offset,
|
||||
unsigned long size, void *virtual)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake;
|
||||
drm_intel_bo_fake *bo_fake;
|
||||
|
@ -841,8 +865,8 @@ drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
|
|||
bo_fake->flags = BM_PINNED;
|
||||
bo_fake->is_static = 1;
|
||||
|
||||
DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
|
||||
bo_fake->bo.size / 1024);
|
||||
DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id,
|
||||
bo_fake->name, bo_fake->bo.size / 1024);
|
||||
|
||||
return &bo_fake->bo;
|
||||
}
|
||||
|
@ -850,7 +874,8 @@ drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
|
|||
static void
|
||||
drm_intel_fake_bo_reference(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
|
||||
pthread_mutex_lock(&bufmgr_fake->lock);
|
||||
|
@ -869,7 +894,8 @@ drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
|
|||
static void
|
||||
drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
int i;
|
||||
|
||||
|
@ -881,9 +907,11 @@ drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
|
|||
free_backing_store(bo);
|
||||
|
||||
for (i = 0; i < bo_fake->nr_relocs; i++)
|
||||
drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);
|
||||
drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
|
||||
target_buf);
|
||||
|
||||
DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
|
||||
DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
|
||||
bo_fake->name);
|
||||
|
||||
free(bo_fake->relocs);
|
||||
free(bo);
|
||||
|
@ -893,7 +921,8 @@ drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
|
|||
static void
|
||||
drm_intel_fake_bo_unreference(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
|
||||
pthread_mutex_lock(&bufmgr_fake->lock);
|
||||
drm_intel_fake_bo_unreference_locked(bo);
|
||||
|
@ -904,12 +933,14 @@ drm_intel_fake_bo_unreference(drm_intel_bo *bo)
|
|||
* Set the buffer as not requiring backing store, and instead get the callback
|
||||
* invoked whenever it would be set dirty.
|
||||
*/
|
||||
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
|
||||
void
|
||||
drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
|
||||
void (*invalidate_cb) (drm_intel_bo *bo,
|
||||
void *ptr),
|
||||
void *ptr)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
|
||||
pthread_mutex_lock(&bufmgr_fake->lock);
|
||||
|
@ -941,7 +972,8 @@ void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
|
|||
static int
|
||||
drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
|
||||
/* Static buffers are always mapped. */
|
||||
|
@ -961,33 +993,31 @@ drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
|
|||
return 0;
|
||||
|
||||
{
|
||||
DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
|
||||
bo_fake->bo.size / 1024);
|
||||
DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id,
|
||||
bo_fake->name, bo_fake->bo.size / 1024);
|
||||
|
||||
if (bo->virtual != NULL) {
|
||||
drmMsg("%s: already mapped\n", __FUNCTION__);
|
||||
abort();
|
||||
}
|
||||
else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) {
|
||||
} else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {
|
||||
|
||||
if (!bo_fake->block && !evict_and_alloc_block(bo)) {
|
||||
DBG("%s: alloc failed\n", __FUNCTION__);
|
||||
bufmgr_fake->fail = 1;
|
||||
return 1;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
assert(bo_fake->block);
|
||||
bo_fake->dirty = 0;
|
||||
|
||||
if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
|
||||
bo_fake->block->fenced) {
|
||||
drm_intel_fake_bo_wait_rendering_locked(bo);
|
||||
drm_intel_fake_bo_wait_rendering_locked
|
||||
(bo);
|
||||
}
|
||||
|
||||
bo->virtual = bo_fake->block->virtual;
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
if (write_enable)
|
||||
set_dirty(bo);
|
||||
|
||||
|
@ -996,9 +1026,12 @@ drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
|
|||
|
||||
if ((bo_fake->card_dirty == 1) && bo_fake->block) {
|
||||
if (bo_fake->block->fenced)
|
||||
drm_intel_fake_bo_wait_rendering_locked(bo);
|
||||
drm_intel_fake_bo_wait_rendering_locked
|
||||
(bo);
|
||||
|
||||
memcpy(bo_fake->backing_store, bo_fake->block->virtual, bo_fake->block->bo->size);
|
||||
memcpy(bo_fake->backing_store,
|
||||
bo_fake->block->virtual,
|
||||
bo_fake->block->bo->size);
|
||||
bo_fake->card_dirty = 0;
|
||||
}
|
||||
|
||||
|
@ -1012,7 +1045,8 @@ drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
|
|||
static int
|
||||
drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
int ret;
|
||||
|
||||
pthread_mutex_lock(&bufmgr_fake->lock);
|
||||
|
@ -1025,7 +1059,8 @@ drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
|
|||
static int
|
||||
drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
|
||||
/* Static buffers are always mapped. */
|
||||
|
@ -1044,10 +1079,10 @@ drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
drm_intel_fake_bo_unmap(drm_intel_bo *bo)
|
||||
static int drm_intel_fake_bo_unmap(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
int ret;
|
||||
|
||||
pthread_mutex_lock(&bufmgr_fake->lock);
|
||||
|
@ -1086,8 +1121,8 @@ drm_intel_fake_bo_validate(drm_intel_bo *bo)
|
|||
|
||||
bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
|
||||
DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
|
||||
bo_fake->bo.size / 1024);
|
||||
DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id,
|
||||
bo_fake->name, bo_fake->bo.size / 1024);
|
||||
|
||||
/* Sanity check: Buffers should be unmapped before being validated.
|
||||
* This is not so much of a problem for bufmgr_fake, but TTM refuses,
|
||||
|
@ -1104,7 +1139,8 @@ drm_intel_fake_bo_validate(drm_intel_bo *bo)
|
|||
/* Allocate the card memory */
|
||||
if (!bo_fake->block && !evict_and_alloc_block(bo)) {
|
||||
bufmgr_fake->fail = 1;
|
||||
DBG("Failed to validate buf %d:%s\n", bo_fake->id, bo_fake->name);
|
||||
DBG("Failed to validate buf %d:%s\n", bo_fake->id,
|
||||
bo_fake->name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -1118,20 +1154,20 @@ drm_intel_fake_bo_validate(drm_intel_bo *bo)
|
|||
DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
|
||||
bo_fake->name, bo->size, bo_fake->block->mem->ofs);
|
||||
|
||||
assert(!(bo_fake->flags &
|
||||
(BM_NO_BACKING_STORE|BM_PINNED)));
|
||||
assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));
|
||||
|
||||
/* Actually, should be able to just wait for a fence on the memory,
|
||||
* which we would be tracking when we free it. Waiting for idle is
|
||||
* a sufficiently large hammer for now.
|
||||
/* Actually, should be able to just wait for a fence on the
|
||||
* mmory, hich we would be tracking when we free it. Waiting
|
||||
* for idle is a sufficiently large hammer for now.
|
||||
*/
|
||||
drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
|
||||
|
||||
/* we may never have mapped this BO so it might not have any backing
|
||||
* store if this happens it should be rare, but 0 the card memory
|
||||
* in any case */
|
||||
/* we may never have mapped this BO so it might not have any
|
||||
* backing store if this happens it should be rare, but 0 the
|
||||
* card memory in any case */
|
||||
if (bo_fake->backing_store)
|
||||
memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);
|
||||
memcpy(bo_fake->block->virtual, bo_fake->backing_store,
|
||||
bo->size);
|
||||
else
|
||||
memset(bo_fake->block->virtual, 0, bo->size);
|
||||
|
||||
|
@ -1176,7 +1212,8 @@ drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
|
|||
drm_intel_bo *target_bo, uint32_t target_offset,
|
||||
uint32_t read_domains, uint32_t write_domain)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
struct fake_buffer_reloc *r;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
|
||||
|
@ -1188,7 +1225,8 @@ drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
|
|||
assert(target_bo);
|
||||
|
||||
if (bo_fake->relocs == NULL) {
|
||||
bo_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
|
||||
bo_fake->relocs =
|
||||
malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
|
||||
}
|
||||
|
||||
r = &bo_fake->relocs[bo_fake->nr_relocs++];
|
||||
|
@ -1198,7 +1236,8 @@ drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
|
|||
drm_intel_fake_bo_reference_locked(target_bo);
|
||||
|
||||
if (!target_fake->is_static) {
|
||||
bo_fake->child_size += ALIGN(target_bo->size, target_fake->alignment);
|
||||
bo_fake->child_size +=
|
||||
ALIGN(target_bo->size, target_fake->alignment);
|
||||
bo_fake->child_size += target_fake->child_size;
|
||||
}
|
||||
r->target_buf = target_bo;
|
||||
|
@ -1209,7 +1248,9 @@ drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
|
|||
r->write_domain = write_domain;
|
||||
|
||||
if (bufmgr_fake->debug) {
|
||||
/* Check that a conflicting relocation hasn't already been emitted. */
|
||||
/* Check that a conflicting relocation hasn't already been
|
||||
* emitted.
|
||||
*/
|
||||
for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
|
||||
struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];
|
||||
|
||||
|
@ -1234,7 +1275,8 @@ drm_intel_fake_calculate_domains(drm_intel_bo *bo)
|
|||
|
||||
for (i = 0; i < bo_fake->nr_relocs; i++) {
|
||||
struct fake_buffer_reloc *r = &bo_fake->relocs[i];
|
||||
drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
|
||||
drm_intel_bo_fake *target_fake =
|
||||
(drm_intel_bo_fake *) r->target_buf;
|
||||
|
||||
/* Do the same for the tree of buffers we depend on */
|
||||
drm_intel_fake_calculate_domains(r->target_buf);
|
||||
|
@ -1244,11 +1286,11 @@ drm_intel_fake_calculate_domains(drm_intel_bo *bo)
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
int i, ret;
|
||||
|
||||
|
@ -1256,12 +1298,14 @@ drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
|
|||
|
||||
for (i = 0; i < bo_fake->nr_relocs; i++) {
|
||||
struct fake_buffer_reloc *r = &bo_fake->relocs[i];
|
||||
drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
|
||||
drm_intel_bo_fake *target_fake =
|
||||
(drm_intel_bo_fake *) r->target_buf;
|
||||
uint32_t reloc_data;
|
||||
|
||||
/* Validate the target buffer if that hasn't been done. */
|
||||
if (!target_fake->validated) {
|
||||
ret = drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
|
||||
ret =
|
||||
drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
|
||||
if (ret != 0) {
|
||||
if (bo->virtual != NULL)
|
||||
drm_intel_fake_bo_unmap_locked(bo);
|
||||
|
@ -1276,7 +1320,8 @@ drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
|
|||
if (bo->virtual == NULL)
|
||||
drm_intel_fake_bo_map_locked(bo, 1);
|
||||
|
||||
*(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
|
||||
*(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
|
||||
reloc_data;
|
||||
|
||||
r->last_target_offset = r->target_buf->offset;
|
||||
}
|
||||
|
@ -1300,20 +1345,23 @@ drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
|
|||
static void
|
||||
drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < bo_fake->nr_relocs; i++) {
|
||||
struct fake_buffer_reloc *r = &bo_fake->relocs[i];
|
||||
drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
|
||||
drm_intel_bo_fake *target_fake =
|
||||
(drm_intel_bo_fake *) r->target_buf;
|
||||
|
||||
if (target_fake->validated)
|
||||
drm_intel_bo_fake_post_submit(r->target_buf);
|
||||
|
||||
DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
|
||||
bo_fake->name, (uint32_t) bo->offset, r->offset,
|
||||
target_fake->name, (uint32_t)r->target_buf->offset, r->delta);
|
||||
target_fake->name, (uint32_t) r->target_buf->offset,
|
||||
r->delta);
|
||||
}
|
||||
|
||||
assert(bo_fake->map_count == 0);
|
||||
|
@ -1322,8 +1370,8 @@ drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
|
|||
bo_fake->write_domain = 0;
|
||||
}
|
||||
|
||||
|
||||
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
|
||||
void
|
||||
drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
|
||||
int (*exec) (drm_intel_bo *bo,
|
||||
unsigned int used,
|
||||
void *priv),
|
||||
|
@ -1337,10 +1385,10 @@ void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
|
|||
|
||||
static int
|
||||
drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
|
||||
drm_clip_rect_t *cliprects, int num_cliprects,
|
||||
int DR4)
|
||||
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo->bufmgr;
|
||||
drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
|
||||
struct drm_i915_batchbuffer batch;
|
||||
int ret;
|
||||
|
@ -1383,7 +1431,8 @@ drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
|
|||
batch.DR1 = 0;
|
||||
batch.DR4 = DR4;
|
||||
|
||||
if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
|
||||
if (drmCommandWrite
|
||||
(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
|
||||
sizeof(batch))) {
|
||||
drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
|
||||
pthread_mutex_unlock(&bufmgr_fake->lock);
|
||||
|
@ -1410,7 +1459,8 @@ drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
|
|||
static int
|
||||
drm_intel_fake_check_aperture_space(drm_intel_bo ** bo_array, int count)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo_array[0]->bufmgr;
|
||||
drm_intel_bufmgr_fake *bufmgr_fake =
|
||||
(drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
|
||||
unsigned int sz = 0;
|
||||
int i;
|
||||
|
||||
|
@ -1443,8 +1493,7 @@ drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
|
|||
* Used by the X Server on LeaveVT, when the card memory is no longer our
|
||||
* own.
|
||||
*/
|
||||
void
|
||||
drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
|
||||
void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
|
||||
struct block *block, *tmp;
|
||||
|
@ -1468,26 +1517,31 @@ drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
|
|||
|
||||
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
|
||||
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
|
||||
/* Releases the memory, and memcpys dirty contents out if necessary. */
|
||||
/* Releases the memory, and memcpys dirty contents out if
|
||||
* necessary.
|
||||
*/
|
||||
free_block(bufmgr_fake, block, 0);
|
||||
bo_fake->block = NULL;
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&bufmgr_fake->lock);
|
||||
}
|
||||
|
||||
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
|
||||
volatile unsigned int *last_dispatch)
|
||||
volatile unsigned int
|
||||
*last_dispatch)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
|
||||
|
||||
bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
|
||||
}
|
||||
|
||||
drm_intel_bufmgr *
|
||||
drm_intel_bufmgr_fake_init(int fd,
|
||||
unsigned long low_offset, void *low_virtual,
|
||||
drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
|
||||
unsigned long low_offset,
|
||||
void *low_virtual,
|
||||
unsigned long size,
|
||||
volatile unsigned int *last_dispatch)
|
||||
volatile unsigned int
|
||||
*last_dispatch)
|
||||
{
|
||||
drm_intel_bufmgr_fake *bufmgr_fake;
|
||||
|
||||
|
@ -1515,11 +1569,13 @@ drm_intel_bufmgr_fake_init(int fd,
|
|||
bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
|
||||
bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
|
||||
bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
|
||||
bufmgr_fake->bufmgr.bo_wait_rendering = drm_intel_fake_bo_wait_rendering;
|
||||
bufmgr_fake->bufmgr.bo_wait_rendering =
|
||||
drm_intel_fake_bo_wait_rendering;
|
||||
bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
|
||||
bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
|
||||
bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
|
||||
bufmgr_fake->bufmgr.check_aperture_space = drm_intel_fake_check_aperture_space;
|
||||
bufmgr_fake->bufmgr.check_aperture_space =
|
||||
drm_intel_fake_check_aperture_space;
|
||||
bufmgr_fake->bufmgr.debug = 0;
|
||||
|
||||
bufmgr_fake->fd = fd;
|
||||
|
@ -1527,4 +1583,3 @@ drm_intel_bufmgr_fake_init(int fd,
|
|||
|
||||
return &bufmgr_fake->bufmgr;
|
||||
}
|
||||
|
||||
|
|
|
@ -104,7 +104,6 @@ struct _drm_intel_bo_gem {
|
|||
drm_intel_bo bo;
|
||||
|
||||
atomic_t refcount;
|
||||
/** Boolean whether the mmap ioctl has been called for this buffer yet. */
|
||||
uint32_t gem_handle;
|
||||
const char *name;
|
||||
|
||||
|
@ -162,10 +161,11 @@ struct _drm_intel_bo_gem {
|
|||
/**
|
||||
* Size in bytes of this buffer and its relocation descendents.
|
||||
*
|
||||
* Used to avoid costly tree walking in drm_intel_bufmgr_check_aperture in
|
||||
* the common case.
|
||||
* Used to avoid costly tree walking in
|
||||
* drm_intel_bufmgr_check_aperture in the common case.
|
||||
*/
|
||||
int reloc_tree_size;
|
||||
|
||||
/**
|
||||
* Number of potential fence registers required by this buffer and its
|
||||
* relocations.
|
||||
|
@ -187,14 +187,11 @@ static int
|
|||
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
|
||||
uint32_t stride);
|
||||
|
||||
static void
|
||||
drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
|
||||
static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
|
||||
|
||||
static void
|
||||
drm_intel_gem_bo_unreference(drm_intel_bo *bo);
|
||||
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
|
||||
|
||||
static void
|
||||
drm_intel_gem_bo_free(drm_intel_bo *bo);
|
||||
static void drm_intel_gem_bo_free(drm_intel_bo *bo);
|
||||
|
||||
static struct drm_intel_gem_bo_bucket *
|
||||
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
|
||||
|
@ -203,7 +200,8 @@ drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
|
|||
int i;
|
||||
|
||||
for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
|
||||
struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
|
||||
struct drm_intel_gem_bo_bucket *bucket =
|
||||
&bufmgr_gem->cache_bucket[i];
|
||||
if (bucket->size >= size) {
|
||||
return bucket;
|
||||
}
|
||||
|
@ -212,7 +210,8 @@ drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
|
||||
static void
|
||||
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
|
@ -221,19 +220,24 @@ static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
|
|||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
|
||||
|
||||
if (bo_gem->relocs == NULL) {
|
||||
DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
|
||||
DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
|
||||
bo_gem->name);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (j = 0; j < bo_gem->reloc_count; j++) {
|
||||
drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
|
||||
drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *)target_bo;
|
||||
drm_intel_bo_gem *target_gem =
|
||||
(drm_intel_bo_gem *) target_bo;
|
||||
|
||||
DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
|
||||
DBG("%2d: %d (%s)@0x%08llx -> "
|
||||
"%d (%s)@0x%08lx + 0x%08x\n",
|
||||
i,
|
||||
bo_gem->gem_handle, bo_gem->name,
|
||||
(unsigned long long)bo_gem->relocs[j].offset,
|
||||
target_gem->gem_handle, target_gem->name, target_bo->offset,
|
||||
target_gem->gem_handle,
|
||||
target_gem->name,
|
||||
target_bo->offset,
|
||||
bo_gem->relocs[j].delta);
|
||||
}
|
||||
}
|
||||
|
@ -295,7 +299,6 @@ drm_intel_add_validate_buffer(drm_intel_bo *bo)
|
|||
bufmgr_gem->exec_count++;
|
||||
}
|
||||
|
||||
|
||||
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
|
||||
sizeof(uint32_t))
|
||||
|
||||
|
@ -311,8 +314,7 @@ drm_intel_setup_reloc_list(drm_intel_bo *bo)
|
|||
|
||||
bo_gem->relocs = malloc(max_relocs *
|
||||
sizeof(struct drm_i915_gem_relocation_entry));
|
||||
bo_gem->reloc_target_bo = malloc(max_relocs *
|
||||
sizeof(drm_intel_bo *));
|
||||
bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -335,8 +337,7 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
|
|||
|
||||
static int
|
||||
drm_intel_gem_bo_madvise(drm_intel_bufmgr_gem *bufmgr_gem,
|
||||
drm_intel_bo_gem *bo_gem,
|
||||
int state)
|
||||
drm_intel_bo_gem *bo_gem, int state)
|
||||
{
|
||||
struct drm_i915_gem_madvise madv;
|
||||
|
||||
|
@ -356,8 +357,10 @@ drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
|
|||
while (!DRMLISTEMPTY(&bucket->head)) {
|
||||
drm_intel_bo_gem *bo_gem;
|
||||
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
|
||||
if (drm_intel_gem_bo_madvise (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
|
||||
bucket->head.next, head);
|
||||
if (drm_intel_gem_bo_madvise
|
||||
(bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
|
||||
break;
|
||||
|
||||
DRMLISTDEL(&bo_gem->head);
|
||||
|
@ -366,8 +369,10 @@ drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
|
|||
}
|
||||
|
||||
static drm_intel_bo *
|
||||
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
|
||||
unsigned long size, unsigned int alignment,
|
||||
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
|
||||
const char *name,
|
||||
unsigned long size,
|
||||
unsigned int alignment,
|
||||
int for_render)
|
||||
{
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
|
||||
|
@ -399,20 +404,23 @@ retry:
|
|||
if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
|
||||
if (for_render) {
|
||||
/* Allocate new render-target BOs from the tail (MRU)
|
||||
* of the list, as it will likely be hot in the GPU cache
|
||||
* and in the aperture for us.
|
||||
* of the list, as it will likely be hot in the GPU
|
||||
* cache and in the aperture for us.
|
||||
*/
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.prev, head);
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
|
||||
bucket->head.prev, head);
|
||||
DRMLISTDEL(&bo_gem->head);
|
||||
alloc_from_cache = 1;
|
||||
} else {
|
||||
/* For non-render-target BOs (where we're probably going to map it
|
||||
* first thing in order to fill it with data), check if the
|
||||
* last BO in the cache is unbusy, and only reuse in that case.
|
||||
* Otherwise, allocating a new buffer is probably faster than
|
||||
/* For non-render-target BOs (where we're probably
|
||||
* going to map it first thing in order to fill it
|
||||
* with data), check if the last BO in the cache is
|
||||
* unbusy, and only reuse in that case. Otherwise,
|
||||
* allocating a new buffer is probably faster than
|
||||
* waiting for the GPU to finish.
|
||||
*/
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
|
||||
bucket->head.next, head);
|
||||
if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
|
||||
alloc_from_cache = 1;
|
||||
DRMLISTDEL(&bo_gem->head);
|
||||
|
@ -420,9 +428,11 @@ retry:
|
|||
}
|
||||
|
||||
if (alloc_from_cache) {
|
||||
if(!drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
|
||||
if (!drm_intel_gem_bo_madvise
|
||||
(bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
|
||||
drm_intel_gem_bo_free(&bo_gem->bo);
|
||||
drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem, bucket);
|
||||
drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
|
||||
bucket);
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
@ -467,17 +477,23 @@ retry:
|
|||
}
|
||||
|
||||
static drm_intel_bo *
|
||||
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
|
||||
unsigned long size, unsigned int alignment)
|
||||
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
|
||||
const char *name,
|
||||
unsigned long size,
|
||||
unsigned int alignment)
|
||||
{
|
||||
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment, 1);
|
||||
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment,
|
||||
1);
|
||||
}
|
||||
|
||||
static drm_intel_bo *
|
||||
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
|
||||
unsigned long size, unsigned int alignment)
|
||||
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
|
||||
const char *name,
|
||||
unsigned long size,
|
||||
unsigned int alignment)
|
||||
{
|
||||
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment, 0);
|
||||
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment,
|
||||
0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -487,7 +503,8 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
|
|||
* to another.
|
||||
*/
|
||||
drm_intel_bo *
|
||||
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
|
||||
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
|
||||
const char *name,
|
||||
unsigned int handle)
|
||||
{
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
|
||||
|
@ -574,12 +591,14 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
|
|||
int i;
|
||||
|
||||
for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
|
||||
struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
|
||||
struct drm_intel_gem_bo_bucket *bucket =
|
||||
&bufmgr_gem->cache_bucket[i];
|
||||
|
||||
while (!DRMLISTEMPTY(&bucket->head)) {
|
||||
drm_intel_bo_gem *bo_gem;
|
||||
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
|
||||
bucket->head.next, head);
|
||||
if (time - bo_gem->free_time <= 1)
|
||||
break;
|
||||
|
||||
|
@ -590,8 +609,7 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
|
||||
static void drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
|
||||
|
@ -603,7 +621,8 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
|
|||
|
||||
/* Unreference all the target buffers */
|
||||
for (i = 0; i < bo_gem->reloc_count; i++)
|
||||
drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
|
||||
drm_intel_gem_bo_unreference_locked(bo_gem->
|
||||
reloc_target_bo[i]);
|
||||
}
|
||||
|
||||
DBG("bo_unreference final: %d (%s)\n",
|
||||
|
@ -613,8 +632,7 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
|
|||
/* Put the buffer into our internal cache for reuse if we can. */
|
||||
tiling_mode = I915_TILING_NONE;
|
||||
if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
|
||||
drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0)
|
||||
{
|
||||
drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0) {
|
||||
struct timespec time;
|
||||
|
||||
clock_gettime(CLOCK_MONOTONIC, &time);
|
||||
|
@ -626,15 +644,15 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
|
|||
|
||||
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
|
||||
|
||||
drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_DONTNEED);
|
||||
drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem,
|
||||
I915_MADV_DONTNEED);
|
||||
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
|
||||
} else {
|
||||
drm_intel_gem_bo_free(bo);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
|
||||
static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
|
||||
|
||||
@@ -643,22 +661,21 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
		drm_intel_gem_bo_unreference_final(bo);
}

static void
drm_intel_gem_bo_unreference(drm_intel_bo *bo)
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int
drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

@@ -681,9 +698,10 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
		mmap_arg.size = bo->size;
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
		if (ret != 0) {
			fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__,
				bo_gem->gem_handle, bo_gem->name, strerror(errno));
			fprintf(stderr,
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__, bo_gem->gem_handle,
				bo_gem->name, strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}

@@ -705,7 +723,8 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
			__FILE__, __LINE__, bo_gem->gem_handle,
			strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return ret;
	}
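The retry idiom used around the set-domain call above, pulled out as a standalone sketch (a generic helper for illustration, not something this file defines):

	#include <errno.h>
	#include <sys/ioctl.h>

	/* Restart an ioctl that was interrupted by a signal, as the
	 * EINTR loops in this file do. */
	static int ioctl_restarting(int fd, unsigned long request, void *arg)
	{
		int ret;

		do {
			ret = ioctl(fd, request, arg);
		} while (ret == -1 && errno == EINTR);

		return ret;
	}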
@@ -715,8 +734,7 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
	return 0;
}

int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

@@ -729,13 +747,15 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
		    bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
			    &mmap_arg);
		if (ret != 0) {
			fprintf(stderr,
				"%s:%d: Error preparing buffer map %d (%s): %s .\n",

@@ -777,7 +797,8 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)

	if (ret != 0) {
		fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
			__FILE__, __LINE__, bo_gem->gem_handle,
			strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

@@ -785,8 +806,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
	return 0;
}

int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
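A plausible caller of the GTT mapping pair above, writing through the aperture mapping rather than the CPU mmap. The helper is illustrative only:

	#include <string.h>
	#include "intel_bufmgr.h"

	/* Map the BO through the aperture, clear it via the write-combined
	 * mapping exposed in bo->virtual, then drop the mapping again. */
	static int clear_through_gtt(drm_intel_bo *bo)
	{
		int ret = drm_intel_gem_bo_map_gtt(bo);

		if (ret != 0)
			return ret;

		memset(bo->virtual, 0, bo->size);

		return drm_intel_gem_bo_unmap_gtt(bo);
	}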
@@ -804,8 +824,7 @@ drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
	return ret;
}

static int
drm_intel_gem_bo_unmap(drm_intel_bo *bo)
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

@@ -851,10 +870,10 @@ drm_intel_gem_bo_subdata (drm_intel_bo *bo, unsigned long offset,
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
			 __FILE__, __LINE__,
			 bo_gem->gem_handle, (int) offset, (int) size,
			 strerror (errno));
		fprintf(stderr,
			"%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));
	}
	return 0;
}

@@ -900,10 +919,10 @@ drm_intel_gem_bo_get_subdata (drm_intel_bo *bo, unsigned long offset,
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
			 __FILE__, __LINE__,
			 bo_gem->gem_handle, (int) offset, (int) size,
			 strerror (errno));
		fprintf(stderr,
			"%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));
	}
	return 0;
}
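For reference, a small sketch of the pwrite/pread pair exercised through the public wrappers, avoiding any CPU mapping; the helper and values are illustrative:

	#include <stdint.h>
	#include "intel_bufmgr.h"

	/* Round-trip one dword through the PWRITE and PREAD paths above. */
	static int roundtrip_dword(drm_intel_bo *bo)
	{
		uint32_t in = 0xdeadbeef, out = 0;

		drm_intel_bo_subdata(bo, 0, sizeof(in), &in);
		drm_intel_bo_get_subdata(bo, 0, sizeof(out), &out);

		return out == in ? 0 : -1;
	}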
@@ -934,12 +953,14 @@ drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
			 __FILE__, __LINE__,
			 bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
		fprintf(stderr,
			"%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			set_domain.read_domains, set_domain.write_domain,
			strerror(errno));
	}
}

@@ -957,11 +978,13 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);

@@ -1068,7 +1091,8 @@ drm_intel_update_buffer_offsets (drm_intel_bufmgr_gem *bufmgr_gem)
		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec_objects[i].offset);
			    (unsigned long long)bufmgr_gem->exec_objects[i].
			    offset);
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}

@@ -1076,8 +1100,7 @@ drm_intel_update_buffer_offsets (drm_intel_bufmgr_gem *bufmgr_gem)

static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t *cliprects, int num_cliprects,
		      int DR4)
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	struct drm_i915_gem_execbuffer execbuf;
@@ -1087,8 +1110,8 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list.  There are no relocations
	 * pointing to it.
	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

@@ -1102,15 +1125,20 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
	execbuf.DR4 = DR4;

	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER,
			    &execbuf);
	} while (ret != 0 && errno == EAGAIN);

	if (ret != 0 && errno == ENOMEM) {
		fprintf(stderr, "Execbuffer fails to pin. Estimate: %u. Actual: %u. Available: %u\n",
		fprintf(stderr,
			"Execbuffer fails to pin. "
			"Estimate: %u. Actual: %u. Available: %u\n",
			drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							   bufmgr_gem->exec_count),
							   bufmgr_gem->
							   exec_count),
			drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							  bufmgr_gem->exec_count),
							  bufmgr_gem->
							  exec_count),
			(unsigned int)bufmgr_gem->gtt_size);
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);
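A minimal submission sketch for the execbuffer path above, through the public wrapper; 'used' is the number of batch bytes actually emitted and no cliprects are passed. Illustrative only:

	#include <stddef.h>
	#include "intel_bufmgr.h"

	/* Submit a finished batch buffer; relocations were recorded earlier
	 * with the emit_reloc hook, and buffer offsets are refreshed on return. */
	static int submit_batch(drm_intel_bo *batch_bo, int used)
	{
		return drm_intel_bo_exec(batch_bo, used, NULL, 0, 0);
	}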
@@ -1278,7 +1306,9 @@ drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
	bo_gem->included_in_check_aperture = 1;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_bo[i]);
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_bo[i]);

	return total;
}

@@ -1324,7 +1354,8 @@ drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
	bo_gem->included_in_check_aperture = 0;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_bo[i]);
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_bo[i]);
}

/**

@@ -1358,16 +1389,18 @@ drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an accurate count
		 * back for its reloc_tree size (since nothing had been flagged as
		 * being counted yet).  We can save that value out as a more
		 * conservative reloc_tree_size that avoids double-counting target
		 * buffers.  Since the first buffer happens to usually be the batch
		 * buffer in our callers, this can pull us back from doing the tree
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

@@ -1396,7 +1429,8 @@ drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

@@ -1414,7 +1448,8 @@ drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
	total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, %dkb vs %dkb\n",
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -1;
	} else {
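A worked example of the 3/4-of-aperture heuristic above, assuming the kernel reports a 256 MiB aperture (the figure is illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int gtt_size = 256u * 1024 * 1024;
		unsigned int threshold = gtt_size * 3 / 4;

		/* Prints "threshold: 196608kb of 262144kb"; a batch whose
		 * worst-case footprint exceeds that must be split by the caller. */
		printf("threshold: %ukb of %ukb\n", threshold / 1024,
		       gtt_size / 1024);
		return 0;
	}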
@@ -1453,7 +1488,8 @@ drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_bo[i] == target_bo)
			return 1;
		if (drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i], target_bo))
		if (drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
						target_bo))
			return 1;
	}

@@ -1492,7 +1528,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect rendering.\n",
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

@@ -1509,8 +1546,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
		gp.value = &bufmgr_gem->available_fences;
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret, errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		}
	}

@@ -1524,7 +1563,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render = drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;

@@ -1542,9 +1582,11 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id = drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	/* Initialize the linked lists for BO reuse cache. */

@@ -1555,4 +1597,3 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)

	return &bufmgr_gem->bufmgr;
}
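Typical bring-up of the bufmgr assembled above, as a hedged sketch only; the device node and the 4096-byte batch size are placeholders:

	#include <fcntl.h>
	#include <stddef.h>
	#include "intel_bufmgr.h"

	static drm_intel_bufmgr *open_bufmgr(void)
	{
		drm_intel_bufmgr *bufmgr;
		int fd = open("/dev/dri/card0", O_RDWR);

		if (fd < 0)
			return NULL;

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		if (bufmgr != NULL)
			drm_intel_bufmgr_gem_enable_reuse(bufmgr);

		return bufmgr;
	}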
@@ -44,15 +44,16 @@ struct _drm_intel_bufmgr {
	 * Allocate a buffer object.
	 *
	 * Buffer objects are not necessarily initially mapped into CPU virtual
	 * address space or graphics device aperture.  They must be mapped using
	 * bo_map() to be used by the CPU, and validated for use using bo_validate()
	 * to be used from the graphics device.
	 * address space or graphics device aperture.  They must be mapped
	 * using bo_map() to be used by the CPU, and validated for use using
	 * bo_validate() to be used from the graphics device.
	 */
	drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
				   unsigned long size, unsigned int alignment);

	/**
	 * Allocate a buffer object, hinting that it will be used as a render target.
	 * Allocate a buffer object, hinting that it will be used as a
	 * render target.
	 *
	 * This is otherwise the same as bo_alloc.
	 */

@@ -79,7 +80,10 @@ struct _drm_intel_bufmgr {
	 */
	int (*bo_map) (drm_intel_bo *bo, int write_enable);

	/** Reduces the refcount on the userspace mapping of the buffer object. */
	/**
	 * Reduces the refcount on the userspace mapping of the buffer
	 * object.
	 */
	int (*bo_unmap) (drm_intel_bo *bo);

	/**

@@ -103,8 +107,9 @@ struct _drm_intel_bufmgr {
	/**
	 * Waits for rendering to an object by the GPU to have completed.
	 *
	 * This is not required for any access to the BO by bo_map, bo_subdata, etc.
	 * It is merely a way for the driver to implement glFinish.
	 * This is not required for any access to the BO by bo_map,
	 * bo_subdata, etc.  It is merely a way for the driver to implement
	 * glFinish.
	 */
	void (*bo_wait_rendering) (drm_intel_bo *bo);

@@ -120,15 +125,18 @@ struct _drm_intel_bufmgr {
	 * Relocations remain in place for the lifetime of the buffer object.
	 *
	 * \param bo Buffer to write the relocation into.
	 * \param offset Byte offset within reloc_bo of the pointer to target_bo.
	 * \param offset Byte offset within reloc_bo of the pointer to
	 *	  target_bo.
	 * \param target_bo Buffer whose offset should be written into the
	 *	  relocation entry.
	 * \param target_offset Constant value to be added to target_bo's offset in
	 *	  relocation entry.
	 * \param read_domains GEM read domains which the buffer will be read into
	 *	  by the command that this relocation is part of.
	 * \param write_domains GEM read domains which the buffer will be dirtied
	 *	  in by the command that this relocation is part of.
	 * \param target_offset Constant value to be added to target_bo's
	 *	  offset in relocation entry.
	 * \param read_domains GEM read domains which the buffer will be
	 *	  read into by the command that this relocation
	 *	  is part of.
	 * \param write_domains GEM read domains which the buffer will be
	 *	  dirtied in by the command that this
	 *	  relocation is part of.
	 */
	int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
			      drm_intel_bo *target_bo, uint32_t target_offset,
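A sketch of the relocation interface documented above, exercised through the public wrapper; the domains, offsets and helper name are illustrative:

	#include <stdint.h>
	#include "intel_bufmgr.h"
	#include "i915_drm.h"

	/* Record that the dword at 'batch_offset' inside 'batch' points at
	 * 'target', which the GPU will read and write as a render target. */
	static int point_batch_at(drm_intel_bo *batch, uint32_t batch_offset,
				  drm_intel_bo *target)
	{
		return drm_intel_bo_emit_reloc(batch, batch_offset, target, 0,
					       I915_GEM_DOMAIN_RENDER,
					       I915_GEM_DOMAIN_RENDER);
	}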
@@ -146,12 +154,14 @@ struct _drm_intel_bufmgr {
	 * \param alignment Required alignment for aperture, in bytes
	 */
	int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);

	/**
	 * Unpin a buffer from the aperture, allowing it to be removed
	 *
	 * \param buf Buffer to unpin
	 */
	int (*bo_unpin) (drm_intel_bo *bo);

	/**
	 * Ask that the buffer be placed in tiling mode
	 *

@@ -160,6 +170,7 @@ struct _drm_intel_bufmgr {
	 */
	int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
			      uint32_t stride);

	/**
	 * Get the current tiling (and resulting swizzling) mode for the bo.
	 *

@@ -169,6 +180,7 @@ struct _drm_intel_bufmgr {
	 */
	int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
			      uint32_t * swizzle_mode);

	/**
	 * Create a visible name for a buffer which can be used by other apps
	 *
@@ -187,8 +199,8 @@ struct _drm_intel_bufmgr {

	/**
	 * Disable buffer reuse for buffers which will be shared in some way,
	 * as with scanout buffers. When the buffer reference count goes to zero,
	 * it will be freed and not placed in the reuse list.
	 * as with scanout buffers. When the buffer reference count goes to
	 * zero, it will be freed and not placed in the reuse list.
	 *
	 * \param bo Buffer to disable reuse for
	 */

@@ -210,8 +222,8 @@ struct _drm_intel_bufmgr {
	/** Returns true if target_bo is in the relocation tree rooted at bo. */
	int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);

	int debug; /**< Enables verbose debugging printouts */
	/**< Enables verbose debugging printouts */
	int debug;
};

#endif /* INTEL_BUFMGR_PRIV_H */
@@ -28,8 +28,7 @@
#include "xf86drm.h"
#include "mm.h"

void
mmDumpMemInfo(const struct mem_block *heap)
void mmDumpMemInfo(const struct mem_block *heap)
{
	drmMsg("Memory heap %p:\n", (void *)heap);
	if (heap == 0) {

@@ -38,16 +37,16 @@ mmDumpMemInfo(const struct mem_block *heap)
		const struct mem_block *p;

		for (p = heap->next; p != heap; p = p->next) {
			drmMsg("  Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
			       p->free ? 'F':'.',
			drmMsg("  Offset:%08x, Size:%08x, %c%c\n", p->ofs,
			       p->size, p->free ? 'F' : '.',
			       p->reserved ? 'R' : '.');
		}

		drmMsg("\nFree list:\n");

		for (p = heap->next_free; p != heap; p = p->next_free) {
			drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
			       p->free ? 'F':'.',
			drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n", p->ofs,
			       p->size, p->free ? 'F' : '.',
			       p->reserved ? 'R' : '.');
		}

@@ -55,8 +54,7 @@ mmDumpMemInfo(const struct mem_block *heap)
	drmMsg("End of memory blocks\n");
}

struct mem_block *
mmInit(int ofs, int size)
struct mem_block *mmInit(int ofs, int size)
{
	struct mem_block *heap, *block;

@@ -91,9 +89,7 @@ mmInit(int ofs, int size)
	return heap;
}

static struct mem_block *
SliceBlock(struct mem_block *p,
static struct mem_block *SliceBlock(struct mem_block *p,
				    int startofs, int size,
				    int reserved, int alignment)
{

@@ -101,7 +97,8 @@ SliceBlock(struct mem_block *p,

	/* break left [p, newblock, p->next], then p = newblock */
	if (startofs > p->ofs) {
		newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
		newblock =
		    (struct mem_block *)calloc(1, sizeof(struct mem_block));
		if (!newblock)
			return NULL;
		newblock->ofs = startofs;

@@ -125,7 +122,8 @@ SliceBlock(struct mem_block *p,

	/* break right, also [p, newblock, p->next] */
	if (size < p->size) {
		newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
		newblock =
		    (struct mem_block *)calloc(1, sizeof(struct mem_block));
		if (!newblock)
			return NULL;
		newblock->ofs = startofs + size;

@@ -161,9 +159,8 @@ SliceBlock(struct mem_block *p,
	return p;
}

struct mem_block *
mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
struct mem_block *mmAllocMem(struct mem_block *heap, int size, int align2,
			     int startSearch)
{
	struct mem_block *p;
	const int mask = (1 << align2) - 1;
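A smoke-test sketch of this allocator's interface (mmInit / mmAllocMem / mmFreeMem / mmDestroy). The sizes are arbitrary and, per mmAllocMem above, align2 is log2 of the requested alignment:

	#include <stddef.h>
	#include "mm.h"

	static void mm_smoke_test(void)
	{
		struct mem_block *heap = mmInit(0, 1024 * 1024);
		struct mem_block *block;

		if (heap == NULL)
			return;

		/* 64 KiB block, 2^12 = 4096-byte aligned, search from the start. */
		block = mmAllocMem(heap, 64 * 1024, 12, 0);
		if (block != NULL)
			mmFreeMem(block);

		mmDestroy(heap);
	}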
@@ -194,9 +191,7 @@ mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
	return p;
}

struct mem_block *
mmFindBlock(struct mem_block *heap, int start)
struct mem_block *mmFindBlock(struct mem_block *heap, int start)
{
	struct mem_block *p;

@@ -208,9 +203,7 @@ mmFindBlock(struct mem_block *heap, int start)
	return NULL;
}

static int
Join2Blocks(struct mem_block *p)
static int Join2Blocks(struct mem_block *p)
{
	/* XXX there should be some assertions here */

@@ -234,8 +227,7 @@ Join2Blocks(struct mem_block *p)
	return 0;
}

int
mmFreeMem(struct mem_block *b)
int mmFreeMem(struct mem_block *b)
{
	if (!b)
		return 0;

@@ -262,9 +254,7 @@ mmFreeMem(struct mem_block *b)
	return 0;
}

void
mmDestroy(struct mem_block *heap)
void mmDestroy(struct mem_block *heap)
{
	struct mem_block *p;
@@ -21,13 +21,11 @@
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * Memory manager code.  Primarily used by device drivers to manage texture
 * heaps, etc.
 */

#ifndef MM_H
#define MM_H