intel: move drm calls to exec buffers to libdrm_intel.
This avoids duplicating the effort in 3 places. Also, added emit/wait fence callbacks back in bufmgr_fake since we need it for non-drm 2d. Sigh.main
parent
869d8bebed
commit
f9d98beefc
|
@ -123,14 +123,12 @@ dri_bufmgr_destroy(dri_bufmgr *bufmgr)
|
|||
bufmgr->destroy(bufmgr);
|
||||
}
|
||||
|
||||
void *dri_process_relocs(dri_bo *batch_buf)
|
||||
int
|
||||
dri_bo_exec(dri_bo *bo, int used,
|
||||
drm_clip_rect_t *cliprects, int num_cliprects,
|
||||
int DR4)
|
||||
{
|
||||
return batch_buf->bufmgr->process_relocs(batch_buf);
|
||||
}
|
||||
|
||||
void dri_post_submit(dri_bo *batch_buf)
|
||||
{
|
||||
batch_buf->bufmgr->post_submit(batch_buf);
|
||||
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -76,10 +76,9 @@ void dri_bo_wait_rendering(dri_bo *bo);
|
|||
|
||||
void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
|
||||
void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
|
||||
|
||||
void *dri_process_relocs(dri_bo *batch_buf);
|
||||
void dri_post_process_relocs(dri_bo *batch_buf);
|
||||
void dri_post_submit(dri_bo *batch_buf);
|
||||
int dri_bo_exec(dri_bo *bo, int used,
|
||||
drm_clip_rect_t *cliprects, int num_cliprects,
|
||||
int DR4);
|
||||
int dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count);
|
||||
|
||||
int dri_bo_emit_reloc(dri_bo *reloc_buf,
|
||||
|
@ -103,6 +102,16 @@ dri_bufmgr *intel_bufmgr_fake_init(int fd,
|
|||
volatile unsigned int *last_dispatch);
|
||||
void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
|
||||
volatile unsigned int *last_dispatch);
|
||||
void intel_bufmgr_fake_set_exec_callback(dri_bufmgr *bufmgr,
|
||||
int (*exec)(dri_bo *bo,
|
||||
unsigned int used,
|
||||
void *priv),
|
||||
void *priv);
|
||||
void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
|
||||
unsigned int (*emit)(void *priv),
|
||||
void (*wait)(unsigned int fence,
|
||||
void *priv),
|
||||
void *priv);
|
||||
dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned long offset, unsigned long size,
|
||||
void *virtual);
|
||||
|
|
|
@ -136,6 +136,31 @@ typedef struct _bufmgr_fake {
|
|||
unsigned need_fence:1;
|
||||
int thrashing;
|
||||
|
||||
/**
|
||||
* Driver callback to emit a fence, returning the cookie.
|
||||
*
|
||||
* This allows the driver to hook in a replacement for the DRM usage in
|
||||
* bufmgr_fake.
|
||||
*
|
||||
* Currently, this also requires that a write flush be emitted before
|
||||
* emitting the fence, but this should change.
|
||||
*/
|
||||
unsigned int (*fence_emit)(void *private);
|
||||
/** Driver callback to wait for a fence cookie to have passed. */
|
||||
void (*fence_wait)(unsigned int fence, void *private);
|
||||
void *fence_priv;
|
||||
|
||||
/**
|
||||
* Driver callback to execute a buffer.
|
||||
*
|
||||
* This allows the driver to hook in a replacement for the DRM usage in
|
||||
* bufmgr_fake.
|
||||
*/
|
||||
int (*exec)(dri_bo *bo, unsigned int used, void *priv);
|
||||
void *exec_priv;
|
||||
|
||||
/** Driver-supplied argument to driver callbacks */
|
||||
void *driver_priv;
|
||||
/* Pointer to kernel-updated sarea data for the last completed user irq */
|
||||
volatile unsigned int *last_dispatch;
|
||||
|
||||
|
@ -205,12 +230,28 @@ static int FENCE_LTE( unsigned a, unsigned b )
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
 * Install driver-supplied fence emit/wait callbacks on a fake bufmgr.
 *
 * When set, these replace the DRM_I915_IRQ_EMIT/IRQ_WAIT ioctl paths in
 * _fence_emit_internal()/_fence_wait_internal(), which non-DRM 2D drivers
 * need.
 *
 * \param bufmgr the fake bufmgr to configure (must be a dri_bufmgr_fake)
 * \param emit   callback emitting a fence; returns the fence cookie
 * \param wait   callback blocking until \p fence has passed
 * \param priv   opaque driver pointer handed back to both callbacks
 */
void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
                                          unsigned int (*emit)(void *priv),
                                          void (*wait)(unsigned int fence,
                                                       void *priv),
                                          void *priv)
{
    dri_bufmgr_fake *fake = (dri_bufmgr_fake *)bufmgr;

    fake->fence_priv = priv;
    fake->fence_emit = emit;
    fake->fence_wait = wait;
}
|
||||
|
||||
static unsigned int
|
||||
_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
|
||||
{
|
||||
struct drm_i915_irq_emit ie;
|
||||
int ret, seq = 1;
|
||||
|
||||
if (bufmgr_fake->fence_emit != NULL)
|
||||
return bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
|
||||
|
||||
ie.irq_seq = &seq;
|
||||
ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
|
||||
&ie, sizeof(ie));
|
||||
|
@ -243,6 +284,11 @@ _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie)
|
|||
unsigned int last_dispatch;
|
||||
int ret;
|
||||
|
||||
if (bufmgr_fake->fence_wait != NULL) {
|
||||
bufmgr_fake->fence_wait(cookie, bufmgr_fake->fence_priv);
|
||||
return;
|
||||
}
|
||||
|
||||
DBG("wait 0x%08x\n", iw.irq_seq);
|
||||
|
||||
/* The kernel implementation of IRQ_WAIT is broken for wraparound, and has
|
||||
|
@ -1092,38 +1138,6 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
|
|||
return dri_fake_bo_validate(bo);
|
||||
}
|
||||
|
||||
static void *
|
||||
dri_fake_process_relocs(dri_bo *batch_buf)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
|
||||
dri_bo_fake *batch_fake = (dri_bo_fake *)batch_buf;
|
||||
int ret;
|
||||
int retry_count = 0;
|
||||
|
||||
bufmgr_fake->performed_rendering = 0;
|
||||
|
||||
dri_fake_calculate_domains(batch_buf);
|
||||
|
||||
batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
|
||||
|
||||
/* we've ran out of RAM so blow the whole lot away and retry */
|
||||
restart:
|
||||
ret = dri_fake_reloc_and_validate_buffer(batch_buf);
|
||||
if (bufmgr_fake->fail == 1) {
|
||||
if (retry_count == 0) {
|
||||
retry_count++;
|
||||
dri_fake_kick_all(bufmgr_fake);
|
||||
bufmgr_fake->fail = 0;
|
||||
goto restart;
|
||||
} else /* dump out the memory here */
|
||||
mmDumpMemInfo(bufmgr_fake->heap);
|
||||
}
|
||||
|
||||
assert(ret == 0);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
dri_bo_fake_post_submit(dri_bo *bo)
|
||||
{
|
||||
|
@ -1150,12 +1164,74 @@ dri_bo_fake_post_submit(dri_bo *bo)
|
|||
}
|
||||
|
||||
|
||||
static void
|
||||
dri_fake_post_submit(dri_bo *batch_buf)
|
||||
void intel_bufmgr_fake_set_exec_callback(dri_bufmgr *bufmgr,
|
||||
int (*exec)(dri_bo *bo,
|
||||
unsigned int used,
|
||||
void *priv),
|
||||
void *priv)
|
||||
{
|
||||
dri_fake_fence_validated(batch_buf->bufmgr);
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
|
||||
|
||||
dri_bo_fake_post_submit(batch_buf);
|
||||
bufmgr_fake->exec = exec;
|
||||
bufmgr_fake->exec_priv = exec;
|
||||
}
|
||||
|
||||
static int
|
||||
dri_fake_bo_exec(dri_bo *bo, int used,
|
||||
drm_clip_rect_t *cliprects, int num_cliprects,
|
||||
int DR4)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
|
||||
dri_bo_fake *batch_fake = (dri_bo_fake *)bo;
|
||||
struct drm_i915_batchbuffer batch;
|
||||
int ret;
|
||||
int retry_count = 0;
|
||||
|
||||
bufmgr_fake->performed_rendering = 0;
|
||||
|
||||
dri_fake_calculate_domains(bo);
|
||||
|
||||
batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
|
||||
|
||||
/* we've ran out of RAM so blow the whole lot away and retry */
|
||||
restart:
|
||||
ret = dri_fake_reloc_and_validate_buffer(bo);
|
||||
if (bufmgr_fake->fail == 1) {
|
||||
if (retry_count == 0) {
|
||||
retry_count++;
|
||||
dri_fake_kick_all(bufmgr_fake);
|
||||
bufmgr_fake->fail = 0;
|
||||
goto restart;
|
||||
} else /* dump out the memory here */
|
||||
mmDumpMemInfo(bufmgr_fake->heap);
|
||||
}
|
||||
|
||||
assert(ret == 0);
|
||||
|
||||
if (bufmgr_fake->exec != NULL) {
|
||||
int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
} else {
|
||||
batch.start = bo->offset;
|
||||
batch.used = used;
|
||||
batch.cliprects = cliprects;
|
||||
batch.num_cliprects = num_cliprects;
|
||||
batch.DR1 = 0;
|
||||
batch.DR4 = DR4;
|
||||
|
||||
if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
|
||||
sizeof(batch))) {
|
||||
drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
|
||||
return -errno;
|
||||
}
|
||||
}
|
||||
|
||||
dri_fake_fence_validated(bo->bufmgr);
|
||||
|
||||
dri_bo_fake_post_submit(bo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1264,8 +1340,7 @@ intel_bufmgr_fake_init(int fd,
|
|||
bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
|
||||
bufmgr_fake->bufmgr.bo_emit_reloc = dri_fake_emit_reloc;
|
||||
bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
|
||||
bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
|
||||
bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
|
||||
bufmgr_fake->bufmgr.bo_exec = dri_fake_bo_exec;
|
||||
bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
|
||||
bufmgr_fake->bufmgr.debug = 0;
|
||||
|
||||
|
|
|
@ -87,8 +87,6 @@ typedef struct _dri_bufmgr_gem {
|
|||
|
||||
/** Array of lists of cached gem objects of power-of-two sizes */
|
||||
struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
|
||||
|
||||
struct drm_i915_gem_execbuffer exec_arg;
|
||||
} dri_bufmgr_gem;
|
||||
|
||||
struct _dri_bo_gem {
|
||||
|
@ -706,27 +704,6 @@ dri_gem_bo_process_reloc(dri_bo *bo)
|
|||
}
|
||||
}
|
||||
|
||||
static void *
|
||||
dri_gem_process_reloc(dri_bo *batch_buf)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr;
|
||||
|
||||
/* Update indices and set up the validate list. */
|
||||
dri_gem_bo_process_reloc(batch_buf);
|
||||
|
||||
/* Add the batch buffer to the validation list. There are no relocations
|
||||
* pointing to it.
|
||||
*/
|
||||
intel_add_validate_buffer(batch_buf);
|
||||
|
||||
bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
|
||||
bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count;
|
||||
bufmgr_gem->exec_arg.batch_start_offset = 0;
|
||||
bufmgr_gem->exec_arg.batch_len = 0; /* written in intel_exec_ioctl */
|
||||
|
||||
return &bufmgr_gem->exec_arg;
|
||||
}
|
||||
|
||||
static void
|
||||
intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
|
||||
{
|
||||
|
@ -746,11 +723,35 @@ intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
dri_gem_post_submit(dri_bo *batch_buf)
|
||||
static int
|
||||
dri_gem_bo_exec(dri_bo *bo, int used,
|
||||
drm_clip_rect_t *cliprects, int num_cliprects,
|
||||
int DR4)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
|
||||
int i;
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
struct drm_i915_gem_execbuffer execbuf;
|
||||
int ret, i;
|
||||
|
||||
/* Update indices and set up the validate list. */
|
||||
dri_gem_bo_process_reloc(bo);
|
||||
|
||||
/* Add the batch buffer to the validation list. There are no relocations
|
||||
* pointing to it.
|
||||
*/
|
||||
intel_add_validate_buffer(bo);
|
||||
|
||||
execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
|
||||
execbuf.buffer_count = bufmgr_gem->exec_count;
|
||||
execbuf.batch_start_offset = 0;
|
||||
execbuf.batch_len = used;
|
||||
execbuf.cliprects_ptr = (uintptr_t)cliprects;
|
||||
execbuf.num_cliprects = num_cliprects;
|
||||
execbuf.DR1 = 0;
|
||||
execbuf.DR4 = DR4;
|
||||
|
||||
do {
|
||||
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
|
||||
} while (ret == -EAGAIN);
|
||||
|
||||
intel_update_buffer_offsets (bufmgr_gem);
|
||||
|
||||
|
@ -770,6 +771,8 @@ dri_gem_post_submit(dri_bo *batch_buf)
|
|||
bufmgr_gem->exec_bos[i] = NULL;
|
||||
}
|
||||
bufmgr_gem->exec_count = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -913,9 +916,8 @@ intel_bufmgr_gem_init(int fd, int batch_size)
|
|||
bufmgr_gem->bufmgr.bo_unpin = dri_gem_bo_unpin;
|
||||
bufmgr_gem->bufmgr.bo_set_tiling = dri_gem_bo_set_tiling;
|
||||
bufmgr_gem->bufmgr.bo_flink = dri_gem_bo_flink;
|
||||
bufmgr_gem->bufmgr.bo_exec = dri_gem_bo_exec;
|
||||
bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
|
||||
bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
|
||||
bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
|
||||
bufmgr_gem->bufmgr.debug = 0;
|
||||
bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
|
||||
/* Initialize the linked lists for BO reuse cache. */
|
||||
|
|
|
@ -103,23 +103,6 @@ struct _dri_bufmgr {
|
|||
*/
|
||||
void (*destroy)(dri_bufmgr *bufmgr);
|
||||
|
||||
/**
|
||||
* Processes the relocations, either in userland or by converting the list
|
||||
* for use in batchbuffer submission.
|
||||
*
|
||||
* Kernel-based implementations will return a pointer to the arguments
|
||||
* to be handed with batchbuffer submission to the kernel. The userland
|
||||
* implementation performs the buffer validation and emits relocations
|
||||
* into them the appopriate order.
|
||||
*
|
||||
* \param batch_buf buffer at the root of the tree of relocations
|
||||
* \return argument to be completed and passed to the execbuffers ioctl
|
||||
* (if any).
|
||||
*/
|
||||
void *(*process_relocs)(dri_bo *batch_buf);
|
||||
|
||||
void (*post_submit)(dri_bo *batch_buf);
|
||||
|
||||
/**
|
||||
* Add relocation entry in reloc_buf, which will be updated with the
|
||||
* target buffer's real offset on on command submission.
|
||||
|
@ -140,6 +123,12 @@ struct _dri_bufmgr {
|
|||
int (*bo_emit_reloc)(dri_bo *reloc_buf,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta, uint32_t offset, dri_bo *target);
|
||||
|
||||
/** Executes the command buffer pointed to by bo. */
|
||||
int (*bo_exec)(dri_bo *bo, int used,
|
||||
drm_clip_rect_t *cliprects, int num_cliprects,
|
||||
int DR4);
|
||||
|
||||
/**
|
||||
* Pin a buffer to the aperture and fix the offset until unpinned
|
||||
*
|
||||
|
|
Loading…
Reference in New Issue