freedreno/msm: fix issue where same bo is on multiple rings
It should be a less common case, but it is possible for a single bo to be on
multiple rings, for example when sharing a buffer across multiple
pipe_context's created from the same pipe_screen.  So rather than completely
fall over in this case, fall back to a slow-path of looping over all bo's in
the ring's bo-table (but retain the fast-path of constant-time lookup for the
first ring the buffer is on).

Signed-off-by: Rob Clark <robclark@freedesktop.org>
parent 2fa58ef8f4
commit 9e34ee4f75
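The idea in a nutshell: each bo caches its bo-table index for one ring (the fast
path), and a bo that turns up on any other ring falls back to a linear scan of
that ring's table. What follows is a simplified, self-contained sketch of that
lookup strategy, not the patch itself; names such as current_ring, idx and the
bo table mirror what the diff below introduces.

/* Illustrative sketch only -- the real types and functions are in the
 * diff below.  A bo remembers the table index for exactly one ring; any
 * other ring has to scan its own table to find (or append) the bo.
 */
struct sketch_bo { void *current_ring; unsigned idx; };

static unsigned
sketch_bo2idx(void *ring, struct sketch_bo **table, unsigned *nr,
		struct sketch_bo *bo)
{
	unsigned idx;
	if (!bo->current_ring) {
		/* first ring referencing this bo: append and cache the idx */
		idx = (*nr)++;
		table[idx] = bo;
		bo->current_ring = ring;
		bo->idx = idx;
	} else if (bo->current_ring == ring) {
		/* fast path: cached idx is valid for this ring */
		idx = bo->idx;
	} else {
		/* slow path: bo is cached for a different ring, scan the table */
		for (idx = 0; idx < *nr; idx++)
			if (table[idx] == bo)
				break;
		if (idx == *nr) {
			/* not found: append without disturbing the cached idx */
			idx = (*nr)++;
			table[idx] = bo;
		}
	}
	return idx;
}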
@@ -129,7 +129,6 @@ drm_private struct fd_bo * msm_bo_from_handle(struct fd_device *dev,
 {
 	struct msm_bo *msm_bo;
 	struct fd_bo *bo;
-	unsigned i;
 
 	msm_bo = calloc(1, sizeof(*msm_bo));
 	if (!msm_bo)
@@ -139,8 +138,5 @@ drm_private struct fd_bo * msm_bo_from_handle(struct fd_device *dev,
 	bo->funcs = &funcs;
 	bo->fd = -1;
 
-	for (i = 0; i < ARRAY_SIZE(msm_bo->list); i++)
-		list_inithead(&msm_bo->list[i]);
-
 	return bo;
 }
@@ -71,8 +71,19 @@ struct msm_bo {
 	struct fd_bo base;
 	uint64_t offset;
 	uint64_t presumed;
-	uint32_t indexp1[FD_PIPE_MAX]; /* index plus 1 */
-	struct list_head list[FD_PIPE_MAX];
+	/* in the common case, a bo won't be referenced by more than a single
+	 * (parent) ring[*].  So to avoid looping over all the bo's in the
+	 * reloc table to find the idx of a bo that might already be in the
+	 * table, we cache the idx in the bo.  But in order to detect the
+	 * slow-path where bo is ref'd in multiple rb's, we also must track
+	 * the current_ring for which the idx is valid.  See bo2idx().
+	 *
+	 * [*] in case multiple ringbuffers, ie. one toplevel and other rb(s)
+	 *     used for IB target(s), the toplevel rb is the parent which is
+	 *     tracking bo's for the submit
+	 */
+	struct fd_ringbuffer *current_ring;
+	uint32_t idx;
 };
 
 static inline struct msm_bo * to_msm_bo(struct fd_bo *x)
@@ -39,8 +39,6 @@ struct msm_ringbuffer {
 	struct fd_ringbuffer base;
 	struct fd_bo *ring_bo;
 
-	struct list_head submit_list;
-
 	/* submit ioctl related tables: */
 	struct {
 		/* bo's table: */
@@ -56,11 +54,17 @@ struct msm_ringbuffer {
 		uint32_t nr_relocs, max_relocs;
 	} submit;
 
+	/* should have matching entries in submit.bos: */
+	struct fd_bo **bos;
+	uint32_t nr_bos, max_bos;
+
 	/* should have matching entries in submit.cmds: */
 	struct fd_ringbuffer **rings;
 	uint32_t nr_rings, max_rings;
 };
 
+static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
+
 static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
 {
 	if ((nr + 1) > *max) {
@@ -83,27 +87,47 @@ static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
 	return (struct msm_ringbuffer *)x;
 }
 
+static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
+{
+	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+	uint32_t idx;
+
+	idx = APPEND(&msm_ring->submit, bos);
+	idx = APPEND(msm_ring, bos);
+
+	msm_ring->submit.bos[idx].flags = 0;
+	msm_ring->submit.bos[idx].handle = bo->handle;
+	msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;
+
+	msm_ring->bos[idx] = fd_bo_ref(bo);
+
+	return idx;
+}
+
 /* add (if needed) bo, return idx: */
 static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
 {
 	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
 	struct msm_bo *msm_bo = to_msm_bo(bo);
-	int id = ring->pipe->id;
 	uint32_t idx;
-	if (!msm_bo->indexp1[id]) {
-		struct list_head *list = &msm_bo->list[id];
-		idx = APPEND(&msm_ring->submit, bos);
-		msm_ring->submit.bos[idx].flags = 0;
-		msm_ring->submit.bos[idx].handle = bo->handle;
-		msm_ring->submit.bos[idx].presumed = msm_bo->presumed;
-		msm_bo->indexp1[id] = idx + 1;
-
-		assert(LIST_IS_EMPTY(list));
-		fd_bo_ref(bo);
-		list_addtail(list, &msm_ring->submit_list);
+	pthread_mutex_lock(&idx_lock);
+	if (!msm_bo->current_ring) {
+		idx = append_bo(ring, bo);
+		msm_bo->current_ring = ring;
+		msm_bo->idx = idx;
+	} else if (msm_bo->current_ring == ring) {
+		idx = msm_bo->idx;
 	} else {
-		idx = msm_bo->indexp1[id] - 1;
+		/* slow-path: */
+		for (idx = 0; idx < msm_ring->nr_bos; idx++)
+			if (msm_ring->bos[idx] == bo)
+				break;
+		if (idx == msm_ring->nr_bos) {
+			/* not found */
+			idx = append_bo(ring, bo);
+		}
 	}
+	pthread_mutex_unlock(&idx_lock);
 	if (flags & FD_RELOC_READ)
 		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
 	if (flags & FD_RELOC_WRITE)
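The two APPEND() calls in append_bo() above rely on submit.bos and the new bos[]
array growing in lockstep, so both calls return the same index. The APPEND macro
itself is not part of this diff; the following is a hypothetical sketch of such a
grow-and-return-index helper, built on the grow() signature visible in the
previous hunk. It is an assumption about the macro's shape, not the actual
libdrm code.

#include <stdlib.h>

/* sketch only: realloc-backed growth, same shape as the grow() shown above */
static void *sketch_grow(void *ptr, unsigned nr, unsigned *max, size_t sz)
{
	if ((nr + 1) > *max) {
		*max = ((*max * 2) > (nr + 1)) ? (*max * 2) : (nr + 8);
		ptr = realloc(ptr, *max * sz);
	}
	return ptr;
}

/* GNU statement expression: grows the array "name" inside struct *x and
 * evaluates to nr_<name> before the increment, i.e. the index of the slot
 * just appended.  Two parallel arrays that are always appended and reset
 * together therefore hand back the same idx, which is what append_bo()
 * depends on.
 */
#define SKETCH_APPEND(x, name) ({                                          \
		(x)->name = sketch_grow((x)->name, (x)->nr_ ## name,       \
				&(x)->max_ ## name, sizeof((x)->name[0])); \
		(x)->nr_ ## name ++;                                       \
	})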
@@ -193,6 +217,8 @@ static void flush_reset(struct fd_ringbuffer *ring)
 	msm_ring->submit.nr_relocs = 0;
 	msm_ring->submit.nr_cmds = 0;
 	msm_ring->submit.nr_bos = 0;
+	msm_ring->nr_rings = 0;
+	msm_ring->nr_bos = 0;
 }
 
 static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start)
@@ -202,9 +228,8 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
 	struct drm_msm_gem_submit req = {
 			.pipe = to_msm_pipe(ring->pipe)->pipe,
 	};
-	struct msm_bo *msm_bo = NULL, *tmp;
 	uint32_t i, submit_offset, size;
-	int ret, id = ring->pipe->id;
+	int ret;
 
 	submit_offset = offset_bytes(last_start, ring->start);
 	size = offset_bytes(ring->cur, last_start);
@@ -242,10 +267,9 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
 		}
 	}
 
-	LIST_FOR_EACH_ENTRY_SAFE(msm_bo, tmp, &msm_ring->submit_list, list[id]) {
-		struct list_head *list = &msm_bo->list[id];
-		list_delinit(list);
-		msm_bo->indexp1[id] = 0;
+	for (i = 0; i < msm_ring->nr_bos; i++) {
+		struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
+		msm_bo->current_ring = NULL;
 		fd_bo_del(&msm_bo->base);
 	}
 
@@ -338,8 +362,6 @@ drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
 	ring = &msm_ring->base;
 	ring->funcs = &funcs;
 
-	list_inithead(&msm_ring->submit_list);
-
 	msm_ring->ring_bo = fd_bo_new(pipe->dev, size, 0);
 	if (!msm_ring->ring_bo) {
 		ERROR_MSG("ringbuffer allocation failed");