amdgpu: add the interface of waiting multiple fences

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
[v2: allow returning the first signaled fence index]
Signed-off-by: monk.liu <Monk.Liu@amd.com>
[v3:
 - cleanup *status setting
 - fix amdgpu symbols check
v4: simplify return from amdgpu_cs_wait_fences (suggested
    by Edward O'Callaghan)]
Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com> (v1)
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com> (v1)
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Nicolai Hähnle 2017-04-13 16:43:14 +02:00
parent f45853802e
commit d8d45a4938
3 changed files with 95 additions and 0 deletions

@@ -33,6 +33,7 @@ amdgpu_cs_query_fence_status
amdgpu_cs_query_reset_state
amdgpu_cs_signal_semaphore
amdgpu_cs_submit
amdgpu_cs_wait_fences
amdgpu_cs_wait_semaphore
amdgpu_device_deinitialize
amdgpu_device_initialize

@@ -907,6 +907,29 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
				 uint64_t flags,
				 uint32_t *expired);
/**
 * Wait for multiple fences
 *
 * \param   fences      - \c [in] The fence array to wait on
 * \param   fence_count - \c [in] The number of fences in the array
 * \param   wait_all    - \c [in] If true, wait until all fences are signaled;
 *                                otherwise, wait until at least one fence is signaled
 * \param   timeout_ns  - \c [in] The wait timeout, in nanoseconds
 * \param   status      - \c [out] '1' if signaled, '0' if timed out
 * \param   first       - \c [out] The index of the first signaled fence in @fences
 *
 * \return  0 on success
 *          <0 - Negative POSIX error code
 *
 * \note Currently only one amdgpu_device is supported: all fences must come
 * from the same amdgpu_device, i.e. the same fd.
 */
int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
			  uint32_t fence_count,
			  bool wait_all,
			  uint64_t timeout_ns,
			  uint32_t *status, uint32_t *first);

/*
 * Query / Info API
 *
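For context, here is a minimal usage sketch of the new entry point (not part of this patch): it waits up to one second for either of two previously submitted fences and reports which one signaled first. The helper name wait_for_any, the context handle, the sequence numbers, the ring index, and the GFX ip_type are illustrative assumptions, not values taken from the patch.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Hypothetical helper: ctx, seq0 and seq1 are assumed to come from earlier
 * amdgpu_cs_submit() calls on GFX ring 0. */
static int wait_for_any(amdgpu_context_handle ctx, uint64_t seq0, uint64_t seq1)
{
	struct amdgpu_cs_fence fences[2] = {
		{ .context = ctx, .ip_type = AMDGPU_HW_IP_GFX, .ip_instance = 0,
		  .ring = 0, .fence = seq0 },
		{ .context = ctx, .ip_type = AMDGPU_HW_IP_GFX, .ip_instance = 0,
		  .ring = 0, .fence = seq1 },
	};
	uint32_t status = 0, first = 0;
	int r;

	/* wait_all = false: return as soon as either fence signals. */
	r = amdgpu_cs_wait_fences(fences, 2, false, 1000000000ull,
				  &status, &first);
	if (r)
		return r;		/* negative POSIX error code */
	if (!status)
		return -ETIME;		/* timed out, nothing signaled */

	printf("fence %u signaled first\n", (unsigned)first);
	return 0;
}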

@@ -443,6 +443,77 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
	return r;
}
static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
				    uint32_t fence_count,
				    bool wait_all,
				    uint64_t timeout_ns,
				    uint32_t *status,
				    uint32_t *first)
{
	struct drm_amdgpu_fence *drm_fences;
	amdgpu_device_handle dev = fences[0].context->dev;
	union drm_amdgpu_wait_fences args;
	int r;
	uint32_t i;

	/* Translate the library fences into the kernel's fence descriptors. */
	drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
	for (i = 0; i < fence_count; i++) {
		drm_fences[i].ctx_id = fences[i].context->id;
		drm_fences[i].ip_type = fences[i].ip_type;
		drm_fences[i].ip_instance = fences[i].ip_instance;
		drm_fences[i].ring = fences[i].ring;
		drm_fences[i].seq_no = fences[i].fence;
	}

	memset(&args, 0, sizeof(args));
	args.in.fences = (uint64_t)(uintptr_t)drm_fences;
	args.in.fence_count = fence_count;
	args.in.wait_all = wait_all;
	args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
	if (r)
		return -errno;

	*status = args.out.status;

	if (first)
		*first = args.out.first_signaled;

	return 0;
}
int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
			  uint32_t fence_count,
			  bool wait_all,
			  uint64_t timeout_ns,
			  uint32_t *status,
			  uint32_t *first)
{
	uint32_t i;

	/* Sanity check */
	if (NULL == fences)
		return -EINVAL;
	if (NULL == status)
		return -EINVAL;
	if (fence_count <= 0)
		return -EINVAL;
	for (i = 0; i < fence_count; i++) {
		if (NULL == fences[i].context)
			return -EINVAL;
		if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
			return -EINVAL;
		if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
			return -EINVAL;
	}

	*status = 0;

	return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
					timeout_ns, status, first);
}
int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
{
	struct amdgpu_semaphore *gpu_semaphore;