amdgpu : move management of user fence from libdrm to UMD

Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Authored by Ken Wang on 2015-07-10 22:22:27 +08:00; committed by Alex Deucher
parent 01e4546ff3
commit 926c805686
6 changed files with 70 additions and 135 deletions
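
In short: the 4 KiB user-fence buffer that libdrm allocated per context now comes from the user-mode driver. The UMD passes a buffer object and slot offset through the new amdgpu_cs_request::fence_info member, amdgpu_cs_submit() loses its fences[] out-parameter, and the kernel sequence number is returned through the new amdgpu_cs_request::seq_no field. A minimal sketch of the resulting UMD-side flow (hypothetical code, not part of the patch; dev, context, ib_info and bo_list are assumed to be set up by the caller as usual):

    #include <stdint.h>
    #include <amdgpu.h>		/* libdrm amdgpu API */
    #include <amdgpu_drm.h>	/* AMDGPU_GEM_DOMAIN_GTT, AMDGPU_HW_IP_GFX */

    static int submit_and_wait(amdgpu_device_handle dev,
    			   amdgpu_context_handle context,
    			   struct amdgpu_cs_ib_info *ib_info,
    			   amdgpu_bo_list_handle bo_list)
    {
    	struct amdgpu_bo_alloc_request alloc_buffer = {};
    	struct amdgpu_bo_alloc_result info = {};
    	struct amdgpu_cs_request ibs_request = {};
    	struct amdgpu_cs_fence fence_status = {};
    	uint32_t expired;
    	int r;

    	/* Allocating the fence BO is now the UMD's job; this mirrors
    	 * the code removed from amdgpu_cs_ctx_create() below. */
    	alloc_buffer.alloc_size = 4 * 1024;
    	alloc_buffer.phys_alignment = 4 * 1024;
    	alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
    	r = amdgpu_bo_alloc(dev, &alloc_buffer, &info);
    	if (r)
    		return r;

    	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
    	ibs_request.ring = 0;
    	ibs_request.number_of_ibs = 1;
    	ibs_request.ibs = ib_info;
    	ibs_request.resources = bo_list;
    	/* New: the UMD chooses where the kernel writes the user fence.
    	 * offset is counted in uint64_t slots, not bytes. */
    	ibs_request.fence_info.handle = info.buf_handle;
    	ibs_request.fence_info.offset = 0;

    	/* New signature: no fences[] out-parameter any more. */
    	r = amdgpu_cs_submit(context, 0, &ibs_request, 1);
    	if (r)
    		goto out;

    	/* New: the sequence number comes back in the request itself. */
    	fence_status.context = context;
    	fence_status.ip_type = ibs_request.ip_type;
    	fence_status.ip_instance = 0;
    	fence_status.ring = ibs_request.ring;
    	fence_status.fence = ibs_request.seq_no;
    	r = amdgpu_cs_query_fence_status(&fence_status,
    					 AMDGPU_TIMEOUT_INFINITE, 0, &expired);
    out:
    	amdgpu_bo_free(info.buf_handle);
    	return r;
    }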

View File

@@ -310,6 +310,20 @@ struct amdgpu_cs_ib_info {
 	uint32_t size;
 };
 
+/**
+ * Structure describing fence information
+ *
+ * \sa amdgpu_cs_request, amdgpu_cs_query_fence,
+ * amdgpu_cs_submit(), amdgpu_cs_query_fence_status()
+ */
+struct amdgpu_cs_fence_info {
+	/** buffer object for the fence */
+	amdgpu_bo_handle handle;
+	/** fence offset in the unit of sizeof(uint64_t) */
+	uint64_t offset;
+};
+
 /**
  * Structure describing submission request
  *
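
Note the unit convention established here: offset counts uint64_t slots rather than bytes. amdgpu_cs_submit_one() below converts it with offset * sizeof(uint64_t) before passing it to the kernel, so a UMD placing the fence in, say, the second 64-bit word of its buffer would write (hypothetical fragment; fence_bo is an assumed UMD-owned amdgpu_bo_handle):

    struct amdgpu_cs_fence_info fence_info;

    fence_info.handle = fence_bo;	/* UMD-owned fence buffer (assumed) */
    fence_info.offset = 1;		/* slot 1 -> byte offset 1 * sizeof(uint64_t) = 8 */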
@@ -357,6 +371,16 @@ struct amdgpu_cs_request {
 	 * IBs to submit. Those IBs will be submit together as single entity
 	 */
 	struct amdgpu_cs_ib_info *ibs;
+
+	/**
+	 * The returned sequence number for the command submission
+	 */
+	uint64_t seq_no;
+
+	/**
+	 * The fence information
+	 */
+	struct amdgpu_cs_fence_info fence_info;
 };
 
 /**
@@ -841,22 +865,20 @@ int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
  * from the same GPU context to the same ip:ip_instance:ring will be executed in
  * order.
  *
+ * The caller can specify the user fence buffer/location with the fence_info in the
+ * cs_request. The sequence number is returned via the 'seq_no' parameter
+ * in the ibs_request structure.
+ *
  *
  * \param dev			- \c [in] Device handle.
  *					See #amdgpu_device_initialize()
  * \param context		- \c [in] GPU Context
  * \param flags			- \c [in] Global submission flags
- * \param ibs_request		- \c [in] Pointer to submission requests.
+ * \param ibs_request		- \c [in/out] Pointer to submission requests.
  *					We could submit to the several
  *					engines/rings simulteniously as
  *					'atomic' operation
  * \param number_of_requests	- \c [in] Number of submission requests
- * \param fences		- \c [out] Pointer to array of data to get
- *					fences to identify submission
- *					requests. Timestamps are valid
- *					in this GPU context and could be used
- *					to identify/detect completion of
- *					submission request
  *
  * \return 0 on success\n
  *        <0 - Negative POSIX Error code
@@ -873,8 +895,7 @@ int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
 int amdgpu_cs_submit(amdgpu_context_handle context,
 		     uint64_t flags,
 		     struct amdgpu_cs_request *ibs_request,
-		     uint32_t number_of_requests,
-		     uint64_t *fences);
+		     uint32_t number_of_requests);
 
 /**
  * Query status of Command Buffer Submission

View File

@@ -43,8 +43,6 @@
 int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
 			 amdgpu_context_handle *context)
 {
-	struct amdgpu_bo_alloc_request alloc_buffer = {};
-	struct amdgpu_bo_alloc_result info = {};
 	struct amdgpu_context *gpu_context;
 	union drm_amdgpu_ctx args;
 	int r;
@@ -62,44 +60,22 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
 	r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
 	if (r)
-		goto error_mutex;
+		goto error;
 
-	/* Create the fence BO */
-	alloc_buffer.alloc_size = 4 * 1024;
-	alloc_buffer.phys_alignment = 4 * 1024;
-	alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
-
-	r = amdgpu_bo_alloc(dev, &alloc_buffer, &info);
-	if (r)
-		goto error_fence_alloc;
-	gpu_context->fence_bo = info.buf_handle;
-
-	r = amdgpu_bo_cpu_map(gpu_context->fence_bo, &gpu_context->fence_cpu);
-	if (r)
-		goto error_fence_map;
-
 	/* Create the context */
 	memset(&args, 0, sizeof(args));
 	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
 	if (r)
-		goto error_kernel;
+		goto error;
 
 	gpu_context->id = args.out.alloc.ctx_id;
 	*context = (amdgpu_context_handle)gpu_context;
 
 	return 0;
 
-error_kernel:
-	amdgpu_bo_cpu_unmap(gpu_context->fence_bo);
-
-error_fence_map:
-	amdgpu_bo_free(gpu_context->fence_bo);
-
-error_fence_alloc:
+error:
 	pthread_mutex_destroy(&gpu_context->sequence_mutex);
 
-error_mutex:
 	free(gpu_context);
 	return r;
 }
@@ -120,14 +96,6 @@ int amdgpu_cs_ctx_free(amdgpu_context_handle context)
 	if (NULL == context)
 		return -EINVAL;
 
-	r = amdgpu_bo_cpu_unmap(context->fence_bo);
-	if (r)
-		return r;
-
-	r = amdgpu_bo_free(context->fence_bo);
-	if (r)
-		return r;
-
 	pthread_mutex_destroy(&context->sequence_mutex);
 
 	/* now deal with kernel side */
@@ -163,11 +131,6 @@ int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
 	return r;
 }
 
-static uint32_t amdgpu_cs_fence_index(unsigned ip, unsigned ring)
-{
-	return ip * AMDGPU_CS_MAX_RINGS + ring;
-}
-
 /**
  * Submit command to kernel DRM
  * \param dev - \c [in] Device handle
@@ -179,8 +142,7 @@ static uint32_t amdgpu_cs_fence_index(unsigned ip, unsigned ring)
  * \sa amdgpu_cs_submit()
 */
 static int amdgpu_cs_submit_one(amdgpu_context_handle context,
-				struct amdgpu_cs_request *ibs_request,
-				uint64_t *fence)
+				struct amdgpu_cs_request *ibs_request)
 {
 	union drm_amdgpu_cs cs;
 	uint64_t *chunk_array;
@@ -188,6 +150,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 	struct drm_amdgpu_cs_chunk_data *chunk_data;
 	struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
 	uint32_t i, size;
+	bool user_fence;
 	int r = 0;
 
 	if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
@@ -196,13 +159,15 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 		return -EINVAL;
 	if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
 		return -EINVAL;
 
-	size = ibs_request->number_of_ibs + 2;
+	user_fence = (ibs_request->fence_info.handle != NULL);
+
+	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1);
 
 	chunk_array = alloca(sizeof(uint64_t) * size);
 	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
 
-	size = ibs_request->number_of_ibs + 1;
+	size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);
 	chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
 
 	memset(&cs, 0, sizeof(cs));
@@ -232,8 +197,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 	pthread_mutex_lock(&context->sequence_mutex);
 
-	if (ibs_request->ip_type != AMDGPU_HW_IP_UVD &&
-	    ibs_request->ip_type != AMDGPU_HW_IP_VCE) {
+	if (user_fence) {
 		i = cs.in.num_chunks++;
 
 		/* fence chunk */
@@ -243,11 +207,10 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
 
 		/* fence bo handle */
-		chunk_data[i].fence_data.handle = context->fence_bo->handle;
+		chunk_data[i].fence_data.handle = ibs_request->fence_info.handle->handle;
 		/* offset */
-		chunk_data[i].fence_data.offset = amdgpu_cs_fence_index(
-			ibs_request->ip_type, ibs_request->ring);
-		chunk_data[i].fence_data.offset *= sizeof(uint64_t);
+		chunk_data[i].fence_data.offset =
+			ibs_request->fence_info.offset * sizeof(uint64_t);
 	}
 
 	if (ibs_request->number_of_dependencies) {
@@ -283,7 +246,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 	if (r)
 		goto error_unlock;
 
-	*fence = cs.out.handle;
+	ibs_request->seq_no = cs.out.handle;
 
 error_unlock:
 	pthread_mutex_unlock(&context->sequence_mutex);
@@ -294,25 +257,23 @@ error_unlock:
 int amdgpu_cs_submit(amdgpu_context_handle context,
 		     uint64_t flags,
 		     struct amdgpu_cs_request *ibs_request,
-		     uint32_t number_of_requests,
-		     uint64_t *fences)
+		     uint32_t number_of_requests)
 {
 	uint32_t i;
 	int r;
+	uint64_t bo_size;
+	uint64_t bo_offset;
 
 	if (NULL == context)
 		return -EINVAL;
 	if (NULL == ibs_request)
 		return -EINVAL;
-	if (NULL == fences)
-		return -EINVAL;
 
 	r = 0;
 	for (i = 0; i < number_of_requests; i++) {
-		r = amdgpu_cs_submit_one(context, ibs_request, fences);
+		r = amdgpu_cs_submit_one(context, ibs_request);
 		if (r)
 			break;
-		fences++;
 		ibs_request++;
 	}
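
Because each request now carries its own seq_no, a batched submission reads one sequence number per amdgpu_cs_request instead of filling a parallel fences[] array. A hypothetical caller-side view, with reqs[] prepared as in the sketch above (use_fence() is an assumed consumer):

    /* Hypothetical fragment: n requests submitted as one 'atomic' call. */
    r = amdgpu_cs_submit(context, 0, reqs, n);
    for (i = 0; !r && i < n; i++)
    	use_fence(reqs[i].ip_type, reqs[i].ring, reqs[i].seq_no);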
@@ -380,10 +341,6 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
 				 uint64_t flags,
 				 uint32_t *expired)
 {
-	amdgpu_context_handle context;
-	uint64_t *expired_fence;
-	unsigned ip_type, ip_instance;
-	uint32_t ring;
 	bool busy = true;
 	int r;
@@ -398,57 +355,14 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
 	if (fence->ring >= AMDGPU_CS_MAX_RINGS)
 		return -EINVAL;
 
-	context = fence->context;
-	ip_type = fence->ip_type;
-	ip_instance = fence->ip_instance;
-	ring = fence->ring;
-	expired_fence = &context->expired_fences[ip_type][ip_instance][ring];
 	*expired = false;
 
-	pthread_mutex_lock(&context->sequence_mutex);
-	if (fence->fence <= *expired_fence) {
-		/* This fence value is expired already. */
-		pthread_mutex_unlock(&context->sequence_mutex);
-		*expired = true;
-		return 0;
-	}
-
-	/* Check the user fence only if the IP supports user fences. */
-	if (fence->ip_type != AMDGPU_HW_IP_UVD &&
-	    fence->ip_type != AMDGPU_HW_IP_VCE) {
-		uint64_t *signaled_fence = context->fence_cpu;
-		signaled_fence += amdgpu_cs_fence_index(ip_type, ring);
-		if (fence->fence <= *signaled_fence) {
-			/* This fence value is signaled already. */
-			*expired_fence = *signaled_fence;
-			pthread_mutex_unlock(&context->sequence_mutex);
-			*expired = true;
-			return 0;
-		}
-
-		/* Checking the user fence is enough. */
-		if (timeout_ns == 0) {
-			pthread_mutex_unlock(&context->sequence_mutex);
-			return 0;
-		}
-	}
-
-	pthread_mutex_unlock(&context->sequence_mutex);
-
-	r = amdgpu_ioctl_wait_cs(context, ip_type, ip_instance, ring,
-				 fence->fence, timeout_ns,
-				 flags, &busy);
-	if (!r && !busy) {
-		*expired = true;
-
-		pthread_mutex_lock(&context->sequence_mutex);
-		/* The thread doesn't hold sequence_mutex. Other thread could
-		   update *expired_fence already. Check whether there is a
-		   newerly expired fence. */
-		if (fence->fence > *expired_fence)
-			*expired_fence = fence->fence;
-		pthread_mutex_unlock(&context->sequence_mutex);
-	}
+	r = amdgpu_ioctl_wait_cs(fence->context, fence->ip_type,
+				 fence->ip_instance, fence->ring,
+				 fence->fence, timeout_ns, flags, &busy);
+
+	if (!r && !busy)
+		*expired = true;
 
 	return r;
 }
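
With the cached expired_fences[] bookkeeping gone, every query now goes straight to the kernel via amdgpu_ioctl_wait_cs(), and polling versus blocking is expressed purely through timeout_ns. A sketch of a non-blocking poll under the new path (fence_status filled in as in the tests below):

    uint32_t expired = 0;
    r = amdgpu_cs_query_fence_status(&fence_status, 0 /* timeout_ns: poll */,
    				 0 /* flags */, &expired);
    if (!r && !expired) {
    	/* Still busy. Before this patch, libdrm could often answer this
    	 * from its cached expired_fences[] without entering the kernel. */
    }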

View File

@@ -109,11 +109,6 @@ struct amdgpu_context {
 	/** Mutex for accessing fences and to maintain command submissions
 	    in good sequence. */
 	pthread_mutex_t sequence_mutex;
-	/** Buffer for user fences */
-	struct amdgpu_bo *fence_bo;
-	void *fence_cpu;
-	/** The newest expired fence for the ring of the ip blocks. */
-	uint64_t expired_fences[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
 	/* context id*/
 	uint32_t id;
 };

View File

@@ -209,13 +209,15 @@ static void amdgpu_command_submission_gfx_separate_ibs(void)
 	ibs_request.number_of_ibs = 2;
 	ibs_request.ibs = ib_info;
 	ibs_request.resources = bo_list;
+	ibs_request.fence_info.handle = NULL;
 
-	r = amdgpu_cs_submit(context_handle, 0,
-			     &ibs_request, 1, &fence_status.fence);
+	r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);
 
 	CU_ASSERT_EQUAL(r, 0);
 
 	fence_status.context = context_handle;
 	fence_status.ip_type = AMDGPU_HW_IP_GFX;
+	fence_status.fence = ibs_request.seq_no;
 
 	r = amdgpu_cs_query_fence_status(&fence_status,
 					 AMDGPU_TIMEOUT_INFINITE,
@@ -233,6 +235,7 @@ static void amdgpu_command_submission_gfx_separate_ibs(void)
 	r = amdgpu_cs_ctx_free(context_handle);
 	CU_ASSERT_EQUAL(r, 0);
 }
+
 static void amdgpu_command_submission_gfx_shared_ib(void)
@@ -284,13 +287,15 @@ static void amdgpu_command_submission_gfx_shared_ib(void)
 	ibs_request.number_of_ibs = 2;
 	ibs_request.ibs = ib_info;
 	ibs_request.resources = bo_list;
+	ibs_request.fence_info.handle = NULL;
 
-	r = amdgpu_cs_submit(context_handle, 0,
-			     &ibs_request, 1, &fence_status.fence);
+	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
 
 	CU_ASSERT_EQUAL(r, 0);
 
 	fence_status.context = context_handle;
 	fence_status.ip_type = AMDGPU_HW_IP_GFX;
+	fence_status.fence = ibs_request.seq_no;
 
 	r = amdgpu_cs_query_fence_status(&fence_status,
 					 AMDGPU_TIMEOUT_INFINITE,
@@ -357,15 +362,16 @@ static void amdgpu_command_submission_compute(void)
 		ibs_request.number_of_ibs = 1;
 		ibs_request.ibs = &ib_info;
 		ibs_request.resources = bo_list;
+		ibs_request.fence_info.handle = NULL;
 
 		memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
-		r = amdgpu_cs_submit(context_handle, 0,
-				     &ibs_request, 1, &fence_status.fence);
+		r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);
 		CU_ASSERT_EQUAL(r, 0);
 
 		fence_status.context = context_handle;
 		fence_status.ip_type = AMDGPU_HW_IP_COMPUTE;
 		fence_status.ring = instance;
+		fence_status.fence = ibs_request.seq_no;
 
 		r = amdgpu_cs_query_fence_status(&fence_status,
 						 AMDGPU_TIMEOUT_INFINITE,
@@ -428,6 +434,7 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
 	ibs_request->ring = instance;
 	ibs_request->number_of_ibs = 1;
 	ibs_request->ibs = ib_info;
+	ibs_request->fence_info.handle = NULL;
 
 	memcpy(all_res, resources, sizeof(resources[0]) * res_cnt);
 	all_res[res_cnt] = ib_result_handle;
@@ -439,8 +446,7 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
 	CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
 
 	/* submit CS */
-	r = amdgpu_cs_submit(context_handle, 0,
-			     ibs_request, 1, &fence_status.fence);
+	r = amdgpu_cs_submit(context_handle, 0, ibs_request, 1);
 	CU_ASSERT_EQUAL(r, 0);
 
 	r = amdgpu_bo_list_destroy(ibs_request->resources);
@@ -449,6 +455,7 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
 	fence_status.ip_type = AMDGPU_HW_IP_DMA;
 	fence_status.ring = ibs_request->ring;
 	fence_status.context = context_handle;
+	fence_status.fence = ibs_request->seq_no;
 
 	/* wait for IB accomplished */
 	r = amdgpu_cs_query_fence_status(&fence_status,

View File

@@ -130,8 +130,7 @@ static int submit(unsigned ndw, unsigned ip)
 	ibs_request.number_of_ibs = 1;
 	ibs_request.ibs = &ib_info;
 
-	r = amdgpu_cs_submit(context_handle, 0,
-			     &ibs_request, 1, &fence_status.fence);
+	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
 	if (r)
 		return r;

View File

@@ -147,8 +147,7 @@ static int submit(unsigned ndw, unsigned ip)
 	ibs_request.number_of_ibs = 1;
 	ibs_request.ibs = &ib_info;
 
-	r = amdgpu_cs_submit(context_handle, 0,
-			     &ibs_request, 1, &fence_status.fence);
+	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
 	if (r)
 		return r;