amdgpu: add amdgpu_bo_va_op for va map/unmap support v3

The following interfaces are changed accordingly:
- amdgpu_bo_alloc
- amdgpu_create_bo_from_user_mem

v2: update the interfaces
v3: remove virtual_mc_base_address from amdgpu_bo

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Jammy Zhou 2015-07-13 20:57:44 +08:00 committed by Alex Deucher
parent 95d0f35daf
commit 8aeffcc1cf
8 changed files with 391 additions and 243 deletions
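
For reference, a minimal sketch of the allocation flow after this change, mirroring the updated test helpers in tests/amdgpu/amdgpu_test.h below (assumes an already-initialized device handle dev; sizes are arbitrary and error handling is omitted):

struct amdgpu_bo_alloc_request req = {0};
amdgpu_bo_handle bo;
amdgpu_va_handle va_handle;
uint64_t va;
int r;

req.alloc_size = 4096;
req.phys_alignment = 4096;
req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

/* amdgpu_bo_alloc() now returns only the buffer handle ... */
r = amdgpu_bo_alloc(dev, &req, &bo);

/* ... so the caller reserves a VA range and maps the BO explicitly */
r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                          req.alloc_size, req.phys_alignment, 0,
                          &va, &va_handle, 0);
r = amdgpu_bo_va_op(bo, 0, req.alloc_size, va, 0, AMDGPU_VA_OP_MAP);

/* ... use the buffer at GPU address va ... */

r = amdgpu_bo_va_op(bo, 0, req.alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
r = amdgpu_va_range_free(va_handle);
r = amdgpu_bo_free(bo);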

amdgpu/amdgpu.h

@ -155,19 +155,6 @@ struct amdgpu_bo_alloc_request {
uint64_t flags;
};
/**
* Structure describing memory allocation request
*
* \sa amdgpu_bo_alloc()
*/
struct amdgpu_bo_alloc_result {
/** Assigned virtual MC Base Address */
uint64_t virtual_mc_base_address;
/** Handle of allocated memory to be used by the given process only. */
amdgpu_bo_handle buf_handle;
};
/**
* Special UMD specific information associated with buffer.
*
@ -213,13 +200,6 @@ struct amdgpu_bo_info {
*/
uint64_t phys_alignment;
/**
* Assigned virtual MC Base Address.
* \note This information will be returned only if this buffer was
* allocated in the same process otherwise 0 will be returned.
*/
uint64_t virtual_mc_base_address;
/** Heap where to allocate memory. */
uint32_t preferred_heap;
@ -242,9 +222,6 @@ struct amdgpu_bo_import_result {
/** Buffer size */
uint64_t alloc_size;
/** Assigned virtual MC Base Address */
uint64_t virtual_mc_base_address;
};
/**
@ -558,8 +535,7 @@ int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
* See #amdgpu_device_initialize()
* \param alloc_buffer - \c [in] Pointer to the structure describing an
* allocation request
* \param info - \c [out] Pointer to structure which return
* information about allocated memory
* \param buf_handle - \c [out] Allocated buffer handle
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
@ -568,7 +544,7 @@ int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
*/
int amdgpu_bo_alloc(amdgpu_device_handle dev,
struct amdgpu_bo_alloc_request *alloc_buffer,
struct amdgpu_bo_alloc_result *info);
amdgpu_bo_handle *buf_handle);
/**
* Associate opaque data with buffer to be queried by another UMD
@ -652,7 +628,7 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
* want to map to GPU address space (make GPU accessible)
* (This address must be correctly aligned).
* \param size - [in] Size of allocation (must be correctly aligned)
* \param amdgpu_bo_alloc_result - [out] Handle of allocation to be passed as
* \param buf_handle - [out] Buffer handle for the userptr memory
* resource on submission and be used in other operations.
*
*
@ -677,7 +653,7 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
*/
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu, uint64_t size,
struct amdgpu_bo_alloc_result *info);
amdgpu_bo_handle *buf_handle);
/**
* Free previously allocated memory
@ -1173,4 +1149,26 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
uint64_t *start,
uint64_t *end);
/**
* VA mapping/unmapping for the buffer object
*
* \param bo - \c [in] BO handle
* \param offset - \c [in] Start offset to map
* \param size - \c [in] Size to map
* \param addr - \c [in] Start virtual address.
* \param flags - \c [in] Supported flags for mapping/unmapping
* \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops);
#endif /* #ifdef _AMDGPU_H_ */
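
Likewise, a short sketch of the userptr path with the updated amdgpu_create_bo_from_user_mem() signature, mirroring the amdgpu_userptr_test changes in tests/amdgpu/basic_tests.c below (assumes dev and a page-aligned user allocation ptr of buf_size bytes, both hypothetical here; error handling omitted):

amdgpu_bo_handle buf_handle;
amdgpu_va_handle va_handle;
uint64_t bo_mc;
int r;

/* wrap the user pages in a BO; no GPU VA is assigned implicitly anymore */
r = amdgpu_create_bo_from_user_mem(dev, ptr, buf_size, &buf_handle);

/* reserve a VA range and map it via the new amdgpu_bo_va_op() */
r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                          buf_size, 1, 0, &bo_mc, &va_handle, 0);
r = amdgpu_bo_va_op(buf_handle, 0, buf_size, bo_mc, 0, AMDGPU_VA_OP_MAP);

/* teardown mirrors the map: unmap, free the VA range, then free the BO */
r = amdgpu_bo_va_op(buf_handle, 0, buf_size, bo_mc, 0, AMDGPU_VA_OP_UNMAP);
r = amdgpu_va_range_free(va_handle);
r = amdgpu_bo_free(buf_handle);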

amdgpu/amdgpu_bo.c

@ -52,72 +52,6 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}
/* map the buffer to the GPU virtual address space */
static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
{
amdgpu_device_handle dev = bo->dev;
struct drm_amdgpu_gem_va va;
int r;
memset(&va, 0, sizeof(va));
bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr,
bo->alloc_size, alignment, 0);
if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
return -ENOSPC;
va.handle = bo->handle;
va.operation = AMDGPU_VA_OP_MAP;
va.flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_PAGE_WRITEABLE |
AMDGPU_VM_PAGE_EXECUTABLE;
va.va_address = bo->virtual_mc_base_address;
va.offset_in_bo = 0;
va.map_size = ALIGN(bo->alloc_size, getpagesize());
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
if (r) {
amdgpu_bo_free_internal(bo);
return r;
}
return 0;
}
/* unmap the buffer from the GPU virtual address space */
static void amdgpu_bo_unmap(amdgpu_bo_handle bo)
{
amdgpu_device_handle dev = bo->dev;
struct drm_amdgpu_gem_va va;
int r;
if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
return;
memset(&va, 0, sizeof(va));
va.handle = bo->handle;
va.operation = AMDGPU_VA_OP_UNMAP;
va.flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_PAGE_WRITEABLE |
AMDGPU_VM_PAGE_EXECUTABLE;
va.va_address = bo->virtual_mc_base_address;
va.offset_in_bo = 0;
va.map_size = ALIGN(bo->alloc_size, getpagesize());
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
if (r) {
fprintf(stderr, "amdgpu: VA_OP_UNMAP failed with %d\n", r);
return;
}
amdgpu_vamgr_free_va(bo->dev->vamgr, bo->virtual_mc_base_address,
bo->alloc_size);
bo->virtual_mc_base_address = AMDGPU_INVALID_VA_ADDRESS;
}
void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
/* Remove the buffer from the hash tables. */
@ -136,7 +70,6 @@ void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
amdgpu_bo_cpu_unmap(bo);
}
amdgpu_bo_unmap(bo);
amdgpu_close_kms_handle(bo->dev, bo->handle);
pthread_mutex_destroy(&bo->cpu_access_mutex);
free(bo);
@ -144,7 +77,7 @@ void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
int amdgpu_bo_alloc(amdgpu_device_handle dev,
struct amdgpu_bo_alloc_request *alloc_buffer,
struct amdgpu_bo_alloc_result *info)
amdgpu_bo_handle *buf_handle)
{
struct amdgpu_bo *bo;
union drm_amdgpu_gem_create args;
@ -183,14 +116,7 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
r = amdgpu_bo_map(bo, alloc_buffer->phys_alignment);
if (r) {
amdgpu_bo_free_internal(bo);
return r;
}
info->buf_handle = bo;
info->virtual_mc_base_address = bo->virtual_mc_base_address;
*buf_handle = bo;
return 0;
}
@ -255,7 +181,6 @@ int amdgpu_bo_query_info(amdgpu_bo_handle bo,
memset(info, 0, sizeof(*info));
info->alloc_size = bo_info.bo_size;
info->phys_alignment = bo_info.alignment;
info->virtual_mc_base_address = bo->virtual_mc_base_address;
info->preferred_heap = bo_info.domains;
info->alloc_flags = bo_info.domain_flags;
info->metadata.flags = metadata.data.flags;
@ -421,8 +346,6 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
output->buf_handle = bo;
output->alloc_size = bo->alloc_size;
output->virtual_mc_base_address =
bo->virtual_mc_base_address;
return 0;
}
@ -484,19 +407,11 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
bo->dev = dev;
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
r = amdgpu_bo_map(bo, 1 << 20);
if (r) {
pthread_mutex_unlock(&dev->bo_table_mutex);
amdgpu_bo_reference(&bo, NULL);
return r;
}
util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pthread_mutex_unlock(&dev->bo_table_mutex);
output->buf_handle = bo;
output->alloc_size = bo->alloc_size;
output->virtual_mc_base_address = bo->virtual_mc_base_address;
return 0;
}
@ -615,7 +530,7 @@ int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
struct amdgpu_bo_alloc_result *info)
amdgpu_bo_handle *buf_handle)
{
int r;
struct amdgpu_bo *bo;
@ -647,15 +562,7 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
bo->alloc_size = size;
bo->handle = args.handle;
r = amdgpu_bo_map(bo, 1 << 12);
if (r) {
amdgpu_bo_free_internal(bo);
return r;
}
info->buf_handle = bo;
info->virtual_mc_base_address = bo->virtual_mc_base_address;
info->virtual_mc_base_address += off;
*buf_handle = bo;
return r;
}
@ -766,3 +673,32 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
free(list);
return r;
}
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops)
{
amdgpu_device_handle dev = bo->dev;
struct drm_amdgpu_gem_va va;
int r;
if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
return -EINVAL;
memset(&va, 0, sizeof(va));
va.handle = bo->handle;
va.operation = ops;
va.flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_PAGE_WRITEABLE |
AMDGPU_VM_PAGE_EXECUTABLE;
va.va_address = addr;
va.offset_in_bo = offset;
va.map_size = ALIGN(size, getpagesize());
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
return r;
}
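
Note that amdgpu_bo_va_op() takes an explicit offset and size, so a caller can map just a slice of a BO instead of the whole allocation. A hedged sketch with hypothetical values (assumes a 64 KiB BO bo and a 4 KiB VA range at va obtained as in the examples above):

/* map only the last 4 KiB page of the BO at GPU address va */
r = amdgpu_bo_va_op(bo, 60 * 1024, 4096, va, 0, AMDGPU_VA_OP_MAP);
/* ... */
r = amdgpu_bo_va_op(bo, 60 * 1024, 4096, va, 0, AMDGPU_VA_OP_UNMAP);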

amdgpu/amdgpu_internal.h

@ -88,7 +88,6 @@ struct amdgpu_bo {
struct amdgpu_device *dev;
uint64_t alloc_size;
uint64_t virtual_mc_base_address;
uint32_t handle;
uint32_t flink_name;

tests/amdgpu/amdgpu_test.h

@ -25,6 +25,7 @@
#define _AMDGPU_TEST_H_
#include "amdgpu.h"
#include "amdgpu_drm.h"
/**
* Define max. number of card in system which we are able to handle
@ -109,10 +110,11 @@ static inline amdgpu_bo_handle gpu_mem_alloc(
uint64_t alignment,
uint32_t type,
uint64_t flags,
uint64_t *vmc_addr)
uint64_t *vmc_addr,
amdgpu_va_handle *va_handle)
{
struct amdgpu_bo_alloc_request req = {0};
struct amdgpu_bo_alloc_result res = {0};
amdgpu_bo_handle buf_handle;
int r;
CU_ASSERT_NOT_EQUAL(vmc_addr, NULL);
@ -122,22 +124,50 @@ static inline amdgpu_bo_handle gpu_mem_alloc(
req.preferred_heap = type;
req.flags = flags;
r = amdgpu_bo_alloc(device_handle, &req, &res);
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
CU_ASSERT_EQUAL(r, 0);
CU_ASSERT_NOT_EQUAL(res.virtual_mc_base_address, 0);
CU_ASSERT_NOT_EQUAL(res.buf_handle, NULL);
*vmc_addr = res.virtual_mc_base_address;
return res.buf_handle;
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
size, alignment, 0, vmc_addr,
va_handle, 0);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_va_op(buf_handle, 0, size, *vmc_addr, 0, AMDGPU_VA_OP_MAP);
CU_ASSERT_EQUAL(r, 0);
return buf_handle;
}
static inline int gpu_mem_free(amdgpu_bo_handle bo,
amdgpu_va_handle va_handle,
uint64_t vmc_addr,
uint64_t size)
{
int r;
r = amdgpu_bo_va_op(bo, 0, size, vmc_addr, 0, AMDGPU_VA_OP_UNMAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_va_range_free(va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(bo);
CU_ASSERT_EQUAL(r, 0);
return 0;
}
static inline int
amdgpu_bo_alloc_and_map(amdgpu_device_handle dev, unsigned size,
unsigned alignment, unsigned heap, uint64_t flags,
amdgpu_bo_handle *bo, void **cpu, uint64_t *mc_address)
amdgpu_bo_handle *bo, void **cpu, uint64_t *mc_address,
amdgpu_va_handle *va_handle)
{
struct amdgpu_bo_alloc_request request = {};
struct amdgpu_bo_alloc_result out;
amdgpu_bo_handle buf_handle;
amdgpu_va_handle handle;
uint64_t vmc_addr;
int r;
request.alloc_size = size;
@ -145,19 +175,53 @@ amdgpu_bo_alloc_and_map(amdgpu_device_handle dev, unsigned size,
request.preferred_heap = heap;
request.flags = flags;
r = amdgpu_bo_alloc(dev, &request, &out);
r = amdgpu_bo_alloc(dev, &request, &buf_handle);
if (r)
return r;
r = amdgpu_bo_cpu_map(out.buf_handle, cpu);
if (r) {
amdgpu_bo_free(out.buf_handle);
return r;
}
r = amdgpu_va_range_alloc(dev,
amdgpu_gpu_va_range_general,
size, alignment, 0, &vmc_addr,
&handle, 0);
if (r)
goto error_va_alloc;
r = amdgpu_bo_va_op(buf_handle, 0, size, vmc_addr, 0, AMDGPU_VA_OP_MAP);
if (r)
goto error_va_map;
r = amdgpu_bo_cpu_map(buf_handle, cpu);
if (r)
goto error_cpu_map;
*bo = buf_handle;
*mc_address = vmc_addr;
*va_handle = handle;
*bo = out.buf_handle;
*mc_address = out.virtual_mc_base_address;
return 0;
error_cpu_map:
amdgpu_bo_cpu_unmap(buf_handle);
error_va_map:
amdgpu_bo_va_op(buf_handle, 0, size, vmc_addr, 0, AMDGPU_VA_OP_UNMAP);
error_va_alloc:
amdgpu_bo_free(buf_handle);
return r;
}
static inline int
amdgpu_bo_unmap_and_free(amdgpu_bo_handle bo, amdgpu_va_handle va_handle,
uint64_t mc_addr, uint64_t size)
{
amdgpu_bo_cpu_unmap(bo);
amdgpu_bo_va_op(bo, 0, size, mc_addr, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(va_handle);
amdgpu_bo_free(bo);
return 0;
}
static inline int

tests/amdgpu/basic_tests.c

@ -109,6 +109,7 @@ static void amdgpu_query_info_test(void)
static void amdgpu_memory_alloc(void)
{
amdgpu_bo_handle bo;
amdgpu_va_handle va_handle;
uint64_t bo_mc;
int r;
@ -117,9 +118,9 @@ static void amdgpu_memory_alloc(void)
4096, 4096,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
&bo_mc);
&bo_mc, &va_handle);
r = amdgpu_bo_free(bo);
r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test invisible VRAM */
@ -127,19 +128,18 @@ static void amdgpu_memory_alloc(void)
4096, 4096,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
&bo_mc);
&bo_mc, &va_handle);
r = amdgpu_bo_free(bo);
r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test GART Cacheable */
bo = gpu_mem_alloc(device_handle,
4096, 4096,
AMDGPU_GEM_DOMAIN_GTT,
0,
&bo_mc);
0, &bo_mc, &va_handle);
r = amdgpu_bo_free(bo);
r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test GART USWC */
@ -147,9 +147,9 @@ static void amdgpu_memory_alloc(void)
4096, 4096,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC,
&bo_mc);
&bo_mc, &va_handle);
r = amdgpu_bo_free(bo);
r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
CU_ASSERT_EQUAL(r, 0);
}
@ -165,6 +165,7 @@ static void amdgpu_command_submission_gfx_separate_ibs(void)
uint32_t *ptr;
uint32_t expired;
amdgpu_bo_list_handle bo_list;
amdgpu_va_handle va_handle, va_handle_ce;
int r;
r = amdgpu_cs_ctx_create(device_handle, &context_handle);
@ -173,13 +174,13 @@ static void amdgpu_command_submission_gfx_separate_ibs(void)
r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
&ib_result_handle, &ib_result_cpu,
&ib_result_mc_address);
&ib_result_mc_address, &va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
&ib_result_ce_handle, &ib_result_ce_cpu,
&ib_result_ce_mc_address);
&ib_result_ce_mc_address, &va_handle_ce);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_get_bo_list(device_handle, ib_result_handle,
@ -224,10 +225,12 @@ static void amdgpu_command_submission_gfx_separate_ibs(void)
0, &expired);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(ib_result_handle);
r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
ib_result_mc_address, 4096);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(ib_result_ce_handle);
r = amdgpu_bo_unmap_and_free(ib_result_ce_handle, va_handle_ce,
ib_result_ce_mc_address, 4096);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_list_destroy(bo_list);
@ -250,6 +253,7 @@ static void amdgpu_command_submission_gfx_shared_ib(void)
uint32_t *ptr;
uint32_t expired;
amdgpu_bo_list_handle bo_list;
amdgpu_va_handle va_handle;
int r;
r = amdgpu_cs_ctx_create(device_handle, &context_handle);
@ -258,7 +262,7 @@ static void amdgpu_command_submission_gfx_shared_ib(void)
r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
&ib_result_handle, &ib_result_cpu,
&ib_result_mc_address);
&ib_result_mc_address, &va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
@ -302,7 +306,8 @@ static void amdgpu_command_submission_gfx_shared_ib(void)
0, &expired);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(ib_result_handle);
r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
ib_result_mc_address, 4096);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_list_destroy(bo_list);
@ -333,6 +338,7 @@ static void amdgpu_command_submission_compute(void)
uint32_t expired;
int i, r, instance;
amdgpu_bo_list_handle bo_list;
amdgpu_va_handle va_handle;
r = amdgpu_cs_ctx_create(device_handle, &context_handle);
CU_ASSERT_EQUAL(r, 0);
@ -341,7 +347,7 @@ static void amdgpu_command_submission_compute(void)
r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
&ib_result_handle, &ib_result_cpu,
&ib_result_mc_address);
&ib_result_mc_address, &va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
@ -381,7 +387,8 @@ static void amdgpu_command_submission_compute(void)
r = amdgpu_bo_list_destroy(bo_list);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(ib_result_handle);
r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
ib_result_mc_address, 4096);
CU_ASSERT_EQUAL(r, 0);
}
@ -408,6 +415,7 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
uint64_t ib_result_mc_address;
struct amdgpu_cs_fence fence_status = {0};
amdgpu_bo_handle *all_res = alloca(sizeof(resources[0]) * (res_cnt + 1));
amdgpu_va_handle va_handle;
/* prepare CS */
CU_ASSERT_NOT_EQUAL(pm4_src, NULL);
@ -420,7 +428,7 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
&ib_result_handle, &ib_result_cpu,
&ib_result_mc_address);
&ib_result_mc_address, &va_handle);
CU_ASSERT_EQUAL(r, 0);
/* copy PM4 packet to ring from caller */
@ -464,7 +472,8 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
CU_ASSERT_EQUAL(r, 0);
CU_ASSERT_EQUAL(expired, true);
r = amdgpu_bo_free(ib_result_handle);
r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
ib_result_mc_address, 4096);
CU_ASSERT_EQUAL(r, 0);
}
@ -482,6 +491,7 @@ static void amdgpu_command_submission_sdma_write_linear(void)
volatile uint32_t *bo_cpu;
int i, j, r, loop;
uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
amdgpu_va_handle va_handle;
pm4 = calloc(pm4_dw, sizeof(*pm4));
CU_ASSERT_NOT_EQUAL(pm4, NULL);
@ -502,13 +512,12 @@ static void amdgpu_command_submission_sdma_write_linear(void)
loop = 0;
while(loop < 2) {
/* allocate UC bo for sDMA use */
bo = gpu_mem_alloc(device_handle,
sdma_write_length * sizeof(uint32_t),
4096, AMDGPU_GEM_DOMAIN_GTT,
gtt_flags[loop], &bo_mc);
CU_ASSERT_EQUAL(amdgpu_bo_cpu_map(bo, (void **)&bo_cpu), 0);
CU_ASSERT_NOT_EQUAL(bo_cpu, NULL);
r = amdgpu_bo_alloc_and_map(device_handle,
sdma_write_length * sizeof(uint32_t),
4096, AMDGPU_GEM_DOMAIN_GTT,
gtt_flags[loop], &bo, &bo_cpu,
&bo_mc, &va_handle);
CU_ASSERT_EQUAL(r, 0);
/* clear bo */
memset((void*)bo_cpu, 0, sdma_write_length * sizeof(uint32_t));
@ -536,7 +545,10 @@ static void amdgpu_command_submission_sdma_write_linear(void)
while(i < sdma_write_length) {
CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
}
amdgpu_bo_free(bo);
r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc,
sdma_write_length * sizeof(uint32_t));
CU_ASSERT_EQUAL(r, 0);
loop++;
}
/* clean resources */
@ -564,6 +576,7 @@ static void amdgpu_command_submission_sdma_const_fill(void)
volatile uint32_t *bo_cpu;
int i, j, r, loop;
uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
amdgpu_va_handle va_handle;
pm4 = calloc(pm4_dw, sizeof(*pm4));
CU_ASSERT_NOT_EQUAL(pm4, NULL);
@ -584,13 +597,12 @@ static void amdgpu_command_submission_sdma_const_fill(void)
loop = 0;
while(loop < 2) {
/* allocate UC bo for sDMA use */
bo = gpu_mem_alloc(device_handle,
sdma_write_length, 4096,
AMDGPU_GEM_DOMAIN_GTT,
gtt_flags[loop], &bo_mc);
CU_ASSERT_EQUAL(amdgpu_bo_cpu_map(bo, (void **)&bo_cpu), 0);
CU_ASSERT_NOT_EQUAL(bo_cpu, NULL);
r = amdgpu_bo_alloc_and_map(device_handle,
sdma_write_length, 4096,
AMDGPU_GEM_DOMAIN_GTT,
gtt_flags[loop], &bo, &bo_cpu,
&bo_mc, &va_handle);
CU_ASSERT_EQUAL(r, 0);
/* clear bo */
memset((void*)bo_cpu, 0, sdma_write_length);
@ -616,7 +628,10 @@ static void amdgpu_command_submission_sdma_const_fill(void)
while(i < (sdma_write_length / 4)) {
CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
}
amdgpu_bo_free(bo);
r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc,
sdma_write_length);
CU_ASSERT_EQUAL(r, 0);
loop++;
}
/* clean resources */
@ -644,6 +659,7 @@ static void amdgpu_command_submission_sdma_copy_linear(void)
volatile unsigned char *bo1_cpu, *bo2_cpu;
int i, j, r, loop1, loop2;
uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
amdgpu_va_handle bo1_va_handle, bo2_va_handle;
pm4 = calloc(pm4_dw, sizeof(*pm4));
CU_ASSERT_NOT_EQUAL(pm4, NULL);
@ -666,25 +682,23 @@ static void amdgpu_command_submission_sdma_copy_linear(void)
while(loop1 < 2) {
while(loop2 < 2) {
/* allocate UC bo1for sDMA use */
bo1 = gpu_mem_alloc(device_handle,
sdma_write_length, 4096,
AMDGPU_GEM_DOMAIN_GTT,
gtt_flags[loop1], &bo1_mc);
CU_ASSERT_EQUAL(amdgpu_bo_cpu_map(bo1, (void **)&bo1_cpu), 0);
CU_ASSERT_NOT_EQUAL(bo1_cpu, NULL);
r = amdgpu_bo_alloc_and_map(device_handle,
sdma_write_length, 4096,
AMDGPU_GEM_DOMAIN_GTT,
gtt_flags[loop1], &bo1, &bo1_cpu,
&bo1_mc, &bo1_va_handle);
CU_ASSERT_EQUAL(r, 0);
/* set bo1 */
memset((void*)bo1_cpu, 0xaa, sdma_write_length);
/* allocate UC bo2 for sDMA use */
bo2 = gpu_mem_alloc(device_handle,
sdma_write_length, 4096,
AMDGPU_GEM_DOMAIN_GTT,
gtt_flags[loop2], &bo2_mc);
CU_ASSERT_EQUAL(amdgpu_bo_cpu_map(bo2, (void **)&bo2_cpu), 0);
CU_ASSERT_NOT_EQUAL(bo2_cpu, NULL);
r = amdgpu_bo_alloc_and_map(device_handle,
sdma_write_length, 4096,
AMDGPU_GEM_DOMAIN_GTT,
gtt_flags[loop2], &bo2, &bo2_cpu,
&bo2_mc, &bo2_va_handle);
CU_ASSERT_EQUAL(r, 0);
/* clear bo2 */
memset((void*)bo2_cpu, 0, sdma_write_length);
@ -713,8 +727,12 @@ static void amdgpu_command_submission_sdma_copy_linear(void)
while(i < sdma_write_length) {
CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa);
}
amdgpu_bo_free(bo1);
amdgpu_bo_free(bo2);
r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
sdma_write_length);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
sdma_write_length);
CU_ASSERT_EQUAL(r, 0);
loop2++;
}
loop1++;
@ -749,9 +767,8 @@ static void amdgpu_userptr_test(void)
amdgpu_context_handle context_handle;
struct amdgpu_cs_ib_info *ib_info;
struct amdgpu_cs_request *ibs_request;
struct amdgpu_bo_alloc_result res;
memset(&res, 0, sizeof(res));
amdgpu_bo_handle buf_handle;
amdgpu_va_handle va_handle;
pm4 = calloc(pm4_dw, sizeof(*pm4));
CU_ASSERT_NOT_EQUAL(pm4, NULL);
@ -770,10 +787,19 @@ static void amdgpu_userptr_test(void)
memset(ptr, 0, BUFFER_SIZE);
r = amdgpu_create_bo_from_user_mem(device_handle,
ptr, BUFFER_SIZE, &res);
ptr, BUFFER_SIZE, &buf_handle);
CU_ASSERT_EQUAL(r, 0);
bo_mc = res.virtual_mc_base_address;
handle = res.buf_handle;
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
BUFFER_SIZE, 1, 0, &bo_mc,
&va_handle, 0);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0, AMDGPU_VA_OP_MAP);
CU_ASSERT_EQUAL(r, 0);
handle = buf_handle;
j = i = 0;
pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
@ -796,7 +822,12 @@ static void amdgpu_userptr_test(void)
free(ibs_request);
free(ib_info);
free(pm4);
r = amdgpu_bo_free(res.buf_handle);
r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0, AMDGPU_VA_OP_UNMAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_va_range_free(va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(buf_handle);
CU_ASSERT_EQUAL(r, 0);
free(ptr);

tests/amdgpu/bo_tests.c

@ -36,6 +36,7 @@ static uint32_t minor_version;
static amdgpu_bo_handle buffer_handle;
static uint64_t virtual_mc_base_address;
static amdgpu_va_handle va_handle;
static void amdgpu_bo_export_import(void);
static void amdgpu_bo_metadata(void);
@ -53,7 +54,8 @@ CU_TestInfo bo_tests[] = {
int suite_bo_tests_init(void)
{
struct amdgpu_bo_alloc_request req = {0};
struct amdgpu_bo_alloc_result res = {0};
amdgpu_bo_handle buf_handle;
uint64_t va;
int r;
r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
@ -65,20 +67,48 @@ int suite_bo_tests_init(void)
req.phys_alignment = BUFFER_ALIGN;
req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_bo_alloc(device_handle, &req, &res);
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
if (r)
return CUE_SINIT_FAILED;
buffer_handle = res.buf_handle;
virtual_mc_base_address = res.virtual_mc_base_address;
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
BUFFER_SIZE, BUFFER_ALIGN, 0,
&va, &va_handle, 0);
if (r)
goto error_va_alloc;
r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, va, 0, AMDGPU_VA_OP_MAP);
if (r)
goto error_va_map;
buffer_handle = buf_handle;
virtual_mc_base_address = va;
return CUE_SUCCESS;
error_va_map:
amdgpu_va_range_free(va_handle);
error_va_alloc:
amdgpu_bo_free(buf_handle);
return CUE_SINIT_FAILED;
}
int suite_bo_tests_clean(void)
{
int r;
r = amdgpu_bo_va_op(buffer_handle, 0, BUFFER_SIZE,
virtual_mc_base_address, 0,
AMDGPU_VA_OP_UNMAP);
if (r)
return CUE_SCLEAN_FAILED;
r = amdgpu_va_range_free(va_handle);
if (r)
return CUE_SCLEAN_FAILED;
r = amdgpu_bo_free(buffer_handle);
if (r)
return CUE_SCLEAN_FAILED;

tests/amdgpu/cs_tests.c

@ -43,6 +43,7 @@ static amdgpu_context_handle context_handle;
static amdgpu_bo_handle ib_handle;
static uint64_t ib_mc_address;
static uint32_t *ib_cpu;
static amdgpu_va_handle ib_va_handle;
static amdgpu_bo_handle resources[MAX_RESOURCES];
static unsigned num_resources;
@ -63,6 +64,7 @@ int suite_cs_tests_init(void)
amdgpu_bo_handle ib_result_handle;
void *ib_result_cpu;
uint64_t ib_result_mc_address;
amdgpu_va_handle ib_result_va_handle;
int r;
r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
@ -79,13 +81,15 @@ int suite_cs_tests_init(void)
r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
&ib_result_handle, &ib_result_cpu,
&ib_result_mc_address);
&ib_result_mc_address,
&ib_result_va_handle);
if (r)
return CUE_SINIT_FAILED;
ib_handle = ib_result_handle;
ib_mc_address = ib_result_mc_address;
ib_cpu = ib_result_cpu;
ib_va_handle = ib_result_va_handle;
return CUE_SUCCESS;
}
@ -94,7 +98,8 @@ int suite_cs_tests_clean(void)
{
int r;
r = amdgpu_bo_free(ib_handle);
r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
ib_mc_address, IB_SIZE);
if (r)
return CUE_SCLEAN_FAILED;
@ -163,39 +168,56 @@ static void uvd_cmd(uint64_t addr, unsigned cmd, int *idx)
static void amdgpu_cs_uvd_create(void)
{
struct amdgpu_bo_alloc_request req = {0};
struct amdgpu_bo_alloc_result res = {0};
amdgpu_bo_handle buf_handle;
uint64_t va = 0;
amdgpu_va_handle va_handle;
void *msg;
int i, r;
req.alloc_size = 4*1024;
req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_bo_alloc(device_handle, &req, &res);
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_cpu_map(res.buf_handle, &msg);
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
4096, 1, 0, &va,
&va_handle, 0);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_va_op(buf_handle, 0, 4096, va, 0, AMDGPU_VA_OP_MAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_cpu_map(buf_handle, &msg);
CU_ASSERT_EQUAL(r, 0);
memcpy(msg, uvd_create_msg, sizeof(uvd_create_msg));
if (family_id >= AMDGPU_FAMILY_VI)
((uint8_t*)msg)[0x10] = 7;
r = amdgpu_bo_cpu_unmap(res.buf_handle);
r = amdgpu_bo_cpu_unmap(buf_handle);
CU_ASSERT_EQUAL(r, 0);
num_resources = 0;
resources[num_resources++] = res.buf_handle;
resources[num_resources++] = buf_handle;
resources[num_resources++] = ib_handle;
i = 0;
uvd_cmd(res.virtual_mc_base_address, 0x0, &i);
uvd_cmd(va, 0x0, &i);
for (; i % 16; ++i)
ib_cpu[i] = 0x80000000;
r = submit(i, AMDGPU_HW_IP_UVD);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(resources[0]);
r = amdgpu_bo_va_op(buf_handle, 0, 4096, va, 0, AMDGPU_VA_OP_UNMAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_va_range_free(va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(buf_handle);
CU_ASSERT_EQUAL(r, 0);
}
@ -204,7 +226,9 @@ static void amdgpu_cs_uvd_decode(void)
const unsigned dpb_size = 15923584, dt_size = 737280;
uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, dt_addr, it_addr;
struct amdgpu_bo_alloc_request req = {0};
struct amdgpu_bo_alloc_result res = {0};
amdgpu_bo_handle buf_handle;
amdgpu_va_handle va_handle;
uint64_t va = 0;
uint64_t sum;
uint8_t *ptr;
int i, r;
@ -219,10 +243,20 @@ static void amdgpu_cs_uvd_decode(void)
req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_bo_alloc(device_handle, &req, &res);
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_cpu_map(res.buf_handle, (void **)&ptr);
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
req.alloc_size, 1, 0, &va,
&va_handle, 0);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
AMDGPU_VA_OP_MAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_cpu_map(buf_handle, (void **)&ptr);
CU_ASSERT_EQUAL(r, 0);
memcpy(ptr, uvd_decode_msg, sizeof(uvd_create_msg));
@ -246,10 +280,10 @@ static void amdgpu_cs_uvd_decode(void)
memset(ptr, 0, dt_size);
num_resources = 0;
resources[num_resources++] = res.buf_handle;
resources[num_resources++] = buf_handle;
resources[num_resources++] = ib_handle;
msg_addr = res.virtual_mc_base_address;
msg_addr = va;
fb_addr = msg_addr + 4*1024;
if (family_id >= AMDGPU_FAMILY_VI) {
it_addr = fb_addr + 4*1024;
@ -280,48 +314,72 @@ static void amdgpu_cs_uvd_decode(void)
sum += ptr[i];
CU_ASSERT_EQUAL(sum, 0x20345d8);
r = amdgpu_bo_cpu_unmap(res.buf_handle);
r = amdgpu_bo_cpu_unmap(buf_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(resources[0]);
r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_va_range_free(va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(buf_handle);
CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_cs_uvd_destroy(void)
{
struct amdgpu_bo_alloc_request req = {0};
struct amdgpu_bo_alloc_result res = {0};
amdgpu_bo_handle buf_handle;
amdgpu_va_handle va_handle;
uint64_t va = 0;
void *msg;
int i, r;
req.alloc_size = 4*1024;
req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_bo_alloc(device_handle, &req, &res);
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_cpu_map(res.buf_handle, &msg);
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
req.alloc_size, 1, 0, &va,
&va_handle, 0);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
AMDGPU_VA_OP_MAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_cpu_map(buf_handle, &msg);
CU_ASSERT_EQUAL(r, 0);
memcpy(msg, uvd_destroy_msg, sizeof(uvd_create_msg));
if (family_id >= AMDGPU_FAMILY_VI)
((uint8_t*)msg)[0x10] = 7;
r = amdgpu_bo_cpu_unmap(res.buf_handle);
r = amdgpu_bo_cpu_unmap(buf_handle);
CU_ASSERT_EQUAL(r, 0);
num_resources = 0;
resources[num_resources++] = res.buf_handle;
resources[num_resources++] = buf_handle;
resources[num_resources++] = ib_handle;
i = 0;
uvd_cmd(res.virtual_mc_base_address, 0x0, &i);
uvd_cmd(va, 0x0, &i);
for (; i % 16; ++i)
ib_cpu[i] = 0x80000000;
r = submit(i, AMDGPU_HW_IP_UVD);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(resources[0]);
r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_va_range_free(va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(buf_handle);
CU_ASSERT_EQUAL(r, 0);
}

tests/amdgpu/vce_tests.c

@ -39,8 +39,10 @@
#define MAX_RESOURCES 16
struct amdgpu_vce_bo {
struct amdgpu_bo *handle;
amdgpu_bo_handle handle;
amdgpu_va_handle va_handle;
uint64_t addr;
uint64_t size;
uint8_t *ptr;
};
@ -62,6 +64,7 @@ static uint32_t family_id;
static amdgpu_context_handle context_handle;
static amdgpu_bo_handle ib_handle;
static amdgpu_va_handle ib_va_handle;
static uint64_t ib_mc_address;
static uint32_t *ib_cpu;
@ -98,7 +101,7 @@ int suite_vce_tests_init(void)
r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
&ib_handle, (void**)&ib_cpu,
&ib_mc_address);
&ib_mc_address, &ib_va_handle);
if (r)
return CUE_SINIT_FAILED;
@ -111,7 +114,8 @@ int suite_vce_tests_clean(void)
{
int r;
r = amdgpu_bo_free(ib_handle);
r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
ib_mc_address, IB_SIZE);
if (r)
return CUE_SCLEAN_FAILED;
@ -170,15 +174,27 @@ static int submit(unsigned ndw, unsigned ip)
static void alloc_resource(struct amdgpu_vce_bo *vce_bo, unsigned size, unsigned domain)
{
struct amdgpu_bo_alloc_request req = {0};
struct amdgpu_bo_alloc_result res = {0};
amdgpu_bo_handle buf_handle;
amdgpu_va_handle va_handle;
uint64_t va = 0;
int r;
req.alloc_size = ALIGN(size, 4096);
req.preferred_heap = domain;
r = amdgpu_bo_alloc(device_handle, &req, &res);
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
CU_ASSERT_EQUAL(r, 0);
vce_bo->addr = res.virtual_mc_base_address;
vce_bo->handle = res.buf_handle;
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
req.alloc_size, 1, 0, &va,
&va_handle, 0);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
AMDGPU_VA_OP_MAP);
CU_ASSERT_EQUAL(r, 0);
vce_bo->addr = va;
vce_bo->handle = buf_handle;
vce_bo->size = req.alloc_size;
vce_bo->va_handle = va_handle;
r = amdgpu_bo_cpu_map(vce_bo->handle, (void **)&vce_bo->ptr);
CU_ASSERT_EQUAL(r, 0);
memset(vce_bo->ptr, 0, size);
@ -186,6 +202,22 @@ static void alloc_resource(struct amdgpu_vce_bo *vce_bo, unsigned size, unsigned
CU_ASSERT_EQUAL(r, 0);
}
static void free_resource(struct amdgpu_vce_bo *vce_bo)
{
int r;
r = amdgpu_bo_va_op(vce_bo->handle, 0, vce_bo->size,
vce_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_va_range_free(vce_bo->va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(vce_bo->handle);
CU_ASSERT_EQUAL(r, 0);
memset(vce_bo, 0, sizeof(*vce_bo));
}
static void amdgpu_cs_vce_create(void)
{
int len, r;
@ -213,8 +245,7 @@ static void amdgpu_cs_vce_create(void)
r = submit(len, AMDGPU_HW_IP_VCE);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(resources[0]);
CU_ASSERT_EQUAL(r, 0);
free_resource(&enc.fb[0]);
}
static void amdgpu_cs_vce_config(void)
@ -419,10 +450,12 @@ static void amdgpu_cs_vce_encode(void)
check_result(&enc);
}
for (i = 0; i < num_resources-1; ++i) {
r = amdgpu_bo_free(resources[i]);
CU_ASSERT_EQUAL(r, 0);
}
free_resource(&enc.fb[0]);
free_resource(&enc.fb[1]);
free_resource(&enc.bs[0]);
free_resource(&enc.bs[1]);
free_resource(&enc.vbuf);
free_resource(&enc.cpb);
}
static void amdgpu_cs_vce_destroy(void)
@ -450,6 +483,5 @@ static void amdgpu_cs_vce_destroy(void)
r = submit(len, AMDGPU_HW_IP_VCE);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_free(resources[0]);
CU_ASSERT_EQUAL(r, 0);
free_resource(&enc.fb[0]);
}