amdgpu: add base_preferred parameter to amdgpu_vamgr_find_va

A base_preferred parameter is added to amdgpu_vamgr_find_va
so that the UMD can specify a preferred VA address when allocating.

Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Ken Wang 2015-07-09 13:48:25 +08:00 committed by Alex Deucher
parent 8097d08ee4
commit 5b01908d1c
3 changed files with 35 additions and 17 deletions
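
For context, a minimal sketch of how a caller might use the new parameter; the alignment and preferred address below are made-up values for illustration, and passing 0 keeps the old first-fit behaviour (as the updated amdgpu_bo_map call in this commit shows):

	/* Hypothetical call-site sketch, not part of this commit. */
	uint64_t va;

	/* The preferred base must be a multiple of the alignment, otherwise the
	 * new code returns AMDGPU_INVALID_VA_ADDRESS immediately. */
	va = amdgpu_vamgr_find_va(dev->vamgr,
				  bo->alloc_size,	/* size */
				  4096,			/* alignment (illustrative) */
				  0x100000000ULL);	/* preferred VA; 0 = no preference */
	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return -ENOSPC;	/* preferred range not available */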

--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c

@@ -62,7 +62,7 @@ static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
 	memset(&va, 0, sizeof(va));
 
 	bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr,
-				 bo->alloc_size, alignment);
+				 bo->alloc_size, alignment, 0);
 
 	if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
 		return -ENOSPC;

--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h

@@ -123,8 +123,8 @@ struct amdgpu_bo_va_mgr* amdgpu_vamgr_get_global(struct amdgpu_device *dev);
 
 void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst, struct amdgpu_bo_va_mgr *src);
 
-uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
-			      uint64_t size, uint64_t alignment);
+uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+			      uint64_t alignment, uint64_t base_preferred);
 
 void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
 			  uint64_t size);

--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c

@@ -68,8 +68,8 @@ void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
 	*dst = src;
 }
 
-uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
-			      uint64_t size, uint64_t alignment)
+uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+			      uint64_t alignment, uint64_t base_required)
 {
 	struct amdgpu_bo_va_hole *hole, *n;
 	uint64_t offset = 0, waste = 0;
@@ -77,16 +77,27 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 	alignment = MAX2(alignment, mgr->va_alignment);
 	size = ALIGN(size, mgr->va_alignment);
 
+	if (base_required % alignment)
+		return AMDGPU_INVALID_VA_ADDRESS;
+
 	pthread_mutex_lock(&mgr->bo_va_mutex);
 	/* TODO: using more appropriate way to track the holes */
 	/* first look for a hole */
-	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
-		offset = hole->offset;
-		waste = offset % alignment;
-		waste = waste ? alignment - waste : 0;
-		offset += waste;
-		if (offset >= (hole->offset + hole->size)) {
-			continue;
+	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &vamgr.va_holes, list) {
+		if (base_required) {
+			if(hole->offset > base_required ||
+				(hole->offset + hole->size) < (base_required + size))
+				continue;
+			waste = base_required - hole->offset;
+			offset = base_required;
+		} else {
+			offset = hole->offset;
+			waste = offset % alignment;
+			waste = waste ? alignment - waste : 0;
+			offset += waste;
+			if (offset >= (hole->offset + hole->size)) {
+				continue;
+			}
 		}
 		if (!waste && hole->size == size) {
 			offset = hole->offset;
@@ -97,8 +108,7 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 		}
 		if ((hole->size - waste) > size) {
 			if (waste) {
-				n = calloc(1,
-					sizeof(struct amdgpu_bo_va_hole));
+				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
 				n->size = waste;
 				n->offset = hole->offset;
 				list_add(&n->list, &hole->list);
@@ -115,9 +125,16 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 		}
 	}
 
-	offset = mgr->va_offset;
-	waste = offset % alignment;
-	waste = waste ? alignment - waste : 0;
+	if (base_required) {
+		if (base_required < mgr->va_offset)
+			return AMDGPU_INVALID_VA_ADDRESS;
+		offset = mgr->va_offset;
+		waste = base_required - mgr->va_offset;
+	} else {
+		offset = mgr->va_offset;
+		waste = offset % alignment;
+		waste = waste ? alignment - waste : 0;
+	}
 
 	if (offset + waste + size > mgr->va_max) {
 		pthread_mutex_unlock(&mgr->bo_va_mutex);
@@ -130,6 +147,7 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 		n->offset = offset;
 		list_add(&n->list, &mgr->va_holes);
 	}
 
+	offset += waste;
 	mgr->va_offset += size + waste;
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
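
To summarize the new hole-scan logic: when a preferred base is given, a hole can only be used if it completely covers the requested range, and the gap between the hole start and the preferred base is treated as waste that the allocator splits off into a new, smaller hole. A minimal restatement of that containment test, using a hypothetical helper name for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical helper, not part of this commit: mirrors the check
	 * added in the hole-scan loop above. A hole [hole_offset,
	 * hole_offset + hole_size) can serve a request for
	 * [base_required, base_required + size) only if it covers the
	 * whole range. */
	static bool hole_covers_request(uint64_t hole_offset, uint64_t hole_size,
					uint64_t base_required, uint64_t size)
	{
		return hole_offset <= base_required &&
		       hole_offset + hole_size >= base_required + size;
	}

If no hole covers the requested range, the tail path can only satisfy the preferred base when it lies at or above mgr->va_offset; addresses below that have already been handed out, so AMDGPU_INVALID_VA_ADDRESS is returned instead.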