amdgpu: add base_preferred parameter to amdgpu_vamgr_find_va

The base_preferred parameter is added to amdgpu_vamgr_find_va
so that the UMD can specify a preferred VA address when allocating.

Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Ken Wang 2015-07-09 13:48:25 +08:00 committed by Alex Deucher
parent 8097d08ee4
commit 5b01908d1c
3 changed files with 35 additions and 17 deletions
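
For context, a minimal sketch of how a caller might use the updated allocator (note the parameter is named base_preferred in the header and base_required in the implementation below). The call shapes and the 0 sentinel come from the patch; the variables mgr, size, alignment and preferred_base are illustrative assumptions, not part of the patch:

	/* No placement preference: pass 0, matching the updated call
	 * in amdgpu_bo_map() below. */
	uint64_t va = amdgpu_vamgr_find_va(mgr, size, alignment, 0);

	/* Ask for a specific base address; the allocator returns
	 * AMDGPU_INVALID_VA_ADDRESS if the base is misaligned or the
	 * requested range cannot be provided. */
	va = amdgpu_vamgr_find_va(mgr, size, alignment, preferred_base);
	if (va == AMDGPU_INVALID_VA_ADDRESS)
		va = amdgpu_vamgr_find_va(mgr, size, alignment, 0);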

amdgpu/amdgpu_bo.c

@@ -62,7 +62,7 @@ static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
 	memset(&va, 0, sizeof(va));
 
 	bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr,
-					bo->alloc_size, alignment);
+					bo->alloc_size, alignment, 0);
 
 	if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
 		return -ENOSPC;

amdgpu/amdgpu_internal.h

@@ -123,8 +123,8 @@ struct amdgpu_bo_va_mgr* amdgpu_vamgr_get_global(struct amdgpu_device *dev);
 void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst, struct amdgpu_bo_va_mgr *src);
 
-uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
-				uint64_t size, uint64_t alignment);
+uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+				uint64_t alignment, uint64_t base_preferred);
 
 void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
 				uint64_t size);

amdgpu/amdgpu_vamgr.c

@@ -68,8 +68,8 @@ void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
 	*dst = src;
 }
 
-uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
-				uint64_t size, uint64_t alignment)
+uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+				uint64_t alignment, uint64_t base_required)
 {
 	struct amdgpu_bo_va_hole *hole, *n;
 	uint64_t offset = 0, waste = 0;
@@ -77,10 +77,20 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 	alignment = MAX2(alignment, mgr->va_alignment);
 	size = ALIGN(size, mgr->va_alignment);
 
+	if (base_required % alignment)
+		return AMDGPU_INVALID_VA_ADDRESS;
+
 	pthread_mutex_lock(&mgr->bo_va_mutex);
 	/* TODO: using more appropriate way to track the holes */
 	/* first look for a hole */
-	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &vamgr.va_holes, list) {
+		if (base_required) {
+			if (hole->offset > base_required ||
+			    (hole->offset + hole->size) < (base_required + size))
+				continue;
+			waste = base_required - hole->offset;
+			offset = base_required;
+		} else {
 		offset = hole->offset;
 		waste = offset % alignment;
 		waste = waste ? alignment - waste : 0;
@@ -88,6 +98,7 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 		if (offset >= (hole->offset + hole->size)) {
 			continue;
 		}
+		}
 		if (!waste && hole->size == size) {
 			offset = hole->offset;
 			list_del(&hole->list);
@@ -97,8 +108,7 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 		}
 		if ((hole->size - waste) > size) {
 			if (waste) {
-				n = calloc(1,
-					sizeof(struct amdgpu_bo_va_hole));
+				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
 				n->size = waste;
 				n->offset = hole->offset;
 				list_add(&n->list, &hole->list);
@@ -115,9 +125,16 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 		}
 	}
 
+	if (base_required) {
+		if (base_required < mgr->va_offset)
+			return AMDGPU_INVALID_VA_ADDRESS;
+		offset = mgr->va_offset;
+		waste = base_required - mgr->va_offset;
+	} else {
 	offset = mgr->va_offset;
 	waste = offset % alignment;
 	waste = waste ? alignment - waste : 0;
+	}
 
 	if (offset + waste + size > mgr->va_max) {
 		pthread_mutex_unlock(&mgr->bo_va_mutex);
@@ -130,6 +147,7 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
 		n->offset = offset;
 		list_add(&n->list, &mgr->va_holes);
 	}
+
 	offset += waste;
 	mgr->va_offset += size + waste;
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
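
The hole-reuse path added above only succeeds when the requested range lies entirely inside an existing hole. A standalone restatement of that containment test, using a hypothetical helper name (not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* True when [base_required, base_required + size) lies inside
	 * [hole_offset, hole_offset + hole_size); this is the negation of
	 * the 'continue' condition in the hole loop above. */
	static bool hole_fits_base(uint64_t hole_offset, uint64_t hole_size,
				   uint64_t base_required, uint64_t size)
	{
		return hole_offset <= base_required &&
		       base_required + size <= hole_offset + hole_size;
	}

For example, a hole at offset 0x1000 of size 0x4000 can satisfy a request for 0x2000 bytes at base_required 0x2000: the waste of 0x1000 in front of the requested range is split off as a new, smaller hole by the existing (hole->size - waste) > size branch.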