amdgpu: add flag to support 32bit VA address v4

The AMDGPU_VA_RANGE_32_BIT flag is added so that amdgpu_va_range_alloc
can request a VA range in the 32bit address space.

The 32bit address space is reserved at initialization time and managed
with a separate VAMGR that is carved out of the global VAMGR. If not
enough VA space is available in the range above 4GB, this reserved
range is used as a fallback.
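
For illustration only (not part of the patch), requesting a range below
4GB from user space would look roughly like this; dev is assumed to be a
handle returned by amdgpu_device_initialize, and the size is arbitrary:

    uint64_t va = 0;
    amdgpu_va_handle va_handle;
    int r;

    /* Ask for 4MB of GPU virtual address space below 4GB; alignment and
     * base address are left to the VA manager. */
    r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                              4 * 1024 * 1024, 0, 0,
                              &va, &va_handle, AMDGPU_VA_RANGE_32_BIT);
    if (r == 0) {
        /* map a buffer object at 'va' (e.g. with amdgpu_bo_va_op()),
         * use it, then release the range */
        amdgpu_va_range_free(va_handle);
    }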

v2: add comment for AMDGPU_VA_RANGE_32_BIT, and add vamgr to va_range
v3: rebase to Emil's drm_private series
v4: fix one warning

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Jammy Zhou 2015-08-17 11:09:08 +08:00 committed by Alex Deucher
parent 102ab6f004
commit ffa305d0fc
4 changed files with 59 additions and 7 deletions


@@ -1074,6 +1074,11 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
 			     unsigned count, uint32_t instance, uint32_t flags,
 			     uint32_t *values);
 
+/**
+ * Flag to request VA address range in the 32bit address space
+ */
+#define AMDGPU_VA_RANGE_32_BIT 0x1
+
 /**
  * Allocate virtual address range
  *


@@ -43,6 +43,7 @@
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
 #include "util_hash_table.h"
+#include "util_math.h"
 
 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
 #define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
@@ -173,6 +174,7 @@ int amdgpu_device_initialize(int fd,
 	int flag_auth = 0;
 	int flag_authexist=0;
 	uint32_t accel_working = 0;
+	uint64_t start, max;
 
 	*device_handle = NULL;
 
@@ -251,6 +253,19 @@ int amdgpu_device_initialize(int fd,
 
 	dev->vamgr = amdgpu_vamgr_get_global(dev);
 
+	max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
+	start = amdgpu_vamgr_find_va(dev->vamgr,
+				     max - dev->dev_info.virtual_address_offset,
+				     dev->dev_info.virtual_address_alignment, 0);
+	if (start > 0xffffffff)
+		goto free_va; /* shouldn't get here */
+
+	dev->vamgr_32 = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
+	if (dev->vamgr_32 == NULL)
+		goto free_va;
+	amdgpu_vamgr_init(dev->vamgr_32, start, max,
+			  dev->dev_info.virtual_address_alignment);
+
 	*major_version = dev->major_version;
 	*minor_version = dev->minor_version;
 	*device_handle = dev;
@@ -259,6 +274,11 @@ int amdgpu_device_initialize(int fd,
 
 	return 0;
 
+free_va:
+	r = -ENOMEM;
+	amdgpu_vamgr_free_va(dev->vamgr, start,
+			     max - dev->dev_info.virtual_address_offset);
+
 cleanup:
 	if (dev->fd >= 0)
 		close(dev->fd);
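
The reservation above takes everything between the device's VA offset and
the 4GB boundary (or virtual_address_max, whichever is lower) out of the
global manager and hands it to vamgr_32; since that window sits at the
bottom of the global range, the returned start is expected to stay below
4GB, hence the "shouldn't get here" bail-out through free_va. A minimal
sketch of the arithmetic, for illustration only (the helper name is made
up here, and MIN2 is normally provided by util_math.h):

    #include <stdint.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    /* Size of the window reserved for the 32bit VA manager: from the
     * device's virtual_address_offset up to the 4GB boundary (or
     * virtual_address_max, whichever is lower). This is the size taken
     * out of the global manager with amdgpu_vamgr_find_va() at init. */
    static uint64_t reserved_32bit_size(uint64_t va_offset, uint64_t va_max)
    {
        uint64_t max = MIN2(va_max, 0xffffffffull);

        return max - va_offset;
    }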


@@ -66,6 +66,7 @@ struct amdgpu_va {
 	uint64_t address;
 	uint64_t size;
 	enum amdgpu_gpu_va_range range;
+	struct amdgpu_bo_va_mgr *vamgr;
 };
 
 struct amdgpu_device {
@@ -83,7 +84,10 @@ struct amdgpu_device {
 	pthread_mutex_t bo_table_mutex;
 	struct drm_amdgpu_info_device dev_info;
 	struct amdgpu_gpu_info info;
+	/** The global VA manager for the whole virtual address space */
 	struct amdgpu_bo_va_mgr *vamgr;
+	/** The VA manager for the 32bit address space */
+	struct amdgpu_bo_va_mgr *vamgr_32;
 };
 
 struct amdgpu_bo {
@@ -128,6 +132,11 @@ drm_private void
 amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
 		       struct amdgpu_bo_va_mgr *src);
 
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+				   uint64_t max, uint64_t alignment);
+
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
+
 drm_private uint64_t
 amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 		     uint64_t alignment, uint64_t base_required);


@@ -46,7 +46,7 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
 	return -EINVAL;
 }
 
-static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
 		  uint64_t max, uint64_t alignment)
 {
 	mgr->va_offset = start;
@@ -57,7 +57,7 @@ static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
 	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
 }
 
-static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
 {
 	struct amdgpu_bo_va_hole *hole;
 	LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) {
@@ -255,23 +255,39 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
 			  amdgpu_va_handle *va_range_handle,
 			  uint64_t flags)
 {
-	va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment);
-	size = ALIGN(size, vamgr.va_alignment);
+	struct amdgpu_bo_va_mgr *vamgr;
 
-	*va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size,
+	if (flags & AMDGPU_VA_RANGE_32_BIT)
+		vamgr = dev->vamgr_32;
+	else
+		vamgr = dev->vamgr;
+
+	va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
+	size = ALIGN(size, vamgr->va_alignment);
+
+	*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
 					va_base_alignment, va_base_required);
 
+	if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
+	    (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
+		/* fallback to 32bit address */
+		vamgr = dev->vamgr_32;
+		*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
+					va_base_alignment, va_base_required);
+	}
+
 	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
 		struct amdgpu_va* va;
 		va = calloc(1, sizeof(struct amdgpu_va));
 		if(!va){
-			amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated, size);
+			amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
 			return -ENOMEM;
 		}
 		va->dev = dev;
 		va->address = *va_base_allocated;
 		va->size = size;
 		va->range = va_range_type;
+		va->vamgr = vamgr;
 		*va_range_handle = va;
 	} else {
 		return -EINVAL;
@@ -284,7 +300,9 @@ int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
 {
 	if(!va_range_handle || !va_range_handle->address)
 		return 0;
-	amdgpu_vamgr_free_va(va_range_handle->dev->vamgr, va_range_handle->address,
+
+	amdgpu_vamgr_free_va(va_range_handle->vamgr,
+			     va_range_handle->address,
 			     va_range_handle->size);
 	free(va_range_handle);
 	return 0;
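
The fallback path above is transparent to callers that did not pass
AMDGPU_VA_RANGE_32_BIT, and because each amdgpu_va now records the manager
it came from, amdgpu_va_range_free returns the range to whichever manager
satisfied it. A rough, illustrative sketch of how a caller could notice
that an ordinary request was served from the 32bit window (the size and
the below-4GB check are only an example):

    uint64_t va = 0;
    amdgpu_va_handle handle;

    /* Ordinary request, no AMDGPU_VA_RANGE_32_BIT flag set. */
    if (!amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                               1ull << 20, 0, 0, &va, &handle, 0)) {
        if (va + (1ull << 20) <= 0x100000000ull) {
            /* The whole range fits below 4GB, so the space above 4GB was
             * exhausted and the reserved 32bit range was used instead. */
        }
        /* Released through va->vamgr, whichever manager provided it. */
        amdgpu_va_range_free(handle);
    }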