amdgpu: make vamgr per device v2

Each device can have its own vamgr, so make it per device now. This
fixes the failure seen when multiple GPUs are used in a single process.

v2: rebase

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>

parent ffa305d0fc
commit 56d8dd6a9c
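The core of the change is that each device now owns its own struct amdgpu_bo_va_mgr instead of referencing one process-wide manager, so two GPUs no longer hand out virtual addresses from the same allocator. The following is a minimal sketch of the per-device lifecycle this patch introduces; the helper names device_vamgr_setup/device_vamgr_teardown are hypothetical and only group the lines added by the diff below, and the struct fields and amdgpu_vamgr_* helpers come from amdgpu_internal.h:

/* Sketch only: mirrors the init/teardown added by this patch.
 * Needs <stdlib.h>, <errno.h> and the internal amdgpu_internal.h
 * definitions; not the exact library code. */
static int device_vamgr_setup(amdgpu_device_handle dev)
{
	/* one manager per device, sized from this device's VA range */
	dev->vamgr = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
	if (dev->vamgr == NULL)
		return -ENOMEM;

	amdgpu_vamgr_init(dev->vamgr, dev->dev_info.virtual_address_offset,
			  dev->dev_info.virtual_address_max,
			  dev->dev_info.virtual_address_alignment);
	return 0;
}

static void device_vamgr_teardown(amdgpu_device_handle dev)
{
	/* no refcounting any more: the device owns the manager outright */
	amdgpu_vamgr_deinit(dev->vamgr);
	free(dev->vamgr);
}

The unified diff follows.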
@@ -130,7 +130,8 @@ static int amdgpu_get_auth(int fd, int *auth)
 
 static void amdgpu_device_free_internal(amdgpu_device_handle dev)
 {
-	amdgpu_vamgr_reference(&dev->vamgr, NULL);
+	amdgpu_vamgr_deinit(dev->vamgr);
+	free(dev->vamgr);
 	util_hash_table_destroy(dev->bo_flink_names);
 	util_hash_table_destroy(dev->bo_handles);
 	pthread_mutex_destroy(&dev->bo_table_mutex);
@@ -251,7 +252,13 @@ int amdgpu_device_initialize(int fd,
 	if (r)
 		goto cleanup;
 
-	dev->vamgr = amdgpu_vamgr_get_global(dev);
+	dev->vamgr = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
+	if (dev->vamgr == NULL)
+		goto cleanup;
+
+	amdgpu_vamgr_init(dev->vamgr, dev->dev_info.virtual_address_offset,
+			  dev->dev_info.virtual_address_max,
+			  dev->dev_info.virtual_address_alignment);
 
 	max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
 	start = amdgpu_vamgr_find_va(dev->vamgr,
@@ -278,6 +285,8 @@ free_va:
 	r = -ENOMEM;
 	amdgpu_vamgr_free_va(dev->vamgr, start,
 			     max - dev->dev_info.virtual_address_offset);
+	amdgpu_vamgr_deinit(dev->vamgr);
+	free(dev->vamgr);
 
 cleanup:
 	if (dev->fd >= 0)
@@ -52,7 +52,6 @@ struct amdgpu_bo_va_hole {
 };
 
 struct amdgpu_bo_va_mgr {
-	atomic_t refcount;
 	/* the start virtual address */
 	uint64_t va_offset;
 	uint64_t va_max;
@@ -125,13 +124,6 @@ struct amdgpu_context {
 
 drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
 
-drm_private struct amdgpu_bo_va_mgr*
-amdgpu_vamgr_get_global(struct amdgpu_device *dev);
-
-drm_private void
-amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
-		       struct amdgpu_bo_va_mgr *src);
-
 drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
 				   uint64_t max, uint64_t alignment);
 
@@ -33,8 +33,6 @@
 #include "amdgpu_internal.h"
 #include "util_math.h"
 
-static struct amdgpu_bo_va_mgr vamgr = {{0}};
-
 int amdgpu_va_range_query(amdgpu_device_handle dev,
 			  enum amdgpu_gpu_va_range type, uint64_t *start, uint64_t *end)
 {
@@ -67,28 +65,6 @@ drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
 	pthread_mutex_destroy(&mgr->bo_va_mutex);
 }
 
-drm_private struct amdgpu_bo_va_mgr *
-amdgpu_vamgr_get_global(struct amdgpu_device *dev)
-{
-	int ref;
-	ref = atomic_inc_return(&vamgr.refcount);
-
-	if (ref == 1)
-		amdgpu_vamgr_init(&vamgr, dev->dev_info.virtual_address_offset,
-				  dev->dev_info.virtual_address_max,
-				  dev->dev_info.virtual_address_alignment);
-	return &vamgr;
-}
-
-drm_private void
-amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
-		       struct amdgpu_bo_va_mgr *src)
-{
-	if (update_references(&(*dst)->refcount, NULL))
-		amdgpu_vamgr_deinit(*dst);
-	*dst = src;
-}
-
 drm_private uint64_t
 amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 		     uint64_t alignment, uint64_t base_required)
@@ -105,7 +81,7 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 	pthread_mutex_lock(&mgr->bo_va_mutex);
 	/* TODO: using more appropriate way to track the holes */
 	/* first look for a hole */
-	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &vamgr.va_holes, list) {
+	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
 		if (base_required) {
 			if(hole->offset > base_required ||
 			   (hole->offset + hole->size) < (base_required + size))