From 9066acf10900fb6afaa49383116324d94e5aaacd Mon Sep 17 00:00:00 2001
From: "monk.liu"
Date: Wed, 13 May 2015 13:58:43 +0800
Subject: [PATCH] amdgpu: fix code alignment

Signed-off-by: monk.liu
---
 amdgpu/amdgpu_vamgr.c | 218 +++++++++++++++++++++---------------------
 1 file changed, 109 insertions(+), 109 deletions(-)

diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 070ecc47..877e0baa 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -43,134 +43,134 @@ void amdgpu_vamgr_init(struct amdgpu_device *dev)
 uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size, uint64_t alignment)
 {
-    struct amdgpu_bo_va_hole *hole, *n;
-    uint64_t offset = 0, waste = 0;
+	struct amdgpu_bo_va_hole *hole, *n;
+	uint64_t offset = 0, waste = 0;

-    alignment = MAX2(alignment, mgr->va_alignment);
-    size = ALIGN(size, mgr->va_alignment);
+	alignment = MAX2(alignment, mgr->va_alignment);
+	size = ALIGN(size, mgr->va_alignment);

-    pthread_mutex_lock(&mgr->bo_va_mutex);
-    /* TODO: using more appropriate way to track the holes */
-    /* first look for a hole */
-    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
-        offset = hole->offset;
-        waste = offset % alignment;
-        waste = waste ? alignment - waste : 0;
-        offset += waste;
-        if (offset >= (hole->offset + hole->size)) {
-            continue;
-        }
-        if (!waste && hole->size == size) {
-            offset = hole->offset;
-            list_del(&hole->list);
-            free(hole);
-            pthread_mutex_unlock(&mgr->bo_va_mutex);
-            return offset;
-        }
-        if ((hole->size - waste) > size) {
-            if (waste) {
-                n = calloc(1,
-                        sizeof(struct amdgpu_bo_va_hole));
-                n->size = waste;
-                n->offset = hole->offset;
-                list_add(&n->list, &hole->list);
-            }
-            hole->size -= (size + waste);
-            hole->offset += size + waste;
-            pthread_mutex_unlock(&mgr->bo_va_mutex);
-            return offset;
-        }
-        if ((hole->size - waste) == size) {
-            hole->size = waste;
-            pthread_mutex_unlock(&mgr->bo_va_mutex);
-            return offset;
-        }
-    }
+	pthread_mutex_lock(&mgr->bo_va_mutex);
+	/* TODO: using more appropriate way to track the holes */
+	/* first look for a hole */
+	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+		offset = hole->offset;
+		waste = offset % alignment;
+		waste = waste ? alignment - waste : 0;
+		offset += waste;
+		if (offset >= (hole->offset + hole->size)) {
+			continue;
+		}
+		if (!waste && hole->size == size) {
+			offset = hole->offset;
+			list_del(&hole->list);
+			free(hole);
+			pthread_mutex_unlock(&mgr->bo_va_mutex);
+			return offset;
+		}
+		if ((hole->size - waste) > size) {
+			if (waste) {
+				n = calloc(1,
+					   sizeof(struct amdgpu_bo_va_hole));
+				n->size = waste;
+				n->offset = hole->offset;
+				list_add(&n->list, &hole->list);
+			}
+			hole->size -= (size + waste);
+			hole->offset += size + waste;
+			pthread_mutex_unlock(&mgr->bo_va_mutex);
+			return offset;
+		}
+		if ((hole->size - waste) == size) {
+			hole->size = waste;
+			pthread_mutex_unlock(&mgr->bo_va_mutex);
+			return offset;
+		}
+	}

-    offset = mgr->va_offset;
-    waste = offset % alignment;
-    waste = waste ? alignment - waste : 0;
+	offset = mgr->va_offset;
+	waste = offset % alignment;
+	waste = waste ? alignment - waste : 0;
 	if (offset + waste + size > mgr->va_max) {
 		pthread_mutex_unlock(&mgr->bo_va_mutex);
 		return AMDGPU_INVALID_VA_ADDRESS;
 	}
-    if (waste) {
-        n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-        n->size = waste;
-        n->offset = offset;
-        list_add(&n->list, &mgr->va_holes);
-    }
-    offset += waste;
-    mgr->va_offset += size + waste;
-    pthread_mutex_unlock(&mgr->bo_va_mutex);
-    return offset;
+	if (waste) {
+		n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+		n->size = waste;
+		n->offset = offset;
+		list_add(&n->list, &mgr->va_holes);
+	}
+	offset += waste;
+	mgr->va_offset += size + waste;
+	pthread_mutex_unlock(&mgr->bo_va_mutex);
+	return offset;
 }

 void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 {
-    struct amdgpu_bo_va_hole *hole;
+	struct amdgpu_bo_va_hole *hole;

-    size = ALIGN(size, mgr->va_alignment);
+	size = ALIGN(size, mgr->va_alignment);

-    pthread_mutex_lock(&mgr->bo_va_mutex);
-    if ((va + size) == mgr->va_offset) {
-        mgr->va_offset = va;
-        /* Delete uppermost hole if it reaches the new top */
-        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
-            hole = container_of(mgr->va_holes.next, hole, list);
-            if ((hole->offset + hole->size) == va) {
-                mgr->va_offset = hole->offset;
-                list_del(&hole->list);
-                free(hole);
-            }
-        }
-    } else {
-        struct amdgpu_bo_va_hole *next;
+	pthread_mutex_lock(&mgr->bo_va_mutex);
+	if ((va + size) == mgr->va_offset) {
+		mgr->va_offset = va;
+		/* Delete uppermost hole if it reaches the new top */
+		if (!LIST_IS_EMPTY(&mgr->va_holes)) {
+			hole = container_of(mgr->va_holes.next, hole, list);
+			if ((hole->offset + hole->size) == va) {
+				mgr->va_offset = hole->offset;
+				list_del(&hole->list);
+				free(hole);
+			}
+		}
+	} else {
+		struct amdgpu_bo_va_hole *next;

-        hole = container_of(&mgr->va_holes, hole, list);
-        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
-            if (next->offset < va)
-                break;
-            hole = next;
-        }
+		hole = container_of(&mgr->va_holes, hole, list);
+		LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
+			if (next->offset < va)
+				break;
+			hole = next;
+		}

-        if (&hole->list != &mgr->va_holes) {
-            /* Grow upper hole if it's adjacent */
-            if (hole->offset == (va + size)) {
-                hole->offset = va;
-                hole->size += size;
-                /* Merge lower hole if it's adjacent */
-                if (next != hole
-                    && &next->list != &mgr->va_holes
-                    && (next->offset + next->size) == va) {
-                    next->size += hole->size;
-                    list_del(&hole->list);
-                    free(hole);
-                }
-                goto out;
-            }
-        }
+		if (&hole->list != &mgr->va_holes) {
+			/* Grow upper hole if it's adjacent */
+			if (hole->offset == (va + size)) {
+				hole->offset = va;
+				hole->size += size;
+				/* Merge lower hole if it's adjacent */
+				if (next != hole
+				    && &next->list != &mgr->va_holes
+				    && (next->offset + next->size) == va) {
+					next->size += hole->size;
+					list_del(&hole->list);
+					free(hole);
+				}
+				goto out;
+			}
+		}

-        /* Grow lower hole if it's adjacent */
-        if (next != hole && &next->list != &mgr->va_holes &&
-            (next->offset + next->size) == va) {
-            next->size += size;
-            goto out;
-        }
+		/* Grow lower hole if it's adjacent */
+		if (next != hole && &next->list != &mgr->va_holes &&
+		    (next->offset + next->size) == va) {
+			next->size += size;
+			goto out;
+		}

-        /* FIXME on allocation failure we just lose virtual address space
-         * maybe print a warning
-         */
-        next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-        if (next) {
-            next->size = size;
-            next->offset = va;
-            list_add(&next->list, &hole->list);
-        }
-    }
+		/* FIXME on allocation failure we just lose virtual address space
+		 * maybe print a warning
+		 */
+		next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+		if (next) {
+			next->size = size;
+			next->offset = va;
+			list_add(&next->list, &hole->list);
+		}
+	}
 out:
-    pthread_mutex_unlock(&mgr->bo_va_mutex);
+	pthread_mutex_unlock(&mgr->bo_va_mutex);
 }
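
Note (not part of the patch): the hunks above only re-indent the first-fit VA allocator; there is no functional change. For readers skimming the re-indented code, the sketch below is a standalone, simplified illustration of the alignment/"waste" handling that amdgpu_vamgr_find_va() performs. The struct va_hole array and the align_up()/find_va() helpers are invented for this example and are not libdrm API.

/*
 * Standalone sketch (assumed names, not libdrm code) of first-fit,
 * alignment-aware allocation in the spirit of amdgpu_vamgr_find_va().
 */
#include <stdint.h>
#include <stdio.h>

struct va_hole {
	uint64_t offset;
	uint64_t size;
};

/* Round offset up to the requested alignment and report the skipped
 * ("wasted") bytes, mirroring the waste = offset % alignment logic. */
static uint64_t align_up(uint64_t offset, uint64_t alignment, uint64_t *waste)
{
	uint64_t rem = offset % alignment;

	*waste = rem ? alignment - rem : 0;
	return offset + *waste;
}

/* First-fit search over a small hole array; returns the aligned offset
 * or UINT64_MAX when no hole can satisfy the request. */
static uint64_t find_va(struct va_hole *holes, int count,
			uint64_t size, uint64_t alignment)
{
	for (int i = 0; i < count; i++) {
		uint64_t waste;
		uint64_t offset = align_up(holes[i].offset, alignment, &waste);

		if (waste + size > holes[i].size)
			continue;	/* hole too small once aligned */

		/* Shrink the hole from the front. The leading waste is
		 * simply dropped here; the real allocator keeps it as a
		 * new, smaller hole on its list. */
		holes[i].offset += waste + size;
		holes[i].size -= waste + size;
		return offset;
	}
	return UINT64_MAX;
}

int main(void)
{
	struct va_hole holes[] = { { 0x1003, 0x2000 }, { 0x8000, 0x10000 } };

	/* 0x1003 rounded up to 0x100 alignment yields 0x1100. */
	printf("va = 0x%llx\n",
	       (unsigned long long)find_va(holes, 2, 0x800, 0x100));
	return 0;
}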