drm/radeon: add uncached allocator to drm ttm code.
parent
994f240503
commit
241a9b6414
|
@ -15,7 +15,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
|
|||
drm_hashtab.o drm_mm.o drm_compat.o \
|
||||
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \
|
||||
drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
|
||||
drm_regman.o drm_vm_nopage_compat.o drm_gem.o
|
||||
drm_regman.o drm_vm_nopage_compat.o drm_gem.o drm_uncached.o
|
||||
tdfx-objs := tdfx_drv.o
|
||||
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
|
||||
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
|
||||
|
|
|
@ -1850,6 +1850,7 @@ int drm_bo_driver_finish(struct drm_device *dev)
|
|||
__free_page(bm->dummy_read_page);
|
||||
}
|
||||
|
||||
drm_uncached_fini();
|
||||
out:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
|
@ -1869,6 +1870,8 @@ int drm_bo_driver_init(struct drm_device *dev)
|
|||
struct drm_buffer_manager *bm = &dev->bm;
|
||||
int ret = -EINVAL;
|
||||
|
||||
drm_uncached_init();
|
||||
|
||||
bm->dummy_read_page = NULL;
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (!driver)
|
||||
|
|
|
@ -661,6 +661,9 @@ struct drm_bo_lock {
|
|||
#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
|
||||
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
|
||||
|
||||
#define _DRM_BM_ALLOCATOR_CACHED 0x0
|
||||
#define _DRM_BM_ALLOCATOR_UNCACHED 0x1
|
||||
|
||||
struct drm_buffer_manager {
|
||||
struct drm_bo_lock bm_lock;
|
||||
struct mutex evict_mutex;
|
||||
|
@ -679,6 +682,7 @@ struct drm_buffer_manager {
|
|||
unsigned long cur_pages;
|
||||
atomic_t count;
|
||||
struct page *dummy_read_page;
|
||||
int allocator_type;
|
||||
};
|
||||
|
||||
struct drm_bo_driver {
|
||||
|
@ -894,6 +898,15 @@ extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * m
|
|||
void **virtual);
|
||||
extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
|
||||
void *virtual);
|
||||
|
||||
/*
|
||||
* drm_uncached.c
|
||||
*/
|
||||
extern int drm_uncached_init(void);
|
||||
extern void drm_uncached_fini(void);
|
||||
extern struct page *drm_get_uncached_page(void);
|
||||
extern void drm_put_uncached_page(struct page *page);
|
||||
|
||||
#ifdef CONFIG_DEBUG_MUTEXES
|
||||
#define DRM_ASSERT_LOCKED(_mutex) \
|
||||
BUG_ON(!mutex_is_locked(_mutex) || \
|
||||
|
|
|
@ -120,14 +120,18 @@ static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
|
|||
ttm->pages = NULL;
|
||||
}
|
||||
|
||||
static struct page *drm_ttm_alloc_page(void)
|
||||
static struct page *drm_ttm_alloc_page(struct drm_ttm *ttm)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
if (drm_alloc_memctl(PAGE_SIZE))
|
||||
return NULL;
|
||||
|
||||
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
|
||||
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
|
||||
page = drm_get_uncached_page();
|
||||
else
|
||||
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
|
||||
|
||||
if (!page) {
|
||||
drm_free_memctl(PAGE_SIZE);
|
||||
return NULL;
|
||||
|
@ -149,6 +153,9 @@ static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
|
|||
struct page **cur_page;
|
||||
int do_tlbflush = 0;
|
||||
|
||||
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
|
||||
return 0;
|
||||
|
||||
if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
|
||||
return 0;
|
||||
|
||||
|
@ -215,14 +222,18 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
|
|||
for (i = 0; i < ttm->num_pages; ++i) {
|
||||
cur_page = ttm->pages + i;
|
||||
if (*cur_page) {
|
||||
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
|
||||
drm_put_uncached_page(*cur_page);
|
||||
else {
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
|
||||
ClearPageReserved(*cur_page);
|
||||
ClearPageReserved(*cur_page);
|
||||
#endif
|
||||
if (page_count(*cur_page) != 1)
|
||||
DRM_ERROR("Erroneous page count. Leaking pages.\n");
|
||||
if (page_mapped(*cur_page))
|
||||
DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
|
||||
__free_page(*cur_page);
|
||||
if (page_count(*cur_page) != 1)
|
||||
DRM_ERROR("Erroneous page count. Leaking pages.\n");
|
||||
if (page_mapped(*cur_page))
|
||||
DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
|
||||
__free_page(*cur_page);
|
||||
}
|
||||
drm_free_memctl(PAGE_SIZE);
|
||||
--bm->cur_pages;
|
||||
}
|
||||
|
@ -268,7 +279,7 @@ struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
|
|||
struct drm_buffer_manager *bm = &ttm->dev->bm;
|
||||
|
||||
while(NULL == (p = ttm->pages[index])) {
|
||||
p = drm_ttm_alloc_page();
|
||||
p = drm_ttm_alloc_page(ttm);
|
||||
if (!p)
|
||||
return NULL;
|
||||
|
||||
|
|
|
@ -0,0 +1,138 @@
|
|||
/*
|
||||
* Copyright (c) Red Hat Inc.
|
||||
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sub license,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie <airlied@redhat.com>
|
||||
*/
|
||||
|
||||
/* simple list based uncached page allocator
|
||||
* - Add chunks of 1MB to the allocator at a time.
|
||||
* - Use page->lru to keep a free list
|
||||
* - doesn't track currently in use pages
|
||||
*
|
||||
* TODO: Add shrinker support
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include <asm/agp.h>
|
||||
|
||||
static struct list_head uncached_free_list;
|
||||
|
||||
static struct mutex uncached_mutex;
|
||||
static int uncached_inited;
|
||||
static int total_uncached_pages;
|
||||
|
||||
/* add 1MB at a time */
|
||||
#define NUM_PAGES_TO_ADD 256
|
||||
|
||||
/* Release one pooled page: drop its AGP/uncached mapping, release the
 * extra reference taken in drm_uncached_add_pages_locked(), then free it.
 *
 * NOTE(review): the allocation path uses set_memory_wc() on x86 but this
 * path unconditionally calls unmap_page_from_agp() — presumably that maps
 * to a write-back attribute restore on x86; confirm the alloc/free pair
 * is symmetric before relying on it.
 */
static void drm_uncached_page_put(struct page *page)
{
	unmap_page_from_agp(page);
	put_page(page);
	__free_page(page);
}
|
||||
|
||||
int drm_uncached_add_pages_locked(int num_pages)
|
||||
{
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
DRM_DEBUG("adding uncached memory %ld\n", num_pages * PAGE_SIZE);
|
||||
for (i = 0; i < num_pages; i++) {
|
||||
|
||||
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
|
||||
if (!page) {
|
||||
DRM_ERROR("unable to get page %d\n", i);
|
||||
return i;
|
||||
}
|
||||
|
||||
get_page(page);
|
||||
#ifdef CONFIG_X86
|
||||
set_memory_wc((unsigned long)page_address(page), 1);
|
||||
#else
|
||||
map_page_into_agp(page);
|
||||
#endif
|
||||
|
||||
list_add(&page->lru, &uncached_free_list);
|
||||
total_uncached_pages++;
|
||||
}
|
||||
return i;
|
||||
}
|
||||
|
||||
struct page *drm_get_uncached_page(void)
|
||||
{
|
||||
struct page *page = NULL;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&uncached_mutex);
|
||||
if (list_empty(&uncached_free_list)) {
|
||||
ret = drm_uncached_add_pages_locked(NUM_PAGES_TO_ADD);
|
||||
if (ret == 0)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
page = list_first_entry(&uncached_free_list, struct page, lru);
|
||||
list_del(&page->lru);
|
||||
|
||||
mutex_unlock(&uncached_mutex);
|
||||
return page;
|
||||
}
|
||||
|
||||
/* Return a page to the uncached free pool.  The page stays pinned and
 * uncached; it is recycled as-is by a later drm_get_uncached_page()
 * rather than being freed here. */
void drm_put_uncached_page(struct page *page)
{
	mutex_lock(&uncached_mutex);
	list_add(&page->lru, &uncached_free_list);
	mutex_unlock(&uncached_mutex);
}
|
||||
|
||||
void drm_uncached_release_all_pages(void)
|
||||
{
|
||||
struct page *page, *tmp;
|
||||
|
||||
list_for_each_entry_safe(page, tmp, &uncached_free_list, lru) {
|
||||
list_del(&page->lru);
|
||||
drm_uncached_page_put(page);
|
||||
}
|
||||
}
|
||||
|
||||
int drm_uncached_init(void)
|
||||
{
|
||||
|
||||
if (uncached_inited)
|
||||
return 0;
|
||||
|
||||
INIT_LIST_HEAD(&uncached_free_list);
|
||||
|
||||
mutex_init(&uncached_mutex);
|
||||
uncached_inited = 1;
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
void drm_uncached_fini(void)
|
||||
{
|
||||
if (!uncached_inited)
|
||||
return;
|
||||
|
||||
uncached_inited = 0;
|
||||
drm_uncached_release_all_pages();
|
||||
}
|
||||
|
|
@ -966,6 +966,9 @@ int radeon_gem_mm_init(struct drm_device *dev)
|
|||
/* init TTM underneath */
|
||||
drm_bo_driver_init(dev);
|
||||
|
||||
/* use the uncached allocator */
|
||||
dev->bm.allocator_type = _DRM_BM_ALLOCATOR_UNCACHED;
|
||||
|
||||
/* size the mappable VRAM memory for now */
|
||||
radeon_vram_setup(dev);
|
||||
|
||||
|
|
Loading…
Reference in New Issue