Add a compat kmap_atomic_prot_pfn() to do quick kernel map / unmaps of
PCI or high memory. This is substantially more efficient than drm_bo_kmap(),
since the mapping only lives on a single processor. Unmapping is done
using kunmap_atomic(), which flushes only a single TLB entry.

Add a support utility, drm_bo_pfn_prot(), which returns the pfn and
desired page protection for a given bo offset.

This is all intended for relocations in bound TTMs or VRAM.
The map-access-unmap sequence must be atomic, either using the
preempt_xx() macros or a spinlock.
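
A minimal sketch of the intended calling sequence follows; apply_reloc()
and its parameters are hypothetical and only illustrate the pattern
(assumes a 4-byte aligned offset and the KM_USER0 slot):

static int apply_reloc(struct drm_buffer_object *bo,
                       unsigned long offset, uint32_t val)
{
        unsigned long pfn;
        pgprot_t prot;
        uint32_t *virt;
        int ret;

        ret = drm_bo_pfn_prot(bo, offset, &pfn, &prot);
        if (ret)
                return ret;

        /*
         * kmap_atomic_prot_pfn() itself calls pagefault_disable(), so
         * the map-access-unmap window below is already non-preemptible;
         * the caller must simply not sleep before kunmap_atomic().
         */
        virt = kmap_atomic_prot_pfn(pfn, KM_USER0, prot);
        virt[(offset & ~PAGE_MASK) >> 2] = val;
        kunmap_atomic(virt, KM_USER0);

        return 0;
}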
Thomas Hellstrom 2008-02-28 13:47:15 +01:00
parent 72983ff301
commit 40c9e6a26d
4 changed files with 74 additions and 0 deletions


@@ -595,3 +595,36 @@ void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
map->page = NULL;
}
EXPORT_SYMBOL(drm_bo_kunmap);
int drm_bo_pfn_prot(struct drm_buffer_object *bo,
                    unsigned long dst_offset,
                    unsigned long *pfn,
                    pgprot_t *prot)
{
        struct drm_bo_mem_reg *mem = &bo->mem;
        struct drm_device *dev = bo->dev;
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
        int ret;

        ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
                                &bus_size);
        if (ret)
                return -EINVAL;

        if (bus_size != 0)
                /* The buffer lives in a PCI aperture (e.g. VRAM). */
                *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
        else if (!bo->ttm)
                return -EINVAL;
        else
                /* System memory backed by a TTM. */
                *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm,
                                                    dst_offset >> PAGE_SHIFT));

        *prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
                PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);

        return 0;
}
EXPORT_SYMBOL(drm_bo_pfn_prot);


@@ -729,3 +729,35 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
}
EXPORT_SYMBOL(idr_replace);
#endif
#if defined(CONFIG_X86)
#define drm_kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
                           pgprot_t protection)
{
        enum fixed_addresses idx;
        unsigned long vaddr;
        static pte_t *km_pte;
        static int initialized = 0;

        if (unlikely(!initialized)) {
                km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
                initialized = 1;
        }

        pagefault_disable();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        /* Fixmap addresses grow downwards with the index, hence the subtraction. */
        set_pte(km_pte - idx, pfn_pte(pfn, protection));

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot_pfn);
#endif


@@ -328,4 +328,9 @@ void *idr_replace(struct idr *idp, void *ptr, int id);
typedef _Bool bool;
#endif
#if defined(CONFIG_X86)
#define DRM_KMAP_ATOMIC_PROT_PFN
extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
                                  pgprot_t protection);
#endif
#endif


@@ -738,6 +738,10 @@ static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
                       unsigned long num_pages, struct drm_bo_kmap_obj *map);
extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
                           unsigned long dst_offset,
                           unsigned long *pfn,
                           pgprot_t *prot);
/*