Get rid of all ugly PTE hacks.

parent 16be6ba63a
commit cee659afb5
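
The hunks below drop DRM's private copies of the kernel's page-table walkers
(the pgd/pud/pmd clearing helpers and drm_clear_vma()) and instead remember
each device inode's address_space at open time, so user mappings of a TTM
range can be shot down with the stock unmap_mapping_range() helper. A minimal
sketch of that pattern follows; it is illustrative only, and the my_dev,
my_open and my_zap names are invented here rather than taken from the DRM
sources.

#include <linux/fs.h>
#include <linux/mm.h>

struct my_dev {
	struct address_space *dev_mapping;	/* captured at first open() */
};

static int my_open(struct inode *inode, struct my_dev *dev)
{
	/* Every open of the device node shares one inode, hence one mapping. */
	if (dev->dev_mapping == NULL)
		dev->dev_mapping = inode->i_mapping;
	return 0;
}

/* Invalidate the user PTEs covering num_pages pages at page_offset. */
static void my_zap(struct my_dev *dev, unsigned long page_offset,
		   unsigned long num_pages)
{
	loff_t holebegin = (loff_t) page_offset << PAGE_SHIFT;
	loff_t holelen = (loff_t) num_pages << PAGE_SHIFT;

	unmap_mapping_range(dev->dev_mapping, holebegin, holelen, 1);
}
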
@@ -874,6 +874,7 @@ typedef struct drm_device {
 	drm_open_hash_t map_hash;	/**< User token hash table for maps */
 	drm_mm_t offset_manager;	/**< User token manager */
 	drm_open_hash_t object_hash;	/**< User token hash table for objects */
+	struct address_space *dev_mapping;	/**< For unmap_mapping_range() */
 
 	/** \name Context handle management */
 	/*@{ */

@@ -26,139 +26,6 @@
  */
 
 #include "drmP.h"
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-#ifdef MODULE
-void pgd_clear_bad(pgd_t * pgd)
-{
-	pgd_ERROR(*pgd);
-	pgd_clear(pgd);
-}
-
-void pud_clear_bad(pud_t * pud)
-{
-	pud_ERROR(*pud);
-	pud_clear(pud);
-}
-
-void pmd_clear_bad(pmd_t * pmd)
-{
-	pmd_ERROR(*pmd);
-	pmd_clear(pmd);
-}
-#endif
-
-static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd,
-				    unsigned long addr, unsigned long end)
-{
-	pte_t *pte;
-	struct page *page;
-	unsigned long pfn;
-
-	pte = pte_offset_map(pmd, addr);
-	do {
-		if (pte_present(*pte)) {
-			pte_t ptent;
-			pfn = pte_pfn(*pte);
-			ptent = *pte;
-			ptep_get_and_clear(mm, addr, pte);
-			if (pfn_valid(pfn)) {
-				page = pfn_to_page(pfn);
-				if (atomic_add_negative(-1, &page->_mapcount)) {
-					if (page_test_and_clear_dirty(page))
-						set_page_dirty(page);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
-					dec_zone_page_state(page, NR_FILE_MAPPED);
-#else
-					dec_page_state(nr_mapped);
-#endif
-				}
-
-				put_page(page);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
-				dec_mm_counter(mm, file_rss);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
-				dec_mm_counter(mm, rss);
-#else
-				--mm->rss;
-#endif
-			}
-		}
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(pte - 1);
-}
-
-static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud,
-				    unsigned long addr, unsigned long end)
-{
-	pmd_t *pmd;
-	unsigned long next;
-
-	pmd = pmd_offset(pud, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		if (pmd_none_or_clear_bad(pmd))
-			continue;
-		change_pte_range(mm, pmd, addr, next);
-	} while (pmd++, addr = next, addr != end);
-}
-
-static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd,
-				    unsigned long addr, unsigned long end)
-{
-	pud_t *pud;
-	unsigned long next;
-
-	pud = pud_offset(pgd, addr);
-	do {
-		next = pud_addr_end(addr, end);
-		if (pud_none_or_clear_bad(pud))
-			continue;
-		change_pmd_range(mm, pud, addr, next);
-	} while (pud++, addr = next, addr != end);
-}
-
-/*
- * This function should be called with all relevant spinlocks held.
- */
-
-#if 1
-void drm_clear_vma(struct vm_area_struct *vma,
-		   unsigned long addr, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	pgd_t *pgd;
-	unsigned long next;
-#if defined(flush_tlb_mm) || !defined(MODULE)
-	unsigned long start = addr;
-#endif
-	BUG_ON(addr >= end);
-	pgd = pgd_offset(mm, addr);
-	flush_cache_range(vma, addr, end);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			continue;
-		change_pud_range(mm, pgd, addr, next);
-	} while (pgd++, addr = next, addr != end);
-#if defined(flush_tlb_mm) || !defined(MODULE)
-	flush_tlb_range(vma, addr, end);
-#endif
-}
-#else
-
-void drm_clear_vma(struct vm_area_struct *vma,
-		   unsigned long addr, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	spin_unlock(&mm->page_table_lock);
-	(void) zap_page_range(vma, addr, end - addr, NULL);
-	spin_lock(&mm->page_table_lock);
-}
-#endif
 
 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 int drm_map_page_into_agp(struct page *page)

@@ -263,6 +263,7 @@ int drm_lastclose(drm_device_t * dev)
 		dev->lock.filp = NULL;
 		wake_up_interruptible(&dev->lock.lock_queue);
 	}
+	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
 	if (drm_bo_clean_mm(dev)) {

@@ -158,6 +158,12 @@ int drm_open(struct inode *inode, struct file *filp)
 		}
 		spin_unlock(&dev->count_lock);
 	}
+	mutex_lock(&dev->struct_mutex);
+	BUG_ON((dev->dev_mapping != NULL) &&
+	       (dev->dev_mapping != inode->i_mapping));
+	if (dev->dev_mapping == NULL)
+		dev->dev_mapping = inode->i_mapping;
+	mutex_unlock(&dev->struct_mutex);
 
 	return retcode;
 }

@@ -465,6 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
 	drm_fasync(-1, filp, 0);
 
 	mutex_lock(&dev->ctxlist_mutex);
 
 	if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
 		drm_ctx_list_t *pos, *n;

@@ -123,31 +123,12 @@ void drm_ttm_delete_mm(drm_ttm_t * ttm, struct mm_struct *mm)
 	BUG_ON(1);
 }
 
-static void drm_ttm_lock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
+static void drm_ttm_unlock_mm(drm_ttm_t * ttm)
 {
 	p_mm_entry_t *entry;
 
 	list_for_each_entry(entry, &ttm->p_mm_list, head) {
-		if (mm_sem) {
-			down_write(&entry->mm->mmap_sem);
-		}
-		if (page_table) {
-			spin_lock(&entry->mm->page_table_lock);
-		}
-	}
-}
-
-static void drm_ttm_unlock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
-{
-	p_mm_entry_t *entry;
-
-	list_for_each_entry(entry, &ttm->p_mm_list, head) {
-		if (page_table) {
-			spin_unlock(&entry->mm->page_table_lock);
-		}
-		if (mm_sem) {
-			up_write(&entry->mm->mmap_sem);
-		}
+		up_write(&entry->mm->mmap_sem);
 	}
 }
 

@@ -180,30 +161,13 @@ static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset,
 static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
 			   unsigned long num_pages)
 {
-	struct list_head *list;
-
-#if !defined(flush_tlb_mm) && defined(MODULE)
-	int flush_tlb = 0;
-#endif
-	list_for_each(list, &ttm->vma_list->head) {
-		drm_ttm_vma_list_t *entry =
-		    list_entry(list, drm_ttm_vma_list_t, head);
-
-		drm_clear_vma(entry->vma,
-			      entry->vma->vm_start +
-			      (page_offset << PAGE_SHIFT),
-			      entry->vma->vm_start +
-			      ((page_offset + num_pages) << PAGE_SHIFT));
-#if !defined(flush_tlb_mm) && defined(MODULE)
-		flush_tlb = 1;
-#endif
-	}
-#if !defined(flush_tlb_mm) && defined(MODULE)
-	if (flush_tlb)
-		global_flush_tlb();
-#endif
+	drm_device_t *dev = ttm->dev;
+	loff_t offset = ((loff_t) ttm->mapping_offset + page_offset)
+		<< PAGE_SHIFT;
+	loff_t holelen = num_pages << PAGE_SHIFT;
 
+	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
 	return 0;
 }

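
As a worked example of the new arithmetic in unmap_vma_pages() (the numbers
are made up; only PAGE_SHIFT == 12, i.e. 4 KiB pages, is assumed):

	/* ttm->mapping_offset = 0x100, page_offset = 4, num_pages = 8 */
	loff_t offset  = ((loff_t) 0x100 + 4) << 12;	/* 0x104000 */
	loff_t holelen = 8 << 12;			/* 0x008000 */

so unmap_mapping_range() clears the user PTEs backing byte range
[0x104000, 0x10c000) of dev->dev_mapping, i.e. the eight pages that the
region occupies within the TTM object's map offset.
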
@@ -437,15 +401,16 @@ static int drm_ttm_lock_mmap_sem(drm_ttm_t * ttm)
 }
 
 /*
- * Change caching policy for range of pages in a ttm.
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
  */
 
 static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
-			   unsigned long num_pages, int noncached,
-			   int do_tlbflush)
+			   unsigned long num_pages, int noncached)
 {
 	int i, cur;
 	struct page **cur_page;
+	int do_tlbflush = 0;
 
 	for (i = 0; i < num_pages; ++i) {
 		cur = page_offset + i;

@@ -467,6 +432,7 @@ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
 			} else {
 				unmap_page_from_agp(*cur_page);
 			}
+			do_tlbflush = 1;
 		}
 	}
 }

@@ -492,16 +458,14 @@ int drm_evict_ttm_region(drm_ttm_backend_list_t * entry)
 			ret = drm_ttm_lock_mmap_sem(ttm);
 			if (ret)
 				return ret;
-			drm_ttm_lock_mm(ttm, 0, 1);
 			unmap_vma_pages(ttm, entry->page_offset,
 					entry->num_pages);
-			drm_ttm_unlock_mm(ttm, 0, 1);
 		}
 		be->unbind(entry->be);
 		if (ttm && be->needs_cache_adjust(be)) {
 			drm_set_caching(ttm, entry->page_offset,
-					entry->num_pages, 0, 1);
-			drm_ttm_unlock_mm(ttm, 1, 0);
+					entry->num_pages, 0);
+			drm_ttm_unlock_mm(ttm);
 		}
 		break;
 	default:

@@ -653,20 +617,17 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
 		if (ret)
 			return ret;
 
-		drm_ttm_lock_mm(ttm, 0, 1);
 		unmap_vma_pages(ttm, region->page_offset,
 				region->num_pages);
-		drm_ttm_unlock_mm(ttm, 0, 1);
-
 		drm_set_caching(ttm, region->page_offset, region->num_pages,
-				DRM_TTM_PAGE_UNCACHED, 1);
+				DRM_TTM_PAGE_UNCACHED);
 	} else {
 		DRM_DEBUG("Binding cached\n");
 	}
 
 	if ((ret = be->bind(be, aper_offset))) {
 		if (ttm && be->needs_cache_adjust(be))
-			drm_ttm_unlock_mm(ttm, 1, 0);
+			drm_ttm_unlock_mm(ttm);
 		drm_unbind_ttm_region(region);
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;

@@ -682,7 +643,7 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
 	if (ttm && be->needs_cache_adjust(be)) {
 		ioremap_vmas(ttm, region->page_offset, region->num_pages,
 			     aper_offset);
-		drm_ttm_unlock_mm(ttm, 1, 0);
+		drm_ttm_unlock_mm(ttm);
 	}
 
 	region->state = ttm_bound;

@@ -924,7 +885,7 @@ int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
 	}
 
 	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
+	ttm->mapping_offset = list->hash.key;
 	atomic_set(&object->usage, 1);
 	*ttm_object = object;
 	return 0;

@@ -100,6 +100,7 @@ typedef struct drm_ttm {
 	atomic_t vma_count;
 	int mmap_sem_locked;
 	int destroy;
+	uint32_t mapping_offset;
 } drm_ttm_t;
 
 typedef struct drm_ttm_object {