Remove the use of reserved pages, and use locked pages instead.

Update compatibility for the latest Linux versions.
Thomas Hellstrom 2006-09-14 12:17:38 +02:00
parent 9adc9584a7
commit 682c6ed029
3 changed files with 48 additions and 25 deletions
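
In outline, the patch swaps the reserve/unreserve pair for lock/unlock on every page the TTM hands out, and adds kernel-version guards around the page-accounting calls. A condensed before/after sketch of the page-flag change, distilled from the hunks below (context trimmed, not the literal diff):

/* Before: TTM pages were marked reserved so the VM would leave them alone. */
SetPageReserved(page);		/* on allocation */
ClearPageReserved(page);	/* on teardown */

/* After: the pages are held locked instead, and the teardown path
 * sanity-checks the reference count before freeing. */
SetPageLocked(page);		/* on allocation */
ClearPageLocked(page);		/* on teardown */
if (page_count(page) != 1)
	DRM_ERROR("Erroneous page count. Leaking pages.\n");
__free_page(page);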

@@ -54,19 +54,37 @@ static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd,
				    unsigned long addr, unsigned long end)
{
	pte_t *pte;
	struct page *page;
	unsigned long pfn;
	pte = pte_offset_map(pmd, addr);
	do {
		if (pte_present(*pte)) {
			pte_t ptent;
			ptep_get_and_clear(mm, addr, pte);
			pfn = pte_pfn(*pte);
			ptent = *pte;
			lazy_mmu_prot_update(ptent);
		} else {
			ptep_get_and_clear(mm, addr, pte);
		}
		if (!pte_none(*pte)) {
			DRM_ERROR("Ugh. Pte was present\n");
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				if (atomic_add_negative(-1, &page->_mapcount)) {
					if (page_test_and_clear_dirty(page))
						set_page_dirty(page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
					dec_zone_page_state(page, NR_FILE_MAPPED);
#else
					dec_page_state(nr_mapped);
#endif
				}
				put_page(page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
				dec_mm_counter(mm, file_rss);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
				dec_mm_counter(mm, rss);
#else
				--mm->rss;
#endif
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
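
The change_pte_range() compat code above has to open-code roughly what the kernel's own unmap path (page_remove_rmap() and friends) does, because the accounting helpers were renamed across 2.6.x releases. A sketch isolating that version dispatch in one place; drm_page_unaccount() is a hypothetical name, not a symbol from the patch, but every call inside mirrors the hunk above:

#include <linux/version.h>
#include <linux/mm.h>

/* Drop one mapping reference from a page and fix up the per-zone and
 * per-mm counters, picking the right API for the running kernel. */
static void drm_page_unaccount(struct mm_struct *mm, struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		/* Last mapping gone: migrate the hardware dirty bit. */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
		dec_zone_page_state(page, NR_FILE_MAPPED);	/* per-zone stats */
#else
		dec_page_state(nr_mapped);			/* global stats */
#endif
	}
	put_page(page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	dec_mm_counter(mm, file_rss);	/* split file/anon rss counters */
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
	dec_mm_counter(mm, rss);	/* single rss counter */
#else
	--mm->rss;			/* bare field on very old kernels */
#endif
}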

@@ -199,6 +199,7 @@ static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
			      (page_offset << PAGE_SHIFT),
			      entry->vma->vm_start +
			      ((page_offset + num_pages) << PAGE_SHIFT));
#if !defined(flush_tlb_mm) && defined(MODULE)
		flush_tlb = 1;
#endif
@@ -209,7 +210,7 @@
#endif
	for (cur_page = first_page; cur_page != last_page; ++cur_page) {
		if (page_mapcount(*cur_page) != 0) {
		if (page_mapped(*cur_page)) {
			DRM_ERROR("Mapped page detected. Map count is %d\n",
				  page_mapcount(*cur_page));
			return -1;
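
page_mapped() is the idiomatic form of the open-coded mapcount test it replaces here. Roughly, for kernels of this era it reads the same counter (simplified sketch; _mapcount starts at -1 for an unmapped page):

#include <linux/mm.h>

/* Simplified equivalent of page_mapped() circa 2.6.x. */
static inline int page_is_mapped(struct page *page)
{
	return atomic_read(&page->_mapcount) >= 0;
}
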
@@ -239,6 +240,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
	}
	DRM_DEBUG("Destroying a ttm\n");
	if (ttm->be_list) {
		list_for_each_safe(list, next, &ttm->be_list->head) {
			drm_ttm_backend_list_t *entry =
@@ -262,7 +264,22 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
			do_tlbflush = 1;
		}
		if (*cur_page) {
			ClearPageReserved(*cur_page);
			ClearPageLocked(*cur_page);
			/*
			 * Debugging code. Remove if the error message never
			 * shows up.
			 */
			if (page_count(*cur_page) != 1) {
				DRM_ERROR("Erroneous page count. "
					  "Leaking pages.\n");
			}
			/*
			 * End debugging.
			 */
			__free_page(*cur_page);
			--bm->cur_pages;
		}
@@ -526,20 +543,7 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
	drm_unbind_ttm_region(entry);
	if (be) {
		be->clear(entry->be);
#if 0 /* Hmm, Isn't this done in unbind? */
		if (be->needs_cache_adjust(be)) {
			int ret = drm_ttm_lock_mmap_sem(ttm);
			drm_ttm_lock_mm(ttm, 0, 1);
			unmap_vma_pages(ttm, entry->page_offset,
					entry->num_pages);
			drm_ttm_unlock_mm(ttm, 0, 1);
			drm_set_caching(ttm, entry->page_offset,
					entry->num_pages, 0, 1);
			if (!ret)
				drm_ttm_unlock_mm(ttm, 1, 0);
		}
#endif
		be->clear(be);
		be->destroy(be);
	}
	cur_page_flags = ttm->page_flags + entry->page_offset;
@@ -616,7 +620,7 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
				drm_destroy_ttm_region(entry);
				return -ENOMEM;
			}
			SetPageReserved(*cur_page);
			SetPageLocked(*cur_page);
			++bm->cur_pages;
		}
	}

@@ -276,16 +276,17 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
		++bm->cur_pages;
		page = ttm->pages[page_offset] =
		    alloc_page(GFP_KERNEL);
		SetPageReserved(page);
	}
	if (!page)
		return NOPAGE_OOM;
	SetPageLocked(page);
	get_page(page);
	default_prot = vm_get_page_prot(vma->vm_flags);
	BUG_ON(page_flags & DRM_TTM_PAGE_UNCACHED);
	vma->vm_page_prot = default_prot;
	return page;
}
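
For reference, drm_do_vm_ttm_nopage() above follows the pre-2.6.23 ->nopage fault protocol: return a page carrying an extra reference, or NOPAGE_OOM on allocation failure. A minimal sketch of how such a handler plugs into a vm_operations_struct, assuming a kernel of that era (the names here are illustrative, not part of the patch):

#include <linux/mm.h>

static struct page *my_dummy_page;	/* allocated elsewhere at init time */

/* Minimal ->nopage handler: hand the VM one extra reference per fault. */
static struct page *my_nopage(struct vm_area_struct *vma,
			      unsigned long address, int *type)
{
	if (!my_dummy_page)
		return NOPAGE_OOM;
	get_page(my_dummy_page);	/* dropped again when the PTE is torn down */
	if (type)
		*type = VM_FAULT_MINOR;
	return my_dummy_page;
}

static struct vm_operations_struct my_vm_ops = {
	.nopage = my_nopage,
};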