drm: drop Linux < 2.6.19 support

This also means dropping the DRM_ODD_MM_COMPAT case.

Signed-off-by: Pekka Paalanen <pq@iki.fi>

parent 081b2d6da2
commit f44c740dc7
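(For context: the whole compatibility layer hinged on one compile-time guard. The sketch below restates the version check removed in the compatibility-header hunk further down, so the deletions are easier to follow — with kernels older than 2.6.19 unsupported, DRM_ODD_MM_COMPAT can never be defined and everything under it is dead code.)

	#include <linux/version.h>

	/* The guard this commit removes: only pre-2.6.19 kernels ever
	 * defined DRM_ODD_MM_COMPAT, so dropping them kills the symbol. */
	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
	#define DRM_ODD_MM_COMPAT
	#endif

	/* 2.6.21 and newer kernels use the full fault() path instead. */
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
	#define DRM_FULL_MM_COMPAT
	#endif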
@@ -91,44 +91,13 @@ void drm_bo_add_to_lru(struct drm_buffer_object *bo)
 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
 {
-#ifdef DRM_ODD_MM_COMPAT
-	int ret;
-
-	if (!bo->map_list.map)
-		return 0;
-
-	ret = drm_bo_lock_kmm(bo);
-	if (ret)
-		return ret;
-	drm_bo_unmap_virtual(bo);
-	if (old_is_pci)
-		drm_bo_finish_unmap(bo);
-#else
 	if (!bo->map_list.map)
 		return 0;
 
 	drm_bo_unmap_virtual(bo);
-#endif
 	return 0;
 }
 
-static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
-{
-#ifdef DRM_ODD_MM_COMPAT
-	int ret;
-
-	if (!bo->map_list.map)
-		return;
-
-	ret = drm_bo_remap_bound(bo);
-	if (ret) {
-		DRM_ERROR("Failed to remap a bound buffer object.\n"
-			  "\tThis might cause a sigbus later.\n");
-	}
-	drm_bo_unlock_kmm(bo);
-#endif
-}
-
 /*
  * Call bo->mutex locked.
  */
 
@@ -237,9 +206,6 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
 		goto out_err;
 
 moved:
-	if (old_is_pci || new_is_pci)
-		drm_bo_vm_post_move(bo);
-
 	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
 		ret =
 		    dev->driver->bo_driver->invalidate_caches(dev,
@@ -260,9 +226,6 @@ moved:
 	return 0;
 
 out_err:
-	if (old_is_pci || new_is_pci)
-		drm_bo_vm_post_move(bo);
-
 	new_man = &bm->man[bo->mem.mem_type];
 	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
 		drm_ttm_unbind(bo->ttm);
@@ -467,11 +430,6 @@ static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
 		return;
 	}
 
-#ifdef DRM_ODD_MM_COMPAT
-	BUG_ON(!list_empty(&bo->vma_list));
-	BUG_ON(!list_empty(&bo->p_mm_list));
-#endif
-
 	if (bo->ttm) {
 		drm_ttm_unbind(bo->ttm);
 		drm_ttm_destroy(bo->ttm);
@@ -1784,10 +1742,6 @@ int drm_buffer_object_create(struct drm_device *dev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->pinned_lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
-#ifdef DRM_ODD_MM_COMPAT
-	INIT_LIST_HEAD(&bo->p_mm_list);
-	INIT_LIST_HEAD(&bo->vma_list);
-#endif
 	bo->dev = dev;
 	bo->type = type;
 	bo->num_pages = num_pages;
@@ -297,10 +297,6 @@ int drm_buffer_object_transfer(struct drm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->pinned_lru);
-#ifdef DRM_ODD_MM_COMPAT
-	INIT_LIST_HEAD(&fbo->vma_list);
-	INIT_LIST_HEAD(&fbo->p_mm_list);
-#endif
 
 	fbo->fence = drm_fence_reference_locked(bo->fence);
 	fbo->pinned_node = NULL;
@@ -341,20 +337,8 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-#ifdef DRM_ODD_MM_COMPAT
-	/*
-	 * In this mode, we don't allow pipelining a copy blit,
-	 * since the buffer will be accessible from user space
-	 * the moment we return and rebuild the page tables.
-	 *
-	 * With normal vm operation, page tables are rebuilt
-	 * on demand using fault(), which waits for buffer idle.
-	 */
-	if (1)
-#else
 	if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
 		      bo->mem.mm_node != NULL))
-#endif
 	{
 		if (bo->fence) {
 			(void) drm_fence_object_wait(bo->fence, 0, 1,
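(The deleted comment above captures the old constraint: under DRM_ODD_MM_COMPAT a copy blit could never be pipelined, because the buffer became user-visible the moment the page tables were rebuilt. A minimal sketch of the control flow that remains after the patch, simplified from the hunk — the branch bodies are placeholders, not code from the tree:)

	/* Sketch: only an eviction, or a move out of the pinned node,
	 * still forces a synchronous fence wait; any other move may be
	 * pipelined because fault() waits for buffer idle on demand. */
	if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
		      bo->mem.mm_node != NULL)) {
		/* synchronous path: wait on bo->fence before returning */
	} else {
		/* pipelined path: the first page fault waits for idle */
	}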
@@ -27,31 +27,7 @@
 
 #include "drmP.h"
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
-
-/*
- * The protection map was exported in 2.6.19
- */
-
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
-{
-#ifdef MODULE
-	static pgprot_t drm_protection_map[16] = {
-		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
-		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
-	};
-
-	return drm_protection_map[vm_flags & 0x0F];
-#else
-	extern pgprot_t protection_map[];
-	return protection_map[vm_flags & 0x0F];
-#endif
-};
-#endif
-
-
-#if !defined(DRM_FULL_MM_COMPAT) && \
-	(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+#if !defined(DRM_FULL_MM_COMPAT)
 
 static int drm_pte_is_clear(struct vm_area_struct *vma,
 			    unsigned long addr)
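(The deleted fallback above emulated vm_get_page_prot() on kernels that had not yet exported it: the low four vm_flags bits index a 16-entry protection table, __P* entries for private mappings and __S* for shared ones. A worked example of the lookup, assuming the standard flag values VM_READ=0x1, VM_WRITE=0x2, VM_EXEC=0x4, VM_SHARED=0x8:)

	/* A private read/write mapping: vm_flags & 0x0F == 0x3,
	 * so the table yields __P011 (read/write, copy-on-write). */
	pgprot_t prot = vm_get_page_prot(VM_READ | VM_WRITE);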
@@ -193,14 +169,6 @@ out_unlock:
 	return NULL;
 }
 
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
-  !defined(DRM_FULL_MM_COMPAT)
-
-/**
- */
-
 unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
 			      unsigned long address)
 {
@@ -219,243 +187,7 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
 
 	return 0;
 }
-#endif
-
-
-#ifdef DRM_ODD_MM_COMPAT
-
-/*
- * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
- * workaround for a single BUG statement in do_no_page in these versions. The
- * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
- * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
- * is to first take the dev->struct_mutex, and then trylock all mmap_sems. If
- * this fails for a single mmap_sem, we have to release all sems and the
- * dev->struct_mutex, release the cpu and retry. We also need to keep track of
- * all vmas mapping the ttm. Phew.
- */
-
-typedef struct p_mm_entry {
-	struct list_head head;
-	struct mm_struct *mm;
-	atomic_t refcount;
-	int locked;
-} p_mm_entry_t;
-
-typedef struct vma_entry {
-	struct list_head head;
-	struct vm_area_struct *vma;
-} vma_entry_t;
-
-
-struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
-			      unsigned long address,
-			      int *type)
-{
-	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-	unsigned long page_offset;
-	struct page *page;
-	struct drm_ttm *ttm;
-	struct drm_device *dev;
-
-	mutex_lock(&bo->mutex);
-
-	if (type)
-		*type = VM_FAULT_MINOR;
-
-	if (address > vma->vm_end) {
-		page = NOPAGE_SIGBUS;
-		goto out_unlock;
-	}
-
-	dev = bo->dev;
-
-	if (drm_mem_reg_is_pci(dev, &bo->mem)) {
-		DRM_ERROR("Invalid compat nopage.\n");
-		page = NOPAGE_SIGBUS;
-		goto out_unlock;
-	}
-
-	ttm = bo->ttm;
-	drm_ttm_fixup_caching(ttm);
-	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
-	page = drm_ttm_get_page(ttm, page_offset);
-	if (!page) {
-		page = NOPAGE_OOM;
-		goto out_unlock;
-	}
-
-	get_page(page);
-out_unlock:
-	mutex_unlock(&bo->mutex);
-	return page;
-}
-
-
-int drm_bo_map_bound(struct vm_area_struct *vma)
-{
-	struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
-	int ret = 0;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
-
-	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
-				&bus_offset, &bus_size);
-	BUG_ON(ret);
-
-	if (bus_size) {
-		struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
-		unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
-		pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
-		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
-					 vma->vm_end - vma->vm_start,
-					 pgprot);
-	}
-
-	return ret;
-}
-
-int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
-{
-	p_mm_entry_t *entry, *n_entry;
-	vma_entry_t *v_entry;
-	struct mm_struct *mm = vma->vm_mm;
-
-	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
-	if (!v_entry) {
-		DRM_ERROR("Allocation of vma pointer entry failed\n");
-		return -ENOMEM;
-	}
-	v_entry->vma = vma;
-
-	list_add_tail(&v_entry->head, &bo->vma_list);
-
-	list_for_each_entry(entry, &bo->p_mm_list, head) {
-		if (mm == entry->mm) {
-			atomic_inc(&entry->refcount);
-			return 0;
-		} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
-	}
-
-	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
-	if (!n_entry) {
-		DRM_ERROR("Allocation of process mm pointer entry failed\n");
-		return -ENOMEM;
-	}
-	INIT_LIST_HEAD(&n_entry->head);
-	n_entry->mm = mm;
-	n_entry->locked = 0;
-	atomic_set(&n_entry->refcount, 0);
-	list_add_tail(&n_entry->head, &entry->head);
-
-	return 0;
-}
-
-void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
-{
-	p_mm_entry_t *entry, *n;
-	vma_entry_t *v_entry, *v_n;
-	int found = 0;
-	struct mm_struct *mm = vma->vm_mm;
-
-	list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
-		if (v_entry->vma == vma) {
-			found = 1;
-			list_del(&v_entry->head);
-			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
-			break;
-		}
-	}
-	BUG_ON(!found);
-
-	list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
-		if (mm == entry->mm) {
-			if (atomic_add_negative(-1, &entry->refcount)) {
-				list_del(&entry->head);
-				BUG_ON(entry->locked);
-				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
-			}
-			return;
-		}
-	}
-	BUG_ON(1);
-}
-
-
-int drm_bo_lock_kmm(struct drm_buffer_object * bo)
-{
-	p_mm_entry_t *entry;
-	int lock_ok = 1;
-
-	list_for_each_entry(entry, &bo->p_mm_list, head) {
-		BUG_ON(entry->locked);
-		if (!down_write_trylock(&entry->mm->mmap_sem)) {
-			lock_ok = 0;
-			break;
-		}
-		entry->locked = 1;
-	}
-
-	if (lock_ok)
-		return 0;
-
-	list_for_each_entry(entry, &bo->p_mm_list, head) {
-		if (!entry->locked)
-			break;
-		up_write(&entry->mm->mmap_sem);
-		entry->locked = 0;
-	}
-
-	/*
-	 * Possible deadlock. Try again. Our callers should handle this
-	 * and restart.
-	 */
-
-	return -EAGAIN;
-}
-
-void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
-{
-	p_mm_entry_t *entry;
-
-	list_for_each_entry(entry, &bo->p_mm_list, head) {
-		BUG_ON(!entry->locked);
-		up_write(&entry->mm->mmap_sem);
-		entry->locked = 0;
-	}
-}
-
-int drm_bo_remap_bound(struct drm_buffer_object *bo)
-{
-	vma_entry_t *v_entry;
-	int ret = 0;
-
-	if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
-		list_for_each_entry(v_entry, &bo->vma_list, head) {
-			ret = drm_bo_map_bound(v_entry->vma);
-			if (ret)
-				break;
-		}
-	}
-
-	return ret;
-}
-
-void drm_bo_finish_unmap(struct drm_buffer_object *bo)
-{
-	vma_entry_t *v_entry;
-
-	list_for_each_entry(v_entry, &bo->vma_list, head) {
-		v_entry->vma->vm_flags &= ~VM_PFNMAP;
-	}
-}
-
-#endif
+#endif /* !defined(DRM_FULL_MM_COMPAT) */
 
 #ifdef DRM_IDR_COMPAT_FN
 /* only called when idp->lock is held */
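(The comments deleted above prescribe a trylock-all-or-back-off protocol for drm_bo_lock_kmm(): grab the buffer mutex, try to take every mapping process's mmap_sem, and on failure drop everything and retry. A minimal sketch of the caller-side retry loop they describe — the loop itself is illustrative, not code from the tree:)

	int ret;

	do {
		mutex_lock(&bo->mutex);
		ret = drm_bo_lock_kmm(bo);	/* trylocks every mapper's mmap_sem */
		if (ret == -EAGAIN) {
			mutex_unlock(&bo->mutex);
			schedule();		/* release the cpu, then retry */
		}
	} while (ret == -EAGAIN);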
@@ -62,12 +62,6 @@
 #include <linux/cred.h>
 #endif
 
-/* older kernels had different irq args */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
-#undef DRM_IRQ_ARGS
-#define DRM_IRQ_ARGS		int irq, void *arg, struct pt_regs *regs
-#endif
-
 #ifndef list_for_each_safe
 #define list_for_each_safe(pos, n, head)			\
 	for (pos = (head)->next, n = pos->next; pos != (head);	\
@@ -136,10 +130,6 @@
 #include <linux/mm.h>
 #include <asm/page.h>
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
-#define DRM_ODD_MM_COMPAT
-#endif
-
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
 #define DRM_FULL_MM_COMPAT
 #endif
@@ -184,78 +174,10 @@ struct fault_data {
 	int type;
 };
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
-extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
-				     unsigned long address,
-				     int *type);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
-  !defined(DRM_FULL_MM_COMPAT)
 extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 				     unsigned long address);
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
 #endif /* ndef DRM_FULL_MM_COMPAT */
 
-#ifdef DRM_ODD_MM_COMPAT
-
-struct drm_buffer_object;
-
-
-/*
- * Add a vma to the ttm vma list, and the
- * process mm pointer to the ttm mm list. Needs the ttm mutex.
- */
-
-extern int drm_bo_add_vma(struct drm_buffer_object * bo,
-			  struct vm_area_struct *vma);
-/*
- * Delete a vma and the corresponding mm pointer from the
- * ttm lists. Needs the ttm mutex.
- */
-extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
-			      struct vm_area_struct *vma);
-
-/*
- * Attempts to lock all relevant mmap_sems for a ttm, while
- * not releasing the ttm mutex. May return -EAGAIN to avoid
- * deadlocks. In that case the caller shall release the ttm mutex,
- * schedule() and try again.
- */
-
-extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
-
-/*
- * Unlock all relevant mmap_sems for a ttm.
- */
-extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
-
-/*
- * If the ttm was bound to the aperture, this function shall be called
- * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
- * vmas mapping this ttm. This is needed just after unmapping the ptes of
- * the vma, otherwise the do_nopage() function will bug :(. The function
- * releases the mmap_sems for this ttm.
- */
-
-extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
-
-/*
- * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
- * fault these pfns in, because the first one will set the vma VM_PFNMAP
- * flag, which will make the next fault bug in do_nopage(). The function
- * releases the mmap_sems for this ttm.
- */
-
-extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
-
-
-/*
- * Remap a vma for a bound ttm. Call with the ttm mutex held and
- * the relevant mmap_sem locked.
- */
-extern int drm_bo_map_bound(struct vm_area_struct *vma);
-
-#endif
-
 /* fixme when functions are upstreamed - upstreamed for 2.6.23 */
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 #define DRM_IDR_COMPAT_FN
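(Read together, the deleted declarations document the buffer-move dance those helpers implemented on 2.6.15-2.6.18. A hedged sketch of the sequence, condensed from drm_bo_vm_pre_move()/drm_bo_vm_post_move() earlier in this diff:)

	/* Old move protocol, per the deleted comments: every step runs
	 * with all relevant mmap_sems write-locked by drm_bo_lock_kmm(). */
	ret = drm_bo_lock_kmm(bo);	/* -EAGAIN means back off and retry */
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);	/* zap the ptes */
	if (old_is_pci)
		drm_bo_finish_unmap(bo); /* clear VM_PFNMAP, or do_nopage BUGs */
	/* ... move the buffer ... */
	drm_bo_remap_bound(bo);		/* io_remap_pfn_range, never fault */
	drm_bo_unlock_kmm(bo);		/* drop all mmap_sems */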
@@ -269,10 +191,6 @@ int idr_for_each(struct idr *idp,
 void idr_remove_all(struct idr *idp);
 #endif
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
-typedef _Bool bool;
-#endif
-
 
 #if !defined(flush_agp_mappings)
 #define flush_agp_mappings() do {} while(0)
@@ -510,13 +510,6 @@ struct drm_buffer_object {
 	unsigned long bus_offset;
 	uint32_t vm_flags;
 	void *iomap;
-
-#ifdef DRM_ODD_MM_COMPAT
-	/* dev->struct_mutex only protected. */
-	struct list_head vma_list;
-	struct list_head p_mm_list;
-#endif
-
 };
 
 #define _DRM_BO_FLAG_UNFENCED 0x00000001
@@ -799,9 +799,6 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
 
 	drm_vm_open_locked(vma);
 	atomic_inc(&bo->usage);
-#ifdef DRM_ODD_MM_COMPAT
-	drm_bo_add_vma(bo, vma);
-#endif
 }
 
 /**
@@ -834,9 +831,6 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
 	drm_vm_close(vma);
 	if (bo) {
 		mutex_lock(&dev->struct_mutex);
-#ifdef DRM_ODD_MM_COMPAT
-		drm_bo_delete_vma(bo, vma);
-#endif
 		drm_bo_usage_deref_locked((struct drm_buffer_object **)
 					  &vma->vm_private_data);
 		mutex_unlock(&dev->struct_mutex);
@@ -852,11 +846,7 @@ static struct vm_operations_struct drm_bo_vm_ops = {
 	.fault = drm_bo_vm_fault,
 #endif
 #else
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
 	.nopfn = drm_bo_vm_nopfn,
-#else
-	.nopage = drm_bo_vm_nopage,
-#endif
 #endif
 	.open = drm_bo_vm_open,
 	.close = drm_bo_vm_close,
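(The handler table above spans three generations of the Linux fault interface. The first two signatures appear verbatim elsewhere in this diff; the third is my reading of the 2.6.23+ fault hook selected under DRM_FULL_MM_COMPAT:)

	/* 2.6.18 and older: nopage returns the faulted page directly. */
	struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type);

	/* 2.6.19-2.6.22: nopfn inserts the pfn itself, returns a status. */
	unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
				      unsigned long address);

	/* 2.6.23+: fault works through a struct vm_fault descriptor. */
	int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);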
@@ -878,12 +868,7 @@ static int drm_bo_mmap_locked(struct vm_area_struct *vma,
 	vma->vm_private_data = map->handle;
 	vma->vm_file = filp;
 	vma->vm_flags |= VM_RESERVED | VM_IO;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
 	vma->vm_flags |= VM_PFNMAP;
-#endif
 	drm_bo_vm_open_locked(vma);
-#ifdef DRM_ODD_MM_COMPAT
-	drm_bo_map_bound(vma);
-#endif
 	return 0;
 }
@@ -235,7 +235,7 @@ void nouveau_mem_close(struct drm_device *dev)
 static uint32_t
 nouveau_mem_fb_amount_igp(struct drm_device *dev)
 {
-#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+#if defined(__linux__)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct pci_dev *bridge;
 	uint32_t mem;