Merge git://proxy01.pd.intel.com:9419/git/mesa/drm into crestline
commit 7c3aeafe75
@@ -94,6 +94,11 @@ static struct {
 } drm_np_retry =
 {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
 
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+                                    struct fault_data *data);
+
+
 struct page * get_nopage_retry(void)
 {
         if (atomic_read(&drm_np_retry.present) == 0) {
@@ -180,7 +185,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma,
         return ret;
 }
 
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                   unsigned long pfn)
 {
         int ret;
@@ -190,14 +195,106 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
         ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
         return ret;
 }
 
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+                                    struct fault_data *data)
+{
+        unsigned long address = data->address;
+        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+        unsigned long page_offset;
+        struct page *page = NULL;
+        drm_ttm_t *ttm;
+        drm_device_t *dev;
+        unsigned long pfn;
+        int err;
+        unsigned long bus_base;
+        unsigned long bus_offset;
+        unsigned long bus_size;
+
+
+        mutex_lock(&bo->mutex);
+
+        err = drm_bo_wait(bo, 0, 1, 0);
+        if (err) {
+                data->type = (err == -EAGAIN) ?
+                        VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+                goto out_unlock;
+        }
+
+
+        /*
+         * If buffer happens to be in a non-mappable location,
+         * move it to a mappable.
+         */
+
+        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+                unsigned long _end = jiffies + 3*DRM_HZ;
+                uint32_t new_mask = bo->mem.mask |
+                        DRM_BO_FLAG_MAPPABLE |
+                        DRM_BO_FLAG_FORCE_MAPPABLE;
+
+                do {
+                        err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+                } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
+
+                if (err) {
+                        DRM_ERROR("Timeout moving buffer to mappable location.\n");
+                        data->type = VM_FAULT_SIGBUS;
+                        goto out_unlock;
+                }
+        }
+
+        if (address > vma->vm_end) {
+                data->type = VM_FAULT_SIGBUS;
+                goto out_unlock;
+        }
+
+        dev = bo->dev;
+        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+                                &bus_size);
+
+        if (err) {
+                data->type = VM_FAULT_SIGBUS;
+                goto out_unlock;
+        }
+
+        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+        if (bus_size) {
+                drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+
+                pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+                vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+        } else {
+                ttm = bo->ttm;
+
+                drm_ttm_fixup_caching(ttm);
+                page = drm_ttm_get_page(ttm, page_offset);
+                if (!page) {
+                        data->type = VM_FAULT_OOM;
+                        goto out_unlock;
+                }
+                pfn = page_to_pfn(page);
+                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+        }
+
+        err = vm_insert_pfn(vma, address, pfn);
+
+        if (!err || err == -EBUSY)
+                data->type = VM_FAULT_MINOR;
+        else
+                data->type = VM_FAULT_OOM;
+out_unlock:
+        mutex_unlock(&bo->mutex);
+        return NULL;
+}
+
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+        !defined(DRM_FULL_MM_COMPAT)
+
 /**
- * While waiting for the fault() handler to appear in
- * we accomplish approximately
- * the same wrapping it with nopfn.
  */
 
 unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
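
Note: the compat section above keeps the fault()-style implementation as a static helper and exposes it through the kernel's nopfn entry point (drm_bo_vm_nopfn, whose body falls outside these hunks). A minimal sketch of such a wrapper, assuming only the fault_data layout declared in drm_compat.h below and the NOPFN_* translation used elsewhere in this merge; an illustration, not the tree's exact code:

        static unsigned long example_nopfn_wrapper(struct vm_area_struct *vma,
                                                   unsigned long address)
        {
                struct fault_data data;

                data.address = address;
                data.type = VM_FAULT_MINOR;
                (void) drm_bo_vm_fault(vma, &data);  /* inserts the pfn itself */

                /* Translate the fault()-style result into a nopfn result. */
                switch (data.type) {
                case VM_FAULT_OOM:
                        return NOPFN_OOM;
                case VM_FAULT_SIGBUS:
                        return NOPFN_SIGBUS;
                default:
                        return NOPFN_REFAULT;  /* pte populated; just refault */
                }
        }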
@@ -212,19 +212,10 @@ extern void free_nopage_retry(void);
 #define NOPAGE_REFAULT get_nopage_retry()
 #endif
 
-#if !defined(DRM_FULL_MM_COMPAT) && \
-        ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
-        (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
-
-struct fault_data;
-extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
-                                    struct fault_data *data);
 
-#endif
 #ifndef DRM_FULL_MM_COMPAT
 
 /*
- * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
  * For now, just return a dummy page that we've allocated out of
  * static space. The page will be put by do_nopage() since we've already
  * filled out the pte.
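
Note: the dummy-page comment in the hunk above describes the get_nopage_retry()/free_nopage_retry() pair kept in drm_compat.c (first hunk). A sketch of the idea, assuming the three drm_np_retry fields implied by its initializer {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)}: a lock, a dummy page pointer, and a "present" flag. Field names and locking here are illustrative, not the tree's exact code:

        struct page *get_nopage_retry(void)
        {
                if (atomic_read(&drm_np_retry.present) == 0) {
                        struct page *page = alloc_page(GFP_KERNEL);
                        if (!page)
                                return NOPAGE_OOM;
                        spin_lock(&drm_np_retry.lock);
                        drm_np_retry.dummy_page = page;  /* allocated once, reused */
                        atomic_set(&drm_np_retry.present, 1);
                        spin_unlock(&drm_np_retry.lock);
                }
                get_page(drm_np_retry.dummy_page);  /* do_nopage() drops this ref */
                return drm_np_retry.dummy_page;
        }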
@@ -239,15 +230,12 @@ struct fault_data {
         int type;
 };
 
-
-extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-                         unsigned long pfn);
-
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int *type);
-#else
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+        !defined(DRM_FULL_MM_COMPAT)
 extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                                      unsigned long address);
 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
@@ -718,28 +718,23 @@ EXPORT_SYMBOL(drm_mmap);
  * \c Pagefault method for buffer objects.
  *
  * \param vma Virtual memory area.
- * \param data Fault data on failure or refault.
- * \return Always NULL as we insert pfns directly.
+ * \param address File offset.
+ * \return Error or refault. The pfn is manually inserted.
  *
  * It's important that pfns are inserted while holding the bo->mutex lock.
  * otherwise we might race with unmap_mapping_range() which is always
  * called with the bo->mutex lock held.
  *
- * It's not pretty to modify the vma->vm_page_prot variable while not
- * holding the mm semaphore in write mode. However, we have it i read mode,
- * so we won't be racing with any other writers, and we only actually modify
- * it when no ptes are present so it shouldn't be a big deal.
+ * We're modifying the page attribute bits of the vma->vm_page_prot field,
+ * without holding the mmap_sem in write mode. Only in read mode.
+ * These bits are not used by the mm subsystem code, and we consider them
+ * protected by the bo->mutex lock.
  */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) || \
-        LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 #ifdef DRM_FULL_MM_COMPAT
-static
-#endif
-struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
-                             struct fault_data *data)
+static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                                     unsigned long address)
 {
-        unsigned long address = data->address;
         drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
         unsigned long page_offset;
         struct page *page = NULL;
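
Note: the rewritten comment above states the key invariant of this change. Reduced to its core (illustrative; validation and error paths elided): pfn insertion and unmap_mapping_range() are both serialized by bo->mutex, so a pte can never be inserted into a range that has already been unmapped.

        static unsigned long locking_sketch(struct vm_area_struct *vma,
                                            unsigned long address)
        {
                drm_buffer_object_t *bo = vma->vm_private_data;
                unsigned long pfn = 0;

                mutex_lock(&bo->mutex);
                /* ... validate the buffer and compute pfn under the mutex ... */
                vm_insert_pfn(vma, address, pfn);  /* pte inserted here */
                mutex_unlock(&bo->mutex);          /* unmap may run from here on */
                return NOPFN_REFAULT;
        }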
@@ -750,66 +745,43 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
         unsigned long bus_base;
         unsigned long bus_offset;
         unsigned long bus_size;
+        int ret = NOPFN_REFAULT;
 
-        mutex_lock(&bo->mutex);
+        if (address > vma->vm_end)
+                return NOPFN_SIGBUS;
+
+        err = mutex_lock_interruptible(&bo->mutex);
+        if (err)
+                return NOPFN_REFAULT;
 
         err = drm_bo_wait(bo, 0, 0, 0);
         if (err) {
-                data->type = (err == -EAGAIN) ?
-                        VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+                ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
                 goto out_unlock;
         }
-
 
         /*
          * If buffer happens to be in a non-mappable location,
          * move it to a mappable.
          */
 
-#ifdef DRM_BO_FULL_COMPAT
         if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
                 uint32_t new_mask = bo->mem.mask |
                         DRM_BO_FLAG_MAPPABLE |
                         DRM_BO_FLAG_FORCE_MAPPABLE;
                 err = drm_bo_move_buffer(bo, new_mask, 0, 0);
 
                 if (err) {
-                        data->type = (err == -EAGAIN) ?
-                                VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+                        ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
                         goto out_unlock;
                 }
         }
-#else
-        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
-                unsigned long _end = jiffies + 3*DRM_HZ;
-                uint32_t new_mask = bo->mem.mask |
-                        DRM_BO_FLAG_MAPPABLE |
-                        DRM_BO_FLAG_FORCE_MAPPABLE;
-
-                do {
-                        err = drm_bo_move_buffer(bo, new_mask, 0, 0);
-                } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
-
-                if (err) {
-                        DRM_ERROR("Timeout moving buffer to mappable location.\n");
-                        data->type = VM_FAULT_SIGBUS;
-                        goto out_unlock;
-                }
-        }
-#endif
-
-        if (address > vma->vm_end) {
-                data->type = VM_FAULT_SIGBUS;
-                goto out_unlock;
-        }
-
         dev = bo->dev;
         err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                                 &bus_size);
 
         if (err) {
-                data->type = VM_FAULT_SIGBUS;
+                ret = NOPFN_SIGBUS;
                 goto out_unlock;
         }
 
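
Note: the translation "ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;" is now open-coded three times in this function. A hypothetical helper (not in the diff) that captures the intent: -EAGAIN means "retry the fault", anything else is fatal for this access:

        /* Map a DRM error code to a nopfn result; 'fatal' picks the hard failure. */
        static inline unsigned long drm_err_to_nopfn(int err, unsigned long fatal)
        {
                return (err != -EAGAIN) ? fatal : NOPFN_REFAULT;
        }

The wait and move-buffer failure paths would then read ret = drm_err_to_nopfn(err, NOPFN_SIGBUS); and the vm_insert_pfn path below ret = drm_err_to_nopfn(err, NOPFN_OOM);.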
@@ -826,7 +798,7 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                 drm_ttm_fixup_caching(ttm);
                 page = drm_ttm_get_page(ttm, page_offset);
                 if (!page) {
-                        data->type = VM_FAULT_OOM;
+                        ret = NOPFN_OOM;
                         goto out_unlock;
                 }
                 pfn = page_to_pfn(page);
@@ -834,14 +806,13 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
         }
 
         err = vm_insert_pfn(vma, address, pfn);
-
-        if (!err || err == -EBUSY)
-                data->type = VM_FAULT_MINOR;
-        else
-                data->type = VM_FAULT_OOM;
+        if (err) {
+                ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
+                goto out_unlock;
+        }
 out_unlock:
         mutex_unlock(&bo->mutex);
-        return NULL;
+        return ret;
 }
 #endif
 
@@ -897,7 +868,7 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
 
 static struct vm_operations_struct drm_bo_vm_ops = {
 #ifdef DRM_FULL_MM_COMPAT
-        .fault = drm_bo_vm_fault,
+        .nopfn = drm_bo_vm_nopfn,
 #else
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
         .nopfn = drm_bo_vm_nopfn,
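
Note: for .nopfn to be invoked at all, the mmap path must install drm_bo_vm_ops and mark the vma VM_PFNMAP; the core mm then calls the handler on a missing pte. A simplified sketch of that wiring; the function and the lookup_bo() helper are hypothetical stand-ins for the driver's actual mmap code:

        static int sketch_bo_mmap(struct file *filp, struct vm_area_struct *vma)
        {
                /* lookup_bo() stands in for the driver's offset -> bo lookup */
                drm_buffer_object_t *bo = lookup_bo(filp, vma->vm_pgoff);

                if (!bo)
                        return -EINVAL;
                vma->vm_private_data = bo;
                vma->vm_ops = &drm_bo_vm_ops;
                vma->vm_flags |= VM_PFNMAP;  /* expected for ->nopfn / vm_insert_pfn */
                return 0;
        }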