Make modesetting-gem build with recent kernels
Needed to merge in VM fault changes & pci_read_base API update.

commit 8074b2e83d
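For context, the two API changes this commit adapts to: recent kernels dropped the ->nopfn VM operation in favour of ->fault, which receives a struct vm_fault instead of a raw address and returns VM_FAULT_* codes, and the updated pci_read_base() call now takes a BAR type (pci_bar_mem64 here) with the register offset moved to the last argument, as the i915 tiling hunk below shows. A minimal sketch of the two callback shapes only; the names are illustrative, not the driver's actual symbols:

        /* Old-style handler, used behind DRM_NO_FAULT: the faulting address
         * arrives directly and NOPFN_* codes are returned after the handler
         * inserts the pfn itself. */
        unsigned long example_nopfn(struct vm_area_struct *vma,
                                    unsigned long address);

        /* New-style handler: the address is carried in vmf->virtual_address
         * and the return value is a VM_FAULT_* code (VM_FAULT_NOPAGE when the
         * pfn was inserted manually, as drm_bo_vm_fault does below). */
        int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf);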
@@ -806,3 +806,102 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
 EXPORT_SYMBOL(kmap_atomic_prot_pfn);
 #endif

+#ifdef DRM_FULL_MM_COMPAT
+#ifdef DRM_NO_FAULT
+unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                              unsigned long address)
+{
+        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+        unsigned long page_offset;
+        struct page *page = NULL;
+        struct drm_ttm *ttm;
+        struct drm_device *dev;
+        unsigned long pfn;
+        int err;
+        unsigned long bus_base;
+        unsigned long bus_offset;
+        unsigned long bus_size;
+        unsigned long ret = NOPFN_REFAULT;
+
+        if (address > vma->vm_end)
+                return NOPFN_SIGBUS;
+
+        dev = bo->dev;
+        err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+        if (err)
+                return NOPFN_REFAULT;
+
+        err = mutex_lock_interruptible(&bo->mutex);
+        if (err) {
+                drm_bo_read_unlock(&dev->bm.bm_lock);
+                return NOPFN_REFAULT;
+        }
+
+        err = drm_bo_wait(bo, 0, 1, 0, 1);
+        if (err) {
+                ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+                bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
+                goto out_unlock;
+        }
+
+        bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
+
+        /*
+         * If buffer happens to be in a non-mappable location,
+         * move it to a mappable.
+         */
+
+        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+                uint32_t new_flags = bo->mem.proposed_flags |
+                        DRM_BO_FLAG_MAPPABLE |
+                        DRM_BO_FLAG_FORCE_MAPPABLE;
+                err = drm_bo_move_buffer(bo, new_flags, 0, 0);
+                if (err) {
+                        ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+                        goto out_unlock;
+                }
+        }
+
+        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+                                &bus_size);
+
+        if (err) {
+                ret = NOPFN_SIGBUS;
+                goto out_unlock;
+        }
+
+        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+        if (bus_size) {
+                struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
+
+                pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+                vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+        } else {
+                ttm = bo->ttm;
+
+                drm_ttm_fixup_caching(ttm);
+                page = drm_ttm_get_page(ttm, page_offset);
+                if (!page) {
+                        ret = NOPFN_OOM;
+                        goto out_unlock;
+                }
+                pfn = page_to_pfn(page);
+                vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
+                        vm_get_page_prot(vma->vm_flags) :
+                        drm_io_prot(_DRM_TTM, vma);
+        }
+
+        err = vm_insert_pfn(vma, address, pfn);
+        if (err) {
+                ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
+                goto out_unlock;
+        }
+out_unlock:
+        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
+        mutex_unlock(&bo->mutex);
+        drm_bo_read_unlock(&dev->bm.bm_lock);
+        return ret;
+}
+#endif
+#endif
@@ -319,6 +319,9 @@ extern int drm_bo_map_bound(struct vm_area_struct *vma);
 /* fixme when functions are upstreamed - upstreamed for 2.6.23 */
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 #define DRM_IDR_COMPAT_FN
+#define DRM_NO_FAULT
+extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                                     unsigned long address);
 #endif
 #ifdef DRM_IDR_COMPAT_FN
 int idr_for_each(struct idr *idp,
@@ -685,8 +685,8 @@ EXPORT_SYMBOL(drm_mmap);
  * \c Pagefault method for buffer objects.
  *
  * \param vma Virtual memory area.
- * \param address File offset.
- * \return Error or refault. The pfn is manually inserted.
+ * \param vmf vm fault data
+ * \return Error or VM_FAULT_NOPAGE:. The pfn is manually inserted.
  *
  * It's important that pfns are inserted while holding the bo->mutex lock.
  * otherwise we might race with unmap_mapping_range() which is always
@@ -699,8 +699,8 @@ EXPORT_SYMBOL(drm_mmap);
  */

 #ifdef DRM_FULL_MM_COMPAT
-static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
-                                     unsigned long address)
+static int drm_bo_vm_fault(struct vm_area_struct *vma,
+                           struct vm_fault *vmf)
 {
         struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
         unsigned long page_offset;
@@ -712,25 +712,22 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
         unsigned long bus_base;
         unsigned long bus_offset;
         unsigned long bus_size;
-        unsigned long ret = NOPFN_REFAULT;
-
-        if (address > vma->vm_end)
-                return NOPFN_SIGBUS;
+        unsigned long ret = VM_FAULT_NOPAGE;

         dev = bo->dev;
         err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
         if (err)
-                return NOPFN_REFAULT;
+                return VM_FAULT_NOPAGE;

         err = mutex_lock_interruptible(&bo->mutex);
         if (err) {
                 drm_bo_read_unlock(&dev->bm.bm_lock);
-                return NOPFN_REFAULT;
+                return VM_FAULT_NOPAGE;
         }

         err = drm_bo_wait(bo, 0, 1, 0, 1);
         if (err) {
-                ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+                ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
                 goto out_unlock;
         }
@@ -748,7 +745,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                         DRM_BO_FLAG_FORCE_MAPPABLE;
                 err = drm_bo_move_buffer(bo, new_flags, 0, 0);
                 if (err) {
-                        ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+                        ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                         goto out_unlock;
                 }
         }
@@ -757,11 +754,11 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                                 &bus_size);

         if (err) {
-                ret = NOPFN_SIGBUS;
+                ret = VM_FAULT_SIGBUS;
                 goto out_unlock;
         }

-        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;

         if (bus_size) {
                 struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
@@ -774,7 +771,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                 drm_ttm_fixup_caching(ttm);
                 page = drm_ttm_get_page(ttm, page_offset);
                 if (!page) {
-                        ret = NOPFN_OOM;
+                        ret = VM_FAULT_OOM;
                         goto out_unlock;
                 }
                 pfn = page_to_pfn(page);
@@ -783,9 +780,9 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                         drm_io_prot(_DRM_TTM, vma);
         }

-        err = vm_insert_pfn(vma, address, pfn);
+        err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
         if (err) {
-                ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
+                ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
                 goto out_unlock;
         }
 out_unlock:
@@ -849,7 +846,11 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)

 static struct vm_operations_struct drm_bo_vm_ops = {
 #ifdef DRM_FULL_MM_COMPAT
+#ifdef DRM_NO_FAULT
         .nopfn = drm_bo_vm_nopfn,
+#else
+        .fault = drm_bo_vm_fault,
+#endif
 #else
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
         .nopfn = drm_bo_vm_nopfn,
@@ -35,7 +35,7 @@
 void
 i915_verify_inactive(struct drm_device *dev, char *file, int line)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_gem_object *obj;
         struct drm_i915_gem_object *obj_priv;

@@ -102,7 +102,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
 void
 i915_dump_lru(struct drm_device *dev, const char *where)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv;

         DRM_INFO("active list %s {\n", where);
@@ -115,7 +115,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
         * since the bridge would only ever use standard BARs 0-1 (though it
         * doesn't anyway)
         */
-        ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
+        ret = pci_read_base(bridge, pci_bar_mem64, &bridge->resource[2],
+                            mchbar_offset);
         if (ret != 0) {
                 DRM_ERROR("pci_read_base failed: %d\n", ret);
                 return;
@@ -388,7 +388,7 @@ struct drm_i915_private {
                 /** Bit 6 swizzling required for Y tiling */
                 uint32_t bit_6_swizzle_y;
         } mm;
-} drm_i915_private_t;
+};

 struct drm_i915_file_private {
         struct {
@@ -523,7 +523,6 @@ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
 extern int i915_driver_irq_postinstall(struct drm_device * dev);
 extern void i915_driver_irq_uninstall(struct drm_device * dev);
-extern void i915_enable_interrupt(struct drm_device *dev);
 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
@@ -1101,15 +1101,62 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
  */
 void i915_driver_irq_preinstall(struct drm_device * dev)
 {
-        return;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        I915_WRITE16(HWSTAM, 0xeffe);
+        I915_WRITE16(IMR, 0x0);
+        I915_WRITE16(IER, 0x0);
 }

 int i915_driver_irq_postinstall(struct drm_device * dev)
 {
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        int ret, num_pipes = 2;
+
+        DRM_SPININIT(&dev_priv->swaps_lock, "swap");
+        INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+        dev_priv->swaps_pending = 0;
+
+        DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
+        dev_priv->user_irq_refcount = 0;
+
+        ret = drm_vblank_init(dev, num_pipes);
+        if (ret)
+                return ret;
+
+        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+        i915_enable_interrupt(dev);
+        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+
+        /*
+         * Initialize the hardware status page IRQ location.
+         */
+
+        I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
         return 0;
 }

 void i915_driver_irq_uninstall(struct drm_device * dev)
 {
-        return;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 temp;
+
+        if (!dev_priv)
+                return;
+
+        dev_priv->vblank_pipe = 0;
+
+        dev_priv->irq_enabled = 0;
+        I915_WRITE(HWSTAM, 0xffffffff);
+        I915_WRITE(IMR, 0xffffffff);
+        I915_WRITE(IER, 0x0);
+
+        temp = I915_READ(PIPEASTAT);
+        I915_WRITE(PIPEASTAT, temp);
+        temp = I915_READ(PIPEBSTAT);
+        I915_WRITE(PIPEBSTAT, temp);
+        temp = I915_READ(IIR);
+        I915_WRITE(IIR, temp);
 }