2004-07-11 04:17:34 -06:00
|
|
|
/**
|
2004-09-30 15:12:10 -06:00
|
|
|
* \file drm_compat.h
|
2004-09-08 14:57:39 -06:00
|
|
|
 * Backward compatibility definitions for Direct Rendering Manager
|
2004-09-30 15:12:10 -06:00
|
|
|
*
|
2004-07-11 04:17:34 -06:00
|
|
|
* \author Rickard E. (Rik) Faith <faith@valinux.com>
|
|
|
|
* \author Gareth Hughes <gareth@valinux.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
|
|
|
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
|
|
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
|
|
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
|
|
* OTHER DEALINGS IN THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _DRM_COMPAT_H_
|
|
|
|
#define _DRM_COMPAT_H_
|
|
|
|
|
|
|
|
#ifndef minor
|
|
|
|
#define minor(x) MINOR((x))
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef MODULE_LICENSE
|
2004-09-30 15:12:10 -06:00
|
|
|
#define MODULE_LICENSE(x)
|
2004-07-11 04:17:34 -06:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef preempt_disable
|
|
|
|
#define preempt_disable()
|
|
|
|
#define preempt_enable()
|
|
|
|
#endif
|
|
|
|
|
2004-09-30 15:12:10 -06:00
|
|
|
#ifndef pte_offset_map
|
2004-07-11 04:17:34 -06:00
|
|
|
#define pte_offset_map pte_offset
|
|
|
|
#define pte_unmap(pte)
|
|
|
|
#endif
|
|
|
|
|
2004-09-22 23:39:15 -06:00
|
|
|
#ifndef module_param
|
|
|
|
#define module_param(name, type, perm)
|
|
|
|
#endif
|
|
|
|
|
2006-12-19 00:03:20 -07:00
|
|
|
/* older kernels had different irq args */
|
|
|
|
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
|
|
|
|
#undef DRM_IRQ_ARGS
|
|
|
|
#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs
|
|
|
|
#endif
|
|
|
|
|
2004-07-11 04:17:34 -06:00
|
|
|
#ifndef list_for_each_safe
|
|
|
|
#define list_for_each_safe(pos, n, head) \
|
|
|
|
for (pos = (head)->next, n = pos->next; pos != (head); \
|
|
|
|
pos = n, n = pos->next)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef list_for_each_entry
|
|
|
|
#define list_for_each_entry(pos, head, member) \
|
|
|
|
for (pos = list_entry((head)->next, typeof(*pos), member), \
|
|
|
|
prefetch(pos->member.next); \
|
|
|
|
&pos->member != (head); \
|
|
|
|
pos = list_entry(pos->member.next, typeof(*pos), member), \
|
|
|
|
prefetch(pos->member.next))
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef list_for_each_entry_safe
|
|
|
|
#define list_for_each_entry_safe(pos, n, head, member) \
|
|
|
|
for (pos = list_entry((head)->next, typeof(*pos), member), \
|
|
|
|
n = list_entry(pos->member.next, typeof(*pos), member); \
|
|
|
|
&pos->member != (head); \
|
|
|
|
pos = n, n = list_entry(n->member.next, typeof(*n), member))
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef __user
|
|
|
|
#define __user
|
|
|
|
#endif
|
|
|
|
|
2007-11-04 19:42:22 -07:00
|
|
|
#if !defined(__put_page)
|
2004-07-11 04:17:34 -06:00
|
|
|
#define __put_page(p) atomic_dec(&(p)->count)
|
|
|
|
#endif
|
|
|
|
|
2006-04-23 03:05:05 -06:00
|
|
|
#if !defined(__GFP_COMP)
|
|
|
|
#define __GFP_COMP 0
|
|
|
|
#endif
|
|
|
|
|
2007-03-10 18:13:58 -07:00
|
|
|
#if !defined(IRQF_SHARED)
|
|
|
|
#define IRQF_SHARED SA_SHIRQ
|
|
|
|
#endif
|
|
|
|
|
2004-10-23 01:02:29 -06:00
|
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
|
2004-10-23 00:59:15 -06:00
|
|
|
/*
 * Kernels prior to 2.6.10 lack remap_pfn_range(); emulate it with the
 * older remap_page_range(), which takes a physical address rather than
 * a page frame number.
 */
static inline int remap_pfn_range(struct vm_area_struct *vma,
				  unsigned long from,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t pgprot)
{
	return remap_page_range(vma, from, pfn << PAGE_SHIFT, size, pgprot);
}
|
2006-12-19 04:10:34 -07:00
|
|
|
|
|
|
|
/*
 * Kernels prior to 2.6.10 lack kcalloc(); emulate it with
 * kmalloc() + memset().
 *
 * Allocates zeroed memory for an array of nmemb elements of size bytes
 * each.  Returns NULL on allocation failure, or if nmemb * size would
 * overflow — matching the overflow check the real kcalloc() performs.
 */
static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
{
	void *addr;

	/* Reject nmemb * size overflow, as upstream kcalloc() does. */
	if (size != 0 && nmemb > (~(size_t) 0) / size)
		return NULL;

	addr = kmalloc(size * nmemb, flags);
	if (addr != NULL)
		memset(addr, 0, size * nmemb);

	return addr;
}
|
2004-10-23 00:59:15 -06:00
|
|
|
#endif
|
|
|
|
|
2006-07-23 18:51:27 -06:00
|
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
|
|
|
|
#define mutex_lock down
|
|
|
|
#define mutex_unlock up
|
|
|
|
|
|
|
|
#define mutex semaphore
|
|
|
|
|
|
|
|
#define mutex_init(a) sema_init((a), 1)
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2006-08-09 22:32:18 -06:00
|
|
|
#ifndef DEFINE_SPINLOCK
|
|
|
|
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
|
|
|
|
#endif
|
|
|
|
|
2004-09-04 20:36:48 -06:00
|
|
|
/* old architectures */
|
|
|
|
#ifdef __AMD64__
|
|
|
|
#define __x86_64__
|
|
|
|
#endif
|
|
|
|
|
2005-09-24 23:19:06 -06:00
|
|
|
/* sysfs __ATTR macro */
|
|
|
|
#ifndef __ATTR
|
|
|
|
#define __ATTR(_name,_mode,_show,_store) { \
|
|
|
|
.attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
|
|
|
|
.show = _show, \
|
|
|
|
.store = _store, \
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2006-12-20 06:40:36 -07:00
|
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
|
|
|
|
#define vmalloc_user(_size) ({void * tmp = vmalloc(_size); \
|
|
|
|
if (tmp) memset(tmp, 0, size); \
|
|
|
|
(tmp);})
|
|
|
|
#endif
|
|
|
|
|
2007-02-22 09:04:20 -07:00
|
|
|
#ifndef list_for_each_entry_safe_reverse
|
|
|
|
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
|
|
|
|
for (pos = list_entry((head)->prev, typeof(*pos), member), \
|
|
|
|
n = list_entry(pos->member.prev, typeof(*pos), member); \
|
|
|
|
&pos->member != (head); \
|
|
|
|
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
|
|
|
|
#endif
|
2006-12-20 06:40:36 -07:00
|
|
|
|
2006-08-21 12:38:57 -06:00
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
|
2007-02-14 02:49:37 -07:00
|
|
|
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
|
|
|
|
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
|
2006-10-11 14:21:01 -06:00
|
|
|
#define DRM_ODD_MM_COMPAT
|
|
|
|
#endif
|
|
|
|
|
2007-02-14 02:49:37 -07:00
|
|
|
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
|
|
|
|
#define DRM_FULL_MM_COMPAT
|
|
|
|
#endif
|
2006-10-11 14:21:01 -06:00
|
|
|
|
|
|
|
|
2006-08-21 12:38:57 -06:00
|
|
|
/*
|
2007-11-04 19:42:22 -07:00
|
|
|
* Flush relevant caches and clear a VMA structure so that page references
|
2006-08-21 12:38:57 -06:00
|
|
|
* will cause a page fault. Don't flush tlbs.
|
|
|
|
*/
|
|
|
|
|
|
|
|
extern void drm_clear_vma(struct vm_area_struct *vma,
|
|
|
|
unsigned long addr, unsigned long end);
|
|
|
|
|
|
|
|
/*
|
2007-11-04 19:42:22 -07:00
|
|
|
* Return the PTE protection map entries for the VMA flags given by
|
2006-08-21 12:38:57 -06:00
|
|
|
* flags. This is a functional interface to the kernel's protection map.
|
|
|
|
*/
|
|
|
|
|
2006-08-25 10:14:22 -06:00
|
|
|
extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
|
2006-08-21 12:38:57 -06:00
|
|
|
|
2006-12-27 07:32:09 -07:00
|
|
|
#ifndef GFP_DMA32
|
2007-09-22 05:38:36 -06:00
|
|
|
#define GFP_DMA32 GFP_KERNEL
|
|
|
|
#endif
|
|
|
|
#ifndef __GFP_DMA32
|
|
|
|
#define __GFP_DMA32 GFP_KERNEL
|
2006-12-27 07:32:09 -07:00
|
|
|
#endif
|
2006-09-27 01:27:31 -06:00
|
|
|
|
|
|
|
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
|
|
|
|
|
|
|
|
/*
|
|
|
|
* These are too slow in earlier kernels.
|
|
|
|
*/
|
|
|
|
|
|
|
|
extern int drm_unmap_page_from_agp(struct page *page);
|
|
|
|
extern int drm_map_page_into_agp(struct page *page);
|
|
|
|
|
|
|
|
#define map_page_into_agp drm_map_page_into_agp
|
|
|
|
#define unmap_page_from_agp drm_unmap_page_from_agp
|
|
|
|
#endif
|
|
|
|
|
2006-10-20 07:06:31 -06:00
|
|
|
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
|
|
|
|
extern struct page *get_nopage_retry(void);
|
|
|
|
extern void free_nopage_retry(void);
|
|
|
|
|
|
|
|
#define NOPAGE_REFAULT get_nopage_retry()
|
|
|
|
#endif
|
|
|
|
|
2007-02-14 02:49:37 -07:00
|
|
|
|
|
|
|
#ifndef DRM_FULL_MM_COMPAT
|
2006-10-10 02:37:26 -06:00
|
|
|
|
|
|
|
/*
|
2007-11-04 19:42:22 -07:00
|
|
|
* For now, just return a dummy page that we've allocated out of
|
2006-10-10 02:37:26 -06:00
|
|
|
* static space. The page will be put by do_nopage() since we've already
|
|
|
|
* filled out the pte.
|
|
|
|
*/
|
2006-10-11 05:40:35 -06:00
|
|
|
|
|
|
|
struct fault_data {
|
|
|
|
struct vm_area_struct *vma;
|
|
|
|
unsigned long address;
|
|
|
|
pgoff_t pgoff;
|
|
|
|
unsigned int flags;
|
2007-11-04 19:42:22 -07:00
|
|
|
|
2006-10-11 05:40:35 -06:00
|
|
|
int type;
|
|
|
|
};
|
|
|
|
|
2007-02-14 02:49:37 -07:00
|
|
|
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
|
2007-02-02 06:47:44 -07:00
|
|
|
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
|
2007-11-04 19:42:22 -07:00
|
|
|
unsigned long address,
|
2007-02-02 06:47:44 -07:00
|
|
|
int *type);
|
2007-02-26 10:17:54 -07:00
|
|
|
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
|
|
|
|
!defined(DRM_FULL_MM_COMPAT)
|
2007-02-14 02:49:37 -07:00
|
|
|
extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
|
|
|
|
unsigned long address);
|
|
|
|
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
|
|
|
|
#endif /* ndef DRM_FULL_MM_COMPAT */
|
2006-10-11 14:21:01 -06:00
|
|
|
|
|
|
|
#ifdef DRM_ODD_MM_COMPAT
|
|
|
|
|
2007-02-02 11:49:11 -07:00
|
|
|
struct drm_buffer_object;
|
2006-10-11 14:21:01 -06:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
2007-11-04 19:42:22 -07:00
|
|
|
* Add a vma to the ttm vma list, and the
|
2006-10-11 14:21:01 -06:00
|
|
|
* process mm pointer to the ttm mm list. Needs the ttm mutex.
|
|
|
|
*/
|
|
|
|
|
2007-11-04 19:42:22 -07:00
|
|
|
extern int drm_bo_add_vma(struct drm_buffer_object * bo,
|
2006-10-11 14:21:01 -06:00
|
|
|
struct vm_area_struct *vma);
|
|
|
|
/*
|
|
|
|
* Delete a vma and the corresponding mm pointer from the
|
|
|
|
* ttm lists. Needs the ttm mutex.
|
|
|
|
*/
|
2007-11-04 19:42:22 -07:00
|
|
|
extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
|
2007-02-02 11:49:11 -07:00
|
|
|
struct vm_area_struct *vma);
|
2006-10-11 14:21:01 -06:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Attempts to lock all relevant mmap_sems for a ttm, while
|
2007-11-04 19:42:22 -07:00
|
|
|
* not releasing the ttm mutex. May return -EAGAIN to avoid
|
2006-10-11 14:21:01 -06:00
|
|
|
* deadlocks. In that case the caller shall release the ttm mutex,
|
|
|
|
* schedule() and try again.
|
|
|
|
*/
|
|
|
|
|
2007-02-02 11:49:11 -07:00
|
|
|
extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
|
2006-10-11 14:21:01 -06:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Unlock all relevant mmap_sems for a ttm.
|
|
|
|
*/
|
2007-02-02 11:49:11 -07:00
|
|
|
extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
|
2006-10-11 14:21:01 -06:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the ttm was bound to the aperture, this function shall be called
|
|
|
|
* with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
|
|
|
|
* vmas mapping this ttm. This is needed just after unmapping the ptes of
|
|
|
|
* the vma, otherwise the do_nopage() function will bug :(. The function
|
|
|
|
* releases the mmap_sems for this ttm.
|
|
|
|
*/
|
|
|
|
|
2007-02-02 11:49:11 -07:00
|
|
|
extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
|
2006-10-11 14:21:01 -06:00
|
|
|
|
|
|
|
/*
|
2007-11-04 19:42:22 -07:00
|
|
|
* Remap all vmas of this ttm using io_remap_pfn_range. We cannot
|
2006-10-11 14:21:01 -06:00
|
|
|
* fault these pfns in, because the first one will set the vma VM_PFNMAP
|
|
|
|
* flag, which will make the next fault bug in do_nopage(). The function
|
|
|
|
* releases the mmap_sems for this ttm.
|
|
|
|
*/
|
|
|
|
|
2007-02-02 11:49:11 -07:00
|
|
|
extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
|
2006-10-11 14:21:01 -06:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remap a vma for a bound ttm. Call with the ttm mutex held and
|
|
|
|
* the relevant mmap_sem locked.
|
|
|
|
*/
|
2007-02-02 11:49:11 -07:00
|
|
|
extern int drm_bo_map_bound(struct vm_area_struct *vma);
|
2006-10-11 14:21:01 -06:00
|
|
|
|
2004-07-11 04:17:34 -06:00
|
|
|
#endif
|
2007-06-09 23:40:10 -06:00
|
|
|
|
2007-07-17 17:46:16 -06:00
|
|
|
/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
|
|
|
|
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
|
2007-06-09 23:40:10 -06:00
|
|
|
#define DRM_IDR_COMPAT_FN
|
2007-07-17 17:46:16 -06:00
|
|
|
#endif
|
2007-06-09 23:40:10 -06:00
|
|
|
#ifdef DRM_IDR_COMPAT_FN
|
|
|
|
int idr_for_each(struct idr *idp,
|
|
|
|
int (*fn)(int id, void *p, void *data), void *data);
|
|
|
|
void idr_remove_all(struct idr *idp);
|
|
|
|
#endif
|
|
|
|
|
2007-09-12 11:48:48 -06:00
|
|
|
|
|
|
|
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
|
|
|
|
void *idr_replace(struct idr *idp, void *ptr, int id);
|
|
|
|
#endif
|
|
|
|
|
2007-09-12 12:05:15 -06:00
|
|
|
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
|
|
|
|
typedef _Bool bool;
|
|
|
|
#endif
|
|
|
|
|
2008-03-12 04:34:29 -06:00
|
|
|
|
2008-02-29 13:57:40 -07:00
|
|
|
/*
 * kmap_atomic_prot_pfn() is supplied by the DRM compat layer for 32-bit
 * x86 highmem configurations where the kernel does not provide it.
 *
 * Fix: the kernel config symbol is CONFIG_HIGHMEM, not CONFIG_HIMEM; the
 * typo meant this block could never be enabled.
 */
#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
#define DRM_KMAP_ATOMIC_PROT_PFN
extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
				  pgprot_t protection);
#endif
|
2008-03-06 16:29:35 -07:00
|
|
|
|
|
|
|
#if !defined(flush_agp_mappings)
|
|
|
|
#define flush_agp_mappings() do {} while(0)
|
|
|
|
#endif
|
|
|
|
|
2008-03-16 15:05:46 -06:00
|
|
|
#ifndef DMA_BIT_MASK
|
|
|
|
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
|
|
|
|
#endif
|
|
|
|
|
2008-05-06 23:10:23 -06:00
|
|
|
#ifndef VM_CAN_NONLINEAR
|
|
|
|
#define DRM_VM_NOPAGE 1
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef DRM_VM_NOPAGE
|
|
|
|
|
|
|
|
extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
|
|
|
|
unsigned long address, int *type);
|
|
|
|
|
|
|
|
extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
|
|
|
|
unsigned long address, int *type);
|
|
|
|
|
|
|
|
extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
|
|
|
|
unsigned long address, int *type);
|
|
|
|
|
|
|
|
extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
|
|
|
|
unsigned long address, int *type);
|
|
|
|
#endif
|
|
|
|
|
2006-10-11 05:40:35 -06:00
|
|
|
#endif
|