Merge remote branch 'origin/master' into modesetting-101

Conflicts:
	linux-core/drm_compat.c

commit 43891ff2d0
@@ -2809,19 +2809,14 @@ int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
int drmBOBusy(int fd, drmBO *buf, int *busy)
{
    if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) &&
        !(buf->replyFlags & DRM_BO_REP_BUSY)) {
        *busy = 0;
        return 0;
    }
    else {
        int ret = drmBOInfo(fd, buf);

        if (ret)
            return ret;

        *busy = (buf->replyFlags & DRM_BO_REP_BUSY);
        return 0;
    }
}

int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
              unsigned memType)
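For orientation only, a minimal sketch of how user space might poll the entry point in the hunk above. The drmBOBusy() signature comes from the hunk itself; the helper name, headers, and usleep() back-off are illustrative assumptions, not libdrm code:

#include <unistd.h>     /* usleep(); illustrative back-off only */
#include "xf86drm.h"    /* assumed libdrm headers of this era */
#include "xf86mm.h"     /* drmBO and the buffer-object calls */

/* Hypothetical helper: poll a buffer object until it reports idle. */
static int poll_until_idle(int fd, drmBO *buf)
{
    int busy = 1;

    while (busy) {
        /* Cheap check first; non-shareable buffers that were not
         * reported busy skip the drmBOInfo() ioctl, per the hunk above. */
        int ret = drmBOBusy(fd, buf, &busy);
        if (ret)
            return ret;
        if (busy)
            usleep(1000);
    }
    return 0;
}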
@@ -666,7 +666,7 @@ void drm_agp_chipset_flush(struct drm_device *dev)
{
        agp_flush_chipset(dev->agp->bridge);
}
EXPORT_SYMBOL(drm_agp_flush_chipset);
EXPORT_SYMBOL(drm_agp_chipset_flush);
#endif

#endif /* __OS_HAS_AGP */
@@ -596,3 +596,36 @@ void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
        map->page = NULL;
}
EXPORT_SYMBOL(drm_bo_kunmap);

int drm_bo_pfn_prot(struct drm_buffer_object *bo,
                    unsigned long dst_offset,
                    unsigned long *pfn,
                    pgprot_t *prot)
{
        struct drm_bo_mem_reg *mem = &bo->mem;
        struct drm_device *dev = bo->dev;
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
        int ret;

        ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
                                &bus_size);
        if (ret)
                return -EINVAL;

        if (bus_size != 0)
                *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
        else if (!bo->ttm)
                return -EINVAL;
        else
                *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));

        *prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
                PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);

        return 0;
}
EXPORT_SYMBOL(drm_bo_pfn_prot);
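To illustrate what the new export is for (not code from this commit): drm_bo_pfn_prot() hands back both a page-frame number and the page protection matching the memory type, which is what a fault-style path needs when inserting a buffer-object page into a user mapping. A hedged sketch, with a hypothetical function name and error handling:

#include <linux/mm.h>           /* vm_insert_pfn(), kernel helper of this era */
#include "drmP.h"               /* assumed in-tree DRM headers (drm_buffer_object, drm_bo_pfn_prot) */

static int example_bo_insert_page(struct drm_buffer_object *bo,
                                  struct vm_area_struct *vma,
                                  unsigned long address,
                                  unsigned long offset)
{
        unsigned long pfn;
        pgprot_t prot;
        int ret;

        /* Resolve offset -> pfn for either PCI-backed or TTM-backed memory. */
        ret = drm_bo_pfn_prot(bo, offset, &pfn, &prot);
        if (ret)
                return ret;

        vma->vm_page_prot = prot;                /* cached vs. io protection */
        return vm_insert_pfn(vma, address, pfn);
}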
@@ -779,3 +779,32 @@ struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
}
EXPORT_SYMBOL(pci_get_bus_and_slot);
#endif

#if defined(DRM_KMAP_ATOMIC_PROT_PFN) && defined(CONFIG_HIMEM)
#define drm_kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
                           pgprot_t protection)
{
        enum fixed_addresses idx;
        unsigned long vaddr;
        static pte_t *km_pte;
        static int initialized = 0;

        if (unlikely(!initialized)) {
                km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
                initialized = 1;
        }

        pagefault_disable();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(km_pte-idx, pfn_pte(pfn, protection));

        return (void*) vaddr;
}

EXPORT_SYMBOL(kmap_atomic_prot_pfn);
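A small usage sketch, not taken from the commit: kmap_atomic_prot_pfn() maps one page by pfn with an explicit protection and leaves pagefaults disabled, so it pairs with the kunmap_atomic() of this kernel era (which still takes a km_type slot). The copy and the function name below are illustrative assumptions:

#include <linux/highmem.h>      /* kunmap_atomic(), KM_USER0 */
#include <linux/string.h>       /* memcpy() */
#include "drm_compat.h"         /* kmap_atomic_prot_pfn() prototype, declared in the next hunk */

static void example_copy_to_pfn(unsigned long pfn, const void *src)
{
        /* Map the page uncached for the duration of the copy. */
        void *dst = kmap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_NOCACHE);

        memcpy(dst, src, PAGE_SIZE);
        kunmap_atomic(dst, KM_USER0);   /* re-enables pagefaults */
}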
@@ -343,4 +343,9 @@ extern struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
#define PM_EVENT_PRETHAW 3
#endif

#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
#define DRM_KMAP_ATOMIC_PROT_PFN
extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
                                  pgprot_t protection);
#endif
#endif
@@ -383,7 +383,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm);
 * The array of page pointers was allocated with vmalloc
 * instead of drm_calloc.
 */
#define DRM_TTM_PAGE_VMALLOC (1 << 4)
#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4)
/*
 * This ttm is mapped from user space
 */
@@ -741,6 +741,10 @@ static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
                       unsigned long num_pages, struct drm_bo_kmap_obj *map);
extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
                           unsigned long dst_offset,
                           unsigned long *pfn,
                           pgprot_t *prot);


/*
@@ -42,11 +42,12 @@ void drm_ttm_cache_flush(void)
}
EXPORT_SYMBOL(drm_ttm_cache_flush);

/*
 * Use kmalloc if possible. Otherwise fall back to vmalloc.
/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */

static void drm_ttm_alloc_pages(struct drm_ttm *ttm)
static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
        ttm->pages = NULL;
@@ -60,19 +61,19 @@ static void drm_ttm_alloc_pages(struct drm_ttm *ttm)
        if (!ttm->pages) {
                ttm->pages = vmalloc_user(size);
                if (ttm->pages)
                        ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
                        ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC;
        }
        if (!ttm->pages)
                drm_free_memctl(size);
}

static void drm_ttm_free_pages(struct drm_ttm *ttm)
static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);

        if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
        if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) {
                vfree(ttm->pages);
                ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
                ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC;
        } else {
                drm_free(ttm->pages, size, DRM_MEM_TTM);
        }
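The hunks above rename the page-array helpers to "page directory" names; the allocation pattern itself is unchanged: try a kmalloc-class allocation first, fall back to vmalloc for large directories, and record which path was taken so the matching free routine runs later. A standalone sketch of that pattern, with hypothetical names and a simplified flag standing in for DRM_TTM_PAGEDIR_VMALLOC:

#include <linux/slab.h>
#include <linux/vmalloc.h>

#define EX_DIR_VMALLOC 0x1      /* illustrative stand-in flag */

static void *ex_alloc_dir(unsigned long size, unsigned long *flags)
{
        void *dir = kmalloc(size, GFP_KERNEL);

        if (!dir) {
                dir = vmalloc(size);    /* fallback for large directories */
                if (dir)
                        *flags |= EX_DIR_VMALLOC;
        }
        return dir;
}

static void ex_free_dir(void *dir, unsigned long *flags)
{
        if (*flags & EX_DIR_VMALLOC) {
                vfree(dir);
                *flags &= ~EX_DIR_VMALLOC;
        } else {
                kfree(dir);
        }
}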
@@ -215,7 +216,7 @@ int drm_ttm_destroy(struct drm_ttm *ttm)
                else
                        drm_ttm_free_alloced_pages(ttm);

                drm_ttm_free_pages(ttm);
                drm_ttm_free_page_directory(ttm);
        }

        drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
@@ -349,7 +350,7 @@ struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
         * Account also for AGP module memory usage.
         */

        drm_ttm_alloc_pages(ttm);
        drm_ttm_alloc_page_directory(ttm);
        if (!ttm->pages) {
                drm_ttm_destroy(ttm);
                DRM_ERROR("Failed allocating page table\n");
@@ -645,8 +645,14 @@ struct drm_set_version {
#define DRM_FENCE_FLAG_EMIT                0x00000001
#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
#define DRM_FENCE_FLAG_NO_USER             0x00000010

/* Reserved for driver use */
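Usage note, illustrative and not from this commit: the DRM_FENCE_FLAG_* values are single bits that get OR'ed into the flags word handed to a fence operation, so a caller wanting a sleeping, signal-ignoring wait would combine them like this:

        unsigned wait_flags = DRM_FENCE_FLAG_WAIT_LAZY |
                              DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS;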
@@ -795,13 +801,12 @@ struct drm_fence_arg {
 * with it as a result of this operation
 */
#define DRM_BO_HINT_DONT_FENCE  0x00000004
/*
 * Sleep while waiting for the operation to complete.
 * Without this flag, the kernel will, instead, spin
 * until this operation has completed. I'm not sure
 * why you would ever want this, so please always
 * provide DRM_BO_HINT_WAIT_LAZY to any operation
 * which may block
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_BO_HINT_WAIT_LAZY   0x00000008
/*
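For illustration only: DRM_BO_HINT_WAIT_LAZY travels in the hint word of buffer-object waits such as drmBOWaitIdle(), whose signature appears in the first hunk header of this diff. The wrapper below, its headers, and its error reporting are assumptions, not libdrm code:

#include <stdio.h>
#include "xf86drm.h"    /* assumed libdrm headers of this era; pull in the DRM_BO_HINT_* values above */
#include "xf86mm.h"     /* drmBO and drmBOWaitIdle() */

/* Hypothetical wrapper: block until 'buf' is idle, asking the kernel to
 * sleep between checks on hardware without completion interrupts. */
static int wait_idle_lazy(int fd, drmBO *buf)
{
    int ret = drmBOWaitIdle(fd, buf, DRM_BO_HINT_WAIT_LAZY);

    if (ret)
        fprintf(stderr, "drmBOWaitIdle failed: %d\n", ret);
    return ret;
}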
@@ -804,6 +804,7 @@ struct i915_relocatee_info {
        unsigned page_offset;
        struct drm_bo_kmap_obj kmap;
        int is_iomem;
        int idle;
};

struct drm_i915_validate_buffer {
@@ -859,6 +860,14 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
                drm_bo_kunmap(&relocatee->kmap);
                relocatee->data_page = NULL;
                relocatee->offset = new_cmd_offset;

                if (unlikely(!relocatee->idle)) {
                        ret = drm_bo_wait(relocatee->buf, 0, 0, 0);
                        if (ret)
                                return ret;
                        relocatee->idle = 1;
                }

                ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
                                  1, &relocatee->kmap);
                if (ret) {
@@ -1002,10 +1011,6 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
        }

        mutex_lock (&relocatee.buf->mutex);
        ret = drm_bo_wait (relocatee.buf, 0, 0, FALSE);
        if (ret)
                goto out_err1;

        while (reloc_user_ptr) {
                ret = i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, &relocatee, buffers, buf_count);
                if (ret) {
@@ -415,6 +415,13 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
        if (i915_in_vblank(dev, pipe))
                count++;
#endif
        /* count may be reset by another driver (e.g. the 2D driver);
         * when count is zero we have no way to tell whether it wrapped
         * or was reset, so make a rough guess.
         */
        if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2)
                dev->last_vblank[pipe] = 0;

        return count;
}