Merge branch 'master' into modesetting-101
commit 9906c7e54b

 libdrm/xf86drm.c | 104

libdrm/xf86drm.c
@@ -2432,7 +2432,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
     fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->signaled = arg.signaled;
-    return 0;
+    return arg.error;
 }
 
 int drmFenceUpdate(int fd, drmFence *fence)
@@ -2495,7 +2495,50 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
  * DRM_FENCE_FLAG_WAIT_LAZY
  * DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS
  */
+
+#define DRM_IOCTL_TIMEOUT_USEC 3000000UL
+
+static unsigned long
+drmTimeDiff(struct timeval *now, struct timeval *then)
+{
+    uint64_t val;
+
+    val = now->tv_sec - then->tv_sec;
+    val *= 1000000LL;
+    val += now->tv_usec;
+    val -= then->tv_usec;
+
+    return (unsigned long) val;
+}
+
+static int
+drmIoctlTimeout(int fd, unsigned long request, void *argp)
+{
+    int haveThen = 0;
+    struct timeval then, now;
+    int ret;
+
+    do {
+        ret = ioctl(fd, request, argp);
+        if (ret != 0 && errno == EAGAIN) {
+            if (!haveThen) {
+                gettimeofday(&then, NULL);
+                haveThen = 1;
+            }
+            gettimeofday(&now, NULL);
+        }
+    } while (ret != 0 && errno == EAGAIN &&
+             drmTimeDiff(&now, &then) < DRM_IOCTL_TIMEOUT_USEC);
+
+    if (ret != 0)
+        return ((errno == EAGAIN) ? -EBUSY : -errno);
+
+    return 0;
+}
+
+
+
+
 int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
 {
     drm_fence_arg_t arg;
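Note on the new helper: drmIoctlTimeout() centralizes the EAGAIN retry loops that were previously open-coded at each call site, and bounds them at DRM_IOCTL_TIMEOUT_USEC (three seconds). The before/after pattern, condensed from the hunks below (SOME_IOCTL and arg are placeholders, not real names):

    /*
     * Before this change, each caller spun indefinitely on EAGAIN:
     *
     *     do {
     *         ret = ioctl(fd, SOME_IOCTL, &arg);
     *     } while (ret != 0 && errno == EAGAIN);
     *     if (ret)
     *         return -errno;
     *
     * After it, callers delegate and get a bounded wait: a persistent
     * EAGAIN turns into -EBUSY after ~3 s, any other failure into -errno:
     *
     *     ret = drmIoctlTimeout(fd, SOME_IOCTL, &arg);
     *     if (ret)
     *         return ret;
     */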
@@ -2516,17 +2559,15 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
     arg.type = flush_type;
     arg.flags = flags;
 
-    do {
-        ret = ioctl(fd, DRM_IOCTL_FENCE_WAIT, &arg);
-    } while (ret != 0 && errno == EAGAIN);
-
+    ret = drmIoctlTimeout(fd, DRM_IOCTL_FENCE_WAIT, &arg);
     if (ret)
-        return -errno;
+        return ret;
 
     fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->signaled = arg.signaled;
-    return 0;
+    return arg.error;
 }
 
 static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
@@ -2568,15 +2609,12 @@ int drmBOCreate(int fd, unsigned long size,
 
     buf->virtual = NULL;
 
-    do {
-        ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg);
-    } while (ret != 0 && errno == EAGAIN);
-
+    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_CREATE, &arg);
     if (ret)
-        return -errno;
+        return ret;
 
     drmBOCopyReply(rep, buf);
     buf->mapVirtual = NULL;
     buf->virtual = user_buffer;
     buf->mapCount = 0;
 
     return 0;
@@ -2606,7 +2644,7 @@ int drmBOUnreference(int fd, drmBO *buf)
 {
     struct drm_bo_handle_arg arg;
 
-    if (buf->mapVirtual) {
+    if (buf->mapVirtual && buf->mapHandle) {
         (void) munmap(buf->mapVirtual, buf->start + buf->size);
         buf->mapVirtual = NULL;
         buf->virtual = NULL;
@@ -2665,12 +2703,9 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
      * This IOCTL synchronizes the buffer.
      */
 
-    do {
-        ret = ioctl(fd, DRM_IOCTL_BO_MAP, &arg);
-    } while (ret != 0 && errno == EAGAIN);
-
-    if (ret)
-        return -errno;
+    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_MAP, &arg);
+    if (ret)
+        return ret;
 
     drmBOCopyReply(rep, buf);
     buf->mapFlags = mapFlags;
@@ -2715,14 +2750,12 @@ int drmBOSetStatus(int fd, drmBO *buf,
     req->desired_tile_stride = desired_tile_stride;
     req->tile_info = tile_info;
 
-    do {
-        ret = ioctl(fd, DRM_IOCTL_BO_SETSTATUS, &arg);
-    } while (ret && errno == EAGAIN);
-
+    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_SETSTATUS, &arg);
     if (ret)
-        return -errno;
+        return ret;
 
     drmBOCopyReply(rep, buf);
     return 0;
 }
@@ -2757,12 +2790,9 @@ int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
     req->handle = buf->handle;
     req->hint = hint;
 
-    do {
-        ret = ioctl(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg);
-    } while (ret && errno == EAGAIN);
-
+    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg);
     if (ret)
-        return -errno;
+        return ret;
 
     drmBOCopyReply(rep, buf);
 }
@@ -2824,35 +2854,25 @@ int drmMMTakedown(int fd, unsigned memType)
 int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict)
 {
     struct drm_mm_type_arg arg;
-    int ret;
 
     memset(&arg, 0, sizeof(arg));
     arg.mem_type = memType;
     arg.lock_flags |= (lockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
     arg.lock_flags |= (ignoreNoEvict) ? DRM_BO_LOCK_IGNORE_NO_EVICT : 0;
 
-    do{
-        ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg);
-    } while (ret && errno == EAGAIN);
-
-    return (ret) ? -errno : 0;
+    return drmIoctlTimeout(fd, DRM_IOCTL_MM_LOCK, &arg);
 }
 
 int drmMMUnlock(int fd, unsigned memType, int unlockBM)
 {
     struct drm_mm_type_arg arg;
-    int ret;
 
     memset(&arg, 0, sizeof(arg));
 
     arg.mem_type = memType;
     arg.lock_flags |= (unlockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
 
-    do{
-        ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg);
-    } while (ret && errno == EAGAIN);
-
-    return (ret) ? -errno : 0;
+    return drmIoctlTimeout(fd, DRM_IOCTL_MM_UNLOCK, &arg);
 }
 
 int drmBOVersion(int fd, unsigned int *major,
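Usage note: drmMMLock() and drmMMUnlock() now return drmIoctlTimeout()'s result unchanged, so a caller can tell a timed-out lock attempt (-EBUSY after DRM_IOCTL_TIMEOUT_USEC) from other failures. A minimal caller sketch; the surrounding function and its error handling are illustrative, and it assumes the libdrm header that declares drmMMLock (xf86mm.h in this tree):

    #include <errno.h>
    #include <stdio.h>
    #include "xf86mm.h"   /* assumed location of the drmMM* declarations */

    /* Illustrative: lock a memory type, treating a timeout specially. */
    static int lock_memory_manager(int fd, unsigned memType)
    {
        int ret = drmMMLock(fd, memType, 0, 0);

        if (ret == -EBUSY)   /* kernel kept returning EAGAIN for ~3 s */
            fprintf(stderr, "memory manager busy, retry later\n");

        return ret;          /* 0 on success, -errno otherwise */
    }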

linux-core/drm_bo.c
@@ -137,9 +137,9 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
 {
     struct drm_device *dev = bo->dev;
     int ret = 0;
-    bo->ttm = NULL;
 
     DRM_ASSERT_LOCKED(&bo->mutex);
+    bo->ttm = NULL;
 
     switch (bo->type) {
     case drm_bo_type_dc:
@@ -149,6 +149,18 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
             ret = -ENOMEM;
         break;
+    case drm_bo_type_user:
+        bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+        if (!bo->ttm)
+            ret = -ENOMEM;
+
+        ret = drm_ttm_set_user(bo->ttm, current,
+                               bo->mem.mask & DRM_BO_FLAG_WRITE,
+                               bo->buffer_start,
+                               bo->num_pages,
+                               dev->bm.dummy_read_page);
+        if (ret)
+            return ret;
+
+        break;
     default:
         DRM_ERROR("Illegal buffer object type\n");
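One review observation on the new drm_bo_type_user case: when drm_ttm_init() fails, ret is set to -ENOMEM but execution still reaches drm_ttm_set_user() with bo->ttm == NULL, and ret is overwritten. A guard along these lines would avoid the NULL dereference (a sketch of a possible fix, not part of this commit):

    case drm_bo_type_user:
        bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
        if (!bo->ttm)
            return -ENOMEM;    /* bail out; don't pass a NULL ttm below */

        ret = drm_ttm_set_user(bo->ttm, current,
                               bo->mem.mask & DRM_BO_FLAG_WRITE,
                               bo->buffer_start,
                               bo->num_pages,
                               dev->bm.dummy_read_page);
        if (ret)
            return ret;
        break;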
@@ -783,12 +795,15 @@ static int drm_bo_mem_force_space(struct drm_device * dev,
 }
 
 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
+                                int disallow_fixed,
                                 uint32_t mem_type,
                                 uint64_t mask, uint32_t * res_mask)
 {
     uint64_t cur_flags = drm_bo_type_flags(mem_type);
     uint64_t flag_diff;
 
+    if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
+        return 0;
     if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
         cur_flags |= DRM_BO_FLAG_CACHED;
     if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
@@ -853,7 +868,9 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
         mem_type = prios[i];
         man = &bm->man[mem_type];
 
-        type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
+        type_ok = drm_bo_mt_compatible(man,
+                                       bo->type == drm_bo_type_user,
+                                       mem_type, mem->mask,
                                        &cur_flags);
 
         if (!type_ok)
@@ -902,7 +919,11 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
         if (!man->has_type)
             continue;
 
-        if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
+        if (!drm_bo_mt_compatible(man,
+                                  bo->type == drm_bo_type_user,
+                                  mem_type,
+                                  mem->mask,
+                                  &cur_flags))
             continue;
 
         ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
@@ -927,8 +948,10 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo,
 {
     uint32_t new_props;
 
-    if (bo->type == drm_bo_type_user) {
-        DRM_ERROR("User buffers are not supported yet.\n");
+    if (bo->type == drm_bo_type_user &&
+        ((used_mask & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
+         (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
+        DRM_ERROR("User buffers require cache-coherent memory.\n");
         return -EINVAL;
     }
@@ -1119,7 +1142,12 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
     rep->flags = bo->mem.flags;
     rep->size = bo->num_pages * PAGE_SIZE;
     rep->offset = bo->offset;
-    rep->arg_handle = bo->map_list.user_token;
+
+    if (bo->type == drm_bo_type_dc)
+        rep->arg_handle = bo->map_list.user_token;
+    else
+        rep->arg_handle = 0;
+
     rep->mask = bo->mem.mask;
     rep->buffer_start = bo->buffer_start;
     rep->fence_flags = bo->fence_type;
@@ -1618,10 +1646,7 @@ int drm_buffer_object_create(struct drm_device *dev,
     int ret = 0;
     unsigned long num_pages;
 
-    if (buffer_start & ~PAGE_MASK) {
-        DRM_ERROR("Invalid buffer object start.\n");
-        return -EINVAL;
-    }
+    size += buffer_start & ~PAGE_MASK;
     num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
     if (num_pages == 0) {
         DRM_ERROR("Illegal buffer object size.\n");
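The replaced check shows the intent of this hunk: unaligned buffer_start values are no longer rejected; instead the sub-page offset is folded into the size before rounding up to whole pages, and buffer_start itself is truncated to a page boundary in the hunk below. A worked example (values illustrative, PAGE_SIZE = 4096):

    /*
     * buffer_start = 0x10234, size = 0x2000, PAGE_SHIFT = 12:
     *
     *   buffer_start & ~PAGE_MASK = 0x234          (sub-page offset)
     *   size += 0x234             -> 0x2234
     *   num_pages = (0x2234 + 0xfff) >> 12 = 3
     *   bo->buffer_start = 0x10234 & PAGE_MASK = 0x10000
     *
     * Three pinned pages (0x10000..0x12fff) fully cover the original
     * byte range 0x10234..0x12233.
     */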
@@ -1647,23 +1672,20 @@ int drm_buffer_object_create(struct drm_device *dev,
     INIT_LIST_HEAD(&bo->vma_list);
 #endif
     bo->dev = dev;
-    if (buffer_start != 0)
-        bo->type = drm_bo_type_user;
-    else
-        bo->type = type;
+    bo->type = type;
     bo->num_pages = num_pages;
     bo->mem.mem_type = DRM_BO_MEM_LOCAL;
     bo->mem.num_pages = bo->num_pages;
     bo->mem.mm_node = NULL;
     bo->mem.page_alignment = page_alignment;
-    bo->buffer_start = buffer_start;
+    bo->buffer_start = buffer_start & PAGE_MASK;
     bo->priv_flags = 0;
-    bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+    bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
         DRM_BO_FLAG_MAPPABLE;
     bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
         DRM_BO_FLAG_MAPPABLE;
     atomic_inc(&bm->count);
-    ret = drm_bo_new_mask(bo, mask, hint);
+    ret = drm_bo_new_mask(bo, mask, mask);
     if (ret)
         goto out_err;
@@ -1719,6 +1741,7 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
     struct drm_bo_create_req *req = &arg->d.req;
     struct drm_bo_info_rep *rep = &arg->d.rep;
     struct drm_buffer_object *entry;
+    enum drm_bo_type bo_type;
     int ret = 0;
 
     DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
@@ -1729,8 +1752,13 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
         return -EINVAL;
     }
 
+    bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
+
+    if (bo_type == drm_bo_type_user)
+        req->mask &= ~DRM_BO_FLAG_SHAREABLE;
+
     ret = drm_buffer_object_create(file_priv->head->dev,
-                                   req->size, drm_bo_type_dc, req->mask,
+                                   req->size, bo_type, req->mask,
                                    req->hint, req->page_alignment,
                                    req->buffer_start, &entry);
     if (ret)
@@ -2185,6 +2213,13 @@ int drm_bo_driver_finish(struct drm_device * dev)
         DRM_DEBUG("Unfenced list was clean\n");
     }
 out:
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+    unlock_page(bm->dummy_read_page);
+#else
+    ClearPageReserved(bm->dummy_read_page);
+#endif
+    __free_page(bm->dummy_read_page);
     mutex_unlock(&dev->struct_mutex);
     return ret;
 }
@@ -2203,11 +2238,24 @@ int drm_bo_driver_init(struct drm_device * dev)
     struct drm_buffer_manager *bm = &dev->bm;
     int ret = -EINVAL;
 
+    bm->dummy_read_page = NULL;
     drm_bo_init_lock(&bm->bm_lock);
     mutex_lock(&dev->struct_mutex);
     if (!driver)
         goto out_unlock;
 
+    bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+    if (!bm->dummy_read_page) {
+        ret = -ENOMEM;
+        goto out_unlock;
+    }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+    SetPageLocked(bm->dummy_read_page);
+#else
+    SetPageReserved(bm->dummy_read_page);
+#endif
+
     /*
      * Initialize the system memory buffer type.
     * Other types need to be driver / IOCTL initialized.
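Taken together with the drm_bo_driver_finish() hunk above, this gives dummy_read_page a device-lifetime scope. A summary of the pairing introduced here (condensed from the hunks in this commit):

    /*
     * dummy_read_page lifecycle:
     *
     *   drm_bo_driver_init():      alloc_page(__GFP_ZERO | GFP_DMA32),
     *                              then SetPageLocked() (>= 2.6.15) or
     *                              SetPageReserved() (older kernels)
     *   drm_ttm_set_user():        read-only slots that could not be
     *                              pinned point at this shared zero page
     *   drm_ttm_free_user_pages(): dummy slots are skipped, never released
     *   drm_bo_driver_finish():    unlock_page()/ClearPageReserved(),
     *                              then __free_page()
     */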
@@ -2462,11 +2510,15 @@ void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
 
 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
 {
-    struct drm_map_list *list = &bo->map_list;
+    struct drm_map_list *list;
     drm_local_map_t *map;
     struct drm_device *dev = bo->dev;
 
     DRM_ASSERT_LOCKED(&dev->struct_mutex);
+    if (bo->type != drm_bo_type_dc)
+        return;
+
+    list = &bo->map_list;
     if (list->user_token) {
         drm_ht_remove_item(&dev->map_hash, &list->hash);
         list->user_token = 0;

linux-core/drm_objects.h
@@ -275,6 +275,8 @@ typedef struct drm_ttm_backend {
 } drm_ttm_backend_t;
 
 struct drm_ttm {
+    struct mm_struct *user_mm;
+    struct page *dummy_read_page;
     struct page **pages;
     uint32_t page_flags;
     unsigned long num_pages;
@@ -300,6 +302,12 @@ extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
 extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
 extern void drm_ttm_cache_flush(void);
 extern int drm_ttm_populate(struct drm_ttm * ttm);
+extern int drm_ttm_set_user(struct drm_ttm *ttm,
+                            struct task_struct *tsk,
+                            int write,
+                            unsigned long start,
+                            unsigned long num_pages,
+                            struct page *dummy_read_page);
 
 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
@@ -320,11 +328,15 @@ extern int drm_destroy_ttm(struct drm_ttm * ttm);
  * Page flags.
  */
 
-#define DRM_TTM_PAGE_UNCACHED 0x01
-#define DRM_TTM_PAGE_USED     0x02
-#define DRM_TTM_PAGE_BOUND    0x04
-#define DRM_TTM_PAGE_PRESENT  0x08
-#define DRM_TTM_PAGE_VMALLOC  0x10
+#define DRM_TTM_PAGE_UNCACHED   (1 << 0)
+#define DRM_TTM_PAGE_USED       (1 << 1)
+#define DRM_TTM_PAGE_BOUND      (1 << 2)
+#define DRM_TTM_PAGE_PRESENT    (1 << 3)
+#define DRM_TTM_PAGE_VMALLOC    (1 << 4)
+#define DRM_TTM_PAGE_USER       (1 << 5)
+#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
+#define DRM_TTM_PAGE_USER_DMA   (1 << 8)
 
 /***************************************************
  * Buffer objects. (drm_bo.c, drm_bo_move.c)
@@ -447,6 +459,7 @@ struct drm_buffer_manager {
     uint32_t fence_type;
     unsigned long cur_pages;
     atomic_t count;
+    struct page *dummy_read_page;
 };
 
 struct drm_bo_driver {

linux-core/drm_ttm.c
@@ -139,15 +139,74 @@ static int drm_set_caching(struct drm_ttm * ttm, int noncached)
     return 0;
 }
 
+
+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
+{
+    struct mm_struct *mm = ttm->user_mm;
+    int write;
+    int dirty;
+    struct page *page;
+    int i;
+
+    BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
+    write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
+    dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
+
+    down_read(&mm->mmap_sem);
+    for (i = 0; i < ttm->num_pages; ++i) {
+        page = ttm->pages[i];
+        if (page == NULL)
+            continue;
+
+        if (page == ttm->dummy_read_page) {
+            BUG_ON(write);
+            continue;
+        }
+
+        if (write && dirty && !PageReserved(page))
+            SetPageDirty(page);
+
+        ttm->pages[i] = NULL;
+        page_cache_release(page);
+    }
+    up_read(&mm->mmap_sem);
+}
+
+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
+{
+    int i;
+    struct drm_buffer_manager *bm = &ttm->dev->bm;
+    struct page **cur_page;
+
+    for (i = 0; i < ttm->num_pages; ++i) {
+        cur_page = ttm->pages + i;
+        if (*cur_page) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+            unlock_page(*cur_page);
+#else
+            ClearPageReserved(*cur_page);
+#endif
+            if (page_count(*cur_page) != 1) {
+                DRM_ERROR("Erroneous page count. "
+                          "Leaking pages.\n");
+            }
+            if (page_mapped(*cur_page)) {
+                DRM_ERROR("Erroneous map count. "
+                          "Leaking page mappings.\n");
+            }
+            __free_page(*cur_page);
+            drm_free_memctl(PAGE_SIZE);
+            --bm->cur_pages;
+        }
+    }
+}
+
 /*
  * Free all resources associated with a ttm.
  */
 
 int drm_destroy_ttm(struct drm_ttm * ttm)
 {
-
-    int i;
-    struct page **cur_page;
     struct drm_ttm_backend *be;
 
     if (!ttm)
@@ -160,31 +219,14 @@ int drm_destroy_ttm(struct drm_ttm * ttm)
     }
 
     if (ttm->pages) {
-        struct drm_buffer_manager *bm = &ttm->dev->bm;
         if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
             drm_set_caching(ttm, 0);
 
-        for (i = 0; i < ttm->num_pages; ++i) {
-            cur_page = ttm->pages + i;
-            if (*cur_page) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
-                unlock_page(*cur_page);
-#else
-                ClearPageReserved(*cur_page);
-#endif
-                if (page_count(*cur_page) != 1) {
-                    DRM_ERROR("Erroneous page count. "
-                              "Leaking pages.\n");
-                }
-                if (page_mapped(*cur_page)) {
-                    DRM_ERROR("Erroneous map count. "
-                              "Leaking page mappings.\n");
-                }
-                __free_page(*cur_page);
-                drm_free_memctl(PAGE_SIZE);
-                --bm->cur_pages;
-            }
-        }
+        if (ttm->page_flags & DRM_TTM_PAGE_USER)
+            drm_ttm_free_user_pages(ttm);
+        else
+            drm_ttm_free_alloced_pages(ttm);
+
         ttm_free_pages(ttm);
     }
 
@@ -209,6 +251,49 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
 }
 EXPORT_SYMBOL(drm_ttm_get_page);
 
+
+
+int drm_ttm_set_user(struct drm_ttm *ttm,
+                     struct task_struct *tsk,
+                     int write,
+                     unsigned long start,
+                     unsigned long num_pages,
+                     struct page *dummy_read_page)
+{
+    struct mm_struct *mm = tsk->mm;
+    int ret;
+    int i;
+
+    BUG_ON(num_pages != ttm->num_pages);
+
+    ttm->user_mm = mm;
+    ttm->dummy_read_page = dummy_read_page;
+    ttm->page_flags = DRM_TTM_PAGE_USER |
+        ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
+
+    down_read(&mm->mmap_sem);
+    ret = get_user_pages(tsk, mm, start, num_pages,
+                         write, 0, ttm->pages, NULL);
+    up_read(&mm->mmap_sem);
+
+    if (ret != num_pages && write) {
+        drm_ttm_free_user_pages(ttm);
+        return -ENOMEM;
+    }
+
+    for (i = 0; i < num_pages; ++i) {
+        if (ttm->pages[i] == NULL) {
+            ttm->pages[i] = ttm->dummy_read_page;
+        }
+    }
+
+    return 0;
+}
+
+
+
 int drm_ttm_populate(struct drm_ttm * ttm)
 {
     struct page *page;
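For context, drm_ttm_set_user() uses the eight-argument get_user_pages() of this kernel era, and its tolerance for a short pin is asymmetric. A condensed restatement of the contract (comments are mine; the behavior is from the hunk above and the drm_bind_ttm hunk below):

    /*
     * Pinning contract of drm_ttm_set_user():
     *
     *   - get_user_pages(tsk, mm, start, num_pages, write, 0, pages, NULL)
     *     runs under down_read(&mm->mmap_sem) and returns how many pages
     *     were actually pinned.
     *   - write pinning: a short result is a hard failure (-ENOMEM); a
     *     buffer the device may write to must be fully resident.
     *   - read-only pinning: NULL slots are filled with dummy_read_page,
     *     so device reads see zeros instead of faulting.
     *   - drm_bind_ttm() (below) sets DRM_TTM_PAGE_USER_DIRTY, which makes
     *     drm_ttm_free_user_pages() call SetPageDirty() on writable pages
     *     before releasing them.
     */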
@@ -340,7 +425,8 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
     }
 
     ttm->state = ttm_bound;
-
+    if (ttm->page_flags & DRM_TTM_PAGE_USER)
+        ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
     return 0;
 }

shared-core/radeon_cp.c
@@ -1716,7 +1716,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
         dev_priv->gart_info.bus_addr =
             dev_priv->pcigart_offset + dev_priv->fb_location;
         dev_priv->gart_info.mapping.offset =
-            dev_priv->gart_info.bus_addr;
+            dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
         dev_priv->gart_info.mapping.size =
             dev_priv->gart_info.table_size;
 
@@ -2317,7 +2317,8 @@ int radeon_driver_firstopen(struct drm_device *dev)
     if (ret != 0)
         return ret;
 
-    ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
+    dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
+    ret = drm_addmap(dev, dev_priv->fb_aper_offset,
                      drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
                      _DRM_WRITE_COMBINING, &map);
     if (ret != 0)

shared-core/radeon_drv.h
@@ -307,6 +307,7 @@ typedef struct drm_radeon_private {
 
     /* starting from here on, data is preserved accross an open */
     uint32_t flags;        /* see radeon_chip_flags */
+    unsigned long fb_aper_offset;
 
 } drm_radeon_private_t;