Add fence error member.

Modify the TTM backend bind arguments. Export a number of functions
needed for driver-specific super-ioctls. Add a function to map buffer
objects from the kernel, regardless of where they are currently placed.
A number of error fixes.

parent 24e33627c5
commit da63f4ba0f
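
One of the additions below is drm_bo_kmap()/drm_bo_kunmap() for kernel mappings of buffer objects. As a minimal usage sketch (the caller function, its buffer object argument, and the memset are illustrative assumptions, not part of this commit):

    /*
     * Hypothetical caller: map the first page of a buffer object into
     * kernel space, touch it from the CPU, and unmap. Assumes "bo" is
     * an idle buffer object the caller holds a reference to.
     */
    static int example_poke_bo(struct drm_buffer_object *bo)
    {
            struct drm_bo_kmap_obj map;
            int ret;

            ret = drm_bo_kmap(bo, 0, 1, &map);  /* map one page at page 0 */
            if (ret)
                    return ret;

            memset(map.virtual, 0, PAGE_SIZE);  /* CPU access via the map */

            drm_bo_kunmap(&map);                /* always balance the map */
            return 0;
    }
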
@@ -535,8 +535,7 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p
 }
 
 static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
-                            unsigned long offset,
-                            int cached)
+                            struct drm_bo_mem_reg *bo_mem)
 {
         struct drm_agp_ttm_backend *agp_be =
                 container_of(backend, struct drm_agp_ttm_backend, backend);
@@ -545,13 +544,14 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
 
         DRM_DEBUG("drm_agp_bind_ttm\n");
         mem->is_flushed = TRUE;
-        mem->type = (cached) ? AGP_USER_CACHED_MEMORY :
+        mem->type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? AGP_USER_CACHED_MEMORY :
                 AGP_USER_MEMORY;
-        ret = drm_agp_bind_memory(mem, offset);
+        ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
         if (ret) {
                 DRM_ERROR("AGP Bind memory failed\n");
         }
-        DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
+        DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
+                        DRM_BE_FLAG_BOUND_CACHED : 0,
                         DRM_BE_FLAG_BOUND_CACHED);
         return ret;
 }
@@ -643,7 +643,8 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
         agp_be->bridge = dev->agp->bridge;
         agp_be->populated = FALSE;
         agp_be->backend.func = &agp_ttm_backend;
-        agp_be->backend.mem_type = DRM_BO_MEM_TT;
+        // agp_be->backend.mem_type = DRM_BO_MEM_TT;
+        agp_be->backend.dev = dev;
 
         return &agp_be->backend;
 }
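
A driver-side view of the new bind interface, for illustration (the backend name and the aperture-programming step are assumptions): the hook now receives the whole memory region, so the caching flag and aperture offset are derived from it rather than passed separately.

    /* Illustrative TTM backend bind hook under the new signature. */
    static int example_bind_ttm(struct drm_ttm_backend *backend,
                                struct drm_bo_mem_reg *bo_mem)
    {
            int cached = !!(bo_mem->flags & DRM_BO_FLAG_CACHED);
            unsigned long offset = bo_mem->mm_node->start; /* in pages */

            /* ... program the aperture at "offset", honoring "cached" ... */
            return 0;
    }
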
@@ -142,12 +142,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
 
         switch (bo->type) {
         case drm_bo_type_dc:
-                bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
-                if (!bo->ttm)
-                        ret = -ENOMEM;
-                break;
         case drm_bo_type_kernel:
-                bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
+                bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
                 if (!bo->ttm)
                         ret = -ENOMEM;
                 break;
@@ -175,7 +171,8 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
         int ret = 0;
 
-        if (old_is_pci || new_is_pci)
+        if (old_is_pci || new_is_pci ||
+            ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
         if (ret)
                 return ret;
@@ -190,9 +187,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
                         goto out_err;
 
                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
-                        ret = drm_bind_ttm(bo->ttm, new_man->flags &
-                                           DRM_BO_FLAG_CACHED,
-                                           mem->mm_node->start);
+                        ret = drm_bind_ttm(bo->ttm, mem);
                         if (ret)
                                 goto out_err;
                 }
@@ -242,7 +237,9 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
                         _DRM_BO_FLAG_EVICTED);
 
         if (bo->mem.mm_node)
-                bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
+                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+                        bm->man[bo->mem.mem_type].gpu_offset;
+
 
         return 0;
 
@@ -290,6 +287,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
         }
         return 0;
 }
+EXPORT_SYMBOL(drm_bo_wait);
 
 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
 {
@@ -417,7 +415,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
 
                 atomic_dec(&bm->count);
 
-                BUG_ON(!list_empty(&bo->base.list));
+                // BUG_ON(!list_empty(&bo->base.list));
                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
 
                 return;
@@ -503,6 +501,7 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
                 drm_bo_destroy_locked(tmp_bo);
         }
 }
+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
 
 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
                                      struct drm_user_object * uo)
@@ -531,38 +530,76 @@ void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
 
+void drm_putback_buffer_objects(struct drm_device *dev)
+{
+        struct drm_buffer_manager *bm = &dev->bm;
+        struct list_head *list = &bm->unfenced;
+        struct drm_buffer_object *entry, *next;
+
+        mutex_lock(&dev->struct_mutex);
+        list_for_each_entry_safe(entry, next, list, lru) {
+                atomic_inc(&entry->usage);
+                mutex_unlock(&dev->struct_mutex);
+
+                mutex_lock(&entry->mutex);
+                BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+                mutex_lock(&dev->struct_mutex);
+
+                list_del_init(&entry->lru);
+                DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+                DRM_WAKEUP(&entry->event_queue);
+
+                /*
+                 * FIXME: Might want to put back on head of list
+                 * instead of tail here.
+                 */
+
+                drm_bo_add_to_lru(entry);
+                mutex_unlock(&entry->mutex);
+                drm_bo_usage_deref_locked(&entry);
+        }
+        mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_putback_buffer_objects);
+
+
 /*
  * Note. The caller has to register (if applicable)
  * and deregister fence object usage.
  */
 
-int drm_fence_buffer_objects(struct drm_file * file_priv,
+int drm_fence_buffer_objects(struct drm_device *dev,
                              struct list_head *list,
                              uint32_t fence_flags,
                              struct drm_fence_object * fence,
                              struct drm_fence_object ** used_fence)
 {
-        struct drm_device *dev = file_priv->head->dev;
         struct drm_buffer_manager *bm = &dev->bm;
-
         struct drm_buffer_object *entry;
         uint32_t fence_type = 0;
+        uint32_t fence_class = ~0;
         int count = 0;
         int ret = 0;
         struct list_head *l;
-        LIST_HEAD(f_list);
 
         mutex_lock(&dev->struct_mutex);
 
         if (!list)
                 list = &bm->unfenced;
 
+        if (fence)
+                fence_class = fence->class;
+
         list_for_each_entry(entry, list, lru) {
                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-                fence_type |= entry->fence_type;
-                if (entry->fence_class != 0) {
-                        DRM_ERROR("Fence class %d is not implemented yet.\n",
-                                  entry->fence_class);
+                fence_type |= entry->new_fence_type;
+                if (fence_class == ~0)
+                        fence_class = entry->new_fence_class;
+                else if (entry->new_fence_class != fence_class) {
+                        DRM_ERROR("Unmatching fence classes on unfenced list: "
+                                  "%d and %d.\n",
+                                  fence_class,
+                                  entry->new_fence_class);
                         ret = -EINVAL;
                         goto out;
                 }
@@ -574,14 +611,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
                 goto out;
         }
 
-        /*
-         * Transfer to a local list before we release the dev->struct_mutex;
-         * This is so we don't get any new unfenced objects while fencing
-         * the ones we already have..
-         */
-
-        list_splice_init(list, &f_list);
-
         if (fence) {
                 if ((fence_type & fence->type) != fence_type) {
                         DRM_ERROR("Given fence doesn't match buffers "
@@ -591,7 +620,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
                 }
         } else {
                 mutex_unlock(&dev->struct_mutex);
-                ret = drm_fence_object_create(dev, 0, fence_type,
+                ret = drm_fence_object_create(dev, fence_class, fence_type,
                                               fence_flags | DRM_FENCE_FLAG_EMIT,
                                               &fence);
                 mutex_lock(&dev->struct_mutex);
@@ -600,8 +629,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
         }
 
         count = 0;
-        l = f_list.next;
-        while (l != &f_list) {
+        l = list->next;
+        while (l != list) {
                 prefetch(l->next);
                 entry = list_entry(l, struct drm_buffer_object, lru);
                 atomic_inc(&entry->usage);
@@ -614,6 +643,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
                         if (entry->fence)
                                 drm_fence_usage_deref_locked(&entry->fence);
                         entry->fence = drm_fence_reference_locked(fence);
+                        entry->fence_class = entry->new_fence_class;
+                        entry->fence_type = entry->new_fence_type;
                         DRM_FLAG_MASKED(entry->priv_flags, 0,
                                         _DRM_BO_FLAG_UNFENCED);
                         DRM_WAKEUP(&entry->event_queue);
@@ -621,7 +652,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
                 }
                 mutex_unlock(&entry->mutex);
                 drm_bo_usage_deref_locked(&entry);
-                l = f_list.next;
+                l = list->next;
         }
         DRM_DEBUG("Fenced %d buffers\n", count);
 out:
@@ -629,7 +660,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
         *used_fence = fence;
         return ret;
 }
-
 EXPORT_SYMBOL(drm_fence_buffer_objects);
 
 /*
@@ -944,6 +974,7 @@ struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
         atomic_inc(&bo->usage);
         return bo;
 }
+EXPORT_SYMBOL(drm_lookup_buffer_object);
 
 /*
  * Call bo->mutex locked.
@@ -1079,9 +1110,12 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
                                 struct drm_bo_info_rep *rep)
 {
+        if (!rep)
+                return;
+
         rep->handle = bo->base.hash.key;
         rep->flags = bo->mem.flags;
-        rep->size = bo->mem.num_pages * PAGE_SIZE;
+        rep->size = bo->num_pages * PAGE_SIZE;
         rep->offset = bo->offset;
         rep->arg_handle = bo->map_list.user_token;
         rep->mask = bo->mem.mask;
@@ -1260,7 +1294,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
         if (ret)
                 return ret;
 
-        mem.num_pages = bo->mem.num_pages;
+        mem.num_pages = bo->num_pages;
         mem.size = mem.num_pages << PAGE_SHIFT;
         mem.mask = new_mem_flags;
         mem.page_alignment = bo->mem.page_alignment;
@@ -1308,7 +1342,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
                 return 0;
         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-            (!(mem->mask & DRM_BO_FLAG_CACHED) ||
+            (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
                 return 0;
         }
@@ -1375,7 +1409,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
                   (unsigned long long) bo->mem.mask,
                   (unsigned long long) bo->mem.flags);
 
-        ret = driver->fence_type(bo, &ftype);
+        ret = driver->fence_type(bo, &fence_class, &ftype);
 
         if (ret) {
                 DRM_ERROR("Driver did not support given buffer permissions\n");
@@ -1405,12 +1439,14 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 
         }
 
-        bo->fence_class = fence_class;
-        bo->fence_type = ftype;
-        ret = drm_bo_wait_unmapped(bo, no_wait);
-        if (ret)
-                return ret;
+        bo->new_fence_class = fence_class;
+        bo->new_fence_type = ftype;
 
+        ret = drm_bo_wait_unmapped(bo, no_wait);
+        if (ret) {
+                DRM_ERROR("Timed out waiting for buffer unmap.\n");
+                return ret;
+        }
         if (bo->type == drm_bo_type_fake) {
                 ret = drm_bo_check_fake(dev, &bo->mem);
                 if (ret)
@@ -1465,11 +1501,52 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
         return 0;
 }
 
-static int drm_bo_handle_validate(struct drm_file *file_priv,
-                                  uint32_t handle,
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+                       uint64_t flags, uint64_t mask, uint32_t hint,
+                       uint32_t fence_class,
+                       int no_wait,
+                       struct drm_bo_info_rep *rep)
+{
+        int ret;
+
+        mutex_lock(&bo->mutex);
+        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+
+        if (ret)
+                goto out;
+
+        if ((mask & flags & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+                DRM_ERROR
+                    ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
+                     "processes\n");
+                return -EPERM;
+        }
+
+
+        DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
+        ret = drm_bo_new_mask(bo, flags, hint);
+        if (ret)
+                goto out;
+
+        ret = drm_buffer_object_validate(bo,
+                                         fence_class,
+                                         !(hint & DRM_BO_HINT_DONT_FENCE),
+                                         no_wait);
+out:
+        if (rep)
+                drm_bo_fill_rep_arg(bo, rep);
+
+        mutex_unlock(&bo->mutex);
+        return ret;
+}
+EXPORT_SYMBOL(drm_bo_do_validate);
+
+
+int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
                            uint32_t fence_class,
                            uint64_t flags, uint64_t mask, uint32_t hint,
-                           struct drm_bo_info_rep *rep)
+                           struct drm_bo_info_rep * rep,
+                           struct drm_buffer_object **bo_rep)
 {
         struct drm_device *dev = file_priv->head->dev;
         struct drm_buffer_object *bo;
@@ -1479,34 +1556,22 @@ static int drm_bo_handle_validate(struct drm_file *file_priv,
         mutex_lock(&dev->struct_mutex);
         bo = drm_lookup_buffer_object(file_priv, handle, 1);
         mutex_unlock(&dev->struct_mutex);
 
         if (!bo) {
                 return -EINVAL;
         }
 
-        mutex_lock(&bo->mutex);
-        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-
-        if (ret)
-                goto out;
-
-        DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
-        ret = drm_bo_new_mask(bo, flags, hint);
-        if (ret)
-                goto out;
-
-        ret =
-            drm_buffer_object_validate(bo, fence_class,
-                                       !(hint & DRM_BO_HINT_DONT_FENCE),
-                                       no_wait);
-        drm_bo_fill_rep_arg(bo, rep);
-
-out:
-        mutex_unlock(&bo->mutex);
+        ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
+                                 no_wait, rep);
 
-        drm_bo_usage_deref_unlocked(&bo);
+        if (!ret && bo_rep)
+                *bo_rep = bo;
+        else
+                drm_bo_usage_deref_unlocked(&bo);
 
         return ret;
 }
+EXPORT_SYMBOL(drm_bo_handle_validate);
 
 /**
  * Fills out the generic buffer object ioctl reply with the information for
@@ -1612,8 +1677,9 @@ int drm_buffer_object_create(struct drm_device *dev,
 #endif
         bo->dev = dev;
         bo->type = type;
+        bo->num_pages = num_pages;
         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-        bo->mem.num_pages = num_pages;
+        bo->mem.num_pages = bo->num_pages;
         bo->mem.mm_node = NULL;
         bo->mem.page_alignment = page_alignment;
         if (bo->type == drm_bo_type_fake) {
@@ -1706,6 +1772,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
         struct drm_bo_op_arg *arg = data;
         struct drm_bo_op_req *req = &arg->d.req;
         struct drm_bo_info_rep rep;
+        struct drm_buffer_object *dummy;
         unsigned long next = 0;
         void __user *curuserarg = NULL;
         int ret;
@@ -1742,7 +1809,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
                                                   req->bo_req.flags,
                                                   req->bo_req.mask,
                                                   req->bo_req.hint,
-                                                  &rep);
+                                                  &rep, &dummy);
                         break;
                 case drm_bo_fence:
                         ret = -EINVAL;
@@ -2092,9 +2159,30 @@ static void drm_bo_clean_unfenced(struct drm_device *dev)
         struct drm_buffer_manager *bm = &dev->bm;
         struct list_head *head, *list;
         struct drm_buffer_object *entry;
+        struct drm_fence_object *fence;
 
         head = &bm->unfenced;
 
+        if (list_empty(head))
+                return;
+
+        DRM_ERROR("Clean unfenced\n");
+
+        if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
+
+                /*
+                 * Fixme: Should really wait here.
+                 */
+        }
+
+        if (fence)
+                drm_fence_usage_deref_locked(&fence);
+
+        if (list_empty(head))
+                return;
+
+        DRM_ERROR("Really clean unfenced\n");
+
         list = head->next;
         while(list != head) {
                 prefetch(list->next);
@@ -2254,7 +2342,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
 
         if (!man->has_type) {
                 DRM_ERROR("Trying to take down uninitialized "
-                          "memory manager type\n");
+                          "memory manager type %u\n", mem_type);
                 return ret;
         }
         man->use_type = 0;
@@ -2276,6 +2364,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
 
         return ret;
 }
+EXPORT_SYMBOL(drm_bo_clean_mm);
 
 /**
  *Evict all buffers of a particular mem_type, but leave memory manager
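
The exports above (drm_bo_do_validate(), drm_bo_handle_validate(), drm_fence_buffer_objects(), drm_putback_buffer_objects(), ...) are the building blocks of a driver-specific super-ioctl. A rough sketch of that flow, with the driver entry point and the command-submission step being illustrative assumptions:

    /*
     * Hypothetical super-ioctl skeleton: validate a buffer, submit
     * commands referencing it, then fence the unfenced list; on fence
     * failure, put the buffers back on their LRU lists.
     */
    static int example_superioctl(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  uint32_t handle)
    {
            struct drm_fence_object *fence = NULL;
            struct drm_buffer_object *bo;
            int ret;

            ret = drm_bo_handle_validate(file_priv, handle, 0 /* class */,
                                         DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM,
                                         0 /* hint */, NULL, &bo);
            if (ret)
                    return ret;

            /* ... emit the hardware command stream here ... */

            ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
            if (ret) {
                    drm_putback_buffer_objects(dev);
                    drm_bo_usage_deref_unlocked(&bo);
                    return ret;
            }

            drm_fence_usage_deref_unlocked(&fence);
            drm_bo_usage_deref_unlocked(&bo);
            return 0;
    }
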
@@ -71,9 +71,7 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
                 save_flags = old_mem->flags;
         }
         if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
-                ret = drm_bind_ttm(ttm,
-                                   new_mem->flags & DRM_BO_FLAG_CACHED,
-                                   new_mem->mm_node->start);
+                ret = drm_bind_ttm(ttm, new_mem);
                 if (ret)
                         return ret;
         }
@@ -344,6 +342,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
                 ret = drm_fence_object_create(dev, fence_class, fence_type,
                                               fence_flags | DRM_FENCE_FLAG_EMIT,
                                               &bo->fence);
+                bo->fence_type = fence_type;
                 if (ret)
                         return ret;
 
@@ -410,3 +409,195 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 }
 
 EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
+
+int drm_bo_same_page(unsigned long offset,
+                     unsigned long offset2)
+{
+        return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
+}
+EXPORT_SYMBOL(drm_bo_same_page);
+
+unsigned long drm_bo_offset_end(unsigned long offset,
+                                unsigned long end)
+{
+
+        offset = (offset + PAGE_SIZE) & PAGE_MASK;
+        return (end < offset) ? end : offset;
+}
+EXPORT_SYMBOL(drm_bo_offset_end);
+
+
+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
+{
+        pgprot_t tmp = PAGE_KERNEL;
+
+#if defined(__i386__) || defined(__x86_64__)
+#ifdef USE_PAT_WC
+#warning using pat
+        if (drm_use_pat() && map_type == _DRM_TTM) {
+                pgprot_val(tmp) |= _PAGE_PAT;
+                return tmp;
+        }
+#endif
+        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+                pgprot_val(tmp) |= _PAGE_PCD;
+                pgprot_val(tmp) &= ~_PAGE_PWT;
+        }
+#elif defined(__powerpc__)
+        pgprot_val(tmp) |= _PAGE_NO_CACHE;
+        if (map_type == _DRM_REGISTERS)
+                pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+        if (map_type == _DRM_TTM)
+                tmp = pgprot_writecombine(tmp);
+        else
+                tmp = pgprot_noncached(tmp);
+#endif
+        return tmp;
+}
+
+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
+                          unsigned long bus_offset, unsigned long bus_size,
+                          struct drm_bo_kmap_obj *map)
+{
+        struct drm_device *dev = bo->dev;
+        struct drm_bo_mem_reg *mem = &bo->mem;
+        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+
+        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+                map->bo_kmap_type = bo_map_premapped;
+                map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+        } else {
+                map->bo_kmap_type = bo_map_iomap;
+                map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
+        }
+        return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
+                           unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+        struct drm_device *dev = bo->dev;
+        struct drm_bo_mem_reg *mem = &bo->mem;
+        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+        pgprot_t prot;
+        struct drm_ttm *ttm = bo->ttm;
+        struct page *d;
+        int i;
+
+        BUG_ON(!ttm);
+
+        /*
+         * Populate the part we're mapping;
+         */
+
+        for (i=start_page; i< num_pages; ++i) {
+                d = drm_ttm_get_page(ttm, i);
+                if (!d)
+                        return -ENOMEM;
+        }
+
+        if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
+
+                /*
+                 * We're mapping a single page, and the desired
+                 * page protection is consistent with the bo.
+                 */
+
+                map->bo_kmap_type = bo_map_kmap;
+                map->page = drm_ttm_get_page(ttm, start_page);
+                map->virtual = kmap(map->page);
+        } else {
+
+                /*
+                 * We need to use vmap to get the desired page protection
+                 * or to make the buffer object look contigous.
+                 */
+
+                prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
+                        PAGE_KERNEL :
+                        drm_kernel_io_prot(man->drm_bus_maptype);
+                map->bo_kmap_type = bo_map_vmap;
+                map->virtual = vmap(ttm->pages + start_page,
+                                    num_pages, 0, prot);
+        }
+        return (!map->virtual) ? -ENOMEM : 0;
+}
+
+/*
+ * This function is to be used for kernel mapping of buffer objects.
+ * It chooses the appropriate mapping method depending on the memory type
+ * and caching policy the buffer currently has.
+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
+ * consumes vmalloc space. Be restrictive with such mappings.
+ * Mapping single pages usually returns the logical kernel address, (which is fast)
+ * BUG may use slower temporary mappings for high memory pages or
+ * uncached / write-combined pages.
+ *
+ * The function fills in a drm_bo_kmap_obj which can be used to return the
+ * kernel virtual address of the buffer.
+ *
+ * Code servicing a non-priviliged user request is only allowed to map one
+ * page at a time. We might need to implement a better scheme to stop such
+ * processes from consuming all vmalloc space.
+ */
+
+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
+                unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+        int ret;
+        unsigned long bus_base;
+        unsigned long bus_offset;
+        unsigned long bus_size;
+
+        map->virtual = NULL;
+
+        if (num_pages > bo->num_pages)
+                return -EINVAL;
+        if (start_page > bo->num_pages)
+                return -EINVAL;
+#if 0
+        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+                return -EPERM;
+#endif
+        ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+                                &bus_offset, &bus_size);
+
+        if (ret)
+                return ret;
+
+        if (bus_size == 0) {
+                return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
+        } else {
+                bus_offset += start_page << PAGE_SHIFT;
+                bus_size = num_pages << PAGE_SHIFT;
+                return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+        }
+}
+EXPORT_SYMBOL(drm_bo_kmap);
+
+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
+{
+        if (!map->virtual)
+                return;
+
+        switch(map->bo_kmap_type) {
+        case bo_map_iomap:
+                iounmap(map->virtual);
+                break;
+        case bo_map_vmap:
+                vunmap(map->virtual);
+                break;
+        case bo_map_kmap:
+                kunmap(map->page);
+                break;
+        case bo_map_premapped:
+                break;
+        default:
+                BUG();
+        }
+        map->virtual = NULL;
+        map->page = NULL;
+}
+EXPORT_SYMBOL(drm_bo_kunmap);
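
drm_bo_same_page() and drm_bo_offset_end() exist so that copy and flush helpers can walk a buffer range in chunks that never cross a page boundary; a minimal sketch of that loop (the per-chunk work is a placeholder):

    /* Walk [offset, end) one page-bounded chunk at a time. */
    static void example_walk_range(unsigned long offset, unsigned long end)
    {
            while (offset < end) {
                    unsigned long chunk_end = drm_bo_offset_end(offset, end);

                    /* ... process the bytes in [offset, chunk_end) ... */

                    offset = chunk_end;
            }
    }
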
@@ -35,7 +35,7 @@
  */
 
 void drm_fence_handler(struct drm_device * dev, uint32_t class,
-                       uint32_t sequence, uint32_t type)
+                       uint32_t sequence, uint32_t type, uint32_t error)
 {
         int wake = 0;
         uint32_t diff;
@@ -49,6 +49,7 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
         int is_exe = (type & DRM_FENCE_TYPE_EXE);
         int ge_last_exe;
 
+
         diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
 
         if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
@@ -57,9 +58,6 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
         diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
         ge_last_exe = diff < driver->wrap_diff;
 
-        if (ge_last_exe)
-                fc->pending_flush &= ~type;
-
         if (is_exe && ge_last_exe) {
                 fc->last_exe_flush = sequence;
         }
@@ -75,36 +73,66 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
                 }
         }
 
+        fc->pending_flush &= ~type;
         head = (found) ? &fence->ring : &fc->ring;
 
         list_for_each_entry_safe_reverse(fence, next, head, ring) {
                 if (&fence->ring == &fc->ring)
                         break;
 
+                if (error) {
+                        fence->error = error;
+                        fence->signaled = fence->type;
+                        fence->submitted_flush = fence->type;
+                        fence->flush_mask = fence->type;
+                        list_del_init(&fence->ring);
+                        wake = 1;
+                        break;
+                }
+
                 type |= fence->native_type;
                 relevant = type & fence->type;
 
                 if ((fence->signaled | relevant) != fence->signaled) {
                         fence->signaled |= relevant;
+                        fence->flush_mask |= relevant;
+                        fence->submitted_flush |= relevant;
                         DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
                                   fence->base.hash.key, fence->signaled);
-                        fence->submitted_flush |= relevant;
                         wake = 1;
                 }
 
                 relevant = fence->flush_mask &
-                        ~(fence->signaled | fence->submitted_flush);
+                        ~(fence->submitted_flush | fence->signaled);
 
-                if (relevant) {
-                        fc->pending_flush |= relevant;
-                        fence->submitted_flush = fence->flush_mask;
-                }
+                fc->pending_flush |= relevant;
+                fence->submitted_flush |= relevant;
 
                 if (!(fence->type & ~fence->signaled)) {
                         DRM_DEBUG("Fence completely signaled 0x%08lx\n",
                                   fence->base.hash.key);
                         list_del_init(&fence->ring);
                 }
+        }
+
+        /*
+         * Reinstate lost flush flags.
+         */
+
+        if ((fc->pending_flush & type) != type) {
+                head = head->prev;
+                list_for_each_entry(fence, head, ring) {
+                        if (&fence->ring == &fc->ring)
+                                break;
+                        diff = (fc->last_exe_flush - fence->sequence) &
+                                driver->sequence_mask;
+                        if (diff > driver->wrap_diff)
+                                break;
+
+                        relevant = fence->submitted_flush & ~fence->signaled;
+                        fc->pending_flush |= relevant;
+                }
         }
 
         if (wake) {
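
The widened drm_fence_handler() signature means a driver's fence interrupt path can now report a hardware error along with the completed sequence number; a sketch of such a call site, with the register reads and the caller's locking elided as assumptions:

    /* Illustrative fence IRQ bottom half (register names are made up). */
    static void example_fence_irq(struct drm_device *dev)
    {
            uint32_t sequence = example_read_breadcrumb(dev); /* hypothetical */
            uint32_t error = example_read_error(dev);         /* 0 == none */

            /* A nonzero error now poisons the pending fence entries. */
            drm_fence_handler(dev, 0 /* class */, sequence,
                              DRM_FENCE_TYPE_EXE, error);
    }
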
@@ -141,6 +169,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
                 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
         }
 }
+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
 
 void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
 {
@@ -160,6 +189,7 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
                 mutex_unlock(&dev->struct_mutex);
         }
 }
+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
 
 struct drm_fence_object
 *drm_fence_reference_locked(struct drm_fence_object *src)
@@ -178,7 +208,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
         atomic_inc(&src->usage);
         mutex_unlock(&src->dev->struct_mutex);
 }
-
+EXPORT_SYMBOL(drm_fence_reference_unlocked);
 
 static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
 {
@@ -206,6 +236,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,
 
         return signaled;
 }
+EXPORT_SYMBOL(drm_fence_object_signaled);
 
 static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
                                 struct drm_fence_driver * driver, uint32_t sequence)
@@ -241,7 +272,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 
         write_lock_irqsave(&fm->lock, flags);
         fence->flush_mask |= type;
-        if (fence->submitted_flush == fence->signaled) {
+        if ((fence->submitted_flush & fence->signaled)
+            == fence->submitted_flush) {
                 if ((fence->type & DRM_FENCE_TYPE_EXE) &&
                     !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
                         drm_fence_flush_exe(fc, driver, fence->sequence);
@@ -329,7 +361,15 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,
                 if (ret == -EBUSY) {
                         DRM_ERROR("Fence timeout. "
                                   "GPU lockup or fence driver was "
-                                  "taken down.\n");
+                                  "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n",
+                                  fence->class,
+                                  fence->sequence,
+                                  fence->type,
+                                  mask,
+                                  fence->signaled);
+                        DRM_ERROR("Pending exe flush %d 0x%08x\n",
+                                  fc->pending_exe_flush,
+                                  fc->exe_flush_sequence);
                 }
                 return ((ret == -EINTR) ? -EAGAIN : ret);
         }
@@ -348,6 +388,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
         if (mask & ~fence->type) {
                 DRM_ERROR("Wait trying to extend fence type"
                           " 0x%08x 0x%08x\n", mask, fence->type);
+                BUG();
                 return -EINVAL;
         }
 
@@ -402,6 +443,8 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 
         return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_wait);
+
 
 int drm_fence_object_emit(struct drm_fence_object * fence,
                           uint32_t fence_flags, uint32_t class, uint32_t type)
@@ -434,6 +477,7 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
         write_unlock_irqrestore(&fm->lock, flags);
         return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_emit);
 
 static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
                                  uint32_t type,
@@ -545,6 +589,23 @@ void drm_fence_manager_init(struct drm_device * dev)
         write_unlock_irqrestore(&fm->lock, flags);
 }
 
+void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
+{
+        struct drm_device *dev = fence->dev;
+        struct drm_fence_manager *fm = &dev->fm;
+        unsigned long irq_flags;
+
+        read_lock_irqsave(&fm->lock, irq_flags);
+        arg->handle = fence->base.hash.key;
+        arg->class = fence->class;
+        arg->type = fence->type;
+        arg->signaled = fence->signaled;
+        arg->error = fence->error;
+        read_unlock_irqrestore(&fm->lock, irq_flags);
+}
+EXPORT_SYMBOL(drm_fence_fill_arg);
+
+
 void drm_fence_manager_takedown(struct drm_device * dev)
 {
 }
@@ -572,7 +633,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
         struct drm_fence_manager *fm = &dev->fm;
         struct drm_fence_arg *arg = data;
         struct drm_fence_object *fence;
-        unsigned long flags;
         ret = 0;
 
         if (!fm->initialized) {
@@ -600,11 +660,7 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 
         arg->handle = fence->base.hash.key;
 
-        read_lock_irqsave(&fm->lock, flags);
-        arg->class = fence->class;
-        arg->type = fence->type;
-        arg->signaled = fence->signaled;
-        read_unlock_irqrestore(&fm->lock, flags);
+        drm_fence_fill_arg(fence, arg);
         drm_fence_usage_deref_unlocked(&fence);
 
         return ret;
@@ -642,7 +698,6 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil
         struct drm_fence_arg *arg = data;
         struct drm_fence_object *fence;
         struct drm_user_object *uo;
-        unsigned long flags;
         ret = 0;
 
         if (!fm->initialized) {
@@ -654,12 +709,7 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil
         if (ret)
                 return ret;
         fence = drm_lookup_fence_object(file_priv, arg->handle);
-
-        read_lock_irqsave(&fm->lock, flags);
-        arg->class = fence->class;
-        arg->type = fence->type;
-        arg->signaled = fence->signaled;
-        read_unlock_irqrestore(&fm->lock, flags);
+        drm_fence_fill_arg(fence, arg);
         drm_fence_usage_deref_unlocked(&fence);
 
         return ret;
@@ -687,7 +737,6 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file
         struct drm_fence_manager *fm = &dev->fm;
         struct drm_fence_arg *arg = data;
         struct drm_fence_object *fence;
-        unsigned long flags;
         ret = 0;
 
         if (!fm->initialized) {
@@ -699,11 +748,7 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file
         if (!fence)
                 return -EINVAL;
 
-        read_lock_irqsave(&fm->lock, flags);
-        arg->class = fence->class;
-        arg->type = fence->type;
-        arg->signaled = fence->signaled;
-        read_unlock_irqrestore(&fm->lock, flags);
+        drm_fence_fill_arg(fence, arg);
         drm_fence_usage_deref_unlocked(&fence);
 
         return ret;
@@ -715,7 +760,6 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f
         struct drm_fence_manager *fm = &dev->fm;
         struct drm_fence_arg *arg = data;
         struct drm_fence_object *fence;
-        unsigned long flags;
         ret = 0;
 
         if (!fm->initialized) {
@@ -728,11 +772,7 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f
                 return -EINVAL;
         ret = drm_fence_object_flush(fence, arg->type);
 
-        read_lock_irqsave(&fm->lock, flags);
-        arg->class = fence->class;
-        arg->type = fence->type;
-        arg->signaled = fence->signaled;
-        read_unlock_irqrestore(&fm->lock, flags);
+        drm_fence_fill_arg(fence, arg);
         drm_fence_usage_deref_unlocked(&fence);
 
         return ret;
@@ -745,7 +785,6 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
         struct drm_fence_manager *fm = &dev->fm;
         struct drm_fence_arg *arg = data;
         struct drm_fence_object *fence;
-        unsigned long flags;
         ret = 0;
 
         if (!fm->initialized) {
@@ -760,11 +799,7 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
                             arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
                             0, arg->type);
 
-        read_lock_irqsave(&fm->lock, flags);
-        arg->class = fence->class;
-        arg->type = fence->type;
-        arg->signaled = fence->signaled;
-        read_unlock_irqrestore(&fm->lock, flags);
+        drm_fence_fill_arg(fence, arg);
         drm_fence_usage_deref_unlocked(&fence);
 
         return ret;
@@ -777,7 +812,6 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
         struct drm_fence_manager *fm = &dev->fm;
         struct drm_fence_arg *arg = data;
         struct drm_fence_object *fence;
-        unsigned long flags;
         ret = 0;
 
         if (!fm->initialized) {
@@ -792,11 +826,7 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
         ret = drm_fence_object_emit(fence, arg->flags, arg->class,
                                     arg->type);
 
-        read_lock_irqsave(&fm->lock, flags);
-        arg->class = fence->class;
-        arg->type = fence->type;
-        arg->signaled = fence->signaled;
-        read_unlock_irqrestore(&fm->lock, flags);
+        drm_fence_fill_arg(fence, arg);
         drm_fence_usage_deref_unlocked(&fence);
 
         return ret;
@@ -808,7 +838,6 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file
         struct drm_fence_manager *fm = &dev->fm;
         struct drm_fence_arg *arg = data;
         struct drm_fence_object *fence;
-        unsigned long flags;
         ret = 0;
 
         if (!fm->initialized) {
@@ -821,23 +850,22 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file
                 return -EINVAL;
         }
         LOCK_TEST_WITH_RETURN(dev, file_priv);
-        ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags,
+        ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
                                        NULL, &fence);
         if (ret)
                 return ret;
-        ret = drm_fence_add_user_object(file_priv, fence,
-                                        arg->flags &
-                                        DRM_FENCE_FLAG_SHAREABLE);
-        if (ret)
-                return ret;
+
+        if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
+                ret = drm_fence_add_user_object(file_priv, fence,
+                                                arg->flags &
+                                                DRM_FENCE_FLAG_SHAREABLE);
+                if (ret)
+                        return ret;
+        }
 
         arg->handle = fence->base.hash.key;
 
-        read_lock_irqsave(&fm->lock, flags);
-        arg->class = fence->class;
-        arg->type = fence->type;
-        arg->signaled = fence->signaled;
-        read_unlock_irqrestore(&fm->lock, flags);
+        drm_fence_fill_arg(fence, arg);
         drm_fence_usage_deref_unlocked(&fence);
 
         return ret;
@@ -50,6 +50,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
         list_add_tail(&item->list, &priv->user_objects);
         return 0;
 }
+EXPORT_SYMBOL(drm_add_user_object);
 
 struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key)
 {
@@ -76,6 +77,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
         }
         return item;
 }
+EXPORT_SYMBOL(drm_lookup_user_object);
 
 static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item)
 {
@@ -104,6 +106,7 @@ int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item
         drm_deref_user_object(priv, item);
         return 0;
 }
+EXPORT_SYMBOL(drm_remove_user_object);
 
 static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro,
                                  enum drm_ref_type action)
@@ -196,6 +199,7 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
 
         return drm_hash_entry(hash, struct drm_ref_object, hash);
 }
+EXPORT_SYMBOL(drm_lookup_ref_object);
 
 static void drm_remove_other_references(struct drm_file * priv,
                                         struct drm_user_object * ro)
@ -32,6 +32,7 @@
|
||||||
#define _DRM_OBJECTS_H
|
#define _DRM_OBJECTS_H
|
||||||
|
|
||||||
struct drm_device;
|
struct drm_device;
|
||||||
|
struct drm_bo_mem_reg;
|
||||||
|
|
||||||
/***************************************************
|
/***************************************************
|
||||||
* User space objects. (drm_object.c)
|
* User space objects. (drm_object.c)
|
||||||
|
@ -42,10 +43,14 @@ struct drm_device;
|
||||||
enum drm_object_type {
|
enum drm_object_type {
|
||||||
drm_fence_type,
|
drm_fence_type,
|
||||||
drm_buffer_type,
|
drm_buffer_type,
|
||||||
drm_ttm_type
|
|
||||||
/*
|
/*
|
||||||
* Add other user space object types here.
|
* Add other user space object types here.
|
||||||
*/
|
*/
|
||||||
|
drm_driver_type0 = 256,
|
||||||
|
drm_driver_type1,
|
||||||
|
drm_driver_type2,
|
||||||
|
drm_driver_type3,
|
||||||
|
drm_driver_type4
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -156,6 +161,7 @@ struct drm_fence_object {
|
||||||
uint32_t sequence;
|
uint32_t sequence;
|
||||||
uint32_t flush_mask;
|
uint32_t flush_mask;
|
||||||
uint32_t submitted_flush;
|
uint32_t submitted_flush;
|
||||||
|
uint32_t error;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define _DRM_FENCE_CLASSES 8
|
#define _DRM_FENCE_CLASSES 8
|
||||||
|
@ -192,7 +198,7 @@ struct drm_fence_driver {
|
||||||
};
|
};
|
||||||
|
|
||||||
extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
|
extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
|
||||||
uint32_t sequence, uint32_t type);
|
uint32_t sequence, uint32_t type, uint32_t error);
|
||||||
extern void drm_fence_manager_init(struct drm_device *dev);
|
extern void drm_fence_manager_init(struct drm_device *dev);
|
||||||
extern void drm_fence_manager_takedown(struct drm_device *dev);
|
extern void drm_fence_manager_takedown(struct drm_device *dev);
|
||||||
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
|
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
|
||||||
|
@ -210,6 +216,12 @@ extern int drm_fence_object_wait(struct drm_fence_object * fence,
|
||||||
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
|
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
|
||||||
uint32_t fence_flags, uint32_t class,
|
uint32_t fence_flags, uint32_t class,
|
||||||
struct drm_fence_object ** c_fence);
|
struct drm_fence_object ** c_fence);
|
||||||
|
extern int drm_fence_object_emit(struct drm_fence_object * fence,
|
||||||
|
uint32_t fence_flags, uint32_t class,
|
||||||
|
uint32_t type);
|
||||||
|
extern void drm_fence_fill_arg(struct drm_fence_object *fence,
|
||||||
|
struct drm_fence_arg *arg);
|
||||||
|
|
||||||
extern int drm_fence_add_user_object(struct drm_file * priv,
|
extern int drm_fence_add_user_object(struct drm_file * priv,
|
||||||
struct drm_fence_object * fence, int shareable);
|
struct drm_fence_object * fence, int shareable);
|
||||||
|
|
||||||
|
@ -258,23 +270,22 @@ struct drm_ttm_backend_func {
|
||||||
unsigned long num_pages, struct page ** pages);
|
unsigned long num_pages, struct page ** pages);
|
||||||
void (*clear) (struct drm_ttm_backend * backend);
|
void (*clear) (struct drm_ttm_backend * backend);
|
||||||
int (*bind) (struct drm_ttm_backend * backend,
|
int (*bind) (struct drm_ttm_backend * backend,
|
||||||
unsigned long offset, int cached);
|
struct drm_bo_mem_reg * bo_mem);
|
||||||
int (*unbind) (struct drm_ttm_backend * backend);
|
int (*unbind) (struct drm_ttm_backend * backend);
|
||||||
void (*destroy) (struct drm_ttm_backend * backend);
|
void (*destroy) (struct drm_ttm_backend * backend);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
struct drm_ttm_backend {
|
typedef struct drm_ttm_backend {
|
||||||
|
struct drm_device *dev;
|
||||||
uint32_t flags;
|
uint32_t flags;
|
||||||
int mem_type;
|
|
||||||
struct drm_ttm_backend_func *func;
|
struct drm_ttm_backend_func *func;
|
||||||
};
|
} drm_ttm_backend_t;
|
||||||
|
|
||||||
struct drm_ttm {
|
struct drm_ttm {
|
||||||
struct page **pages;
|
struct page **pages;
|
||||||
uint32_t page_flags;
|
uint32_t page_flags;
|
||||||
unsigned long num_pages;
|
unsigned long num_pages;
|
||||||
unsigned long aper_offset;
|
|
||||||
atomic_t vma_count;
|
atomic_t vma_count;
|
||||||
struct drm_device *dev;
|
struct drm_device *dev;
|
||||||
int destroy;
|
int destroy;
|
||||||
|
@@ -290,11 +301,13 @@ struct drm_ttm {
 };

 extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset);
+extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem);
 extern void drm_ttm_unbind(struct drm_ttm * ttm);
 extern void drm_ttm_evict(struct drm_ttm * ttm);
 extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
 extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
+extern void drm_ttm_cache_flush(void);
+extern int drm_ttm_populate(struct drm_ttm * ttm);

 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
@@ -333,6 +346,8 @@ struct drm_bo_mem_reg {
        uint32_t mem_type;
        uint64_t flags;
        uint64_t mask;
+       uint32_t desired_tile_stride;
+       uint32_t hw_tile_stride;
 };

 struct drm_buffer_object {
@@ -356,10 +371,13 @@ struct drm_buffer_object {

        uint32_t fence_type;
        uint32_t fence_class;
+       uint32_t new_fence_type;
+       uint32_t new_fence_class;
        struct drm_fence_object *fence;
        uint32_t priv_flags;
        wait_queue_head_t event_queue;
        struct mutex mutex;
+       unsigned long num_pages;

        /* For pinned buffers */
        int pinned;
@@ -368,7 +386,6 @@ struct drm_buffer_object {
        struct list_head pinned_lru;

        /* For vm */
-
        struct drm_ttm *ttm;
        struct drm_map_list map_list;
        uint32_t memory_type;
@@ -395,6 +412,7 @@ struct drm_mem_type_manager {
        struct list_head pinned;
        uint32_t flags;
        uint32_t drm_bus_maptype;
+       unsigned long gpu_offset;
        unsigned long io_offset;
        unsigned long io_size;
        void *io_addr;
@@ -434,7 +452,8 @@ struct drm_bo_driver {
        uint32_t num_mem_busy_prio;
        struct drm_ttm_backend *(*create_ttm_backend_entry)
         (struct drm_device * dev);
-       int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type);
+       int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
+                          uint32_t * type);
        int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
        int (*init_mem_type) (struct drm_device * dev, uint32_t type,
                              struct drm_mem_type_manager * man);
@@ -472,32 +491,44 @@ extern int drm_bo_pci_offset(struct drm_device *dev,
 extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);

 extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
-extern int drm_fence_buffer_objects(struct drm_file * priv,
+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo);
+extern void drm_putback_buffer_objects(struct drm_device *dev);
+extern int drm_fence_buffer_objects(struct drm_device * dev,
                                     struct list_head *list,
                                     uint32_t fence_flags,
                                     struct drm_fence_object * fence,
                                     struct drm_fence_object ** used_fence);
 extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
+                                    enum drm_bo_type type, uint64_t mask,
+                                    uint32_t hint, uint32_t page_alignment,
+                                    unsigned long buffer_start,
+                                    struct drm_buffer_object **bo);
 extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
                        int no_wait);
 extern int drm_bo_mem_space(struct drm_buffer_object * bo,
                             struct drm_bo_mem_reg * mem, int no_wait);
 extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
                               int no_wait, int move_unfenced);
-extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
-                                    enum drm_bo_type type, uint64_t mask,
-                                    uint32_t hint, uint32_t page_alignment,
-                                    unsigned long buffer_start,
-                                    struct drm_buffer_object **bo);
-extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
+extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type);
+extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
                           unsigned long p_offset, unsigned long p_size);
-extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
-extern int drm_bo_add_user_object(struct drm_file *file_priv,
-                                  struct drm_buffer_object *bo, int sharable);
-extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
+extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
+                                  uint32_t fence_class, uint64_t flags,
+                                  uint64_t mask, uint32_t hint,
+                                  struct drm_bo_info_rep * rep,
+                                  struct drm_buffer_object **bo_rep);
+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv,
+                                                          uint32_t handle,
+                                                          int check_owner);
+extern int drm_bo_do_validate(struct drm_buffer_object *bo,
+                              uint64_t flags, uint64_t mask, uint32_t hint,
+                              uint32_t fence_class,
+                              int no_wait,
+                              struct drm_bo_info_rep *rep);

 /*
- * Buffer object memory move helpers.
+ * Buffer object memory move- and map helpers.
  * drm_bo_move.c
  */
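Together these exports give a driver super-ioctl the pieces it needs to look up and validate user buffers without going through the generic buffer-object ioctl. A simplified sketch; foo_validate_handle is hypothetical, and locking plus the reply copy-out are left out:

static int foo_validate_handle(struct drm_file *file_priv, uint32_t handle,
                               struct drm_bo_info_rep *rep)
{
        struct drm_buffer_object *bo;
        int ret;

        bo = drm_lookup_buffer_object(file_priv, handle, 1 /* check_owner */);
        if (!bo)
                return -EINVAL;

        /* Ask for a TT placement; only bits inside the mask may change. */
        ret = drm_bo_do_validate(bo, DRM_BO_FLAG_MEM_TT, DRM_BO_FLAG_MEM_TT,
                                 0 /* hint */, 0 /* fence_class */,
                                 0 /* no_wait */, rep);
        drm_bo_usage_deref_unlocked(&bo);
        return ret;
}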
@@ -513,11 +544,69 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
                                      uint32_t fence_type,
                                      uint32_t fence_flags,
                                      struct drm_bo_mem_reg * new_mem);
+extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
+extern unsigned long drm_bo_offset_end(unsigned long offset,
+                                       unsigned long end);

-extern int drm_mem_reg_ioremap(struct drm_device *dev,
-                               struct drm_bo_mem_reg *mem, void **virtual);
-extern void drm_mem_reg_iounmap(struct drm_device *dev,
-                                struct drm_bo_mem_reg *mem, void *virtual);
+struct drm_bo_kmap_obj {
+       void *virtual;
+       struct page *page;
+       enum {
+               bo_map_iomap,
+               bo_map_vmap,
+               bo_map_kmap,
+               bo_map_premapped,
+       } bo_kmap_type;
+};
+
+static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
+{
+       *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
+                    map->bo_kmap_type == bo_map_premapped);
+       return map->virtual;
+}
+extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
+extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
+                      unsigned long num_pages, struct drm_bo_kmap_obj *map);
+
+
+/*
+ * drm_regman.c
+ */
+
+struct drm_reg {
+       struct list_head head;
+       struct drm_fence_object *fence;
+       uint32_t fence_type;
+       uint32_t new_fence_type;
+};
+
+struct drm_reg_manager {
+       struct list_head free;
+       struct list_head lru;
+       struct list_head unfenced;
+
+       int (*reg_reusable)(const struct drm_reg *reg, const void *data);
+       void (*reg_destroy)(struct drm_reg *reg);
+};
+
+extern int drm_regs_alloc(struct drm_reg_manager *manager,
+                         const void *data,
+                         uint32_t fence_class,
+                         uint32_t fence_type,
+                         int interruptible,
+                         int no_wait,
+                         struct drm_reg **reg);
+
+extern void drm_regs_fence(struct drm_reg_manager *regs,
+                          struct drm_fence_object *fence);
+
+extern void drm_regs_free(struct drm_reg_manager *manager);
+extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
+extern void drm_regs_init(struct drm_reg_manager *manager,
+                         int (*reg_reusable)(const struct drm_reg *,
+                                             const void *),
+                         void (*reg_destroy)(struct drm_reg *));

 #ifdef CONFIG_DEBUG_MUTEXES
 #define DRM_ASSERT_LOCKED(_mutex) \
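This is the kernel mapping facility named in the commit message: drm_bo_kmap maps a range of a buffer object regardless of where it is currently placed, and drm_bmo_virtual tells the caller whether the returned pointer is I/O memory. A minimal usage sketch; foo_write_word is a hypothetical helper and error handling is kept simple:

static int foo_write_word(struct drm_buffer_object *bo, uint32_t val)
{
        struct drm_bo_kmap_obj map;
        void *virtual;
        int is_iomem;
        int ret;

        /* Map the first page, wherever the buffer currently lives. */
        ret = drm_bo_kmap(bo, 0, 1, &map);
        if (ret)
                return ret;

        virtual = drm_bmo_virtual(&map, &is_iomem);
        if (is_iomem)
                writel(val, (uint32_t __iomem *)virtual);
        else
                *(uint32_t *)virtual = val;

        drm_bo_kunmap(&map);
        return 0;
}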
@@ -526,5 +615,4 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev,
 #else
 #define DRM_ASSERT_LOCKED(_mutex)
 #endif
-
 #endif

@@ -35,11 +35,12 @@ static void drm_ttm_ipi_handler(void *null)
        flush_agp_cache();
 }

-static void drm_ttm_cache_flush(void)
+void drm_ttm_cache_flush(void)
 {
        if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
                DRM_ERROR("Timed out waiting for drm cache flush.\n");
 }
+EXPORT_SYMBOL(drm_ttm_cache_flush);

 /*
  * Use kmalloc if possible. Otherwise fall back to vmalloc.
@@ -207,7 +208,7 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
        return p;
 }

-static int drm_ttm_populate(struct drm_ttm * ttm)
+int drm_ttm_populate(struct drm_ttm * ttm)
 {
        struct page *page;
        unsigned long i;
@@ -308,7 +309,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm)
        drm_ttm_fixup_caching(ttm);
 }

-int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
+int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
 {

        int ret = 0;
@@ -325,17 +326,16 @@ int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
        if (ret)
                return ret;

-       if (ttm->state == ttm_unbound && !cached) {
+       if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
                drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
        }

-       if ((ret = be->func->bind(be, aper_offset, cached))) {
+       if ((ret = be->func->bind(be, bo_mem))) {
                ttm->state = ttm_evicted;
                DRM_ERROR("Couldn't bind backend.\n");
                return ret;
        }

-       ttm->aper_offset = aper_offset;
        ttm->state = ttm_bound;

        return 0;

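Callers now hand drm_bind_ttm the whole memory region instead of a pre-split offset/cached pair, so the caching decision and the aperture offset always agree with the placement. A hedged sketch of building such a region on the stack, mirroring nouveau_sgdma_nottm_hack_init further down; foo_bind_at_start is illustrative:

static int foo_bind_at_start(struct drm_ttm *ttm)
{
        struct drm_mm_node mm_node;
        struct drm_bo_mem_reg mem;

        /* Place at aperture offset 0; no DRM_BO_FLAG_CACHED means
         * the pages are bound uncached. */
        mm_node.start = 0;
        mem.mm_node = &mm_node;
        mem.flags = 0;

        return drm_bind_ttm(ttm, &mem);
}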
@@ -38,7 +38,9 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev)
        return drm_agp_init_ttm(dev);
 }

-int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type)
+int i915_fence_types(struct drm_buffer_object *bo,
+                    uint32_t * fclass,
+                    uint32_t * type)
 {
        if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
                *type = 3;
@@ -71,6 +73,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
                man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                    _DRM_FLAG_MEMTYPE_CACHED;
                man->drm_bus_maptype = 0;
+               man->gpu_offset = 0;
                break;
        case DRM_BO_MEM_TT:
                if (!(drm_core_has_AGP(dev) && dev->agp)) {
@@ -84,6 +87,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
                man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                    _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
                man->drm_bus_maptype = _DRM_AGP;
+               man->gpu_offset = 0;
                break;
        case DRM_BO_MEM_PRIV0:
                if (!(drm_core_has_AGP(dev) && dev->agp)) {
@@ -97,6 +101,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
                man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                    _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
                man->drm_bus_maptype = _DRM_AGP;
+               man->gpu_offset = 0;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -196,7 +201,7 @@ static int i915_move_flip(struct drm_buffer_object * bo,
        if (ret)
                return ret;

-       ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
+       ret = drm_bind_ttm(bo->ttm, &tmp_mem);
        if (ret)
                goto out_cleanup;

@@ -63,7 +63,8 @@ static void i915_perform_flush(struct drm_device * dev)

                diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
                if (diff < driver->wrap_diff && diff != 0) {
-                       drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
+                       drm_fence_handler(dev, 0, sequence,
+                                         DRM_FENCE_TYPE_EXE, 0);
                }

                if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
@@ -82,7 +83,7 @@ static void i915_perform_flush(struct drm_device * dev)
                        flush_flags = dev_priv->flush_flags;
                        flush_sequence = dev_priv->flush_sequence;
                        dev_priv->flush_pending = 0;
-                       drm_fence_handler(dev, 0, flush_sequence, flush_flags);
+                       drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
                }
        }

@@ -103,7 +104,7 @@ static void i915_perform_flush(struct drm_device * dev)
                        flush_flags = dev_priv->flush_flags;
                        flush_sequence = dev_priv->flush_sequence;
                        dev_priv->flush_pending = 0;
-                       drm_fence_handler(dev, 0, flush_sequence, flush_flags);
+                       drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
                }
        }

@@ -80,16 +80,16 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be)
 }

 static int
-nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start,
-                  int cached)
+nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
 {
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-       uint64_t offset = (pg_start << PAGE_SHIFT);
+       uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
        uint32_t i;

-       DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached);
+       DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
+                 offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1);

        if (offset & NV_CTXDMA_PAGE_MASK)
                return -EINVAL;
@@ -188,7 +188,6 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
        nvbe->dev = dev;

        nvbe->backend.func = &nouveau_sgdma_backend;
-       nvbe->backend.mem_type = DRM_BO_MEM_TT;

        return &nvbe->backend;
 }
@@ -278,6 +277,8 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_ttm_backend *be;
        struct drm_scatter_gather sgreq;
+       struct drm_mm_node mm_node;
+       struct drm_bo_mem_reg mem;
        int ret;

        dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
@@ -303,7 +304,10 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
                return ret;
        }

-       if ((ret = be->func->bind(be, 0, 0))) {
+       mm_node.start = 0;
+       mem.mm_node = &mm_node;
+
+       if ((ret = be->func->bind(be, &mem))) {
                DRM_ERROR("failed bind: %d\n", ret);
                return ret;
        }

@@ -37,7 +37,8 @@ struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
        return drm_agp_init_ttm(dev);
 }

-int via_fence_types(struct drm_buffer_object *bo, uint32_t * type)
+int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass,
+                   uint32_t * type)
 {
        *type = 3;
        return 0;

@@ -98,7 +98,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
                        drm_idlelock_release(&dev->lock);
                        dev_priv->have_idlelock = 0;
                }
-               drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types);
+               drm_fence_handler(dev, 0, dev_priv->emit_0_sequence,
+                                 signaled_flush_types, 0);
        }
 }

@@ -60,7 +60,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)

        if (signaled_flush_types) {
                drm_fence_handler(dev, 0, info->complete_sequence,
-                                 signaled_flush_types);
+                                 signaled_flush_types, 0);
        }
 }

@@ -639,6 +639,7 @@ struct drm_set_version {
 #define DRM_FENCE_FLAG_SHAREABLE 0x00000002
 #define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
 #define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
+#define DRM_FENCE_FLAG_NO_USER 0x00000010

 /* Reserved for driver use */
 #define DRM_FENCE_MASK_DRIVER 0xFF000000
@@ -651,7 +652,7 @@ struct drm_fence_arg {
        unsigned int type;
        unsigned int flags;
        unsigned int signaled;
-       unsigned int pad64;
+       unsigned int error;
        uint64_t expand_pad[3]; /*Future expansion */
 };

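Because 'error' reuses the old pad64 slot, the ioctl ABI layout is unchanged, and user space can now tell a cleanly signaled fence from one that signaled with an engine error. A hypothetical check, assuming the arg was filled in by a fence-wait ioctl and that drm.h is included:

static void foo_check_fence(const struct drm_fence_arg *arg)
{
        /* A signaled fence with a non-zero error completed abnormally. */
        if (arg->signaled && arg->error != 0)
                fprintf(stderr, "fence signaled with error %u\n", arg->error);
}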
@@ -214,7 +214,8 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f
 #ifdef I915_HAVE_BUFFER
 /* i915_buffer.c */
 extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
-extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *type);
+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
+                           uint32_t *type);
 extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
                               struct drm_mem_type_manager *man);
@@ -206,7 +206,8 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,

 #ifdef VIA_HAVE_BUFFER
 extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
-extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *type);
+extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
+                          uint32_t *type);
 extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
                              struct drm_mem_type_manager *man);