Merge branch 'bo-set-pin'
This branch replaces the NO_MOVE/NO_EVICT flags passed to buffer validation with a separate, privileged ioctl that pins buffers the way NO_EVICT did before. The functionality NO_MOVE was meant to cover may be reintroduced later, possibly in a different form, once the superioctl branch is merged.
commit
24e33627c5
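Below is a minimal usage sketch of the new interface; it is not part of the commit. It assumes the libdrm entry point added here (drmBOSetPin) together with the existing drmBO handle type, and the header names and helper names pin_scanout/unpin_scanout are made up for illustration. Because DRM_IOCTL_BO_SET_PIN is registered with DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY, the caller must be an authenticated, privileged DRM master.

/* Hypothetical example (not from this commit): pin a scanout buffer so
 * its offset stays fixed, then unpin it at teardown. Error handling is
 * abbreviated; header names are assumed. */
#include <stdio.h>
#include "xf86drm.h"
#include "xf86mm.h"

static int pin_scanout(int fd, drmBO *bo)
{
    /* drmBOSetPin(fd, bo, 1) issues DRM_IOCTL_BO_SET_PIN with pin = 1 and
     * copies the updated buffer info (including the now-stable offset)
     * back into *bo. */
    int ret = drmBOSetPin(fd, bo, 1);
    if (ret) {
        fprintf(stderr, "pinning failed: %d\n", ret);
        return ret;
    }
    return 0;
}

static void unpin_scanout(int fd, drmBO *bo)
{
    /* pin = 0 puts the buffer back on the LRU, so it may be evicted or
     * moved again. */
    (void) drmBOSetPin(fd, bo, 0);
}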
@@ -2874,6 +2874,7 @@ int drmBOUnmap(int fd, drmBO *buf)
    if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
        return -errno;
    }
    buf->mapCount--;
    return 0;
}

@@ -2975,7 +2976,30 @@ int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
    }
    return 0;
}

int drmBOSetPin(int fd, drmBO *buf, int pin)
{
    struct drm_bo_set_pin_arg arg;
    struct drm_bo_set_pin_req *req = &arg.d.req;
    struct drm_bo_info_rep *rep = &arg.d.rep;
    int ret = 0;

    memset(&arg, 0, sizeof(arg));
    req->handle = buf->handle;
    req->pin = pin;

    do {
        ret = ioctl(fd, DRM_IOCTL_BO_SET_PIN, &arg);
    } while (ret && errno == EAGAIN);

    if (ret)
        return -errno;

    drmBOCopyReply(rep, buf);

    return 0;
}

int drmBOBusy(int fd, drmBO *buf, int *busy)
{
    if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) &&
@@ -201,6 +201,7 @@ extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
extern int drmBOValidateList(int fd, drmBOList *list);
extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle);
extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
int drmBOSetPin(int fd, drmBO *buf, int pin);

/*
 * Initialization functions.
@@ -269,6 +269,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
ifneq ($(PAGE_AGP),0)
EXTRA_CFLAGS += -DHAVE_PAGE_AGP
endif
EXTRA_CFLAGS += -g -O0

# Start with all modules turned off.
CONFIG_DRM_GAMMA := n
@@ -80,8 +80,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo)

    DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

    if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
        || bo->mem.mem_type != bo->pinned_mem_type) {
    if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) {
        man = &bo->dev->bm.man[bo->mem.mem_type];
        list_add_tail(&bo->lru, &man->lru);
    } else {
@@ -707,6 +706,10 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
    return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int drm_bo_mem_force_space(struct drm_device * dev,
                                  struct drm_bo_mem_reg * mem,
                                  uint32_t mem_type, int no_wait)

@@ -734,7 +737,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev,
    atomic_inc(&entry->usage);
    mutex_unlock(&dev->struct_mutex);
    mutex_lock(&entry->mutex);
    BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
    BUG_ON(entry->pinned);

    ret = drm_bo_evict(entry, mem_type, no_wait);
    mutex_unlock(&entry->mutex);

@@ -793,6 +796,14 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
    return 1;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * drm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int drm_bo_mem_space(struct drm_buffer_object * bo,
                     struct drm_bo_mem_reg * mem, int no_wait)
{
@@ -894,18 +905,6 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo,
        DRM_ERROR("User buffers are not supported yet\n");
        return -EINVAL;
    }
    if (bo->type == drm_bo_type_fake &&
        !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
        DRM_ERROR("Fake buffers must be pinned.\n");
        return -EINVAL;
    }

    if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
        DRM_ERROR
            ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
             "processes\n");
        return -EPERM;
    }

    new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
                            DRM_BO_FLAG_READ);

@@ -1383,6 +1382,12 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
        return ret;
    }

    if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) {
        DRM_ERROR("Attempt to validate pinned buffer into different memory "
                  "type\n");
        return -EINVAL;
    }

    /*
     * We're switching command submission mechanism,
     * or cannot simply rely on the hardware serializing for us.

@@ -1426,37 +1431,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
        }
    }

    /*
     * Pinned buffers.
     */

    if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
        bo->pinned_mem_type = bo->mem.mem_type;
        mutex_lock(&dev->struct_mutex);
        list_del_init(&bo->pinned_lru);
        drm_bo_add_to_pinned_lru(bo);

        if (bo->pinned_node != bo->mem.mm_node) {
            if (bo->pinned_node != NULL)
                drm_mm_put_block(bo->pinned_node);
            bo->pinned_node = bo->mem.mm_node;
        }

        mutex_unlock(&dev->struct_mutex);

    } else if (bo->pinned_node != NULL) {

        mutex_lock(&dev->struct_mutex);

        if (bo->pinned_node != bo->mem.mm_node)
            drm_mm_put_block(bo->pinned_node);

        list_del_init(&bo->pinned_lru);
        bo->pinned_node = NULL;
        mutex_unlock(&dev->struct_mutex);

    }

    /*
     * We might need to add a TTM.
     */
@@ -1534,6 +1508,10 @@ static int drm_bo_handle_validate(struct drm_file *file_priv,
    return ret;
}

/**
 * Fills out the generic buffer object ioctl reply with the information for
 * the BO with id of handle.
 */
static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
                              struct drm_bo_info_rep *rep)
{

@@ -1600,6 +1578,7 @@ int drm_buffer_object_create(struct drm_device *dev,
{
    struct drm_buffer_manager *bm = &dev->bm;
    struct drm_buffer_object *bo;
    struct drm_bo_driver *driver = dev->driver->bo_driver;
    int ret = 0;
    unsigned long num_pages;

@@ -1659,10 +1638,28 @@ int drm_buffer_object_create(struct drm_device *dev,
        if (ret)
            goto out_err;
    }
    ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);

    bo->fence_class = 0;
    ret = driver->fence_type(bo, &bo->fence_type);
    if (ret) {
        DRM_ERROR("Driver did not support given buffer permissions\n");
        goto out_err;
    }

    if (bo->type == drm_bo_type_fake) {
        ret = drm_bo_check_fake(dev, &bo->mem);
        if (ret)
            goto out_err;
    }

    ret = drm_bo_add_ttm(bo);
    if (ret)
        goto out_err;

    mutex_lock(&dev->struct_mutex);
    drm_bo_add_to_lru(bo);
    mutex_unlock(&dev->struct_mutex);

    mutex_unlock(&bo->mutex);
    *buf_obj = bo;
    return 0;
@@ -1713,6 +1710,8 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
    void __user *curuserarg = NULL;
    int ret;

    DRM_DEBUG("drm_bo_op_ioctl\n");

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;

@@ -1785,14 +1784,15 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
    struct drm_buffer_object *entry;
    int ret = 0;

    DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
              (int)(req->size / 1024), req->page_alignment * 4, req->type);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;
    }

    ret = drm_bo_lock_test(dev, file_priv);
    if (ret)
        goto out;
    if (req->type == drm_bo_type_fake)
        LOCK_TEST_WITH_RETURN(dev, file_priv);

    ret = drm_buffer_object_create(file_priv->head->dev,
                                   req->size, req->type, req->mask,
@@ -1823,6 +1823,8 @@ int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
    struct drm_user_object *uo;
    int ret = 0;

    DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;

@@ -1846,6 +1848,9 @@ int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_p
    struct drm_bo_info_req *req = &arg->d.req;
    struct drm_bo_info_rep *rep = &arg->d.rep;
    int ret;

    DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;

@@ -1863,6 +1868,9 @@ int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file
{
    struct drm_bo_handle_arg *arg = data;
    int ret;

    DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;

@@ -1881,6 +1889,8 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *
    struct drm_user_object *uo;
    int ret;

    DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;

@@ -1903,6 +1913,8 @@ int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file
    struct drm_bo_handle_arg *arg = data;
    int ret = 0;

    DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;

@@ -1919,6 +1931,8 @@ int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
    struct drm_bo_info_rep *rep = &arg->d.rep;
    int ret;

    DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;

@@ -1937,6 +1951,9 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *
    struct drm_bo_info_req *req = &arg->d.req;
    struct drm_bo_info_rep *rep = &arg->d.rep;
    int ret;

    DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;
@@ -1950,6 +1967,117 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *
    return 0;
}

/**
 * Pins or unpins the given buffer object in the given memory area.
 *
 * Pinned buffers will not be evicted from or move within their memory area.
 * Must be called with the hardware lock held for pinning.
 */
static int
drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo,
               int pin)
{
    int ret = 0;

    mutex_lock(&bo->mutex);
    if (bo->pinned == pin) {
        mutex_unlock(&bo->mutex);
        return 0;
    }

    if (pin) {
        ret = drm_bo_wait_unfenced(bo, 0, 0);
        if (ret) {
            mutex_unlock(&bo->mutex);
            return ret;
        }

        /* Validate the buffer into its pinned location, with no
         * pending fence.
         */
        ret = drm_buffer_object_validate(bo, 0, 0, 0);
        if (ret) {
            mutex_unlock(&bo->mutex);
            return ret;
        }

        /* Pull the buffer off of the LRU and add it to the pinned
         * list
         */
        bo->pinned_mem_type = bo->mem.mem_type;
        mutex_lock(&dev->struct_mutex);
        list_del_init(&bo->lru);
        list_del_init(&bo->pinned_lru);
        drm_bo_add_to_pinned_lru(bo);

        if (bo->pinned_node != bo->mem.mm_node) {
            if (bo->pinned_node != NULL)
                drm_mm_put_block(bo->pinned_node);
            bo->pinned_node = bo->mem.mm_node;
        }

        bo->pinned = pin;
        mutex_unlock(&dev->struct_mutex);

    } else {
        mutex_lock(&dev->struct_mutex);

        /* Remove our buffer from the pinned list */
        if (bo->pinned_node != bo->mem.mm_node)
            drm_mm_put_block(bo->pinned_node);

        list_del_init(&bo->pinned_lru);
        bo->pinned_node = NULL;
        bo->pinned = pin;
        mutex_unlock(&dev->struct_mutex);
    }
    mutex_unlock(&bo->mutex);
    return 0;
}

int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
    struct drm_bo_set_pin_arg *arg = data;
    struct drm_bo_set_pin_req *req = &arg->d.req;
    struct drm_bo_info_rep *rep = &arg->d.rep;
    struct drm_buffer_object *bo;
    int ret;

    DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n",
              req->handle, req->pin);

    if (!dev->bm.initialized) {
        DRM_ERROR("Buffer object manager is not initialized.\n");
        return -EINVAL;
    }

    if (req->pin < 0 || req->pin > 1) {
        DRM_ERROR("Bad arguments to set_pin\n");
        return -EINVAL;
    }

    if (req->pin)
        LOCK_TEST_WITH_RETURN(dev, file_priv);

    mutex_lock(&dev->struct_mutex);
    bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
    mutex_unlock(&dev->struct_mutex);
    if (!bo) {
        return -EINVAL;
    }

    ret = drm_bo_set_pin(dev, bo, req->pin);
    if (ret) {
        drm_bo_usage_deref_unlocked(&bo);
        return ret;
    }

    drm_bo_fill_rep_arg(bo, rep);
    drm_bo_usage_deref_unlocked(&bo);

    return 0;
}


/**
@@ -2011,11 +2139,10 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,
        mutex_unlock(&dev->struct_mutex);
    }

    if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
        DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
    if (bo->pinned) {
        DRM_ERROR("A pinned buffer was present at "
                  "cleanup. Removing flag and evicting.\n");
        bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
        bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
        bo->pinned = 0;
    }

    if (bo->mem.mem_type == mem_type)

@@ -2297,8 +2424,7 @@ int drm_bo_driver_init(struct drm_device * dev)
     * Initialize the system memory buffer type.
     * Other types need to be driver / IOCTL initialized.
     */

    ret = drm_bo_init_mm(dev, 0, 0, 0);
    ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
    if (ret)
        goto out_unlock;
@@ -2328,6 +2454,9 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
    struct drm_bo_driver *driver = dev->driver->bo_driver;
    int ret;

    DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n",
              arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4));

    if (!driver) {
        DRM_ERROR("Buffer objects are not supported by this driver\n");
        return -EINVAL;

@@ -2382,6 +2511,8 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f
    struct drm_bo_driver *driver = dev->driver->bo_driver;
    int ret;

    DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type);

    if (!driver) {
        DRM_ERROR("Buffer objects are not supported by this driver\n");
        return -EINVAL;

@@ -2419,6 +2550,8 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
    struct drm_bo_driver *driver = dev->driver->bo_driver;
    int ret;

    DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type);

    if (!driver) {
        DRM_ERROR("Buffer objects are not supported by this driver\n");
        return -EINVAL;

@@ -2441,6 +2574,8 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
    struct drm_bo_driver *driver = dev->driver->bo_driver;
    int ret;

    DRM_DEBUG("drm_mm_unlock_ioctl\n");

    if (!driver) {
        DRM_ERROR("Buffer objects are not supported by this driver\n");
        return -EINVAL;
@@ -147,6 +147,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
    DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_BO_SET_PIN, drm_bo_set_pin_ioctl, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};

#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )

@@ -653,7 +654,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
err_i1:
    atomic_dec(&dev->ioctl_count);
    if (retcode)
        DRM_DEBUG("ret = %x\n", retcode);
        DRM_DEBUG("ret = %d\n", retcode);
    return retcode;
}
EXPORT_SYMBOL(drm_unlocked_ioctl);
@@ -229,26 +229,23 @@ int drm_getclient(struct drm_device *dev, void *data,

    idx = client->idx;
    mutex_lock(&dev->struct_mutex);

    if (list_empty(&dev->filelist)) {
        mutex_unlock(&dev->struct_mutex);
        return -EINVAL;
    }

    i = 0;
    list_for_each_entry(pt, &dev->filelist, lhead) {
        if (i++ >= idx)
            break;
    }
        if (i++ >= idx) {
            client->auth = pt->authenticated;
            client->pid = pt->pid;
            client->uid = pt->uid;
            client->magic = pt->magic;
            client->iocs = pt->ioctl_count;
            mutex_unlock(&dev->struct_mutex);

    client->auth = pt->authenticated;
    client->pid = pt->pid;
    client->uid = pt->uid;
    client->magic = pt->magic;
    client->iocs = pt->ioctl_count;
            return 0;
        }
    }
    mutex_unlock(&dev->struct_mutex);

    return 0;
    return -EINVAL;
}

/**
@@ -362,6 +362,7 @@ struct drm_buffer_object {
    struct mutex mutex;

    /* For pinned buffers */
    int pinned;
    struct drm_mm_node *pinned_node;
    uint32_t pinned_mem_type;
    struct list_head pinned_lru;

@@ -455,7 +456,7 @@ extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct d
extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);

int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);

extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
@@ -670,14 +670,6 @@ struct drm_fence_arg {
 * Can also be set in the buffer mask before validation.
 */

/*
 * Mask: Never evict this buffer. Not even with force. This type of buffer is only
 * available to root and must be manually removed before buffer manager shutdown
 * or lock.
 * Flags: Acknowledge
 */
#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)

/*
 * Mask: Require that the buffer is placed in mappable memory when validated.
 * If not set the buffer may or may not be in mappable memory when validated.

@@ -795,6 +787,18 @@ struct drm_bo_op_req {
    struct drm_bo_info_req bo_req;
};

struct drm_bo_set_pin_req {
    /** Buffer object ID */
    unsigned int handle;
    /**
     * - 0: Unpin the given buffer object.
     * - 1: Pin the given buffer object, requiring that its offset and
     *   memory area stay constant until unpin. The intended use is for
     *   scanout buffers.
     */
    unsigned int pin;
};

/*
 * Reply flags
 */

@@ -860,6 +864,13 @@ struct drm_bo_op_arg {
    unsigned int pad64;
};

struct drm_bo_set_pin_arg {
    union {
        struct drm_bo_set_pin_req req;
        struct drm_bo_info_rep rep;
    } d;
};

#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2

@@ -975,7 +986,7 @@ struct drm_mm_init_arg {
#define DRM_IOCTL_BO_OP         DRM_IOWR(0xd3, struct drm_bo_op_arg)
#define DRM_IOCTL_BO_INFO       DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_WAIT_IDLE  DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)

#define DRM_IOCTL_BO_SET_PIN    DRM_IOWR(0xd6, struct drm_bo_set_pin_arg)

/*@}*/