Merge branch 'drm-gem' into modesetting-gem

Jesse Barnes 2008-08-07 14:02:04 -07:00
commit c7fb19e9b0
3 changed files with 189 additions and 20 deletions

intel_bufmgr_gem.h

@@ -61,6 +61,33 @@ struct intel_bufmgr {
    int (*emit_reloc)(dri_bo *reloc_buf,
                      uint32_t read_domains, uint32_t write_domain,
                      uint32_t delta, uint32_t offset, dri_bo *target);
    /**
     * Pin a buffer to the aperture and fix the offset until unpinned
     *
     * \param buf Buffer to pin
     * \param alignment Required alignment for aperture, in bytes
     */
    int (*pin) (dri_bo *buf, uint32_t alignment);
    /**
     * Unpin a buffer from the aperture, allowing it to be removed
     *
     * \param buf Buffer to unpin
     */
    int (*unpin) (dri_bo *buf);
    /**
     * Ask that the buffer be placed in tiling mode
     *
     * \param bo Buffer to set tiling mode for
     * \param tiling_mode Desired tiling mode on input; actual tiling mode on return
     */
    int (*set_tiling) (dri_bo *bo, uint32_t *tiling_mode);
    /**
     * Create a visible name for a buffer which can be used by other apps
     *
     * \param buf Buffer to create a name for
     * \param name Returned name
     */
    int (*flink) (dri_bo *buf, uint32_t *name);
};
/* intel_bufmgr_gem.c */
@@ -91,5 +118,13 @@ int intel_bo_emit_reloc(dri_bo *reloc_buf,
                        uint32_t read_domains, uint32_t write_domain,
                        uint32_t delta, uint32_t offset, dri_bo *target_buf);
int intel_bo_pin(dri_bo *buf, uint32_t alignment);
int intel_bo_unpin(dri_bo *buf);
int intel_bo_set_tiling(dri_bo *buf, uint32_t *tiling_mode);
int intel_bo_flink(dri_bo *buf, uint32_t *name);
#endif /* INTEL_BUFMGR_GEM_H */
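For orientation, here is a minimal sketch of how a client might drive these new entry points, assuming an already-created dri_bo; the helper name and the 4096-byte scanout alignment are illustrative, not part of this commit, and dri_bo / I915_TILING_* are assumed to come in via the header:

#include "intel_bufmgr_gem.h"

/* Hypothetical helper: pin a buffer for scanout, request X tiling,
 * and publish a flink name that other processes can open. */
static int
share_scanout_bo(dri_bo *bo, uint32_t *name)
{
    uint32_t tiling = I915_TILING_X;
    int ret;

    ret = intel_bo_pin(bo, 4096);      /* offset stays fixed until unpin */
    if (ret != 0)
        return ret;

    ret = intel_bo_set_tiling(bo, &tiling);
    if (ret != 0)
        return ret;
    /* tiling now holds the mode the kernel actually applied,
     * which may differ from the one requested */

    return intel_bo_flink(bo, name);
}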

intel_bufmgr_gem.c

@@ -768,6 +768,81 @@ dri_gem_post_submit(dri_bo *batch_buf)
    bufmgr_gem->exec_count = 0;
}

static int
dri_gem_pin(dri_bo *bo, uint32_t alignment)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_pin pin;
    int ret;

    pin.handle = bo_gem->gem_handle;
    pin.alignment = alignment;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
    if (ret != 0)
        return -errno;

    bo->offset = pin.offset;
    return 0;
}

static int
dri_gem_unpin(dri_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_unpin unpin;
    int ret;

    unpin.handle = bo_gem->gem_handle;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
    if (ret != 0)
        return -errno;

    return 0;
}

static int
dri_gem_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_set_tiling set_tiling;
    int ret;

    set_tiling.handle = bo_gem->gem_handle;
    set_tiling.tiling_mode = *tiling_mode;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
    if (ret != 0) {
        *tiling_mode = I915_TILING_NONE;
        return -errno;
    }

    *tiling_mode = set_tiling.tiling_mode;
    return 0;
}

static int
dri_gem_flink(dri_bo *bo, uint32_t *name)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_gem_flink flink;
    int ret;

    flink.handle = bo_gem->gem_handle;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
    if (ret != 0)
        return -errno;

    *name = flink.name;
    return 0;
}

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
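flink is the cross-process half of buffer sharing: one process publishes a global name, and another turns that name back into a local handle. Here is a sketch of the consuming side, using the raw GEM ioctl rather than anything added in this commit:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: open a GEM object from a flink name in another process.
 * fd is that process's own DRM file descriptor. */
static int
open_bo_by_name(int fd, uint32_t name, uint32_t *handle)
{
    struct drm_gem_open open_arg = { .name = name };

    if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg) != 0)
        return -errno;

    *handle = open_arg.handle;  /* object size comes back in open_arg.size */
    return 0;
}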
@@ -832,6 +907,10 @@ intel_bufmgr_gem_init(int fd, int batch_size)
    bufmgr_gem->bufmgr.debug = 0;
    bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
    bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;
    bufmgr_gem->intel_bufmgr.pin = dri_gem_pin;
    bufmgr_gem->intel_bufmgr.unpin = dri_gem_unpin;
    bufmgr_gem->intel_bufmgr.set_tiling = dri_gem_set_tiling;
    bufmgr_gem->intel_bufmgr.flink = dri_gem_flink;

    /* Initialize the linked lists for BO reuse cache. */
    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
        bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
@@ -851,3 +930,55 @@ intel_bo_emit_reloc(dri_bo *reloc_buf,
    return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
                                    delta, offset, target_buf);
}

int
intel_bo_pin(dri_bo *bo, uint32_t alignment)
{
    struct intel_bufmgr *intel_bufmgr;

    intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
    if (intel_bufmgr->pin)
        return intel_bufmgr->pin(bo, alignment);
    return 0;
}

int
intel_bo_unpin(dri_bo *bo)
{
    struct intel_bufmgr *intel_bufmgr;

    intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
    if (intel_bufmgr->unpin)
        return intel_bufmgr->unpin(bo);
    return 0;
}

int
intel_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
{
    struct intel_bufmgr *intel_bufmgr;

    intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
    if (intel_bufmgr->set_tiling)
        return intel_bufmgr->set_tiling(bo, tiling_mode);

    *tiling_mode = I915_TILING_NONE;
    return 0;
}

int
intel_bo_flink(dri_bo *bo, uint32_t *name)
{
    struct intel_bufmgr *intel_bufmgr;

    intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
    if (intel_bufmgr->flink)
        return intel_bufmgr->flink(bo, name);

    return -ENODEV;
}
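The (bo->bufmgr + 1) cast in these wrappers deserves a note: it assumes the Intel vtable is allocated immediately after the generic dri_bufmgr, so pointer arithmetic past the first struct lands on the second. A sketch of the layout this implies; field names beyond those visible in this diff are guesses:

/* Assumed allocation layout behind bo->bufmgr: */
typedef struct _dri_bufmgr_gem {
    dri_bufmgr bufmgr;                /* generic vtable, at offset 0 */
    struct intel_bufmgr intel_bufmgr; /* Intel extensions, directly after */
    int fd;                           /* ...driver-private state follows */
} dri_bufmgr_gem;

/* bo->bufmgr points at the embedded dri_bufmgr, so
 *   (struct intel_bufmgr *)(bo->bufmgr + 1)
 * steps over it and lands on the intel_bufmgr member. */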

i915_gem.c

@@ -388,6 +388,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                return -EBADF;

        mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
        DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
                 obj, obj->size, args->read_domains, args->write_domain);
#endif
        ret = i915_gem_set_domain(obj, file_priv,
                                  args->read_domains, args->write_domain);
        drm_gem_object_unreference(obj);
@@ -418,8 +422,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        }

#if WATCH_BUF
        DRM_INFO("%s: sw_finish %d (%p)\n",
                 __func__, args->handle, obj);
        DRM_INFO("%s: sw_finish %d (%p %d)\n",
                 __func__, args->handle, obj, obj->size);
#endif
        obj_priv = obj->driver_private;
@@ -664,6 +668,12 @@ i915_gem_retire_request(struct drm_device *dev,
                         __func__, request->seqno, obj);
#endif

                /* If this request flushes the write domain,
                 * clear the write domain from the object now
                 */
                if (request->flush_domains & obj->write_domain)
                        obj->write_domain = 0;

                if (obj->write_domain != 0) {
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.flushing_list);
@@ -763,7 +773,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
                if (dev_priv->mm.wedged)
                        ret = -EIO;

        if (ret)
        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
                          __func__, ret, seqno, i915_get_gem_seqno(dev));
@@ -860,18 +870,18 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret;
        uint32_t write_domain;

        /* If there are writes queued to the buffer, flush and
         * create a new seqno to wait for.
         */
        if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
                uint32_t write_domain = obj->write_domain;
        write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT);
        if (write_domain) {
#if WATCH_BUF
                DRM_INFO("%s: flushing object %p from write domain %08x\n",
                         __func__, obj, write_domain);
#endif
                i915_gem_flush(dev, 0, write_domain);
                obj->write_domain = 0;
                i915_gem_object_move_to_active(obj);
                obj_priv->last_rendering_seqno = i915_add_request(dev,
@@ -881,6 +891,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
                DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
        }

        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
@@ -1079,20 +1090,12 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = find_get_page(mapping, i);
                if (page == NULL || !PageUptodate(page)) {
                        if (page) {
                                page_cache_release(page);
                                page = NULL;
                        }
                        ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
                        if (ret) {
                                DRM_ERROR("shmem_getpage failed: %d\n", ret);
                                i915_gem_object_free_page_list(obj);
                                return ret;
                        }
                        unlock_page(page);
                }
                page = read_mapping_page(mapping, i, NULL);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        DRM_ERROR("read_mapping_page failed: %d\n", ret);
                        i915_gem_object_free_page_list(obj);
                        return ret;
                }
                obj_priv->page_list[i] = page;
        }
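One consequence worth noting: both the old find_get_page/shmem_getpage path and the new read_mapping_page path hand back pages with an elevated reference count, so the teardown path has to drop them. A sketch of what the free side presumably does, hedged since i915_gem_object_free_page_list is outside this hunk:

/* Sketch: releasing a page list populated by read_mapping_page().
 * Each successful lookup took a reference that must be dropped. */
static void
free_page_list_sketch(struct page **page_list, int page_count)
{
        int i;

        for (i = 0; i < page_count; i++)
                if (page_list[i] != NULL)
                        page_cache_release(page_list[i]);
}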