i915/drm: clean up a lot of the i915/drm startup/teardown sequences

When the kernel driver is loaded it now sets up its mappings, ring buffer and hardware status page itself, and it tears that same state down again on unload.

This adds a new map flag, _DRM_DRIVER, which marks a mapping as one the driver will clean up itself, and fixes up the map cleanup paths to honour it.
David Airlie 2007-04-13 14:33:52 +10:00 committed by Dave Airlie
parent 27598bacfd
commit cc471a361f
8 changed files with 96 additions and 185 deletions
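
The new flag effectively makes core map teardown an opt-out: a map created with _DRM_DRIVER set is skipped by the list walk in drm_lastclose(), so the driver is expected to remove it from its own unload hook. A minimal sketch of that pattern, modelled on the i915 hunks below; the foo_* names and drm_foo_private_t are placeholders, not code from this patch, and the sketch assumes the drmP.h environment of this tree:

#include "drmP.h"	/* drm_device_t, drm_addmap(), drm_rmmap(), ... */

/* Hypothetical driver-private structure, mirroring the i915 usage. */
typedef struct drm_foo_private {
	unsigned long mmiobase;
	unsigned long mmiolen;
	drm_local_map_t *mmio_map;
} drm_foo_private_t;

static int foo_driver_load(drm_device_t *dev, unsigned long flags)
{
	drm_foo_private_t *dev_priv = dev->dev_private;
	int ret;

	/* Driver-owned register mapping: drm_lastclose() leaves it alone
	 * because _DRM_DRIVER is set. */
	ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
			 _DRM_REGISTERS, _DRM_READ_ONLY | _DRM_DRIVER,
			 &dev_priv->mmio_map);
	if (ret != 0) {
		DRM_ERROR("Cannot add mapping for MMIO registers\n");
		return ret;
	}
	return 0;
}

static int foo_driver_unload(drm_device_t *dev)
{
	drm_foo_private_t *dev_priv = dev->dev_private;

	/* The core skipped this map at lastclose; tear it down here. */
	drm_rmmap(dev, dev_priv->mmio_map);
	return 0;
}

Maps added without the flag keep the old behaviour and are still removed by the list_for_each_entry_safe() walk in drm_lastclose(), as shown in the drm_drv.c hunk below.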

View File

@ -57,7 +57,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
if (entry->map && map->type == entry->map->type &&
((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
return entry;
}
}
@ -417,6 +417,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
break;
case _DRM_SHM:
vfree(map->handle);
dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
break;
case _DRM_AGP:
case _DRM_SCATTER_GATHER:

View File

@ -147,7 +147,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
int drm_lastclose(drm_device_t * dev)
{
drm_magic_entry_t *pt, *next;
drm_map_list_t *r_list;
drm_map_list_t *r_list, *r_list_tmp;
drm_vma_entry_t *vma, *vma_next;
int i;
@ -238,10 +238,9 @@ int drm_lastclose(drm_device_t * dev)
}
if (dev->maplist) {
while (!list_empty(&dev->maplist->head)) {
struct list_head *list = dev->maplist->head.next;
r_list = list_entry(list, drm_map_list_t, head);
drm_rmmap_locked(dev, r_list->map);
list_for_each_entry_safe(r_list, r_list_tmp, &dev->maplist->head, head) {
if (!(r_list->map->flags & _DRM_DRIVER))
drm_rmmap_locked(dev, r_list->map);
}
}
@ -265,8 +264,7 @@ int drm_lastclose(drm_device_t * dev)
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
drm_dma_takedown(dev);
if (dev->lock.hw_lock) {
dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
if (dev->lock.filp) {
dev->lock.filp = NULL;
wake_up_interruptible(&dev->lock.lock_queue);
}
@ -377,14 +375,6 @@ static void drm_cleanup(drm_device_t * dev)
drm_lastclose(dev);
drm_fence_manager_takedown(dev);
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
}
if (!drm_fb_loaded)
pci_disable_device(dev->pdev);
@ -399,7 +389,7 @@ static void drm_cleanup(drm_device_t * dev)
DRM_DEBUG("mtrr_del=%d\n", retval);
}
drm_bo_driver_finish(dev);
// drm_bo_driver_finish(dev);
if (drm_core_has_AGP(dev) && dev->agp) {
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
@ -408,6 +398,14 @@ static void drm_cleanup(drm_device_t * dev)
if (dev->driver->unload)
dev->driver->unload(dev);
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
}
drm_put_head(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");

View File

@ -132,7 +132,6 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
}
}
if (dev->driver->load)
if ((retcode = dev->driver->load(dev, ent->driver_data)))
goto error_out_unreg;

View File

@ -80,7 +80,6 @@ static struct drm_driver driver = {
DRIVER_IRQ_VBL2,
.load = i915_driver_load,
.unload = i915_driver_unload,
.firstopen = i915_driver_firstopen,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.device_is_agp = i915_driver_device_is_agp,

View File

@ -289,7 +289,8 @@ typedef enum drm_map_flags {
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /**< Removable mapping */
_DRM_REMOVABLE = 0x40, /**< Removable mapping */
_DRM_DRIVER = 0x80 /**< Driver will take care of it */
} drm_map_flags_t;
typedef struct drm_ctx_priv_map {

View File

@ -88,16 +88,6 @@ int i915_dma_cleanup(drm_device_t * dev)
if (dev->irq)
drm_irq_uninstall(dev);
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
dev_priv->hw_status_page = NULL;
dev_priv->dma_status_page = 0;
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
dev_priv->sarea_priv = NULL;
return 0;
}
@ -106,39 +96,6 @@ static int i915_initialize(drm_device_t * dev,
drm_i915_private_t * dev_priv,
drm_i915_init_t * init)
{
DRM_GETSAREA();
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
return DRM_ERR(EINVAL);
}
dev_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
dev_priv->ring.Start = init->ring_start;
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return DRM_ERR(ENOMEM);
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->cpp = init->cpp;
dev_priv->sarea_priv->pf_current_page = 0;
@ -152,27 +109,6 @@ static int i915_initialize(drm_device_t * dev,
*/
dev_priv->allow_batchbuffer = 1;
/* Program Hardware Status Page */
dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
0xffffffff);
if (!dev_priv->status_page_dmah) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return DRM_ERR(ENOMEM);
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
I915_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
//drm_set_desired_modes(dev);
return 0;
}
@ -182,29 +118,6 @@ static int i915_dma_resume(drm_device_t * dev)
DRM_DEBUG("%s\n", __FUNCTION__);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
return DRM_ERR(EINVAL);
}
if (!dev_priv->mmio_map) {
DRM_ERROR("can not find mmio map!\n");
return DRM_ERR(EINVAL);
}
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return DRM_ERR(ENOMEM);
}
/* Program Hardware Status Page */
if (!dev_priv->hw_status_page) {
DRM_ERROR("Can not find hardware status page\n");
return DRM_ERR(EINVAL);
}
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
I915_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
@ -889,23 +802,4 @@ int i915_driver_device_is_agp(drm_device_t * dev)
return 1;
}
int i915_driver_firstopen(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
DRM_DEBUG("\n");
if (!dev_priv->mmio_map) {
ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
_DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio_map);
if (ret != 0) {
DRM_ERROR("Cannot add mapping for MMIO registers\n");
return ret;
}
}
DRM_DEBUG("dev_priv->mmio map is %p\n", dev_priv->mmio_map);
return 0;
}

View File

@ -89,6 +89,7 @@ typedef struct _drm_i915_vbl_swap {
} drm_i915_vbl_swap_t;
typedef struct drm_i915_private {
drm_buffer_object_t *ring_buffer;
drm_local_map_t *sarea;
drm_local_map_t *mmio_map;
@ -922,4 +923,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define IS_MOBILE(pI810) (IS_I830(pI810) || IS_I85X(pI810) || IS_I915GM(pI810) || IS_I945GM(pI810))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif

View File

@ -151,7 +151,7 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
}
ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
_DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio_map);
_DRM_REGISTERS, _DRM_READ_ONLY|_DRM_DRIVER, &dev_priv->mmio_map);
if (ret != 0) {
DRM_ERROR("Cannot add mapping for MMIO registers\n");
return ret;
@ -159,20 +159,13 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
/* prebuild the SAREA */
sareapage = max(SAREA_MAX, PAGE_SIZE);
ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
&map);
ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
&dev_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
return ret;
}
DRM_GETSAREA();
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
return DRM_ERR(EINVAL);
}
init_waitqueue_head(&dev->lock.lock_queue);
/* FIXME: assume sarea_priv is right after SAREA */
@ -187,61 +180,34 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0, dev_priv->baseaddr,
prealloc_size);
/* Allocate scanout buffer and command ring */
/* FIXME: types and other args correct? */
hsize = 1280;
vsize = 800;
bytes_per_pixel = 4;
size = hsize * vsize * bytes_per_pixel;
drm_buffer_object_create(dev, size, drm_bo_type_kernel,
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
DRM_BO_FLAG_MEM_PRIV0 | DRM_BO_FLAG_NO_MOVE,
0, PAGE_SIZE, 0,
&entry);
intel_modeset_init(dev);
fb = drm_framebuffer_create(dev);
if (!fb) {
DRM_ERROR("failed to allocate fb\n");
size = PRIMARY_RINGBUFFER_SIZE;
ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
DRM_BO_FLAG_MEM_PRIV0 |
DRM_BO_FLAG_NO_MOVE,
DRM_BO_HINT_DONT_FENCE, 0x1, 0,
&dev_priv->ring_buffer);
if (ret < 0) {
DRM_ERROR("Unable to allocate ring buffer\n");
return -EINVAL;
}
fb->width = hsize;
fb->height = vsize;
fb->pitch = hsize;
fb->bits_per_pixel = bytes_per_pixel * 8;
fb->depth = bytes_per_pixel * 8;
fb->offset = entry->offset;
fb->bo = entry;
drm_initial_config(dev, fb, false);
drmfb_probe(dev, fb);
#if 0
/* FIXME: command ring needs AGP space, do we own it at this point? */
dev_priv->ring.Start = dev_priv->baseaddr;
dev_priv->ring.End = 128*1024;
dev_priv->ring.Size = 128*1024;
/* remap the buffer object properly */
dev_priv->ring.Start = dev_priv->ring_buffer->offset + dev_priv->baseaddr;
dev_priv->ring.End = dev_priv->ring.Start + size;
dev_priv->ring.Size = size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->ring.map.offset = dev_priv->ring.Start;
dev_priv->ring.map.size = dev_priv->ring.Size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
dev_priv->ring.virtual_start = ioremap((dev_priv->ring.Start), (dev_priv->ring_buffer->mem.num_pages * PAGE_SIZE));
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return DRM_ERR(ENOMEM);
}
DRM_DEBUG("ring start %08X, %08X, %08X\n", dev_priv->ring.Start, dev_priv->ring.virtual_start, dev_priv->ring.Size);
I915_WRITE(LP_RING + RING_HEAD, 0);
I915_WRITE(LP_RING + RING_TAIL, 0);
I915_WRITE(LP_RING + RING_START, dev_priv->ring.Start);
I915_WRITE(LP_RING + RING_LEN, ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
(RING_NO_REPORT | RING_VALID));
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->cpp = 4;
dev_priv->sarea_priv->pf_current_page = 0;
/* We are using separate values as placeholders for mechanisms for
@ -271,8 +237,40 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
I915_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
#endif
#if 1
/* Allocate scanout buffer and command ring */
/* FIXME: types and other args correct? */
hsize = 1280;
vsize = 800;
bytes_per_pixel = 4;
size = hsize * vsize * bytes_per_pixel;
drm_buffer_object_create(dev, size, drm_bo_type_kernel,
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
DRM_BO_FLAG_MEM_PRIV0 | DRM_BO_FLAG_NO_MOVE,
0, PAGE_SIZE, 0,
&entry);
#endif
intel_modeset_init(dev);
#if 1
fb = drm_framebuffer_create(dev);
if (!fb) {
DRM_ERROR("failed to allocate fb\n");
return -EINVAL;
}
fb->width = hsize;
fb->height = vsize;
fb->pitch = hsize;
fb->bits_per_pixel = bytes_per_pixel * 8;
fb->depth = bytes_per_pixel * 8;
fb->offset = entry->offset;
fb->bo = entry;
drm_initial_config(dev, fb, false);
drmfb_probe(dev, fb);
#endif
return 0;
}
@ -281,7 +279,26 @@ int i915_driver_unload(drm_device_t *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_framebuffer *fb;
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
dev_priv->hw_status_page = NULL;
dev_priv->dma_status_page = 0;
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
I915_WRITE(LP_RING + RING_LEN, 0);
iounmap(dev_priv->ring.virtual_start);
drm_bo_driver_finish(dev);
intel_modeset_cleanup(dev);
DRM_DEBUG("%p, %p\n", dev_priv->mmio_map, dev_priv->sarea);
drm_rmmap(dev, dev_priv->mmio_map);
drm_rmmap(dev, dev_priv->sarea);
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
dev->dev_private = NULL;
@ -296,7 +313,6 @@ void i915_driver_lastclose(drm_device_t * dev)
i915_dma_cleanup(dev);
dev_priv->mmio_map = NULL;
}
void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)