Merge branch 'master' of git+ssh://git.freedesktop.org/git/mesa/drm into modesetting-101

Conflicts:

	linux-core/Makefile.kernel
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
commit c07dd80269
Thomas Hellstrom, 2007-11-06 10:01:52 +01:00
10 changed files with 277 additions and 67 deletions

linux-core/Makefile.kernel

@@ -14,7 +14,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_memory_debug.o ati_pcigart.o drm_sman.o \
 		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
 		drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \
-		drm_edid.o drm_modes.o drm_bo_lock.o
+		drm_edid.o drm_modes.o drm_bo_lock.o drm_regman.o
 tdfx-objs := tdfx_drv.o
 r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o

linux-core/drm_agpsupport.c

@@ -498,14 +498,15 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
 #define AGP_REQUIRED_MAJOR 0
 #define AGP_REQUIRED_MINOR 102
-static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) {
+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
+{
 	return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
 }
-static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages,
-			    struct page **pages) {
+static int drm_agp_populate(struct drm_ttm_backend *backend,
+			    unsigned long num_pages, struct page **pages)
+{
 	struct drm_agp_ttm_backend *agp_be =
 		container_of(backend, struct drm_agp_ttm_backend, backend);
 	struct page **cur_page, **last_page = pages + num_pages;
@@ -521,15 +522,14 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p
 	mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
 #endif
 	if (!mem) {
-		drm_free_memctl(num_pages *sizeof(void *));
+		drm_free_memctl(num_pages * sizeof(void *));
 		return -1;
 	}
 	DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
 	mem->page_count = 0;
-	for (cur_page = pages; cur_page < last_page; ++cur_page) {
+	for (cur_page = pages; cur_page < last_page; ++cur_page)
 		mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
-	}
 	agp_be->mem = mem;
 	return 0;
 }
@@ -551,17 +551,17 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
 	mem->type = AGP_USER_CACHED_MEMORY;
 	ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
-	if (ret) {
+	if (ret)
 		DRM_ERROR("AGP Bind memory failed\n");
-	}
 	DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
 			DRM_BE_FLAG_BOUND_CACHED : 0,
 			DRM_BE_FLAG_BOUND_CACHED);
 	return ret;
 }
-static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
+{
 	struct drm_agp_ttm_backend *agp_be =
 		container_of(backend, struct drm_agp_ttm_backend, backend);
@@ -572,8 +572,8 @@ static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
 	return 0;
 }
-static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) {
+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
+{
 	struct drm_agp_ttm_backend *agp_be =
 		container_of(backend, struct drm_agp_ttm_backend, backend);
 	DRM_AGP_MEM *mem = agp_be->mem;
@@ -583,29 +583,27 @@ static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) {
 		unsigned long num_pages = mem->page_count;
 		backend->func->unbind(backend);
 		agp_free_memory(mem);
-		drm_free_memctl(num_pages *sizeof(void *));
+		drm_free_memctl(num_pages * sizeof(void *));
 	}
 	agp_be->mem = NULL;
 }
-static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) {
+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
+{
 	struct drm_agp_ttm_backend *agp_be;
 	if (backend) {
 		DRM_DEBUG("drm_agp_destroy_ttm\n");
 		agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
 		if (agp_be) {
-			if (agp_be->mem) {
+			if (agp_be->mem)
 				backend->func->clear(backend);
-			}
 			drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);
 		}
 	}
 }
-static struct drm_ttm_backend_func agp_ttm_backend =
-{
+static struct drm_ttm_backend_func agp_ttm_backend = {
 	.needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
 	.populate = drm_agp_populate,
 	.clear = drm_agp_clear_ttm,
@@ -647,7 +645,6 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
 	agp_be->bridge = dev->agp->bridge;
 	agp_be->populated = FALSE;
 	agp_be->backend.func = &agp_ttm_backend;
-//	agp_be->backend.mem_type = DRM_BO_MEM_TT;
 	agp_be->backend.dev = dev;
 	return &agp_be->backend;

linux-core/drm_bo.c

@@ -1331,16 +1331,25 @@ int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
 	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
 out_unlock:
+	mutex_lock(&dev->struct_mutex);
 	if (ret || !move_unfenced) {
-		mutex_lock(&dev->struct_mutex);
 		if (mem.mm_node) {
 			if (mem.mm_node != bo->pinned_node)
 				drm_mm_put_block(mem.mm_node);
 			mem.mm_node = NULL;
 		}
-		mutex_unlock(&dev->struct_mutex);
+		drm_bo_add_to_lru(bo);
+		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+			DRM_WAKEUP(&bo->event_queue);
+			DRM_FLAG_MASKED(bo->priv_flags, 0,
+					_DRM_BO_FLAG_UNFENCED);
+		}
+	} else {
+		list_add_tail(&bo->lru, &bm->unfenced);
+		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+				_DRM_BO_FLAG_UNFENCED);
 	}
+	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&bm->evict_mutex);
 	return ret;
 }
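In short, the tail of drm_bo_move_buffer now holds dev->struct_mutex across both the mm_node cleanup and the buffer's list transition, and it also re-queues the buffer (back onto the LRU, waking any _DRM_BO_FLAG_UNFENCED waiters, or onto bm->unfenced when the move is left unfenced) where the old code only released the memory node under the lock.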

linux-core/drm_regman.c (new file, 205 lines)

@@ -0,0 +1,205 @@
/**************************************************************************
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * An allocate-fence manager implementation intended for sets of base-registers
 * or tiling-registers.
 */

#include "drmP.h"

/*
 * Allocate a compatible register and put it on the unfenced list.
 */

int drm_regs_alloc(struct drm_reg_manager *manager,
		   const void *data,
		   uint32_t fence_class,
		   uint32_t fence_type,
		   int interruptible, int no_wait, struct drm_reg **reg)
{
	struct drm_reg *entry, *next_entry;
	int ret;

	*reg = NULL;

	/*
	 * Search the unfenced list.
	 */

	list_for_each_entry(entry, &manager->unfenced, head) {
		if (manager->reg_reusable(entry, data)) {
			entry->new_fence_type |= fence_type;
			goto out;
		}
	}

	/*
	 * Search the lru list.
	 */

	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
		struct drm_fence_object *fence = entry->fence;
		if (fence->fence_class == fence_class &&
		    (entry->fence_type & fence_type) == entry->fence_type &&
		    manager->reg_reusable(entry, data)) {
			list_del(&entry->head);
			entry->new_fence_type = fence_type;
			list_add_tail(&entry->head, &manager->unfenced);
			goto out;
		}
	}

	/*
	 * Search the free list.
	 */

	list_for_each_entry(entry, &manager->free, head) {
		list_del(&entry->head);
		entry->new_fence_type = fence_type;
		list_add_tail(&entry->head, &manager->unfenced);
		goto out;
	}

	if (no_wait)
		return -EBUSY;

	/*
	 * Go back to the lru list and try to expire fences.
	 */

	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
		BUG_ON(!entry->fence);
		ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
					    entry->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(&entry->fence);
		list_del(&entry->head);
		entry->new_fence_type = fence_type;
		list_add_tail(&entry->head, &manager->unfenced);
		goto out;
	}

	/*
	 * Oops. All registers are used up :(.
	 */

	return -EBUSY;
out:
	*reg = entry;
	return 0;
}
EXPORT_SYMBOL(drm_regs_alloc);

void drm_regs_fence(struct drm_reg_manager *manager,
		    struct drm_fence_object *fence)
{
	struct drm_reg *entry;
	struct drm_reg *next_entry;

	if (!fence) {
		/*
		 * Old fence (if any) is still valid.
		 * Put back on free and lru lists.
		 */
		list_for_each_entry_safe_reverse(entry, next_entry,
						 &manager->unfenced, head) {
			list_del(&entry->head);
			list_add(&entry->head, (entry->fence) ?
				 &manager->lru : &manager->free);
		}
	} else {
		/*
		 * Fence with a new fence and put on lru list.
		 */
		list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
					 head) {
			list_del(&entry->head);
			if (entry->fence)
				drm_fence_usage_deref_unlocked(&entry->fence);
			drm_fence_reference_unlocked(&entry->fence, fence);

			entry->fence_type = entry->new_fence_type;
			BUG_ON((entry->fence_type & fence->type) !=
			       entry->fence_type);

			list_add_tail(&entry->head, &manager->lru);
		}
	}
}
EXPORT_SYMBOL(drm_regs_fence);

void drm_regs_free(struct drm_reg_manager *manager)
{
	struct drm_reg *entry;
	struct drm_reg *next_entry;

	drm_regs_fence(manager, NULL);

	list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
		list_del(&entry->head);
		manager->reg_destroy(entry);
	}

	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
		(void)drm_fence_object_wait(entry->fence, 1, 1,
					    entry->fence_type);
		list_del(&entry->head);
		drm_fence_usage_deref_unlocked(&entry->fence);
		manager->reg_destroy(entry);
	}
}
EXPORT_SYMBOL(drm_regs_free);

void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
{
	reg->fence = NULL;
	list_add_tail(&reg->head, &manager->free);
}
EXPORT_SYMBOL(drm_regs_add);

void drm_regs_init(struct drm_reg_manager *manager,
		   int (*reg_reusable) (const struct drm_reg *, const void *),
		   void (*reg_destroy) (struct drm_reg *))
{
	INIT_LIST_HEAD(&manager->free);
	INIT_LIST_HEAD(&manager->lru);
	INIT_LIST_HEAD(&manager->unfenced);
	manager->reg_reusable = reg_reusable;
	manager->reg_destroy = reg_destroy;
}
EXPORT_SYMBOL(drm_regs_init);
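A usage note, since the new manager is only wired into the build here: the sketch below shows how a driver might drive the drm_regs_* API for a small set of tiling registers. It is a minimal, hypothetical example; the my_* names, the pitch-based reuse test, and the use of kzalloc/kfree are assumptions, while the entry points and the embedded struct drm_reg come from drm_regman.c above.

#include "drmP.h"

/* Hypothetical per-register wrapper with struct drm_reg embedded. */
struct my_tiling_reg {
	struct drm_reg base;	/* manager bookkeeping (list head, fence) */
	int hw_index;		/* which hardware register this entry owns */
	uint32_t pitch;		/* currently programmed pitch: our reuse key */
};

/* A register can be reused if it already holds the requested pitch. */
static int my_reg_reusable(const struct drm_reg *reg, const void *data)
{
	const struct my_tiling_reg *tr =
		container_of(reg, struct my_tiling_reg, base);

	return tr->pitch == *(const uint32_t *)data;
}

static void my_reg_destroy(struct drm_reg *reg)
{
	kfree(container_of(reg, struct my_tiling_reg, base));
}

/* Driver init: hand the whole hardware set to the manager's free list. */
static int my_tiling_init(struct drm_reg_manager *manager, int num_regs)
{
	int i;

	drm_regs_init(manager, my_reg_reusable, my_reg_destroy);
	for (i = 0; i < num_regs; ++i) {
		struct my_tiling_reg *tr = kzalloc(sizeof(*tr), GFP_KERNEL);

		if (!tr)
			return -ENOMEM;
		tr->hw_index = i;
		drm_regs_add(manager, &tr->base);
	}
	return 0;
}

/*
 * Command submission: allocate a compatible register (sleeping on old
 * fences when everything is busy), program it, emit the batch, then fence
 * all unfenced registers in one go.
 */
static int my_submit(struct drm_reg_manager *manager, uint32_t pitch,
		     struct drm_fence_object *fence)
{
	struct drm_reg *reg;
	int ret;

	ret = drm_regs_alloc(manager, &pitch, fence->fence_class,
			     DRM_FENCE_TYPE_EXE, 1 /* interruptible */,
			     0 /* !no_wait */, &reg);
	if (ret)
		return ret;

	/* ... program container_of(reg, ...)->hw_index, emit commands ... */

	drm_regs_fence(manager, fence);	/* unfenced regs move to the lru */
	return 0;
}

On an aborted submission, drm_regs_fence(manager, NULL) instead puts the unfenced registers back on the free or lru lists without touching their old fences.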

linux-core/i915_buffer.c

@@ -33,14 +33,14 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
-struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev)
+struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
 {
 	return drm_agp_init_ttm(dev);
 }
 int i915_fence_types(struct drm_buffer_object *bo,
-		     uint32_t * fclass,
-		     uint32_t * type)
+		     uint32_t *fclass,
+		     uint32_t *type)
 {
 	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
 		*type = 3;
@@ -49,7 +49,7 @@ int i915_fence_types(struct drm_buffer_object *bo,
 	return 0;
 }
-int i915_invalidate_caches(struct drm_device * dev, uint64_t flags)
+int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
 {
 	/*
 	 * FIXME: Only emit once per batchbuffer submission.
@@ -65,8 +65,8 @@ int i915_invalidate_caches(struct drm_device * dev, uint64_t flags)
 	return i915_emit_mi_flush(dev, flush_cmd);
 }
-int i915_init_mem_type(struct drm_device * dev, uint32_t type,
-		       struct drm_mem_type_manager * man)
+int i915_init_mem_type(struct drm_device *dev, uint32_t type,
+		       struct drm_mem_type_manager *man)
 {
 	switch (type) {
 	case DRM_BO_MEM_LOCAL:
@@ -229,25 +229,24 @@ out_cleanup:
 #endif
 /*
- * Disable i915_move_flip for now, since we can't guarantee that the hardware lock
- * is held here. To re-enable we need to make sure either
+ * Disable i915_move_flip for now, since we can't guarantee that the hardware
+ * lock is held here. To re-enable we need to make sure either
  * a) The X server is using DRM to submit commands to the ring, or
- * b) DRM can use the HP ring for these blits. This means i915 needs to implement
- * a new ring submission mechanism and fence class.
+ * b) DRM can use the HP ring for these blits. This means i915 needs to
+ *    implement a new ring submission mechanism and fence class.
  */
-int i915_move(struct drm_buffer_object * bo,
-	      int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+int i915_move(struct drm_buffer_object *bo,
+	      int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
 {
 	struct drm_bo_mem_reg *old_mem = &bo->mem;
 	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
 		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
-		if (0 /*i915_move_flip(bo, evict, no_wait, new_mem)*/)
+		if (0) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	} else {
-		if (0 /*i915_move_blit(bo, evict, no_wait, new_mem)*/)
+		if (0) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	}
 	return 0;
@@ -262,7 +261,7 @@ static inline void clflush(volatile void *__p)
 static inline void drm_cache_flush_addr(void *virt)
 {
-	int i;
+	int i;
 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
 		clflush(virt+i);

linux-core/i915_compat.c

@@ -2,6 +2,9 @@
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+#include "i915_drm.h"
+#include "i915_drv.h"
 #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
 #define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
 #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
@@ -13,17 +16,6 @@
 #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
 #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
-#define IS_I965 (agp_dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
-		 agp_dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
-		 agp_dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
-		 agp_dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
-		 agp_dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
-		 agp_dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-#define IS_G33 (agp_dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
-		agp_dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-		agp_dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
 #define I915_IFPADDR 0x60
 #define I965_IFPADDR 0x70
@@ -109,11 +101,15 @@ void intel_init_chipset_flush_compat(struct drm_device *dev)
 {
 	struct pci_dev *agp_dev = dev->agp->agp_info.device;
+	/* not flush on i8xx */
+	if (!IS_I9XX(dev))
+		return;
 	intel_private.ifp_resource.name = "GMCH IFPBAR";
 	intel_private.ifp_resource.flags = IORESOURCE_MEM;
 	/* Setup chipset flush for 915 */
-	if (IS_I965 || IS_G33) {
+	if (IS_I965G(dev) || IS_G33(dev)) {
 		intel_i965_g33_setup_chipset_flush(agp_dev);
 	} else {
 		intel_i915_setup_chipset_flush(agp_dev);
@@ -128,6 +124,10 @@ void intel_init_chipset_flush_compat(struct drm_device *dev)
 void intel_fini_chipset_flush_compat(struct drm_device *dev)
 {
+	/* not flush on i8xx */
+	if (!IS_I9XX(dev))
+		return;
 	iounmap(intel_private.flush_page);
 	release_resource(&intel_private.ifp_resource);
 }

linux-core/i915_fence.c

@@ -38,7 +38,7 @@
  * Implements an intel sync flush operation.
  */
-static void i915_perform_flush(struct drm_device * dev)
+static void i915_perform_flush(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 	struct drm_fence_manager *fm = &dev->fm;
@@ -63,7 +63,7 @@ static void i915_perform_flush(struct drm_device * dev)
 		diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
 		if (diff < driver->wrap_diff && diff != 0) {
-			drm_fence_handler(dev, 0, sequence,
+			drm_fence_handler(dev, 0, sequence,
 					  DRM_FENCE_TYPE_EXE, 0);
 		}
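One detail worth calling out in the hunk above: the sequence comparison is wrap-safe because the subtraction is done modulo the breadcrumb width. Below is a small standalone sketch of the same arithmetic; the 24-bit mask and the half-range wrap threshold are assumptions for the demo, the real BREADCRUMB_MASK and driver->wrap_diff come from the i915 fence driver.

#include <stdint.h>
#include <stdio.h>

#define BREADCRUMB_BITS 24			/* assumed width for the demo */
#define MASK ((1u << BREADCRUMB_BITS) - 1)
#define WRAP_DIFF (1u << (BREADCRUMB_BITS - 1))	/* assumed: half the range */

int main(void)
{
	uint32_t last_exe_flush = 0x00fffffeu;	/* flushed just before wrap */
	uint32_t sequence = 0x00000003u;	/* counter has since wrapped */
	uint32_t diff = (sequence - last_exe_flush) & MASK;

	/*
	 * diff is 5: a small positive distance, so "sequence" is newer than
	 * "last_exe_flush" even though it is numerically smaller. Distances
	 * of WRAP_DIFF or more are treated as stale/backward and ignored.
	 */
	if (diff != 0 && diff < WRAP_DIFF)
		printf("report fences up to 0x%06x (diff = %u)\n",
		       sequence, diff);
	return 0;
}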
@@ -110,7 +110,7 @@ static void i915_perform_flush(struct drm_device * dev)
 }
-void i915_poke_flush(struct drm_device * dev, uint32_t class)
+void i915_poke_flush(struct drm_device *dev, uint32_t class)
 {
 	struct drm_fence_manager *fm = &dev->fm;
 	unsigned long flags;
@@ -120,8 +120,9 @@ void i915_poke_flush(struct drm_device * dev, uint32_t class)
 	write_unlock_irqrestore(&fm->lock, flags);
 }
-int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
-			     uint32_t * sequence, uint32_t * native_type)
+int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
+			     uint32_t flags, uint32_t *sequence,
+			     uint32_t *native_type)
 {
 	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 	if (!dev_priv)
@@ -136,7 +137,7 @@ int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t f
 	return 0;
 }
-void i915_fence_handler(struct drm_device * dev)
+void i915_fence_handler(struct drm_device *dev)
 {
 	struct drm_fence_manager *fm = &dev->fm;

shared-core/i915_dma.c

@@ -1312,4 +1312,3 @@ int i915_driver_device_is_agp(struct drm_device * dev)
 	return 1;
 }
-

shared-core/i915_drv.h

@@ -316,7 +316,7 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f
 /* i915_buffer.c */
 extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
 extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
-			    uint32_t *type);
+			    uint32_t *type);
 extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
 			      struct drm_mem_type_manager *man);
@@ -1288,7 +1288,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 				 (dev)->pci_device == 0x29D2)
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
-		      IS_I945GM(dev) || IS_I965G(dev))
+		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
 			IS_I945GM(dev) || IS_I965GM(dev))
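This addition appears to pair with the i915_compat.c change above: the new early returns there gate on IS_I9XX(dev), so G33-family chipsets must count as 9xx parts for the chipset-flush setup to run at all.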

shared-core/i915_irq.c

@@ -365,7 +365,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	return IRQ_HANDLED;
 }
-int i915_emit_irq(struct drm_device * dev)
+int i915_emit_irq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -383,8 +383,6 @@ int i915_emit_irq(struct drm_device * dev)
 	ADVANCE_LP_RING();
 	return dev_priv->counter;
 }
-
-
 void i915_user_irq_on(struct drm_i915_private *dev_priv)
@@ -484,7 +482,8 @@ int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
 /* Needs the lock as it touches the ring.
  */
-int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int i915_irq_emit(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_irq_emit *emit = data;
@@ -761,13 +760,14 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
 	 * Initialize the hardware status page IRQ location.
 	 */
-	I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21));
+	I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
 }
 void i915_driver_irq_uninstall(struct drm_device * dev)
 {
 	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+	u16 temp;
 	if (!dev_priv)
 		return;