radeon: remove TTM from an earlier merge
parent 1e66322633
commit 75c9e0d346
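This backs out the TTM bits that came in with an earlier merge: the radeon TTM buffer and fence glue (radeon_buffer.c, radeon_fence.c) is deleted again, the ati_pcigart TTM backend is dropped, and radeon_irq.c returns to the old swi_queue/swi_emitted breadcrumb scheme.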
@@ -33,7 +33,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
		nv04_graph.o nv10_graph.o nv20_graph.o \
		nv40_graph.o nv50_graph.o \
		nv04_instmem.o nv50_instmem.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_fence.o radeon_buffer.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
radeon_ms-objs := radeon_ms_drv.o radeon_ms_drm.o radeon_ms_family.o \
		radeon_ms_state.o radeon_ms_bo.o radeon_ms_irq.o \
		radeon_ms_bus.o radeon_ms_fence.o \

@@ -35,45 +35,6 @@

# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */

static __inline__ void insert_page_into_table(struct drm_ati_pcigart_info *info, u32 page_base, u32 *pci_gart)
{
	switch(info->gart_reg_if) {
	case DRM_ATI_GART_IGP:
		*pci_gart = cpu_to_le32((page_base) | 0xc);
		break;
	case DRM_ATI_GART_PCIE:
		*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
		break;
	default:
	case DRM_ATI_GART_PCI:
		*pci_gart = cpu_to_le32(page_base);
		break;
	}
}

static __inline__ u32 get_page_base_from_table(struct drm_ati_pcigart_info *info, u32 *pci_gart)
{
	u32 retval;
	switch(info->gart_reg_if) {
	case DRM_ATI_GART_IGP:
		retval = *pci_gart;
		retval &= ~0xc;
		break;
	case DRM_ATI_GART_PCIE:
		retval = *pci_gart;
		retval &= ~0xc;
		retval <<= 8;
		break;
	default:
	case DRM_ATI_GART_PCI:
		retval = *pci_gart;
		break;
	}
	return retval;
}



static void *drm_ati_alloc_pcigart_table(int order)
{
	unsigned long address;
@@ -246,7 +207,18 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
	page_base = (u32) entry->busaddr[i];

	for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
		insert_page_into_table(gart_info, page_base, pci_gart);
		switch(gart_info->gart_reg_if) {
		case DRM_ATI_GART_IGP:
			*pci_gart = cpu_to_le32((page_base) | 0xc);
			break;
		case DRM_ATI_GART_PCIE:
			*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
			break;
		default:
		case DRM_ATI_GART_PCI:
			*pci_gart = cpu_to_le32(page_base);
			break;
		}
		pci_gart++;
		page_base += ATI_PCIGART_PAGE_SIZE;
	}
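The hunk above re-inlines the GART entry encoding that the removed insert_page_into_table() helper had factored out. As a minimal stand-alone sketch of that encoding (the enum and function names here are illustrative, not from the tree): IGP and PCIe entries carry valid bits in the low nibble (0xc), and PCIe additionally stores the page's bus address shifted right by 8.

```c
#include <stdint.h>

enum gart_if { GART_PCI, GART_PCIE, GART_IGP };

/* Encode one 32-bit GART page-table entry the way the switch above does;
 * the caller still byte-swaps with cpu_to_le32() before writing it out. */
uint32_t gart_encode_entry(enum gart_if gif, uint32_t page_base)
{
	switch (gif) {
	case GART_IGP:
		return page_base | 0xc;        /* valid bits in the low nibble */
	case GART_PCIE:
		return (page_base >> 8) | 0xc; /* address stored >> 8, plus valid bits */
	case GART_PCI:
	default:
		return page_base;              /* plain PCI GART: raw bus address */
	}
}
```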
@@ -266,147 +238,3 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
	return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);

static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
{
	return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}

static int ati_pcigart_populate(struct drm_ttm_backend *backend,
				unsigned long num_pages,
				struct page **pages,
				struct page *dummy_page)
{
	ati_pcigart_ttm_backend_t *atipci_be =
		container_of(backend, ati_pcigart_ttm_backend_t, backend);

	DRM_ERROR("%ld\n", num_pages);
	atipci_be->pages = pages;
	atipci_be->num_pages = num_pages;
	atipci_be->populated = 1;
	return 0;
}

static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
				struct drm_bo_mem_reg *bo_mem)
{
	ati_pcigart_ttm_backend_t *atipci_be =
		container_of(backend, ati_pcigart_ttm_backend_t, backend);
	off_t j;
	int i;
	struct drm_ati_pcigart_info *info = atipci_be->gart_info;
	u32 *pci_gart;
	u32 page_base;
	unsigned long offset = bo_mem->mm_node->start;
	pci_gart = info->addr;

	DRM_ERROR("Offset is %08lX\n", bo_mem->mm_node->start);
	j = offset;
	while (j < (offset + atipci_be->num_pages)) {
		if (get_page_base_from_table(info, pci_gart+j))
			return -EBUSY;
		j++;
	}

	for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
		struct page *cur_page = atipci_be->pages[i];
		/* write value */
		page_base = page_to_phys(cur_page);
		insert_page_into_table(info, page_base, pci_gart + j);
	}

#if defined(__i386__) || defined(__x86_64__)
	wbinvd();
#else
	mb();
#endif

	atipci_be->gart_flush_fn(atipci_be->dev);

	atipci_be->bound = 1;
	atipci_be->offset = offset;
	/* need to traverse table and add entries */
	DRM_DEBUG("\n");
	return 0;
}

static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
{
	ati_pcigart_ttm_backend_t *atipci_be =
		container_of(backend, ati_pcigart_ttm_backend_t, backend);
	struct drm_ati_pcigart_info *info = atipci_be->gart_info;
	unsigned long offset = atipci_be->offset;
	int i;
	off_t j;
	u32 *pci_gart = info->addr;

	DRM_DEBUG("\n");

	if (atipci_be->bound != 1)
		return -EINVAL;

	for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
		*(pci_gart + j) = 0;
	}
	atipci_be->gart_flush_fn(atipci_be->dev);
	atipci_be->bound = 0;
	atipci_be->offset = 0;
	return 0;
}

static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend)
{
	ati_pcigart_ttm_backend_t *atipci_be =
		container_of(backend, ati_pcigart_ttm_backend_t, backend);

	DRM_DEBUG("\n");
	if (atipci_be->pages) {
		backend->func->unbind(backend);
		atipci_be->pages = NULL;

	}
	atipci_be->num_pages = 0;
}

static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend)
{
	ati_pcigart_ttm_backend_t *atipci_be;
	if (backend) {
		DRM_DEBUG("\n");
		atipci_be = container_of(backend, ati_pcigart_ttm_backend_t, backend);
		if (atipci_be) {
			if (atipci_be->pages) {
				backend->func->clear(backend);
			}
			drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM);
		}
	}
}

static struct drm_ttm_backend_func ati_pcigart_ttm_backend =
{
	.needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust,
	.populate = ati_pcigart_populate,
	.clear = ati_pcigart_clear_ttm,
	.bind = ati_pcigart_bind_ttm,
	.unbind = ati_pcigart_unbind_ttm,
	.destroy = ati_pcigart_destroy_ttm,
};

struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
{
	ati_pcigart_ttm_backend_t *atipci_be;

	atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM);
	if (!atipci_be)
		return NULL;

	atipci_be->populated = 0;
	atipci_be->backend.func = &ati_pcigart_ttm_backend;
	atipci_be->gart_info = info;
	atipci_be->gart_flush_fn = gart_flush_fn;
	atipci_be->dev = dev;

	return &atipci_be->backend;
}
EXPORT_SYMBOL(ati_pcigart_init_ttm);
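For context, the backend removed above implemented the drm_ttm_backend contract on top of the PCI GART: populate() stashes the page array, bind() first walks the target range and bails with -EBUSY if any entry is already live (via get_page_base_from_table()), then writes each page's physical address into the table and flushes CPU caches (wbinvd on x86, a memory barrier elsewhere) and the chip's GART TLB through gart_flush_fn; unbind() zeroes the range and flushes again.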
@@ -1273,7 +1273,6 @@ extern int drm_sg_free(struct drm_device *dev, void *data,
/* ATI PCIGART support (ati_pcigart.h) */
extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));

extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
				       size_t align, dma_addr_t maxaddr);

@@ -1,263 +0,0 @@
/**************************************************************************
 *
 * Copyright 2007 Dave Airlie
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Dave Airlie <airlied@linux.ie>
 */

#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if(dev_priv->flags & RADEON_IS_AGP)
		return drm_agp_init_ttm(dev);
	else
		return ati_pcigart_init_ttm(dev, &dev_priv->gart_info, radeon_gart_flush);
}

int radeon_fence_types(struct drm_buffer_object *bo, uint32_t * class, uint32_t * type)
{
	*class = 0;
	if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
		*type = 3;
	else
		*type = 1;
	return 0;
}

int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_FLUSH_ZCACHE();
	ADVANCE_RING();
	return 0;
}

uint64_t radeon_evict_flags(struct drm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_TT:
		return DRM_BO_FLAG_MEM_LOCAL;
	case DRM_BO_MEM_VRAM:
		if (bo->mem.num_pages > 128)
			return DRM_BO_MEM_TT;
		else
			return DRM_BO_MEM_LOCAL;
	default:
		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
	}
}

int radeon_init_mem_type(struct drm_device * dev, uint32_t type,
			 struct drm_mem_type_manager * man)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	switch (type) {
	case DRM_BO_MEM_LOCAL:
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
			_DRM_FLAG_MEMTYPE_CACHED;
		man->drm_bus_maptype = 0;
		break;
	case DRM_BO_MEM_VRAM:
		man->flags = _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
		man->io_addr = NULL;
		man->drm_bus_maptype = _DRM_FRAME_BUFFER;
		man->io_offset = drm_get_resource_start(dev, 0);
		man->io_size = drm_get_resource_len(dev, 0);
		break;
	case DRM_BO_MEM_TT:
		if (dev_priv->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(dev) && dev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = dev->agp->agp_info.aper_base;
			man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
			man->io_addr = NULL;
			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
				_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
			man->drm_bus_maptype = _DRM_AGP;
		} else {
			man->io_offset = dev_priv->gart_vm_start;
			man->io_size = dev_priv->gart_size;
			man->io_addr = NULL;
			man->flags = _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CMA;
			man->drm_bus_maptype = _DRM_SCATTER_GATHER;
		}
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void radeon_emit_copy_blit(struct drm_device * dev,
				  uint32_t src_offset,
				  uint32_t dst_offset,
				  uint32_t pages, int direction)
{
	uint32_t cur_pages;
	uint32_t stride = PAGE_SIZE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t format, height;
	RING_LOCALS;

	if (!dev_priv)
		return;

	/* 32-bit copy format */
	format = RADEON_COLOR_FORMAT_ARGB8888;

	/* radeon limited to 16k stride */
	stride &= 0x3fff;
	while(pages > 0) {
		cur_pages = pages;
		if (cur_pages > 2048)
			cur_pages = 2048;
		pages -= cur_pages;

		/* needs verification */
		BEGIN_RING(7);
		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (format << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
		if (direction) {
			OUT_RING((stride << 22) | (src_offset >> 10));
			OUT_RING((stride << 22) | (dst_offset >> 10));
		} else {
			OUT_RING((stride << 22) | (dst_offset >> 10));
			OUT_RING((stride << 22) | (src_offset >> 10));
		}
		OUT_RING(0);
		OUT_RING(pages); /* x - y */
		OUT_RING((stride << 16) | cur_pages);
		ADVANCE_RING();
	}

	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();

	return;
}

static int radeon_move_blit(struct drm_buffer_object * bo,
			    int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	int dir = 0;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = 1;
	}

	radeon_emit_copy_blit(bo->dev,
			      old_mem->mm_node->start << PAGE_SHIFT,
			      new_mem->mm_node->start << PAGE_SHIFT,
			      new_mem->num_pages, dir);


	return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
					 DRM_FENCE_TYPE_EXE |
					 DRM_RADEON_FENCE_TYPE_RW,
					 DRM_RADEON_FENCE_FLAG_FLUSHED, new_mem);
}

static int radeon_move_flip(struct drm_buffer_object * bo,
			    int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg tmp_mem;
	int ret;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	tmp_mem.flags = DRM_BO_FLAG_MEM_TT |
		DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;

	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
	if (ret)
		return ret;

	ret = drm_ttm_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = radeon_move_blit(bo, 1, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		mutex_lock(&dev->struct_mutex);
		if (tmp_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}
	return ret;
}

int radeon_move(struct drm_buffer_object * bo,
		int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;

	DRM_DEBUG("\n");
	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
		if (radeon_move_flip(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else {
		if (radeon_move_blit(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	return 0;
}
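The move logic in the deleted radeon_buffer.c picked its strategy by placement: anything leaving system memory used drm_bo_move_memcpy(); a move into system memory went through radeon_move_flip(), which first binds the buffer into a cached TT placement so the blitter can reach it; every other combination was blitted directly, with memcpy as the fallback whenever the accelerated path failed.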
@@ -56,38 +56,6 @@ static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
};


#ifdef RADEON_HAVE_FENCE
static struct drm_fence_driver radeon_fence_driver = {
	.num_classes = 1,
	.wrap_diff = (1 << 30),
	.flush_diff = (1 << 29),
	.sequence_mask = 0xffffffffU,
	.lazy_capable = 1,
	.emit = radeon_fence_emit_sequence,
	.poke_flush = radeon_poke_flush,
	.has_irq = radeon_fence_has_irq,
};
#endif
#ifdef RADEON_HAVE_BUFFER

static uint32_t radeon_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
static uint32_t radeon_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};

static struct drm_bo_driver radeon_bo_driver = {
	.mem_type_prio = radeon_mem_prios,
	.mem_busy_prio = radeon_busy_prios,
	.num_mem_type_prio = sizeof(radeon_mem_prios)/sizeof(uint32_t),
	.num_mem_busy_prio = sizeof(radeon_busy_prios)/sizeof(uint32_t),
	.create_ttm_backend_entry = radeon_create_ttm_backend_entry,
	.fence_type = radeon_fence_types,
	.invalidate_caches = radeon_invalidate_caches,
	.init_mem_type = radeon_init_mem_type,
	.evict_flags = radeon_evict_flags,
	.move = radeon_move,
};
#endif

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
	.driver_features =
@@ -133,13 +101,6 @@ static struct drm_driver driver = {
	.remove = __devexit_p(drm_cleanup_pci),
	},

#ifdef RADEON_HAVE_FENCE
	.fence_driver = &radeon_fence_driver,
#endif
#ifdef RADEON_HAVE_BUFFER
	.bo_driver = &radeon_bo_driver,
#endif

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,

@@ -1,125 +0,0 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

/*
 * Implements an intel sync flush operation.
 */

static void radeon_perform_flush(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &dev->fm.fence_class[0];
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	uint32_t pending_flush_types = 0;
	uint32_t sequence;

	if (!dev_priv)
		return;

	pending_flush_types = fc->pending_flush |
		((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);

	if (pending_flush_types) {
		sequence = READ_BREADCRUMB(dev_priv);

		drm_fence_handler(dev, 0, sequence, pending_flush_types, 0);
	}

	return;
}

void radeon_poke_flush(struct drm_device * dev, uint32_t class)
{
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long flags;

	if (class != 0)
		return;

	write_lock_irqsave(&fm->lock, flags);
	radeon_perform_flush(dev);
	write_unlock_irqrestore(&fm->lock, flags);
}

int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class,
			       uint32_t flags, uint32_t *sequence,
			       uint32_t *native_type)
{
	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
	RING_LOCALS;

	if (!dev_priv)
		return -EINVAL;

	*native_type = DRM_FENCE_TYPE_EXE;
	if (flags & DRM_RADEON_FENCE_FLAG_FLUSHED) {
		*native_type |= DRM_RADEON_FENCE_TYPE_RW;

		BEGIN_RING(4);

		RADEON_FLUSH_CACHE();
		RADEON_FLUSH_ZCACHE();
		ADVANCE_RING();
	}

	radeon_emit_irq(dev);
	*sequence = (uint32_t) dev_priv->counter;


	return 0;
}

void radeon_fence_handler(struct drm_device * dev)
{
	struct drm_fence_manager *fm = &dev->fm;

	write_lock(&fm->lock);
	radeon_perform_flush(dev);
	write_unlock(&fm->lock);
}

int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
{
	/*
	 * We have an irq that tells us when we have a new breadcrumb.
	 */

	if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
		return 1;

	return 0;
}

@@ -1518,28 +1518,6 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
	}
}

void radeon_gart_flush(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (dev_priv->flags & RADEON_IS_IGPGART) {
		RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
		RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x1);
		RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
		RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x0);
	} else if (dev_priv->flags & RADEON_IS_PCIE) {
		u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
		tmp |= RADEON_PCIE_TX_GART_INVALIDATE_TLB;
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
		tmp &= ~RADEON_PCIE_TX_GART_INVALIDATE_TLB;
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	} else {


	}

}

static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
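The removed radeon_gart_flush() invalidated the on-chip GART TLB in a hardware-specific way: IGP parts pulse the RADEON_IGPGART_FLUSH register (write 1, then 0, with reads around the writes to post them), PCIe parts toggle RADEON_PCIE_TX_GART_INVALIDATE_TLB in PCIE_TX_GART_CNTL, and the plain PCI GART needs no explicit flush (hence the empty else branch).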
@@ -2455,9 +2433,6 @@ int radeon_driver_firstopen(struct drm_device *dev)
	if (ret != 0)
		return ret;

#ifdef RADEON_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}


@@ -104,11 +104,6 @@
#define DRIVER_MINOR 28
#define DRIVER_PATCHLEVEL 0

#if defined(__linux__)
#define RADEON_HAVE_FENCE
#define RADEON_HAVE_BUFFER
#endif

/*
 * Radeon chip families
 */
@@ -296,9 +291,8 @@ typedef struct drm_radeon_private {
	struct mem_block *fb_heap;

	/* SW interrupt */
	wait_queue_head_t irq_queue;
	int counter;

	wait_queue_head_t swi_queue;
	atomic_t swi_emitted;
	int vblank_crtc;
	uint32_t irq_enable_reg;
	int irq_enabled;
@@ -361,7 +355,6 @@ extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_gart_flush(struct drm_device *dev);
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);

extern void radeon_freelist_reset(struct drm_device * dev);
@@ -381,7 +374,6 @@ extern void radeon_mem_release(struct drm_file *file_priv,
/* radeon_irq.c */
extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_emit_irq(struct drm_device * dev);

extern void radeon_do_release(struct drm_device * dev);
extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
@@ -415,30 +407,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
			     struct drm_file *file_priv,
			     drm_radeon_kcmd_buffer_t *cmdbuf);


#ifdef RADEON_HAVE_FENCE
/* i915_fence.c */


extern void radeon_fence_handler(struct drm_device *dev);
extern int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class,
				      uint32_t flags, uint32_t *sequence,
				      uint32_t *native_type);
extern void radeon_poke_flush(struct drm_device *dev, uint32_t class);
extern int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags);
#endif

#ifdef RADEON_HAVE_BUFFER
/* radeon_buffer.c */
extern struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device *dev);
extern int radeon_fence_types(struct drm_buffer_object *bo, uint32_t *class, uint32_t *type);
extern int radeon_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
extern uint64_t radeon_evict_flags(struct drm_buffer_object *bo);
extern int radeon_init_mem_type(struct drm_device * dev, uint32_t type,
				struct drm_mem_type_manager * man);
extern int radeon_move(struct drm_buffer_object * bo,
		       int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
#endif
/* Flags for stats.boxes
 */
#define RADEON_BOX_DMA_IDLE 0x1
@@ -1368,19 +1336,4 @@ do { \
	write &= mask; \
} while (0)

/* Breadcrumb - swi irq */
#define READ_BREADCRUMB(dev_priv) RADEON_READ(RADEON_LAST_SWI_REG)

static inline int radeon_update_breadcrumb(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->sarea_priv->last_fence = ++dev_priv->counter;

	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_fence = dev_priv->counter = 1;

	return dev_priv->counter;
}

#endif /* __RADEON_DRV_H__ */

@@ -128,12 +128,9 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
	stat &= dev_priv->irq_enable_reg;

	/* SW interrupt */
	if (stat & RADEON_SW_INT_TEST) {
		DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef RADEON_HAVE_FENCE
		radeon_fence_handler(dev);
#endif
	}
	if (stat & RADEON_SW_INT_TEST)
		DRM_WAKEUP(&dev_priv->swi_queue);

	/* VBLANK interrupt */
	if (stat & RADEON_CRTC_VBLANK_STAT)
		drm_handle_vblank(dev, 0);
@@ -143,13 +140,14 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
	return IRQ_HANDLED;
}

int radeon_emit_irq(struct drm_device * dev)
static int radeon_emit_irq(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	unsigned int ret;
	RING_LOCALS;

	ret = radeon_update_breadcrumb(dev);
	atomic_inc(&dev_priv->swi_emitted);
	ret = atomic_read(&dev_priv->swi_emitted);

	BEGIN_RING(4);
	OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
@@ -166,13 +164,13 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
	    (drm_radeon_private_t *) dev->dev_private;
	int ret = 0;

	if (READ_BREADCRUMB(dev_priv) >= swi_nr)
	if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
		return 0;

	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= swi_nr);
	DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
		    RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);

	return ret;
}
@@ -260,8 +258,8 @@ int radeon_driver_irq_postinstall(struct drm_device * dev)
	    (drm_radeon_private_t *) dev->dev_private;
	int ret;

	dev_priv->counter = 0;
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
	atomic_set(&dev_priv->swi_emitted, 0);
	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);

	ret = drm_vblank_init(dev, 2);
	if (ret)
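The irq hunks revert to the pre-merge breadcrumb scheme: rather than READ_BREADCRUMB()/radeon_update_breadcrumb() maintaining a counter shared through the SAREA, the driver again hands out sequence numbers from a private atomic (swi_emitted), has the CP write each one to RADEON_LAST_SWI_REG from the ring, and sleeps waiters on swi_queue until the scratch register catches up. A rough user-space model of that emit/wait pairing, with hypothetical names (the real code sleeps on a waitqueue instead of spinning):

```c
#include <stdatomic.h>
#include <stdint.h>

static atomic_uint swi_emitted;        /* last sequence number handed out */
static _Atomic uint32_t last_swi_reg;  /* stands in for RADEON_LAST_SWI_REG */

/* Emit: grab the next sequence number; the GPU (here: some other thread)
 * stores it to the scratch register once the preceding work has drained. */
static unsigned int emit_irq(void)
{
	return atomic_fetch_add(&swi_emitted, 1) + 1;
}

/* Wait: block until the scratch register has reached our number. */
static int wait_irq(unsigned int swi_nr)
{
	while (atomic_load(&last_swi_reg) < swi_nr)
		;	/* the driver uses DRM_WAIT_ON() with a 3s timeout */
	return 0;
}
```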