Merge branch 'modesetting-gem' of ssh://git.freedesktop.org/git/mesa/drm into modesetting-gem

commit 6d59bad8e9
@@ -15,7 +15,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 	drm_hashtab.o drm_mm.o drm_compat.o \
 	drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \
 	drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
-	drm_regman.o drm_vm_nopage_compat.o drm_gem.o
+	drm_regman.o drm_vm_nopage_compat.o drm_gem.o drm_uncached.o
 tdfx-objs := tdfx_drv.o
 r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
@@ -78,6 +78,10 @@
 #define ENCODER_OBJECT_ID_DP_DP501                0x1D
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY         0x1E
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA   0x1F
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1        0x20
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2        0x21
+
+#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
 
 /****************************************************/
 /* Connector Object ID Definition                   */

@@ -118,6 +122,8 @@
 #define GRAPH_OBJECT_ENUM_ID2                     0x02
 #define GRAPH_OBJECT_ENUM_ID3                     0x03
 #define GRAPH_OBJECT_ENUM_ID4                     0x04
+#define GRAPH_OBJECT_ENUM_ID5                     0x05
+#define GRAPH_OBJECT_ENUM_ID6                     0x06
 
 /****************************************************/
 /* Graphics Object ID Bit definition                */

@@ -173,7 +179,7 @@
 #define ENCODER_SI178_ENUM_ID1                    0x2117
 #define ENCODER_MVPU_FPGA_ENUM_ID1                0x2118
 #define ENCODER_INTERNAL_DDI_ENUM_ID1             0x2119
 #define ENCODER_VT1625_ENUM_ID1                   0x211A
 #define ENCODER_HDMI_SI1932_ENUM_ID1              0x211B
 #define ENCODER_ENCODER_DP_AN9801_ENUM_ID1        0x211C
 #define ENCODER_DP_DP501_ENUM_ID1                 0x211D

@@ -323,6 +329,26 @@
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
 
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1        ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2        ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1        ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2        ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/

@@ -453,6 +479,14 @@
                                                   GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
                                                   CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
 
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                  CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                  CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Router Object ID definition - Shared with BIOS   */
 /****************************************************/
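Note: every *_ENUM_ID macro above packs three fields into one graphics-object handle: an object type, an instance enum, and an object ID. A minimal standalone sketch of that packing; the shift and type values are inferred from the constants visible in this diff (ENCODER_SI178_ENUM_ID1 = 0x2117 = type 0x2, enum 0x1, id 0x17), not quoted from the header:

/* objectid_demo.c - reproduce ENCODER_SI178_ENUM_ID1 (0x2117) from its
 * three fields. Shift/type values below are assumptions inferred from
 * the listed constants; verify against the real header before reuse.
 */
#include <stdio.h>

#define OBJECT_ID_SHIFT            0x00	/* assumed */
#define ENUM_ID_SHIFT              0x08	/* assumed */
#define OBJECT_TYPE_SHIFT          0x0C	/* assumed */
#define GRAPH_OBJECT_TYPE_ENCODER  0x2	/* assumed */

static unsigned int pack_object_id(unsigned int type, unsigned int enum_id,
				   unsigned int object_id)
{
	return (type << OBJECT_TYPE_SHIFT) |
	       (enum_id << ENUM_ID_SHIFT) |
	       (object_id << OBJECT_ID_SHIFT);
}

int main(void)
{
	/* expect 0x2117, matching ENCODER_SI178_ENUM_ID1 above */
	printf("0x%04x\n", pack_object_id(GRAPH_OBJECT_TYPE_ENCODER, 0x1, 0x17));
	return 0;
}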
[One file's diff was suppressed because it is too large.]
@@ -1850,6 +1850,7 @@ int drm_bo_driver_finish(struct drm_device *dev)
 		__free_page(bm->dummy_read_page);
 	}
 
+	drm_uncached_fini();
 out:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;

@@ -1869,6 +1870,8 @@ int drm_bo_driver_init(struct drm_device *dev)
 	struct drm_buffer_manager *bm = &dev->bm;
 	int ret = -EINVAL;
 
+	drm_uncached_init();
+
 	bm->dummy_read_page = NULL;
 	mutex_lock(&dev->struct_mutex);
 	if (!driver)
@@ -1423,7 +1423,7 @@ int drm_mode_setcrtc(struct drm_device *dev,
 	set.mode = mode;
 	set.connectors = connector_set;
 	set.num_connectors = crtc_req->count_connectors;
-	set.fb =fb;
+	set.fb = fb;
 	ret = crtc->funcs->set_config(&set);
 
 out:
@@ -683,6 +683,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		if (set->crtc->fb != set->fb)
 			set->crtc->fb = set->fb;
 		crtc_funcs->mode_set_base(set->crtc, set->x, set->y);
+		set->crtc->x = set->x;
+		set->crtc->y = set->y;
 	}
 
 	kfree(save_encoders);
@@ -802,3 +804,30 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 	return 0;
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+void drm_helper_set_connector_dpms(struct drm_connector *connector,
+				   int dpms_mode)
+{
+	int i = 0;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_mode_object *obj;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev,
+					   connector->encoder_ids[i],
+					   DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		encoder_funcs = encoder->helper_private;
+		if (encoder_funcs->dpms)
+			encoder_funcs->dpms(encoder, dpms_mode);
+
+	}
+}
+EXPORT_SYMBOL(drm_helper_set_connector_dpms);

@@ -93,4 +93,6 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, con
 }
 
 extern int drm_helper_resume_force_mode(struct drm_device *dev);
+extern void drm_helper_set_connector_dpms(struct drm_connector *connector,
+					  int dpms_mode);
 #endif
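Note: the new helper walks the connector's encoder_ids table and forwards the requested DPMS level to each encoder's helper dpms hook. A hypothetical driver hook sketching the intended wiring (the radeon hunks further down do exactly this; foo_connector_set_property is an illustrative name, and the 0..3 range check assumes the standard DRM DPMS modes ON/STANDBY/SUSPEND/OFF):

/* Sketch only: hypothetical driver glue, kernel context, not standalone. */
static int foo_connector_set_property(struct drm_connector *connector,
				      struct drm_property *property,
				      uint64_t val)
{
	struct drm_device *dev = connector->dev;

	if (property == dev->mode_config.dpms_property) {
		if (val > 3)	/* 0..3: ON, STANDBY, SUSPEND, OFF (assumed) */
			return -EINVAL;
		drm_helper_set_connector_dpms(connector, val);
	}
	return 0;
}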
@@ -661,6 +661,9 @@ struct drm_bo_lock {
 #define _DRM_FLAG_MEMTYPE_CMA       0x00000010 /* Can't map aperture */
 #define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020 /* Select caching */
 
+#define _DRM_BM_ALLOCATOR_CACHED   0x0
+#define _DRM_BM_ALLOCATOR_UNCACHED 0x1
+
 struct drm_buffer_manager {
 	struct drm_bo_lock bm_lock;
 	struct mutex evict_mutex;

@@ -679,6 +682,7 @@ struct drm_buffer_manager {
 	unsigned long cur_pages;
 	atomic_t count;
 	struct page *dummy_read_page;
+	int allocator_type;
 };
 
 struct drm_bo_driver {

@@ -894,6 +898,15 @@ extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
 			       void **virtual);
 extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
 				void *virtual);
+
+/*
+ * drm_uncached.c
+ */
+extern int drm_uncached_init(void);
+extern void drm_uncached_fini(void);
+extern struct page *drm_get_uncached_page(void);
+extern void drm_put_uncached_page(struct page *page);
+
 #ifdef CONFIG_DEBUG_MUTEXES
 #define DRM_ASSERT_LOCKED(_mutex) \
 	BUG_ON(!mutex_is_locked(_mutex) || \
@@ -120,14 +120,18 @@ static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
 	ttm->pages = NULL;
 }
 
-static struct page *drm_ttm_alloc_page(void)
+static struct page *drm_ttm_alloc_page(struct drm_ttm *ttm)
 {
 	struct page *page;
 
 	if (drm_alloc_memctl(PAGE_SIZE))
 		return NULL;
 
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+	if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
+		page = drm_get_uncached_page();
+	else
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+
 	if (!page) {
 		drm_free_memctl(PAGE_SIZE);
 		return NULL;

@@ -149,6 +153,9 @@ static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
 	struct page **cur_page;
 	int do_tlbflush = 0;
 
+	if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
+		return 0;
+
 	if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
 		return 0;
 

@@ -215,14 +222,18 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages + i;
 		if (*cur_page) {
+			if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
+				drm_put_uncached_page(*cur_page);
+			else {
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
-			ClearPageReserved(*cur_page);
+				ClearPageReserved(*cur_page);
 #endif
-			if (page_count(*cur_page) != 1)
-				DRM_ERROR("Erroneous page count. Leaking pages.\n");
-			if (page_mapped(*cur_page))
-				DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
-			__free_page(*cur_page);
+				if (page_count(*cur_page) != 1)
+					DRM_ERROR("Erroneous page count. Leaking pages.\n");
+				if (page_mapped(*cur_page))
+					DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
+				__free_page(*cur_page);
+			}
 			drm_free_memctl(PAGE_SIZE);
 			--bm->cur_pages;
 		}

@@ -268,7 +279,7 @@ struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
 	struct drm_buffer_manager *bm = &ttm->dev->bm;
 
 	while (NULL == (p = ttm->pages[index])) {
-		p = drm_ttm_alloc_page();
+		p = drm_ttm_alloc_page(ttm);
 		if (!p)
 			return NULL;
 
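Note: with these TTM hooks in place, the page source becomes a per-device policy. Drivers that set bm.allocator_type to _DRM_BM_ALLOCATOR_UNCACHED get pre-write-combined pages from the pool in the new file below, and the per-page caching transitions in drm_ttm_set_caching() are skipped, since pool pages never change attributes. A condensed sketch of the dispatch, mirroring the hunks above (kernel context, not standalone; ttm_page_source is an illustrative name):

static struct page *ttm_page_source(struct drm_ttm *ttm)
{
	if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
		return drm_get_uncached_page();	/* already WC, no flush later */
	return alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
}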
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+/* simple list based uncached page allocator
+ * - Add chunks of 1MB to the allocator at a time.
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in use pages
+ *
+ * TODO: Add shrinker support
+ */
+
+#include "drmP.h"
+#include <asm/agp.h>
+
+static struct list_head uncached_free_list;
+
+static struct mutex uncached_mutex;
+static int uncached_inited;
+static int total_uncached_pages;
+
+/* add 1MB at a time */
+#define NUM_PAGES_TO_ADD 256
+
+static void drm_uncached_page_put(struct page *page)
+{
+	unmap_page_from_agp(page);
+	put_page(page);
+	__free_page(page);
+}
+
+int drm_uncached_add_pages_locked(int num_pages)
+{
+	struct page *page;
+	int i;
+
+	DRM_DEBUG("adding uncached memory %ld\n", num_pages * PAGE_SIZE);
+	for (i = 0; i < num_pages; i++) {
+
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+		if (!page) {
+			DRM_ERROR("unable to get page %d\n", i);
+			return i;
+		}
+
+		get_page(page);
+#ifdef CONFIG_X86
+		set_memory_wc((unsigned long)page_address(page), 1);
+#else
+		map_page_into_agp(page);
+#endif
+
+		list_add(&page->lru, &uncached_free_list);
+		total_uncached_pages++;
+	}
+	return i;
+}
+
+struct page *drm_get_uncached_page(void)
+{
+	struct page *page = NULL;
+	int ret;
+
+	mutex_lock(&uncached_mutex);
+	if (list_empty(&uncached_free_list)) {
+		ret = drm_uncached_add_pages_locked(NUM_PAGES_TO_ADD);
+		if (ret == 0)
+			return NULL;
+	}
+
+	page = list_first_entry(&uncached_free_list, struct page, lru);
+	list_del(&page->lru);
+
+	mutex_unlock(&uncached_mutex);
+	return page;
+}
+
+void drm_put_uncached_page(struct page *page)
+{
+	mutex_lock(&uncached_mutex);
+	list_add(&page->lru, &uncached_free_list);
+	mutex_unlock(&uncached_mutex);
+}
+
+void drm_uncached_release_all_pages(void)
+{
+	struct page *page, *tmp;
+
+	list_for_each_entry_safe(page, tmp, &uncached_free_list, lru) {
+		list_del(&page->lru);
+		drm_uncached_page_put(page);
+	}
+}
+
+int drm_uncached_init(void)
+{
+
+	if (uncached_inited)
+		return 0;
+
+	INIT_LIST_HEAD(&uncached_free_list);
+
+	mutex_init(&uncached_mutex);
+	uncached_inited = 1;
+	return 0;
+
+}
+
+void drm_uncached_fini(void)
+{
+	if (!uncached_inited)
+		return;
+
+	uncached_inited = 0;
+	drm_uncached_release_all_pages();
+}
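Note: the new pool grows in 1MB (256-page) chunks, flips each page to write-combined once at grow time (set_memory_wc() on x86, map_page_into_agp() elsewhere), and then recycles pages through a free list so the expensive attribute change is not paid on every allocation. One review-style observation: the early NULL return in drm_get_uncached_page() appears to leave uncached_mutex held. A sketch of the intended calling pattern (kernel context, not standalone):

/* Pages from the pool stay uncached for their lifetime, so they must be
 * returned with drm_put_uncached_page(), never freed with __free_page().
 */
struct page *page = drm_get_uncached_page();
if (!page)
	return -ENOMEM;			/* pool could not be grown */

/* ... hand the page to a TTM / bind it into the GART ... */

drm_put_uncached_page(page);		/* back on the free list, still WC */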
@@ -77,6 +77,22 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
 	return mode;
 }
 
+int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+				  uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+
+	if (property == dev->mode_config.dpms_property) {
+		if (val > 3)
+			return -EINVAL;
+
+		drm_helper_set_connector_dpms(connector, val);
+
+	}
+	return 0;
+}
+
+
 static int radeon_lvds_get_modes(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

@@ -146,6 +162,7 @@ struct drm_connector_funcs radeon_lvds_connector_funcs = {
 	.detect = radeon_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = radeon_connector_destroy,
+	.set_property = radeon_connector_set_property,
 };
 
 static int radeon_vga_get_modes(struct drm_connector *connector)

@@ -197,6 +214,7 @@ struct drm_connector_funcs radeon_vga_connector_funcs = {
 	.detect = radeon_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = radeon_connector_destroy,
+	.set_property = radeon_connector_set_property,
 };
 

@@ -289,6 +307,7 @@ struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
 struct drm_connector_funcs radeon_dvi_connector_funcs = {
 	.detect = radeon_dvi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
 	.destroy = radeon_connector_destroy,
 };
 
@@ -204,6 +204,10 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 
 	radeon_lock_cursor(crtc, true);
 	if (radeon_is_avivo(dev_priv)) {
+		/* avivo cursor are offset into the total surface */
+		x += crtc->x;
+		y += crtc->y;
+		DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
 		RADEON_WRITE(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
 			     ((xorigin ? 0 : x) << 16) |
 			     (yorigin ? 0 : y));
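Note: on avivo hardware the cursor position register is relative to the whole framebuffer surface, while the incoming coordinates are relative to the CRTC, hence the x += crtc->x / y += crtc->y fixup above. A standalone arithmetic sketch with illustrative values:

/* cursor_demo.c - avivo cursor offset fixup, illustrative numbers only */
#include <stdio.h>

int main(void)
{
	int crtc_x = 1280, crtc_y = 0;	/* CRTC scanning out right of a 1280-wide one */
	int x = 100, y = 200;		/* cursor position relative to that CRTC */

	/* avivo cursors are offset into the total surface */
	x += crtc_x;
	y += crtc_y;

	printf("surface position: %d,%d\n", x, y);	/* 1380,200 */
	return 0;
}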
@@ -707,6 +707,7 @@ int radeonfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height,
 			   uint32_t surface_width, uint32_t surface_height,
 			   struct radeon_framebuffer **radeon_fb_p)
 {
+	struct drm_radeon_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct radeonfb_par *par;
 	struct drm_framebuffer *fb;

@@ -743,6 +744,8 @@ int radeonfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height,
 		goto out_unref;
 	}
 
+	dev_priv->mm.vram_visible -= aligned_size;
+
 	mutex_lock(&dev->struct_mutex);
 	fb = radeon_framebuffer_create(dev, &mode_cmd, fbo);
 	if (!fb) {

@@ -1136,6 +1139,7 @@ EXPORT_SYMBOL(radeonfb_probe);
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 {
+	struct drm_radeon_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
 

@@ -1147,6 +1151,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 	if (info) {
 		unregister_framebuffer(info);
 		drm_bo_kunmap(&radeon_fb->kmap_obj);
+		dev_priv->mm.vram_visible += radeon_fb->obj->size;
 		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(radeon_fb->obj);
 		radeon_fb->obj = NULL;
@@ -57,14 +57,15 @@ static void radeon_fence_poll(struct drm_device *dev, uint32_t fence_class,
 {
 	struct drm_radeon_private *dev_priv = (struct drm_radeon_private *) dev->dev_private;
 	uint32_t sequence;
-	if (waiting_types & DRM_FENCE_TYPE_EXE) {
 
-		sequence = READ_BREADCRUMB(dev_priv);
+	sequence = RADEON_READ(RADEON_SCRATCH_REG3);
+	/* this used to be READ_BREADCRUMB(dev_priv); but it caused
+	 * a race somewhere in the fencing irq
+	 */
+
 	DRM_DEBUG("polling %d\n", sequence);
 	drm_fence_handler(dev, 0, sequence,
 			  DRM_FENCE_TYPE_EXE, 0);
-	}
 }
 
 void radeon_fence_handler(struct drm_device * dev)
@@ -68,7 +68,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	args->vram_visible = dev_priv->mm.vram_visible;
 
 	args->gart_start = dev_priv->mm.gart_start;
-	args->gart_size = dev_priv->mm.gart_size;
+	args->gart_size = dev_priv->mm.gart_useable;
 
 	return 0;
 }

@@ -586,6 +586,9 @@ static int radeon_gart_init(struct drm_device *dev)
 	if (ret)
 		return -EINVAL;
 
+	/* subtract from VRAM value reporting to userspace */
+	dev_priv->mm.vram_visible -= RADEON_PCIGART_TABLE_SIZE;
+
 	dev_priv->mm.pcie_table_backup = kzalloc(RADEON_PCIGART_TABLE_SIZE, GFP_KERNEL);
 	if (!dev_priv->mm.pcie_table_backup)
 		return -EINVAL;

@@ -677,6 +680,8 @@ int radeon_alloc_gart_objects(struct drm_device *dev)
 		  dev_priv->mm.ring.bo, dev_priv->mm.ring.bo->offset, dev_priv->mm.ring.kmap.virtual,
 		  dev_priv->mm.ring_read.bo, dev_priv->mm.ring_read.bo->offset, dev_priv->mm.ring_read.kmap.virtual);
 
+	dev_priv->mm.gart_useable -= RADEON_DEFAULT_RING_SIZE + PAGE_SIZE;
+
 	/* init the indirect buffers */
 	radeon_gem_ib_init(dev);
 	radeon_gem_dma_bufs_init(dev);

@@ -963,6 +968,9 @@ int radeon_gem_mm_init(struct drm_device *dev)
 	/* init TTM underneath */
 	drm_bo_driver_init(dev);
 
+	/* use the uncached allocator */
+	dev->bm.allocator_type = _DRM_BM_ALLOCATOR_UNCACHED;
+
 	/* size the mappable VRAM memory for now */
 	radeon_vram_setup(dev);
 

@@ -983,6 +991,7 @@ int radeon_gem_mm_init(struct drm_device *dev)
 
 	dev_priv->mm.gart_size = (32 * 1024 * 1024);
 	dev_priv->mm.gart_start = 0;
+	dev_priv->mm.gart_useable = dev_priv->mm.gart_size;
 	ret = radeon_gart_init(dev);
 	if (ret)
 		return -EINVAL;

@@ -1287,6 +1296,7 @@ static int radeon_gem_ib_init(struct drm_device *dev)
 		goto free_all;
 	}
 
+	dev_priv->mm.gart_useable -= RADEON_IB_SIZE * RADEON_NUM_IB;
 	dev_priv->ib_alloc_bitmap = 0;
 
 	dev_priv->cs.ib_get = radeon_gem_ib_get;

@@ -1523,6 +1533,7 @@ static int radeon_gem_dma_bufs_init(struct drm_device *dev)
 		DRM_ERROR("Failed to mmap DMA buffers\n");
 		return -ENOMEM;
 	}
+	dev_priv->mm.gart_useable -= size;
 	DRM_DEBUG("\n");
 	radeon_gem_addbufs(dev);
 
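Note: gart_useable starts out equal to gart_size and is decremented for every kernel-owned GART consumer (the ring plus its read pointer page, the indirect-buffer pool, and the DMA buffers), so the value userspace gets back from the info ioctl reflects what it can actually allocate. A standalone sketch of the bookkeeping; all sizes are illustrative stand-ins, not the real RADEON_* constants:

/* gart_demo.c - shape of the new gart_useable accounting */
#include <stdio.h>

int main(void)
{
	unsigned long gart_size = 32ul * 1024 * 1024;	/* as set in radeon_gem_mm_init */
	unsigned long gart_useable = gart_size;

	unsigned long ring = (64ul * 1024) + 4096;	/* RADEON_DEFAULT_RING_SIZE + PAGE_SIZE (illustrative) */
	unsigned long ib_pool = 64ul * 1024;		/* RADEON_IB_SIZE * RADEON_NUM_IB (illustrative) */
	unsigned long dma_bufs = 2ul * 1024 * 1024;	/* mmap'ed DMA buffer size (illustrative) */

	gart_useable -= ring;
	gart_useable -= ib_pool;
	gart_useable -= dma_bufs;

	/* radeon_gem_info_ioctl now reports this value as gart_size */
	printf("useable GART: %lu bytes\n", gart_useable);
	return 0;
}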
@@ -1313,9 +1313,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
 	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
 
-	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
-	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+	dev_priv->ring.fetch_size_l2ow = 2;
 
 	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
 	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;

@@ -2513,8 +2511,7 @@ int radeon_modeset_cp_init(struct drm_device *dev)
 	dev_priv->ring.size_l2qw = drm_order(dev_priv->ring.size / 8);
 	dev_priv->ring.rptr_update = 4096;
 	dev_priv->ring.rptr_update_l2qw = drm_order(4096 / 8);
-	dev_priv->ring.fetch_size = 32;
-	dev_priv->ring.fetch_size_l2ow = drm_order(32 / 16);
+	dev_priv->ring.fetch_size_l2ow = 2; /* do what tcore does */
 	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;

@@ -2522,6 +2519,9 @@ int radeon_modeset_cp_init(struct drm_device *dev)
 
 	r300_init_reg_flags(dev);
 
+	/* turn off HDP read cache for now */
+	RADEON_WRITE(RADEON_HOST_PATH_CNTL, RADEON_READ(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS);
+
 #if __OS_HAS_AGP
 	if (dev_priv->flags & RADEON_IS_AGP)
 		radeon_modeset_agp_init(dev);

@@ -2841,3 +2841,35 @@ void radeon_gart_flush(struct drm_device *dev)
 	}
 
 }
+
+void radeon_commit_ring(drm_radeon_private_t *dev_priv)
+{
+	int i;
+	u32 *ring;
+	int tail_aligned;
+
+	/* check if the ring is padded out to 16-dword alignment */
+
+	tail_aligned = dev_priv->ring.tail & 0xf;
+	if (tail_aligned) {
+		int num_p2 = 16 - tail_aligned;
+
+		ring = dev_priv->ring.start;
+		/* pad with some CP_PACKET2 */
+		for (i = 0; i < num_p2; i++)
+			ring[dev_priv->ring.tail + i] = CP_PACKET2();
+
+		dev_priv->ring.tail += i;
+
+		dev_priv->ring.space -= num_p2 * sizeof(u32);
+	}
+
+	dev_priv->ring.tail &= dev_priv->ring.tail_mask;
+
+	DRM_MEMORYBARRIER();
+	GET_RING_HEAD( dev_priv );
+
+	RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail );
+	/* read from PCI bus to ensure correct posting */
+	RADEON_READ( RADEON_CP_RB_RPTR );
+}
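Note: radeon_commit_ring() above and the BEGIN_RING change below enforce the same invariant from both ends: BEGIN_RING checks for space rounded up to a 16-dword boundary, and commit pads the tail out with CP_PACKET2 no-ops so the write pointer handed to the CP is always 16-dword aligned. A standalone sketch of the arithmetic:

/* ring_align_demo.c - 16-dword ring alignment used by BEGIN_RING and
 * radeon_commit_ring (values illustrative)
 */
#include <stdio.h>

int main(void)
{
	int n = 13;				/* dwords a caller asks for */
	int align_nr = (n + 0xf) & ~0xf;	/* space checked for: 16 */

	unsigned int tail = 0x25;		/* ring tail after emitting */
	int tail_aligned = tail & 0xf;		/* 0x5 -> not aligned */
	int num_p2 = tail_aligned ? 16 - tail_aligned : 0;	/* 11 CP_PACKET2 fillers */

	printf("reserve %d dwords; pad %d; new tail 0x%x\n",
	       align_nr, num_p2, tail + num_p2);
	return 0;
}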
@@ -219,7 +219,6 @@ typedef struct drm_radeon_ring_buffer {
 	int rptr_update; /* Double Words */
 	int rptr_update_l2qw; /* log2 Quad Words */
 
-	int fetch_size; /* Double Words */
 	int fetch_size_l2ow; /* log2 Oct Words */
 
 	u32 tail;

@@ -275,6 +274,8 @@ struct radeon_mm_info {
 	uint64_t gart_start;
 	uint64_t gart_size;
 
+	uint64_t gart_useable;
+
 	void *pcie_table_backup;
 
 	struct radeon_mm_obj pcie_table;

@@ -792,8 +793,10 @@ int radeon_resume(struct drm_device *dev);
 #	define R500_DISPLAY_INT_STATUS (1 << 0)
 
 #define RADEON_HOST_PATH_CNTL 0x0130
-#	define RADEON_HDP_SOFT_RESET (1 << 26)
 #	define RADEON_HDP_APER_CNTL (1 << 23)
+#	define RADEON_HP_LIN_RD_CACHE_DIS (1 << 24)
+#	define RADEON_HDP_SOFT_RESET (1 << 26)
+#	define RADEON_HDP_READ_BUFFER_INVALIDATED (1 << 27)
 
 #define RADEON_NB_TOM 0x15c
 

@@ -1515,15 +1518,16 @@ do { \
 
 #define RADEON_VERBOSE 0
 
-#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring;
+#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring;
 
 #define BEGIN_RING( n ) do { \
 	if ( RADEON_VERBOSE ) { \
 		DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
 	} \
-	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
+	_align_nr = (n + 0xf) & ~0xf; \
+	if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \
 		COMMIT_RING(); \
-		radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \
+		radeon_wait_ring(dev_priv, _align_nr * sizeof(u32)); \
 	} \
 	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
 	ring = dev_priv->ring.start; \

@@ -1540,19 +1544,14 @@ do { \
 		DRM_ERROR( \
 			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
 			((dev_priv->ring.tail + _nr) & mask), \
 			write, __LINE__); \
 	} else \
 		dev_priv->ring.tail = write; \
 } while (0)
 
 #define COMMIT_RING() do { \
-	/* Flush writes to ring */ \
-	DRM_MEMORYBARRIER(); \
-	GET_RING_HEAD( dev_priv ); \
-	RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
-	/* read from PCI bus to ensure correct posting */ \
-	RADEON_READ( RADEON_CP_RB_RPTR ); \
-} while (0)
+	radeon_commit_ring(dev_priv); \
+} while(0)
 
 #define OUT_RING( x ) do { \
 	if ( RADEON_VERBOSE ) { \

@@ -1731,6 +1730,8 @@ extern void radeon_gem_proc_cleanup(struct drm_minor *minor);
 #define MARK_CHECK_OFFSET 2
 #define MARK_CHECK_SCISSOR 3
 
+extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
+
 extern int r300_check_range(unsigned reg, int count);
 extern int r300_get_reg_flags(unsigned reg);
 #endif /* __RADEON_DRV_H__ */