commit 2d4420c666

    Merge branch 'radeon-gem-cs' into modesetting-gem

    Conflicts:
	libdrm/xf86drm.c
	linux-core/Makefile.kernel
	linux-core/drmP.h
	linux-core/drm_compat.h
	linux-core/drm_drv.c
	linux-core/drm_stub.c
	linux-core/drm_vm.c
	shared-core/i915_dma.c
	shared-core/r300_cmdbuf.c
	shared-core/radeon_drv.h
@@ -653,7 +653,7 @@ intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
 	bo_fake->refcount = 1;
 	bo_fake->id = ++bufmgr_fake->buf_nr;
 	bo_fake->name = name;
-	bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
+	bo_fake->flags = BM_PINNED;
 	bo_fake->is_static = 1;
 
 	DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
@@ -12,17 +12,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
 		drm_memory_debug.o ati_pcigart.o drm_sman.o \
-		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
-		drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
+		drm_hashtab.o drm_mm.o drm_compat.o \
+		drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \
 		drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
 		drm_regman.o drm_vm_nopage_compat.o drm_gem.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
-i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
-		i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
-		i915_opregion.o \
+i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+		i915_compat.o i915_suspend.o i915_opregion.o \
 		i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o \
 		intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
 		intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
@@ -43,8 +42,9 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
 		nv50_kms_wrapper.o \
 		nv50_fbcon.o
 radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_gem.o \
-		radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o \
-		atombios_crtc.o radeon_encoders.o radeon_fb.o radeon_combios.o
+		radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o radeon_cs.o \
+		atombios_crtc.o radeon_encoders.o radeon_fb.o radeon_combios.o radeon_legacy_crtc.o radeon_legacy_encoders.o \
+		radeon_cursor.o
 sis-objs    := sis_drv.o sis_mm.o
 ffb-objs    := ffb_drv.o ffb_context.o
 savage-objs := savage_drv.o savage_bci.o savage_state.o
@@ -90,7 +90,7 @@ int drm_ati_alloc_pcigart_table(struct drm_device *dev,
 	if (gart_info->table_handle == NULL)
 		return -ENOMEM;
 
-	memset(gart_info->table_handle, 0, gart_info->table_size);
+	memset(gart_info->table_handle->vaddr, 0, gart_info->table_size);
 	return 0;
 }
 EXPORT_SYMBOL(drm_ati_alloc_pcigart_table);
@@ -111,7 +111,6 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
 
 	/* we need to support large memory configurations */
 	if (!entry) {
-		DRM_ERROR("no scatter/gather memory!\n");
 		return 0;
 	}
 
@@ -206,11 +205,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 
 	ret = 1;
 
-#if defined(__i386__) || defined(__x86_64__)
-	wbinvd();
-#else
 	mb();
-#endif
 
  done:
 	gart_info->addr = address;
@@ -266,11 +261,7 @@ static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
 		gart_insert_page_into_table(info, page_base, pci_gart + j);
 	}
 
-#if defined(__i386__) || defined(__x86_64__)
-	wbinvd();
-#else
 	mb();
-#endif
 
 	atipci_be->gart_flush_fn(atipci_be->dev);
 
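Note on the two wbinvd() hunks above: the x86-only full cache flush is dropped in favour of a plain memory barrier on every architecture. If the per-arch flush were ever wanted again, a single inline helper would keep the #ifdef out of both call sites — a sketch under that assumption, not part of this patch:

    /* Hypothetical helper, not in this patch: one home for the arch #ifdef;
     * assumes the usual kernel headers already pulled in via drmP.h. */
    static inline void ati_pcigart_cache_flush(void)
    {
    #if defined(__i386__) || defined(__x86_64__)
    	wbinvd();	/* write back and invalidate all CPU caches */
    #else
    	mb();		/* full memory barrier on other architectures */
    #endif
    }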
@@ -31,6 +31,22 @@
 #include "atom.h"
 #include "atom-bits.h"
 
+static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = lock;
+
+	atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
 static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -127,7 +143,7 @@ void atombios_crtc_set_timing(struct drm_crtc *crtc, SET_CRTC_TIMING_PARAMETERS_
 	conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
 	conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
 	conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
 	conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
 	conv_param.ucReserved = crtc_param->ucReserved;
 
 	printk("executing set crtc timing\n");
@@ -150,29 +166,21 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
 	memset(&spc_param, 0, sizeof(SET_PIXEL_CLOCK_PS_ALLOCATION));
 
+	pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+	radeon_compute_pll(&dev_priv->mode_info.pll, mode->clock,
+			   &sclock, &fb_div, &ref_div, &post_div, pll_flags);
+
 	if (radeon_is_avivo(dev_priv)) {
-		uint32_t temp;
+		uint32_t ss_cntl;
 
-		pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
-		radeon_compute_pll(&dev_priv->mode_info.pll, mode->clock,
-				   &temp, &fb_div, &ref_div, &post_div, pll_flags);
-		sclock = temp;
-
 		if (radeon_crtc->crtc_id == 0) {
-			temp = RADEON_READ(AVIVO_P1PLL_INT_SS_CNTL);
-			RADEON_WRITE(AVIVO_P1PLL_INT_SS_CNTL, temp & ~1);
+			ss_cntl = RADEON_READ(AVIVO_P1PLL_INT_SS_CNTL);
+			RADEON_WRITE(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
 		} else {
-			temp = RADEON_READ(AVIVO_P2PLL_INT_SS_CNTL);
-			RADEON_WRITE(AVIVO_P2PLL_INT_SS_CNTL, temp & ~1);
+			ss_cntl = RADEON_READ(AVIVO_P2PLL_INT_SS_CNTL);
+			RADEON_WRITE(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
 		}
-	} else {
-#if 0 // TODO r400
-		sclock = save->dot_clock_freq;
-		fb_div = save->feedback_div;
-		post_div = save->post_div;
-		ref_div = save->ppll_ref_div;
-#endif
 	}
 
 	/* */
@@ -201,7 +209,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		spc3_ptr->ucPostDiv = post_div;
 		spc3_ptr->ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
 		spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
 
 		/* TODO insert output encoder object stuff herre for r600 */
 		break;
 	default:
@@ -220,7 +228,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
 void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_radeon_private *dev_priv = dev->dev_private;
 	struct radeon_framebuffer *radeon_fb;
@@ -251,19 +259,17 @@ void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y)
 		DRM_ERROR("Unsupported screen depth %d\n", crtc->fb->bits_per_pixel);
 		return;
 	}
 
 	/* TODO tiling */
 	if (radeon_crtc->crtc_id == 0)
 		RADEON_WRITE(AVIVO_D1VGA_CONTROL, 0);
 	else
 		RADEON_WRITE(AVIVO_D2VGA_CONTROL, 0);
 
-	RADEON_WRITE(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, AVIVO_D1GRPH_UPDATE_LOCK);
-
 	RADEON_WRITE(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, fb_location);
 	RADEON_WRITE(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, fb_location);
 	RADEON_WRITE(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
 
 	RADEON_WRITE(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
 	RADEON_WRITE(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
 	RADEON_WRITE(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, x);
@@ -274,20 +280,13 @@ void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y)
 	fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
 	RADEON_WRITE(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
 	RADEON_WRITE(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
-	/* unlock the grph regs */
-	RADEON_WRITE(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, 0);
-
-	/* lock the mode regs */
-	RADEON_WRITE(AVIVO_D1SCL_UPDATE + radeon_crtc->crtc_offset, AVIVO_D1SCL_UPDATE_LOCK);
 
 	RADEON_WRITE(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
 		     crtc->mode.vdisplay);
 	RADEON_WRITE(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, (x << 16) | y);
 	RADEON_WRITE(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 		     (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
-	/* unlock the mode regs */
-	RADEON_WRITE(AVIVO_D1SCL_UPDATE + radeon_crtc->crtc_offset, 0);
 }
 
 void atombios_crtc_mode_set(struct drm_crtc *crtc,
@@ -324,7 +323,7 @@ void atombios_crtc_mode_set(struct drm_crtc *crtc,
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 		crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 		crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
 
@@ -337,9 +336,8 @@ void atombios_crtc_mode_set(struct drm_crtc *crtc,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
 
-	if (radeon_is_avivo(dev_priv)) {
+	if (radeon_is_avivo(dev_priv))
 		atombios_crtc_set_base(crtc, x, y);
-	}
 
 	atombios_crtc_set_pll(crtc, adjusted_mode, pll_flags);
 
@@ -357,11 +355,13 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	atombios_lock_crtc(crtc, 1);
 }
 
 static void atombios_crtc_commit(struct drm_crtc *crtc)
 {
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	atombios_lock_crtc(crtc, 0);
 }
 
 static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
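Note: atombios_lock_crtc() drives the UpdateCRTC_DoubleBufferRegisters AtomBIOS table, and the new prepare/commit hooks bracket the modeset with it. Assuming the standard drm_crtc_helper flow (not spelled out in this diff), the effective call order is:

    /* Modeset sequence driven by the drm_crtc_helper core (sketch): */
    atombios_crtc_prepare(crtc);  /* DPMS off, then atombios_lock_crtc(crtc, 1) */
    /* atombios_crtc_mode_set() runs here: timing, PLL and base are
     * programmed while the double-buffered registers stay latched */
    atombios_crtc_commit(crtc);   /* DPMS on, then atombios_lock_crtc(crtc, 0) */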
@@ -166,7 +166,6 @@ typedef unsigned long uintptr_t;
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 #define DRM_MAP_HASH_OFFSET 0x10000000
 #define DRM_MAP_HASH_ORDER 12
-#define DRM_OBJECT_HASH_ORDER 12
 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
 /*
@@ -405,14 +404,6 @@ struct drm_buf_entry {
 	struct drm_freelist freelist;
 };
 
-
-enum drm_ref_type {
-	_DRM_REF_USE = 0,
-	_DRM_REF_TYPE1,
-	_DRM_NO_REF_TYPES
-};
-
-
 /** File private data */
 struct drm_file {
 	int authenticated;
@@ -424,21 +415,11 @@ struct drm_file {
 	struct drm_minor *minor;
 	unsigned long lock_count;
 
-	/*
-	 * The user object hash table is global and resides in the
-	 * drm_device structure. We protect the lists and hash tables with the
-	 * device struct_mutex. A bit coarse-grained but probably the best
-	 * option.
-	 */
-
-	struct list_head refd_objects;
-
 	/** Mapping of mm object handles to object pointers. */
 	struct idr object_idr;
 	/** Lock for synchronization of access to object_idr. */
 	spinlock_t table_lock;
 
-	struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
 	struct file *filp;
 	void *driver_priv;
 
@@ -684,7 +665,9 @@ struct drm_gem_object {
 
 /* per-master structure */
 struct drm_master {
 
+	struct kref refcount; /* refcount for this master */
+
 	struct list_head head; /**< each minor contains a list of masters */
 	struct drm_minor *minor; /**< link back to minor we are a master for */
 
@@ -901,7 +884,6 @@ struct drm_device {
 	int map_count;			/**< Number of mappable regions */
 	struct drm_open_hash map_hash;	/**< User token hash table for maps */
 	struct drm_mm offset_manager;	/**< User token manager */
-	struct drm_open_hash object_hash; /**< User token hash table for objects */
 	struct address_space *dev_mapping; /**< For unmap_mapping_range() */
 	struct page *ttm_dummy_page;
 
@@ -1366,12 +1348,13 @@ extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file_priv);
 extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
-extern struct drm_master *drm_get_master(struct drm_minor *minor);
-extern void drm_put_master(struct drm_master *master);
+struct drm_master *drm_master_create(struct drm_minor *minor);
+extern struct drm_master *drm_master_get(struct drm_master *master);
+extern void drm_master_put(struct drm_master **master);
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 		       struct drm_driver *driver);
 extern int drm_put_dev(struct drm_device *dev);
-extern int drm_put_minor(struct drm_device *dev, struct drm_minor **p);
+extern int drm_put_minor(struct drm_minor **minor_p);
 extern unsigned int drm_debug; /* 1 to enable debug output */
 
 extern struct class *drm_class;
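Note: the master API moves from drm_get_master()/drm_put_master() to explicit kref naming, matching the new struct kref refcount member above. A minimal sketch of what the renamed helpers presumably look like — the drm_master_destroy release callback is an assumption, it is not visible in this hunk:

    struct drm_master *drm_master_get(struct drm_master *master)
    {
    	kref_get(&master->refcount);
    	return master;
    }

    void drm_master_put(struct drm_master **master)
    {
    	/* drop one reference; the release callback frees the master
    	 * once the count reaches zero */
    	kref_put(&(*master)->refcount, drm_master_destroy);
    	*master = NULL;	/* the double-pointer form prevents reuse after put */
    }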
@@ -565,18 +565,6 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
 
-static void drm_bo_base_deref_locked(struct drm_file *file_priv,
-				     struct drm_user_object *uo)
-{
-	struct drm_buffer_object *bo =
-	    drm_user_object_entry(uo, struct drm_buffer_object, base);
-
-	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-
-	drm_bo_takedown_vm_locked(bo);
-	drm_bo_usage_deref_locked(&bo);
-}
-
 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
 {
 	struct drm_buffer_object *tmp_bo = *bo;
@@ -1067,41 +1055,13 @@ static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
 	return 0;
 }
 
-/*
- * Call dev->struct_mutex locked.
- */
-
-struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
-						   uint32_t handle, int check_owner)
-{
-	struct drm_user_object *uo;
-	struct drm_buffer_object *bo;
-
-	uo = drm_lookup_user_object(file_priv, handle);
-
-	if (!uo || (uo->type != drm_buffer_type)) {
-		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
-		return NULL;
-	}
-
-	if (check_owner && file_priv != uo->owner) {
-		if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
-			return NULL;
-	}
-
-	bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-	atomic_inc(&bo->usage);
-	return bo;
-}
-EXPORT_SYMBOL(drm_lookup_buffer_object);
-
 /*
  * Call bo->mutex locked.
  * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
  * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
  */
 
-static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
+int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
 {
 	struct drm_fence_object *fence = bo->fence;
 
@@ -1157,149 +1117,6 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
 	return ret;
 }
 
-/*
- * Fill in the ioctl reply argument with buffer info.
- * Bo locked.
- */
-
-void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-			 struct drm_bo_info_rep *rep)
-{
-	if (!rep)
-		return;
-
-	rep->handle = bo->base.hash.key;
-	rep->flags = bo->mem.flags;
-	rep->size = bo->num_pages * PAGE_SIZE;
-	rep->offset = bo->offset;
-
-	/*
-	 * drm_bo_type_device buffers have user-visible
-	 * handles which can be used to share across
-	 * processes. Hand that back to the application
-	 */
-	if (bo->type == drm_bo_type_device)
-		rep->arg_handle = bo->map_list.user_token;
-	else
-		rep->arg_handle = 0;
-
-	rep->proposed_flags = bo->mem.proposed_flags;
-	rep->buffer_start = bo->buffer_start;
-	rep->fence_flags = bo->fence_type;
-	rep->rep_flags = 0;
-	rep->page_alignment = bo->mem.page_alignment;
-
-	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
-		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
-				DRM_BO_REP_BUSY);
-	}
-}
-EXPORT_SYMBOL(drm_bo_fill_rep_arg);
-
-/*
- * Wait for buffer idle and register that we've mapped the buffer.
- * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
- * so that if the client dies, the mapping is automatically
- * unregistered.
- */
-
-static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
-				 uint32_t map_flags, unsigned hint,
-				 struct drm_bo_info_rep *rep)
-{
-	struct drm_buffer_object *bo;
-	struct drm_device *dev = file_priv->minor->dev;
-	int ret = 0;
-	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	mutex_lock(&bo->mutex);
-	do {
-		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
-
-		ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
-		if (unlikely(ret))
-			goto out;
-
-		if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
-			drm_bo_evict_cached(bo);
-
-	} while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
-
-	atomic_inc(&bo->mapped);
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret) {
-		if (atomic_dec_and_test(&bo->mapped))
-			wake_up_all(&bo->event_queue);
-
-	} else
-		drm_bo_fill_rep_arg(bo, rep);
-
-out:
-	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(&bo);
-
-	return ret;
-}
-
-static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_buffer_object *bo;
-	struct drm_ref_object *ro;
-	int ret = 0;
-
-	mutex_lock(&dev->struct_mutex);
-
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	if (!bo) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-	if (!ro) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	drm_remove_ref_object(file_priv, ro);
-	drm_bo_usage_deref_locked(&bo);
-out:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/*
- * Call struct-sem locked.
- */
-
-static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
-					 struct drm_user_object *uo,
-					 enum drm_ref_type action)
-{
-	struct drm_buffer_object *bo =
-	    drm_user_object_entry(uo, struct drm_buffer_object, base);
-
-	/*
-	 * We DON'T want to take the bo->lock here, because we want to
-	 * hold it when we wait for unmapped buffer.
-	 */
-
-	BUG_ON(action != _DRM_REF_TYPE1);
-
-	if (atomic_dec_and_test(&bo->mapped))
-		wake_up_all(&bo->event_queue);
-}
-
 /*
  * bo->mutex locked.
  * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
@@ -1594,8 +1411,7 @@ static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
 
 int drm_bo_do_validate(struct drm_buffer_object *bo,
 		       uint64_t flags, uint64_t mask, uint32_t hint,
-		       uint32_t fence_class,
-		       struct drm_bo_info_rep *rep)
+		       uint32_t fence_class)
 {
 	int ret;
 	int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
@@ -1622,132 +1438,12 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
 
 	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
 out:
-	if (rep)
-		drm_bo_fill_rep_arg(bo, rep);
-
 	mutex_unlock(&bo->mutex);
 
 	return ret;
 }
 EXPORT_SYMBOL(drm_bo_do_validate);
 
-/**
- * drm_bo_handle_validate
- *
- * @file_priv: the drm file private, used to get a handle to the user context
- *
- * @handle: the buffer object handle
- *
- * @flags: access rights, mapping parameters and cacheability. See
- * the DRM_BO_FLAG_* values in drm.h
- *
- * @mask: Which flag values to change; this allows callers to modify
- * things without knowing the current state of other flags.
- *
- * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
- * values in drm.h.
- *
- * @fence_class: a driver-specific way of doing fences. Presumably,
- * this would be used if the driver had more than one submission and
- * fencing mechanism. At this point, there isn't any use of this
- * from the user mode code.
- *
- * @rep: To be stuffed with the reply from validation
- *
- * @bp_rep: To be stuffed with the buffer object pointer
- *
- * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
- * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
- * This is a convenience wrapper only.
- */
-
-int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-			   uint64_t flags, uint64_t mask,
-			   uint32_t hint,
-			   uint32_t fence_class,
-			   struct drm_bo_info_rep *rep,
-			   struct drm_buffer_object **bo_rep)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_buffer_object *bo;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	if (bo->base.owner != file_priv)
-		mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-
-	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
-
-	if (!ret && bo_rep)
-		*bo_rep = bo;
-	else
-		drm_bo_usage_deref_unlocked(&bo);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_bo_handle_validate);
-
-
-static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
-			      struct drm_bo_info_rep *rep)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_buffer_object *bo;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	mutex_lock(&bo->mutex);
-
-	/*
-	 * FIXME: Quick busy here?
-	 */
-
-	drm_bo_busy(bo, 1);
-	drm_bo_fill_rep_arg(bo, rep);
-	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(&bo);
-	return 0;
-}
-
-static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
-			      uint32_t hint,
-			      struct drm_bo_info_rep *rep)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_buffer_object *bo;
-	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	mutex_lock(&bo->mutex);
-	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
-	if (ret)
-		goto out;
-
-	drm_bo_fill_rep_arg(bo, rep);
-out:
-	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(&bo);
-	return ret;
-}
-
 int drm_buffer_object_create(struct drm_device *dev,
 			     unsigned long size,
 			     enum drm_bo_type type,
@@ -1822,7 +1518,7 @@ int drm_buffer_object_create(struct drm_device *dev,
 
 	mutex_unlock(&bo->mutex);
 	ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
-				 0, NULL);
+				 0);
 	if (ret)
 		goto out_err_unlocked;
 
@@ -1837,230 +1533,6 @@ out_err_unlocked:
 }
 EXPORT_SYMBOL(drm_buffer_object_create);
 
-
-int drm_bo_add_user_object(struct drm_file *file_priv,
-			   struct drm_buffer_object *bo, int shareable)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_user_object(file_priv, &bo->base, shareable);
-	if (ret)
-		goto out;
-
-	bo->base.remove = drm_bo_base_deref_locked;
-	bo->base.type = drm_buffer_type;
-	bo->base.ref_struct_locked = NULL;
-	bo->base.unref = drm_buffer_user_object_unmap;
-
-out:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_bo_add_user_object);
-
-int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_create_arg *arg = data;
-	struct drm_bo_create_req *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	struct drm_buffer_object *entry;
-	enum drm_bo_type bo_type;
-	int ret = 0;
-
-	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
-		  (int)(req->size / 1024), req->page_alignment * 4);
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * If the buffer creation request comes in with a starting address,
-	 * that points at the desired user pages to map. Otherwise, create
-	 * a drm_bo_type_device buffer, which uses pages allocated from the kernel
-	 */
-	bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
-
-	/*
-	 * User buffers cannot be shared
-	 */
-	if (bo_type == drm_bo_type_user)
-		req->flags &= ~DRM_BO_FLAG_SHAREABLE;
-
-	ret = drm_buffer_object_create(file_priv->minor->dev,
-				       req->size, bo_type, req->flags,
-				       req->hint, req->page_alignment,
-				       req->buffer_start, &entry);
-	if (ret)
-		goto out;
-
-	ret = drm_bo_add_user_object(file_priv, entry,
-				     req->flags & DRM_BO_FLAG_SHAREABLE);
-	if (ret) {
-		drm_bo_usage_deref_unlocked(&entry);
-		goto out;
-	}
-
-	mutex_lock(&entry->mutex);
-	drm_bo_fill_rep_arg(entry, rep);
-	mutex_unlock(&entry->mutex);
-
-out:
-	return ret;
-}
-
-int drm_bo_setstatus_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_map_wait_idle_arg *arg = data;
-	struct drm_bo_info_req *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	struct drm_buffer_object *bo;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
-	if (ret)
-		return ret;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	if (bo->base.owner != file_priv)
-		req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-
-	ret = drm_bo_do_validate(bo, req->flags, req->mask,
-				 req->hint | DRM_BO_HINT_DONT_FENCE,
-				 bo->fence_class, rep);
-
-	drm_bo_usage_deref_unlocked(&bo);
-
-	(void) drm_bo_read_unlock(&dev->bm.bm_lock);
-
-	return ret;
-}
-
-int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_map_wait_idle_arg *arg = data;
-	struct drm_bo_info_req *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	int ret;
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
-				    req->hint, rep);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_handle_arg *arg = data;
-	int ret;
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_buffer_object_unmap(file_priv, arg->handle);
-	return ret;
-}
-
-
-int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_reference_info_arg *arg = data;
-	struct drm_bo_handle_arg *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	struct drm_user_object *uo;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_user_object_ref(file_priv, req->handle,
-				  drm_buffer_type, &uo);
-	if (ret)
-		return ret;
-
-	ret = drm_bo_handle_info(file_priv, req->handle, rep);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_handle_arg *arg = data;
-	int ret = 0;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
-	return ret;
-}
-
-int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_reference_info_arg *arg = data;
-	struct drm_bo_handle_arg *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_handle_info(file_priv, req->handle, rep);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_map_wait_idle_arg *arg = data;
-	struct drm_bo_info_req *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	int ret;
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_handle_wait(file_priv, req->handle,
-				 req->hint, rep);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static int drm_bo_leave_list(struct drm_buffer_object *bo,
 			     uint32_t mem_type,
 			     int free_pinned,
@@ -2240,7 +1712,7 @@ EXPORT_SYMBOL(drm_bo_clean_mm);
  *point since we have the hardware lock.
  */
 
-static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
+int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
 {
 	int ret;
 	struct drm_buffer_manager *bm = &dev->bm;
@@ -2389,7 +1861,6 @@ int drm_bo_driver_init(struct drm_device *dev)
 	int ret = -EINVAL;
 
 	bm->dummy_read_page = NULL;
-	drm_bo_init_lock(&bm->bm_lock);
 	mutex_lock(&dev->struct_mutex);
 	if (!driver)
 		goto out_unlock;
@@ -2435,191 +1906,6 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_bo_driver_init);
 
-int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_mm_init_arg *arg = data;
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	int ret;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
-	if (ret)
-		return ret;
-
-	ret = -EINVAL;
-	if (arg->magic != DRM_BO_INIT_MAGIC) {
-		DRM_ERROR("You are using an old libdrm that is not compatible with\n"
-			  "\tthe kernel DRM module. Please upgrade your libdrm.\n");
-		return -EINVAL;
-	}
-	if (arg->major != DRM_BO_INIT_MAJOR) {
-		DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
-			  "\tversion don't match. Got %d, expected %d.\n",
-			  arg->major, DRM_BO_INIT_MAJOR);
-		return -EINVAL;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	if (!bm->initialized) {
-		DRM_ERROR("DRM memory manager was not initialized.\n");
-		goto out;
-	}
-	if (arg->mem_type == 0) {
-		DRM_ERROR("System memory buffers already initialized.\n");
-		goto out;
-	}
-	ret = drm_bo_init_mm(dev, arg->mem_type,
-			     arg->p_offset, arg->p_size, 0);
-
-out:
-	mutex_unlock(&dev->struct_mutex);
-	(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_mm_type_arg *arg = data;
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	int ret;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
-	if (ret)
-		return ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = -EINVAL;
-	if (!bm->initialized) {
-		DRM_ERROR("DRM memory manager was not initialized\n");
-		goto out;
-	}
-	if (arg->mem_type == 0) {
-		DRM_ERROR("No takedown for System memory buffers.\n");
-		goto out;
-	}
-	ret = 0;
-	if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
-		if (ret == -EINVAL)
-			DRM_ERROR("Memory manager type %d not clean. "
-				  "Delaying takedown\n", arg->mem_type);
-		ret = 0;
-	}
-out:
-	mutex_unlock(&dev->struct_mutex);
-	(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_mm_type_arg *arg = data;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	int ret;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
-		DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
-		return -EINVAL;
-	}
-
-	if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-		ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
-		if (ret)
-			return ret;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_bo_lock_mm(dev, arg->mem_type);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret) {
-		(void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-		return ret;
-	}
-
-	return 0;
-}
-
-int drm_mm_unlock_ioctl(struct drm_device *dev,
-			void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_mm_type_arg *arg = data;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	int ret;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-		ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_mm_info_arg *arg = data;
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	struct drm_mem_type_manager *man;
-	int ret = 0;
-	int mem_type = arg->mem_type;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	if (mem_type >= DRM_BO_MEM_TYPES) {
-		DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
-		return -EINVAL;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	if (!bm->initialized) {
-		DRM_ERROR("DRM memory manager was not initialized\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-
-	man = &bm->man[arg->mem_type];
-
-	arg->p_size = man->size;
-
-out:
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
 /*
  * buffer object vm functions.
  */
@@ -2792,15 +2078,3 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
 
 	return 0;
 }
-
-int drm_bo_version_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
-
-	arg->major = DRM_BO_INIT_MAJOR;
-	arg->minor = DRM_BO_INIT_MINOR;
-	arg->patchlevel = DRM_BO_INIT_PATCH;
-
-	return 0;
-}
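Note: with the struct drm_bo_info_rep argument gone from drm_bo_do_validate(), and the handle-based wrappers plus the BO/MM ioctls deleted outright, the TTM-style user-space interface is retired here while in-kernel users keep validating directly. A hedged sketch of the adjusted in-kernel call — the flag and mask values are illustrative only, not taken from this patch:

    ret = drm_bo_do_validate(bo, DRM_BO_FLAG_MEM_VRAM, DRM_BO_MASK_MEM,
    			 DRM_BO_HINT_DONT_FENCE, 0);
    if (ret)
    	return ret;	/* there is no drm_bo_info_rep to fill any more */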
@ -1,189 +0,0 @@
|
||||||
/**************************************************************************
|
|
||||||
*
|
|
||||||
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
|
|
||||||
* All Rights Reserved.
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
||||||
* copy of this software and associated documentation files (the
|
|
||||||
* "Software"), to deal in the Software without restriction, including
|
|
||||||
* without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
* distribute, sub license, and/or sell copies of the Software, and to
|
|
||||||
* permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
* the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice (including the
|
|
||||||
* next paragraph) shall be included in all copies or substantial portions
|
|
||||||
* of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
|
||||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
|
||||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
|
||||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
|
||||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
*
|
|
||||||
**************************************************************************/
|
|
||||||
/*
|
|
||||||
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This file implements a simple replacement for the buffer manager use
|
|
||||||
* of the heavyweight hardware lock.
|
|
||||||
* The lock is a read-write lock. Taking it in read mode is fast, and
|
|
||||||
* intended for in-kernel use only.
|
|
||||||
* Taking it in write mode is slow.
|
|
||||||
*
|
|
||||||
* The write mode is used only when there is a need to block all
|
|
||||||
* user-space processes from allocating a
|
|
||||||
* new memory area.
- * Typical use in write mode is X server VT switching, and it's allowed
- * to leave kernel space with the write lock held. If a user-space process
- * dies while having the write-lock, it will be released during the file
- * descriptor release.
- *
- * The read lock is typically placed at the start of an IOCTL- or
- * user-space callable function that may end up allocating a memory area.
- * This includes setstatus, super-ioctls and no_pfn; the latter may move
- * unmappable regions to mappable. It's a bug to leave kernel space with the
- * read lock held.
- *
- * Both read- and write lock taking may be interruptible for low signal-delivery
- * latency. The locking functions will return -EAGAIN if interrupted by a
- * signal.
- *
- * Locking order: The lock should be taken BEFORE any kernel mutexes
- * or spinlocks.
- */
-
-#include "drmP.h"
-
-void drm_bo_init_lock(struct drm_bo_lock *lock)
-{
-	DRM_INIT_WAITQUEUE(&lock->queue);
-	atomic_set(&lock->write_lock_pending, 0);
-	atomic_set(&lock->readers, 0);
-}
-
-void drm_bo_read_unlock(struct drm_bo_lock *lock)
-{
-	if (atomic_dec_and_test(&lock->readers))
-		wake_up_all(&lock->queue);
-}
-EXPORT_SYMBOL(drm_bo_read_unlock);
-
-int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
-{
-	while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
-		int ret;
-
-		if (!interruptible) {
-			wait_event(lock->queue,
-				   atomic_read(&lock->write_lock_pending) == 0);
-			continue;
-		}
-		ret = wait_event_interruptible
-		    (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
-		if (ret)
-			return -EAGAIN;
-	}
-
-	while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
-		int ret;
-		if (!interruptible) {
-			wait_event(lock->queue,
-				   atomic_read(&lock->readers) != -1);
-			continue;
-		}
-		ret = wait_event_interruptible
-		    (lock->queue, atomic_read(&lock->readers) != -1);
-		if (ret)
-			return -EAGAIN;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(drm_bo_read_lock);
-
-static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
-{
-	if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
-		return -EINVAL;
-	wake_up_all(&lock->queue);
-	return 0;
-}
-
-static void drm_bo_write_lock_remove(struct drm_file *file_priv,
-				     struct drm_user_object *item)
-{
-	struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
-	int ret;
-
-	ret = __drm_bo_write_unlock(lock);
-	BUG_ON(ret);
-}
-
-int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
-		      struct drm_file *file_priv)
-{
-	int ret = 0;
-	struct drm_device *dev;
-
-	atomic_inc(&lock->write_lock_pending);
-
-	while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
-		if (!interruptible) {
-			wait_event(lock->queue,
-				   atomic_read(&lock->readers) == 0);
-			continue;
-		}
-		ret = wait_event_interruptible
-		    (lock->queue, atomic_read(&lock->readers) == 0);
-
-		if (ret) {
-			atomic_dec(&lock->write_lock_pending);
-			wake_up_all(&lock->queue);
-			return -EAGAIN;
-		}
-	}
-
-	/*
-	 * Add a dummy user-object, the destructor of which will
-	 * make sure the lock is released if the client dies
-	 * while holding it.
-	 */
-
-	if (atomic_dec_and_test(&lock->write_lock_pending))
-		wake_up_all(&lock->queue);
-	dev = file_priv->minor->dev;
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_user_object(file_priv, &lock->base, 0);
-	lock->base.remove = &drm_bo_write_lock_remove;
-	lock->base.type = drm_lock_type;
-	if (ret)
-		(void)__drm_bo_write_unlock(lock);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_ref_object *ro;
-
-	mutex_lock(&dev->struct_mutex);
-
-	if (lock->base.owner != file_priv) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-	ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
-	BUG_ON(!ro);
-	drm_remove_ref_object(file_priv, ro);
-	lock->base.owner = NULL;
-
-	mutex_unlock(&dev->struct_mutex);
-	return 0;
-}
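For reference, the calling pattern the header comment above prescribes, as a minimal sketch: the read lock brackets a user-callable path that may allocate memory, and -EAGAIN is propagated so user space can restart. The helper my_alloc_work() is hypothetical, and dev->bm.bm_lock is assumed to be the buffer manager's struct drm_bo_lock.

/* Minimal sketch, assuming dev->bm.bm_lock guards the buffer manager;
 * my_alloc_work() is a hypothetical helper, not part of this tree. */
static int my_alloc_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	/* interruptible (1): may return -EAGAIN for low signal latency */
	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
	if (ret)
		return ret;

	ret = my_alloc_work(dev, data);

	/* it's a bug to return to user space with the read lock held */
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return ret;
}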
@@ -1528,6 +1528,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	dev->buf_use++;		/* Can't allocate more after this call */
 	spin_unlock(&dev->count_lock);

+	DRM_DEBUG("dma buf count %d, req count %d\n", request->count, dma->buf_count);
 	if (request->count >= dma->buf_count) {
 		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
 		    || (drm_core_check_feature(dev, DRIVER_SG)
@@ -1538,10 +1539,12 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 			unsigned long token = dev->agp_buffer_token;

 			if (!map) {
+				DRM_DEBUG("No map\n");
 				retcode = -EINVAL;
 				goto done;
 			}
 			down_write(&current->mm->mmap_sem);
+			DRM_DEBUG("%x %d\n", token, map->size);
 			virtual = do_mmap(file_priv->filp, 0, map->size,
 					  PROT_READ | PROT_WRITE,
 					  MAP_SHARED,
@@ -1555,6 +1558,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 			up_write(&current->mm->mmap_sem);
 		}
 		if (virtual > -1024UL) {
+			DRM_DEBUG("mmap failed\n");
 			/* Real error */
 			retcode = (signed long)virtual;
 			goto done;
@@ -203,7 +203,7 @@ EXPORT_SYMBOL(drm_helper_disable_unused_functions);
  * LOCKING:
  * Caller must hold mode config lock.
  */
-static void drm_pick_crtcs (struct drm_device *dev)
+void drm_pick_crtcs (struct drm_device *dev)
 {
 	int c, o, assigned;
 	struct drm_connector *connector, *connector_equal;
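Now that the function is no longer static, an outside caller owns the locking contract in the kernel-doc above. A sketch, assuming the mode config lock is the dev->mode_config.mutex mutex:

/* Sketch: honor "Caller must hold mode config lock" from another file. */
static void my_reprobe_outputs(struct drm_device *dev)
{
	mutex_lock(&dev->mode_config.mutex);
	drm_pick_crtcs(dev);
	mutex_unlock(&dev->mode_config.mutex);
}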
@@ -715,48 +715,4 @@ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);

-/**
- * drm_get_buffer_object - find the buffer object for a given handle
- * @dev: DRM device
- * @bo: pointer to caller's buffer_object pointer
- * @handle: handle to lookup
- *
- * LOCKING:
- * Must take @dev's struct_mutex to protect buffer object lookup.
- *
- * Given @handle, lookup the buffer object in @dev and put it in the caller's
- * @bo pointer.
- *
- * RETURNS:
- * Zero on success, -EINVAL if the handle couldn't be found.
- */
-int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
-{
-	struct drm_user_object *uo;
-	struct drm_hash_item *hash;
-	int ret;
-
-	*bo = NULL;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
-	if (ret) {
-		DRM_ERROR("Couldn't find handle.\n");
-		ret = -EINVAL;
-		goto out_err;
-	}
-
-	uo = drm_hash_entry(hash, struct drm_user_object, hash);
-	if (uo->type != drm_buffer_type) {
-		ret = -EINVAL;
-		goto out_err;
-	}
-
-	*bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-	ret = 0;
-out_err:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_get_buffer_object);
-
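For context on what this removal retires, a sketch of the pre-merge calling convention of drm_get_buffer_object(); the wrapper name is illustrative only:

/* Sketch of the removed helper's use: resolve a user handle to a BO. */
static int my_lookup_fb_buffer(struct drm_device *dev, unsigned long handle,
			       struct drm_buffer_object **bo)
{
	int ret;

	ret = drm_get_buffer_object(dev, bo, handle);
	if (ret)
		return ret;	/* -EINVAL: unknown or non-buffer handle */

	/* Note: as the body above shows, the helper takes no reference on
	 * the object, so the caller must keep the handle valid while the
	 * returned pointer is in use. */
	return 0;
}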
@@ -58,6 +58,7 @@ int drm_dma_setup(struct drm_device *dev)

 	return 0;
 }
+EXPORT_SYMBOL(drm_dma_setup);

 /**
  * Cleanup the DMA resources.
@@ -120,6 +121,7 @@ void drm_dma_takedown(struct drm_device *dev)
 	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
 	dev->dma = NULL;
 }
+EXPORT_SYMBOL(drm_dma_takedown);

 /**
  * Free a buffer.
@@ -146,36 +146,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),

-	DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
-		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
-		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
-		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
-		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
-
 #if OS_HAS_GEM
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
@@ -202,8 +172,6 @@ int drm_lastclose(struct drm_device * dev)

 	DRM_DEBUG("\n");

-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_bo_driver_finish(dev);
-
 	/*
 	 * We can't do much about this function failing.
@@ -213,9 +181,11 @@ int drm_lastclose(struct drm_device * dev)
 	dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");

-	/* if (dev->irq_enabled)
-		drm_irq_uninstall(dev); */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_bo_driver_finish(dev);
+
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_uninstall(dev);

 	/* Free drawable information memory */
 	mutex_lock(&dev->struct_mutex);
@@ -223,13 +193,8 @@ int drm_lastclose(struct drm_device * dev)
 	drm_drawable_free_all(dev);
 	del_timer(&dev->timer);

-	if (dev->primary->master) {
-		drm_put_master(dev->primary->master);
-		dev->primary->master = NULL;
-	}
-
 	/* Clear AGP information */
-	if (drm_core_has_AGP(dev) && dev->agp) {
+	if (drm_core_has_AGP(dev) && dev->agp && !drm_core_check_feature(dev, DRIVER_MODESET)) {
 		struct drm_agp_mem *entry, *tempe;

 		/* Remove AGP resources, but leave dev->agp
@@ -287,7 +252,7 @@ int drm_lastclose(struct drm_device * dev)
 	}
 	dev->queue_count = 0;

-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_dma_takedown(dev);

 	dev->dev_mapping = NULL;
@@ -431,11 +396,11 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ctxbitmap_cleanup(dev);
 	drm_ht_remove(&dev->map_hash);
 	drm_mm_takedown(&dev->offset_manager);
-	drm_ht_remove(&dev->object_hash);

-	drm_put_minor(dev, &dev->primary);
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(dev, &dev->control);
+		drm_put_minor(&dev->control);

+	drm_put_minor(&dev->primary);

 	if (drm_put_dev(dev))
 		DRM_ERROR("Cannot unload module\n");
@@ -134,8 +134,8 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,

 	if (new_type) {
 		fence->signaled_types |= new_type;
-		DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
-			  fence->base.hash.key, fence->signaled_types);
+		DRM_DEBUG("Fence %p signaled 0x%08x\n",
+			  fence, fence->signaled_types);

 		if (driver->needed_flush)
 			fc->pending_flush |= driver->needed_flush(fence);
@@ -147,8 +147,8 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
 	fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;

 	if (!(fence->type & ~fence->signaled_types)) {
-		DRM_DEBUG("Fence completely signaled 0x%08lx\n",
-			  fence->base.hash.key);
+		DRM_DEBUG("Fence completely signaled %p\n",
+			  fence);
 		list_del_init(&fence->ring);
 	}
 }
@@ -196,10 +196,9 @@ void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
 	*fence = NULL;
 	if (atomic_dec_and_test(&tmp_fence->usage)) {
 		drm_fence_unring(dev, &tmp_fence->ring);
-		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
-			  tmp_fence->base.hash.key);
+		DRM_DEBUG("Destroyed a fence object %p\n",
+			  tmp_fence);
 		atomic_dec(&fm->count);
-		BUG_ON(!list_empty(&tmp_fence->base.list));
 		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 	}
 }
@@ -217,7 +216,6 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
 	if (atomic_read(&tmp_fence->usage) == 0) {
 		drm_fence_unring(dev, &tmp_fence->ring);
 		atomic_dec(&fm->count);
-		BUG_ON(!list_empty(&tmp_fence->base.list));
 		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -244,15 +242,6 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
 }
 EXPORT_SYMBOL(drm_fence_reference_unlocked);

-static void drm_fence_object_destroy(struct drm_file *priv,
-				     struct drm_user_object *base)
-{
-	struct drm_fence_object *fence =
-	    drm_user_object_entry(base, struct drm_fence_object, base);
-
-	drm_fence_usage_deref_locked(&fence);
-}
-
 int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
 {
 	unsigned long flags;
@@ -477,7 +466,6 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
 	 * Avoid hitting BUG() for kernel-only fence objects.
 	 */

-	INIT_LIST_HEAD(&fence->base.list);
 	fence->fence_class = fence_class;
 	fence->type = type;
 	fence->signaled_types = 0;
@@ -493,26 +481,6 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
 	return ret;
 }

-int drm_fence_add_user_object(struct drm_file *priv,
-			      struct drm_fence_object *fence, int shareable)
-{
-	struct drm_device *dev = priv->minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_user_object(priv, &fence->base, shareable);
-	if (ret)
-		goto out;
-	atomic_inc(&fence->usage);
-	fence->base.type = drm_fence_type;
-	fence->base.remove = &drm_fence_object_destroy;
-	DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
-out:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_fence_add_user_object);
-
 int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
 			    uint32_t type, unsigned flags,
 			    struct drm_fence_object **c_fence)
@@ -569,261 +537,7 @@ void drm_fence_manager_init(struct drm_device *dev)
 	write_unlock_irqrestore(&fm->lock, flags);
 }

-void drm_fence_fill_arg(struct drm_fence_object *fence,
-			struct drm_fence_arg *arg)
-{
-	struct drm_device *dev = fence->dev;
-	struct drm_fence_manager *fm = &dev->fm;
-	unsigned long irq_flags;
-
-	read_lock_irqsave(&fm->lock, irq_flags);
-	arg->handle = fence->base.hash.key;
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled_types;
-	arg->error = fence->error;
-	arg->sequence = fence->sequence;
-	read_unlock_irqrestore(&fm->lock, irq_flags);
-}
-EXPORT_SYMBOL(drm_fence_fill_arg);
-
 void drm_fence_manager_takedown(struct drm_device *dev)
 {
 }
-
-struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
-						 uint32_t handle)
-{
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_user_object *uo;
-	struct drm_fence_object *fence;
-
-	mutex_lock(&dev->struct_mutex);
-	uo = drm_lookup_user_object(priv, handle);
-	if (!uo || (uo->type != drm_fence_type)) {
-		mutex_unlock(&dev->struct_mutex);
-		return NULL;
-	}
-	fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
-	mutex_unlock(&dev->struct_mutex);
-	return fence;
-}
-
-int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	if (arg->flags & DRM_FENCE_FLAG_EMIT)
-		LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_object_create(dev, arg->fence_class,
-				      arg->type, arg->flags, &fence);
-	if (ret)
-		return ret;
-	ret = drm_fence_add_user_object(file_priv, fence,
-					arg->flags &
-					DRM_FENCE_FLAG_SHAREABLE);
-	if (ret) {
-		drm_fence_usage_deref_unlocked(&fence);
-		return ret;
-	}
-
-	/*
-	 * usage > 0. No need to lock dev->struct_mutex;
-	 */
-
-	arg->handle = fence->base.hash.key;
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	struct drm_user_object *uo;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
-	if (ret)
-		return ret;
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
-}
-
-int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	if (!fence)
-		return -EINVAL;
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	if (!fence)
-		return -EINVAL;
-	ret = drm_fence_object_flush(fence, arg->type);
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	if (!fence)
-		return -EINVAL;
-	ret = drm_fence_object_wait(fence,
-				    arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
-				    0, arg->type);
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	if (!fence)
-		return -EINVAL;
-	ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
-				    arg->type);
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized\n");
-		return -EINVAL;
-	}
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
-				       NULL, &fence);
-	if (ret)
-		return ret;
-
-	if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
-		ret = drm_fence_add_user_object(file_priv, fence,
-						arg->flags &
-						DRM_FENCE_FLAG_SHAREABLE);
-		if (ret)
-			return ret;
-	}
-
-	arg->handle = fence->base.hash.key;
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
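The kernel-internal fence entry points survive this cleanup even though the user-visible ioctls above do not. A minimal sketch of the round trip, using only calls visible in this diff; it assumes (as the create ioctl's lock test suggests) that DRM_FENCE_FLAG_EMIT causes the fence to be emitted at create time:

/* Hedged sketch: create, wait on, and release a fence in-kernel. */
static int my_sync_to_gpu(struct drm_device *dev)
{
	struct drm_fence_object *fence;
	int ret;

	/* fence class 0, signaling on command-stream completion */
	ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE,
				      DRM_FENCE_FLAG_EMIT, &fence);
	if (ret)
		return ret;

	/* lazy = 1: sleep rather than spin on interrupt-less hardware;
	 * -EAGAIN means a signal interrupted the wait */
	ret = drm_fence_object_wait(fence, 1, 0, DRM_FENCE_TYPE_EXE);

	drm_fence_usage_deref_unlocked(&fence);	/* drop our reference */
	return ret;
}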
@@ -54,10 +54,11 @@ static int drm_setup(struct drm_device * dev)

 	atomic_set(&dev->ioctl_count, 0);
 	atomic_set(&dev->vma_count, 0);
-	dev->buf_use = 0;
-	atomic_set(&dev->buf_alloc, 0);

-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		dev->buf_use = 0;
+		atomic_set(&dev->buf_alloc, 0);
+
 		i = drm_dma_setup(dev);
 		if (i < 0)
 			return i;
@@ -221,7 +222,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	int minor_id = iminor(inode);
 	struct drm_file *priv;
 	int ret;
-	int i, j;

 	if (filp->f_flags & O_EXCL)
 		return -EBUSY;	/* No exclusive opens */
@@ -246,22 +246,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	priv->lock_count = 0;

 	INIT_LIST_HEAD(&priv->lhead);
-	INIT_LIST_HEAD(&priv->refd_objects);
 	INIT_LIST_HEAD(&priv->fbs);

-	for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
-		ret = drm_ht_create(&priv->refd_object_hash[i],
-				    DRM_FILE_HASH_ORDER);
-		if (ret)
-			break;
-	}
-
-	if (ret) {
-		for (j = 0; j < i; ++j)
-			drm_ht_remove(&priv->refd_object_hash[j]);
-		goto out_free;
-	}
-
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_open(dev, priv);

@@ -275,28 +261,34 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	/* if there is no current master make this fd it */
 	mutex_lock(&dev->struct_mutex);
 	if (!priv->minor->master) {
-		priv->minor->master = drm_get_master(priv->minor);
+		/* create a new master */
+		priv->minor->master = drm_master_create(priv->minor);
 		if (!priv->minor->master) {
 			ret = -ENOMEM;
 			goto out_free;
 		}

 		priv->is_master = 1;
-		priv->master = priv->minor->master;
+		/* take another reference for the copy in the local file priv */
+		priv->master = drm_master_get(priv->minor->master);

 		priv->authenticated = 1;

 		mutex_unlock(&dev->struct_mutex);
 		if (dev->driver->master_create) {
 			ret = dev->driver->master_create(dev, priv->master);
 			if (ret) {
-				drm_put_master(priv->minor->master);
-				priv->minor->master = priv->master = NULL;
+				mutex_lock(&dev->struct_mutex);
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				mutex_unlock(&dev->struct_mutex);
 				goto out_free;
 			}
 		}
 	} else {
-		priv->master = priv->minor->master;
+		/* get a reference to the master */
+		priv->master = drm_master_get(priv->minor->master);
 		mutex_unlock(&dev->struct_mutex);
 	}

@@ -346,33 +338,6 @@ int drm_fasync(int fd, struct file *filp, int on)
 }
 EXPORT_SYMBOL(drm_fasync);

-static void drm_object_release(struct file *filp)
-{
-	struct drm_file *priv = filp->private_data;
-	struct list_head *head;
-	struct drm_ref_object *ref_object;
-	int i;
-
-	/*
-	 * Free leftover ref objects created by me. Note that we cannot use
-	 * list_for_each() here, as the struct_mutex may be temporarily
-	 * released by the remove_() functions, and thus the lists may be
-	 * altered.
-	 * Also, a drm_remove_ref_object() will not remove it
-	 * from the list unless its refcount is 1.
-	 */
-
-	head = &priv->refd_objects;
-	while (head->next != head) {
-		ref_object = list_entry(head->next, struct drm_ref_object, list);
-		drm_remove_ref_object(priv, ref_object);
-		head = &priv->refd_objects;
-	}
-
-	for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
-		drm_ht_remove(&priv->refd_object_hash[i]);
-}
-
 /**
  * Release file.
  *
@@ -495,6 +460,8 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_fb_release(filp);

+	mutex_lock(&dev->struct_mutex);
+
 	if (file_priv->is_master) {
 		struct drm_file *temp;
 		list_for_each_entry(temp, &dev->filelist, lhead) {
@@ -503,19 +470,17 @@ int drm_release(struct inode *inode, struct file *filp)
 			temp->authenticated = 0;
 		}

-		if (file_priv->minor->master == file_priv->master)
-			file_priv->minor->master = NULL;
-		drm_put_master(file_priv->master);
+		if (file_priv->minor->master == file_priv->master) {
+			/* drop the reference held by the minor */
+			drm_master_put(&file_priv->minor->master);
+		}
 	}

-	file_priv->master = NULL;
+	/* drop the reference held by the file priv */
+	drm_master_put(&file_priv->master);
 	file_priv->is_master = 0;

-	mutex_lock(&dev->struct_mutex);
-	drm_object_release(filp);
-
 	list_del(&file_priv->lhead);

 	mutex_unlock(&dev->struct_mutex);

 	if (dev->driver->postclose)
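The ownership rule the new drm_master_create/get/put calls encode, in isolation: every stored pointer to a master owns one reference. A sketch, assuming minor->master is currently set; drm_master_put() takes a double pointer, presumably so it can clear the caller's copy as it drops the reference:

/* Hedged sketch: reference-counted drm_master lifetime. */
static void my_master_copy_example(struct drm_minor *minor)
{
	struct drm_master *copy;

	copy = drm_master_get(minor->master);	/* new holder, new reference */
	/* ... use copy independently of minor->master ... */
	drm_master_put(&copy);			/* drop it; copy is now NULL */
}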
@@ -188,6 +188,7 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
 	}
 	return pt;
 }
+EXPORT_SYMBOL(drm_realloc);

 /**
  * Allocate pages.
@@ -1,294 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-
-int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
-			int shareable)
-{
-	struct drm_device *dev = priv->minor->dev;
-	int ret;
-
-	DRM_ASSERT_LOCKED(&dev->struct_mutex);
-
-	/* The refcount will be bumped to 1 when we add the ref object below. */
-	atomic_set(&item->refcount, 0);
-	item->shareable = shareable;
-	item->owner = priv;
-
-	ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
-					(unsigned long)item, 31, 0, 0);
-	if (ret)
-		return ret;
-
-	ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
-	if (ret)
-		ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_add_user_object);
-
-struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
-{
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_hash_item *hash;
-	int ret;
-	struct drm_user_object *item;
-
-	DRM_ASSERT_LOCKED(&dev->struct_mutex);
-
-	ret = drm_ht_find_item(&dev->object_hash, key, &hash);
-	if (ret)
-		return NULL;
-
-	item = drm_hash_entry(hash, struct drm_user_object, hash);
-
-	if (priv != item->owner) {
-		struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
-		ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
-		if (ret) {
-			DRM_ERROR("Object not registered for usage\n");
-			return NULL;
-		}
-	}
-	return item;
-}
-EXPORT_SYMBOL(drm_lookup_user_object);
-
-static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
-{
-	struct drm_device *dev = priv->minor->dev;
-	int ret;
-
-	if (atomic_dec_and_test(&item->refcount)) {
-		ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
-		BUG_ON(ret);
-		item->remove(priv, item);
-	}
-}
-
-static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
-				 enum drm_ref_type action)
-{
-	int ret = 0;
-
-	switch (action) {
-	case _DRM_REF_USE:
-		atomic_inc(&ro->refcount);
-		break;
-	default:
-		if (!ro->ref_struct_locked) {
-			break;
-		} else {
-			ro->ref_struct_locked(priv, ro, action);
-		}
-	}
-	return ret;
-}
-
-int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
-		       enum drm_ref_type ref_action)
-{
-	int ret = 0;
-	struct drm_ref_object *item;
-	struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
-
-	DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-	if (!referenced_object->shareable && priv != referenced_object->owner) {
-		DRM_ERROR("Not allowed to reference this object\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * If this is not a usage reference, Check that usage has been registered
-	 * first. Otherwise strange things may happen on destruction.
-	 */
-
-	if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
-		item =
-		    drm_lookup_ref_object(priv, referenced_object,
-					  _DRM_REF_USE);
-		if (!item) {
-			DRM_ERROR
-			    ("Object not registered for usage by this client\n");
-			return -EINVAL;
-		}
-	}
-
-	if (NULL !=
-	    (item =
-	     drm_lookup_ref_object(priv, referenced_object, ref_action))) {
-		atomic_inc(&item->refcount);
-		return drm_object_ref_action(priv, referenced_object,
-					     ref_action);
-	}
-
-	item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
-	if (item == NULL) {
-		DRM_ERROR("Could not allocate reference object\n");
-		return -ENOMEM;
-	}
-
-	atomic_set(&item->refcount, 1);
-	item->hash.key = (unsigned long)referenced_object;
-	ret = drm_ht_insert_item(ht, &item->hash);
-	item->unref_action = ref_action;
-
-	if (ret)
-		goto out;
-
-	list_add(&item->list, &priv->refd_objects);
-	ret = drm_object_ref_action(priv, referenced_object, ref_action);
-out:
-	return ret;
-}
-
-struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
-					     struct drm_user_object *referenced_object,
-					     enum drm_ref_type ref_action)
-{
-	struct drm_hash_item *hash;
-	int ret;
-
-	DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-	ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
-			       (unsigned long)referenced_object, &hash);
-	if (ret)
-		return NULL;
-
-	return drm_hash_entry(hash, struct drm_ref_object, hash);
-}
-EXPORT_SYMBOL(drm_lookup_ref_object);
-
-static void drm_remove_other_references(struct drm_file *priv,
-					struct drm_user_object *ro)
-{
-	int i;
-	struct drm_open_hash *ht;
-	struct drm_hash_item *hash;
-	struct drm_ref_object *item;
-
-	for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
-		ht = &priv->refd_object_hash[i];
-		while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
-			item = drm_hash_entry(hash, struct drm_ref_object, hash);
-			drm_remove_ref_object(priv, item);
-		}
-	}
-}
-
-void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
-{
-	int ret;
-	struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
-	struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
-	enum drm_ref_type unref_action;
-
-	DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-	unref_action = item->unref_action;
-	if (atomic_dec_and_test(&item->refcount)) {
-		ret = drm_ht_remove_item(ht, &item->hash);
-		BUG_ON(ret);
-		list_del_init(&item->list);
-		if (unref_action == _DRM_REF_USE)
-			drm_remove_other_references(priv, user_object);
-		drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
-	}
-
-	switch (unref_action) {
-	case _DRM_REF_USE:
-		drm_deref_user_object(priv, user_object);
-		break;
-	default:
-		BUG_ON(!user_object->unref);
-		user_object->unref(priv, user_object, unref_action);
-		break;
-	}
-
-}
-EXPORT_SYMBOL(drm_remove_ref_object);
-
-int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
-			enum drm_object_type type, struct drm_user_object **object)
-{
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_user_object *uo;
-	struct drm_hash_item *hash;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
-	if (ret) {
-		DRM_ERROR("Could not find user object to reference.\n");
-		goto out_err;
-	}
-	uo = drm_hash_entry(hash, struct drm_user_object, hash);
-	if (uo->type != type) {
-		ret = -EINVAL;
-		goto out_err;
-	}
-	ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
-	if (ret)
-		goto out_err;
-	mutex_unlock(&dev->struct_mutex);
-	*object = uo;
-	return 0;
-out_err:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
-			  enum drm_object_type type)
-{
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_user_object *uo;
-	struct drm_ref_object *ro;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	uo = drm_lookup_user_object(priv, user_token);
-	if (!uo || (uo->type != type)) {
-		ret = -EINVAL;
-		goto out_err;
-	}
-	ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
-	if (!ro) {
-		ret = -EINVAL;
-		goto out_err;
-	}
-	drm_remove_ref_object(priv, ro);
-	mutex_unlock(&dev->struct_mutex);
-	return 0;
-out_err:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
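For the record, the round trip the deleted file above implemented, seen from a caller; a hedged sketch of the now-removed API, using a fence handle as the example object type:

/* Sketch of the removed user-object API: take and drop a usage
 * reference on a handle passed in from user space. */
static int my_use_fence_handle(struct drm_file *file_priv, uint32_t handle)
{
	struct drm_user_object *uo;
	struct drm_fence_object *fence;
	int ret;

	ret = drm_user_object_ref(file_priv, handle, drm_fence_type, &uo);
	if (ret)
		return ret;	/* unknown handle or wrong type */

	fence = drm_user_object_entry(uo, struct drm_fence_object, base);
	/* ... use the fence; the ref object keeps it alive meanwhile ... */

	return drm_user_object_unref(file_priv, handle, drm_fence_type);
}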
@@ -34,110 +34,201 @@
 struct drm_device;
 struct drm_bo_mem_reg;

-/***************************************************
- * User space objects. (drm_object.c)
- */
-
-#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-enum drm_object_type {
-	drm_fence_type,
-	drm_buffer_type,
-	drm_lock_type,
-	/*
-	 * Add other user space object types here.
-	 */
-	drm_driver_type0 = 256,
-	drm_driver_type1,
-	drm_driver_type2,
-	drm_driver_type3,
-	drm_driver_type4
-};
-
-/*
- * A user object is a structure that helps the drm give out user handles
- * to kernel internal objects and to keep track of these objects so that
- * they can be destroyed, for example when the user space process exits.
- * Designed to be accessible using a user space 32-bit handle.
- */
-
-struct drm_user_object {
-	struct drm_hash_item hash;
-	struct list_head list;
-	enum drm_object_type type;
-	atomic_t refcount;
-	int shareable;
-	struct drm_file *owner;
-	void (*ref_struct_locked) (struct drm_file *priv,
-				   struct drm_user_object *obj,
-				   enum drm_ref_type ref_action);
-	void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
-		       enum drm_ref_type unref_action);
-	void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
-};
-
-/*
- * A ref object is a structure which is used to
- * keep track of references to user objects and to keep track of these
- * references so that they can be destroyed for example when the user space
- * process exits. Designed to be accessible using a pointer to the _user_ object.
- */
-
-struct drm_ref_object {
-	struct drm_hash_item hash;
-	struct list_head list;
-	atomic_t refcount;
-	enum drm_ref_type unref_action;
-};
-
-/**
- * Must be called with the struct_mutex held.
- */
-extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
-			       int shareable);
-
-/**
- * Must be called with the struct_mutex held.
- */
-extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
-						       uint32_t key);
-
-/*
- * Must be called with the struct_mutex held. May temporarily release it.
- */
-
-extern int drm_add_ref_object(struct drm_file *priv,
-			      struct drm_user_object *referenced_object,
-			      enum drm_ref_type ref_action);
-
-/*
- * Must be called with the struct_mutex held.
- */
-
-struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
-					     struct drm_user_object *referenced_object,
-					     enum drm_ref_type ref_action);
-
-/*
- * Must be called with the struct_mutex held.
- * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
- * release the struct_mutex before calling drm_remove_ref_object.
- * This function may temporarily release the struct_mutex.
- */
-
-extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
-extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
-			       enum drm_object_type type,
-			       struct drm_user_object **object);
-extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
-				 enum drm_object_type type);
-
+#define DRM_FENCE_FLAG_EMIT                0x00000001
+#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
+/**
+ * On hardware with no interrupt events for operation completion,
+ * indicates that the kernel should sleep while waiting for any blocking
+ * operation to complete rather than spinning.
+ *
+ * Has no effect otherwise.
+ */
+#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
+#define DRM_FENCE_FLAG_NO_USER             0x00000010
+
+/* Reserved for driver use */
+#define DRM_FENCE_MASK_DRIVER              0xFF000000
+
+#define DRM_FENCE_TYPE_EXE                 0x00000001
+
+struct drm_fence_arg {
+	unsigned int handle;
+	unsigned int fence_class;
+	unsigned int type;
+	unsigned int flags;
+	unsigned int signaled;
+	unsigned int error;
+	unsigned int sequence;
+	unsigned int pad64;
+	uint64_t expand_pad[2]; /* Future expansion */
+};
+
+/* Buffer permissions, referring to how the GPU uses the buffers.
+ * These translate to fence types used for the buffers.
+ * Typically a texture buffer is read, a destination buffer is write and
+ * a command (batch-) buffer is exe. Can be or-ed together.
+ */
+#define DRM_BO_FLAG_READ        (1ULL << 0)
+#define DRM_BO_FLAG_WRITE       (1ULL << 1)
+#define DRM_BO_FLAG_EXE         (1ULL << 2)
+
+/*
+ * All of the bits related to access mode
+ */
+#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
+/*
+ * Status flags. Can be read to determine the actual state of a buffer.
+ * Can also be set in the buffer mask before validation.
+ */
+
+/*
+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
+ * available to root and must be manually removed before buffer manager shutdown
+ * or lock.
+ * Flags: Acknowledge
+ */
+#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
+
+/*
+ * Mask: Require that the buffer is placed in mappable memory when validated.
+ *       If not set the buffer may or may not be in mappable memory when validated.
+ * Flags: If set, the buffer is in mappable memory.
+ */
+#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)
+
+/* Mask: The buffer should be shareable with other processes.
+ * Flags: The buffer is shareable with other processes.
+ */
+#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)
+
+/* Mask: If set, place the buffer in cache-coherent memory if available.
+ *       If clear, never place the buffer in cache coherent memory if validated.
+ * Flags: The buffer is currently in cache-coherent memory.
+ */
+#define DRM_BO_FLAG_CACHED      (1ULL << 7)
+
+/* Mask: Make sure that every time this buffer is validated,
+ *       it ends up on the same location provided that the memory mask is the same.
+ *       The buffer will also not be evicted when claiming space for
+ *       other buffers. Basically a pinned buffer but it may be thrown out as
+ *       part of buffer manager shutdown or locking.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
+
+/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
+ *       with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
+ *       with unsnooped PTEs instead of snooped, by using chipset-specific cache
+ *       flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
+ *       as the eviction to local memory (TTM unbind) on map is just a side effect
+ *       to prevent aggressive cache prefetch from the GPU disturbing the cache
+ *       management that the DRM is doing.
+ *
+ * Flags: Acknowledge.
+ * Buffers allocated with this flag should not be used for suballocators.
+ * This type may have issues on CPUs with over-aggressive caching:
+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
+ */
+#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
+
+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)
+
+/*
+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
+#define DRM_BO_FLAG_TILE           (1ULL << 15)
+
+/*
+ * Memory type flags that can be or'ed together in the mask, but only
+ * one appears in flags.
+ */
+
+/* System memory */
+#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
+/* Translation table memory */
+#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
+/* Vram memory */
+#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
+/* Up to the driver to define. */
+#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
+#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
+#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
+#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
+#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
+/* We can add more of these now with a 64-bit flag type */
+
+/*
+ * This is a mask covering all of the memory type flags; easier to just
+ * use a single constant than a bunch of | values. It covers
+ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
+ */
+#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
+/*
+ * This adds all of the CPU-mapping options in with the memory
+ * type to label all bits which change how the page gets mapped
+ */
+#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
+				 DRM_BO_FLAG_CACHED_MAPPED | \
+				 DRM_BO_FLAG_CACHED | \
+				 DRM_BO_FLAG_MAPPABLE)
+
+/* Driver-private flags */
+#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
+
+/*
+ * Don't block on validate and map. Instead, return EBUSY.
+ */
+#define DRM_BO_HINT_DONT_BLOCK  0x00000002
+/*
+ * Don't place this buffer on the unfenced list. This means
+ * that the buffer will not end up having a fence associated
+ * with it as a result of this operation
+ */
+#define DRM_BO_HINT_DONT_FENCE  0x00000004
+/**
+ * On hardware with no interrupt events for operation completion,
+ * indicates that the kernel should sleep while waiting for any blocking
+ * operation to complete rather than spinning.
+ *
+ * Has no effect otherwise.
+ */
+#define DRM_BO_HINT_WAIT_LAZY   0x00000008
+/*
+ * The client has computed relocations referring to this buffer using the
+ * offset in the presumed_offset field. If that offset ends up matching
+ * where this buffer lands, the kernel is free to skip executing those
+ * relocations
+ */
+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
+
+#define DRM_BO_MEM_LOCAL 0
+#define DRM_BO_MEM_TT 1
+#define DRM_BO_MEM_VRAM 2
+#define DRM_BO_MEM_PRIV0 3
+#define DRM_BO_MEM_PRIV1 4
+#define DRM_BO_MEM_PRIV2 5
+#define DRM_BO_MEM_PRIV3 6
+#define DRM_BO_MEM_PRIV4 7
+
+#define DRM_BO_MEM_TYPES 8 /* For now. */
+
+#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
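How the mask/flags split documented above is meant to be used: the mask names the bits the caller constrains, the flags give their desired values. A hedged sketch against the drm_bo_do_validate() prototype added further down in this diff:

/* Sketch: place the buffer in TT memory, GPU-readable and -writable,
 * constraining only the memory-type and access bits; with
 * DRM_BO_HINT_DONT_BLOCK the call returns -EBUSY instead of sleeping. */
static int my_validate_for_blit(struct drm_buffer_object *bo)
{
	uint64_t flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
	uint64_t mask  = DRM_BO_MASK_MEM | DRM_BO_MASK_ACCESS;

	/* fence_class 0: assumed here to be the default command-stream class */
	return drm_bo_do_validate(bo, flags, mask, DRM_BO_HINT_DONT_BLOCK, 0);
}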
 /***************************************************
  * Fence objects. (drm_fence.c)
  */
 
 struct drm_fence_object {
-	struct drm_user_object base;
 	struct drm_device *dev;
 	atomic_t usage;
@@ -470,7 +561,6 @@ enum drm_bo_type {
 
 struct drm_buffer_object {
 	struct drm_device *dev;
-	struct drm_user_object base;
 
 	/*
 	 * If there is a possibility that the usage variable is zero,
@@ -546,7 +636,7 @@ struct drm_mem_type_manager {
 };
 
 struct drm_bo_lock {
-	struct drm_user_object base;
+	// struct drm_user_object base;
 	wait_queue_head_t queue;
 	atomic_t write_lock_pending;
 	atomic_t readers;
@@ -655,22 +745,10 @@ struct drm_bo_driver {
 /*
  * buffer objects (drm_bo.c)
  */
-extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+		       uint64_t flags, uint64_t mask, uint32_t hint,
+		       uint32_t fence_class);
 extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
-extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_bo_driver_finish(struct drm_device *dev);
 extern int drm_bo_driver_init(struct drm_device *dev);
 extern int drm_bo_pci_offset(struct drm_device *dev,
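Editor's note: the new drm_bo_do_validate() declaration above takes both a flags and a mask argument. By the usual convention for such pairs in this code base (an assumption here; the declaration alone does not prove it), only the bits selected by the mask are changed, while all other flag bits keep their previous value. A standalone sketch of that convention:

#include <stdint.h>

/* Only bits selected by `mask` are taken from `flags`;
 * everything else is preserved from the old value. */
static uint64_t apply_masked_flags(uint64_t old_flags,
				   uint64_t flags, uint64_t mask)
{
	return (old_flags & ~mask) | (flags & mask);
}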
@@ -707,18 +785,9 @@ extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_c
 extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
 			  unsigned long p_offset, unsigned long p_size,
 			  int kern_init);
-extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-				  uint64_t flags, uint64_t mask, uint32_t hint,
-				  uint32_t fence_class,
-				  struct drm_bo_info_rep *rep,
-				  struct drm_buffer_object **bo_rep);
 extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
 							  uint32_t handle,
 							  int check_owner);
-extern int drm_bo_do_validate(struct drm_buffer_object *bo,
-			      uint64_t flags, uint64_t mask, uint32_t hint,
-			      uint32_t fence_class,
-			      struct drm_bo_info_rep *rep);
 extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
 
 extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
 
@@ -766,8 +835,6 @@ extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
 			   unsigned long dst_offset,
 			   unsigned long *pfn,
 			   pgprot_t *prot);
-extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-				struct drm_bo_info_rep *rep);
 
 /*
|
@ -812,23 +879,6 @@ extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * m
|
||||||
void **virtual);
|
void **virtual);
|
||||||
extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
|
extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
|
||||||
void *virtual);
|
void *virtual);
|
||||||
/*
|
|
||||||
* drm_bo_lock.c
|
|
||||||
* Simple replacement for the hardware lock on buffer manager init and clean.
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
extern void drm_bo_init_lock(struct drm_bo_lock *lock);
|
|
||||||
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
|
|
||||||
extern int drm_bo_read_lock(struct drm_bo_lock *lock,
|
|
||||||
int interruptible);
|
|
||||||
extern int drm_bo_write_lock(struct drm_bo_lock *lock,
|
|
||||||
int interruptible,
|
|
||||||
struct drm_file *file_priv);
|
|
||||||
|
|
||||||
extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
|
|
||||||
struct drm_file *file_priv);
|
|
||||||
|
|
||||||
#ifdef CONFIG_DEBUG_MUTEXES
|
#ifdef CONFIG_DEBUG_MUTEXES
|
||||||
#define DRM_ASSERT_LOCKED(_mutex) \
|
#define DRM_ASSERT_LOCKED(_mutex) \
|
||||||
BUG_ON(!mutex_is_locked(_mutex) || \
|
BUG_ON(!mutex_is_locked(_mutex) || \
|
||||||
|
|
|
@@ -88,30 +88,7 @@ again:
 	return new_id;
 }
 
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
-		return -EINVAL;
-
-	if (!file_priv->master)
-		return -EINVAL;
-
-	if (!file_priv->minor->master && file_priv->minor->master != file_priv->master)
-		file_priv->minor->master = file_priv->master;
-	return 0;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	if (!file_priv->master)
-		return -EINVAL;
-	file_priv->minor->master = NULL;
-	return 0;
-}
-
-struct drm_master *drm_get_master(struct drm_minor *minor)
+struct drm_master *drm_master_create(struct drm_minor *minor)
 {
 	struct drm_master *master;
 
@@ -119,7 +96,7 @@ struct drm_master *drm_get_master(struct drm_minor *minor)
 	if (!master)
 		return NULL;
 
-	// INIT_LIST_HEAD(&master->filelist);
+	kref_init(&master->refcount);
 	spin_lock_init(&master->lock.spinlock);
 	init_waitqueue_head(&master->lock.lock_queue);
 	drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
@@ -131,8 +108,15 @@ struct drm_master *drm_get_master(struct drm_minor *minor)
 	return master;
 }
 
-void drm_put_master(struct drm_master *master)
+struct drm_master *drm_master_get(struct drm_master *master)
 {
+	kref_get(&master->refcount);
+	return master;
+}
+
+static void drm_master_destroy(struct kref *kref)
+{
+	struct drm_master *master = container_of(kref, struct drm_master, refcount);
 	struct drm_magic_entry *pt, *next;
 	struct drm_device *dev = master->minor->dev;
 
@@ -166,21 +150,56 @@ void drm_put_master(struct drm_master *master)
 	drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
 }
 
+void drm_master_put(struct drm_master **master)
+{
+	kref_put(&(*master)->refcount, drm_master_destroy);
+	*master = NULL;
+}
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+		return -EINVAL;
+
+	if (!file_priv->master)
+		return -EINVAL;
+
+	if (!file_priv->minor->master && file_priv->minor->master != file_priv->master) {
+		mutex_lock(&dev->struct_mutex);
+		file_priv->minor->master = drm_master_get(file_priv->master);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	return 0;
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	if (!file_priv->master)
+		return -EINVAL;
+	mutex_lock(&dev->struct_mutex);
+	drm_master_put(&file_priv->minor->master);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
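Editor's note: the drm_master_create()/drm_master_get()/drm_master_put() trio introduced above is the standard kref idiom: creation takes the first reference, get bumps it, and the last put runs the destructor and clears the caller's pointer so it cannot be used after free. A self-contained userspace sketch of the same lifetime rules (hypothetical my_master type; C11 atomics stand in for struct kref):

#include <stdlib.h>
#include <stdatomic.h>

/* Hypothetical stand-in for struct drm_master: a reference-counted
 * object whose last put frees it. */
struct my_master {
	atomic_int refcount;
};

static struct my_master *my_master_create(void)
{
	struct my_master *m = calloc(1, sizeof(*m));
	if (m)
		atomic_init(&m->refcount, 1);	/* creator holds one ref */
	return m;
}

static struct my_master *my_master_get(struct my_master *m)
{
	atomic_fetch_add(&m->refcount, 1);
	return m;	/* returning the pointer lets callers chain, as drm_master_get() does */
}

static void my_master_put(struct my_master **m)
{
	/* fetch_sub returns the previous value; 1 means we dropped the
	 * last reference. Clearing the caller's pointer, as
	 * drm_master_put() does, prevents use through a stale handle. */
	if (atomic_fetch_sub(&(*m)->refcount, 1) == 1)
		free(*m);
	*m = NULL;
}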
 static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 			   const struct pci_device_id *ent,
 			   struct drm_driver *driver)
 {
 	int retcode;
 
+	INIT_LIST_HEAD(&dev->filelist);
 	INIT_LIST_HEAD(&dev->ctxlist);
 	INIT_LIST_HEAD(&dev->vmalist);
 	INIT_LIST_HEAD(&dev->maplist);
-	INIT_LIST_HEAD(&dev->filelist);
 
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
 	spin_lock_init(&dev->tasklet_lock);
-	// spin_lock_init(&dev->lock.spinlock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
@@ -206,12 +225,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 		return -ENOMEM;
 	}
 
-	if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
-		drm_ht_remove(&dev->map_hash);
-		drm_mm_takedown(&dev->offset_manager);
-		return -ENOMEM;
-	}
-
 	/* the DRM has 6 counters */
 	dev->counters = 6;
 	dev->types[0] = _DRM_STAT_LOCK;
@@ -407,10 +420,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 
 	return 0;
 err_g5:
-	drm_put_minor(dev, &dev->primary);
+	drm_put_minor(&dev->primary);
 err_g4:
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(dev, &dev->control);
+		drm_put_minor(&dev->control);
 err_g3:
 	if (!drm_fb_loaded)
 		pci_disable_device(pdev);
@@ -461,14 +474,14 @@ int drm_put_dev(struct drm_device * dev)
  * last minor released.
  *
  */
-int drm_put_minor(struct drm_device *dev, struct drm_minor **minor_p)
+int drm_put_minor(struct drm_minor **minor_p)
 {
 	struct drm_minor *minor = *minor_p;
 	DRM_DEBUG("release secondary minor %d\n", minor->index);
 
 	if (minor->type == DRM_MINOR_LEGACY) {
-		if (dev->driver->proc_cleanup)
-			dev->driver->proc_cleanup(minor);
+		if (minor->dev->driver->proc_cleanup)
+			minor->dev->driver->proc_cleanup(minor);
 		drm_proc_cleanup(minor, drm_proc_root);
 	}
 	drm_sysfs_device_remove(minor);
@@ -715,13 +715,9 @@ static int drm_bo_vm_fault(struct vm_area_struct *vma,
 	unsigned long ret = VM_FAULT_NOPAGE;
 
 	dev = bo->dev;
-	err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
-	if (err)
-		return VM_FAULT_NOPAGE;
 
 	err = mutex_lock_interruptible(&bo->mutex);
 	if (err) {
-		drm_bo_read_unlock(&dev->bm.bm_lock);
 		return VM_FAULT_NOPAGE;
 	}
 
@@ -788,7 +784,6 @@ static int drm_bo_vm_fault(struct vm_area_struct *vma,
 out_unlock:
 	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
 	mutex_unlock(&bo->mutex);
-	drm_bo_read_unlock(&dev->bm.bm_lock);
 	return ret;
 }
 #endif
@@ -1,311 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
-{
-	return drm_agp_init_ttm(dev);
-}
-
-int i915_fence_type(struct drm_buffer_object *bo,
-		    uint32_t *fclass,
-		    uint32_t *type)
-{
-	if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
-		*type = 3;
-	else
-		*type = 1;
-	return 0;
-}
-
-int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
-{
-	/*
-	 * FIXME: Only emit once per batchbuffer submission.
-	 */
-	uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
-
-	if (flags & DRM_BO_FLAG_READ)
-		flush_cmd |= MI_READ_FLUSH;
-	if (flags & DRM_BO_FLAG_EXE)
-		flush_cmd |= MI_EXE_FLUSH;
-
-	return i915_emit_mi_flush(dev, flush_cmd);
-}
-int i915_init_mem_type(struct drm_device *dev, uint32_t type,
-		       struct drm_mem_type_manager *man)
-{
-	switch (type) {
-	case DRM_BO_MEM_LOCAL:
-		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-		    _DRM_FLAG_MEMTYPE_CACHED;
-		man->drm_bus_maptype = 0;
-		man->gpu_offset = 0;
-		break;
-	case DRM_BO_MEM_TT:
-		if (!(drm_core_has_AGP(dev) && dev->agp)) {
-			DRM_ERROR("AGP is not enabled for memory type %u\n",
-				  (unsigned)type);
-			return -EINVAL;
-		}
-		man->io_offset = dev->agp->agp_info.aper_base;
-		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
-		man->io_addr = NULL;
-		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-		    _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
-		man->drm_bus_maptype = _DRM_AGP;
-		man->gpu_offset = 0;
-		break;
-	case DRM_BO_MEM_VRAM:
-		if (!(drm_core_has_AGP(dev) && dev->agp)) {
-			DRM_ERROR("AGP is not enabled for memory type %u\n",
-				  (unsigned)type);
-			return -EINVAL;
-		}
-		man->io_offset = dev->agp->agp_info.aper_base;
-		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
-		man->io_addr = NULL;
-		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-		    _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
-		man->drm_bus_maptype = _DRM_AGP;
-		man->gpu_offset = 0;
-		break;
-	case DRM_BO_MEM_PRIV0:	/* for OS preallocated space */
-		DRM_ERROR("PRIV0 not used yet.\n");
-		break;
-	default:
-		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
-		return -EINVAL;
-	}
-	return 0;
-}
-/*
- * i915_evict_flags:
- *
- * @bo: the buffer object to be evicted
- *
- * Return the bo flags for a buffer which is not mapped to the hardware.
- * These will be placed in proposed_flags so that when the move is
- * finished, they'll end up in bo->mem.flags
- */
-uint64_t i915_evict_flags(struct drm_buffer_object *bo)
-{
-	switch (bo->mem.mem_type) {
-	case DRM_BO_MEM_LOCAL:
-	case DRM_BO_MEM_TT:
-		return DRM_BO_FLAG_MEM_LOCAL;
-	default:
-		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
-	}
-}
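Editor's note: the eviction policy the removed i915_evict_flags() encoded is small enough to restate standalone: local and aperture-backed buffers fall back to local memory, everything else to cached TT. A sketch with illustrative stand-in enums (not the real DRM_BO_* values):

#include <stdint.h>

/* Illustrative stand-ins for the memory types and placement flags. */
enum mem_type { MEM_LOCAL, MEM_TT, MEM_VRAM };
enum place_flag {
	PLACE_LOCAL  = 1 << 0,
	PLACE_TT     = 1 << 1,
	PLACE_CACHED = 1 << 2,
};

static uint32_t evict_placement(enum mem_type cur)
{
	switch (cur) {
	case MEM_LOCAL:
	case MEM_TT:
		return PLACE_LOCAL;
	default:
		return PLACE_TT | PLACE_CACHED;
	}
}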
-#if 0				/* See comment below */
-
-static void i915_emit_copy_blit(struct drm_device * dev,
-				uint32_t src_offset,
-				uint32_t dst_offset,
-				uint32_t pages, int direction)
-{
-	uint32_t cur_pages;
-	uint32_t stride = PAGE_SIZE;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	RING_LOCALS;
-
-	if (!dev_priv)
-		return;
-
-	i915_kernel_lost_context(dev);
-	while (pages > 0) {
-		cur_pages = pages;
-		if (cur_pages > 2048)
-			cur_pages = 2048;
-		pages -= cur_pages;
-
-		BEGIN_LP_RING(6);
-		OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
-			 XY_SRC_COPY_BLT_WRITE_RGB);
-		OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
-			 (1 << 25) | (direction ? (1 << 30) : 0));
-		OUT_RING((cur_pages << 16) | PAGE_SIZE);
-		OUT_RING(dst_offset);
-		OUT_RING(stride & 0xffff);
-		OUT_RING(src_offset);
-		ADVANCE_LP_RING();
-	}
-	return;
-}
-
-static int i915_move_blit(struct drm_buffer_object * bo,
-			  int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
-{
-	struct drm_bo_mem_reg *old_mem = &bo->mem;
-	int dir = 0;
-
-	if ((old_mem->mem_type == new_mem->mem_type) &&
-	    (new_mem->mm_node->start <
-	     old_mem->mm_node->start + old_mem->mm_node->size)) {
-		dir = 1;
-	}
-
-	i915_emit_copy_blit(bo->dev,
-			    old_mem->mm_node->start << PAGE_SHIFT,
-			    new_mem->mm_node->start << PAGE_SHIFT,
-			    new_mem->num_pages, dir);
-
-	i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
-
-	return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
-					 DRM_FENCE_TYPE_EXE |
-					 DRM_I915_FENCE_TYPE_RW,
-					 DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
-}
-
-/*
- * Flip destination ttm into cached-coherent AGP,
- * then blit and subsequently move out again.
- */
-static int i915_move_flip(struct drm_buffer_object * bo,
-			  int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
-{
-	struct drm_device *dev = bo->dev;
-	struct drm_bo_mem_reg tmp_mem;
-	int ret;
-
-	tmp_mem = *new_mem;
-	tmp_mem.mm_node = NULL;
-	tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
-	    DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
-
-	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
-	if (ret)
-		return ret;
-
-	ret = drm_bind_ttm(bo->ttm, &tmp_mem);
-	if (ret)
-		goto out_cleanup;
-
-	ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
-	if (ret)
-		goto out_cleanup;
-
-	ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
-out_cleanup:
-	if (tmp_mem.mm_node) {
-		mutex_lock(&dev->struct_mutex);
-		if (tmp_mem.mm_node != bo->pinned_node)
-			drm_mm_put_block(tmp_mem.mm_node);
-		tmp_mem.mm_node = NULL;
-		mutex_unlock(&dev->struct_mutex);
-	}
-	return ret;
-}
-
-#endif
-
-/*
- * Disable i915_move_flip for now, since we can't guarantee that the hardware
- * lock is held here. To re-enable we need to make sure either
- * a) The X server is using DRM to submit commands to the ring, or
- * b) DRM can use the HP ring for these blits. This means i915 needs to
- *    implement a new ring submission mechanism and fence class.
- */
-int i915_move(struct drm_buffer_object *bo,
-	      int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-{
-	struct drm_bo_mem_reg *old_mem = &bo->mem;
-
-	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
-		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
-		if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
-			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	} else {
-		if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
-			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	}
-	return 0;
-}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
-static inline void clflush(volatile void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-#endif
-
-static inline void drm_cache_flush_addr(void *virt)
-{
-#ifdef cpu_has_clflush
-	int i;
-
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		clflush(virt+i);
-#endif
-}
-
-static inline void drm_cache_flush_page(struct page *p)
-{
-	drm_cache_flush_addr(page_address(p));
-}
-
-void i915_flush_ttm(struct drm_ttm *ttm)
-{
-	int i;
-
-	if (!ttm)
-		return;
-
-	DRM_MEMORYBARRIER();
-
-#ifdef CONFIG_X86_32
-#ifndef cpu_has_clflush
-#define cpu_has_clflush 0
-#endif
-	/* Hopefully nobody has built an x86-64 processor without clflush */
-	if (!cpu_has_clflush) {
-		wbinvd();
-		DRM_MEMORYBARRIER();
-		return;
-	}
-#endif
-
-	for (i = ttm->num_pages - 1; i >= 0; i--)
-		drm_cache_flush_page(drm_ttm_get_page(ttm, i));
-
-	DRM_MEMORYBARRIER();
-}
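Editor's note: the drm_cache_flush_addr() loop above walks one page in cache-line-sized steps. A minimal userspace equivalent, assuming a 64-byte line (the kernel reads the real stride from boot_cpu_data rather than hard-coding it):

#include <stddef.h>

#define CACHE_LINE 64	/* assumption; common on x86, not guaranteed */

static inline void clflush_one(volatile void *p)
{
#if defined(__x86_64__) || defined(__i386__)
	/* Flush the cache line containing *p back to memory. */
	__asm__ volatile("clflush %0" : "+m" (*(volatile char *)p));
#else
	(void)p;	/* no-op on other architectures in this sketch */
#endif
}

static void cache_flush_range(void *virt, size_t len)
{
	for (size_t i = 0; i < len; i += CACHE_LINE)
		clflush_one((char *)virt + i);
}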
@@ -1,921 +0,0 @@
-/*
- * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- *     Dave Airlie
- *     Keith Packard
- *     ... ?
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#if DRM_DEBUG_CODE
-#define DRM_DEBUG_RELOCATION	(drm_debug != 0)
-#else
-#define DRM_DEBUG_RELOCATION	0
-#endif
-
-enum i915_buf_idle {
-	I915_RELOC_UNCHECKED,
-	I915_RELOC_IDLE,
-	I915_RELOC_BUSY
-};
-
-struct i915_relocatee_info {
-	struct drm_buffer_object *buf;
-	unsigned long offset;
-	uint32_t *data_page;
-	unsigned page_offset;
-	struct drm_bo_kmap_obj kmap;
-	int is_iomem;
-	int dst;
-	int idle;
-	int performed_ring_relocs;
-#ifdef DRM_KMAP_ATOMIC_PROT_PFN
-	unsigned long pfn;
-	pgprot_t pg_prot;
-#endif
-};
-
-struct drm_i915_validate_buffer {
-	struct drm_buffer_object *buffer;
-	int presumed_offset_correct;
-	void __user *data;
-	int ret;
-	enum i915_buf_idle idle;
-};
-
-/*
- * I'd like to use MI_STORE_DATA_IMM here, but I can't make
- * it work. Seems like GART writes are broken with that
- * instruction. Also I'm not sure that MI_FLUSH will
- * act as a memory barrier for that instruction. It will
- * for this single dword 2D blit.
- */
-static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
-				 uint32_t value)
-{
-	struct drm_i915_private *dev_priv =
-	    (struct drm_i915_private *)dev->dev_private;
-
-	RING_LOCALS;
-	i915_kernel_lost_context(dev);
-	BEGIN_LP_RING(6);
-	OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
-	OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
-	OUT_RING((0x1 << 16) | (0x4));
-	OUT_RING(offset);
-	OUT_RING(value);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
-}
-
-static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
-					    *buffers, unsigned num_buffers)
-{
-	while (num_buffers--)
-		drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
-}
-int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
-		     struct drm_i915_validate_buffer *buffers,
-		     struct i915_relocatee_info *relocatee, uint32_t * reloc)
-{
-	unsigned index;
-	unsigned long new_cmd_offset;
-	u32 val;
-	int ret, i;
-	int buf_index = -1;
-
-	/*
-	 * FIXME: O(relocs * buffers) complexity.
-	 */
-	for (i = 0; i < num_buffers; i++)
-		if (buffers[i].buffer)
-			if (reloc[2] == buffers[i].buffer->base.hash.key)
-				buf_index = i;
-
-	if (buf_index == -1) {
-		DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
-		return -EINVAL;
-	}
-
-	/*
-	 * Short-circuit relocations that were correctly
-	 * guessed by the client
-	 */
-	if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
-		return 0;
-
-	new_cmd_offset = reloc[0];
-	if (!relocatee->data_page ||
-	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
-		struct drm_bo_mem_reg *mem = &relocatee->buf->mem;
-
-		drm_bo_kunmap(&relocatee->kmap);
-		relocatee->data_page = NULL;
-		relocatee->offset = new_cmd_offset;
-
-		if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
-			ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
-			if (ret)
-				return ret;
-			relocatee->idle = I915_RELOC_IDLE;
-		}
-
-		if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
-			     (mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
-			drm_bo_evict_cached(relocatee->buf);
-
-		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
-				  1, &relocatee->kmap);
-		if (ret) {
-			DRM_ERROR("Could not map command buffer to apply relocs %08lx\n",
-				  new_cmd_offset);
-			return ret;
-		}
-		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
-						       &relocatee->is_iomem);
-		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
-	}
-
-	val = buffers[buf_index].buffer->offset;
-	index = (reloc[0] - relocatee->page_offset) >> 2;
-
-	/* add in validate */
-	val = val + reloc[1];
-
-	if (DRM_DEBUG_RELOCATION) {
-		if (buffers[buf_index].presumed_offset_correct &&
-		    relocatee->data_page[index] != val) {
-			DRM_DEBUG
-			    ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
-			     reloc[0], reloc[1], buf_index,
-			     relocatee->data_page[index], val);
-		}
-	}
-
-	if (relocatee->is_iomem)
-		iowrite32(val, relocatee->data_page + index);
-	else
-		relocatee->data_page[index] = val;
-	return 0;
-}
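Editor's note: the patch written by i915_apply_reloc() boils down to two lines of arithmetic: reloc[0] is the byte offset of the dword to patch in the command buffer, and reloc[1] is a delta added to the target buffer's final GPU offset. A standalone sketch of just that computation:

#include <stdint.h>

/* Patch one relocation entry into a mapped command-buffer page.
 * page_offset is the byte offset of the mapped page's first byte
 * within the buffer, matching relocatee->page_offset above. */
static void apply_reloc(uint32_t *data_page, uint32_t page_offset,
			const uint32_t reloc[2], uint32_t target_offset)
{
	uint32_t index = (reloc[0] - page_offset) >> 2;	/* dword index */

	data_page[index] = target_offset + reloc[1];
}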
-int i915_process_relocs(struct drm_file *file_priv,
-			uint32_t buf_handle,
-			uint32_t __user ** reloc_user_ptr,
-			struct i915_relocatee_info *relocatee,
-			struct drm_i915_validate_buffer *buffers,
-			uint32_t num_buffers)
-{
-	int ret, reloc_stride;
-	uint32_t cur_offset;
-	uint32_t reloc_count;
-	uint32_t reloc_type;
-	uint32_t reloc_buf_size;
-	uint32_t *reloc_buf = NULL;
-	int i;
-
-	/* do a copy from user from the user ptr */
-	ret = get_user(reloc_count, *reloc_user_ptr);
-	if (ret) {
-		DRM_ERROR("Could not map relocation buffer.\n");
-		goto out;
-	}
-
-	ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
-	if (ret) {
-		DRM_ERROR("Could not map relocation buffer.\n");
-		goto out;
-	}
-
-	if (reloc_type != 0) {
-		DRM_ERROR("Unsupported relocation type requested\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	reloc_buf_size =
-	    (I915_RELOC_HEADER +
-	     (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
-	reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
-	if (!reloc_buf) {
-		DRM_ERROR("Out of memory for reloc buffer\n");
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	/* get next relocate buffer handle */
-	*reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
-
-	reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);	/* may be different for other types of relocs */
-
-	DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
-		  *reloc_user_ptr);
-
-	for (i = 0; i < reloc_count; i++) {
-		cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
-
-		ret = i915_apply_reloc(file_priv, num_buffers, buffers,
-				       relocatee, reloc_buf + cur_offset);
-		if (ret)
-			goto out;
-	}
-
-out:
-	if (reloc_buf)
-		kfree(reloc_buf);
-
-	if (relocatee->data_page) {
-		drm_bo_kunmap(&relocatee->kmap);
-		relocatee->data_page = NULL;
-	}
-
-	return ret;
-}
-static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
-			   uint32_t __user * reloc_user_ptr,
-			   struct drm_i915_validate_buffer *buffers,
-			   uint32_t buf_count)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct i915_relocatee_info relocatee;
-	int ret = 0;
-	int b;
-
-	/*
-	 * Short circuit relocations when all previous
-	 * buffers offsets were correctly guessed by
-	 * the client
-	 */
-	if (!DRM_DEBUG_RELOCATION) {
-		for (b = 0; b < buf_count; b++)
-			if (!buffers[b].presumed_offset_correct)
-				break;
-
-		if (b == buf_count)
-			return 0;
-	}
-
-	memset(&relocatee, 0, sizeof(relocatee));
-	relocatee.idle = I915_RELOC_UNCHECKED;
-
-	mutex_lock(&dev->struct_mutex);
-	relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-	if (!relocatee.buf) {
-		DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
-		ret = -EINVAL;
-		goto out_err;
-	}
-
-	mutex_lock(&relocatee.buf->mutex);
-	while (reloc_user_ptr) {
-		ret =
-		    i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
-					&relocatee, buffers, buf_count);
-		if (ret) {
-			DRM_ERROR("process relocs failed\n");
-			goto out_err1;
-		}
-	}
-
-out_err1:
-	mutex_unlock(&relocatee.buf->mutex);
-	drm_bo_usage_deref_unlocked(&relocatee.buf);
-out_err:
-	return ret;
-}
-static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
-{
-	if (relocatee->data_page) {
-#ifndef DRM_KMAP_ATOMIC_PROT_PFN
-		drm_bo_kunmap(&relocatee->kmap);
-#else
-		kunmap_atomic(relocatee->data_page, KM_USER0);
-#endif
-		relocatee->data_page = NULL;
-	}
-	relocatee->buf = NULL;
-	relocatee->dst = ~0;
-}
-
-static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
-				 struct drm_i915_validate_buffer *buffers,
-				 unsigned int dst, unsigned long dst_offset)
-{
-	int ret;
-
-	if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
-		i915_clear_relocatee(relocatee);
-		relocatee->dst = dst;
-		relocatee->buf = buffers[dst].buffer;
-		relocatee->idle = buffers[dst].idle;
-
-		/*
-		 * Check for buffer idle. If the buffer is busy, revert to
-		 * ring relocations.
-		 */
-		if (relocatee->idle == I915_RELOC_UNCHECKED) {
-			preempt_enable();
-			mutex_lock(&relocatee->buf->mutex);
-
-			ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
-			if (ret == 0)
-				relocatee->idle = I915_RELOC_IDLE;
-			else {
-				relocatee->idle = I915_RELOC_BUSY;
-				relocatee->performed_ring_relocs = 1;
-			}
-			mutex_unlock(&relocatee->buf->mutex);
-			preempt_disable();
-			buffers[dst].idle = relocatee->idle;
-		}
-	}
-
-	if (relocatee->idle == I915_RELOC_BUSY)
-		return 0;
-
-	if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
-		DRM_ERROR("Relocation destination out of bounds.\n");
-		return -EINVAL;
-	}
-	if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
-		     NULL == relocatee->data_page)) {
-#ifdef DRM_KMAP_ATOMIC_PROT_PFN
-		if (NULL != relocatee->data_page) {
-			kunmap_atomic(relocatee->data_page, KM_USER0);
-			relocatee->data_page = NULL;
-		}
-		ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
-				      &relocatee->pfn, &relocatee->pg_prot);
-		if (ret) {
-			DRM_ERROR("Can't map relocation destination.\n");
-			return -EINVAL;
-		}
-		relocatee->data_page =
-		    kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
-					 relocatee->pg_prot);
-#else
-		if (NULL != relocatee->data_page) {
-			drm_bo_kunmap(&relocatee->kmap);
-			relocatee->data_page = NULL;
-		}
-
-		ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
-				  1, &relocatee->kmap);
-		if (ret) {
-			DRM_ERROR("Can't map relocation destination.\n");
-			return ret;
-		}
-
-		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
-						       &relocatee->is_iomem);
-#endif
-		relocatee->page_offset = dst_offset & PAGE_MASK;
-	}
-	return 0;
-}
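Editor's note: i915_update_relocatee() avoids one kmap per relocation by keeping the current destination page mapped and only remapping when drm_bo_same_page() reports that the next relocation falls outside it. The page test itself is a single mask comparison; a sketch assuming 4 KiB pages (the kernel uses PAGE_SIZE/PAGE_MASK instead of these constants):

#include <stdint.h>

#define PG_SIZE 4096u
#define PG_MASK (~(PG_SIZE - 1))

/* Two byte offsets share a page iff they agree in all bits above
 * the page size; a remap is needed only when this returns 0. */
static int same_page(uint32_t a, uint32_t b)
{
	return (a & PG_MASK) == (b & PG_MASK);
}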
-static int i915_apply_post_reloc(uint32_t reloc[],
-				 struct drm_i915_validate_buffer *buffers,
-				 uint32_t num_buffers,
-				 struct i915_relocatee_info *relocatee)
-{
-	uint32_t reloc_buffer = reloc[2];
-	uint32_t dst_buffer = reloc[3];
-	uint32_t val;
-	uint32_t index;
-	int ret;
-
-	if (likely(buffers[reloc_buffer].presumed_offset_correct))
-		return 0;
-	if (unlikely(reloc_buffer >= num_buffers)) {
-		DRM_ERROR("Invalid reloc buffer index.\n");
-		return -EINVAL;
-	}
-	if (unlikely(dst_buffer >= num_buffers)) {
-		DRM_ERROR("Invalid dest buffer index.\n");
-		return -EINVAL;
-	}
-
-	ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
-	if (unlikely(ret))
-		return ret;
-
-	val = buffers[reloc_buffer].buffer->offset;
-	index = (reloc[0] - relocatee->page_offset) >> 2;
-	val = val + reloc[1];
-
-	if (relocatee->idle == I915_RELOC_BUSY) {
-		i915_emit_ring_reloc(relocatee->buf->dev,
-				     relocatee->buf->offset + reloc[0], val);
-		return 0;
-	}
-#ifdef DRM_KMAP_ATOMIC_PROT_PFN
-	relocatee->data_page[index] = val;
-#else
-	if (likely(relocatee->is_iomem))
-		iowrite32(val, relocatee->data_page + index);
-	else
-		relocatee->data_page[index] = val;
-#endif
-
-	return 0;
-}
-static int i915_post_relocs(struct drm_file *file_priv,
-			    uint32_t __user * new_reloc_ptr,
-			    struct drm_i915_validate_buffer *buffers,
-			    unsigned int num_buffers)
-{
-	uint32_t *reloc;
-	uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
-	uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
-	struct i915_relocatee_info relocatee;
-	uint32_t reloc_type;
-	uint32_t num_relocs;
-	uint32_t count;
-	int ret = 0;
-	int i;
-	int short_circuit = 1;
-	uint32_t __user *reloc_ptr;
-	uint64_t new_reloc_data;
-	uint32_t reloc_buf_size;
-	uint32_t *reloc_buf;
-
-	for (i = 0; i < num_buffers; ++i) {
-		if (unlikely(!buffers[i].presumed_offset_correct)) {
-			short_circuit = 0;
-			break;
-		}
-	}
-
-	if (likely(short_circuit))
-		return 0;
-
-	memset(&relocatee, 0, sizeof(relocatee));
-
-	while (new_reloc_ptr) {
-		reloc_ptr = new_reloc_ptr;
-
-		ret = get_user(num_relocs, reloc_ptr);
-		if (unlikely(ret))
-			goto out;
-		if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
-					header_size +
-					num_relocs * reloc_stride)))
-			return -EFAULT;
-
-		ret = __get_user(reloc_type, reloc_ptr + 1);
-		if (unlikely(ret))
-			goto out;
-
-		if (unlikely(reloc_type != 1)) {
-			DRM_ERROR("Unsupported relocation type requested.\n");
-			ret = -EINVAL;
-			goto out;
-		}
-
-		ret = __get_user(new_reloc_data, reloc_ptr + 2);
-		new_reloc_ptr = (uint32_t __user *) (unsigned long)
-		    new_reloc_data;
-
-		reloc_ptr += I915_RELOC_HEADER;
-
-		if (num_relocs == 0)
-			goto out;
-
-		reloc_buf_size =
-		    (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
-		reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
-		if (!reloc_buf) {
-			DRM_ERROR("Out of memory for reloc buffer\n");
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
-			ret = -EFAULT;
-			goto out;
-		}
-		reloc = reloc_buf;
-		preempt_disable();
-		for (count = 0; count < num_relocs; ++count) {
-			ret = i915_apply_post_reloc(reloc, buffers,
-						    num_buffers, &relocatee);
-			if (unlikely(ret)) {
-				preempt_enable();
-				goto out;
-			}
-			reloc += I915_RELOC0_STRIDE;
-		}
-		preempt_enable();
-
-		if (reloc_buf) {
-			kfree(reloc_buf);
-			reloc_buf = NULL;
-		}
-		i915_clear_relocatee(&relocatee);
-	}
-
-out:
-	/*
-	 * Flush ring relocs so the command parser will pick them up.
-	 */
-	if (relocatee.performed_ring_relocs)
-		(void)i915_emit_mi_flush(file_priv->minor->dev, 0);
-
-	i915_clear_relocatee(&relocatee);
-	if (reloc_buf) {
-		kfree(reloc_buf);
-		reloc_buf = NULL;
-	}
-
-	return ret;
-}
-static int i915_check_presumed(struct drm_i915_op_arg *arg,
-			       struct drm_buffer_object *bo,
-			       uint32_t __user * data, int *presumed_ok)
-{
-	struct drm_bo_op_req *req = &arg->d.req;
-	uint32_t hint_offset;
-	uint32_t hint = req->bo_req.hint;
-
-	*presumed_ok = 0;
-
-	if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
-		return 0;
-	if (bo->offset == req->bo_req.presumed_offset) {
-		*presumed_ok = 1;
-		return 0;
-	}
-
-	/*
-	 * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
-	 * the user-space IOCTL argument list, since the buffer has moved,
-	 * we're about to apply relocations and we might subsequently
-	 * hit an -EAGAIN. In that case the argument list will be reused by
-	 * user-space, but the presumed offset is no longer valid.
-	 *
-	 * Needless to say, this is a bit ugly.
-	 */
-	hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
-	hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
-	return __put_user(hint, data + hint_offset);
-}
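Editor's note: the contract i915_check_presumed() enforces can be restated in a few lines: a client guess only counts when the presumed-offset hint bit was set and the kernel placed the buffer exactly where the client presumed. A standalone restatement (hint value copied from the header earlier in this diff):

#include <stdint.h>

#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010

/* Returns nonzero when the kernel may skip the buffer's relocations. */
static int presumed_offset_ok(uint32_t hint, uint64_t actual_offset,
			      uint64_t presumed_offset)
{
	return (hint & DRM_BO_HINT_PRESUMED_OFFSET) &&
	       actual_offset == presumed_offset;
}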
-/*
- * Validate, add fence and relocate a block of bos from a userspace list
- */
-int i915_validate_buffer_list(struct drm_file *file_priv,
-			      unsigned int fence_class, uint64_t data,
-			      struct drm_i915_validate_buffer *buffers,
-			      uint32_t * num_buffers,
-			      uint32_t __user ** post_relocs)
-{
-	struct drm_i915_op_arg arg;
-	struct drm_bo_op_req *req = &arg.d.req;
-	int ret = 0;
-	unsigned buf_count = 0;
-	uint32_t buf_handle;
-	uint32_t __user *reloc_user_ptr;
-	struct drm_i915_validate_buffer *item = buffers;
-	*post_relocs = NULL;
-
-	do {
-		if (buf_count >= *num_buffers) {
-			DRM_ERROR("Buffer count exceeded %d.\n", *num_buffers);
-			ret = -EINVAL;
-			goto out_err;
-		}
-		item = buffers + buf_count;
-		item->buffer = NULL;
-		item->presumed_offset_correct = 0;
-		item->idle = I915_RELOC_UNCHECKED;
-
-		if (copy_from_user
-		    (&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
-			ret = -EFAULT;
-			goto out_err;
-		}
-
-		ret = 0;
-		if (req->op != drm_bo_validate) {
-			DRM_ERROR
-			    ("Buffer object operation wasn't \"validate\".\n");
-			ret = -EINVAL;
-			goto out_err;
-		}
-		item->ret = 0;
-		item->data = (void __user *)(unsigned long)data;
-
-		buf_handle = req->bo_req.handle;
-		reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
-
-		/*
-		 * Switch mode to post-validation relocations?
-		 */
-		if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
-			     (reloc_user_ptr != NULL))) {
-			uint32_t reloc_type;
-
-			ret = get_user(reloc_type, reloc_user_ptr + 1);
-			if (ret)
-				goto out_err;
-
-			if (reloc_type == 1)
-				*post_relocs = reloc_user_ptr;
-		}
-
-		if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
-			ret =
-			    i915_exec_reloc(file_priv, buf_handle,
-					    reloc_user_ptr, buffers, buf_count);
-			if (ret)
-				goto out_err;
-			DRM_MEMORYBARRIER();
-		}
-
-		ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
-					     req->bo_req.flags,
-					     req->bo_req.mask, req->bo_req.hint,
-					     req->bo_req.fence_class,
-					     NULL, &item->buffer);
-		if (ret) {
-			DRM_ERROR("error on handle validate %d\n", ret);
-			goto out_err;
-		}
-
-		buf_count++;
-
-		ret = i915_check_presumed(&arg, item->buffer,
-					  (uint32_t __user *)
-					  (unsigned long)data,
-					  &item->presumed_offset_correct);
-		if (ret)
-			goto out_err;
-
-		data = arg.next;
-	} while (data != 0);
-out_err:
-	*num_buffers = buf_count;
-	item->ret = (ret != -EAGAIN) ? ret : 0;
-	return ret;
-}
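Editor's note: the do/while loop in i915_validate_buffer_list() walks a chain of operation arguments living in user memory: each arg carries the user address of the next one in arg.next, and 0 terminates the chain. The shape of that walk, with array indices standing in for user addresses in this sketch:

#include <stdint.h>

struct op_arg {
	uint64_t next;	/* "user address" of the next arg; 0 ends the chain */
};

/* Count the chain, refusing to exceed a caller-supplied bound the
 * way the kernel loop refuses to exceed *num_buffers. */
static unsigned count_op_args(const struct op_arg *args, uint64_t first,
			      unsigned max)
{
	unsigned n = 0;
	uint64_t cur = first;

	while (cur != 0 && n < max) {
		n++;
		cur = args[cur].next;
	}
	return n;
}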
-/*
- * Remove all buffers from the unfenced list.
- * If the execbuffer operation was aborted, for example due to a signal,
- * this also makes sure that buffers retain their original state and
- * fence pointers.
- * Copy back buffer information to user-space unless we were interrupted
- * by a signal. In which case the IOCTL must be rerun.
- */
-static int i915_handle_copyback(struct drm_device *dev,
-				struct drm_i915_validate_buffer *buffers,
-				unsigned int num_buffers, int ret)
-{
-	int err = ret;
-	int i;
-	struct drm_i915_op_arg arg;
-	struct drm_buffer_object *bo;
-
-	if (ret)
-		drm_putback_buffer_objects(dev);
-
-	if (ret != -EAGAIN) {
-		for (i = 0; i < num_buffers; ++i) {
-			arg.handled = 1;
-			arg.d.rep.ret = buffers->ret;
-			bo = buffers->buffer;
-			mutex_lock(&bo->mutex);
-			drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
-			mutex_unlock(&bo->mutex);
-			if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
-				err = -EFAULT;
-			buffers++;
-		}
-	}
-
-	return err;
-}
-/*
- * Create a fence object, and if that fails, pretend that everything is
- * OK and just idle the GPU.
- */
-void i915_fence_or_sync(struct drm_file *file_priv,
-			uint32_t fence_flags,
-			struct drm_fence_arg *fence_arg,
-			struct drm_fence_object **fence_p)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	int ret;
-	struct drm_fence_object *fence;
-
-	ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
-
-	if (ret) {
-
-		/*
-		 * Fence creation failed.
-		 * Fall back to synchronous operation and idle the engine.
-		 */
-		(void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
-		(void)i915_quiescent(dev);
-
-		if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-
-			/*
-			 * Communicate to user-space that
-			 * fence creation has failed and that
-			 * the engine is idle.
-			 */
-			fence_arg->handle = ~0;
-			fence_arg->error = ret;
-		}
-		drm_putback_buffer_objects(dev);
-		if (fence_p)
-			*fence_p = NULL;
-		return;
-	}
-
-	if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-
-		ret = drm_fence_add_user_object(file_priv, fence,
-						fence_flags &
-						DRM_FENCE_FLAG_SHAREABLE);
-		if (!ret)
-			drm_fence_fill_arg(fence, fence_arg);
-		else {
-			/*
-			 * Fence user object creation failed.
-			 * We must idle the engine here as well, as user-
-			 * space expects a fence object to wait on. Since we
-			 * have a fence object we wait for it to signal
-			 * to indicate engine "sufficiently" idle.
-			 */
-			(void)drm_fence_object_wait(fence, 0, 1, fence->type);
-			drm_fence_usage_deref_unlocked(&fence);
-			fence_arg->handle = ~0;
-			fence_arg->error = ret;
-		}
-	}
-
-	if (fence_p)
-		*fence_p = fence;
-	else if (fence)
-		drm_fence_usage_deref_unlocked(&fence);
-}
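Editor's note: i915_fence_or_sync() implements a simple fallback: prefer handing back an asynchronous fence, and when one cannot be created, synchronously drain the engine so the caller may proceed as if everything signalled. A skeleton of that pattern, with stub helpers standing in for the fence and flush calls (the stubs are placeholders, not the driver API):

#include <stddef.h>

struct fence { int id; };

/* Stub: stands in for drm_fence_buffer_objects(), which may fail. */
static struct fence *fence_create(void)
{
	return NULL;	/* pretend creation failed */
}

/* Stub: stands in for i915_emit_mi_flush() + i915_quiescent(). */
static void engine_flush_and_idle(void)
{
}

/* A NULL result tells the caller the work already completed. */
static struct fence *fence_or_sync(void)
{
	struct fence *f = fence_create();

	if (!f)
		engine_flush_and_idle();
	return f;
}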
int i915_execbuffer(struct drm_device *dev, void *data,
|
|
||||||
struct drm_file *file_priv)
|
|
||||||
{
|
|
||||||
struct drm_i915_private *dev_priv = (struct drm_i915_private *)
|
|
||||||
dev->dev_private;
|
|
||||||
struct drm_i915_master_private *master_priv =
|
|
||||||
(struct drm_i915_master_private *)
|
|
||||||
dev->primary->master->driver_priv;
|
|
||||||
struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
|
|
||||||
master_priv->sarea_priv;
|
|
||||||
struct drm_i915_execbuffer *exec_buf = data;
|
|
||||||
struct drm_i915_batchbuffer *batch = &exec_buf->batch;
|
|
||||||
struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
|
|
||||||
int num_buffers;
|
|
||||||
int ret;
|
|
||||||
uint32_t __user *post_relocs;
|
|
||||||
|
|
||||||
if (!dev_priv->allow_batchbuffer) {
|
|
||||||
DRM_ERROR("Batchbuffer ioctl disabled\n");
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
|
|
||||||
batch->num_cliprects *
|
|
||||||
sizeof(struct
|
|
||||||
drm_clip_rect)))
|
|
||||||
return -EFAULT;
|
|
||||||
|
|
||||||
if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The cmdbuf_mutex makes sure the validate-submit-fence
|
|
||||||
* operation is atomic.
|
|
||||||
*/
|
|
||||||
|
|
||||||
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
|
|
||||||
if (ret) {
|
|
||||||
drm_bo_read_unlock(&dev->bm.bm_lock);
|
|
||||||
return -EAGAIN;
|
|
||||||
}
|
|
||||||
|
|
||||||
num_buffers = exec_buf->num_buffers;
|
|
||||||
|
|
||||||
if (!dev_priv->val_bufs) {
|
|
||||||
dev_priv->val_bufs =
|
|
||||||
vmalloc(sizeof(struct drm_i915_validate_buffer) *
|
|
||||||
dev_priv->max_validate_buffers);
|
|
||||||
}
|
|
||||||
if (!dev_priv->val_bufs) {
|
|
||||||
drm_bo_read_unlock(&dev->bm.bm_lock);
|
|
||||||
mutex_unlock(&dev_priv->cmdbuf_mutex);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* validate buffer list + fixup relocations */
|
|
||||||
ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
|
|
||||||
dev_priv->val_bufs, &num_buffers,
|
|
||||||
&post_relocs);
|
|
||||||
if (ret)
|
|
||||||
goto out_err0;
|
|
||||||
|
|
||||||
if (post_relocs) {
|
|
||||||
ret = i915_post_relocs(file_priv, post_relocs,
|
|
||||||
dev_priv->val_bufs, num_buffers);
|
|
||||||
if (ret)
|
|
||||||
goto out_err0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* make sure all previous memory operations have passed */
|
|
||||||
DRM_MEMORYBARRIER();
|
|
||||||
|
|
||||||
if (!post_relocs) {
|
|
||||||
drm_agp_chipset_flush(dev);
|
|
||||||
batch->start =
|
|
||||||
dev_priv->val_bufs[num_buffers - 1].buffer->offset;
|
|
||||||
} else {
|
|
||||||
batch->start += dev_priv->val_bufs[0].buffer->offset;
|
|
||||||
}
|
|
||||||
|
|
||||||
DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
|
|
||||||
batch->start, batch->used, batch->num_cliprects);
|
|
||||||
|
|
||||||
ret = i915_dispatch_batchbuffer(dev, batch);
|
|
||||||
if (ret)
|
|
||||||
goto out_err0;
|
|
||||||
if (sarea_priv)
|
|
||||||
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
|
|
||||||
i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
|
|
||||||
|
|
||||||
out_err0:
|
|
||||||
ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
|
||||||
i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
|
|
||||||
mutex_unlock(&dev->struct_mutex);
|
|
||||||
mutex_unlock(&dev_priv->cmdbuf_mutex);
|
|
||||||
drm_bo_read_unlock(&dev->bm.bm_lock);
|
|
||||||
return ret;
|
|
||||||
}
|
|
|
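To orient readers on how this path is driven, here is a minimal, hypothetical userspace sketch. The ioctl request name and the exact struct layout are assumptions inferred from the fields i915_execbuffer() dereferences above (num_buffers, ops_list, batch, fence_arg); the authoritative definitions are in this branch's i915_drm.h.

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"	/* assumed to provide struct drm_i915_execbuffer */

/* Hypothetical sketch, not taken verbatim from this tree's headers. */
static int submit_batch(int fd, uint64_t ops_list, uint32_t num_bufs,
			uint32_t batch_len)
{
	struct drm_i915_execbuffer eb;

	memset(&eb, 0, sizeof(eb));
	eb.num_buffers = num_bufs;	/* checked against max_validate_buffers */
	eb.ops_list = ops_list;		/* user pointer to the validate-op stream */
	eb.batch.used = batch_len;	/* bytes of commands in the batch buffer */
	eb.batch.num_cliprects = 0;	/* nothing for DRM_VERIFYAREA_READ to verify */
	eb.fence_arg.flags = 0;

	/* DRM_IOCTL_I915_EXECBUFFER is an assumption about the request name */
	if (ioctl(fd, DRM_IOCTL_I915_EXECBUFFER, &eb))
		return -1;

	/* eb.fence_arg.handle now names a fence object, or ~0 if fence
	 * creation failed and the kernel idled the engine instead */
	return 0;
}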
@ -1,273 +0,0 @@
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/*
 * Initiate a sync flush if it's not already pending.
 */

static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
					 struct drm_fence_class_manager *fc)
{
	if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
	    !dev_priv->flush_pending) {
		dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
		dev_priv->flush_flags = fc->pending_flush;
		dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
		I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
		dev_priv->flush_pending = 1;
		fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
	}
}

static inline void i915_report_rwflush(struct drm_device *dev,
				       struct drm_i915_private *dev_priv)
{
	if (unlikely(dev_priv->flush_pending)) {

		uint32_t flush_flags;
		uint32_t i_status;
		uint32_t flush_sequence;

		i_status = READ_HWSP(dev_priv, 0);
		if ((i_status & (1 << 12)) !=
		    (dev_priv->saved_flush_status & (1 << 12))) {
			flush_flags = dev_priv->flush_flags;
			flush_sequence = dev_priv->flush_sequence;
			dev_priv->flush_pending = 0;
			drm_fence_handler(dev, 0, flush_sequence,
					  flush_flags, 0);
		}
	}
}

static void i915_fence_flush(struct drm_device *dev,
			     uint32_t fence_class)
{
	struct drm_i915_private *dev_priv =
		(struct drm_i915_private *) dev->dev_private;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[0];
	unsigned long irq_flags;

	if (unlikely(!dev_priv))
		return;

	write_lock_irqsave(&fm->lock, irq_flags);
	i915_initiate_rwflush(dev_priv, fc);
	write_unlock_irqrestore(&fm->lock, irq_flags);
}


static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
			    uint32_t waiting_types)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[0];
	uint32_t sequence;

	if (unlikely(!dev_priv))
		return;

	/*
	 * First, report any executed sync flush:
	 */

	i915_report_rwflush(dev, dev_priv);

	/*
	 * Report a new breadcrumb, and adjust IRQs.
	 */

	if (waiting_types & DRM_FENCE_TYPE_EXE) {

		sequence = READ_BREADCRUMB(dev_priv);
		drm_fence_handler(dev, 0, sequence,
				  DRM_FENCE_TYPE_EXE, 0);

		if (dev_priv->fence_irq_on &&
		    !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
			i915_user_irq_off(dev);
			dev_priv->fence_irq_on = 0;
		} else if (!dev_priv->fence_irq_on &&
			   (fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
			i915_user_irq_on(dev);
			dev_priv->fence_irq_on = 1;
		}
	}

	/*
	 * There may be new RW flushes pending. Start them.
	 */

	i915_initiate_rwflush(dev_priv, fc);

	/*
	 * And possibly, but unlikely, they finish immediately.
	 */

	i915_report_rwflush(dev, dev_priv);

}

static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
				    uint32_t flags, uint32_t *sequence,
				    uint32_t *native_type)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	if (unlikely(!dev_priv))
		return -EINVAL;

	i915_emit_irq(dev);
	*sequence = (uint32_t) dev_priv->counter;
	*native_type = DRM_FENCE_TYPE_EXE;
	if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
		*native_type |= DRM_I915_FENCE_TYPE_RW;

	return 0;
}

void i915_fence_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[0];

	write_lock(&fm->lock);
	if (likely(dev_priv->fence_irq_on))
		i915_fence_poll(dev, 0, fc->waiting_types);
	write_unlock(&fm->lock);
}

/*
 * We need a separate wait function since we need to poll for
 * sync flushes.
 */

static int i915_fence_wait(struct drm_fence_object *fence,
			   int lazy, int interruptible, uint32_t mask)
{
	struct drm_device *dev = fence->dev;
	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[0];
	int ret;
	unsigned long _end = jiffies + 3 * DRM_HZ;

	drm_fence_object_flush(fence, mask);
	if (likely(interruptible))
		ret = wait_event_interruptible_timeout
			(fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
			 3 * DRM_HZ);
	else
		ret = wait_event_timeout
			(fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
			 3 * DRM_HZ);

	if (unlikely(ret == -ERESTARTSYS))
		return -EAGAIN;

	if (unlikely(ret == 0))
		return -EBUSY;

	if (likely(mask == DRM_FENCE_TYPE_EXE ||
		   drm_fence_object_signaled(fence, mask)))
		return 0;

	/*
	 * Remove this code snippet when fixed. HWSTAM doesn't let
	 * flush info through...
	 */

	if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
		unsigned long irq_flags;

		DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
		msleep(100);
		dev_priv->flush_pending = 0;
		write_lock_irqsave(&fm->lock, irq_flags);
		drm_fence_handler(dev, fence->fence_class,
				  fence->sequence, fence->type, 0);
		write_unlock_irqrestore(&fm->lock, irq_flags);
	}

	/*
	 * Poll for sync flush completion.
	 */

	return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
}

static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
{
	uint32_t flush_flags = fence->waiting_types &
		~(DRM_FENCE_TYPE_EXE | fence->signaled_types);

	if (likely(flush_flags == 0 ||
		   ((flush_flags & ~fence->native_types) == 0) ||
		   (fence->signaled_types != DRM_FENCE_TYPE_EXE)))
		return 0;
	else {
		struct drm_device *dev = fence->dev;
		struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
		struct drm_fence_driver *driver = dev->driver->fence_driver;

		if (unlikely(!dev_priv))
			return 0;

		if (dev_priv->flush_pending) {
			uint32_t diff = (dev_priv->flush_sequence - fence->sequence) &
				driver->sequence_mask;

			if (diff < driver->wrap_diff)
				return 0;
		}
	}
	return flush_flags;
}

struct drm_fence_driver i915_fence_driver = {
	.num_classes = 1,
	.wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
	.flush_diff = (1U << (BREADCRUMB_BITS - 2)),
	.sequence_mask = BREADCRUMB_MASK,
	.has_irq = NULL,
	.emit = i915_fence_emit_sequence,
	.flush = i915_fence_flush,
	.poll = i915_fence_poll,
	.needed_flush = i915_fence_needed_flush,
	.wait = i915_fence_wait,
};
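The fence driver above leans on wrap-safe breadcrumb arithmetic: i915_fence_needed_flush() decides whether a pending flush already covers a fence by taking the masked difference of the two sequence numbers and comparing it against wrap_diff. A self-contained sketch of that test, using an assumed 8-bit breadcrumb instead of BREADCRUMB_BITS so the numbers stay readable:

#include <stdint.h>
#include <assert.h>

#define SEQ_BITS  8				/* assumed width for illustration */
#define SEQ_MASK  ((1U << SEQ_BITS) - 1)	/* plays the role of sequence_mask */
#define WRAP_DIFF (1U << (SEQ_BITS - 1))	/* plays the role of wrap_diff */

/* true if the flush at sequence 'flushed' was emitted at or after 'seq',
 * i.e. the pending flush already covers this fence */
static int flush_covers(uint32_t flushed, uint32_t seq)
{
	uint32_t diff = (flushed - seq) & SEQ_MASK;
	return diff < WRAP_DIFF;
}

int main(void)
{
	assert(flush_covers(10, 5));	/* flush emitted after the fence */
	assert(!flush_covers(5, 10));	/* fence is newer than the flush */
	assert(flush_covers(2, 250));	/* still correct across the wrap */
	return 0;
}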
@ -62,12 +62,16 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio_for_ddc(struct drm_de
 	i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
 	i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
 	i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
+	i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
+	i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
 	i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
 	i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
 	i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
 	i2c.put_data_mask = (1 << gpio.ucDataEnShift);
 	i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
 	i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
+	i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
+	i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
 	i2c.valid = true;

 	return i2c;
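One detail in the lookup above that is easy to miss: the AtomBIOS GPIO table stores register numbers as little-endian DWORD indices, while RADEON_READ()/RADEON_WRITE() take byte offsets into the register aperture, hence the le16_to_cpu() plus the multiply by four. A standalone illustration, with an invented table value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical usClkMaskRegisterIndex entry from the GPIO table */
	uint16_t dword_index = 0x1a;
	/* byte offset the MMIO accessors expect: index * sizeof(uint32_t) */
	uint32_t byte_offset = (uint32_t)dword_index * 4;

	printf("dword index 0x%x -> byte offset 0x%x\n", dword_index, byte_offset);
	return 0;	/* prints: dword index 0x1a -> byte offset 0x68 */
}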
@ -94,7 +98,7 @@ static void radeon_atom_apply_quirks(struct drm_device *dev, int index)
 			mode_info->bios_connector[index].ddc_i2c.valid = false;
 		}
 	}
 }

 bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device *dev)
 {
@ -146,7 +150,7 @@ bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device
 		}

 		mode_info->bios_connector[i].dac_type = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;

 		if ((i == ATOM_DEVICE_TV1_INDEX) ||
 		    (i == ATOM_DEVICE_TV2_INDEX) ||
 		    (i == ATOM_DEVICE_TV1_INDEX))
@ -161,7 +165,7 @@ bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device
 				mode_info->bios_connector[i].ddc_i2c =
 					radeon_lookup_gpio_for_ddc(dev, ci.sucI2cId.sbfAccess.bfI2C_LineMux);
 		} else
 			mode_info->bios_connector[i].ddc_i2c =
 				radeon_lookup_gpio_for_ddc(dev, ci.sucI2cId.sbfAccess.bfI2C_LineMux);

 		if (i == ATOM_DEVICE_DFP1_INDEX)
@ -243,7 +247,7 @@ bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device
 		}
 	}

 	DRM_DEBUG("BIOS Connector table\n");
 	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
 		if (!mode_info->bios_connector[i].valid)
@ -265,7 +269,7 @@ union firmware_info {
 	ATOM_FIRMWARE_INFO_V1_3 info_13;
 	ATOM_FIRMWARE_INFO_V1_4 info_14;
 };

 bool radeon_atom_get_clock_info(struct drm_device *dev)
 {
 	struct drm_radeon_private *dev_priv = dev->dev_private;
@ -284,8 +288,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 	pll->reference_div = 0;

 	pll->pll_out_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
 	pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

 	if (pll->pll_out_min == 0) {
 		if (radeon_is_avivo(dev_priv))
 			pll->pll_out_min = 64800;
@ -298,7 +302,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)

 	pll->xclk = le16_to_cpu(firmware_info->info.usMaxPixelClock);

 	return true;
 }

 union lvds_info {
@ -330,7 +334,7 @@ void radeon_get_lvds_info(struct radeon_encoder *encoder)
 	encoder->vblank = le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
 	encoder->hoverplus = le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
 	encoder->hsync_width = le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
 	encoder->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs);
 }

 void radeon_atom_dyn_clk_setup(struct drm_device *dev, int enable)
@ -342,7 +346,7 @@ void radeon_atom_dyn_clk_setup(struct drm_device *dev, int enable)
 	int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);

 	args.ucEnable = enable;

 	atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
 }

@ -355,7 +359,7 @@ void radeon_atom_static_pwrmgt_setup(struct drm_device *dev, int enable)
 	int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);

 	args.ucEnable = enable;

 	atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
 }

@ -55,10 +55,14 @@ int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags)
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;

+	if (!dev_priv->cp_running)
+		return 0;
+
 	BEGIN_RING(4);
 	RADEON_FLUSH_CACHE();
 	RADEON_FLUSH_ZCACHE();
 	ADVANCE_RING();
+	COMMIT_RING();
 	return 0;
 }

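For readers outside the driver, the ring macros above follow a fixed reserve/emit/commit protocol. The sketch below restates the new sequence with comments; the behavior described is a simplification of what the real radeon_drv.h macros do, and RING_LOCALS must be in scope as in the function above.

	/* Sketch of the ring protocol used by radeon_invalidate_caches();
	 * the comments are assumptions about macro behavior, not the
	 * literal radeon_drv.h implementation. */
	BEGIN_RING(4);		/* reserve 4 dwords of ring space */
	RADEON_FLUSH_CACHE();	/* emit a flush of the destination cache */
	RADEON_FLUSH_ZCACHE();	/* emit a flush of the depth (Z) cache */
	ADVANCE_RING();		/* close out the reservation */
	COMMIT_RING();		/* publish the write pointer so the CP runs
				 * the commands; pointless (and unsafe) if
				 * the CP is not running, hence the new
				 * cp_running guard at the top */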
@ -261,6 +265,6 @@ uint64_t radeon_evict_flags(struct drm_buffer_object *bo)
 	case DRM_BO_MEM_TT:
 		return DRM_BO_FLAG_MEM_LOCAL;
 	default:
-		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+		return DRM_BO_FLAG_MEM_TT;
 	}
 }

File diff suppressed because it is too large
@ -36,9 +36,9 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
 	int ret = 0;
 	struct edid *edid;

-	avivo_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector, 1);
 	edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-	avivo_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector, 0);
 	if (edid) {
 		drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
 		ret = drm_add_edid_modes(&radeon_connector->base, edid);
@ -53,7 +53,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
 		return ret;

 	radeon_encoder_update_panel_size(lvds_encoder, connector);
 #endif
 	return ret;
 }

@ -111,33 +111,33 @@ struct drm_connector_funcs radeon_lvds_connector_funcs = {
 	.destroy = radeon_connector_destroy,
 };

-static int radeon_atom_vga_get_modes(struct drm_connector *connector)
+static int radeon_vga_get_modes(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	int ret;

 	ret = radeon_ddc_get_modes(radeon_connector);

 	return ret;
 }

-static int radeon_atom_vga_mode_valid(struct drm_connector *connector,
+static int radeon_vga_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {

 	return MODE_OK;
 }

-static enum drm_connector_status radeon_atom_vga_detect(struct drm_connector *connector)
+static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
 {
 	struct edid *edid;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder;
 	struct drm_encoder_helper_funcs *encoder_funcs;

-	avivo_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector, 1);
 	edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-	avivo_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector, 0);
 	if (edid) {
 		kfree(edid);
 		return connector_status_connected;
@ -152,20 +152,20 @@ static enum drm_connector_status radeon_atom_vga_detect(struct drm_connector *co
 	return encoder_funcs->detect(encoder, connector);
 }

-struct drm_connector_helper_funcs radeon_atom_vga_connector_helper_funcs = {
-	.get_modes = radeon_atom_vga_get_modes,
-	.mode_valid = radeon_atom_vga_mode_valid,
+struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+	.get_modes = radeon_vga_get_modes,
+	.mode_valid = radeon_vga_mode_valid,
 	.best_encoder = radeon_best_single_encoder,
 };

-struct drm_connector_funcs radeon_atom_vga_connector_funcs = {
-	.detect = radeon_atom_vga_detect,
+struct drm_connector_funcs radeon_vga_connector_funcs = {
+	.detect = radeon_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = radeon_connector_destroy,
 };


-static enum drm_connector_status radeon_atom_dvi_detect(struct drm_connector *connector)
+static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
 {
 	struct edid *edid;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@ -175,9 +175,9 @@ static enum drm_connector_status radeon_atom_dvi_detect(struct drm_connector *co
 	int i;
 	enum drm_connector_status ret;

-	avivo_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector, 1);
 	edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-	avivo_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector, 0);
 	if (edid) {
 		/* if the monitor is digital - set the bits */
 		if (edid->digital)
@ -212,7 +212,7 @@ static enum drm_connector_status radeon_atom_dvi_detect(struct drm_connector *co
 	}

 /* okay need to be smart in here about which encoder to pick */
-struct drm_encoder *radeon_atom_dvi_encoder(struct drm_connector *connector)
+struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@ -253,14 +253,14 @@ struct drm_encoder *radeon_atom_dvi_encoder(struct drm_connector *connector)
 	return NULL;
 }

-struct drm_connector_helper_funcs radeon_atom_dvi_connector_helper_funcs = {
-	.get_modes = radeon_atom_vga_get_modes,
-	.mode_valid = radeon_atom_vga_mode_valid,
-	.best_encoder = radeon_atom_dvi_encoder,
+struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+	.get_modes = radeon_vga_get_modes,
+	.mode_valid = radeon_vga_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
 };

-struct drm_connector_funcs radeon_atom_dvi_connector_funcs = {
-	.detect = radeon_atom_dvi_detect,
+struct drm_connector_funcs radeon_dvi_connector_funcs = {
+	.detect = radeon_dvi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = radeon_connector_destroy,
 };
@ -272,12 +272,12 @@ static struct connector_funcs {
 	struct drm_connector_helper_funcs *helper_funcs;
 	int conn_type;
 	char *i2c_id;
 } connector_fns[] = {
 	{ CONNECTOR_NONE, NULL, NULL, DRM_MODE_CONNECTOR_Unknown },
-	{ CONNECTOR_VGA, &radeon_atom_vga_connector_funcs, &radeon_atom_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA , "VGA"},
+	{ CONNECTOR_VGA, &radeon_vga_connector_funcs, &radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA , "VGA"},
 	{ CONNECTOR_LVDS, &radeon_lvds_connector_funcs, &radeon_lvds_connector_helper_funcs, DRM_MODE_CONNECTOR_LVDS, "LVDS" },
-	{ CONNECTOR_DVI_A, &radeon_atom_vga_connector_funcs, &radeon_atom_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_DVIA, "DVI" },
-	{ CONNECTOR_DVI_I, &radeon_atom_dvi_connector_funcs, &radeon_atom_dvi_connector_helper_funcs, DRM_MODE_CONNECTOR_DVII, "DVI" },
+	{ CONNECTOR_DVI_A, &radeon_vga_connector_funcs, &radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_DVIA, "DVI" },
+	{ CONNECTOR_DVI_I, &radeon_dvi_connector_funcs, &radeon_dvi_connector_helper_funcs, DRM_MODE_CONNECTOR_DVII, "DVI" },

 #if 0
 	{ CONNECTOR_DVI_D, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA },
@ -294,14 +294,14 @@ static struct connector_funcs {
 	{ CONNECTOR_DIN, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA },
 	{ CONNECTOR_DISPLAY_PORT, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA },
 #endif
 };

 struct drm_connector *radeon_connector_add(struct drm_device *dev, int bios_index)
 {
 	struct radeon_connector *radeon_connector;
 	struct drm_radeon_private *dev_priv = dev->dev_private;
 	struct radeon_mode_info *mode_info = &dev_priv->mode_info;
 	struct drm_connector *connector;
 	int table_idx;

 	for (table_idx = 0; table_idx < ARRAY_SIZE(connector_fns); table_idx++) {
@ -323,7 +323,7 @@ struct drm_connector *radeon_connector_add(struct drm_device *dev, int bios_inde
 					 connector_fns[table_idx].conn_type);

 	drm_connector_helper_add(&radeon_connector->base, connector_fns[table_idx].helper_funcs);

 	if (mode_info->bios_connector[bios_index].ddc_i2c.valid) {
 		radeon_connector->ddc_bus = radeon_i2c_create(dev, &mode_info->bios_connector[bios_index].ddc_i2c,
 							      connector_fns[table_idx].i2c_id);
@ -0,0 +1 @@
../shared-core/radeon_cs.c
@ -0,0 +1,243 @@
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct drm_radeon_private *dev_priv = crtc->dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	uint32_t cur_lock;

	if (radeon_is_avivo(dev_priv)) {
		cur_lock = RADEON_READ(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
		if (lock)
			cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
		else
			cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
		RADEON_WRITE(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
	} else {
		switch(radeon_crtc->crtc_id) {
		case 0:
			cur_lock = RADEON_READ(RADEON_CUR_OFFSET);
			if (lock)
				cur_lock |= RADEON_CUR_LOCK;
			else
				cur_lock &= ~RADEON_CUR_LOCK;
			RADEON_WRITE(RADEON_CUR_OFFSET, cur_lock);
			break;
		case 1:
			cur_lock = RADEON_READ(RADEON_CUR2_OFFSET);
			if (lock)
				cur_lock |= RADEON_CUR2_LOCK;
			else
				cur_lock &= ~RADEON_CUR2_LOCK;
			RADEON_WRITE(RADEON_CUR2_OFFSET, cur_lock);
			break;
		default:
			break;
		}
	}
}

static void radeon_hide_cursor(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_radeon_private *dev_priv = crtc->dev->dev_private;

	if (radeon_is_avivo(dev_priv)) {
		RADEON_WRITE(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
		RADEON_WRITE_P(RADEON_MM_DATA, 0, ~AVIVO_D1CURSOR_EN);
	} else {
		switch(radeon_crtc->crtc_id) {
		case 0:
			RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
			break;
		case 1:
			RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
			break;
		default:
			return;
		}
		RADEON_WRITE_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
	}
}

static void radeon_show_cursor(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_radeon_private *dev_priv = crtc->dev->dev_private;

	if (radeon_is_avivo(dev_priv)) {
		RADEON_WRITE(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
		RADEON_WRITE(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
			     (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
	} else {
		switch(radeon_crtc->crtc_id) {
		case 0:
			RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
			break;
		case 1:
			RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
			break;
		default:
			return;
		}

		RADEON_WRITE_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
						(RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
			       ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
	}
}

static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
			      uint32_t width, uint32_t height)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_radeon_private *dev_priv = crtc->dev->dev_private;
	struct drm_radeon_gem_object *obj_priv;

	obj_priv = obj->driver_private;

	if (radeon_is_avivo(dev_priv)) {
		RADEON_WRITE(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
			     dev_priv->fb_location + obj_priv->bo->offset);
		RADEON_WRITE(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
			     (width - 1) << 16 | (height - 1));
	} else {
		switch(radeon_crtc->crtc_id) {
		case 0:
			/* offset is from DISP_BASE_ADDRESS */
			RADEON_WRITE(RADEON_CUR_OFFSET, obj_priv->bo->offset);
			break;
		case 1:
			/* offset is from DISP2_BASE_ADDRESS */
			RADEON_WRITE(RADEON_CUR2_OFFSET, obj_priv->bo->offset);
			break;
		default:
			break;
		}
	}
}

int radeon_crtc_cursor_set(struct drm_crtc *crtc,
			   struct drm_file *file_priv,
			   uint32_t handle,
			   uint32_t width,
			   uint32_t height)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_gem_object *obj;

	if (!handle) {
		/* turn off cursor */
		radeon_hide_cursor(crtc);
		return 0;
	}

	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	radeon_lock_cursor(crtc, true);
	// XXX only 27 bit offset for legacy cursor
	radeon_set_cursor(crtc, obj, width, height);
	radeon_show_cursor(crtc);
	radeon_lock_cursor(crtc, false);

	mutex_lock(&crtc->dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&crtc->dev->struct_mutex);

	return 0;
}

int radeon_crtc_cursor_move(struct drm_crtc *crtc,
			    int x, int y)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_radeon_private *dev_priv = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;

	if (x < 0)
		xorigin = -x + 1;
	if (y < 0)
		yorigin = -x + 1;
	if (xorigin >= CURSOR_WIDTH)
		xorigin = CURSOR_WIDTH - 1;
	if (yorigin >= CURSOR_WIDTH)
		yorigin = CURSOR_WIDTH - 1;

	radeon_lock_cursor(crtc, true);
	if (radeon_is_avivo(dev_priv)) {
		RADEON_WRITE(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
			     ((xorigin ? 0 : x) << 16) |
			     (yorigin ? 0 : y));
		RADEON_WRITE(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
	} else {
		if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
			y /= 2;
		else if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
			y *= 2;

		switch(radeon_crtc->crtc_id) {
		case 0:
			RADEON_WRITE(RADEON_CUR_HORZ_VERT_OFF, (RADEON_CUR_LOCK
								| (xorigin << 16)
								| yorigin));
			RADEON_WRITE(RADEON_CUR_HORZ_VERT_POSN, (RADEON_CUR_LOCK
								 | ((xorigin ? 0 : x) << 16)
								 | (yorigin ? 0 : y)));
			break;
		case 1:
			RADEON_WRITE(RADEON_CUR2_HORZ_VERT_OFF, (RADEON_CUR2_LOCK
								 | (xorigin << 16)
								 | yorigin));
			RADEON_WRITE(RADEON_CUR2_HORZ_VERT_POSN, (RADEON_CUR2_LOCK
								  | ((xorigin ? 0 : x) << 16)
								  | (yorigin ? 0 : y)));
			break;
		default:
			break;
		}

	}
	radeon_lock_cursor(crtc, false);

	return 0;
}
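A hypothetical userspace sketch of exercising these hooks; drmModeSetCursor() and drmModeMoveCursor() are assumed here with their later libdrm signatures, which may not match this branch exactly. The GEM handle must name a 64x64 ARGB cursor object.

#include <stdint.h>
#include <xf86drmMode.h>	/* assumed to declare the two cursor calls */

/* Sketch only: show a cursor, move it, then hide it again. */
static int flash_cursor(int fd, uint32_t crtc_id, uint32_t bo_handle)
{
	/* reaches radeon_crtc_cursor_set() via the mode-setting ioctls */
	if (drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64))
		return -1;
	/* reaches radeon_crtc_cursor_move(); negative coordinates would
	 * exercise the xorigin/yorigin hot-spot handling above */
	if (drmModeMoveCursor(fd, crtc_id, 100, 100))
		return -1;
	/* handle 0 takes the !handle path and hides the cursor */
	return drmModeSetCursor(fd, crtc_id, 0, 0, 0);
}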
@ -32,9 +32,6 @@
 
 #include "drm_crtc_helper.h"

-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
 int radeon_ddc_dump(struct drm_connector *connector);

@ -48,11 +45,11 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
 	DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
 	RADEON_WRITE(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

 	RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
 	RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
 	RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

 	RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
 	RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
 	RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
@ -62,7 +59,6 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
 	RADEON_WRITE(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

 	for (i = 0; i < 256; i++) {
-
 		RADEON_WRITE8(AVIVO_DC_LUT_RW_INDEX, i);
 		RADEON_WRITE(AVIVO_DC_LUT_30_COLOR,
 			     (radeon_crtc->lut_r[i] << 22) |
@ -73,34 +69,43 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
 	RADEON_WRITE(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
 }

+static void legacy_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	int i;
+	uint32_t dac2_cntl;
+
+	dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2);
+	if (radeon_crtc->crtc_id == 0)
+		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
+	else
+		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
+	RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl);
+
+	for (i = 0; i < 256; i++) {
+		RADEON_WRITE8(RADEON_PALETTE_INDEX, i);
+		RADEON_WRITE(RADEON_PALETTE_DATA,
+			     (radeon_crtc->lut_r[i] << 16) |
+			     (radeon_crtc->lut_g[i] << 8) |
+			     (radeon_crtc->lut_b[i] << 0));
+	}
+}
+
 void radeon_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_radeon_private *dev_priv = dev->dev_private;
-	u32 temp;
-	int i;
 	if (!crtc->enabled)
 		return;

-	if (radeon_is_avivo(dev_priv))
+	if (radeon_is_avivo(dev_priv)) {
 		avivo_crtc_load_lut(crtc);
+		return;
+	}
-
-	temp = RADEON_READ(RADEON_DAC_CNTL2);
-	if (radeon_crtc->crtc_id == 0)
-		temp &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
-	else
-		temp |= RADEON_DAC2_PALETTE_ACC_CTL;
-	RADEON_WRITE(RADEON_DAC_CNTL2, temp);
-
-	for (i = 0; i < 256; i++) {
-		// OUTPAL(i, radeon_crtc->lut_r[i], radeon_crtc->lut_g[i], radeon_crtc->lut_b[i]);
-	}
-
+	legacy_crtc_load_lut(crtc);
 }

 /** Sets the color ramps on behalf of RandR */
|
 	radeon_crtc->lut_b[regno] = blue >> 8;
 }

-void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-
-static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
-				   struct drm_display_mode *mode,
-				   struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
-static void radeon_crtc_mode_set(struct drm_crtc *crtc,
-				 struct drm_display_mode *mode,
-				 struct drm_display_mode *adjusted_mode,
-				 int x, int y)
-{
-
-}
-
-void radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y)
-{
-}
-
-static void radeon_crtc_prepare(struct drm_crtc *crtc)
-{
-}
-
-static void radeon_crtc_commit(struct drm_crtc *crtc)
-{
-}
-
-static void avivo_lock_cursor(struct drm_crtc *crtc, bool lock)
-{
-	struct drm_radeon_private *dev_priv = crtc->dev->dev_private;
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
-	uint32_t tmp;
-
-	tmp = RADEON_READ(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
-	if (lock)
-		tmp |= AVIVO_D1CURSOR_UPDATE_LOCK;
-	else
-		tmp &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
-
-	RADEON_WRITE(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, tmp);
-}
-
-static int radeon_crtc_cursor_set(struct drm_crtc *crtc,
-				  struct drm_file *file_priv,
-				  uint32_t handle,
-				  uint32_t width,
-				  uint32_t height)
-{
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct drm_radeon_private *dev_priv = crtc->dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_radeon_gem_object *obj_priv;
-
-	if (!handle) {
-		RADEON_WRITE(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset, 0);
-		return 0;
-		/* turn off cursor */
-	}
-
-	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
-	if (!obj) {
-		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
-		return -EINVAL;
-	}
-
-	obj_priv = obj->driver_private;
-
-	RADEON_WRITE(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset, 0);
-	if (radeon_is_avivo(dev_priv)) {
-		RADEON_WRITE(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-			     dev_priv->fb_location + obj_priv->bo->offset);
-		RADEON_WRITE(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
-			     (CURSOR_WIDTH - 1) << 16 | (CURSOR_HEIGHT - 1));
-		RADEON_WRITE(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
-			     AVIVO_D1CURSOR_EN | (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
-	}
-
-	mutex_lock(&crtc->dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&crtc->dev->struct_mutex);
-
-	return 0;
-}
-
-static int radeon_crtc_cursor_move(struct drm_crtc *crtc,
-				   int x, int y)
-{
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct drm_radeon_private *dev_priv = crtc->dev->dev_private;
-	int xorigin = 0, yorigin = 0;
-
-	if (x < 0) xorigin = -x+1;
-	if (y < 0) yorigin = -x+1;
-	if (xorigin >= CURSOR_WIDTH) xorigin = CURSOR_WIDTH - 1;
-	if (yorigin >= CURSOR_WIDTH) yorigin = CURSOR_WIDTH - 1;
-
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		y /= 2;
-	else if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
-		y *= 2;
-
-	if (radeon_is_avivo(dev_priv)) {
-		avivo_lock_cursor(crtc, true);
-
-		RADEON_WRITE(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
-			     ((xorigin ? 0: x) << 16) |
-			     (yorigin ? 0 : y));
-		RADEON_WRITE(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
-		avivo_lock_cursor(crtc, false);
-	}
-
-	return 0;
-}
-
 static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 				  u16 *blue, uint32_t size)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	int i;
+	int i, j;

 	if (size != 256)
 		return;

-	for (i = 0; i < 256; i++) {
-		radeon_crtc->lut_r[i] = red[i] >> 8;
-		radeon_crtc->lut_g[i] = green[i] >> 8;
-		radeon_crtc->lut_b[i] = blue[i] >> 8;
+	if (crtc->fb->depth == 16) {
+		for (i = 0; i < 64; i++) {
+			if (i <= 31) {
+				for (j = 0; j < 8; j++) {
+					radeon_crtc->lut_r[i * 8 + j] = red[i] >> 8;
+					radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 8;
+				}
+			}
+			for (j = 0; j < 4; j++)
+				radeon_crtc->lut_g[i * 4 + j] = green[i] >> 8;
+		}
+	} else {
+		for (i = 0; i < 256; i++) {
+			radeon_crtc->lut_r[i] = red[i] >> 8;
+			radeon_crtc->lut_g[i] = green[i] >> 8;
+			radeon_crtc->lut_b[i] = blue[i] >> 8;
+		}
 	}

 	radeon_crtc_load_lut(crtc);
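The depth==16 branch above is just RGB565 replication: red and blue have 5 bits (32 levels, so each level fills 8 of the 256 LUT slots) and green has 6 bits (64 levels, 4 slots each), so both expansions tile the full 256-entry LUT. A standalone check of that arithmetic:

#include <assert.h>

/* Worked verification of the 16bpp expansion in radeon_crtc_gamma_set(). */
int main(void)
{
	int i, j, rb_slots = 0, g_slots = 0;

	for (i = 0; i < 64; i++) {
		if (i <= 31)
			for (j = 0; j < 8; j++)
				rb_slots++;	/* red/blue index: i * 8 + j */
		for (j = 0; j < 4; j++)
			g_slots++;		/* green index: i * 4 + j */
	}
	/* both walks cover exactly the 256-entry hardware LUT */
	assert(rb_slots == 256 && g_slots == 256);
	return 0;
}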
@ -263,15 +160,6 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
|
||||||
kfree(radeon_crtc);
|
kfree(radeon_crtc);
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct drm_crtc_helper_funcs radeon_helper_funcs = {
|
|
||||||
.dpms = radeon_crtc_dpms,
|
|
||||||
.mode_fixup = radeon_crtc_mode_fixup,
|
|
||||||
.mode_set = radeon_crtc_mode_set,
|
|
||||||
.mode_set_base = radeon_crtc_set_base,
|
|
||||||
.prepare = radeon_crtc_prepare,
|
|
||||||
.commit = radeon_crtc_commit,
|
|
||||||
};
|
|
||||||
|
|
||||||
static const struct drm_crtc_funcs radeon_crtc_funcs = {
|
static const struct drm_crtc_funcs radeon_crtc_funcs = {
|
||||||
.cursor_set = radeon_crtc_cursor_set,
|
.cursor_set = radeon_crtc_cursor_set,
|
||||||
.cursor_move = radeon_crtc_cursor_move,
|
.cursor_move = radeon_crtc_cursor_move,
|
||||||
|
@ -309,7 +197,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
|
||||||
if (dev_priv->is_atom_bios && dev_priv->chip_family > CHIP_RS690)
|
if (dev_priv->is_atom_bios && dev_priv->chip_family > CHIP_RS690)
|
||||||
radeon_atombios_init_crtc(dev, radeon_crtc);
|
radeon_atombios_init_crtc(dev, radeon_crtc);
|
||||||
else
|
else
|
||||||
drm_crtc_helper_add(&radeon_crtc->base, &radeon_helper_funcs);
|
radeon_legacy_init_crtc(dev, radeon_crtc);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool radeon_legacy_setup_enc_conn(struct drm_device *dev)
|
bool radeon_legacy_setup_enc_conn(struct drm_device *dev)
|
||||||
|
@ -360,8 +248,14 @@ bool radeon_setup_enc_conn(struct drm_device *dev)
|
||||||
if ((mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I) ||
|
if ((mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I) ||
|
||||||
(mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_A) ||
|
(mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_A) ||
|
||||||
(mode_info->bios_connector[i].connector_type == CONNECTOR_VGA)) {
|
(mode_info->bios_connector[i].connector_type == CONNECTOR_VGA)) {
|
||||||
if (radeon_is_avivo(dev_priv))
|
if (radeon_is_avivo(dev_priv)) {
|
||||||
encoder = radeon_encoder_atom_dac_add(dev, i, mode_info->bios_connector[i].dac_type, 0);
|
encoder = radeon_encoder_atom_dac_add(dev, i, mode_info->bios_connector[i].dac_type, 0);
|
||||||
|
} else {
|
||||||
|
if (mode_info->bios_connector[i].dac_type == DAC_PRIMARY)
|
||||||
|
encoder = radeon_encoder_legacy_primary_dac_add(dev, i, 0);
|
||||||
|
else if (mode_info->bios_connector[i].dac_type == DAC_TVDAC)
|
||||||
|
encoder = radeon_encoder_legacy_tv_dac_add(dev, i, 0);
|
||||||
|
}
|
||||||
if (encoder)
|
if (encoder)
|
||||||
drm_mode_connector_attach_encoder(connector, encoder);
|
drm_mode_connector_attach_encoder(connector, encoder);
|
||||||
}
|
}
|
||||||
|
@ -370,7 +264,13 @@ bool radeon_setup_enc_conn(struct drm_device *dev)
|
||||||
if ((mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I) ||
|
if ((mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I) ||
|
||||||
(mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_D)) {
|
(mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_D)) {
|
||||||
if (radeon_is_avivo(dev_priv))
|
if (radeon_is_avivo(dev_priv))
|
||||||
encoder = radeon_encoder_atom_tmds_add(dev, i, mode_info->bios_connector[i].dac_type);
|
encoder = radeon_encoder_atom_tmds_add(dev, i, mode_info->bios_connector[i].tmds_type);
|
||||||
|
else {
|
||||||
|
if (mode_info->bios_connector[i].tmds_type == TMDS_INT)
|
||||||
|
encoder = radeon_encoder_legacy_tmds_int_add(dev, i);
|
||||||
|
else if (mode_info->bios_connector[i].dac_type == TMDS_EXT)
|
||||||
|
encoder = radeon_encoder_legacy_tmds_ext_add(dev, i);
|
||||||
|
}
|
||||||
if (encoder)
|
if (encoder)
|
||||||
drm_mode_connector_attach_encoder(connector, encoder);
|
drm_mode_connector_attach_encoder(connector, encoder);
|
||||||
}
|
}
|
||||||
|
@ -379,6 +279,10 @@ bool radeon_setup_enc_conn(struct drm_device *dev)
|
||||||
if (mode_info->bios_connector[i].connector_type == CONNECTOR_DIN) {
|
if (mode_info->bios_connector[i].connector_type == CONNECTOR_DIN) {
|
||||||
if (radeon_is_avivo(dev_priv))
|
if (radeon_is_avivo(dev_priv))
|
||||||
encoder = radeon_encoder_atom_dac_add(dev, i, mode_info->bios_connector[i].dac_type, 1);
|
encoder = radeon_encoder_atom_dac_add(dev, i, mode_info->bios_connector[i].dac_type, 1);
|
||||||
|
else {
|
||||||
|
if (mode_info->bios_connector[i].dac_type == DAC_TVDAC)
|
||||||
|
encoder = radeon_encoder_legacy_tv_dac_add(dev, i, 0);
|
||||||
|
}
|
||||||
if (encoder)
|
if (encoder)
|
||||||
drm_mode_connector_attach_encoder(connector, encoder);
|
drm_mode_connector_attach_encoder(connector, encoder);
|
||||||
}
|
}
|
||||||
|
@@ -389,31 +293,6 @@ bool radeon_setup_enc_conn(struct drm_device *dev)
 	return true;
 }
 
-
-
-void avivo_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
-{
-	struct drm_radeon_private *dev_priv = radeon_connector->base.dev->dev_private;
-	uint32_t temp;
-	struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
-
-	temp = RADEON_READ(rec->mask_clk_reg);
-	if (lock_state)
-		temp |= rec->put_clk_mask;
-	else
-		temp &= ~rec->put_clk_mask;
-	RADEON_WRITE(rec->mask_clk_reg, temp);
-	temp = RADEON_READ(rec->mask_clk_reg);
-
-	temp = RADEON_READ(rec->mask_data_reg);
-	if (lock_state)
-		temp |= rec->put_data_mask;
-	else
-		temp &= ~rec->put_data_mask;
-	RADEON_WRITE(rec->mask_data_reg, temp);
-	temp = RADEON_READ(rec->mask_data_reg);
-}
-
 int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 {
 	struct drm_radeon_private *dev_priv = radeon_connector->base.dev->dev_private;
@@ -422,12 +301,9 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
-	if (radeon_is_avivo(dev_priv))
-		avivo_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector, 1);
 	edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-	if (radeon_is_avivo(dev_priv))
-		avivo_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector, 0);
 	if (edid) {
 		drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
 		ret = drm_add_edid_modes(&radeon_connector->base, edid);
@@ -445,7 +321,9 @@ int radeon_ddc_dump(struct drm_connector *connector)
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
+	radeon_i2c_do_lock(radeon_connector, 1);
 	edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
+	radeon_i2c_do_lock(radeon_connector, 0);
 	if (edid) {
 		kfree(edid);
 	}
@@ -456,7 +334,7 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
 {
 	uint64_t x, y, result;
 	uint64_t mod;
 
 	n += d / 2;
 
 	mod = do_div(n, d);
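
Note on the hunk above: the "n += d / 2" bias is what turns do_div()'s
truncating division into round-to-nearest. A minimal userspace model of
radeon_div() (do_div() itself is a kernel macro that divides a 64-bit value
in place and returns the remainder):

	#include <stdint.h>
	#include <stdio.h>

	/* Model of radeon_div(): bias by half the divisor, then truncate. */
	static uint32_t radeon_div_model(uint64_t n, uint32_t d)
	{
		n += d / 2;	/* round to nearest instead of down */
		return (uint32_t)(n / d);
	}

	int main(void)
	{
		/* 10 / 4 truncates to 2; with the bias it rounds to 3 */
		printf("%u %u\n", radeon_div_model(10, 4), (unsigned)(10 / 4));
		return 0;
	}
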
@@ -542,7 +420,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
 
 			current_freq = radeon_div((uint64_t)pll->reference_freq * 10000 * feedback_div,
 						  ref_div * post_div);
 
 			error = abs(current_freq - freq);
 			vco_diff = abs(vco - best_vco);
 
@@ -573,7 +451,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
 					best_vco_diff = vco_diff;
 				}
 			}
 
 			if (current_freq < freq)
 				min_feed_div = feedback_div+1;
 			else
@@ -581,7 +459,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
 			}
 		}
 	}
 
 	*dot_clock_p = best_freq / 10000;
 	*fb_div_p = best_feedback_div;
 	*ref_div_p = best_ref_div;
@@ -703,7 +581,7 @@ int radeon_modeset_init(struct drm_device *dev)
 
 	if (!ret)
 		return ret;
 
 	drm_helper_initial_config(dev, false);
 
 	return 0;

@@ -30,9 +30,9 @@
 
 extern int atom_debug;
 
-static void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
+void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	if (mode->hdisplay < radeon_encoder->panel_xres ||
@@ -279,7 +279,7 @@ static void radeon_lvtma_dpms(struct drm_encoder *encoder, int mode)
 	case DRM_MODE_DPMS_ON:
 		atombios_display_device_control(encoder, index, ATOM_ENABLE);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
 		atombios_display_device_control(encoder, index, ATOM_DISABLE);
@@ -319,7 +319,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_lvtma_helper_funcs = {
 	.commit = radeon_lvtma_commit,
 };
 
-static void radeon_enc_destroy(struct drm_encoder *encoder)
+void radeon_enc_destroy(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	drm_encoder_cleanup(encoder);
@@ -792,7 +792,7 @@ static void radeon_atom_tmds_dpms(struct drm_encoder *encoder, int mode)
 	case DRM_MODE_DPMS_ON:
 		atombios_display_device_control(encoder, index, ATOM_ENABLE);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
 		atombios_display_device_control(encoder, index, ATOM_DISABLE);
@@ -895,68 +895,3 @@ struct drm_encoder *radeon_encoder_atom_tmds_add(struct drm_device *dev, int bios_index, int tmds_type)
 	return encoder;
 }
 
-static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct drm_device *dev = encoder->dev;
-}
-
-static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
-{
-	radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
-{
-	radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
-					struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
-	.dpms = radeon_legacy_lvds_dpms,
-	.mode_fixup = radeon_lvtma_mode_fixup,
-	.prepare = radeon_legacy_lvds_prepare,
-	.mode_set = radeon_legacy_lvds_mode_set,
-	.commit = radeon_legacy_lvds_commit,
-};
-
-
-static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
-	.destroy = radeon_enc_destroy,
-};
-
-struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index)
-{
-	struct drm_radeon_private *dev_priv = dev->dev_private;
-	struct radeon_mode_info *mode_info = &dev_priv->mode_info;
-	struct radeon_encoder *radeon_encoder;
-	struct drm_encoder *encoder;
-	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
-	if (!radeon_encoder) {
-		return NULL;
-	}
-
-	encoder = &radeon_encoder->base;
-
-	encoder->possible_crtcs = 0x3;
-	encoder->possible_clones = 0;
-	drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs,
-			 DRM_MODE_ENCODER_LVDS);
-
-	drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
-
-	/* TODO get the LVDS info from the BIOS for panel size etc. */
-	/* get the lvds info from the bios */
-	radeon_combios_get_lvds_info(radeon_encoder);
-
-	/* LVDS gets default RMX full scaling */
-	radeon_encoder->rmx_type = RMX_FULL;
-
-	return encoder;
-}

@@ -27,6 +27,11 @@
 #include "radeon_drm.h"
 #include "radeon_drv.h"
 
+static int radeon_gem_ib_init(struct drm_device *dev);
+static int radeon_gem_ib_destroy(struct drm_device *dev);
+static int radeon_gem_dma_bufs_init(struct drm_device *dev);
+static void radeon_gem_dma_bufs_destroy(struct drm_device *dev);
+
 int radeon_gem_init_object(struct drm_gem_object *obj)
 {
 	struct drm_radeon_gem_object *obj_priv;
@@ -240,9 +245,11 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
 	DRM_DEBUG("got here %p %p %d\n", obj, obj_priv->bo, atomic_read(&obj_priv->bo->usage));
 	/* validate into a pin with no fence */
 
-	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
-				 DRM_BO_HINT_DONT_FENCE,
-				 0, NULL);
+	if (!(obj_priv->bo->type != drm_bo_type_kernel && !DRM_SUSER(DRM_CURPROC))) {
+		ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
+					 DRM_BO_HINT_DONT_FENCE, 0);
+	} else
+		ret = 0;
 
 	args->offset = obj_priv->bo->offset;
 	DRM_DEBUG("got here %p %p\n", obj, obj_priv->bo);
@@ -270,8 +277,7 @@ int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
 	/* validate into a pin with no fence */
 
 	ret = drm_bo_do_validate(obj_priv->bo, DRM_BO_FLAG_NO_EVICT, DRM_BO_FLAG_NO_EVICT,
-				 DRM_BO_HINT_DONT_FENCE,
-				 0, NULL);
+				 DRM_BO_HINT_DONT_FENCE, 0);
 
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(obj);
@@ -315,7 +321,7 @@ int radeon_gem_indirect_ioctl(struct drm_device *dev, void *data,
 	//VB_AGE_TEST_WITH_RETURN(dev_priv);
 
 	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
-				 0 , 0, NULL);
+				 0 , 0);
 	if (ret)
 		return ret;
 
@@ -481,6 +487,7 @@ static int radeon_gart_init(struct drm_device *dev)
 
 	/* setup a 32MB GART */
 	dev_priv->gart_size = dev_priv->mm.gart_size;
+	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
 
 #if __OS_HAS_AGP
 	/* setup VRAM vs GART here */
@@ -513,19 +520,19 @@ static int radeon_gart_init(struct drm_device *dev)
 		ret = drm_buffer_object_create(dev, RADEON_PCIGART_TABLE_SIZE,
 					       drm_bo_type_kernel,
 					       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
-					       0, 1, 0, &dev_priv->mm.pcie_table);
+					       0, 1, 0, &dev_priv->mm.pcie_table.bo);
 		if (ret)
 			return -EINVAL;
 
-		DRM_DEBUG("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table, dev_priv->mm.pcie_table->offset);
-		ret = drm_bo_kmap(dev_priv->mm.pcie_table, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT,
-				  &dev_priv->mm.pcie_table_map);
+		DRM_DEBUG("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table.bo, dev_priv->mm.pcie_table.bo->offset);
+		ret = drm_bo_kmap(dev_priv->mm.pcie_table.bo, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT,
+				  &dev_priv->mm.pcie_table.kmap);
 		if (ret)
 			return -EINVAL;
 
 		dev_priv->pcigart_offset_set = 2;
-		dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table->offset;
-		dev_priv->gart_info.addr = dev_priv->mm.pcie_table_map.virtual;
+		dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table.bo->offset;
+		dev_priv->gart_info.addr = dev_priv->mm.pcie_table.kmap.virtual;
 		dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
 		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB;
 		memset(dev_priv->gart_info.addr, 0, RADEON_PCIGART_TABLE_SIZE);
@@ -543,8 +550,8 @@ static int radeon_gart_init(struct drm_device *dev)
 			dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
 		else
 			dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
-		dev_priv->gart_info.addr = NULL;
-		dev_priv->gart_info.bus_addr = 0;
+		dev_priv->gart_info.addr = dev_priv->gart_info.table_handle->vaddr;
+		dev_priv->gart_info.bus_addr = dev_priv->gart_info.table_handle->busaddr;
 	}
 
 	/* gart values setup - start the GART */
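
Note: several hunks in this file switch dev_priv->mm.pcie_table (and, below,
mm.ring and mm.ring_read) from a bare buffer-object pointer plus a separate
*_map field to a single object with .bo and .kmap members. The real
definition lives in radeon_drv.h, whose diff is not shown here, so the
following is only the shape inferred from the accesses in this diff; stub
types stand in for the DRM ones so the sketch compiles on its own:

	struct drm_buffer_object;			/* opaque in this sketch */
	struct drm_bo_kmap_obj { void *virtual; };	/* stub: only the member used here */

	/* inferred wrapper pairing a buffer object with its kernel mapping */
	struct radeon_mm_obj {
		struct drm_buffer_object *bo;
		struct drm_bo_kmap_obj kmap;
	};
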
@@ -566,14 +573,14 @@ int radeon_alloc_gart_objects(struct drm_device *dev)
 				       drm_bo_type_kernel,
 				       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
 				       DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
-				       0, 1, 0, &dev_priv->mm.ring);
+				       0, 1, 0, &dev_priv->mm.ring.bo);
 	if (ret) {
 		DRM_ERROR("failed to allocate ring\n");
 		return -EINVAL;
 	}
 
-	ret = drm_bo_kmap(dev_priv->mm.ring, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT,
-			  &dev_priv->mm.ring_map);
+	ret = drm_bo_kmap(dev_priv->mm.ring.bo, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT,
+			  &dev_priv->mm.ring.kmap);
 	if (ret) {
 		DRM_ERROR("failed to map ring\n");
 		return -EINVAL;
@@ -583,24 +590,27 @@ int radeon_alloc_gart_objects(struct drm_device *dev)
 				       drm_bo_type_kernel,
 				       DRM_BO_FLAG_WRITE |DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
 				       DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
-				       0, 1, 0, &dev_priv->mm.ring_read_ptr);
+				       0, 1, 0, &dev_priv->mm.ring_read.bo);
 	if (ret) {
 		DRM_ERROR("failed to allocate ring read\n");
 		return -EINVAL;
 	}
 
-	ret = drm_bo_kmap(dev_priv->mm.ring_read_ptr, 0,
+	ret = drm_bo_kmap(dev_priv->mm.ring_read.bo, 0,
 			  PAGE_SIZE >> PAGE_SHIFT,
-			  &dev_priv->mm.ring_read_ptr_map);
+			  &dev_priv->mm.ring_read.kmap);
 	if (ret) {
 		DRM_ERROR("failed to map ring read\n");
 		return -EINVAL;
 	}
 
 	DRM_DEBUG("Ring ptr %p mapped at %d %p, read ptr %p mapped at %d %p\n",
-		  dev_priv->mm.ring, dev_priv->mm.ring->offset, dev_priv->mm.ring_map.virtual,
-		  dev_priv->mm.ring_read_ptr, dev_priv->mm.ring_read_ptr->offset, dev_priv->mm.ring_read_ptr_map.virtual);
+		  dev_priv->mm.ring.bo, dev_priv->mm.ring.bo->offset, dev_priv->mm.ring.kmap.virtual,
+		  dev_priv->mm.ring_read.bo, dev_priv->mm.ring_read.bo->offset, dev_priv->mm.ring_read.kmap.virtual);
 
+	/* init the indirect buffers */
+	radeon_gem_ib_init(dev);
+	radeon_gem_dma_bufs_init(dev);
 	return 0;
 
 }
@@ -634,6 +644,8 @@ int radeon_gem_mm_init(struct drm_device *dev)
 	ret = radeon_alloc_gart_objects(dev);
 	if (ret)
 		return -EINVAL;
 
+	dev_priv->mm_enabled = true;
+
 	return 0;
 }
 
@@ -641,16 +653,20 @@ void radeon_gem_mm_fini(struct drm_device *dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 
+	radeon_gem_dma_bufs_destroy(dev);
+	radeon_gem_ib_destroy(dev);
+
 	mutex_lock(&dev->struct_mutex);
 
-	if (dev_priv->mm.ring_read_ptr) {
-		drm_bo_kunmap(&dev_priv->mm.ring_read_ptr_map);
-		drm_bo_usage_deref_locked(&dev_priv->mm.ring_read_ptr);
+	if (dev_priv->mm.ring_read.bo) {
+		drm_bo_kunmap(&dev_priv->mm.ring_read.kmap);
+		drm_bo_usage_deref_locked(&dev_priv->mm.ring_read.bo);
 	}
 
-	if (dev_priv->mm.ring) {
-		drm_bo_kunmap(&dev_priv->mm.ring_map);
-		drm_bo_usage_deref_locked(&dev_priv->mm.ring);
+	if (dev_priv->mm.ring.bo) {
+		drm_bo_kunmap(&dev_priv->mm.ring.kmap);
+		drm_bo_usage_deref_locked(&dev_priv->mm.ring.bo);
 	}
 
 	if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
@@ -658,9 +674,9 @@ void radeon_gem_mm_fini(struct drm_device *dev)
 	}
 
 	if (dev_priv->flags & RADEON_IS_PCIE) {
-		if (dev_priv->mm.pcie_table) {
-			drm_bo_kunmap(&dev_priv->mm.pcie_table_map);
-			drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table);
+		if (dev_priv->mm.pcie_table.bo) {
+			drm_bo_kunmap(&dev_priv->mm.pcie_table.kmap);
+			drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table.bo);
 		}
 	}
 
@@ -669,6 +685,9 @@ void radeon_gem_mm_fini(struct drm_device *dev)
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
+	drm_bo_driver_finish(dev);
+	dev_priv->mm_enabled = false;
 }
 
 int radeon_gem_object_pin(struct drm_gem_object *obj,
@@ -680,8 +699,479 @@ int radeon_gem_object_pin(struct drm_gem_object *obj,
 	obj_priv = obj->driver_private;
 
 	ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
-				 DRM_BO_HINT_DONT_FENCE, 0, NULL);
+				 DRM_BO_HINT_DONT_FENCE, 0);
 
 	return ret;
 }
 
+#define RADEON_IB_MEMORY (1*1024*1024)
+#define RADEON_IB_SIZE (65536)
+
+#define RADEON_NUM_IB (RADEON_IB_MEMORY / RADEON_IB_SIZE)
+
+int radeon_gem_ib_get(struct drm_device *dev, void **ib, uint32_t dwords, uint32_t *card_offset)
+{
+	int i, index = -1;
+	int ret;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	for (i = 0; i < RADEON_NUM_IB; i++) {
+		if (!(dev_priv->ib_alloc_bitmap & (1 << i))){
+			index = i;
+			break;
+		}
+	}
+
+	/* if all in use we need to wait */
+	if (index == -1) {
+		for (i = 0; i < RADEON_NUM_IB; i++) {
+			if (dev_priv->ib_alloc_bitmap & (1 << i)) {
+				mutex_lock(&dev_priv->ib_objs[i]->bo->mutex);
+				ret = drm_bo_wait(dev_priv->ib_objs[i]->bo, 0, 1, 0, 0);
+				mutex_unlock(&dev_priv->ib_objs[i]->bo->mutex);
+				if (ret)
+					continue;
+				dev_priv->ib_alloc_bitmap &= ~(1 << i);
+				index = i;
+				break;
+			}
+		}
+	}
+
+	if (index == -1) {
+		DRM_ERROR("Major case fail to allocate IB from freelist %x\n", dev_priv->ib_alloc_bitmap);
+		return -EINVAL;
+	}
+
+
+	if (dwords > RADEON_IB_SIZE / sizeof(uint32_t))
+		return -EINVAL;
+
+	ret = drm_bo_do_validate(dev_priv->ib_objs[index]->bo, 0,
+				 DRM_BO_FLAG_NO_EVICT,
+				 0, 0);
+	if (ret) {
+		DRM_ERROR("Failed to validate IB %d\n", index);
+		return -EINVAL;
+	}
+
+	*card_offset = dev_priv->gart_vm_start + dev_priv->ib_objs[index]->bo->offset;
+	*ib = dev_priv->ib_objs[index]->kmap.virtual;
+	dev_priv->ib_alloc_bitmap |= (1 << i);
+	return 0;
+}
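
radeon_gem_ib_get() above hands out one of RADEON_NUM_IB
(1 MiB / 64 KiB = 16) kernel-mapped indirect buffers by scanning a bitmap
for a clear bit, falling back to waiting on a fenced-busy buffer when all
are taken. A toy userspace model of just the bitmap bookkeeping (in the
driver itself the bit is cleared in the wait path of radeon_gem_ib_get(),
once drm_bo_wait() says the GPU is done with the buffer):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_IB 16	/* matches RADEON_IB_MEMORY / RADEON_IB_SIZE */

	/* find a clear bit, mark it used, return its index */
	static int ib_alloc(uint16_t *bitmap)
	{
		int i;
		for (i = 0; i < NUM_IB; i++) {
			if (!(*bitmap & (1u << i))) {
				*bitmap |= (1u << i);
				return i;
			}
		}
		return -1;	/* all busy: the driver then waits on a fence */
	}

	static void ib_free(uint16_t *bitmap, int idx)
	{
		*bitmap &= ~(1u << idx);
	}

	int main(void)
	{
		uint16_t bitmap = 0;
		int a = ib_alloc(&bitmap);
		int b = ib_alloc(&bitmap);
		printf("got IBs %d and %d, bitmap=%#x\n", a, b, (unsigned)bitmap);
		ib_free(&bitmap, a);
		printf("freed %d, bitmap=%#x\n", a, (unsigned)bitmap);
		return 0;
	}
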
 
+static void radeon_gem_ib_free(struct drm_device *dev, void *ib, uint32_t dwords)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_fence_object *fence;
+	int ret;
+	int i;
+
+	for (i = 0; i < RADEON_NUM_IB; i++) {
+
+		if (dev_priv->ib_objs[i]->kmap.virtual == ib) {
+			/* emit a fence object */
+			ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
+			if (ret) {
+
+				drm_putback_buffer_objects(dev);
+			}
+			/* dereference the fence object */
+			if (fence)
+				drm_fence_usage_deref_unlocked(&fence);
+		}
+	}
+
+}
+
+static int radeon_gem_ib_destroy(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	if (dev_priv->ib_objs) {
+		for (i = 0; i < RADEON_NUM_IB; i++) {
+			if (dev_priv->ib_objs[i]) {
+				drm_bo_kunmap(&dev_priv->ib_objs[i]->kmap);
+				drm_bo_usage_deref_unlocked(&dev_priv->ib_objs[i]->bo);
+			}
+			drm_free(dev_priv->ib_objs[i], sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
+		}
+		drm_free(dev_priv->ib_objs, RADEON_NUM_IB*sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
+	}
+	dev_priv->ib_objs = NULL;
+	return 0;
+}
+
+static int radeon_gem_relocate(struct drm_device *dev, struct drm_file *file_priv,
+			       uint32_t *reloc, uint32_t *offset)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	/* relocate the handle */
+	int domains = reloc[2];
+	struct drm_gem_object *obj;
+	int flags = 0;
+	int ret;
+	struct drm_radeon_gem_object *obj_priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, reloc[1]);
+	if (!obj)
+		return false;
+
+	obj_priv = obj->driver_private;
+	if (domains == RADEON_GEM_DOMAIN_VRAM) {
+		flags = DRM_BO_FLAG_MEM_VRAM;
+	} else {
+		flags = DRM_BO_FLAG_MEM_TT;
+	}
+
+	ret = drm_bo_do_validate(obj_priv->bo, flags, DRM_BO_MASK_MEM, 0, 0);
+	if (ret)
+		return ret;
+
+	if (flags == DRM_BO_FLAG_MEM_VRAM)
+		*offset = obj_priv->bo->offset + dev_priv->fb_location;
+	else
+		*offset = obj_priv->bo->offset + dev_priv->gart_vm_start;
+
+	/* BAD BAD BAD - LINKED LIST THE OBJS and UNREF ONCE IB is SUBMITTED */
+	drm_gem_object_unreference(obj);
+	return 0;
+}
+
+/* allocate 1MB of 64k IBs that the kernel can keep mapped */
+static int radeon_gem_ib_init(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i;
+	int ret;
+
+	dev_priv->ib_objs = drm_calloc(RADEON_NUM_IB, sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
+	if (!dev_priv->ib_objs)
+		goto free_all;
+
+	for (i = 0; i < RADEON_NUM_IB; i++) {
+		dev_priv->ib_objs[i] = drm_calloc(1, sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
+		if (!dev_priv->ib_objs[i])
+			goto free_all;
+
+		ret = drm_buffer_object_create(dev, RADEON_IB_SIZE,
+					       drm_bo_type_kernel,
+					       DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
+					       DRM_BO_FLAG_MAPPABLE, 0,
+					       0, 0, &dev_priv->ib_objs[i]->bo);
+		if (ret)
+			goto free_all;
+
+		ret = drm_bo_kmap(dev_priv->ib_objs[i]->bo, 0, RADEON_IB_SIZE >> PAGE_SHIFT,
+				  &dev_priv->ib_objs[i]->kmap);
+
+		if (ret)
+			goto free_all;
+	}
+
+	dev_priv->ib_alloc_bitmap = 0;
+
+	dev_priv->cs.ib_get = radeon_gem_ib_get;
+	dev_priv->cs.ib_free = radeon_gem_ib_free;
+
+	radeon_cs_init(dev);
+	dev_priv->cs.relocate = radeon_gem_relocate;
+	return 0;
+
+free_all:
+	radeon_gem_ib_destroy(dev);
+	return -ENOMEM;
+}
+
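
radeon_gem_relocate() above consumes a three-word relocation record;
judging from the indices it reads, reloc[1] carries the GEM handle and
reloc[2] the requested placement domain (reloc[0] is not inspected here).
A hedged sketch of building such a record (the handle and the domain
constant's numeric value are assumptions for illustration only):

	#include <stdint.h>
	#include <stdio.h>

	#define RADEON_GEM_DOMAIN_VRAM 0x4	/* assumed value, for illustration */

	int main(void)
	{
		uint32_t reloc[3];
		reloc[0] = 0;				/* unused by the relocate hook above */
		reloc[1] = 42;				/* GEM object handle (hypothetical) */
		reloc[2] = RADEON_GEM_DOMAIN_VRAM;	/* requested domain */
		printf("reloc: handle=%u domain=%#x\n",
		       (unsigned)reloc[1], (unsigned)reloc[2]);
		return 0;
	}
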
+#define RADEON_DMA_BUFFER_SIZE (64 * 1024)
+#define RADEON_DMA_BUFFER_COUNT (16)
+
+
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+				  struct drm_buf_entry * entry)
+{
+	int i;
+
+	if (entry->seg_count) {
+		for (i = 0; i < entry->seg_count; i++) {
+			if (entry->seglist[i]) {
+				drm_pci_free(dev, entry->seglist[i]);
+			}
+		}
+		drm_free(entry->seglist,
+			 entry->seg_count *
+			 sizeof(*entry->seglist), DRM_MEM_SEGS);
+
+		entry->seg_count = 0;
+	}
+
+	if (entry->buf_count) {
+		for (i = 0; i < entry->buf_count; i++) {
+			if (entry->buflist[i].dev_private) {
+				drm_free(entry->buflist[i].dev_private,
+					 entry->buflist[i].dev_priv_size,
+					 DRM_MEM_BUFS);
+			}
+		}
+		drm_free(entry->buflist,
+			 entry->buf_count *
+			 sizeof(*entry->buflist), DRM_MEM_BUFS);
+
+		entry->buf_count = 0;
+	}
+}
+
+static int radeon_gem_addbufs(struct drm_device *dev)
+{
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	struct drm_buf **temp_buflist;
+
+	if (!dma)
+		return -EINVAL;
+
+	count = RADEON_DMA_BUFFER_COUNT;
+	order = drm_order(RADEON_DMA_BUFFER_SIZE);
+	size = 1 << order;
+
+	alignment = PAGE_ALIGN(size);
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = dev_priv->mm.dma_bufs.bo->offset;
+
+	DRM_DEBUG("count: %d\n", count);
+	DRM_DEBUG("order: %d\n", order);
+	DRM_DEBUG("size: %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment: %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total: %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	if (dev->queue_count)
+		return -EBUSY; /* Not while in use */
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM; /* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = dev_priv->gart_vm_start + agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		init_waitqueue_head(&buf->dma_wait);
+		buf->file_priv = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+
+		memset(buf->dev_private, 0, buf->dev_priv_size);
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	dma->flags = _DRM_DMA_USE_SG;
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
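
A quick check of the sizing arithmetic in radeon_gem_addbufs() above,
assuming 4 KiB pages and that drm_order() returns the log2 of its argument
(so drm_order(64 * 1024) == 16):

	#include <stdio.h>

	int main(void)
	{
		const int PAGE_SHIFT = 12;
		const int PAGE_SIZE = 1 << PAGE_SHIFT;
		int order = 16;					/* drm_order(64 * 1024) */
		int size = 1 << order;				/* 65536 */
		int page_order = order - PAGE_SHIFT > 0 ?
				 order - PAGE_SHIFT : 0;	/* 4 */
		int total = PAGE_SIZE << page_order;		/* 65536: 16 pages per buffer */
		printf("size=%d page_order=%d total=%d\n", size, page_order, total);
		return 0;
	}
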
+static int radeon_gem_dma_bufs_init(struct drm_device *dev)
+{
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	int size = RADEON_DMA_BUFFER_SIZE * RADEON_DMA_BUFFER_COUNT;
+	int ret;
+
+	ret = drm_dma_setup(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_buffer_object_create(dev, size, drm_bo_type_device,
+				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_NO_EVICT |
+				       DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE, 0,
+				       0, 0, &dev_priv->mm.dma_bufs.bo);
+	if (ret) {
+		DRM_ERROR("Failed to create DMA bufs\n");
+		return -ENOMEM;
+	}
+
+	ret = drm_bo_kmap(dev_priv->mm.dma_bufs.bo, 0, size >> PAGE_SHIFT,
+			  &dev_priv->mm.dma_bufs.kmap);
+	if (ret) {
+		DRM_ERROR("Failed to mmap DMA buffers\n");
+		return -ENOMEM;
+	}
+	DRM_DEBUG("\n");
+	radeon_gem_addbufs(dev);
+
+	DRM_DEBUG("%x %d\n", dev_priv->mm.dma_bufs.bo->map_list.hash.key, size);
+	dev->agp_buffer_token = dev_priv->mm.dma_bufs.bo->map_list.hash.key << PAGE_SHIFT;
+	dev_priv->mm.fake_agp_map.handle = dev_priv->mm.dma_bufs.kmap.virtual;
+	dev_priv->mm.fake_agp_map.size = size;
+
+	dev->agp_buffer_map = &dev_priv->mm.fake_agp_map;
+	dev_priv->gart_buffers_offset = dev_priv->mm.dma_bufs.bo->offset + dev_priv->gart_vm_start;
+	return 0;
+}
+
+static void radeon_gem_dma_bufs_destroy(struct drm_device *dev)
+{
+
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	drm_dma_takedown(dev);
+
+	drm_bo_kunmap(&dev_priv->mm.dma_bufs.kmap);
+	drm_bo_usage_deref_unlocked(&dev_priv->mm.dma_bufs.bo);
+}
+
+
+static struct drm_gem_object *gem_object_get(struct drm_device *dev, uint32_t name)
+{
+	struct drm_gem_object *obj;
+
+	spin_lock(&dev->object_name_lock);
+	obj = idr_find(&dev->object_name_idr, name);
+	if (obj)
+		drm_gem_object_reference(obj);
+	spin_unlock(&dev->object_name_lock);
+	return obj;
+}
+
+void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	struct drm_gem_object *obj;
+	struct drm_radeon_gem_object *obj_priv;
+
+	/* update front_pitch_offset and back_pitch_offset */
+	obj = gem_object_get(dev, sarea_priv->front_handle);
+	if (obj) {
+		obj_priv = obj->driver_private;
+
+		dev_priv->front_offset = obj_priv->bo->offset;
+		dev_priv->front_pitch_offset = (((sarea_priv->front_pitch / 64) << 22) |
+						((obj_priv->bo->offset
+						  + dev_priv->fb_location) >> 10));
+		drm_gem_object_unreference(obj);
+	}
+
+	obj = gem_object_get(dev, sarea_priv->back_handle);
+	if (obj) {
+		obj_priv = obj->driver_private;
+		dev_priv->back_offset = obj_priv->bo->offset;
+		dev_priv->back_pitch_offset = (((sarea_priv->back_pitch / 64) << 22) |
+					       ((obj_priv->bo->offset
+						 + dev_priv->fb_location) >> 10));
+		drm_gem_object_unreference(obj);
+	}
+	dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+
+}

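
The pitch/offset packing in radeon_gem_update_offsets() puts the pitch in
64-byte units in the high bits and the surface address in 1 KiB units in
the low bits. A worked example with made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pitch = 1280 * 4;		/* bytes per scanline (hypothetical) */
		uint32_t bo_offset = 0x100000;		/* BO offset in VRAM (hypothetical) */
		uint32_t fb_location = 0xe0000000;	/* VRAM aperture base (hypothetical) */
		uint32_t pitch_offset = ((pitch / 64) << 22) |
					((bo_offset + fb_location) >> 10);
		printf("front_pitch_offset = %#010x\n", (unsigned)pitch_offset);	/* 0x14380400 */
		return 0;
	}
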
@@ -27,6 +27,38 @@
 #include "radeon_drm.h"
 #include "radeon_drv.h"
 
+void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+{
+	struct drm_radeon_private *dev_priv = radeon_connector->base.dev->dev_private;
+	uint32_t temp;
+	struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
+
+	if (lock_state) {
+		temp = RADEON_READ(rec->a_clk_reg);
+		temp &= ~(rec->a_clk_mask);
+		RADEON_WRITE(rec->a_clk_reg, temp);
+
+		temp = RADEON_READ(rec->a_data_reg);
+		temp &= ~(rec->a_data_mask);
+		RADEON_WRITE(rec->a_data_reg, temp);
+	}
+
+	temp = RADEON_READ(rec->mask_clk_reg);
+	if (lock_state)
+		temp |= rec->mask_clk_mask;
+	else
+		temp &= ~rec->mask_clk_mask;
+	RADEON_WRITE(rec->mask_clk_reg, temp);
+	temp = RADEON_READ(rec->mask_clk_reg);
+
+	temp = RADEON_READ(rec->mask_data_reg);
+	if (lock_state)
+		temp |= rec->mask_data_mask;
+	else
+		temp &= ~rec->mask_data_mask;
+	RADEON_WRITE(rec->mask_data_reg, temp);
+	temp = RADEON_READ(rec->mask_data_reg);
+}
+
 static int get_clock(void *i2c_priv)
 {
@@ -88,7 +120,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 	i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
 	if (i2c == NULL)
 		return NULL;
 
 	i2c->adapter.owner = THIS_MODULE;
 	i2c->adapter.id = I2C_HW_B_RADEON;
 	i2c->adapter.algo_data = &i2c->algo;
@@ -113,7 +145,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 out_free:
 	drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
 	return NULL;
 
 }
 
 void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
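
One detail of radeon_i2c_do_lock() worth calling out: each RADEON_WRITE is
followed by a read of the same register, which looks like the usual posting
read that flushes the write out to the device before continuing. A sketch
of the idiom with stand-in accessors (mmio_read32/mmio_write32 are
hypothetical, not DRM functions):

	#include <stdint.h>

	static volatile uint32_t fake_reg;	/* stands in for a real MMIO register */

	static uint32_t mmio_read32(volatile uint32_t *reg) { return *reg; }
	static void mmio_write32(volatile uint32_t *reg, uint32_t v) { *reg = v; }

	/* read-modify-write a mask bit, then read back to flush the write */
	static void set_mask_bit(volatile uint32_t *reg, uint32_t mask, int enable)
	{
		uint32_t temp = mmio_read32(reg);
		if (enable)
			temp |= mask;
		else
			temp &= ~mask;
		mmio_write32(reg, temp);
		(void)mmio_read32(reg);		/* posting read */
	}

	int main(void)
	{
		set_mask_bit(&fake_reg, 1u << 3, 1);
		return fake_reg == (1u << 3) ? 0 : 1;
	}
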
File diff suppressed because it is too large
File diff suppressed because it is too large

@@ -87,10 +87,23 @@ enum radeon_rmx_type {
 	RMX_CENTER,
 };
 
+enum radeon_tv_std {
+	TV_STD_NTSC,
+	TV_STD_PAL,
+	TV_STD_PAL_M,
+	TV_STD_PAL_60,
+	TV_STD_NTSC_J,
+	TV_STD_SCART_PAL,
+	TV_STD_SECAM,
+	TV_STD_PAL_CN,
+};
+
 struct radeon_i2c_bus_rec {
 	bool valid;
 	uint32_t mask_clk_reg;
 	uint32_t mask_data_reg;
+	uint32_t a_clk_reg;
+	uint32_t a_data_reg;
 	uint32_t put_clk_reg;
 	uint32_t put_data_reg;
 	uint32_t get_clk_reg;
@@ -101,6 +114,8 @@ struct radeon_i2c_bus_rec {
 	uint32_t put_data_mask;
 	uint32_t get_clk_mask;
 	uint32_t get_data_mask;
+	uint32_t a_clk_mask;
+	uint32_t a_data_mask;
 };
 
 struct radeon_bios_connector {
@@ -115,8 +130,13 @@ struct radeon_bios_connector {
 	int igp_lane_info;
 };
 
+struct radeon_tmds_pll {
+	uint32_t freq;
+	uint32_t value;
+};
+
 #define RADEON_MAX_BIOS_CONNECTOR 16
 
 #define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
 #define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
 #define RADEON_PLL_USE_REF_DIV (1 << 2)
@@ -124,27 +144,177 @@ struct radeon_bios_connector {
 #define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4)
 
 struct radeon_pll {
 	uint16_t reference_freq;
 	uint16_t reference_div;
 	uint32_t pll_in_min;
 	uint32_t pll_in_max;
 	uint32_t pll_out_min;
 	uint32_t pll_out_max;
 	uint16_t xclk;
+
+	uint32_t min_ref_div;
+	uint32_t max_ref_div;
+	uint32_t min_post_div;
+	uint32_t max_post_div;
+	uint32_t min_feedback_div;
+	uint32_t max_feedback_div;
+	uint32_t best_vco;
+};
+
+#define MAX_H_CODE_TIMING_LEN 32
+#define MAX_V_CODE_TIMING_LEN 32
+
+struct radeon_legacy_state {
+
+	uint32_t bus_cntl;
+
+	/* DAC */
+	uint32_t dac_cntl;
+	uint32_t dac2_cntl;
+	uint32_t dac_macro_cntl;
+
+	/* CRTC 1 */
+	uint32_t crtc_gen_cntl;
+	uint32_t crtc_ext_cntl;
+	uint32_t crtc_h_total_disp;
+	uint32_t crtc_h_sync_strt_wid;
+	uint32_t crtc_v_total_disp;
+	uint32_t crtc_v_sync_strt_wid;
+	uint32_t crtc_offset;
+	uint32_t crtc_offset_cntl;
+	uint32_t crtc_pitch;
+	uint32_t disp_merge_cntl;
+	uint32_t grph_buffer_cntl;
+	uint32_t crtc_more_cntl;
+	uint32_t crtc_tile_x0_y0;
+
+	/* CRTC 2 */
+	uint32_t crtc2_gen_cntl;
+	uint32_t crtc2_h_total_disp;
+	uint32_t crtc2_h_sync_strt_wid;
+	uint32_t crtc2_v_total_disp;
+	uint32_t crtc2_v_sync_strt_wid;
+	uint32_t crtc2_offset;
+	uint32_t crtc2_offset_cntl;
+	uint32_t crtc2_pitch;
+	uint32_t crtc2_tile_x0_y0;
+
+	uint32_t disp_output_cntl;
+	uint32_t disp_tv_out_cntl;
+	uint32_t disp_hw_debug;
+	uint32_t disp2_merge_cntl;
+	uint32_t grph2_buffer_cntl;
+
+	/* FP regs */
+	uint32_t fp_crtc_h_total_disp;
+	uint32_t fp_crtc_v_total_disp;
+	uint32_t fp_gen_cntl;
+	uint32_t fp2_gen_cntl;
+	uint32_t fp_h_sync_strt_wid;
+	uint32_t fp_h2_sync_strt_wid;
+	uint32_t fp_horz_stretch;
+	uint32_t fp_horz_vert_active;
+	uint32_t fp_panel_cntl;
+	uint32_t fp_v_sync_strt_wid;
+	uint32_t fp_v2_sync_strt_wid;
+	uint32_t fp_vert_stretch;
+	uint32_t lvds_gen_cntl;
+	uint32_t lvds_pll_cntl;
+	uint32_t tmds_pll_cntl;
+	uint32_t tmds_transmitter_cntl;
+
+	/* Computed values for PLL */
+	uint32_t dot_clock_freq;
+	uint32_t pll_output_freq;
+	int feedback_div;
+	int reference_div;
+	int post_div;
+
+	/* PLL registers */
+	uint32_t ppll_ref_div;
+	uint32_t ppll_div_3;
+	uint32_t htotal_cntl;
+	uint32_t vclk_ecp_cntl;
+
+	/* Computed values for PLL2 */
+	uint32_t dot_clock_freq_2;
+	uint32_t pll_output_freq_2;
+	int feedback_div_2;
+	int reference_div_2;
+	int post_div_2;
+
+	/* PLL2 registers */
+	uint32_t p2pll_ref_div;
+	uint32_t p2pll_div_0;
+	uint32_t htotal_cntl2;
+	uint32_t pixclks_cntl;
+
+	bool palette_valid;
+	uint32_t palette[256];
+	uint32_t palette2[256];
+
+	uint32_t disp2_req_cntl1;
+	uint32_t disp2_req_cntl2;
+	uint32_t dmif_mem_cntl1;
+	uint32_t disp1_req_cntl1;
+
+	uint32_t fp_2nd_gen_cntl;
+	uint32_t fp2_2_gen_cntl;
+	uint32_t tmds2_cntl;
+	uint32_t tmds2_transmitter_cntl;
+
+	/* TV out registers */
+	uint32_t tv_master_cntl;
+	uint32_t tv_htotal;
+	uint32_t tv_hsize;
+	uint32_t tv_hdisp;
+	uint32_t tv_hstart;
+	uint32_t tv_vtotal;
+	uint32_t tv_vdisp;
+	uint32_t tv_timing_cntl;
+	uint32_t tv_vscaler_cntl1;
+	uint32_t tv_vscaler_cntl2;
+	uint32_t tv_sync_size;
+	uint32_t tv_vrestart;
+	uint32_t tv_hrestart;
+	uint32_t tv_frestart;
+	uint32_t tv_ftotal;
+	uint32_t tv_clock_sel_cntl;
+	uint32_t tv_clkout_cntl;
+	uint32_t tv_data_delay_a;
+	uint32_t tv_data_delay_b;
+	uint32_t tv_dac_cntl;
+	uint32_t tv_pll_cntl;
+	uint32_t tv_pll_cntl1;
+	uint32_t tv_pll_fine_cntl;
+	uint32_t tv_modulator_cntl1;
+	uint32_t tv_modulator_cntl2;
+	uint32_t tv_frame_lock_cntl;
+	uint32_t tv_pre_dac_mux_cntl;
+	uint32_t tv_rgb_cntl;
+	uint32_t tv_y_saw_tooth_cntl;
+	uint32_t tv_y_rise_cntl;
+	uint32_t tv_y_fall_cntl;
+	uint32_t tv_uv_adr;
+	uint32_t tv_upsamp_and_gain_cntl;
+	uint32_t tv_gain_limit_settings;
+	uint32_t tv_linear_gain_settings;
+	uint32_t tv_crc_cntl;
+	uint32_t tv_sync_cntl;
+	uint32_t gpiopad_a;
+	uint32_t pll_test_cntl;
+
+	uint16_t h_code_timing[MAX_H_CODE_TIMING_LEN];
+	uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
+
-	uint32_t min_ref_div;
-	uint32_t max_ref_div;
-	uint32_t min_post_div;
-	uint32_t max_post_div;
-	uint32_t min_feedback_div;
-	uint32_t max_feedback_div;
-	uint32_t best_vco;
 };
 
 struct radeon_mode_info {
 	struct atom_context *atom_context;
 	struct radeon_bios_connector bios_connector[RADEON_MAX_BIOS_CONNECTOR];
 	struct radeon_pll pll;
+	struct radeon_legacy_state legacy_state;
 };
 
 struct radeon_crtc {
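
The divider-limit fields just added to struct radeon_pll bound the search
that radeon_compute_pll() performs. A self-contained sketch of the
constraint check and the resulting dot clock; the limits and dividers below
are made up, not taken from any real BIOS table, and the 10000x fixed-point
scaling the driver uses is ignored:

	#include <stdint.h>
	#include <stdio.h>

	struct pll_limits {
		uint32_t reference_freq;	/* 10 kHz units, as in the driver */
		uint32_t min_ref_div, max_ref_div;
		uint32_t min_feedback_div, max_feedback_div;
		uint32_t min_post_div, max_post_div;
	};

	int main(void)
	{
		struct pll_limits pll = { 2700, 2, 1023, 4, 2047, 1, 12 };
		uint32_t ref_div = 6, fb_div = 300, post_div = 5;

		if (ref_div < pll.min_ref_div || ref_div > pll.max_ref_div ||
		    fb_div < pll.min_feedback_div || fb_div > pll.max_feedback_div ||
		    post_div < pll.min_post_div || post_div > pll.max_post_div) {
			fprintf(stderr, "dividers out of range\n");
			return 1;
		}
		/* 2700 * 300 / (6 * 5) = 27000, i.e. 270.00 MHz in 10 kHz units */
		printf("dot clock = %u\n",
		       (unsigned)(pll.reference_freq * fb_div / (ref_div * post_div)));
		return 0;
	}
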
@@ -178,20 +348,40 @@ struct radeon_encoder {
 		enum radeon_tmds_type tmds;
 	} type;
 	int atom_device; /* atom devices */
+
+	/* preferred mode */
 	uint32_t panel_xres, panel_yres;
 	uint32_t hoverplus, hsync_width;
 	uint32_t hblank;
 	uint32_t voverplus, vsync_width;
 	uint32_t vblank;
-	uint32_t panel_pwr_delay;
 	uint32_t dotclock;
+
+	/* legacy lvds */
+	uint16_t panel_vcc_delay;
+	uint16_t panel_pwr_delay;
+	uint16_t panel_digon_delay;
+	uint16_t panel_blon_delay;
+	uint32_t panel_ref_divider;
+	uint32_t panel_post_divider;
+	uint32_t panel_fb_divider;
+	bool use_bios_dividers;
+	uint32_t lvds_gen_cntl;
+
+	/* legacy tv dac */
+	uint32_t ps2_tvdac_adj;
+	uint32_t ntsc_tvdac_adj;
+	uint32_t pal_tvdac_adj;
+	enum radeon_tv_std tv_std;
+
+	/* legacy int tmds */
+	struct radeon_tmds_pll tmds_pll[4];
 };
 
 struct radeon_connector {
 	struct drm_connector base;
 	struct radeon_i2c_chan *ddc_bus;
 	int use_digital;
 
 };
 
 struct radeon_framebuffer {
@@ -221,6 +411,10 @@ struct drm_encoder *radeon_encoder_lvtma_add(struct drm_device *dev, int bios_index)
 struct drm_encoder *radeon_encoder_atom_dac_add(struct drm_device *dev, int bios_index, int dac_id, int with_tv);
 struct drm_encoder *radeon_encoder_atom_tmds_add(struct drm_device *dev, int bios_index, int tmds_type);
 struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
+
 extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
 extern void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y);
@@ -229,10 +423,22 @@ extern void atombios_crtc_mode_set(struct drm_crtc *crtc,
 				   struct drm_display_mode *adjusted_mode,
 				   int x, int y);
 extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
+
+extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+				  struct drm_file *file_priv,
+				  uint32_t handle,
+				  uint32_t width,
+				  uint32_t height);
+extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+				   int x, int y);
+
 extern bool radeon_atom_get_clock_info(struct drm_device *dev);
 extern bool radeon_combios_get_clock_info(struct drm_device *dev);
 extern void radeon_get_lvds_info(struct radeon_encoder *encoder);
 extern bool radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_get_tmds_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_get_tv_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
 extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 				     u16 blue, int regno);
 struct drm_framebuffer *radeon_user_framebuffer_create(struct drm_device *dev,
@@ -245,11 +451,18 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
 void radeon_atombios_init_crtc(struct drm_device *dev,
 			       struct radeon_crtc *radeon_crtc);
-void avivo_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+void radeon_legacy_init_crtc(struct drm_device *dev,
+			     struct radeon_crtc *radeon_crtc);
+void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
 
 void radeon_atom_static_pwrmgt_setup(struct drm_device *dev, int enable);
 void radeon_atom_dyn_clk_setup(struct drm_device *dev, int enable);
 void radeon_get_clock_info(struct drm_device *dev);
 extern bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device *dev);
+
+void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
+			   struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode);
+void radeon_enc_destroy(struct drm_encoder *encoder);
+
 #endif

@ -374,6 +374,9 @@
|
||||||
# define RADEON_CRTC_ICON_EN (1 << 15)
|
# define RADEON_CRTC_ICON_EN (1 << 15)
|
||||||
# define RADEON_CRTC_CUR_EN (1 << 16)
|
# define RADEON_CRTC_CUR_EN (1 << 16)
|
||||||
# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
|
# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
|
||||||
|
# define RADEON_CRTC_CUR_MODE_SHIFT 20
|
||||||
|
# define RADEON_CRTC_CUR_MODE_MONO 0
|
||||||
|
# define RADEON_CRTC_CUR_MODE_24BPP 2
|
||||||
# define RADEON_CRTC_EXT_DISP_EN (1 << 24)
|
# define RADEON_CRTC_EXT_DISP_EN (1 << 24)
|
||||||
# define RADEON_CRTC_EN (1 << 25)
|
# define RADEON_CRTC_EN (1 << 25)
|
||||||
# define RADEON_CRTC_DISP_REQ_EN_B (1 << 26)
|
# define RADEON_CRTC_DISP_REQ_EN_B (1 << 26)
|
||||||
|
@@ -556,6 +559,24 @@
 # define RADEON_DAC_PDWN_R (1 << 16)
 # define RADEON_DAC_PDWN_G (1 << 17)
 # define RADEON_DAC_PDWN_B (1 << 18)
+#define RADEON_DISP_PWR_MAN 0x0d08
+# define RADEON_DISP_PWR_MAN_D3_CRTC_EN (1 << 0)
+# define RADEON_DISP_PWR_MAN_D3_CRTC2_EN (1 << 4)
+# define RADEON_DISP_PWR_MAN_DPMS_ON (0 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_STANDBY (1 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_SUSPEND (2 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
+# define RADEON_DISP_D3_RST (1 << 16)
+# define RADEON_DISP_D3_REG_RST (1 << 17)
+# define RADEON_DISP_D3_GRPH_RST (1 << 18)
+# define RADEON_DISP_D3_SUBPIC_RST (1 << 19)
+# define RADEON_DISP_D3_OV0_RST (1 << 20)
+# define RADEON_DISP_D1D2_GRPH_RST (1 << 21)
+# define RADEON_DISP_D1D2_SUBPIC_RST (1 << 22)
+# define RADEON_DISP_D1D2_OV0_RST (1 << 23)
+# define RADEON_DIG_TMDS_ENABLE_RST (1 << 24)
+# define RADEON_TV_ENABLE_RST (1 << 25)
+# define RADEON_AUTO_PWRUP_EN (1 << 26)
 #define RADEON_TV_DAC_CNTL 0x088c
 # define RADEON_TV_DAC_NBLANK (1 << 0)
 # define RADEON_TV_DAC_NHOLD (1 << 1)
@@ -1006,14 +1027,23 @@
 # define RADEON_LVDS_DISPLAY_DIS (1 << 1)
 # define RADEON_LVDS_PANEL_TYPE (1 << 2)
 # define RADEON_LVDS_PANEL_FORMAT (1 << 3)
+# define RADEON_LVDS_NO_FM (0 << 4)
+# define RADEON_LVDS_2_GREY (1 << 4)
+# define RADEON_LVDS_4_GREY (2 << 4)
 # define RADEON_LVDS_RST_FM (1 << 6)
 # define RADEON_LVDS_EN (1 << 7)
 # define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
 # define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
 # define RADEON_LVDS_BL_MOD_EN (1 << 16)
+# define RADEON_LVDS_BL_CLK_SEL (1 << 17)
 # define RADEON_LVDS_DIGON (1 << 18)
 # define RADEON_LVDS_BLON (1 << 19)
+# define RADEON_LVDS_FP_POL_LOW (1 << 20)
+# define RADEON_LVDS_LP_POL_LOW (1 << 21)
+# define RADEON_LVDS_DTM_POL_LOW (1 << 22)
 # define RADEON_LVDS_SEL_CRTC2 (1 << 23)
+# define RADEON_LVDS_FPDI_EN (1 << 27)
+# define RADEON_LVDS_HSYNC_DELAY_SHIFT 28
 #define RADEON_LVDS_PLL_CNTL 0x02d4
 # define RADEON_HSYNC_DELAY_SHIFT 28
 # define RADEON_HSYNC_DELAY_MASK (0xf << 28)
@@ -2095,16 +2125,19 @@
 # define RADEON_STENCIL_ENABLE (1 << 7)
 # define RADEON_Z_ENABLE (1 << 8)
 # define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9)
-# define RADEON_COLOR_FORMAT_ARGB1555 (3 << 10)
-# define RADEON_COLOR_FORMAT_RGB565 (4 << 10)
-# define RADEON_COLOR_FORMAT_ARGB8888 (6 << 10)
-# define RADEON_COLOR_FORMAT_RGB332 (7 << 10)
-# define RADEON_COLOR_FORMAT_Y8 (8 << 10)
-# define RADEON_COLOR_FORMAT_RGB8 (9 << 10)
-# define RADEON_COLOR_FORMAT_YUV422_VYUY (11 << 10)
-# define RADEON_COLOR_FORMAT_YUV422_YVYU (12 << 10)
-# define RADEON_COLOR_FORMAT_aYUV444 (14 << 10)
-# define RADEON_COLOR_FORMAT_ARGB4444 (15 << 10)
+# define RADEON_RB3D_COLOR_FORMAT_SHIFT 10
+# define RADEON_COLOR_FORMAT_ARGB1555 3
+# define RADEON_COLOR_FORMAT_RGB565 4
+# define RADEON_COLOR_FORMAT_ARGB8888 6
+# define RADEON_COLOR_FORMAT_RGB332 7
+# define RADEON_COLOR_FORMAT_Y8 8
+# define RADEON_COLOR_FORMAT_RGB8 9
+# define RADEON_COLOR_FORMAT_YUV422_VYUY 11
+# define RADEON_COLOR_FORMAT_YUV422_YVYU 12
+# define RADEON_COLOR_FORMAT_aYUV444 14
+# define RADEON_COLOR_FORMAT_ARGB4444 15
 
 # define RADEON_CLRCMP_FLIP_ENABLE (1 << 14)
 #define RADEON_RB3D_COLOROFFSET 0x1c40
 # define RADEON_COLOROFFSET_MASK 0xfffffff0
@@ -3086,6 +3119,10 @@
 # define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
 # define RADEON_CSQ_PRIBM_INDBM (4 << 28)
 # define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
+
+#define R300_CP_RESYNC_ADDR 0x778
+#define R300_CP_RESYNC_DATA 0x77c
+
 #define RADEON_CP_CSQ_STAT 0x07f8
 # define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0)
 # define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8)
@@ -3121,6 +3158,7 @@
 # define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
 # define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
 # define RADEON_CP_PACKET0_REG_MASK 0x000007ff
+# define R300_CP_PACKET0_REG_MASK 0x00001fff
 # define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
 # define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
 
@@ -3565,14 +3603,14 @@
 #define AVIVO_D1GRPH_X_END 0x6134
 #define AVIVO_D1GRPH_Y_END 0x6138
 #define AVIVO_D1GRPH_UPDATE 0x6144
-# define AVIVO_D1GRPH_UPDATE_LOCK (1<<16)
+# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
 #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
 
 #define AVIVO_D1CUR_CONTROL 0x6400
-# define AVIVO_D1CURSOR_EN (1<<0)
+# define AVIVO_D1CURSOR_EN (1 << 0)
 # define AVIVO_D1CURSOR_MODE_SHIFT 8
-# define AVIVO_D1CURSOR_MODE_MASK (0x3<<8)
-# define AVIVO_D1CURSOR_MODE_24BPP (0x2)
+# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
+# define AVIVO_D1CURSOR_MODE_24BPP 2
 #define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
 #define AVIVO_D1CUR_SIZE 0x6410
 #define AVIVO_D1CUR_POSITION 0x6414
@@ -4419,6 +4457,8 @@
 # define R300_ENDIAN_SWAP_HALF_DWORD (3 << 0)
 # define R300_MACRO_TILE (1 << 2)
 
+#define R300_TX_BORDER_COLOR_0 0x45c0
+
 #define R300_TX_ENABLE 0x4104
 # define R300_TEX_0_ENABLE (1 << 0)
 # define R300_TEX_1_ENABLE (1 << 1)
|
||||||
# define R300_READ_ENABLE (1 << 2)
|
# define R300_READ_ENABLE (1 << 2)
|
||||||
#define R300_RB3D_ABLENDCNTL 0x4e08
|
#define R300_RB3D_ABLENDCNTL 0x4e08
|
||||||
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
|
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
|
||||||
|
#define R300_RB3D_COLOROFFSET0 0x4e28
|
||||||
|
#define R300_RB3D_COLORPITCH0 0x4e38
|
||||||
|
# define R300_COLORTILE (1 << 16)
|
||||||
|
# define R300_COLORENDIAN_WORD (1 << 19)
|
||||||
|
# define R300_COLORENDIAN_DWORD (2 << 19)
|
||||||
|
# define R300_COLORENDIAN_HALF_DWORD (3 << 19)
|
||||||
|
# define R300_COLORFORMAT_ARGB1555 (3 << 21)
|
||||||
|
# define R300_COLORFORMAT_RGB565 (4 << 21)
|
||||||
|
# define R300_COLORFORMAT_ARGB8888 (6 << 21)
|
||||||
|
# define R300_COLORFORMAT_ARGB32323232 (7 << 21)
|
||||||
|
# define R300_COLORFORMAT_I8 (9 << 21)
|
||||||
|
# define R300_COLORFORMAT_ARGB16161616 (10 << 21)
|
||||||
|
# define R300_COLORFORMAT_VYUY (11 << 21)
|
||||||
|
# define R300_COLORFORMAT_YVYU (12 << 21)
|
||||||
|
# define R300_COLORFORMAT_UV88 (13 << 21)
|
||||||
|
# define R300_COLORFORMAT_ARGB4444 (15 << 21)
|
||||||
|
|
||||||
|
#define R300_RB3D_AARESOLVE_CTL 0x4e88
|
||||||
#define R300_RB3D_COLOR_CHANNEL_MASK 0x4e0c
|
#define R300_RB3D_COLOR_CHANNEL_MASK 0x4e0c
|
||||||
# define R300_BLUE_MASK_EN (1 << 0)
|
# define R300_BLUE_MASK_EN (1 << 0)
|
||||||
# define R300_GREEN_MASK_EN (1 << 1)
|
# define R300_GREEN_MASK_EN (1 << 1)
|
||||||
|
|
|
@@ -675,324 +675,6 @@ struct drm_set_version {
	int drm_dd_minor;
 };
 
-
-#define DRM_FENCE_FLAG_EMIT 0x00000001
-#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
-/**
- * On hardware with no interrupt events for operation completion,
- * indicates that the kernel should sleep while waiting for any blocking
- * operation to complete rather than spinning.
- *
- * Has no effect otherwise.
- */
-#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
-#define DRM_FENCE_FLAG_NO_USER 0x00000010
-
-/* Reserved for driver use */
-#define DRM_FENCE_MASK_DRIVER 0xFF000000
-
-#define DRM_FENCE_TYPE_EXE 0x00000001
-
-struct drm_fence_arg {
-	unsigned int handle;
-	unsigned int fence_class;
-	unsigned int type;
-	unsigned int flags;
-	unsigned int signaled;
-	unsigned int error;
-	unsigned int sequence;
-	unsigned int pad64;
-	uint64_t expand_pad[2]; /*Future expansion */
-};
-
-/* Buffer permissions, referring to how the GPU uses the buffers.
- * these translate to fence types used for the buffers.
- * Typically a texture buffer is read, A destination buffer is write and
- * a command (batch-) buffer is exe. Can be or-ed together.
- */
-
-#define DRM_BO_FLAG_READ (1ULL << 0)
-#define DRM_BO_FLAG_WRITE (1ULL << 1)
-#define DRM_BO_FLAG_EXE (1ULL << 2)
-
-/*
- * All of the bits related to access mode
- */
-#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
-/*
- * Status flags. Can be read to determine the actual state of a buffer.
- * Can also be set in the buffer mask before validation.
- */
-
-/*
- * Mask: Never evict this buffer. Not even with force. This type of buffer is only
- * available to root and must be manually removed before buffer manager shutdown
- * or lock.
- * Flags: Acknowledge
- */
-#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
-
-/*
- * Mask: Require that the buffer is placed in mappable memory when validated.
- * If not set the buffer may or may not be in mappable memory when validated.
- * Flags: If set, the buffer is in mappable memory.
- */
-#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
-
-/* Mask: The buffer should be shareable with other processes.
- * Flags: The buffer is shareable with other processes.
- */
-#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
-
-/* Mask: If set, place the buffer in cache-coherent memory if available.
- * If clear, never place the buffer in cache coherent memory if validated.
- * Flags: The buffer is currently in cache-coherent memory.
- */
-#define DRM_BO_FLAG_CACHED (1ULL << 7)
-
-/* Mask: Make sure that every time this buffer is validated,
- * it ends up on the same location provided that the memory mask is the same.
- * The buffer will also not be evicted when claiming space for
- * other buffers. Basically a pinned buffer but it may be thrown out as
- * part of buffer manager shutdown or locking.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
-
-/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
- * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
- * with unsnooped PTEs instead of snooped, by using chipset-specific cache
- * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
- * as the eviction to local memory (TTM unbind) on map is just a side effect
- * to prevent aggressive cache prefetch from the GPU disturbing the cache
- * management that the DRM is doing.
- *
- * Flags: Acknowledge.
- * Buffers allocated with this flag should not be used for suballocators
- * This type may have issues on CPUs with over-aggressive caching
- * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
- */
-#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
-
-
-/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
-
-/*
- * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
-#define DRM_BO_FLAG_TILE (1ULL << 15)
-
-/*
- * Memory type flags that can be or'ed together in the mask, but only
- * one appears in flags.
- */
-
-/* System memory */
-#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
-/* Translation table memory */
-#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
-/* Vram memory */
-#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
-/* Up to the driver to define. */
-#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
-#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
-#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
-#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
-#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
-/* We can add more of these now with a 64-bit flag type */
-
-/*
- * This is a mask covering all of the memory type flags; easier to just
- * use a single constant than a bunch of | values. It covers
- * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
- */
-#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
-/*
- * This adds all of the CPU-mapping options in with the memory
- * type to label all bits which change how the page gets mapped
- */
-#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
-			     DRM_BO_FLAG_CACHED_MAPPED | \
-			     DRM_BO_FLAG_CACHED | \
-			     DRM_BO_FLAG_MAPPABLE)
-
-/* Driver-private flags */
-#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
-
-/*
- * Don't block on validate and map. Instead, return EBUSY.
- */
-#define DRM_BO_HINT_DONT_BLOCK 0x00000002
-/*
- * Don't place this buffer on the unfenced list. This means
- * that the buffer will not end up having a fence associated
- * with it as a result of this operation
- */
-#define DRM_BO_HINT_DONT_FENCE 0x00000004
-/**
- * On hardware with no interrupt events for operation completion,
- * indicates that the kernel should sleep while waiting for any blocking
- * operation to complete rather than spinning.
- *
- * Has no effect otherwise.
- */
-#define DRM_BO_HINT_WAIT_LAZY 0x00000008
-/*
- * The client has compute relocations refering to this buffer using the
- * offset in the presumed_offset field. If that offset ends up matching
- * where this buffer lands, the kernel is free to skip executing those
- * relocations
- */
-#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
-
-#define DRM_BO_INIT_MAGIC 0xfe769812
-#define DRM_BO_INIT_MAJOR 1
-#define DRM_BO_INIT_MINOR 0
-#define DRM_BO_INIT_PATCH 0
-
-
-struct drm_bo_info_req {
-	uint64_t mask;
-	uint64_t flags;
-	unsigned int handle;
-	unsigned int hint;
-	unsigned int fence_class;
-	unsigned int desired_tile_stride;
-	unsigned int tile_info;
-	unsigned int pad64;
-	uint64_t presumed_offset;
-};
-
-struct drm_bo_create_req {
-	uint64_t flags;
-	uint64_t size;
-	uint64_t buffer_start;
-	unsigned int hint;
-	unsigned int page_alignment;
-};
-
-
-/*
- * Reply flags
- */
-
-#define DRM_BO_REP_BUSY 0x00000001
-
-struct drm_bo_info_rep {
-	uint64_t flags;
-	uint64_t proposed_flags;
-	uint64_t size;
-	uint64_t offset;
-	uint64_t arg_handle;
-	uint64_t buffer_start;
-	unsigned int handle;
-	unsigned int fence_flags;
-	unsigned int rep_flags;
-	unsigned int page_alignment;
-	unsigned int desired_tile_stride;
-	unsigned int hw_tile_stride;
-	unsigned int tile_info;
-	unsigned int pad64;
-	uint64_t expand_pad[4]; /*Future expansion */
-};
-
-struct drm_bo_arg_rep {
-	struct drm_bo_info_rep bo_info;
-	int ret;
-	unsigned int pad64;
-};
-
-struct drm_bo_create_arg {
-	union {
-		struct drm_bo_create_req req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_handle_arg {
-	unsigned int handle;
-};
-
-struct drm_bo_reference_info_arg {
-	union {
-		struct drm_bo_handle_arg req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_map_wait_idle_arg {
-	union {
-		struct drm_bo_info_req req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_op_req {
-	enum {
-		drm_bo_validate,
-		drm_bo_fence,
-		drm_bo_ref_fence,
-	} op;
-	unsigned int arg_handle;
-	struct drm_bo_info_req bo_req;
-};
-
-
-struct drm_bo_op_arg {
-	uint64_t next;
-	union {
-		struct drm_bo_op_req req;
-		struct drm_bo_arg_rep rep;
-	} d;
-	int handled;
-	unsigned int pad64;
-};
-
-
-#define DRM_BO_MEM_LOCAL 0
-#define DRM_BO_MEM_TT 1
-#define DRM_BO_MEM_VRAM 2
-#define DRM_BO_MEM_PRIV0 3
-#define DRM_BO_MEM_PRIV1 4
-#define DRM_BO_MEM_PRIV2 5
-#define DRM_BO_MEM_PRIV3 6
-#define DRM_BO_MEM_PRIV4 7
-
-#define DRM_BO_MEM_TYPES 8 /* For now. */
-
-#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
-#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
-
-struct drm_bo_version_arg {
-	uint32_t major;
-	uint32_t minor;
-	uint32_t patchlevel;
-};
-
-struct drm_mm_type_arg {
-	unsigned int mem_type;
-	unsigned int lock_flags;
-};
-
-struct drm_mm_init_arg {
-	unsigned int magic;
-	unsigned int major;
-	unsigned int minor;
-	unsigned int mem_type;
-	uint64_t p_offset;
-	uint64_t p_size;
-};
-
-struct drm_mm_info_arg {
-	unsigned int mem_type;
-	uint64_t p_size;
-};
-
 struct drm_gem_close {
	/** Handle of the object to be closed. */
	uint32_t handle;
@@ -1337,31 +1019,6 @@ struct drm_mode_crtc_lut {
 
 #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
 
-#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
-#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
-#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
-#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
-
-#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
-
-#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
-#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
-#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
-#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
-#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
-#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
-#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg)
-
 #define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
 #define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
 #define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA2, struct drm_mode_get_connector)
@@ -1439,11 +1096,6 @@ typedef struct drm_agp_binding drm_agp_binding_t;
 typedef struct drm_agp_info drm_agp_info_t;
 typedef struct drm_scatter_gather drm_scatter_gather_t;
 typedef struct drm_set_version drm_set_version_t;
 
-typedef struct drm_fence_arg drm_fence_arg_t;
-typedef struct drm_mm_type_arg drm_mm_type_arg_t;
-typedef struct drm_mm_init_arg drm_mm_init_arg_t;
-typedef enum drm_bo_type drm_bo_type_t;
 #endif
 
 #endif
@@ -135,7 +135,7 @@ int i915_dma_cleanup(struct drm_device * dev)
	return 0;
 }
 
-#if defined(I915_HAVE_BUFFER) && defined(DRI2)
+#if defined(DRI2)
 #define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
 #define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
 #define DRI2_SAREA_BLOCK_NEXT(p) \
@@ -213,12 +213,7 @@ static int i915_initialize(struct drm_device * dev,
		}
	}
 
-#ifdef I915_HAVE_BUFFER
-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
-	}
-#endif
 
	if (init->ring_size != 0) {
		dev_priv->ring.Size = init->ring_size;
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
@@ -253,10 +248,25 @@ static int i915_initialize(struct drm_device * dev,
	 */
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
 
-#ifdef I915_HAVE_BUFFER
-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_init(&dev_priv->cmdbuf_mutex);
+	/* Program Hardware Status Page */
+	if (!I915_NEED_GFX_HWS(dev)) {
+		dev_priv->status_page_dmah =
+			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+		if (!dev_priv->status_page_dmah) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("Can not allocate hardware status page\n");
+			return -ENOMEM;
+		}
+		dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
+		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+		memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
+
+		I915_WRITE(0x02080, dev_priv->dma_status_page);
	}
+	DRM_DEBUG("Enabled hardware status page\n");
+
 #ifdef DRI2
	if (init->func == I915_INIT_DMA2) {
		int ret = setup_dri2_sarea(dev, file_priv, init);
@@ -267,7 +277,6 @@ static int i915_initialize(struct drm_device * dev,
		}
	}
 #endif /* DRI2 */
-#endif /* I915_HAVE_BUFFER */
 
	return 0;
 }
@@ -533,9 +542,6 @@ int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   struct drm_i915_cmdbuffer * cmd)
 {
-#ifdef I915_HAVE_FENCE
-	struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;
 
@@ -562,10 +568,6 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
	}
 
	i915_emit_breadcrumb(dev);
-#ifdef I915_HAVE_FENCE
-	if (unlikely((dev_priv->counter & 0xFF) == 0))
-		drm_fence_flush_old(dev, 0, dev_priv->counter);
-#endif
	return 0;
 }
 
@@ -616,10 +618,6 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
	}
 
	i915_emit_breadcrumb(dev);
-#ifdef I915_HAVE_FENCE
-	if (unlikely((dev_priv->counter & 0xFF) == 0))
-		drm_fence_flush_old(dev, 0, dev_priv->counter);
-#endif
	return 0;
 }
 
@@ -678,7 +676,6 @@ static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
 
 void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int i;
 
@@ -692,10 +689,6 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
		i915_do_dispatch_flip(dev, i, sync);
 
	i915_emit_breadcrumb(dev);
-#ifdef I915_HAVE_FENCE
-	if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
-		drm_fence_flush_old(dev, 0, dev_priv->counter);
-#endif
 }
 
 int i915_quiescent(struct drm_device *dev)
@@ -1049,9 +1042,6 @@ struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
-#ifdef I915_HAVE_BUFFER
-	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
-#endif
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
@@ -380,58 +380,6 @@ typedef struct drm_i915_hws_addr {
	uint64_t addr;
 } drm_i915_hws_addr_t;
 
-/*
- * Relocation header is 4 uint32_ts
- * 0 - 32 bit reloc count
- * 1 - 32-bit relocation type
- * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
- */
-#define I915_RELOC_HEADER 4
-
-/*
- * type 0 relocation has 4-uint32_t stride
- * 0 - offset into buffer
- * 1 - delta to add in
- * 2 - buffer handle
- * 3 - reserved (for optimisations later).
- */
-/*
- * type 1 relocation has 4-uint32_t stride.
- * Hangs off the first item in the op list.
- * Performed after all valiations are done.
- * Try to group relocs into the same relocatee together for
- * performance reasons.
- * 0 - offset into buffer
- * 1 - delta to add in
- * 2 - buffer index in op list.
- * 3 - relocatee index in op list.
- */
-#define I915_RELOC_TYPE_0 0
-#define I915_RELOC0_STRIDE 4
-#define I915_RELOC_TYPE_1 1
-#define I915_RELOC1_STRIDE 4
-
-
-struct drm_i915_op_arg {
-	uint64_t next;
-	uint64_t reloc_ptr;
-	int handled;
-	unsigned int pad64;
-	union {
-		struct drm_bo_op_req req;
-		struct drm_bo_arg_rep rep;
-	} d;
-
-};
-
-struct drm_i915_execbuffer {
-	uint64_t ops_list;
-	uint32_t num_buffers;
-	struct drm_i915_batchbuffer batch;
-	drm_context_t context; /* for lockless use in the future */
-	struct drm_fence_arg fence_arg;
-};
-
 struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
@@ -437,7 +437,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
		return;
 
	if (master_priv->sarea)
-		drm_rmmap(dev, master_priv->sarea);
+		drm_rmmap_locked(dev, master_priv->sarea);
 
	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
 
@@ -166,8 +166,6 @@ void r300_init_reg_flags(struct drm_device *dev)
	for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
		r300_reg_flags[i]|=(mark);
 
-#define MARK_SAFE 1
-#define MARK_CHECK_OFFSET 2
 
 #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
 
@@ -247,6 +245,11 @@ void r300_init_reg_flags(struct drm_device *dev)
	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
 
+	ADD_RANGE(R500_SU_REG_DEST, 1);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV410) {
+		ADD_RANGE(R300_DST_PIPE_CONFIG, 1);
+	}
+
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
		ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
		ADD_RANGE(R500_US_CONFIG, 2);
@@ -257,6 +260,7 @@ void r300_init_reg_flags(struct drm_device *dev)
		ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
		ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
		ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
+		ADD_RANGE(R500_GA_US_VECTOR_INDEX, 2);
	} else {
		ADD_RANGE(R300_PFS_CNTL_0, 3);
		ADD_RANGE(R300_PFS_NODE_0, 4);
@@ -269,9 +273,39 @@ void r300_init_reg_flags(struct drm_device *dev)
		ADD_RANGE(R300_RS_ROUTE_0, 8);
 
	}
 
+	/* add 2d blit engine registers for DDX */
+	ADD_RANGE(RADEON_SRC_Y_X, 3); /* 1434, 1438, 143c,
					 SRC_Y_X, DST_Y_X, DST_HEIGHT_WIDTH
				       */
+	ADD_RANGE(RADEON_DP_GUI_MASTER_CNTL, 1); /* 146c */
+	ADD_RANGE(RADEON_DP_BRUSH_BKGD_CLR, 2); /* 1478, 147c */
+	ADD_RANGE(RADEON_DP_SRC_FRGD_CLR, 2); /* 15d8, 15dc */
+	ADD_RANGE(RADEON_DP_CNTL, 1); /* 16c0 */
+	ADD_RANGE(RADEON_DP_WRITE_MASK, 1); /* 16cc */
+	ADD_RANGE(RADEON_DEFAULT_SC_BOTTOM_RIGHT, 1); /* 16e8 */
+
+	ADD_RANGE(RADEON_DSTCACHE_CTLSTAT, 1);
+	ADD_RANGE(RADEON_WAIT_UNTIL, 1);
+
+	ADD_RANGE_MARK(RADEON_DST_OFFSET, 1, MARK_CHECK_OFFSET);
+	ADD_RANGE_MARK(RADEON_SRC_OFFSET, 1, MARK_CHECK_OFFSET);
+
+	ADD_RANGE_MARK(RADEON_DST_PITCH_OFFSET, 1, MARK_CHECK_OFFSET);
+	ADD_RANGE_MARK(RADEON_SRC_PITCH_OFFSET, 1, MARK_CHECK_OFFSET);
+
+	/* TODO SCISSOR */
+	ADD_RANGE_MARK(R300_SC_SCISSOR0, 2, MARK_CHECK_SCISSOR);
+
+	ADD_RANGE(R300_SC_CLIP_0_A, 2);
+	ADD_RANGE(R300_SC_CLIP_RULE, 1);
+	ADD_RANGE(R300_SC_SCREENDOOR, 1);
+
+	ADD_RANGE(R300_VAP_PVS_CODE_CNTL_0, 4);
+	ADD_RANGE(R300_VAP_PVS_VECTOR_INDX_REG, 2);
 }
 
-static __inline__ int r300_check_range(unsigned reg, int count)
+int r300_check_range(unsigned reg, int count)
 {
	int i;
	if (reg & ~0xffff)
@@ -282,6 +316,13 @@ static __inline__ int r300_check_range(unsigned reg, int count)
	return 0;
 }
 
+int r300_get_reg_flags(unsigned reg)
+{
+	if (reg & ~0xffff)
+		return -1;
+	return r300_reg_flags[(reg >> 2)];
+}
+
 static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
							  dev_priv,
							  drm_radeon_kcmd_buffer_t
@@ -129,15 +129,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 /* END: Wild guesses */
 
 #define R300_SE_VTE_CNTL 0x20b0
-# define R300_VPORT_X_SCALE_ENA 0x00000001
-# define R300_VPORT_X_OFFSET_ENA 0x00000002
-# define R300_VPORT_Y_SCALE_ENA 0x00000004
-# define R300_VPORT_Y_OFFSET_ENA 0x00000008
-# define R300_VPORT_Z_SCALE_ENA 0x00000010
-# define R300_VPORT_Z_OFFSET_ENA 0x00000020
-# define R300_VTX_XY_FMT 0x00000100
-# define R300_VTX_Z_FMT 0x00000200
-# define R300_VTX_W0_FMT 0x00000400
 # define R300_VTX_W0_NORMALIZE 0x00000800
 # define R300_VTX_ST_DENORMALIZED 0x00001000
 
@@ -493,7 +484,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 # define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
 # define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
 
-#define R300_GB_SELECT 0x401C
 # define R300_GB_FOG_SELECT_C0A 0
 # define R300_GB_FOG_SELECT_C1A 1
 # define R300_GB_FOG_SELECT_C2A 2
@@ -955,7 +946,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 /* 32 bit chroma key */
 #define R300_TX_CHROMA_KEY_0 0x4580
 /* ff00ff00 == { 0, 1.0, 0, 1.0 } */
-#define R300_TX_BORDER_COLOR_0 0x45C0
 
 /* END: Texture specification */
 
@@ -1340,7 +1330,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 /* gap */
 
-#define R300_RB3D_COLOROFFSET0 0x4E28
 # define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
 #define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
 #define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
@@ -1352,7 +1341,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
  * Bit 17: 4x2 tiles
  * Bit 18: Extremely weird tile like, but some pixels duplicated?
  */
-#define R300_RB3D_COLORPITCH0 0x4E38
 # define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
 # define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
 # define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
@@ -190,7 +190,7 @@ void radeon_pll_errata_after_data(struct drm_radeon_private *dev_priv)
	}
 }
 
-int RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr)
+u32 RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr)
 {
	uint32_t data;
 
@@ -661,8 +661,8 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
		     ((dev_priv->gart_vm_start - 1) & 0xffff0000)
		     | (dev_priv->fb_location >> 16));
 
-	if (dev_priv->mm.ring) {
-		ring_start = dev_priv->mm.ring->offset +
+	if (dev_priv->mm.ring.bo) {
+		ring_start = dev_priv->mm.ring.bo->offset +
			dev_priv->gart_vm_start;
	} else
 #if __OS_HAS_AGP
@@ -695,9 +695,9 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
	dev_priv->ring.tail = cur_read_ptr;
 
 
-	if (dev_priv->mm.ring_read_ptr) {
+	if (dev_priv->mm.ring_read.bo) {
		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
-			     dev_priv->mm.ring_read_ptr->offset +
+			     dev_priv->mm.ring_read.bo->offset +
			     dev_priv->gart_vm_start);
	} else
 #if __OS_HAS_AGP
@@ -748,9 +748,9 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
	RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
		     + RADEON_SCRATCH_REG_OFFSET);
 
-	if (dev_priv->mm.ring_read_ptr)
+	if (dev_priv->mm.ring_read.bo)
		dev_priv->scratch = ((__volatile__ u32 *)
-			     dev_priv->mm.ring_read_ptr_map.virtual +
+			     dev_priv->mm.ring_read.kmap.virtual +
			     (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
	else
		dev_priv->scratch = ((__volatile__ u32 *)
@@ -775,12 +775,18 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
	radeon_do_wait_for_idle(dev_priv);
 
	/* Sync everything up */
+	if (dev_priv->chip_family > CHIP_RV280) {
	RADEON_WRITE(RADEON_ISYNC_CNTL,
		     (RADEON_ISYNC_ANY2D_IDLE3D |
		      RADEON_ISYNC_ANY3D_IDLE2D |
		      RADEON_ISYNC_WAIT_IDLEGUI |
		      RADEON_ISYNC_CPSCRATCH_IDLEGUI));
+	} else {
+		RADEON_WRITE(RADEON_ISYNC_CNTL,
+			     (RADEON_ISYNC_ANY2D_IDLE3D |
+			      RADEON_ISYNC_ANY3D_IDLE2D |
+			      RADEON_ISYNC_WAIT_IDLEGUI));
+	}
 }
 
 static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
@@ -788,8 +794,8 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
	u32 tmp;
	void *ring_read_ptr;
 
-	if (dev_priv->mm.ring_read_ptr)
-		ring_read_ptr = dev_priv->mm.ring_read_ptr_map.virtual;
+	if (dev_priv->mm.ring_read.bo)
+		ring_read_ptr = dev_priv->mm.ring_read.kmap.virtual;
	else
		ring_read_ptr = dev_priv->ring_rptr->handle;
 
@@ -1353,8 +1359,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev)
	if (dev_priv->gart_info.bus_addr) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
-		if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
-			DRM_ERROR("failed to cleanup PCI GART!\n");
+		drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info);
	}
 
	if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
@@ -1362,6 +1367,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev)
		if (dev_priv->pcigart_offset_set == 1) {
			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr = NULL;
+			dev_priv->pcigart_offset_set = 0;
		}
	}
 }
@@ -1553,8 +1559,10 @@ void radeon_do_release(struct drm_device * dev)
		radeon_mem_takedown(&(dev_priv->gart_heap));
		radeon_mem_takedown(&(dev_priv->fb_heap));
 
+		if (dev_priv->user_mm_enable) {
			radeon_gem_mm_fini(dev);
+			dev_priv->user_mm_enable = false;
+		}
 
		/* deallocate kernel resources */
		radeon_do_cleanup_cp(dev);
@@ -2270,6 +2278,7 @@ static void radeon_set_dynamic_clock(struct drm_device *dev, int mode)
 int radeon_modeset_cp_init(struct drm_device *dev)
 {
	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t tmp;
 
	/* allocate a ring and ring rptr bits from GART space */
	/* these are allocated in GEM files */
@@ -2278,8 +2287,8 @@ int radeon_modeset_cp_init(struct drm_device *dev)
	dev_priv->ring.size = RADEON_DEFAULT_RING_SIZE;
	dev_priv->cp_mode = RADEON_CSQ_PRIBM_INDBM;
 
-	dev_priv->ring.start = (u32 *)(void *)(unsigned long)dev_priv->mm.ring_map.virtual;
-	dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring_map.virtual +
+	dev_priv->ring.start = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual;
+	dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual +
		dev_priv->ring.size / sizeof(u32);
	dev_priv->ring.size_l2qw = drm_order(dev_priv->ring.size / 8);
	dev_priv->ring.rptr_update = 4096;
@@ -2289,14 +2298,21 @@ int radeon_modeset_cp_init(struct drm_device *dev)
	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
 
-	dev_priv->new_memmap = 1;
+	dev_priv->new_memmap = true;
 
+	r300_init_reg_flags(dev);
+
	radeon_cp_load_microcode(dev_priv);
 
-	DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring->offset, dev_priv->mm.ring_read_ptr->offset);
+	DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring.bo->offset, dev_priv->mm.ring_read.bo->offset);
 
	radeon_cp_init_ring_buffer(dev, dev_priv);
 
+	/* need to enable BUS mastering in Buscntl */
+	tmp = RADEON_READ(RADEON_BUS_CNTL);
+	tmp &= ~RADEON_BUS_MASTER_DIS;
+	RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+
	radeon_do_engine_reset(dev);
	radeon_test_writeback(dev_priv);
 
@@ -2367,8 +2383,8 @@ int radeon_modeset_preinit(struct drm_device *dev)
 
	if (dev_priv->is_atom_bios) {
		dev_priv->mode_info.atom_context = atom_parse(&card, dev_priv->bios);
-		radeon_get_clock_info(dev);
	}
+	radeon_get_clock_info(dev);
	return 0;
 }
 
@@ -2523,7 +2539,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
 
	master_priv->sarea_priv = NULL;
	if (master_priv->sarea)
-		drm_rmmap(dev, master_priv->sarea);
+		drm_rmmap_locked(dev, master_priv->sarea);
 
	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
 
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2008 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
+{
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	struct drm_radeon_cs *cs = data;
+	uint32_t *packets = NULL;
+	uint32_t cs_id;
+	uint32_t card_offset;
+	void *ib = NULL;
+	long size;
+	int r;
+	RING_LOCALS;
+
+	/* set command stream id to 0 which is fake id */
+	cs_id = 0;
+	DRM_COPY_TO_USER(&cs->cs_id, &cs_id, sizeof(uint32_t));
+
+	if (dev_priv == NULL) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+	if (!cs->dwords) {
+		return 0;
+	}
+	/* limit cs to 64K ib */
+	if (cs->dwords > (16 * 1024)) {
+		return -EINVAL;
+	}
+	/* copy cs from userspace maybe we should copy into ib to save
+	 * one copy but ib will be mapped wc so not good for cmd checking
+	 * somethings worth testing i guess (Jerome)
+	 */
+	size = cs->dwords * sizeof(uint32_t);
+	packets = drm_alloc(size, DRM_MEM_DRIVER);
+	if (packets == NULL) {
+		return -ENOMEM;
+	}
+	if (DRM_COPY_FROM_USER(packets, (void __user *)(unsigned long)cs->packets, size)) {
+		r = -EFAULT;
+		goto out;
+	}
+	/* get ib */
+	r = dev_priv->cs.ib_get(dev, &ib, cs->dwords, &card_offset);
+	if (r) {
+		goto out;
+	}
+
+	/* now parse command stream */
+	r = dev_priv->cs.parse(dev, fpriv, ib, packets, cs->dwords);
+	if (r) {
+		goto out;
+	}
+
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
+	OUT_RING(card_offset);
+	OUT_RING(cs->dwords);
+	OUT_RING(CP_PACKET2());
+	ADVANCE_RING();
+
+	/* emit cs id sequence */
+	dev_priv->cs.id_emit(dev, &cs_id);
+	COMMIT_RING();
+
+	DRM_COPY_TO_USER(&cs->cs_id, &cs_id, sizeof(uint32_t));
+out:
+	dev_priv->cs.ib_free(dev, ib, cs->dwords);
+	drm_free(packets, size, DRM_MEM_DRIVER);
+	return r;
+}
+
+/* for non-mm */
+static int radeon_nomm_relocate(struct drm_device *dev, struct drm_file *file_priv, uint32_t *reloc, uint32_t *offset)
+{
+	*offset = reloc[1];
+	return 0;
+}
+#define RELOC_SIZE 2
+#define RADEON_2D_OFFSET_MASK 0x3fffff
+
+static __inline__ int radeon_cs_relocate_packet0(struct drm_device *dev, struct drm_file *file_priv,
+						 uint32_t *packets, uint32_t offset_dw)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t hdr = packets[offset_dw];
+	uint32_t reg = (hdr & R300_CP_PACKET0_REG_MASK) << 2;
+	uint32_t val = packets[offset_dw + 1];
+	uint32_t packet3_hdr = packets[offset_dw+2];
+	uint32_t tmp, offset;
+	int ret;
+
+	/* this is too strict we may want to expand the length in the future and have
+	   old kernels ignore it. */
+	if (packet3_hdr != (RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE << 16))) {
+		DRM_ERROR("Packet 3 was %x should have been %x\n", packet3_hdr, RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE << 16));
+		return -EINVAL;
+	}
+
+	switch(reg) {
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		/* pass in the start of the reloc */
+		ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + 2, &offset);
+		if (ret)
+			return ret;
+		tmp = (val & RADEON_2D_OFFSET_MASK) << 10;
+		val &= ~RADEON_2D_OFFSET_MASK;
+		offset += tmp;
+		offset >>= 10;
+		val |= offset;
+		break;
+	case R300_RB3D_COLOROFFSET0:
+	case R300_ZB_DEPTHOFFSET:
+	case R300_TX_OFFSET_0:
+	case R300_TX_OFFSET_0+4:
+		ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + 2, &offset);
+		if (ret)
+			return ret;
+
+		offset &= 0xffffffe0;
+		val += offset;
+		break;
+	default:
+		break;
+	}
+
+	packets[offset_dw + 1] = val;
+	return 0;
+}
+
+static int radeon_cs_relocate_packet3(struct drm_device *dev, struct drm_file *file_priv,
+				      uint32_t *packets, uint32_t offset_dw)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t hdr = packets[offset_dw];
+	int num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+	uint32_t reg = hdr & 0xff00;
+	uint32_t offset, val, tmp;
+	int ret;
+
+	switch(reg) {
+	case RADEON_CNTL_HOSTDATA_BLT:
+	{
+		val = packets[offset_dw + 2];
+		ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + num_dw + 2, &offset);
+		if (ret)
+			return ret;
+
+		tmp = (val & RADEON_2D_OFFSET_MASK) << 10;
+		val &= ~RADEON_2D_OFFSET_MASK;
+		offset += tmp;
+		offset >>= 10;
+		val |= offset;
+
+		packets[offset_dw + 2] = val;
+	}
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static __inline__ int radeon_cs_check_offset(struct drm_device *dev,
+					     uint32_t reg, uint32_t val)
+{
+	uint32_t offset;
+
+	switch(reg) {
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		offset = val & ((1 << 22) - 1);
+		offset <<= 10;
+		break;
+	case R300_RB3D_COLOROFFSET0:
+	case R300_ZB_DEPTHOFFSET:
+		offset = val;
+		break;
+	case R300_TX_OFFSET_0:
+	case R300_TX_OFFSET_0+4:
+		offset = val & 0xffffffe0;
+		break;
+	}
+
+	return 0;
+}
+
+int radeon_cs_packet0(struct drm_device *dev, struct drm_file *file_priv,
+		      uint32_t *packets, uint32_t offset_dw)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t hdr = packets[offset_dw];
+	int num_dw = ((hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16) + 2;
+	int need_reloc = 0;
+	int reg = (hdr & R300_CP_PACKET0_REG_MASK) << 2;
+	int count_dw = 1;
+	int ret;
+
+	while (count_dw < num_dw) {
+		/* need to have something like the r300 validation here -
+		   list of allowed registers */
+		int flags;
+
+		ret = r300_check_range(reg, 1);
+		switch(ret) {
+		case -1:
+			DRM_ERROR("Illegal register %x\n", reg);
+			break;
+		case 0:
+			break;
+		case 1:
+			flags = r300_get_reg_flags(reg);
+			if (flags == MARK_CHECK_OFFSET) {
+				if (num_dw > 2) {
+					DRM_ERROR("Cannot relocate inside type stream of reg0 packets\n");
+					return -EINVAL;
+				}
+
+				ret = radeon_cs_relocate_packet0(dev, file_priv, packets, offset_dw);
+				if (ret)
+					return ret;
+				DRM_DEBUG("need to relocate %x %d\n", reg, flags);
+				/* okay it should be followed by a NOP */
+			} else if (flags == MARK_CHECK_SCISSOR) {
+				DRM_DEBUG("need to validate scissor %x %d\n", reg, flags);
+			} else {
+				DRM_DEBUG("illegal register %x %d\n", reg, flags);
+				return -EINVAL;
+			}
+			break;
+		}
+		count_dw++;
+		reg += 4;
+	}
+	return 0;
+}
+
+int radeon_cs_parse(struct drm_device *dev, struct drm_file *file_priv,
+		    void *ib, uint32_t *packets, uint32_t dwords)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	volatile int rb;
+	int size_dw = dwords;
+	/* scan the packet for various things */
+	int count_dw = 0;
+	int ret = 0;
+
+	while (count_dw < size_dw && ret == 0) {
+		int hdr = packets[count_dw];
+		int num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+		int reg;
+
+		switch (hdr & RADEON_CP_PACKET_MASK) {
+		case RADEON_CP_PACKET0:
+			ret = radeon_cs_packet0(dev, file_priv, packets, count_dw);
+			break;
+		case RADEON_CP_PACKET1:
+		case RADEON_CP_PACKET2:
+			reg = hdr & RADEON_CP_PACKET0_REG_MASK;
+			DRM_DEBUG("Packet 1/2: %d %x\n", num_dw, reg);
+			break;
+
+		case RADEON_CP_PACKET3:
+			reg = hdr & 0xff00;
+
+			switch(reg) {
+			case RADEON_CNTL_HOSTDATA_BLT:
+				radeon_cs_relocate_packet3(dev, file_priv, packets, count_dw);
+				break;
+
+			case RADEON_CNTL_BITBLT_MULTI:
+			case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
+			case RADEON_CP_INDX_BUFFER:
+				DRM_ERROR("need relocate packet 3 for %x\n", reg);
+				break;
+
+			case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
+			case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
|
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
|
||||||
|
case RADEON_WAIT_FOR_IDLE:
|
||||||
|
case RADEON_CP_NOP:
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
DRM_ERROR("unknown packet 3 %x\n", reg);
|
||||||
|
ret = -EINVAL;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
count_dw += num_dw+2;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
|
||||||
|
/* copy the packet into the IB */
|
||||||
|
memcpy(ib, packets, dwords * sizeof(uint32_t));
|
||||||
|
|
||||||
|
/* read back last byte to flush WC buffers */
|
||||||
|
rb = readl((ib + (dwords-1) * sizeof(uint32_t)));
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t radeon_cs_id_get(struct drm_radeon_private *radeon)
|
||||||
|
{
|
||||||
|
/* FIXME: protect with a spinlock */
|
||||||
|
/* FIXME: check if wrap affect last reported wrap & sequence */
|
||||||
|
radeon->cs.id_scnt = (radeon->cs.id_scnt + 1) & 0x00FFFFFF;
|
||||||
|
if (!radeon->cs.id_scnt) {
|
||||||
|
/* increment wrap counter */
|
||||||
|
radeon->cs.id_wcnt += 0x01000000;
|
||||||
|
/* valid sequence counter start at 1 */
|
||||||
|
radeon->cs.id_scnt = 1;
|
||||||
|
}
|
||||||
|
return (radeon->cs.id_scnt | radeon->cs.id_wcnt);
|
||||||
|
}
|
||||||
|
|
||||||
|
void r100_cs_id_emit(struct drm_device *dev, uint32_t *id)
|
||||||
|
{
|
||||||
|
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||||
|
RING_LOCALS;
|
||||||
|
|
||||||
|
/* ISYNC_CNTL should have CPSCRACTH bit set */
|
||||||
|
*id = radeon_cs_id_get(dev_priv);
|
||||||
|
/* emit id in SCRATCH4 (not used yet in old drm) */
|
||||||
|
BEGIN_RING(2);
|
||||||
|
OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG4, 0));
|
||||||
|
OUT_RING(*id);
|
||||||
|
ADVANCE_RING();
|
||||||
|
}
|
||||||
|
|
||||||
|
void r300_cs_id_emit(struct drm_device *dev, uint32_t *id)
|
||||||
|
{
|
||||||
|
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||||
|
RING_LOCALS;
|
||||||
|
|
||||||
|
/* ISYNC_CNTL should not have CPSCRACTH bit set */
|
||||||
|
*id = radeon_cs_id_get(dev_priv);
|
||||||
|
/* emit id in SCRATCH6 */
|
||||||
|
BEGIN_RING(6);
|
||||||
|
OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 0));
|
||||||
|
OUT_RING(6);
|
||||||
|
OUT_RING(CP_PACKET0(R300_CP_RESYNC_DATA, 0));
|
||||||
|
OUT_RING(*id);
|
||||||
|
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
|
||||||
|
OUT_RING(R300_RB3D_DC_FINISH);
|
||||||
|
ADVANCE_RING();
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t r100_cs_id_last_get(struct drm_device *dev)
|
||||||
|
{
|
||||||
|
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||||
|
|
||||||
|
return RADEON_READ(RADEON_SCRATCH_REG4);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t r300_cs_id_last_get(struct drm_device *dev)
|
||||||
|
{
|
||||||
|
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||||
|
|
||||||
|
return RADEON_READ(RADEON_SCRATCH_REG6);
|
||||||
|
}
|
||||||
|
|
||||||
|
int radeon_cs_init(struct drm_device *dev)
|
||||||
|
{
|
||||||
|
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||||
|
|
||||||
|
if (dev_priv->chip_family < CHIP_RV280) {
|
||||||
|
dev_priv->cs.id_emit = r100_cs_id_emit;
|
||||||
|
dev_priv->cs.id_last_get = r100_cs_id_last_get;
|
||||||
|
} else if (dev_priv->chip_family < CHIP_R600) {
|
||||||
|
dev_priv->cs.id_emit = r300_cs_id_emit;
|
||||||
|
dev_priv->cs.id_last_get = r300_cs_id_last_get;
|
||||||
|
}
|
||||||
|
|
||||||
|
dev_priv->cs.parse = radeon_cs_parse;
|
||||||
|
/* ib get depends on memory manager or not so memory manager */
|
||||||
|
dev_priv->cs.relocate = radeon_nomm_relocate;
|
||||||
|
return 0;
|
||||||
|
}
|
|
@ -455,6 +455,13 @@ typedef struct {
|
||||||
int tiling_enabled; /* set by drm, read by 2d + 3d clients */
|
int tiling_enabled; /* set by drm, read by 2d + 3d clients */
|
||||||
|
|
||||||
unsigned int last_fence;
|
unsigned int last_fence;
|
||||||
|
|
||||||
|
uint32_t front_handle;
|
||||||
|
uint32_t back_handle;
|
||||||
|
uint32_t depth_handle;
|
||||||
|
uint32_t front_pitch;
|
||||||
|
uint32_t back_pitch;
|
||||||
|
uint32_t depth_pitch;
|
||||||
} drm_radeon_sarea_t;
|
} drm_radeon_sarea_t;
|
||||||
|
|
||||||
|
|
||||||
|
@ -506,6 +513,7 @@ typedef struct {
|
||||||
#define DRM_RADEON_GEM_SET_DOMAIN 0x23
|
#define DRM_RADEON_GEM_SET_DOMAIN 0x23
|
||||||
#define DRM_RADEON_GEM_INDIRECT 0x24 // temporary for X server
|
#define DRM_RADEON_GEM_INDIRECT 0x24 // temporary for X server
|
||||||
|
|
||||||
|
#define DRM_RADEON_CS 0x25
|
||||||
|
|
||||||
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
|
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
|
||||||
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
|
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
|
||||||
|
@ -545,6 +553,7 @@ typedef struct {
|
||||||
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
|
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
|
||||||
#define DRM_IOCTL_RADEON_GEM_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INDIRECT, struct drm_radeon_gem_indirect)
|
#define DRM_IOCTL_RADEON_GEM_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INDIRECT, struct drm_radeon_gem_indirect)
|
||||||
|
|
||||||
|
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
|
||||||
|
|
||||||
|
|
||||||
typedef struct drm_radeon_init {
|
typedef struct drm_radeon_init {
|
||||||
|
@ -703,6 +712,7 @@ typedef struct drm_radeon_indirect {
|
||||||
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
|
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
|
||||||
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
|
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
|
||||||
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
|
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
|
||||||
|
#define RADEON_PARAM_KERNEL_MM 16
|
||||||
|
|
||||||
typedef struct drm_radeon_getparam {
|
typedef struct drm_radeon_getparam {
|
||||||
int param;
|
int param;
|
||||||
|
@ -758,6 +768,7 @@ typedef struct drm_radeon_setparam {
|
||||||
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
|
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
|
||||||
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
|
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
|
||||||
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
|
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
|
||||||
|
#define RADEON_SETPARAM_MM_INIT 7 /* Initialise the mm */
|
||||||
/* 1.14: Clients can allocate/free a surface
|
/* 1.14: Clients can allocate/free a surface
|
||||||
*/
|
*/
|
||||||
typedef struct drm_radeon_surface_alloc {
|
typedef struct drm_radeon_surface_alloc {
|
||||||
|
@ -861,4 +872,17 @@ struct drm_radeon_gem_indirect {
|
||||||
uint32_t used;
|
uint32_t used;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* New interface which obsolete all previous interface.
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
struct drm_radeon_cs {
|
||||||
|
// uint32_t __user *packets;
|
||||||
|
uint32_t dwords;
|
||||||
|
uint32_t cs_id;
|
||||||
|
uint64_t packets;
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -195,11 +195,11 @@ enum radeon_mac_model {
|
||||||
|
|
||||||
|
|
||||||
#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
|
#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
|
||||||
(dev_priv->mm.ring_read_ptr ? readl(dev_priv->mm.ring_read_ptr_map.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0 )) : \
|
(dev_priv->mm.ring_read.bo ? readl(dev_priv->mm.ring_read.kmap.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0 )) : \
|
||||||
RADEON_READ(RADEON_CP_RB_RPTR))
|
RADEON_READ(RADEON_CP_RB_RPTR))
|
||||||
|
|
||||||
#define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read_ptr ? \
|
#define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read.bo ? \
|
||||||
writel((val), dev_priv->mm.ring_read_ptr_map.virtual) : \
|
writel((val), dev_priv->mm.ring_read.kmap.virtual) : \
|
||||||
DRM_WRITE32((dev_priv)->ring_rptr, 0, (val)))
|
DRM_WRITE32((dev_priv)->ring_rptr, 0, (val)))
|
||||||
|
|
||||||
typedef struct drm_radeon_freelist {
|
typedef struct drm_radeon_freelist {
|
||||||
|
@ -261,6 +261,11 @@ struct radeon_virt_surface {
|
||||||
struct drm_file *file_priv;
|
struct drm_file *file_priv;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct radeon_mm_obj {
|
||||||
|
struct drm_buffer_object *bo;
|
||||||
|
struct drm_bo_kmap_obj kmap;
|
||||||
|
};
|
||||||
|
|
||||||
struct radeon_mm_info {
|
struct radeon_mm_info {
|
||||||
uint64_t vram_offset; // Offset into GPU space
|
uint64_t vram_offset; // Offset into GPU space
|
||||||
uint64_t vram_size;
|
uint64_t vram_size;
|
||||||
|
@ -268,15 +273,13 @@ struct radeon_mm_info {
|
||||||
|
|
||||||
uint64_t gart_start;
|
uint64_t gart_start;
|
||||||
uint64_t gart_size;
|
uint64_t gart_size;
|
||||||
|
|
||||||
|
struct radeon_mm_obj pcie_table;
|
||||||
|
struct radeon_mm_obj ring;
|
||||||
|
struct radeon_mm_obj ring_read;
|
||||||
|
|
||||||
struct drm_buffer_object *pcie_table;
|
struct radeon_mm_obj dma_bufs;
|
||||||
struct drm_bo_kmap_obj pcie_table_map;
|
struct drm_map fake_agp_map;
|
||||||
|
|
||||||
struct drm_buffer_object *ring;
|
|
||||||
struct drm_bo_kmap_obj ring_map;
|
|
||||||
|
|
||||||
struct drm_buffer_object *ring_read_ptr;
|
|
||||||
struct drm_bo_kmap_obj ring_read_ptr_map;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#include "radeon_mode.h"
|
#include "radeon_mode.h"
|
||||||
|
@ -289,11 +292,35 @@ struct drm_radeon_master_private {
|
||||||
#define RADEON_FLUSH_EMITED (1 < 0)
|
#define RADEON_FLUSH_EMITED (1 < 0)
|
||||||
#define RADEON_PURGE_EMITED (1 < 1)
|
#define RADEON_PURGE_EMITED (1 < 1)
|
||||||
|
|
||||||
|
/* command submission struct */
|
||||||
|
struct drm_radeon_cs_priv {
|
||||||
|
uint32_t id_wcnt;
|
||||||
|
uint32_t id_scnt;
|
||||||
|
uint32_t id_last_wcnt;
|
||||||
|
uint32_t id_last_scnt;
|
||||||
|
|
||||||
|
int (*parse)(struct drm_device *dev, struct drm_file *file_priv,
|
||||||
|
void *ib, uint32_t *packets, uint32_t dwords);
|
||||||
|
void (*id_emit)(struct drm_device *dev, uint32_t *id);
|
||||||
|
uint32_t (*id_last_get)(struct drm_device *dev);
|
||||||
|
/* this ib handling callback are for hidding memory manager drm
|
||||||
|
* from memory manager less drm, free have to emit ib discard
|
||||||
|
* sequence into the ring */
|
||||||
|
int (*ib_get)(struct drm_device *dev, void **ib, uint32_t dwords, uint32_t *card_offset);
|
||||||
|
uint32_t (*ib_get_ptr)(struct drm_device *dev, void *ib);
|
||||||
|
void (*ib_free)(struct drm_device *dev, void *ib, uint32_t dwords);
|
||||||
|
/* do a relocation either MM or non-MM */
|
||||||
|
int (*relocate)(struct drm_device *dev, struct drm_file *file_priv,
|
||||||
|
uint32_t *reloc, uint32_t *offset);
|
||||||
|
};
|
||||||
|
|
||||||
typedef struct drm_radeon_private {
|
typedef struct drm_radeon_private {
|
||||||
|
|
||||||
drm_radeon_ring_buffer_t ring;
|
drm_radeon_ring_buffer_t ring;
|
||||||
|
|
||||||
int new_memmap;
|
bool new_memmap;
|
||||||
|
|
||||||
|
bool user_mm_enable;
|
||||||
|
|
||||||
int gart_size;
|
int gart_size;
|
||||||
u32 gart_vm_start;
|
u32 gart_vm_start;
|
||||||
|
@ -372,6 +399,7 @@ typedef struct drm_radeon_private {
|
||||||
uint32_t flags; /* see radeon_chip_flags */
|
uint32_t flags; /* see radeon_chip_flags */
|
||||||
unsigned long fb_aper_offset;
|
unsigned long fb_aper_offset;
|
||||||
|
|
||||||
|
bool mm_enabled;
|
||||||
struct radeon_mm_info mm;
|
struct radeon_mm_info mm;
|
||||||
drm_local_map_t *mmio;
|
drm_local_map_t *mmio;
|
||||||
|
|
||||||
|
@ -395,6 +423,11 @@ typedef struct drm_radeon_private {
|
||||||
int num_gb_pipes;
|
int num_gb_pipes;
|
||||||
int track_flush;
|
int track_flush;
|
||||||
uint32_t chip_family; /* extract from flags */
|
uint32_t chip_family; /* extract from flags */
|
||||||
|
|
||||||
|
struct radeon_mm_obj **ib_objs;
|
||||||
|
/* ib bitmap */
|
||||||
|
uint64_t ib_alloc_bitmap; // TO DO replace with a real bitmap
|
||||||
|
struct drm_radeon_cs_priv cs;
|
||||||
} drm_radeon_private_t;
|
} drm_radeon_private_t;
|
||||||
|
|
||||||
typedef struct drm_radeon_buf_priv {
|
typedef struct drm_radeon_buf_priv {
|
||||||
|
@ -672,14 +705,15 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
|
||||||
#define RADEON_SCRATCH_REG3 0x15ec
|
#define RADEON_SCRATCH_REG3 0x15ec
|
||||||
#define RADEON_SCRATCH_REG4 0x15f0
|
#define RADEON_SCRATCH_REG4 0x15f0
|
||||||
#define RADEON_SCRATCH_REG5 0x15f4
|
#define RADEON_SCRATCH_REG5 0x15f4
|
||||||
|
#define RADEON_SCRATCH_REG6 0x15f8
|
||||||
#define RADEON_SCRATCH_UMSK 0x0770
|
#define RADEON_SCRATCH_UMSK 0x0770
|
||||||
#define RADEON_SCRATCH_ADDR 0x0774
|
#define RADEON_SCRATCH_ADDR 0x0774
|
||||||
|
|
||||||
#define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x))
|
#define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x))
|
||||||
|
|
||||||
#define GET_SCRATCH( x ) (dev_priv->writeback_works ? \
|
#define GET_SCRATCH( x ) (dev_priv->writeback_works ? \
|
||||||
(dev_priv->mm.ring_read_ptr ? \
|
(dev_priv->mm.ring_read.bo ? \
|
||||||
readl(dev_priv->mm.ring_read_ptr_map.virtual + RADEON_SCRATCHOFF(0)) : \
|
readl(dev_priv->mm.ring_read.kmap.virtual + RADEON_SCRATCHOFF(0)) : \
|
||||||
DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(x))) : \
|
DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(x))) : \
|
||||||
RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x)))
|
RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x)))
|
||||||
|
|
||||||
|
@ -1243,44 +1277,62 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
|
||||||
#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
|
#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
|
||||||
#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
|
#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
|
||||||
|
|
||||||
extern int RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr);
|
extern u32 RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr);
|
||||||
extern void RADEON_WRITE_PLL(struct drm_radeon_private *dev_priv, int addr, uint32_t data);
|
extern void RADEON_WRITE_PLL(struct drm_radeon_private *dev_priv, int addr, uint32_t data);
|
||||||
|
|
||||||
#define RADEON_WRITE_PCIE( addr, val ) \
|
#define RADEON_WRITE_P(reg, val, mask) \
|
||||||
do { \
|
do { \
|
||||||
RADEON_WRITE8( RADEON_PCIE_INDEX, \
|
uint32_t tmp = RADEON_READ(reg); \
|
||||||
((addr) & 0xff)); \
|
tmp &= (mask); \
|
||||||
RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \
|
tmp |= ((val) & ~(mask)); \
|
||||||
|
RADEON_WRITE(reg, tmp); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define RADEON_WRITE_PLL_P(dev_priv, addr, val, mask) \
|
||||||
|
do { \
|
||||||
|
uint32_t tmp_ = RADEON_READ_PLL(dev_priv, addr); \
|
||||||
|
tmp_ &= (mask); \
|
||||||
|
tmp_ |= ((val) & ~(mask)); \
|
||||||
|
RADEON_WRITE_PLL(dev_priv, addr, tmp_); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define R500_WRITE_MCIND( addr, val ) \
|
|
||||||
|
|
||||||
|
#define RADEON_WRITE_PCIE(addr, val) \
|
||||||
|
do { \
|
||||||
|
RADEON_WRITE8(RADEON_PCIE_INDEX, \
|
||||||
|
((addr) & 0xff)); \
|
||||||
|
RADEON_WRITE(RADEON_PCIE_DATA, (val)); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define R500_WRITE_MCIND(addr, val) \
|
||||||
do { \
|
do { \
|
||||||
RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
|
RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
|
||||||
RADEON_WRITE(R520_MC_IND_DATA, (val)); \
|
RADEON_WRITE(R520_MC_IND_DATA, (val)); \
|
||||||
RADEON_WRITE(R520_MC_IND_INDEX, 0); \
|
RADEON_WRITE(R520_MC_IND_INDEX, 0); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define RS480_WRITE_MCIND( addr, val ) \
|
#define RS480_WRITE_MCIND(addr, val) \
|
||||||
do { \
|
do { \
|
||||||
RADEON_WRITE( RS480_NB_MC_INDEX, \
|
RADEON_WRITE(RS480_NB_MC_INDEX, \
|
||||||
((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \
|
((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \
|
||||||
RADEON_WRITE( RS480_NB_MC_DATA, (val) ); \
|
RADEON_WRITE(RS480_NB_MC_DATA, (val)); \
|
||||||
RADEON_WRITE( RS480_NB_MC_INDEX, 0xff ); \
|
RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define RS690_WRITE_MCIND( addr, val ) \
|
#define RS690_WRITE_MCIND(addr, val) \
|
||||||
do { \
|
do { \
|
||||||
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
|
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
|
||||||
RADEON_WRITE(RS690_MC_DATA, val); \
|
RADEON_WRITE(RS690_MC_DATA, val); \
|
||||||
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
|
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define IGP_WRITE_MCIND( addr, val ) \
|
#define IGP_WRITE_MCIND(addr, val) \
|
||||||
do { \
|
do { \
|
||||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \
|
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \
|
||||||
RS690_WRITE_MCIND( addr, val ); \
|
RS690_WRITE_MCIND(addr, val); \
|
||||||
else \
|
else \
|
||||||
RS480_WRITE_MCIND( addr, val ); \
|
RS480_WRITE_MCIND(addr, val); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define CP_PACKET0( reg, n ) \
|
#define CP_PACKET0( reg, n ) \
|
||||||
|
@ -1324,42 +1376,42 @@ do { \
|
||||||
|
|
||||||
#define RADEON_FLUSH_CACHE() do { \
|
#define RADEON_FLUSH_CACHE() do { \
|
||||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
||||||
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||||
OUT_RING(RADEON_RB3D_DC_FLUSH); \
|
OUT_RING(RADEON_RB3D_DC_FLUSH); \
|
||||||
} else { \
|
} else { \
|
||||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||||
OUT_RING(R300_RB3D_DC_FLUSH); \
|
OUT_RING(RADEON_RB3D_DC_FLUSH); \
|
||||||
} \
|
} \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define RADEON_PURGE_CACHE() do { \
|
#define RADEON_PURGE_CACHE() do { \
|
||||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
||||||
OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||||
OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
|
OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
|
||||||
} else { \
|
} else { \
|
||||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
|
||||||
OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \
|
OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \
|
||||||
} \
|
} \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define RADEON_FLUSH_ZCACHE() do { \
|
#define RADEON_FLUSH_ZCACHE() do { \
|
||||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
||||||
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
|
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
|
||||||
OUT_RING( RADEON_RB3D_ZC_FLUSH ); \
|
OUT_RING(RADEON_RB3D_ZC_FLUSH); \
|
||||||
} else { \
|
} else { \
|
||||||
OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) ); \
|
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
|
||||||
OUT_RING( R300_ZC_FLUSH ); \
|
OUT_RING(R300_ZC_FLUSH); \
|
||||||
} \
|
} \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
#define RADEON_PURGE_ZCACHE() do { \
|
#define RADEON_PURGE_ZCACHE() do { \
|
||||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
|
||||||
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
|
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
|
||||||
OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
|
OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
|
||||||
} else { \
|
} else { \
|
||||||
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
|
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
|
||||||
OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
|
OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
|
||||||
} \
|
} \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
/* ================================================================
|
/* ================================================================
|
||||||
|
@ -1380,7 +1432,7 @@ do { \
|
||||||
#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
|
#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
|
||||||
do { \
|
do { \
|
||||||
struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \
|
struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \
|
||||||
drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \
|
drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \
|
||||||
if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \
|
if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \
|
||||||
int __ret = radeon_do_cp_idle( dev_priv ); \
|
int __ret = radeon_do_cp_idle( dev_priv ); \
|
||||||
if ( __ret ) return __ret; \
|
if ( __ret ) return __ret; \
|
||||||
|
@ -1556,6 +1608,23 @@ static inline int radeon_update_breadcrumb(struct drm_device *dev)
|
||||||
|
|
||||||
#define radeon_is_dce3(dev_priv) ((dev_priv->chip_family >= CHIP_RV620))
|
#define radeon_is_dce3(dev_priv) ((dev_priv->chip_family >= CHIP_RV620))
|
||||||
|
|
||||||
|
#define radeon_is_rv100(dev_priv) ((dev_priv->chip_family == CHIP_RV100) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RV200) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RS100) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RS200) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RV250) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RV280) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RS300))
|
||||||
|
|
||||||
|
#define radeon_is_r300(dev_priv) ((dev_priv->chip_family == CHIP_R300) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RV350) || \
|
||||||
|
(dev_priv->chip_family == CHIP_R350) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RV380) || \
|
||||||
|
(dev_priv->chip_family == CHIP_R420) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RV410) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RS400) || \
|
||||||
|
(dev_priv->chip_family == CHIP_RS480))
|
||||||
|
|
||||||
#define radeon_bios8(dev_priv, v) (dev_priv->bios[v])
|
#define radeon_bios8(dev_priv, v) (dev_priv->bios[v])
|
||||||
#define radeon_bios16(dev_priv, v) (dev_priv->bios[v] | (dev_priv->bios[(v) + 1] << 8))
|
#define radeon_bios16(dev_priv, v) (dev_priv->bios[v] | (dev_priv->bios[(v) + 1] << 8))
|
||||||
#define radeon_bios32(dev_priv, v) ((dev_priv->bios[v]) | \
|
#define radeon_bios32(dev_priv, v) ((dev_priv->bios[v]) | \
|
||||||
|
@ -1563,6 +1632,7 @@ static inline int radeon_update_breadcrumb(struct drm_device *dev)
|
||||||
(dev_priv->bios[(v) + 2] << 16) | \
|
(dev_priv->bios[(v) + 2] << 16) | \
|
||||||
(dev_priv->bios[(v) + 3] << 24))
|
(dev_priv->bios[(v) + 3] << 24))
|
||||||
|
|
||||||
|
extern void radeon_pll_errata_after_index(struct drm_radeon_private *dev_priv);
|
||||||
extern int radeon_emit_irq(struct drm_device * dev);
|
extern int radeon_emit_irq(struct drm_device * dev);
|
||||||
|
|
||||||
extern void radeon_gem_free_object(struct drm_gem_object *obj);
|
extern void radeon_gem_free_object(struct drm_gem_object *obj);
|
||||||
|
@ -1592,4 +1662,14 @@ extern void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on);
|
||||||
extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
|
extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
|
||||||
extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
|
extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
|
||||||
extern void radeon_cp_dispatch_flip(struct drm_device * dev, struct drm_master *master);
|
extern void radeon_cp_dispatch_flip(struct drm_device * dev, struct drm_master *master);
|
||||||
|
extern int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
|
||||||
|
extern int radeon_cs_init(struct drm_device *dev);
|
||||||
|
void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master);
|
||||||
|
|
||||||
|
#define MARK_SAFE 1
|
||||||
|
#define MARK_CHECK_OFFSET 2
|
||||||
|
#define MARK_CHECK_SCISSOR 3
|
||||||
|
|
||||||
|
extern int r300_check_range(unsigned reg, int count);
|
||||||
|
extern int r300_get_reg_flags(unsigned reg);
|
||||||
#endif /* __RADEON_DRV_H__ */
|
#endif /* __RADEON_DRV_H__ */
|
||||||
|
|
|
@ -2223,6 +2223,9 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
|
||||||
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
|
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
|
||||||
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
|
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
|
||||||
|
|
||||||
|
if (dev_priv->mm.vram_offset)
|
||||||
|
radeon_gem_update_offsets(dev, file_priv->master);
|
||||||
|
|
||||||
radeon_cp_dispatch_swap(dev, file_priv->master);
|
radeon_cp_dispatch_swap(dev, file_priv->master);
|
||||||
sarea_priv->ctx_owner = 0;
|
sarea_priv->ctx_owner = 0;
|
||||||
|
|
||||||
|
@ -3117,6 +3120,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
|
||||||
case RADEON_PARAM_NUM_GB_PIPES:
|
case RADEON_PARAM_NUM_GB_PIPES:
|
||||||
value = dev_priv->num_gb_pipes;
|
value = dev_priv->num_gb_pipes;
|
||||||
break;
|
break;
|
||||||
|
case RADEON_PARAM_KERNEL_MM:
|
||||||
|
value = dev_priv->mm_enabled;
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
DRM_DEBUG( "Invalid parameter %d\n", param->param );
|
DRM_DEBUG( "Invalid parameter %d\n", param->param );
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -3178,6 +3184,10 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil
|
||||||
case RADEON_SETPARAM_VBLANK_CRTC:
|
case RADEON_SETPARAM_VBLANK_CRTC:
|
||||||
return radeon_vblank_crtc_set(dev, sp->value);
|
return radeon_vblank_crtc_set(dev, sp->value);
|
||||||
break;
|
break;
|
||||||
|
case RADEON_SETPARAM_MM_INIT:
|
||||||
|
dev_priv->user_mm_enable = true;
|
||||||
|
dev_priv->new_memmap = true;
|
||||||
|
return radeon_gem_mm_init(dev);
|
||||||
default:
|
default:
|
||||||
DRM_DEBUG("Invalid parameter %d\n", sp->param);
|
DRM_DEBUG("Invalid parameter %d\n", sp->param);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -3279,6 +3289,7 @@ struct drm_ioctl_desc radeon_ioctls[] = {
|
||||||
DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
|
DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
|
||||||
DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
|
DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
|
||||||
DRM_IOCTL_DEF(DRM_RADEON_GEM_INDIRECT, radeon_gem_indirect_ioctl, DRM_AUTH),
|
DRM_IOCTL_DEF(DRM_RADEON_GEM_INDIRECT, radeon_gem_indirect_ioctl, DRM_AUTH),
|
||||||
|
DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
|
||||||
};
|
};
|
||||||
|
|
||||||
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
|
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
|
||||||
|
|
Loading…
Reference in New Issue