parent 1062d8dcff
commit 2a6dad31d8
@@ -270,7 +270,6 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr,
if(print)
DEBUG("MC[0x%02X]", idx);
val = gctx->card->mc_read(gctx->card, idx);
printk(KERN_INFO "MC registers are not implemented.\n");
return 0;
}
if(saved)
@@ -452,7 +451,6 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr
(*ptr)++;
DEBUG("MC[0x%02X]", idx);
gctx->card->mc_write(gctx->card, idx, val);
printk(KERN_INFO "MC registers are not implemented.\n");
return;
}
switch(align) {
@@ -511,6 +511,7 @@ static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
entry = list_entry(list, struct drm_buffer_object, ddestroy);
nentry = NULL;
DRM_DEBUG("bo is %p, %d\n", entry, entry->num_pages);
if (next != &bm->ddestroy) {
nentry = list_entry(next, struct drm_buffer_object,
ddestroy);
@@ -1330,14 +1331,15 @@ static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
int ret;
DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
(unsigned long long) bo->mem.proposed_flags,
(unsigned long long) bo->mem.flags);
ret = drm_bo_modify_proposed_flags (bo, flags, mask);
if (ret)
return ret;
DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
(unsigned long long) bo->mem.proposed_flags,
(unsigned long long) bo->mem.flags);
ret = drm_bo_wait_unmapped(bo, no_wait);
if (ret)
return ret;
@@ -2084,3 +2086,48 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
return 0;
}
/* used to EVICT VRAM lru at suspend time */
void drm_bo_evict_mm(struct drm_device *dev, int mem_type, int no_wait)
{
struct drm_buffer_manager *bm = &dev->bm;
struct drm_mem_type_manager *man = &bm->man[mem_type];
struct drm_buffer_object *entry;
/* we need to migrate all objects in VRAM */
struct list_head *lru;
int ret;
/* evict all buffers on the LRU - won't evict pinned buffers */
mutex_lock(&dev->struct_mutex);
do {
lru = &man->lru;
if (lru->next == lru) {
DRM_ERROR("lru empty\n");
break;
}
entry = list_entry(lru->next, struct drm_buffer_object, lru);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
DRM_ERROR("Evicting %p %d\n", entry, entry->num_pages);
ret = drm_bo_evict(entry, mem_type, no_wait);
mutex_unlock(&entry->mutex);
if (ret)
DRM_ERROR("Evict failed for BO\n");
mutex_lock(&entry->mutex);
(void)drm_bo_expire_fence(entry, 0);
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_unlocked(&entry);
mutex_lock(&dev->struct_mutex);
} while(0);
mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_bo_evict_mm);
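drm_bo_evict_mm() above takes the head of the given memory type's LRU, grabs a reference, and evicts that buffer with the per-BO and struct mutexes taken in the usual order; pinned (NO_EVICT) buffers are left alone. The intended caller is a driver suspend path. A minimal illustrative call, mirroring the radeon_pm.c hunk added later in this commit (the RADEON_IS_IGP check is radeon-specific, not part of this helper):

/* illustrative caller only: flush VRAM before powering down;
 * no_wait = 0 lets the eviction wait on outstanding fences */
if (!(dev_priv->flags & RADEON_IS_IGP))
	drm_bo_evict_mm(dev, DRM_BO_MEM_VRAM, 0);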
@@ -381,7 +381,6 @@ int drm_fence_object_wait(struct drm_fence_object *fence,
if (driver->wait)
return driver->wait(fence, lazy, !ignore_signals, mask);
drm_fence_object_flush(fence, mask);
if (driver->has_irq(dev, fence->fence_class, mask)) {
if (!ignore_signals)
@@ -409,8 +408,6 @@ int drm_fence_object_wait(struct drm_fence_object *fence,
}
EXPORT_SYMBOL(drm_fence_object_wait);
int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
uint32_t fence_class, uint32_t type)
{
@@ -796,6 +796,7 @@ extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_
extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
extern void drm_bo_evict_mm(struct drm_device *dev, int mem_type, int no_wait);
/*
* Buffer object memory move- and map helpers.
* drm_bo_move.c
@@ -59,28 +59,6 @@ static int dri_library_name(struct drm_device * dev, char * buf)
"r300"));
}
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
/* Disable *all* interrupts */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
return 0;
}
static int radeon_resume(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
/* Restore interrupt registers */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
return 0;
}
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
@@ -736,7 +736,7 @@ int radeonfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_heigh
}
obj_priv = fbo->driver_private;
ret = radeon_gem_object_pin(fbo, PAGE_SIZE);
ret = radeon_gem_object_pin(fbo, PAGE_SIZE, RADEON_GEM_DOMAIN_VRAM);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
mutex_lock(&dev->struct_mutex);
@@ -45,6 +45,8 @@ int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class,
return -EINVAL;
radeon_emit_irq(dev);
DRM_DEBUG("emitting %d\n", dev_priv->counter);
*sequence = (uint32_t) dev_priv->counter;
*native_type = DRM_FENCE_TYPE_EXE;
@@ -60,6 +62,7 @@ static void radeon_fence_poll(struct drm_device *dev, uint32_t fence_class,
sequence = READ_BREADCRUMB(dev_priv);
DRM_DEBUG("polling %d\n", sequence);
drm_fence_handler(dev, 0, sequence,
DRM_FENCE_TYPE_EXE, 0);
}
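The emit path above hands out dev_priv->counter as the fence sequence and, via the added radeon_emit_irq() call, has the CP signal when it reaches it; radeon_fence_poll() then reads the breadcrumb value written back by the hardware and feeds it to drm_fence_handler(), which retires every fence whose sequence is already covered. A self-contained user-space sketch of that emit/poll breadcrumb pattern (all names and values here are illustrative stand-ins, not the kernel API):

#include <stdint.h>
#include <stdio.h>

static uint32_t counter;       /* stands in for dev_priv->counter */
static uint32_t breadcrumb;    /* stands in for READ_BREADCRUMB(dev_priv) */
static uint32_t last_retired;  /* highest sequence already signalled */

static uint32_t emit_fence(void)
{
	return ++counter;      /* emit: hand out the next sequence number */
}

static void poll_fences(void)
{
	uint32_t seq = breadcrumb;   /* poll: read what the "GPU" has completed */
	while (last_retired < seq)   /* retire everything at or below it */
		printf("fence %u signalled\n", ++last_retired);
}

int main(void)
{
	uint32_t a = emit_fence();
	uint32_t b = emit_fence();
	breadcrumb = a;        /* pretend the GPU finished the first batch */
	poll_fences();         /* retires a, leaves b pending */
	breadcrumb = b;
	poll_fences();         /* retires b */
	return 0;
}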
@@ -43,7 +43,6 @@ int radeon_gem_init_object(struct drm_gem_object *obj)
obj->driver_private = obj_priv;
obj_priv->obj = obj;
return 0;
}
@@ -320,6 +319,8 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
struct drm_radeon_gem_object *obj_priv;
int ret;
int flags = DRM_BO_FLAG_NO_EVICT;
int mask = DRM_BO_FLAG_NO_EVICT;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
@@ -329,15 +330,24 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG("got here %p %p %d\n", obj, obj_priv->bo, atomic_read(&obj_priv->bo->usage));
/* validate into a pin with no fence */
if (args->pin_domain) {
mask |= DRM_BO_MASK_MEM;
if (args->pin_domain == RADEON_GEM_DOMAIN_GTT)
flags |= DRM_BO_FLAG_MEM_TT;
else if (args->pin_domain == RADEON_GEM_DOMAIN_VRAM)
flags |= DRM_BO_FLAG_MEM_VRAM;
else
return -EINVAL;
}
if (!(obj_priv->bo->type != drm_bo_type_kernel && !DRM_SUSER(DRM_CURPROC))) {
ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
ret = drm_bo_do_validate(obj_priv->bo, flags, mask,
DRM_BO_HINT_DONT_FENCE, 0);
} else
ret = 0;
args->offset = obj_priv->bo->offset;
DRM_DEBUG("got here %p %p\n", obj, obj_priv->bo);
DRM_DEBUG("got here %p %p %x\n", obj, obj_priv->bo, obj_priv->bo->offset);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
@@ -361,7 +371,7 @@ int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
/* validate into a pin with no fence */
ret = drm_bo_do_validate(obj_priv->bo, DRM_BO_FLAG_NO_EVICT, DRM_BO_FLAG_NO_EVICT,
ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
DRM_BO_HINT_DONT_FENCE, 0);
mutex_lock(&dev->struct_mutex);
@@ -598,7 +608,11 @@ static int radeon_gart_init(struct drm_device *dev)
if (ret)
return -EINVAL;
DRM_DEBUG("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table.bo, dev_priv->mm.pcie_table.bo->offset);
dev_priv->mm.pcie_table_backup = kzalloc(RADEON_PCIGART_TABLE_SIZE, GFP_KERNEL);
if (!dev_priv->mm.pcie_table_backup)
return -EINVAL;
DRM_ERROR("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table.bo, dev_priv->mm.pcie_table.bo->offset);
ret = drm_bo_kmap(dev_priv->mm.pcie_table.bo, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT,
&dev_priv->mm.pcie_table.kmap);
if (ret)
@@ -690,10 +704,11 @@ int radeon_alloc_gart_objects(struct drm_device *dev)
}
static void radeon_init_memory_map(struct drm_device *dev)
void radeon_init_memory_map(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
u32 mem_size, aper_size;
u32 tmp;
dev_priv->mc_fb_location = radeon_read_fb_location(dev_priv);
radeon_read_agp_location(dev_priv, &dev_priv->mc_agp_loc_lo, &dev_priv->mc_agp_loc_hi);
@@ -841,6 +856,10 @@ void radeon_gem_mm_fini(struct drm_device *dev)
}
if (dev_priv->flags & RADEON_IS_PCIE) {
if (dev_priv->mm.pcie_table_backup) {
kfree(dev_priv->mm.pcie_table_backup);
dev_priv->mm.pcie_table_backup = NULL;
}
if (dev_priv->mm.pcie_table.bo) {
drm_bo_kunmap(&dev_priv->mm.pcie_table.kmap);
drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table.bo);
@@ -858,7 +877,31 @@ void radeon_gem_mm_fini(struct drm_device *dev)
}
int radeon_gem_object_pin(struct drm_gem_object *obj,
uint32_t alignment)
uint32_t alignment, uint32_t pin_domain)
{
struct drm_radeon_gem_object *obj_priv;
int ret;
uint32_t flags = DRM_BO_FLAG_NO_EVICT;
uint32_t mask = DRM_BO_FLAG_NO_EVICT;
obj_priv = obj->driver_private;
if (pin_domain) {
mask |= DRM_BO_MASK_MEM;
if (pin_domain == RADEON_GEM_DOMAIN_GTT)
flags |= DRM_BO_FLAG_MEM_TT;
else if (pin_domain == RADEON_GEM_DOMAIN_VRAM)
flags |= DRM_BO_FLAG_MEM_VRAM;
else
return -EINVAL;
}
ret = drm_bo_do_validate(obj_priv->bo, flags, mask,
DRM_BO_HINT_DONT_FENCE, 0);
return ret;
}
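The pin_domain to placement-flag mapping above is the same one added to radeon_gem_pin_ioctl() earlier in this diff. A standalone sketch of that mapping factored into a single helper; the constants below are local stand-ins with illustrative values, not the real RADEON_GEM_DOMAIN_*/DRM_BO_* definitions:

#include <stdint.h>
#include <stdio.h>

#define GEM_DOMAIN_GTT   (1u << 1)   /* stand-in for RADEON_GEM_DOMAIN_GTT */
#define GEM_DOMAIN_VRAM  (1u << 2)   /* stand-in for RADEON_GEM_DOMAIN_VRAM */

#define BO_FLAG_NO_EVICT (1u << 0)   /* stand-in for DRM_BO_FLAG_NO_EVICT */
#define BO_FLAG_MEM_TT   (1u << 1)   /* stand-in for DRM_BO_FLAG_MEM_TT */
#define BO_FLAG_MEM_VRAM (1u << 2)   /* stand-in for DRM_BO_FLAG_MEM_VRAM */
#define BO_MASK_MEM      (BO_FLAG_MEM_TT | BO_FLAG_MEM_VRAM)  /* stand-in for DRM_BO_MASK_MEM */

/* Returns 0 and fills in the validate flags/mask, or -1 for an unknown domain. */
static int domain_to_bo_flags(uint32_t pin_domain, uint32_t *flags, uint32_t *mask)
{
	*flags = BO_FLAG_NO_EVICT;
	*mask = BO_FLAG_NO_EVICT;
	if (!pin_domain)
		return 0;                /* no domain requested: pin wherever the BO sits */
	*mask |= BO_MASK_MEM;
	if (pin_domain == GEM_DOMAIN_GTT)
		*flags |= BO_FLAG_MEM_TT;
	else if (pin_domain == GEM_DOMAIN_VRAM)
		*flags |= BO_FLAG_MEM_VRAM;
	else
		return -1;               /* mirrors the -EINVAL paths in the diff */
	return 0;
}

int main(void)
{
	uint32_t flags, mask;
	if (domain_to_bo_flags(GEM_DOMAIN_VRAM, &flags, &mask) == 0)
		printf("flags 0x%x mask 0x%x\n", flags, mask);
	return 0;
}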
int radeon_gem_object_unpin(struct drm_gem_object *obj)
{
struct drm_radeon_gem_object *obj_priv;
int ret;
@@ -1335,3 +1378,5 @@ void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master
dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
}
@@ -0,0 +1,180 @@
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "atom.h"
#include "drm_crtc_helper.h"
int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_radeon_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb;
int i;
if (!dev || !dev_priv) {
return -ENODEV;
}
if (state.event == PM_EVENT_PRETHAW)
return 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
/* unpin the front buffers */
list_for_each_entry(fb, &dev->mode_config.fb_kernel_list, filp_head) {
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
if (!radeon_fb)
continue;
if (!radeon_fb->obj)
continue;
radeon_gem_object_unpin(radeon_fb->obj);
}
if (!(dev_priv->flags & RADEON_IS_IGP))
drm_bo_evict_mm(dev, DRM_BO_MEM_VRAM, 0);
if (dev_priv->flags & RADEON_IS_PCIE) {
memcpy_fromio(dev_priv->mm.pcie_table_backup, dev_priv->mm.pcie_table.kmap.virtual, RADEON_PCIGART_TABLE_SIZE);
}
dev_priv->pmregs.crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL);
for (i = 0; i < 8; i++)
dev_priv->pmregs.bios_scratch[i] = RADEON_READ(RADEON_BIOS_0_SCRATCH + (i * 4));
radeon_modeset_cp_suspend(dev);
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
return 0;
}
int radeon_resume(struct drm_device *dev)
{
struct drm_radeon_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb;
int i;
u32 tmp;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev))
return -1;
pci_set_master(dev->pdev);
/* Turn on bus mastering */
tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
/* on atom cards re init the whole card
and set the modes again */
if (dev_priv->is_atom_bios) {
struct atom_context *ctx = dev_priv->mode_info.atom_context;
atom_asic_init(ctx);
} else {
radeon_combios_asic_init(dev);
}
for (i = 0; i < 8; i++)
RADEON_WRITE(RADEON_BIOS_0_SCRATCH + (i * 4), dev_priv->pmregs.bios_scratch[i]);
/* VGA render mayhaps */
if (dev_priv->chip_family >= CHIP_RS600) {
uint32_t tmp;
RADEON_WRITE(AVIVO_D1VGA_CONTROL, 0);
RADEON_WRITE(AVIVO_D2VGA_CONTROL, 0);
tmp = RADEON_READ(0x300);
tmp &= ~(3 << 16);
RADEON_WRITE(0x300, tmp);
RADEON_WRITE(0x308, (1 << 8));
RADEON_WRITE(0x310, dev_priv->fb_location);
RADEON_WRITE(0x594, 0);
}
RADEON_WRITE(RADEON_CRTC_EXT_CNTL, dev_priv->pmregs.crtc_ext_cntl);
radeon_static_clocks_init(dev);
radeon_init_memory_map(dev);
if (dev_priv->flags & RADEON_IS_PCIE) {
memcpy_toio(dev_priv->mm.pcie_table.kmap.virtual, dev_priv->mm.pcie_table_backup, RADEON_PCIGART_TABLE_SIZE);
}
if (dev_priv->mm.ring.kmap.virtual)
memset(dev_priv->mm.ring.kmap.virtual, 0, RADEON_DEFAULT_RING_SIZE);
if (dev_priv->mm.ring_read.kmap.virtual)
memset(dev_priv->mm.ring_read.kmap.virtual, 0, PAGE_SIZE);
radeon_modeset_cp_resume(dev);
/* reset swi reg */
RADEON_WRITE(RADEON_LAST_SWI_REG, dev_priv->counter);
radeon_enable_interrupt(dev);
/* reset the context for userspace */
if (dev->primary->master) {
struct drm_radeon_master_private *master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
master_priv->sarea_priv->ctx_owner = 0;
}
/* unpin the front buffers */
list_for_each_entry(fb, &dev->mode_config.fb_kernel_list, filp_head) {
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
if (!radeon_fb)
continue;
if (!radeon_fb->obj)
continue;
radeon_gem_object_pin(radeon_fb->obj, PAGE_SIZE, RADEON_GEM_DOMAIN_VRAM);
}
/* blat the mode back in */
drm_helper_resume_force_mode(dev);
return 0;
}
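These exported radeon_suspend()/radeon_resume() replace the static stubs removed from radeon_drv.c earlier in this diff, and radeon_drv.h below gains matching prototypes, so the existing driver callbacks can keep pointing at the same names. That wiring is not shown in these hunks; a sketch of what it would look like, assuming the drm_driver suspend/resume hooks of this tree:

/* hypothetical wiring, not part of this commit */
static struct drm_driver driver = {
	/* ... existing fields unchanged ... */
	.suspend = radeon_suspend,
	.resume = radeon_resume,
};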
@@ -521,7 +521,6 @@ static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
DRM_DEBUG("\n");
#if 0
u32 tmp;
tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
#endif
@@ -761,8 +760,6 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
dev_priv->ring.size_l2qw);
#endif
/* Start with assuming that writeback doesn't work */
dev_priv->writeback_works = 0;
/* Initialize the scratch register pointer. This will cause
* the scratch register values to be written out to memory
@@ -1341,6 +1338,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
radeon_set_pcigart(dev_priv, 1);
}
/* Start with assuming that writeback doesn't work */
dev_priv->writeback_works = 0;
radeon_cp_load_microcode(dev_priv);
radeon_cp_init_ring_buffer(dev, dev_priv);
@@ -2301,14 +2301,64 @@ static void radeon_set_dynamic_clock(struct drm_device *dev, int mode)
}
int radeon_modeset_cp_init(struct drm_device *dev)
int radeon_modeset_cp_suspend(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int ret;
ret = radeon_do_cp_idle(dev_priv);
if (ret)
DRM_ERROR("failed to idle CP on suspend\n");
radeon_do_cp_stop(dev_priv);
radeon_do_engine_reset(dev);
if (dev_priv->flags & RADEON_IS_AGP) {
} else {
radeon_set_pcigart(dev_priv, 0);
}
return 0;
}
int radeon_modeset_cp_resume(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
uint32_t tmp;
radeon_do_wait_for_idle(dev_priv);
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else
#endif
{
/* Turn on PCI GART */
radeon_set_pcigart(dev_priv, 1);
}
radeon_gart_flush(dev);
DRM_ERROR("microcode loading\n");
radeon_cp_load_microcode(dev_priv);
radeon_cp_init_ring_buffer(dev, dev_priv);
DRM_ERROR("engine init\n");
radeon_do_engine_reset(dev);
radeon_do_cp_start(dev_priv);
return 0;
}
int radeon_modeset_cp_init(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
/* allocate a ring and ring rptr bits from GART space */
/* these are allocated in GEM files */
/* Start with assuming that writeback doesn't work */
dev_priv->writeback_works = 0;
dev_priv->usec_timeout = RADEON_DEFAULT_CP_TIMEOUT;
dev_priv->ring.size = RADEON_DEFAULT_RING_SIZE;
dev_priv->cp_mode = RADEON_CSQ_PRIBM_INDBM;
@@ -2327,23 +2377,8 @@ int radeon_modeset_cp_init(struct drm_device *dev)
dev_priv->new_memmap = true;
r300_init_reg_flags(dev);
radeon_cp_load_microcode(dev_priv);
DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring.bo->offset, dev_priv->mm.ring_read.bo->offset);
radeon_cp_init_ring_buffer(dev, dev_priv);
/* need to enable BUS mastering in Buscntl */
tmp = RADEON_READ(RADEON_BUS_CNTL);
tmp &= ~RADEON_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
radeon_do_engine_reset(dev);
radeon_test_writeback(dev_priv);
radeon_do_cp_start(dev_priv);
return 0;
return radeon_modeset_cp_resume(dev);
}
static bool radeon_get_bios(struct drm_device *dev)
@@ -2418,6 +2453,20 @@ int radeon_modeset_preinit(struct drm_device *dev)
return 0;
}
int radeon_static_clocks_init(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_IS_MOBILITY) && !radeon_is_avivo(dev_priv)) {
radeon_set_dynamic_clock(dev, radeon_dynclks);
} else if (radeon_is_avivo(dev_priv)) {
if (radeon_dynclks) {
radeon_atom_static_pwrmgt_setup(dev, 1);
radeon_atom_dyn_clk_setup(dev, 1);
}
}
radeon_force_some_clocks(dev);
}
int radeon_driver_load(struct drm_device *dev, unsigned long flags)
{
@@ -2473,7 +2522,6 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
if (drm_core_check_feature(dev, DRIVER_MODESET))
radeon_modeset_preinit(dev);
radeon_get_vram_type(dev);
dev_priv->pll_errata = 0;
@@ -2493,17 +2541,8 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->pll_errata |= CHIP_ERRATA_PLL_DELAY;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if ((dev_priv->flags & RADEON_IS_MOBILITY) && !radeon_is_avivo(dev_priv)) {
radeon_set_dynamic_clock(dev, radeon_dynclks);
} else if (radeon_is_avivo(dev_priv)) {
if (radeon_dynclks) {
radeon_atom_static_pwrmgt_setup(dev, 1);
radeon_atom_dyn_clk_setup(dev, 1);
}
}
radeon_force_some_clocks(dev);
}
if (drm_core_check_feature(dev, DRIVER_MODESET))
radeon_static_clocks_init(dev);
/* init memory manager - start with all of VRAM and a 32MB GART aperture for now */
dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
@@ -273,6 +273,8 @@ struct radeon_mm_info {
uint64_t gart_start;
uint64_t gart_size;
void *pcie_table_backup;
struct radeon_mm_obj pcie_table;
struct radeon_mm_obj ring;
@@ -314,6 +316,11 @@ struct drm_radeon_cs_priv {
uint32_t *reloc, uint32_t *offset);
};
struct radeon_pm_regs {
uint32_t crtc_ext_cntl;
uint32_t bios_scratch[8];
};
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
|
|||
/* ib bitmap */
|
||||
uint64_t ib_alloc_bitmap; // TO DO replace with a real bitmap
|
||||
struct drm_radeon_cs_priv cs;
|
||||
|
||||
struct radeon_pm_regs pmregs;
|
||||
} drm_radeon_private_t;
|
||||
|
||||
typedef struct drm_radeon_buf_priv {
|
||||
|
@@ -527,6 +536,11 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t *cmdbuf);
extern int radeon_modeset_cp_suspend(struct drm_device *dev);
extern int radeon_modeset_cp_resume(struct drm_device *dev);
/* radeon_pm.c */
int radeon_suspend(struct drm_device *dev, pm_message_t state);
int radeon_resume(struct drm_device *dev);
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -1464,7 +1478,7 @@ do { \
* Ring control
*/
#define RADEON_VERBOSE 0
#define RADEON_VERBOSE 1
#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring;
@@ -1648,7 +1662,8 @@ extern int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
extern int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_object_pin(struct drm_gem_object *obj,
uint32_t alignment);
uint32_t alignment, uint32_t pin_domain);
int radeon_gem_object_unpin(struct drm_gem_object *obj);
int radeon_gem_indirect_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1670,6 +1685,8 @@ extern void radeon_cp_dispatch_flip(struct drm_device * dev, struct drm_master *
extern int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
extern int radeon_cs_init(struct drm_device *dev);
void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master);
void radeon_init_memory_map(struct drm_device *dev);
#define MARK_SAFE 1
#define MARK_CHECK_OFFSET 2