Initial pass at converting driver to DRM infrastructure.

parent 8d60bf2f19
commit 5ba94c2ab8
@@ -91,8 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
 NVHEADERS = nv_drv.h $(DRMHEADERS)
 FFBHEADERS = ffb_drv.h $(DRMHEADERS)
 NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
-XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_fb.h xgi_linux.h xgi_misc.h \
-	xgi_pcie.h xgi_regs.h xgi_types.h
+XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
 
 PROGS = dristat drmstat
 
@@ -26,7 +26,6 @@
  * DEALINGS IN THE SOFTWARE.
  ***************************************************************************/
 
-#include "xgi_linux.h"
 #include "xgi_drv.h"
 #include "xgi_regs.h"
 #include "xgi_misc.h"
@@ -55,18 +54,19 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size)
 
 	s_cmdring._cmdRingSize = mem_alloc.size;
 	s_cmdring._cmdRingBuffer = mem_alloc.hw_addr;
-	s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr;
+	s_cmdring._cmdRingAllocOffset = mem_alloc.offset;
 	s_cmdring._lastBatchStartAddr = 0;
 	s_cmdring._cmdRingOffset = 0;
 
 	return 1;
 }
 
-void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo)
+static void xgi_submit_cmdlist(struct xgi_info * info,
+			       struct xgi_cmd_info * pCmdInfo)
 {
 	const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo);
 
-	XGI_INFO("After getCurBatchBeginPort()\n");
+	DRM_INFO("After getCurBatchBeginPort()\n");
 
 	if (s_cmdring._lastBatchStartAddr == 0) {
 		const unsigned int portOffset = BASE_3D_ENG + beginPort;
@@ -75,50 +75,53 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo)
 		/* xgi_waitfor_pci_idle(info); */
 
 		// Enable PCI Trigger Mode
-		XGI_INFO("Enable PCI Trigger Mode \n");
+		DRM_INFO("Enable PCI Trigger Mode \n");
 
 
 		/* Jong 06/14/2006; 0x400001a */
-		dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+		dwWriteReg(info->mmio_map,
+			   BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
 			   (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
 			   M2REG_CLEAR_COUNTERS_MASK | 0x08 |
 			   M2REG_PCI_TRIGGER_MODE_MASK);
 
 		/* Jong 06/14/2006; 0x400000a */
-		dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+		dwWriteReg(info->mmio_map,
+			   BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
 			   (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
 			   M2REG_PCI_TRIGGER_MODE_MASK);
 
 		// Send PCI begin command
-		XGI_INFO("Send PCI begin command \n");
+		DRM_INFO("Send PCI begin command \n");
 
-		XGI_INFO("portOffset=%d, beginPort=%d\n",
+		DRM_INFO("portOffset=%d, beginPort=%d\n",
 			 portOffset, beginPort);
 
 		/* beginPort = 48; */
 		/* 0xc100000 */
-		dwWriteReg(portOffset,
+		dwWriteReg(info->mmio_map, portOffset,
 			   (beginPort << 22) + (BEGIN_VALID_MASK) +
 			   pCmdInfo->_curDebugID);
 
-		XGI_INFO("Send PCI begin command- After\n");
+		DRM_INFO("Send PCI begin command- After\n");
 
 		/* 0x80000024 */
-		dwWriteReg(portOffset + 4,
+		dwWriteReg(info->mmio_map, portOffset + 4,
 			   BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize);
 
 		/* 0x1010000 */
-		dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4));
+		dwWriteReg(info->mmio_map, portOffset + 8,
+			   (pCmdInfo->_firstBeginAddr >> 4));
 
 		/* Jong 06/12/2006; system hang; marked for test */
-		dwWriteReg(portOffset + 12, 0);
+		dwWriteReg(info->mmio_map, portOffset + 12, 0);
 
 		/* Jong 06/13/2006; remove marked for system hang test */
 		/* xgi_waitfor_pci_idle(info); */
 	} else {
 		u32 *lastBatchVirtAddr;
 
-		XGI_INFO("s_cmdring._lastBatchStartAddr != 0\n");
+		DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n");
 
 		if (pCmdInfo->_firstBeginType == BTYPE_3D) {
 			addFlush2D(info);
@@ -146,21 +149,38 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo)
 			/* Jong 06/12/2006; system hang; marked for test */
 			triggerHWCommandList(info, pCmdInfo->_beginCount);
 		} else {
-			XGI_ERROR("lastBatchVirtAddr is NULL\n");
+			DRM_ERROR("lastBatchVirtAddr is NULL\n");
 		}
 	}
 
 	s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr;
-	XGI_INFO("End\n");
+	DRM_INFO("End\n");
 }
 
+
+int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_cmd_info cmd_list;
+	struct xgi_info *info = dev->dev_private;
+
+	DRM_COPY_FROM_USER_IOCTL(cmd_list,
+				 (struct xgi_cmd_info __user *) data,
+				 sizeof(cmd_list));
+
+	xgi_submit_cmdlist(info, &cmd_list);
+	return 0;
+}
+
+
 /*
     state: 0 - console
            1 - graphic
            2 - fb
            3 - logout
 */
-void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo)
+int xgi_state_change(struct xgi_info * info, unsigned int to,
+		     unsigned int from)
 {
 #define STATE_CONSOLE  0
 #define STATE_GRAPHIC  1
@@ -169,26 +189,40 @@ void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo
 #define STATE_REBOOT   4
 #define STATE_SHUTDOWN 5
 
-	if ((pStateInfo->_fromState == STATE_GRAPHIC)
-	    && (pStateInfo->_toState == STATE_CONSOLE)) {
-		XGI_INFO("[kd] I see, now is to leaveVT\n");
+	if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
+		DRM_INFO("[kd] I see, now is to leaveVT\n");
 		// stop to received batch
-	} else if ((pStateInfo->_fromState == STATE_CONSOLE)
-		   && (pStateInfo->_toState == STATE_GRAPHIC)) {
-		XGI_INFO("[kd] I see, now is to enterVT\n");
+	} else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
+		DRM_INFO("[kd] I see, now is to enterVT\n");
 		xgi_cmdlist_reset();
-	} else if ((pStateInfo->_fromState == STATE_GRAPHIC)
-		   && ((pStateInfo->_toState == STATE_LOGOUT)
-		       || (pStateInfo->_toState == STATE_REBOOT)
-		       || (pStateInfo->_toState == STATE_SHUTDOWN))) {
-		XGI_INFO("[kd] I see, not is to exit from X\n");
+	} else if ((from == STATE_GRAPHIC)
+		   && ((to == STATE_LOGOUT)
+		       || (to == STATE_REBOOT)
+		       || (to == STATE_SHUTDOWN))) {
+		DRM_INFO("[kd] I see, not is to exit from X\n");
 		// stop to received batch
 	} else {
-		XGI_ERROR("[kd] Should not happen\n");
+		DRM_ERROR("[kd] Should not happen\n");
+		return DRM_ERR(EINVAL);
 	}
 
+	return 0;
 }
 
+
+int xgi_state_change_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_state_info state;
+	struct xgi_info *info = dev->dev_private;
+
+	DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data,
+				 sizeof(state));
+
+	return xgi_state_change(info, state._toState, state._fromState);
+}
+
+
 void xgi_cmdlist_reset(void)
 {
 	s_cmdring._lastBatchStartAddr = 0;
@@ -198,7 +232,7 @@ void xgi_cmdlist_reset(void)
 void xgi_cmdlist_cleanup(struct xgi_info * info)
 {
 	if (s_cmdring._cmdRingBuffer != 0) {
-		xgi_pcie_free(info, s_cmdring._cmdRingBusAddr);
+		xgi_pcie_free(info, s_cmdring._cmdRingAllocOffset, NULL);
 		s_cmdring._cmdRingBuffer = 0;
 		s_cmdring._cmdRingOffset = 0;
 		s_cmdring._cmdRingSize = 0;
@@ -212,7 +246,8 @@ static void triggerHWCommandList(struct xgi_info * info,
 
 	//Fix me, currently we just trigger one time
 	while (triggerCounter--) {
-		dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
+		dwWriteReg(info->mmio_map,
+			   BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
 			   0x05000000 + (0x0ffff & s_triggerID++));
 		// xgi_waitfor_pci_idle(info);
 	}
@@ -60,16 +60,15 @@ typedef enum {
 struct xgi_cmdring_info {
 	unsigned int _cmdRingSize;
 	u32 _cmdRingBuffer;
-	unsigned long _cmdRingBusAddr;
+	unsigned long _cmdRingAllocOffset;
 	u32 _lastBatchStartAddr;
 	u32 _cmdRingOffset;
 };
 
 extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size);
 
-extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo);
-extern void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo);
+extern int xgi_state_change(struct xgi_info * info, unsigned int to,
+			    unsigned int from);
 
 extern void xgi_cmdlist_cleanup(struct xgi_info * info);
 
linux-core/xgi_drv.c: 1544 lines changed (file diff suppressed because it is too large)
@@ -29,115 +29,69 @@
 #ifndef _XGI_DRV_H_
 #define _XGI_DRV_H_
 
+#include "drmP.h"
+#include "drm.h"
+
+#define DRIVER_AUTHOR		"Andrea Zhang <andrea_zhang@macrosynergy.com>"
+
+#define DRIVER_NAME		"xgi"
+#define DRIVER_DESC		"XGI XP5 / XP10 / XG47"
+#define DRIVER_DATE		"20070710"
+
+#define DRIVER_MAJOR		0
+#define DRIVER_MINOR		8
+#define DRIVER_PATCHLEVEL	0
+
 #include "xgi_drm.h"
 
-#define XGI_MAJOR_VERSION   0
-#define XGI_MINOR_VERSION   7
-#define XGI_PATCHLEVEL      5
-
-#define XGI_DRV_VERSION     "0.7.5"
-
-#ifndef XGI_DRV_NAME
-#define XGI_DRV_NAME        "xgi"
-#endif
-
-/*
- * xgi reserved major device number, Set this to 0 to
- * request dynamic major number allocation.
- */
-#ifndef XGI_DEV_MAJOR
-#define XGI_DEV_MAJOR   0
-#endif
-
-#ifndef XGI_MAX_DEVICES
-#define XGI_MAX_DEVICES 1
-#endif
-
-/* Jong 06/06/2006 */
-/* #define XGI_DEBUG */
-
-#ifndef PCI_VENDOR_ID_XGI
-/*
-#define PCI_VENDOR_ID_XGI       0x1023
-*/
-#define PCI_VENDOR_ID_XGI       0x18CA
-
-#endif
-
-#ifndef PCI_DEVICE_ID_XP5
-#define PCI_DEVICE_ID_XP5       0x2200
-#endif
-
-#ifndef PCI_DEVICE_ID_XG47
-#define PCI_DEVICE_ID_XG47      0x0047
-#endif
-
-/* Macros to make printk easier */
-#define XGI_ERROR(fmt, arg...) \
-	printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)
-
-#define XGI_MEM_ERROR(area, fmt, arg...) \
-	printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)
-
-/* #define XGI_DEBUG */
-
-#ifdef XGI_DEBUG
-#define XGI_INFO(fmt, arg...) \
-	printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg)
-/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */
-#else
-#define XGI_INFO(fmt, arg...)   do { } while (0)
-#endif
-
-/* device name length; must be atleast 8 */
-#define XGI_DEVICE_NAME_LENGTH      40
-
-/* need a fake device number for control device; just to flag it for msgs */
-#define XGI_CONTROL_DEVICE_NUMBER   100
-
 struct xgi_aperture {
-	unsigned long base;
+	dma_addr_t base;
 	unsigned int size;
-	void *vbase;
+};
+
+struct xgi_mem_block {
+	struct list_head list;
+	unsigned long offset;
+	unsigned long size;
+	DRMFILE filp;
+
+	unsigned int owner;
+};
+
+struct xgi_mem_heap {
+	struct list_head free_list;
+	struct list_head used_list;
+	struct list_head sort_list;
+	unsigned long max_freesize;
+
+	bool initialized;
 };
 
 struct xgi_info {
-	struct pci_dev *dev;
-	int flags;
-	int device_number;
+	struct drm_device *dev;
+
+	bool bootstrap_done;
 
 	/* physical characteristics */
 	struct xgi_aperture mmio;
 	struct xgi_aperture fb;
 	struct xgi_aperture pcie;
 
+	struct drm_map *mmio_map;
+	struct drm_map *pcie_map;
+	struct drm_map *fb_map;
+
 	/* look up table parameters */
-	u32 *lut_base;
+	struct drm_dma_handle *lut_handle;
 	unsigned int lutPageSize;
-	unsigned int lutPageOrder;
-	bool isLUTInLFB;
-	unsigned int sdfbPageSize;
-
-	u32 pcie_config;
-	u32 pcie_status;
-
-	atomic_t use_count;
-
-	/* keep track of any pending bottom halfes */
-	struct tasklet_struct tasklet;
-
-	spinlock_t info_lock;
-
-	struct semaphore info_sem;
+	struct xgi_mem_heap fb_heap;
+	struct xgi_mem_heap pcie_heap;
+
 	struct semaphore fb_sem;
 	struct semaphore pcie_sem;
 };
 
-struct xgi_ioctl_post_vbios {
-	unsigned int bus;
-	unsigned int slot;
-};
-
 enum PcieOwner {
 	PCIE_2D = 0,
 	/*
@@ -151,64 +105,47 @@ enum PcieOwner {
 	PCIE_INVALID = 0x7fffffff
 };
 
-struct xgi_mem_pid {
-	struct list_head list;
-	enum xgi_mem_location location;
-	unsigned long bus_addr;
-	unsigned long pid;
-};
-
-/*
- * flags
- */
-#define XGI_FLAG_OPEN            0x0001
-#define XGI_FLAG_NEEDS_POSTING   0x0002
-#define XGI_FLAG_WAS_POSTED      0x0004
-#define XGI_FLAG_CONTROL         0x0010
-#define XGI_FLAG_MAP_REGS_EARLY  0x0200
-
-/* mmap(2) offsets */
-
-#define IS_IO_OFFSET(info, offset, length) \
-	(((offset) >= (info)->mmio.base) \
-	 && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size))
-
-/* Jong 06/14/2006 */
-/* (info)->fb.base is a base address for physical (bus) address space */
-/* what's the definition of offest? on physical (bus) address space or HW address space */
-/* Jong 06/15/2006; use HW address space */
-#define IS_FB_OFFSET(info, offset, length) \
-	(((offset) >= 0) \
-	 && (((offset) + (length)) <= (info)->fb.size))
-#if 0
-#define IS_FB_OFFSET(info, offset, length) \
-	(((offset) >= (info)->fb.base) \
-	 && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size))
-#endif
-
-#define IS_PCIE_OFFSET(info, offset, length) \
-	(((offset) >= (info)->pcie.base) \
-	 && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size))
+extern struct kmem_cache *xgi_mem_block_cache;
+extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap,
+	unsigned long size, enum PcieOwner owner);
+extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset,
+	DRMFILE filp);
+extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start,
+	unsigned int end);
+extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap);
 
 extern int xgi_fb_heap_init(struct xgi_info * info);
-extern void xgi_fb_heap_cleanup(struct xgi_info * info);
 
-extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
-			 pid_t pid);
-extern void xgi_fb_free(struct xgi_info * info, unsigned long offset);
-extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt);
+extern int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+	DRMFILE filp);
+
+extern int xgi_fb_free(struct xgi_info * info, unsigned long offset,
+	DRMFILE filp);
 
 extern int xgi_pcie_heap_init(struct xgi_info * info);
-extern void xgi_pcie_heap_cleanup(struct xgi_info * info);
+extern void xgi_pcie_lut_cleanup(struct xgi_info * info);
 
-extern void xgi_pcie_alloc(struct xgi_info * info,
-			   struct xgi_mem_alloc * alloc, pid_t pid);
-extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset);
-extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
-						  unsigned long address);
-extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address);
-
-extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address);
+extern int xgi_pcie_alloc(struct xgi_info * info,
+	struct xgi_mem_alloc * alloc, DRMFILE filp);
+
+extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset,
+	DRMFILE filp);
+
+extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address);
+
+extern void xgi_pcie_free_all(struct xgi_info *, DRMFILE);
+extern void xgi_fb_free_all(struct xgi_info *, DRMFILE);
+
+extern int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_fb_free_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_dump_register_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_state_change_ioctl(DRM_IOCTL_ARGS);
 
 #endif
@@ -26,343 +26,126 @@
  * DEALINGS IN THE SOFTWARE.
  ***************************************************************************/
 
-#include "xgi_linux.h"
 #include "xgi_drv.h"
-#include "xgi_fb.h"
 
 #define XGI_FB_HEAP_START 0x1000000
 
-static struct xgi_mem_heap *xgi_fb_heap;
-static struct kmem_cache *xgi_fb_cache_block = NULL;
-extern struct list_head xgi_mempid_list;
+struct kmem_cache *xgi_mem_block_cache = NULL;
 
 static struct xgi_mem_block *xgi_mem_new_node(void);
-static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size);
-static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset);
-
-void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
-		  pid_t pid)
-{
-	struct xgi_mem_block *block;
-	struct xgi_mem_pid *mempid_block;
-
-	if (alloc->is_front) {
-		alloc->location = XGI_MEMLOC_LOCAL;
-		alloc->bus_addr = info->fb.base;
-		alloc->hw_addr = 0;
-		XGI_INFO
-		    ("Video RAM allocation on front buffer successfully! \n");
-	} else {
-		xgi_down(info->fb_sem);
-		block = xgi_mem_alloc(info, alloc->size);
-		xgi_up(info->fb_sem);
-
-		if (block == NULL) {
-			alloc->location = XGI_MEMLOC_LOCAL;
-			alloc->size = 0;
-			alloc->bus_addr = 0;
-			alloc->hw_addr = 0;
-			XGI_ERROR("Video RAM allocation failed\n");
-		} else {
-			XGI_INFO("Video RAM allocation succeeded: 0x%p\n",
-				 (char *)block->offset);
-			alloc->location = XGI_MEMLOC_LOCAL;
-			alloc->size = block->size;
-			alloc->bus_addr = info->fb.base + block->offset;
-			alloc->hw_addr = block->offset;
-
-			/* manage mempid */
-			mempid_block =
-			    kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
-			mempid_block->location = XGI_MEMLOC_LOCAL;
-			mempid_block->bus_addr = alloc->bus_addr;
-			mempid_block->pid = pid;
-
-			if (!mempid_block)
-				XGI_ERROR("mempid_block alloc failed\n");
-
-			XGI_INFO
-			    ("Memory ProcessID add one fb block pid:%ld successfully! \n",
-			     mempid_block->pid);
-			list_add(&mempid_block->list, &xgi_mempid_list);
-		}
-	}
-}
-
-void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr)
-{
-	struct xgi_mem_block *block;
-	unsigned long offset = bus_addr - info->fb.base;
-	struct xgi_mem_pid *mempid_block;
-	struct xgi_mem_pid *mempid_freeblock = NULL;
-
-	if (offset < 0) {
-		XGI_INFO("free onscreen frame buffer successfully !\n");
-	} else {
-		xgi_down(info->fb_sem);
-		block = xgi_mem_free(info, offset);
-		xgi_up(info->fb_sem);
-
-		if (block == NULL) {
-			XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n",
-				  offset);
-		}
-
-		/* manage mempid */
-		list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
-			if (mempid_block->location == XGI_MEMLOC_LOCAL
-			    && mempid_block->bus_addr == bus_addr) {
-				mempid_freeblock = mempid_block;
-				break;
-			}
-		}
-		if (mempid_freeblock) {
-			list_del(&mempid_freeblock->list);
-			XGI_INFO
-			    ("Memory ProcessID delete one fb block pid:%ld successfully! \n",
-			     mempid_freeblock->pid);
-			kfree(mempid_freeblock);
-		}
-	}
-}
-
-int xgi_fb_heap_init(struct xgi_info * info)
+int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start,
+		      unsigned int end)
 {
 	struct xgi_mem_block *block;
 
-	xgi_fb_heap = kmalloc(sizeof(struct xgi_mem_heap), GFP_KERNEL);
-	if (!xgi_fb_heap) {
-		XGI_ERROR("xgi_fb_heap alloc failed\n");
-		return 0;
-	}
+	INIT_LIST_HEAD(&heap->free_list);
+	INIT_LIST_HEAD(&heap->used_list);
+	INIT_LIST_HEAD(&heap->sort_list);
+	heap->initialized = TRUE;
 
-	INIT_LIST_HEAD(&xgi_fb_heap->free_list);
-	INIT_LIST_HEAD(&xgi_fb_heap->used_list);
-	INIT_LIST_HEAD(&xgi_fb_heap->sort_list);
-
-	xgi_fb_cache_block =
-	    kmem_cache_create("xgi_fb_block", sizeof(struct xgi_mem_block), 0,
-			      SLAB_HWCACHE_ALIGN, NULL, NULL);
-
-	if (NULL == xgi_fb_cache_block) {
-		XGI_ERROR("Fail to creat xgi_fb_block\n");
-		goto fail1;
-	}
-
-	block =
-	    (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block,
-						      GFP_KERNEL);
+	block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL);
 	if (!block) {
-		XGI_ERROR("kmem_cache_alloc failed\n");
-		goto fail2;
+		return DRM_ERR(ENOMEM);
 	}
 
-	block->offset = XGI_FB_HEAP_START;
-	block->size = info->fb.size - XGI_FB_HEAP_START;
+	block->offset = start;
+	block->size = end - start;
 
-	list_add(&block->list, &xgi_fb_heap->free_list);
+	list_add(&block->list, &heap->free_list);
 
-	xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START;
+	heap->max_freesize = end - start;
 
-	XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset,
-		 block->size);
-	XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n",
-		 xgi_fb_heap->max_freesize);
-
-	return 1;
-
-      fail2:
-	if (xgi_fb_cache_block) {
-		kmem_cache_destroy(xgi_fb_cache_block);
-		xgi_fb_cache_block = NULL;
-	}
-      fail1:
-	if (xgi_fb_heap) {
-		kfree(xgi_fb_heap);
-		xgi_fb_heap = NULL;
-	}
 	return 0;
 }
 
-void xgi_fb_heap_cleanup(struct xgi_info * info)
+void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap)
 {
 	struct list_head *free_list;
 	struct xgi_mem_block *block;
 	struct xgi_mem_block *next;
 	int i;
 
-	if (xgi_fb_heap) {
-		free_list = &xgi_fb_heap->free_list;
-		for (i = 0; i < 3; i++, free_list++) {
-			list_for_each_entry_safe(block, next, free_list, list) {
-				XGI_INFO
-				    ("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
-				     i, block->offset, block->size);
-				//XGI_INFO("No. %d free block: 0x%p \n", i, block);
-				kmem_cache_free(xgi_fb_cache_block, block);
-				block = NULL;
-			}
+	free_list = &heap->free_list;
+	for (i = 0; i < 3; i++, free_list++) {
+		list_for_each_entry_safe(block, next, free_list, list) {
+			DRM_INFO
+			    ("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
+			     i, block->offset, block->size);
+			kmem_cache_free(xgi_mem_block_cache, block);
+			block = NULL;
 		}
-		XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap);
-		kfree(xgi_fb_heap);
-		xgi_fb_heap = NULL;
 	}
 
-	if (xgi_fb_cache_block) {
-		kmem_cache_destroy(xgi_fb_cache_block);
-		xgi_fb_cache_block = NULL;
-	}
+	heap->initialized = 0;
 }
 
-static struct xgi_mem_block *xgi_mem_new_node(void)
+struct xgi_mem_block *xgi_mem_new_node(void)
 {
-	struct xgi_mem_block *block;
-
-	block =
-	    (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block,
-						      GFP_KERNEL);
+	struct xgi_mem_block *block =
+		kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL);
+
 	if (!block) {
-		XGI_ERROR("kmem_cache_alloc failed\n");
+		DRM_ERROR("kmem_cache_alloc failed\n");
 		return NULL;
 	}
 
+	block->offset = 0;
+	block->size = 0;
+	block->owner = PCIE_INVALID;
+	block->filp = (DRMFILE) -1;
+
 	return block;
 }
 
-#if 0
-static void xgi_mem_insert_node_after(struct xgi_mem_list * list,
-				      struct xgi_mem_block * current,
-				      struct xgi_mem_block * block);
-static void xgi_mem_insert_node_before(struct xgi_mem_list * list,
-				       struct xgi_mem_block * current,
-				       struct xgi_mem_block * block);
-static void xgi_mem_insert_node_head(struct xgi_mem_list * list,
-				     struct xgi_mem_block * block);
-static void xgi_mem_insert_node_tail(struct xgi_mem_list * list,
-				     struct xgi_mem_block * block);
-static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block);
-/*
- *	insert node:block after node:current
- */
-static void xgi_mem_insert_node_after(struct xgi_mem_list * list,
-				      struct xgi_mem_block * current,
-				      struct xgi_mem_block * block)
-{
-	block->prev = current;
-	block->next = current->next;
-	current->next = block;
-
-	if (current == list->tail) {
-		list->tail = block;
-	} else {
-		block->next->prev = block;
-	}
-}
-
-/*
- *	insert node:block before node:current
- */
-static void xgi_mem_insert_node_before(struct xgi_mem_list * list,
-				       struct xgi_mem_block * current,
-				       struct xgi_mem_block * block)
-{
-	block->prev = current->prev;
-	block->next = current;
-	current->prev = block;
-	if (current == list->head) {
-		list->head = block;
-	} else {
-		block->prev->next = block;
-	}
-}
-void xgi_mem_insert_node_head(struct xgi_mem_list * list, struct xgi_mem_block * block)
-{
-	block->next = list->head;
-	block->prev = NULL;
-
-	if (NULL == list->head) {
-		list->tail = block;
-	} else {
-		list->head->prev = block;
-	}
-	list->head = block;
-}
-
-static void xgi_mem_insert_node_tail(struct xgi_mem_list * list,
-				     struct xgi_mem_block * block)
-{
-	block->next = NULL;
-	block->prev = list->tail;
-	if (NULL == list->tail) {
-		list->head = block;
-	} else {
-		list->tail->next = block;
-	}
-	list->tail = block;
-}
-
-static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block)
-{
-	if (block == list->head) {
-		list->head = block->next;
-	}
-	if (block == list->tail) {
-		list->tail = block->prev;
-	}
-
-	if (block->prev) {
-		block->prev->next = block->next;
-	}
-	if (block->next) {
-		block->next->prev = block->prev;
-	}
-
-	block->next = block->prev = NULL;
-}
-#endif
-static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info,
-					   unsigned long originalSize)
+struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap,
+				    unsigned long originalSize,
+				    enum PcieOwner owner)
 {
 	struct xgi_mem_block *block, *free_block, *used_block;
 
 	unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
 
-	XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
+	DRM_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
 		 originalSize, size);
 
 	if (size == 0) {
-		XGI_ERROR("size == 0\n");
+		DRM_ERROR("size == 0\n");
 		return (NULL);
 	}
-	XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize);
-	if (size > xgi_fb_heap->max_freesize) {
-		XGI_ERROR
+	DRM_INFO("max_freesize: 0x%lx \n", heap->max_freesize);
+	if (size > heap->max_freesize) {
+		DRM_ERROR
 		    ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n",
-		     size, xgi_fb_heap->max_freesize);
+		     size, heap->max_freesize);
 		return (NULL);
 	}
 
-	list_for_each_entry(block, &xgi_fb_heap->free_list, list) {
-		XGI_INFO("free_list: 0x%px \n", free_list);
+	list_for_each_entry(block, &heap->free_list, list) {
+		DRM_INFO("block: 0x%px \n", block);
 		if (size <= block->size) {
 			break;
 		}
 	}
 
-	if (&block->list == &xgi_fb_heap->free_list) {
-		XGI_ERROR
+	if (&block->list == &heap->free_list) {
+		DRM_ERROR
 		    ("Can't allocate %ldk size from frame buffer memory !\n",
 		     size / 1024);
 		return (NULL);
 	}
 
 	free_block = block;
-	XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
+	DRM_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
 		 size, free_block->offset, free_block->size);
 
 	if (size == free_block->size) {
 		used_block = free_block;
-		XGI_INFO("size == free_block->size: free_block = 0x%p\n",
+		DRM_INFO("size == free_block->size: free_block = 0x%p\n",
 			 free_block);
 		list_del(&free_block->list);
 	} else {
@@ -372,7 +155,7 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info,
 		return (NULL);
 
 	if (used_block == free_block) {
-		XGI_ERROR("used_block == free_block = 0x%p\n",
+		DRM_ERROR("used_block == free_block = 0x%p\n",
 			  used_block);
 	}
 
@@ -383,14 +166,16 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info,
 		free_block->size -= size;
 	}
 
-	xgi_fb_heap->max_freesize -= size;
+	heap->max_freesize -= size;
 
-	list_add(&used_block->list, &xgi_fb_heap->used_list);
+	list_add(&used_block->list, &heap->used_list);
+	used_block->owner = owner;
 
 	return (used_block);
 }
 
-static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset)
+int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset,
+		 DRMFILE filp)
 {
 	struct xgi_mem_block *used_block = NULL, *block;
 	struct xgi_mem_block *prev, *next;
@@ -398,28 +183,32 @@ static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long
 	unsigned long upper;
 	unsigned long lower;
 
-	list_for_each_entry(block, &xgi_fb_heap->used_list, list) {
+	list_for_each_entry(block, &heap->used_list, list) {
 		if (block->offset == offset) {
 			break;
 		}
 	}
 
-	if (&block->list == &xgi_fb_heap->used_list) {
-		XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
-		return (NULL);
+	if (&block->list == &heap->used_list) {
+		DRM_ERROR("can't find block: 0x%lx to free!\n", offset);
+		return DRM_ERR(ENOENT);
+	}
+
+	if (block->filp != filp) {
+		return DRM_ERR(EPERM);
 	}
 
 	used_block = block;
-	XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",
+	DRM_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",
 		 used_block, used_block->offset, used_block->size);
 
-	xgi_fb_heap->max_freesize += used_block->size;
+	heap->max_freesize += used_block->size;
 
 	prev = next = NULL;
 	upper = used_block->offset + used_block->size;
 	lower = used_block->offset;
 
-	list_for_each_entry(block, &xgi_fb_heap->free_list, list) {
+	list_for_each_entry(block, &heap->free_list, list) {
 		if (block->offset == upper) {
 			next = block;
 		} else if ((block->offset + block->size) == lower) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
|
DRM_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
|
||||||
list_del(&used_block->list);
|
list_del(&used_block->list);
|
||||||
|
|
||||||
if (prev && next) {
|
if (prev && next) {
|
||||||
prev->size += (used_block->size + next->size);
|
prev->size += (used_block->size + next->size);
|
||||||
list_del(&next->list);
|
list_del(&next->list);
|
||||||
XGI_INFO("free node 0x%p\n", next);
|
DRM_INFO("free node 0x%p\n", next);
|
||||||
kmem_cache_free(xgi_fb_cache_block, next);
|
kmem_cache_free(xgi_mem_block_cache, next);
|
||||||
kmem_cache_free(xgi_fb_cache_block, used_block);
|
kmem_cache_free(xgi_mem_block_cache, used_block);
|
||||||
|
|
||||||
next = NULL;
|
|
||||||
used_block = NULL;
|
|
||||||
return (prev);
|
|
||||||
}
|
}
|
||||||
|
else if (prev) {
|
||||||
if (prev) {
|
|
||||||
prev->size += used_block->size;
|
prev->size += used_block->size;
|
||||||
XGI_INFO("free node 0x%p\n", used_block);
|
DRM_INFO("free node 0x%p\n", used_block);
|
||||||
kmem_cache_free(xgi_fb_cache_block, used_block);
|
kmem_cache_free(xgi_mem_block_cache, used_block);
|
||||||
used_block = NULL;
|
|
||||||
return (prev);
|
|
||||||
}
|
}
|
||||||
|
else if (next) {
|
||||||
if (next) {
|
|
||||||
next->size += used_block->size;
|
next->size += used_block->size;
|
||||||
next->offset = used_block->offset;
|
next->offset = used_block->offset;
|
||||||
XGI_INFO("free node 0x%p\n", used_block);
|
DRM_INFO("free node 0x%p\n", used_block);
|
||||||
kmem_cache_free(xgi_fb_cache_block, used_block);
|
kmem_cache_free(xgi_mem_block_cache, used_block);
|
||||||
used_block = NULL;
|
}
|
||||||
return (next);
|
else {
|
||||||
|
list_add(&used_block->list, &heap->free_list);
|
||||||
|
DRM_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
|
||||||
|
used_block, used_block->offset, used_block->size);
|
||||||
}
|
}
|
||||||
|
|
||||||
list_add(&used_block->list, &xgi_fb_heap->free_list);
|
return 0;
|
||||||
XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
|
}
|
||||||
used_block, used_block->offset, used_block->size);
|
|
||||||
|
|
||||||
return (used_block);
|
int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
|
||||||
|
DRMFILE filp)
|
||||||
|
{
|
||||||
|
struct xgi_mem_block *block;
|
||||||
|
|
||||||
|
if (alloc->is_front) {
|
||||||
|
alloc->location = XGI_MEMLOC_LOCAL;
|
||||||
|
alloc->offset = 0;
|
||||||
|
alloc->hw_addr = 0;
|
||||||
|
DRM_INFO
|
||||||
|
("Video RAM allocation on front buffer successfully! \n");
|
||||||
|
} else {
|
||||||
|
down(&info->fb_sem);
|
||||||
|
block = xgi_mem_alloc(&info->fb_heap, alloc->size, PCIE_2D);
|
||||||
|
up(&info->fb_sem);
|
||||||
|
|
||||||
|
if (block == NULL) {
|
||||||
|
alloc->location = XGI_MEMLOC_LOCAL;
|
||||||
|
alloc->size = 0;
|
||||||
|
DRM_ERROR("Video RAM allocation failed\n");
|
||||||
|
return DRM_ERR(ENOMEM);
|
||||||
|
} else {
|
||||||
|
DRM_INFO("Video RAM allocation succeeded: 0x%p\n",
|
||||||
|
(char *)block->offset);
|
||||||
|
alloc->location = XGI_MEMLOC_LOCAL;
|
||||||
|
alloc->size = block->size;
|
||||||
|
alloc->offset = block->offset;
|
||||||
|
alloc->hw_addr = block->offset;
|
||||||
|
|
||||||
|
block->filp = filp;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS)
|
||||||
|
{
|
||||||
|
DRM_DEVICE;
|
||||||
|
struct xgi_mem_alloc alloc;
|
||||||
|
struct xgi_info *info = dev->dev_private;
|
||||||
|
int err;
|
||||||
|
|
||||||
|
DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data,
|
||||||
|
sizeof(alloc));
|
||||||
|
|
||||||
|
err = xgi_fb_alloc(info, & alloc, filp);
|
||||||
|
if (err) {
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data,
|
||||||
|
alloc, sizeof(alloc));
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp)
|
||||||
|
{
|
||||||
|
int err = 0;
|
||||||
|
|
||||||
|
if (offset == 0) {
|
||||||
|
DRM_INFO("free onscreen frame buffer successfully !\n");
|
||||||
|
} else {
|
||||||
|
down(&info->fb_sem);
|
||||||
|
err = xgi_mem_free(&info->fb_heap, offset, filp);
|
||||||
|
up(&info->fb_sem);
|
||||||
|
}
|
||||||
|
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int xgi_fb_free_ioctl(DRM_IOCTL_ARGS)
|
||||||
|
{
|
||||||
|
DRM_DEVICE;
|
||||||
|
struct xgi_info *info = dev->dev_private;
|
||||||
|
u32 offset;
|
||||||
|
|
||||||
|
DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data,
|
||||||
|
sizeof(offset));
|
||||||
|
|
||||||
|
return xgi_fb_free(info, offset, filp);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int xgi_fb_heap_init(struct xgi_info * info)
|
||||||
|
{
|
||||||
|
return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START,
|
||||||
|
info->fb.size);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Free all blocks associated with a particular file handle.
|
||||||
|
*/
|
||||||
|
void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp)
|
||||||
|
{
|
||||||
|
if (!info->fb_heap.initialized) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
down(&info->fb_sem);
|
||||||
|
|
||||||
|
do {
|
||||||
|
struct xgi_mem_block *block;
|
||||||
|
|
||||||
|
list_for_each_entry(block, &info->fb_heap.used_list, list) {
|
||||||
|
if (block->filp == filp) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (&block->list == &info->fb_heap.used_list) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
(void) xgi_fb_free(info, block->offset, filp);
|
||||||
|
} while(1);
|
||||||
|
|
||||||
|
up(&info->fb_sem);
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,47 +0,0 @@
-/****************************************************************************
- * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
- * *
- * All Rights Reserved. *
- * *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation on the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- * *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- * *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
- * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- ***************************************************************************/
-
-#ifndef _XGI_FB_H_
-#define _XGI_FB_H_
-
-struct xgi_mem_block {
-	struct list_head list;
-	unsigned long offset;
-	unsigned long size;
-	atomic_t use_count;
-};
-
-struct xgi_mem_heap {
-	struct list_head free_list;
-	struct list_head used_list;
-	struct list_head sort_list;
-	unsigned long max_freesize;
-	spinlock_t lock;
-};
-
-#endif
@@ -1,490 +0,0 @@
-/****************************************************************************
- * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
- * *
- * All Rights Reserved. *
- * *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation on the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- * *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- * *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
- * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- ***************************************************************************/
-
-#ifndef _XGI_LINUX_H_
-#define _XGI_LINUX_H_
-
-#ifndef LINUX_VERSION_CODE
-#include <linux/version.h>
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-# error "This driver does not support pre-2.6 kernels!"
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-# define XGI_REMAP_PFN_RANGE_PRESENT
-#else
-# define XGI_REMAP_PAGE_RANGE_5
-#endif
-
-#if defined (CONFIG_SMP) && !defined (__SMP__)
-#define __SMP__
-#endif
-
-#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS)
-#define MODVERSIONS
-#endif
-
-#include <linux/kernel.h>	/* printk */
-#include <linux/module.h>
-
-#include <linux/init.h>		/* module_init, module_exit */
-#include <linux/types.h>	/* pic_t, size_t, __u32, etc */
-#include <linux/errno.h>	/* error codes */
-#include <linux/list.h>		/* circular linked list */
-#include <linux/stddef.h>	/* NULL, offsetof */
-#include <linux/wait.h>		/* wait queues */
-
-#include <linux/slab.h>		/* kmalloc, kfree, etc */
-#include <linux/vmalloc.h>	/* vmalloc, vfree, etc */
-
-#include <linux/poll.h>		/* poll_wait */
-#include <linux/delay.h>	/* mdelay, udelay */
-#include <asm/msr.h>		/* rdtsc rdtscl */
-
-#include <linux/sched.h>	/* suser(), capable() replacement
-				   for_each_task, for_each_process */
-#ifdef for_each_process
-#define XGI_SCAN_PROCESS(p) for_each_process(p)
-#else
-#define XGI_SCAN_PROCESS(p) for_each_task(p)
-#endif
-
-#include <linux/moduleparam.h>	/* module_param() */
-#include <linux/smp_lock.h>	/* kernel_locked */
-#include <asm/tlbflush.h>	/* flush_tlb(), flush_tlb_all() */
-#include <asm/kmap_types.h>	/* page table entry lookup */
-
-#include <linux/pci.h>		/* pci_find_class, etc */
-#include <linux/interrupt.h>	/* tasklets, interrupt helpers */
-#include <linux/timer.h>
-
-#include <asm/system.h>		/* cli, sli, save_flags */
-#include <asm/io.h>		/* ioremap, virt_to_phys */
-#include <asm/uaccess.h>	/* access_ok */
-#include <asm/page.h>		/* PAGE_OFFSET */
-#include <asm/pgtable.h>	/* pte bit definitions */
-
-#include <linux/spinlock.h>
-#include <asm/semaphore.h>
-#include <linux/highmem.h>
-
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
-
-#ifdef CONFIG_DEVFS_FS
-#include <linux/devfs_fs_kernel.h>
-#endif
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-#ifdef CONFIG_PM
-#include <linux/pm.h>
-#endif
-
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
-#ifdef CONFIG_KDB
-#include <linux/kdb.h>
-#include <asm/kdb.h>
-#endif
-
-#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE)
-#define AGPGART
-#include <linux/agp_backend.h>
-#include <linux/agpgart.h>
-#endif
-
-#ifndef MAX_ORDER
-#define MAX_ORDER 11
-#endif
-
-#ifndef module_init
-#define module_init(x) int init_module(void) { return x(); }
-#define module_exit(x) void cleanup_module(void) { x(); }
-#endif
-
-#ifndef minor
-#define minor(x) MINOR(x)
-#endif
-
-#ifndef IRQ_HANDLED
-typedef void irqreturn_t;
-#define IRQ_NONE
-#define IRQ_HANDLED
-#define IRQ_RETVAL(x)
-#endif
-
-#if !defined (list_for_each)
-#define list_for_each(pos, head) \
-	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
-	     pos = pos->next, prefetch(pos->next))
-#endif
-
-extern struct list_head pci_devices;	/* list of all devices */
-#define XGI_PCI_FOR_EACH_DEV(dev) \
-	for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next))
-
-/*
- * the following macro causes problems when used in the same module
- * as module_param(); undef it so we don't accidentally mix the two
- */
-#undef MODULE_PARM
-
-#ifdef EXPORT_NO_SYMBOLS
-EXPORT_NO_SYMBOLS;
-#endif
-
-#define XGI_IS_SUSER()                 capable(CAP_SYS_ADMIN)
-#define XGI_PCI_DEVICE_NAME(dev)       ((dev)->pretty_name)
-#define XGI_NUM_CPUS()                 num_online_cpus()
-#define XGI_CLI()                      local_irq_disable()
-#define XGI_SAVE_FLAGS(eflags)         local_save_flags(eflags)
-#define XGI_RESTORE_FLAGS(eflags)      local_irq_restore(eflags)
-#define XGI_MAY_SLEEP()                (!in_interrupt() && !in_atomic())
-#define XGI_MODULE_PARAMETER(x)        module_param(x, int, 0)
-
-
-#define XGI_PCI_DISABLE_DEVICE(dev)    pci_disable_device(dev)
-
-/* common defines */
-#define GET_MODULE_SYMBOL(mod,sym)     (const void *) inter_module_get(sym)
-#define PUT_MODULE_SYMBOL(sym)         inter_module_put((char *) sym)
-
-#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page))
-#define XGI_VMA_OFFSET(vma)            (((vma)->vm_pgoff) << PAGE_SHIFT)
-#define XGI_VMA_PRIVATE(vma)           ((vma)->vm_private_data)
-
-#define XGI_DEVICE_NUMBER(x)           minor((x)->i_rdev)
-#define XGI_IS_CONTROL_DEVICE(x)       (minor((x)->i_rdev) == 255)
-
-#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start)
-#define XGI_PCI_RESOURCE_SIZE(dev, bar)  ((dev)->resource[bar].end - (dev)->resource[bar].start + 1)
-
-#define XGI_PCI_BUS_NUMBER(dev)        (dev)->bus->number
-#define XGI_PCI_SLOT_NUMBER(dev)       PCI_SLOT((dev)->devfn)
-
-#define XGI_PCI_GET_CLASS_PRESENT
-#ifdef XGI_PCI_GET_CLASS_PRESENT
-#define XGI_PCI_DEV_PUT(dev)                   pci_dev_put(dev)
-#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from)
-#else
-#define XGI_PCI_DEV_PUT(dev)
-#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from)
-#endif
-
-/*
- * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver
- * model is not sufficient for full acpi support. it may work in some cases,
- * but not enough for us to officially support this configuration.
- */
-#if defined(CONFIG_ACPI)
-#define XGI_PM_SUPPORT_ACPI
-#endif
-
-#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
-#define XGI_PM_SUPPORT_APM
-#endif
-
-#if defined(CONFIG_DEVFS_FS)
-typedef void *devfs_handle_t;
-#define XGI_DEVFS_REGISTER(_name, _minor) \
-	({ \
-		devfs_handle_t __handle = NULL; \
-		if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \
-				  S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \
-		{ \
-			__handle = (void *) 1; /* XXX Fix me! (boolean) */ \
-		} \
-		__handle; \
-	})
-/*
-#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i)
-*/
-#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl")
-#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi")
-#endif /* defined(CONFIG_DEVFS_FS) */
-
-#define XGI_REGISTER_CHRDEV(x...)   register_chrdev(x)
-#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x)
-
-#if defined(XGI_REMAP_PFN_RANGE_PRESENT)
-#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \
-	remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)
-#elif defined(XGI_REMAP_PAGE_RANGE_5)
-#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x)
-#elif defined(XGI_REMAP_PAGE_RANGE_4)
-#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(x)
-#else
-#warning "xgi_configure.sh failed, assuming remap_page_range(5)!"
-#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x)
-#endif
-
-#if defined(pmd_offset_map)
-#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
-	{ \
-		pg_mid_dir = pmd_offset_map(pg_dir, address); \
-	}
-#define XGI_PMD_UNMAP(pg_mid_dir) \
-	{ \
-		pmd_unmap(pg_mid_dir); \
-	}
-#else
-#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
-	{ \
-		pg_mid_dir = pmd_offset(pg_dir, address); \
-	}
-#define XGI_PMD_UNMAP(pg_mid_dir)
-#endif
-
-#define XGI_PMD_PRESENT(pg_mid_dir) \
-	({ \
-		if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \
-		{ \
-			XGI_PMD_UNMAP(pg_mid_dir); \
-			pg_mid_dir = NULL; \
-		} \
-		pg_mid_dir != NULL; \
-	})
-
-#if defined(pte_offset_atomic)
-#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
-	{ \
-		pte = pte_offset_atomic(pg_mid_dir, address); \
-		XGI_PMD_UNMAP(pg_mid_dir); \
-	}
-#define XGI_PTE_UNMAP(pte) \
-	{ \
-		pte_kunmap(pte); \
-	}
-#elif defined(pte_offset)
-#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
-	{ \
-		pte = pte_offset(pg_mid_dir, address); \
-		XGI_PMD_UNMAP(pg_mid_dir); \
-	}
-#define XGI_PTE_UNMAP(pte)
-#else
-#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
-	{ \
-		pte = pte_offset_map(pg_mid_dir, address); \
-		XGI_PMD_UNMAP(pg_mid_dir); \
-	}
-#define XGI_PTE_UNMAP(pte) \
-	{ \
-		pte_unmap(pte); \
-	}
-#endif
-
-#define XGI_PTE_PRESENT(pte) \
-	({ \
-		if (pte) \
-		{ \
-			if (!pte_present(*pte)) \
-			{ \
-				XGI_PTE_UNMAP(pte); pte = NULL; \
-			} \
-		} \
-		pte != NULL; \
-	})
-
-#define XGI_PTE_VALUE(pte) \
-	({ \
unsigned long __pte_value = pte_val(*pte); \
|
|
||||||
XGI_PTE_UNMAP(pte); \
|
|
||||||
__pte_value; \
|
|
||||||
})
|
|
||||||
|
|
||||||
#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
|
|
||||||
#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1))
|
|
||||||
|
|
||||||
#if !defined (pgprot_noncached)
|
|
||||||
static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
|
|
||||||
{
|
|
||||||
pgprot_t new_prot = old_prot;
|
|
||||||
if (boot_cpu_data.x86 > 3)
|
|
||||||
new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
|
|
||||||
return new_prot;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined)
|
|
||||||
/* Added define for write combining page, only valid if pat enabled. */
|
|
||||||
#define _PAGE_WRTCOMB _PAGE_PWT
|
|
||||||
#define __PAGE_KERNEL_WRTCOMB \
|
|
||||||
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED)
|
|
||||||
#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB)
|
|
||||||
|
|
||||||
static inline pgprot_t pgprot_writecombined(pgprot_t old_prot)
|
|
||||||
{
|
|
||||||
pgprot_t new_prot = old_prot;
|
|
||||||
if (boot_cpu_data.x86 > 3) {
|
|
||||||
pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT);
|
|
||||||
new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB);
|
|
||||||
}
|
|
||||||
return new_prot;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if !defined(page_to_pfn)
|
|
||||||
#define page_to_pfn(page) ((page) - mem_map)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define XGI_VMALLOC(ptr, size) \
|
|
||||||
{ \
|
|
||||||
(ptr) = vmalloc_32(size); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define XGI_VFREE(ptr, size) \
|
|
||||||
{ \
|
|
||||||
vfree((void *) (ptr)); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define XGI_IOREMAP(ptr, physaddr, size) \
|
|
||||||
{ \
|
|
||||||
(ptr) = ioremap(physaddr, size); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \
|
|
||||||
{ \
|
|
||||||
(ptr) = ioremap_nocache(physaddr, size); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define XGI_IOUNMAP(ptr, size) \
|
|
||||||
{ \
|
|
||||||
iounmap(ptr); \
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* only use this because GFP_KERNEL may sleep..
|
|
||||||
* GFP_ATOMIC is ok, it won't sleep
|
|
||||||
*/
|
|
||||||
#define XGI_KMALLOC(ptr, size) \
|
|
||||||
{ \
|
|
||||||
(ptr) = kmalloc(size, GFP_KERNEL); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define XGI_KMALLOC_ATOMIC(ptr, size) \
|
|
||||||
{ \
|
|
||||||
(ptr) = kmalloc(size, GFP_ATOMIC); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define XGI_KFREE(ptr, size) \
|
|
||||||
{ \
|
|
||||||
kfree((void *) (ptr)); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define XGI_GET_FREE_PAGES(ptr, order) \
|
|
||||||
{ \
|
|
||||||
(ptr) = __get_free_pages(GFP_KERNEL, order); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define XGI_FREE_PAGES(ptr, order) \
|
|
||||||
{ \
|
|
||||||
free_pages(ptr, order); \
|
|
||||||
}
|
|
||||||
|
|
||||||
struct xgi_pte {
|
|
||||||
unsigned long phys_addr;
|
|
||||||
unsigned long virt_addr;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* AMD Athlon processors expose a subtle bug in the Linux
|
|
||||||
* kernel, that may lead to AGP memory corruption. Recent
|
|
||||||
* kernel versions had a workaround for this problem, but
|
|
||||||
* 2.4.20 is the first kernel to address it properly. The
|
|
||||||
* page_attr API provides the means to solve the problem.
|
|
||||||
*/
|
|
||||||
static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(struct xgi_pte * page_ptr)
|
|
||||||
{
|
|
||||||
struct page *page = virt_to_page(__va(page_ptr->phys_addr));
|
|
||||||
change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
|
|
||||||
}
|
|
||||||
static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr)
|
|
||||||
{
|
|
||||||
struct page *page = virt_to_page(__va(page_ptr->phys_addr));
|
|
||||||
change_page_attr(page, 1, PAGE_KERNEL);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* add for SUSE 9, Jill*/
|
|
||||||
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4)
|
|
||||||
#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count)
|
|
||||||
#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count)
|
|
||||||
#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count)
|
|
||||||
#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v)
|
|
||||||
#else
|
|
||||||
#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count)
|
|
||||||
#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count)
|
|
||||||
#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count)
|
|
||||||
#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v)
|
|
||||||
#endif
|
|
||||||
#define XGILockPage(page) SetPageLocked(page)
|
|
||||||
#define XGIUnlockPage(page) ClearPageLocked(page)
|
|
||||||
|
|
||||||
struct xgi_file_private {
|
|
||||||
struct xgi_info *info;
|
|
||||||
unsigned int num_events;
|
|
||||||
spinlock_t fp_lock;
|
|
||||||
wait_queue_head_t wait_queue;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define FILE_PRIVATE(filp) ((filp)->private_data)
|
|
||||||
|
|
||||||
#define XGI_GET_FP(filp) ((struct xgi_file_private *) FILE_PRIVATE(filp))
|
|
||||||
|
|
||||||
/* for the card devices */
|
|
||||||
#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info)
|
|
||||||
|
|
||||||
#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode)
|
|
||||||
|
|
||||||
#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val))
|
|
||||||
#define XGI_ATOMIC_INC(data) atomic_inc(&(data))
|
|
||||||
#define XGI_ATOMIC_DEC(data) atomic_dec(&(data))
|
|
||||||
#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data))
|
|
||||||
#define XGI_ATOMIC_READ(data) atomic_read(&(data))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* lock-related functions that should only be called from this file
|
|
||||||
*/
|
|
||||||
#define xgi_init_lock(lock) spin_lock_init(&lock)
|
|
||||||
#define xgi_lock(lock) spin_lock(&lock)
|
|
||||||
#define xgi_unlock(lock) spin_unlock(&lock)
|
|
||||||
#define xgi_down(lock) down(&lock)
|
|
||||||
#define xgi_up(lock) up(&lock)
|
|
||||||
|
|
||||||
#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags)
|
|
||||||
#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags)
|
|
||||||
|
|
||||||
#endif
|
|
|
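As a reading aid only: the pgd/pmd/pte helpers in the removed block above were normally chained together when the old driver walked a page table by hand. The sketch below is not part of the commit; the function name is made up, and the local variable must literally be called "address" because the macro bodies reference that identifier rather than their parameter.

static unsigned long xgi_example_virt_to_phys(struct mm_struct *mm,
                                              unsigned long address)
{
        pgd_t *pg_dir = pgd_offset(mm, address);        /* top-level entry */
        pmd_t *pg_mid_dir;
        pte_t *pte;

        XGI_PMD_OFFSET(address, pg_dir, pg_mid_dir);    /* may map the pmd */
        if (!XGI_PMD_PRESENT(pg_mid_dir))
                return 0;

        XGI_PTE_OFFSET(address, pg_mid_dir, pte);       /* unmaps the pmd */
        if (!XGI_PTE_PRESENT(pte))
                return 0;

        return XGI_PTE_VALUE(pte) & PAGE_MASK;          /* frame address */
}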
@ -26,17 +26,21 @@
 * DEALINGS IN THE SOFTWARE.
 ***************************************************************************/

-#include "xgi_linux.h"
#include "xgi_drv.h"
#include "xgi_regs.h"
-#include "xgi_pcie.h"

-void xgi_ge_reset(struct xgi_info * info)
+int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS)
{
+        DRM_DEVICE;
+        struct xgi_info *info = dev->dev_private;
+
        xgi_disable_ge(info);
        xgi_enable_ge(info);
+
+        return 0;
}


/*
 * irq functions
 */

@ -113,7 +117,7 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase)
        u8 old_index;
        u8 old_36;

-        XGI_INFO("Can not reset back 0x%x!\n",
+        DRM_INFO("Can not reset back 0x%x!\n",
                 ge_3d_status[0x00]);

        *(mmio_vbase + 0xb057) = 0;

@ -151,7 +155,7 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase)

bool xgi_ge_irq_handler(struct xgi_info * info)
{
-        volatile u8 *const mmio_vbase = info->mmio.vbase;
+        volatile u8 *const mmio_vbase = info->mmio_map->handle;
        volatile u32 *const ge_3d_status =
                (volatile u32 *)(mmio_vbase + 0x2800);
        const u32 int_status = ge_3d_status[4];

@ -185,7 +189,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
                        continue_int_count = 0;

                        /* GE Hung up, need reset. */
-                        XGI_INFO("Reset GE!\n");
+                        DRM_INFO("Reset GE!\n");

                        xgi_ge_hang_reset(mmio_vbase);
                }

@ -205,23 +209,23 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
bool xgi_crt_irq_handler(struct xgi_info * info)
{
        bool ret = FALSE;
-        u8 save_3ce = bReadReg(0x3ce);
+        u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);

-        if (bIn3cf(0x37) & 0x01)        // CRT1 interrupt just happened
+        if (IN3CFB(info->mmio_map, 0x37) & 0x01)        // CRT1 interrupt just happened
        {
                u8 op3cf_3d;
                u8 op3cf_37;

                // What happened?
-                op3cf_37 = bIn3cf(0x37);
+                op3cf_37 = IN3CFB(info->mmio_map, 0x37);

                // Clear CRT interrupt
-                op3cf_3d = bIn3cf(0x3d);
-                bOut3cf(0x3d, (op3cf_3d | 0x04));
-                bOut3cf(0x3d, (op3cf_3d & ~0x04));
+                op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
+                OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
+                OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
                ret = TRUE;
        }
-        bWriteReg(0x3ce, save_3ce);
+        DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);

        return (ret);
}

@ -229,36 +233,36 @@ bool xgi_crt_irq_handler(struct xgi_info * info)
bool xgi_dvi_irq_handler(struct xgi_info * info)
{
        bool ret = FALSE;
-        u8 save_3ce = bReadReg(0x3ce);
+        const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);

-        if (bIn3cf(0x38) & 0x20)        // DVI interrupt just happened
-        {
+        if (IN3CFB(info->mmio_map, 0x38) & 0x20) {      // DVI interrupt just happened
+                const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
                u8 op3cf_39;
                u8 op3cf_37;
                u8 op3x5_5a;
-                u8 save_3x4 = bReadReg(0x3d4);;

                // What happened?
-                op3cf_37 = bIn3cf(0x37);
+                op3cf_37 = IN3CFB(info->mmio_map, 0x37);

                //Notify BIOS that DVI plug/unplug happened
-                op3x5_5a = bIn3x5(0x5a);
-                bOut3x5(0x5a, op3x5_5a & 0xf7);
+                op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
+                OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);

-                bWriteReg(0x3d4, save_3x4);
+                DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);

                // Clear DVI interrupt
-                op3cf_39 = bIn3cf(0x39);
-                bOut3c5(0x39, (op3cf_39 & ~0x01));      //Set 3cf.39 bit 0 to 0
-                bOut3c5(0x39, (op3cf_39 | 0x01));       //Set 3cf.39 bit 0 to 1
+                op3cf_39 = IN3CFB(info->mmio_map, 0x39);
+                OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));      //Set 3cf.39 bit 0 to 0
+                OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));       //Set 3cf.39 bit 0 to 1

                ret = TRUE;
        }
-        bWriteReg(0x3ce, save_3ce);
+        DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);

        return (ret);
}


void xgi_dump_register(struct xgi_info * info)
{
        int i, j;

@ -281,7 +285,7 @@ void xgi_dump_register(struct xgi_info * info)
                        printk("%1x ", i);

                for (j = 0; j < 0x10; j++) {
-                        temp = bIn3c5(i * 0x10 + j);
+                        temp = IN3C5B(info->mmio_map, i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");

@ -303,7 +307,7 @@ void xgi_dump_register(struct xgi_info * info)
                        printk("%1x ", i);

                for (j = 0; j < 0x10; j++) {
-                        temp = bIn3x5(i * 0x10 + j);
+                        temp = IN3X5B(info->mmio_map, i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");

@ -325,7 +329,7 @@ void xgi_dump_register(struct xgi_info * info)
                        printk("%1x ", i);

                for (j = 0; j < 0x10; j++) {
-                        temp = bIn3cf(i * 0x10 + j);
+                        temp = IN3CFB(info->mmio_map, i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");

@ -346,7 +350,7 @@ void xgi_dump_register(struct xgi_info * info)
                        printk("%1x ", i);

                for (j = 0; j < 0x10; j++) {
-                        temp = bReadReg(0xB000 + i * 0x10 + j);
+                        temp = DRM_READ8(info->mmio_map, 0xB000 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");

@ -366,7 +370,7 @@ void xgi_dump_register(struct xgi_info * info)
                        printk("%1x ", i);

                for (j = 0; j < 0x10; j++) {
-                        temp = bReadReg(0x2200 + i * 0x10 + j);
+                        temp = DRM_READ8(info->mmio_map, 0x2200 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");

@ -386,7 +390,7 @@ void xgi_dump_register(struct xgi_info * info)
                        printk("%1x ", i);

                for (j = 0; j < 0x10; j++) {
-                        temp = bReadReg(0x2300 + i * 0x10 + j);
+                        temp = DRM_READ8(info->mmio_map, 0x2300 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");

@ -406,7 +410,7 @@ void xgi_dump_register(struct xgi_info * info)
                        printk("%1x ", i);

                for (j = 0; j < 0x10; j++) {
-                        temp = bReadReg(0x2400 + i * 0x10 + j);
+                        temp = DRM_READ8(info->mmio_map, 0x2400 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");

@ -426,17 +430,34 @@ void xgi_dump_register(struct xgi_info * info)
                        printk("%1x ", i);

                for (j = 0; j < 0x10; j++) {
-                        temp = bReadReg(0x2800 + i * 0x10 + j);
+                        temp = DRM_READ8(info->mmio_map, 0x2800 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
        }
}

-void xgi_restore_registers(struct xgi_info * info)
+
+int xgi_dump_register_ioctl(DRM_IOCTL_ARGS)
{
-        bOut3x5(0x13, 0);
-        bOut3x5(0x8b, 2);
+        DRM_DEVICE;
+        struct xgi_info *info = dev->dev_private;
+
+        xgi_dump_register(info);
+
+        return 0;
+}
+
+
+int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS)
+{
+        DRM_DEVICE;
+        struct xgi_info *info = dev->dev_private;
+
+        OUT3X5B(info->mmio_map, 0x13, 0);
+        OUT3X5B(info->mmio_map, 0x8b, 2);
+
+        return 0;
}

void xgi_waitfor_pci_idle(struct xgi_info * info)

@ -446,60 +467,10 @@ void xgi_waitfor_pci_idle(struct xgi_info * info)
        int idleCount = 0;
        while (idleCount < 5) {
-                if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) {
+                if (DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) & IDLE_MASK) {
                        idleCount = 0;
                } else {
                        idleCount++;
                }
        }
}
-
-
-/*memory collect function*/
-extern struct list_head xgi_mempid_list;
-void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt)
-{
-        struct xgi_mem_pid *block;
-        struct xgi_mem_pid *next;
-        struct task_struct *p, *find;
-        unsigned int cnt = 0;
-
-        list_for_each_entry_safe(block, next, &xgi_mempid_list, list) {
-
-                find = NULL;
-                XGI_SCAN_PROCESS(p) {
-                        if (p->pid == block->pid) {
-                                XGI_INFO
-                                    ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n",
-                                     block->pid, p->state,
-                                     block->location,
-                                     block->bus_addr);
-                                find = p;
-                                if (block->bus_addr == 0xFFFFFFFF)
-                                        ++cnt;
-                                break;
-                        }
-                }
-                if (!find) {
-                        if (block->location == XGI_MEMLOC_LOCAL) {
-                                XGI_INFO
-                                    ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n",
-                                     block->pid, block->bus_addr);
-                                xgi_fb_free(info, block->bus_addr);
-                        } else if (block->bus_addr != 0xFFFFFFFF) {
-                                XGI_INFO
-                                    ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n",
-                                     block->pid, block->bus_addr);
-                                xgi_pcie_free(info, block->bus_addr);
-                        } else {
-                                /*only delete the memory block */
-                                list_del(&block->list);
-                                XGI_INFO
-                                    ("Memory ProcessID delete one pcie block pid:%ld successfully! \n",
-                                     block->pid);
-                                kfree(block);
-                        }
-                }
-        }
-        *pcnt = cnt;
-}
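Note that xgi_ge_reset(), xgi_dump_register() and xgi_restore_registers() are now exported as DRM-style handlers taking DRM_IOCTL_ARGS and recovering the device through DRM_DEVICE / dev->dev_private. The matching declarations are not visible in this hunk; a sketch of what they are expected to look like (their exact home, presumably xgi_drv.h, is an assumption):

/* Sketch only -- declarations assumed to live in xgi_drv.h. */
extern int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS);
extern int xgi_dump_register_ioctl(DRM_IOCTL_ARGS);
extern int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS);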
@ -30,9 +30,7 @@
#define _XGI_MISC_H_

extern void xgi_dump_register(struct xgi_info * info);
-extern void xgi_ge_reset(struct xgi_info * info);

-extern void xgi_restore_registers(struct xgi_info * info);
extern bool xgi_ge_irq_handler(struct xgi_info * info);
extern bool xgi_crt_irq_handler(struct xgi_info * info);
extern bool xgi_dvi_irq_handler(struct xgi_info * info);
File diff suppressed because it is too large
@ -1,68 +0,0 @@
/****************************************************************************
 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
 *                                                                          *
 * All Rights Reserved.                                                     *
 *                                                                          *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation on the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *                                                                          *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *                                                                          *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
 * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 ***************************************************************************/

#ifndef _XGI_PCIE_H_
#define _XGI_PCIE_H_

#ifndef XGI_PCIE_ALLOC_MAX_ORDER
#define XGI_PCIE_ALLOC_MAX_ORDER        1       /* 8K in Kernel 2.4.* */
#endif

struct xgi_page_block {
        struct xgi_page_block *next;
        unsigned long phys_addr;
        unsigned long virt_addr;
        unsigned long page_count;
        unsigned long page_order;
};

struct xgi_pcie_block {
        struct list_head list;
        unsigned long offset;   /* block's offset in pcie memory, begin from 0 */
        unsigned long size;     /* The block size. */
        unsigned long bus_addr; /* CPU access address/bus address */
        unsigned long hw_addr;  /* GE access address */

        unsigned long page_count;
        unsigned long page_order;
        struct xgi_page_block *page_block;
        struct xgi_pte *page_table;     /* list of physical pages allocated */

        atomic_t use_count;
        enum PcieOwner owner;
        unsigned long processID;
};

struct xgi_pcie_heap {
        struct list_head free_list;
        struct list_head used_list;
        struct list_head sort_list;
        unsigned long max_freesize;
};

#endif
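For context on the structures being removed: xgi_pcie_heap is ordinary kernel list bookkeeping, and the old allocator moved blocks between free_list and used_list. A minimal sketch of that movement, not code from this tree, assuming the caller already holds whatever lock guards the heap:

static void xgi_example_mark_used(struct xgi_pcie_heap *heap,
                                  struct xgi_pcie_block *block)
{
        list_del(&block->list);                   /* off the free list */
        list_add(&block->list, &heap->used_list); /* onto the used list */
        atomic_set(&block->use_count, 1);
}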
@ -29,269 +29,100 @@
#ifndef _XGI_REGS_H_
#define _XGI_REGS_H_

-#ifndef XGI_MMIO
-#define XGI_MMIO 1
-#endif
+#include "drmP.h"
+#include "drm.h"

-#if XGI_MMIO
-#define OUTB(port, value)       writeb(value, info->mmio.vbase + port)
-#define INB(port)               readb(info->mmio.vbase + port)
-#define OUTW(port, value)       writew(value, info->mmio.vbase + port)
-#define INW(port)               readw(info->mmio.vbase + port)
-#define OUTDW(port, value)      writel(value, info->mmio.vbase + port)
-#define INDW(port)              readl(info->mmio.vbase + port)
-#else
-#define OUTB(port, value)       outb(value, port)
-#define INB(port)               inb(port)
-#define OUTW(port, value)       outw(value, port)
-#define INW(port)               inw(port)
-#define OUTDW(port, value)      outl(value, port)
-#define INDW(port)              inl(port)
-#endif
-
/* Hardware access functions */
-static inline void OUT3C5B(struct xgi_info * info, u8 index, u8 data)
+static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data)
{
-        OUTB(0x3C4, index);
-        OUTB(0x3C5, data);
+        DRM_WRITE8(map, 0x3C4, index);
+        DRM_WRITE8(map, 0x3C5, data);
}

-static inline void OUT3X5B(struct xgi_info * info, u8 index, u8 data)
+static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data)
{
-        OUTB(0x3D4, index);
-        OUTB(0x3D5, data);
+        DRM_WRITE8(map, 0x3D4, index);
+        DRM_WRITE8(map, 0x3D5, data);
}

-static inline void OUT3CFB(struct xgi_info * info, u8 index, u8 data)
+static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data)
{
-        OUTB(0x3CE, index);
-        OUTB(0x3CF, data);
+        DRM_WRITE8(map, 0x3CE, index);
+        DRM_WRITE8(map, 0x3CF, data);
}

-static inline u8 IN3C5B(struct xgi_info * info, u8 index)
+static inline u8 IN3C5B(struct drm_map * map, u8 index)
{
-        volatile u8 data = 0;
-        OUTB(0x3C4, index);
-        data = INB(0x3C5);
-        return data;
+        DRM_WRITE8(map, 0x3C4, index);
+        return DRM_READ8(map, 0x3C5);
}

-static inline u8 IN3X5B(struct xgi_info * info, u8 index)
+static inline u8 IN3X5B(struct drm_map * map, u8 index)
{
-        volatile u8 data = 0;
-        OUTB(0x3D4, index);
-        data = INB(0x3D5);
-        return data;
+        DRM_WRITE8(map, 0x3D4, index);
+        return DRM_READ8(map, 0x3D5);
}

-static inline u8 IN3CFB(struct xgi_info * info, u8 index)
+static inline u8 IN3CFB(struct drm_map * map, u8 index)
{
-        volatile u8 data = 0;
-        OUTB(0x3CE, index);
-        data = INB(0x3CF);
-        return data;
+        DRM_WRITE8(map, 0x3CE, index);
+        return DRM_READ8(map, 0x3CF);
}

-static inline void OUT3C5W(struct xgi_info * info, u8 index, u16 data)
-{
-        OUTB(0x3C4, index);
-        OUTB(0x3C5, data);
-}
-
-static inline void OUT3X5W(struct xgi_info * info, u8 index, u16 data)
-{
-        OUTB(0x3D4, index);
-        OUTB(0x3D5, data);
-}
-
-static inline void OUT3CFW(struct xgi_info * info, u8 index, u8 data)
-{
-        OUTB(0x3CE, index);
-        OUTB(0x3CF, data);
-}
-
-static inline u8 IN3C5W(struct xgi_info * info, u8 index)
-{
-        volatile u8 data = 0;
-        OUTB(0x3C4, index);
-        data = INB(0x3C5);
-        return data;
-}
-
-static inline u8 IN3X5W(struct xgi_info * info, u8 index)
-{
-        volatile u8 data = 0;
-        OUTB(0x3D4, index);
-        data = INB(0x3D5);
-        return data;
-}
-
-static inline u8 IN3CFW(struct xgi_info * info, u8 index)
-{
-        volatile u8 data = 0;
-        OUTB(0x3CE, index);
-        data = INB(0x3CF);
-        return data;
-}
-
-static inline u8 readAttr(struct xgi_info * info, u8 index)
-{
-        INB(0x3DA);             /* flip-flop to index */
-        OUTB(0x3C0, index);
-        return INB(0x3C1);
-}
-
-static inline void writeAttr(struct xgi_info * info, u8 index, u8 value)
-{
-        INB(0x3DA);             /* flip-flop to index */
-        OUTB(0x3C0, index);
-        OUTB(0x3C0, value);
-}
-
/*
 * Graphic engine register (2d/3d) acessing interface
 */
-static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data)
+static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
{
-        XGI_INFO("mmio vbase = 0x%p, addr = 0x%x, data = 0x%x\n",
-                 info->mmio->vbase, addr, data);
+        DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
+                 map->handle, addr, data);

-        *(volatile u32 *)(info->mmio.vbase + addr) = (data);
+        DRM_WRITE32(map, addr, data);
}

-static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data)
-{
-        *(volatile u16 *)(info->mmio.vbase + addr) = (data);
-}
-
-static inline void WriteRegByte(struct xgi_info * info, u32 addr, u8 data)
-{
-        *(volatile u8 *)(info->mmio.vbase + addr) = (data);
-}
-
-static inline u32 ReadRegDWord(struct xgi_info * info, u32 addr)
-{
-        volatile u32 data;
-        data = *(volatile u32 *)(info->mmio.vbase + addr);
-        return data;
-}
-
-static inline u16 ReadRegWord(struct xgi_info * info, u32 addr)
-{
-        volatile u16 data;
-        data = *(volatile u16 *)(info->mmio.vbase + addr);
-        return data;
-}
-
-static inline u8 ReadRegByte(struct xgi_info * info, u32 addr)
-{
-        volatile u8 data;
-        data = *(volatile u8 *)(info->mmio.vbase + addr);
-        return data;
-}
-
-#if 0
-extern void OUT3C5B(struct xgi_info * info, u8 index, u8 data);
-extern void OUT3X5B(struct xgi_info * info, u8 index, u8 data);
-extern void OUT3CFB(struct xgi_info * info, u8 index, u8 data);
-extern u8 IN3C5B(struct xgi_info * info, u8 index);
-extern u8 IN3X5B(struct xgi_info * info, u8 index);
-extern u8 IN3CFB(struct xgi_info * info, u8 index);
-extern void OUT3C5W(struct xgi_info * info, u8 index, u8 data);
-extern void OUT3X5W(struct xgi_info * info, u8 index, u8 data);
-extern void OUT3CFW(struct xgi_info * info, u8 index, u8 data);
-extern u8 IN3C5W(struct xgi_info * info, u8 index);
-extern u8 IN3X5W(struct xgi_info * info, u8 index);
-extern u8 IN3CFW(struct xgi_info * info, u8 index);
-
-extern void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data);
-extern void WriteRegWord(struct xgi_info * info, u32 addr, u16 data);
-extern void WriteRegByte(struct xgi_info * info, u32 addr, u8 data);
-extern u32 ReadRegDWord(struct xgi_info * info, u32 addr);
-extern u16 ReadRegWord(struct xgi_info * info, u32 addr);
-extern u8 ReadRegByte(struct xgi_info * info, u32 addr);
-
-extern void EnableProtect();
-extern void DisableProtect();
-#endif
-
-#define Out(port, data)         OUTB(port, data)
-#define bOut(port, data)        OUTB(port, data)
-#define wOut(port, data)        OUTW(port, data)
-#define dwOut(port, data)       OUTDW(port, data)
-
-#define Out3x5(index, data)     OUT3X5B(info, index, data)
-#define bOut3x5(index, data)    OUT3X5B(info, index, data)
-#define wOut3x5(index, data)    OUT3X5W(info, index, data)
-
-#define Out3c5(index, data)     OUT3C5B(info, index, data)
-#define bOut3c5(index, data)    OUT3C5B(info, index, data)
-#define wOut3c5(index, data)    OUT3C5W(info, index, data)
-
-#define Out3cf(index, data)     OUT3CFB(info, index, data)
-#define bOut3cf(index, data)    OUT3CFB(info, index, data)
-#define wOut3cf(index, data)    OUT3CFW(info, index, data)
-
-#define In(port)                INB(port)
-#define bIn(port)               INB(port)
-#define wIn(port)               INW(port)
-#define dwIn(port)              INDW(port)
-
-#define In3x5(index)            IN3X5B(info, index)
-#define bIn3x5(index)           IN3X5B(info, index)
-#define wIn3x5(index)           IN3X5W(info, index)
-
-#define In3c5(index)            IN3C5B(info, index)
-#define bIn3c5(index)           IN3C5B(info, index)
-#define wIn3c5(index)           IN3C5W(info, index)
-
-#define In3cf(index)            IN3CFB(info, index)
-#define bIn3cf(index)           IN3CFB(info, index)
-#define wIn3cf(index)           IN3CFW(info, index)
-
-#define dwWriteReg(addr, data)  WriteRegDWord(info, addr, data)
-#define wWriteReg(addr, data)   WriteRegWord(info, addr, data)
-#define bWriteReg(addr, data)   WriteRegByte(info, addr, data)
-#define dwReadReg(addr)         ReadRegDWord(info, addr)
-#define wReadReg(addr)          ReadRegWord(info, addr)
-#define bReadReg(addr)          ReadRegByte(info, addr)
-
static inline void xgi_enable_mmio(struct xgi_info * info)
{
        u8 protect = 0;
+        u8 temp;

        /* Unprotect registers */
-        outb(0x11, 0x3C4);
-        protect = inb(0x3C5);
-        outb(0x92, 0x3C5);
+        DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
+        protect = DRM_READ8(info->mmio_map, 0x3C5);
+        DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);

-        outb(0x3A, 0x3D4);
-        outb(inb(0x3D5) | 0x20, 0x3D5);
+        DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
+        temp = DRM_READ8(info->mmio_map, 0x3D5);
+        DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);

        /* Enable MMIO */
-        outb(0x39, 0x3D4);
-        outb(inb(0x3D5) | 0x01, 0x3D5);
+        DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
+        temp = DRM_READ8(info->mmio_map, 0x3D5);
+        DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);

-        OUTB(0x3C4, 0x11);
-        OUTB(0x3C5, protect);
+        /* Protect registers */
+        OUT3C5B(info->mmio_map, 0x11, protect);
}

static inline void xgi_disable_mmio(struct xgi_info * info)
{
        u8 protect = 0;
+        u8 temp;

-        /* unprotect registers */
-        OUTB(0x3C4, 0x11);
-        protect = INB(0x3C5);
-        OUTB(0x3C5, 0x92);
+        /* Unprotect registers */
+        DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
+        protect = DRM_READ8(info->mmio_map, 0x3C5);
+        DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);

        /* Disable MMIO access */
-        OUTB(0x3D4, 0x39);
-        OUTB(0x3D5, INB(0x3D5) & 0xFE);
+        DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
+        temp = DRM_READ8(info->mmio_map, 0x3D5);
+        DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);

        /* Protect registers */
-        outb(0x11, 0x3C4);
-        outb(protect, 0x3C5);
+        OUT3C5B(info->mmio_map, 0x11, protect);
}

@ -300,36 +131,36 @@ static inline void xgi_enable_ge(struct xgi_info * info)
        int wait = 0;

        // Enable GE
-        OUTW(0x3C4, 0x9211);
+        DRM_WRITE16(info->mmio_map, 0x3C4, 0x9211);

        // Save and close dynamic gating
-        bOld3cf2a = bIn3cf(0x2a);
-        bOut3cf(0x2a, bOld3cf2a & 0xfe);
+        bOld3cf2a = IN3CFB(info->mmio_map, 0x2a);
+        OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a & 0xfe);

        // Reset both 3D and 2D engine
-        bOut3x5(0x36, 0x84);
+        OUT3X5B(info->mmio_map, 0x36, 0x84);
        wait = 10;
        while (wait--) {
-                bIn(0x36);
+                DRM_READ8(info->mmio_map, 0x36);
        }
-        bOut3x5(0x36, 0x94);
+        OUT3X5B(info->mmio_map, 0x36, 0x94);
        wait = 10;
        while (wait--) {
-                bIn(0x36);
+                DRM_READ8(info->mmio_map, 0x36);
        }
-        bOut3x5(0x36, 0x84);
+        OUT3X5B(info->mmio_map, 0x36, 0x84);
        wait = 10;
        while (wait--) {
-                bIn(0x36);
+                DRM_READ8(info->mmio_map, 0x36);
        }
        // Enable 2D engine only
-        bOut3x5(0x36, 0x80);
+        OUT3X5B(info->mmio_map, 0x36, 0x80);

        // Enable 2D+3D engine
-        bOut3x5(0x36, 0x84);
+        OUT3X5B(info->mmio_map, 0x36, 0x84);

        // Restore dynamic gating
-        bOut3cf(0x2a, bOld3cf2a);
+        OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a);
}

@ -337,50 +168,50 @@ static inline void xgi_disable_ge(struct xgi_info * info)
        int wait = 0;

        // Reset both 3D and 2D engine
-        bOut3x5(0x36, 0x84);
+        OUT3X5B(info->mmio_map, 0x36, 0x84);

        wait = 10;
        while (wait--) {
-                bIn(0x36);
+                DRM_READ8(info->mmio_map, 0x36);
        }
-        bOut3x5(0x36, 0x94);
+        OUT3X5B(info->mmio_map, 0x36, 0x94);

        wait = 10;
        while (wait--) {
-                bIn(0x36);
+                DRM_READ8(info->mmio_map, 0x36);
        }
-        bOut3x5(0x36, 0x84);
+        OUT3X5B(info->mmio_map, 0x36, 0x84);

        wait = 10;
        while (wait--) {
-                bIn(0x36);
+                DRM_READ8(info->mmio_map, 0x36);
        }

        // Disable 2D engine only
-        bOut3x5(0x36, 0);
+        OUT3X5B(info->mmio_map, 0x36, 0);
}

static inline void xgi_enable_dvi_interrupt(struct xgi_info * info)
{
-        Out3cf(0x39, In3cf(0x39) & ~0x01);      //Set 3cf.39 bit 0 to 0
-        Out3cf(0x39, In3cf(0x39) | 0x01);       //Set 3cf.39 bit 0 to 1
-        Out3cf(0x39, In3cf(0x39) | 0x02);
+        OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x01);    //Set 3cf.39 bit 0 to 0
+        OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x01);     //Set 3cf.39 bit 0 to 1
+        OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x02);
}
static inline void xgi_disable_dvi_interrupt(struct xgi_info * info)
{
-        Out3cf(0x39, In3cf(0x39) & ~0x02);
+        OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x02);
}

static inline void xgi_enable_crt1_interrupt(struct xgi_info * info)
{
-        Out3cf(0x3d, In3cf(0x3d) | 0x04);
-        Out3cf(0x3d, In3cf(0x3d) & ~0x04);
-        Out3cf(0x3d, In3cf(0x3d) | 0x08);
+        OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x04);
+        OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x04);
+        OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x08);
}

static inline void xgi_disable_crt1_interrupt(struct xgi_info * info)
{
-        Out3cf(0x3d, In3cf(0x3d) & ~0x08);
+        OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x08);
}

#endif
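With the helpers above now keyed on a struct drm_map rather than on struct xgi_info, any code holding info->mmio_map can do indexed read-modify-write cycles directly. A small usage sketch (the register index 0x36 is just the GE-control example already used above; the helper name is made up):

static void xgi_example_rmw_crtc36(struct xgi_info * info, u8 set, u8 clear)
{
        u8 tmp = IN3X5B(info->mmio_map, 0x36);  /* read extended CRTC 0x36 */

        tmp = (tmp & ~clear) | set;             /* modify */
        OUT3X5B(info->mmio_map, 0x36, tmp);     /* write back */
}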
@ -44,6 +44,15 @@ struct drm_xgi_sarea {
        unsigned int scrn_pitch;
};

+
+struct xgi_bootstrap {
+        /**
+         * Size of PCI-e GART range in megabytes.
+         */
+        unsigned int gart_size;
+};
+
+
enum xgi_mem_location {
        XGI_MEMLOC_NON_LOCAL = 0,
        XGI_MEMLOC_LOCAL = 1,

@ -62,9 +71,9 @@ struct xgi_mem_alloc {
        __u32 hw_addr;

        /**
-         * Physical address of the memory from the processor's point of view.
+         * Offset of the allocation in the mapping.
         */
-        unsigned long bus_addr;
+        unsigned long offset;
};

enum xgi_batch_type {

@ -95,38 +104,31 @@ struct xgi_state_info {
 * Ioctl definitions
 */

-#define XGI_IOCTL_MAGIC         'x'     /* use 'x' as magic number */
-
-#define XGI_IOCTL_BASE          0
-#define XGI_ESC_POST_VBIOS      (XGI_IOCTL_BASE + 0)
-
-#define XGI_ESC_FB_ALLOC        (XGI_IOCTL_BASE + 1)
-#define XGI_ESC_FB_FREE         (XGI_IOCTL_BASE + 2)
-#define XGI_ESC_PCIE_ALLOC      (XGI_IOCTL_BASE + 3)
-#define XGI_ESC_PCIE_FREE       (XGI_IOCTL_BASE + 4)
-#define XGI_ESC_SUBMIT_CMDLIST  (XGI_IOCTL_BASE + 5)
-#define XGI_ESC_GE_RESET        (XGI_IOCTL_BASE + 6)
-#define XGI_ESC_DUMP_REGISTER   (XGI_IOCTL_BASE + 7)
-#define XGI_ESC_DEBUG_INFO      (XGI_IOCTL_BASE + 8)
-#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 9)
-#define XGI_ESC_STATE_CHANGE    (XGI_IOCTL_BASE + 10)
-
-#define XGI_IOCTL_POST_VBIOS      _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS)
-
-#define XGI_IOCTL_FB_ALLOC        _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc)
-#define XGI_IOCTL_FB_FREE         _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long)
-
-#define XGI_IOCTL_PCIE_ALLOC      _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc)
-#define XGI_IOCTL_PCIE_FREE       _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long)
-
-#define XGI_IOCTL_GE_RESET        _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET)
-#define XGI_IOCTL_DUMP_REGISTER   _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER)
-#define XGI_IOCTL_DEBUG_INFO      _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO)
-
-#define XGI_IOCTL_SUBMIT_CMDLIST  _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info)
-#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long)
-#define XGI_IOCTL_STATE_CHANGE    _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info)
-
-#define XGI_IOCTL_MAXNR 30
+#define DRM_XGI_BOOTSTRAP       0
+#define DRM_XGI_FB_ALLOC        1
+#define DRM_XGI_FB_FREE         2
+#define DRM_XGI_PCIE_ALLOC      3
+#define DRM_XGI_PCIE_FREE       4
+#define DRM_XGI_SUBMIT_CMDLIST  5
+#define DRM_XGI_GE_RESET        6
+#define DRM_XGI_DUMP_REGISTER   7
+#define DRM_XGI_DEBUG_INFO      8
+#define DRM_XGI_TEST_RWINKERNEL 9
+#define DRM_XGI_STATE_CHANGE    10
+
+#define XGI_IOCTL_BOOTSTRAP       DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
+
+#define XGI_IOCTL_FB_ALLOC        DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_FB_ALLOC, struct xgi_mem_alloc)
+#define XGI_IOCTL_FB_FREE         DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FB_FREE, __u32)
+
+#define XGI_IOCTL_PCIE_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_PCIE_ALLOC, struct xgi_mem_alloc)
+#define XGI_IOCTL_PCIE_FREE       DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_PCIE_FREE, __u32)
+
+#define XGI_IOCTL_GE_RESET        DRM_IO(DRM_COMMAND_BASE + DRM_XGI_GE_RESET)
+#define XGI_IOCTL_DUMP_REGISTER   DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DUMP_REGISTER)
+#define XGI_IOCTL_DEBUG_INFO      DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DEBUG_INFO)
+#define XGI_IOCTL_SUBMIT_CMDLIST  DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
+#define XGI_IOCTL_TEST_RWINKERNEL DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_TEST_RWINKERNEL, __u32)
+#define XGI_IOCTL_STATE_CHANGE    DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)

#endif /* _XGI_DRM_H_ */
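Because the commands are now DRM_COMMAND_BASE-relative, user space reaches them through libdrm's drmCommand helpers instead of the old private magic number. A hedged sketch of a caller (error handling trimmed; the 16 MB GART size is an arbitrary example value):

#include <xf86drm.h>
#include "xgi_drm.h"

int xgi_example_bootstrap_and_reset(int fd)
{
        struct xgi_bootstrap bs = { .gart_size = 16 };  /* example: 16 MB */

        if (drmCommandWrite(fd, DRM_XGI_BOOTSTRAP, &bs, sizeof(bs)))
                return -1;                              /* bootstrap failed */

        return drmCommandNone(fd, DRM_XGI_GE_RESET);    /* reset the GE */
}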