Sync with 2.4.0-test8-pre5 kernel.

main
Gareth Hughes 2000-09-06 20:56:34 +00:00
parent e3e2d66131
commit 7db6449142
32 changed files with 847 additions and 804 deletions
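Most of this sync is mechanical: the old copy_from_user_ret()/copy_to_user_ret() helpers are replaced by open-coded checks in every DRM ioctl handler, since copy_from_user() already reports failure by returning the number of bytes it could not copy. A minimal sketch of the conversion follows; the macro definition is the approximate 2.2-era <asm/uaccess.h> form and example_lock_ioctl() is a hypothetical handler, while the copy_from_user() call itself mirrors the lock ioctls in the diff below.

    /* Sketch only, not part of the commit.  Assumes the usual DRM driver
     * header ("drmP.h") for drm_lock_t and the user-copy routines. */
    #include "drmP.h"

    #ifndef copy_from_user_ret
    /* Approximate old helper that the drivers stop relying on: */
    #define copy_from_user_ret(to, from, n, retval) \
            ({ if (copy_from_user(to, from, n)) return retval; })
    #endif

    static int example_lock_ioctl(unsigned long arg)
    {
            drm_lock_t lock;

            /* Old style, removed by this commit:
             *     copy_from_user_ret(&lock, (drm_lock_t *)arg,
             *                        sizeof(lock), -EFAULT);
             */

            /* New open-coded style used throughout the diff: any
             * non-zero return from copy_from_user() means a fault. */
            if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
                    return -EFAULT;

            return 0;
    }

The same treatment is applied to the per-driver DRM_COPY() version macros. A few related cleanups ride along: the driver date strings move to "20000906", the hand-rolled i386 mga_flush_write_combine() is redefined as a plain mb() barrier so its #ifdef __i386__ call sites disappear, and the tdfx signal-blocking calls lose their LINUX_VERSION_CODE guards.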

View File

@ -490,8 +490,8 @@ int i810_dma_init(struct inode *inode, struct file *filp,
drm_i810_init_t init;
int retcode = 0;
copy_from_user_ret(&init, (drm_i810_init_t *)arg,
sizeof(init), -EFAULT);
if (copy_from_user(&init, (drm_i810_init_t *)arg, sizeof(init)))
return -EFAULT;
switch(init.func) {
case I810_INIT_DMA:
@ -1005,7 +1005,8 @@ int i810_control(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG( "i810_control\n");
copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
return -EFAULT;
switch (ctl.func) {
case DRM_INST_HANDLER:
@ -1178,7 +1179,8 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
int ret = 0;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -1275,8 +1277,8 @@ int i810_dma_vertex(struct inode *inode, struct file *filp,
dev_priv->sarea_priv;
drm_i810_vertex_t vertex;
copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex),
-EFAULT);
if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_dma_vertex called without lock held\n");
@ -1307,8 +1309,8 @@ int i810_clear_bufs(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_i810_clear_t clear;
copy_from_user_ret(&clear, (drm_i810_clear_t *)arg, sizeof(clear),
-EFAULT);
if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_clear_bufs called without lock held\n");
@ -1365,7 +1367,8 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
dev_priv->sarea_priv;
DRM_DEBUG("getbuf\n");
copy_from_user_ret(&d, (drm_i810_dma_t *)arg, sizeof(d), -EFAULT);
if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_dma called without lock held\n");
@ -1379,7 +1382,8 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
current->pid, retcode, d.granted);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
return -EFAULT;
sarea_priv->last_dispatch = (int) hw_status[5];
return retcode;
@ -1404,14 +1408,16 @@ int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd,
return -EINVAL;
}
copy_from_user_ret(&d, (drm_i810_copy_t *)arg, sizeof(d), -EFAULT);
if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d)))
return -EFAULT;
if(d.idx > dma->buf_count) return -EINVAL;
buf = dma->buflist[ d.idx ];
buf_priv = buf->dev_private;
if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM;
copy_from_user_ret(buf_priv->virtual, d.address, d.used, -EFAULT);
if (copy_from_user(buf_priv->virtual, d.address, d.used))
return -EFAULT;
sarea_priv->last_dispatch = (int) hw_status[5];

View File

@ -35,7 +35,7 @@
#define I810_NAME "i810"
#define I810_DESC "Intel I810"
#define I810_DATE "20000719"
#define I810_DATE "20000906"
#define I810_MAJOR 1
#define I810_MINOR 1
#define I810_PATCHLEVEL 0
@ -428,17 +428,18 @@ int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = I810_MAJOR;
@ -449,10 +450,10 @@ int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, I810_DATE);
DRM_COPY(version.desc, I810_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
@ -616,7 +617,8 @@ int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",

View File

@ -36,7 +36,7 @@
#define MGA_NAME "mga"
#define MGA_DESC "Matrox g200/g400"
#define MGA_DATE "20000831"
#define MGA_DATE "20000906"
#define MGA_MAJOR 2
#define MGA_MINOR 0
#define MGA_PATCHLEVEL 0
@ -444,17 +444,18 @@ int mga_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = MGA_MAJOR;
@ -465,10 +466,10 @@ int mga_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, MGA_DATE);
DRM_COPY(version.desc, MGA_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
@ -633,7 +634,8 @@ int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",

View File

@ -35,7 +35,7 @@
#define R128_NAME "r128"
#define R128_DESC "ATI Rage 128"
#define R128_DATE "20000719"
#define R128_DATE "20000906"
#define R128_MAJOR 1
#define R128_MINOR 0
#define R128_PATCHLEVEL 0
@ -420,17 +420,18 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = R128_MAJOR;
@ -441,10 +442,10 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, R128_DATE);
DRM_COPY(version.desc, R128_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
@ -559,7 +560,8 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -664,7 +666,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->sigdata.context = lock.context;
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
@ -699,7 +700,8 @@ int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -728,7 +730,6 @@ int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
current->priority = DEF_PRIORITY;
}
#endif
unblock_all_signals();
return 0;
}

View File

@ -36,7 +36,7 @@
#define TDFX_NAME "tdfx"
#define TDFX_DESC "3dfx Banshee/Voodoo3+"
#define TDFX_DATE "20000719"
#define TDFX_DATE "20000906"
#define TDFX_MAJOR 1
#define TDFX_MINOR 0
#define TDFX_PATCHLEVEL 0
@ -379,17 +379,18 @@ int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = TDFX_MAJOR;
@ -400,10 +401,10 @@ int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, TDFX_DATE);
DRM_COPY(version.desc, TDFX_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
@ -518,7 +519,8 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -615,7 +617,6 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
#endif
if (!ret) {
#if LINUX_VERSION_CODE >= 0x020400 /* KERNEL_VERSION(2,4,0) */
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
@ -624,7 +625,7 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->sigdata.context = lock.context;
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
#endif
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
@ -659,7 +660,8 @@ int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -689,8 +691,6 @@ int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
}
#endif
#if LINUX_VERSION_CODE >= 0x020400 /* KERNEL_VERSION(2,4,0) */
unblock_all_signals();
#endif
return 0;
}

View File

@ -95,7 +95,8 @@ int drm_agp_info(struct inode *inode, struct file *filp, unsigned int cmd,
info.id_vendor = kern->device->vendor;
info.id_device = kern->device->device;
copy_to_user_ret((drm_agp_info_t *)arg, &info, sizeof(info), -EFAULT);
if (copy_to_user((drm_agp_info_t *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
@ -134,8 +135,8 @@ int drm_agp_enable(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dev->agp->acquired || !drm_agp.enable) return -EINVAL;
copy_from_user_ret(&mode, (drm_agp_mode_t *)arg, sizeof(mode),
-EFAULT);
if (copy_from_user(&mode, (drm_agp_mode_t *)arg, sizeof(mode)))
return -EFAULT;
dev->agp->mode = mode.mode;
(*drm_agp.enable)(mode.mode);
@ -155,8 +156,8 @@ int drm_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long pages;
u32 type;
if (!dev->agp->acquired) return -EINVAL;
copy_from_user_ret(&request, (drm_agp_buffer_t *)arg, sizeof(request),
-EFAULT);
if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
return -ENOMEM;
@ -212,8 +213,8 @@ int drm_agp_unbind(struct inode *inode, struct file *filp, unsigned int cmd,
drm_agp_mem_t *entry;
if (!dev->agp->acquired) return -EINVAL;
copy_from_user_ret(&request, (drm_agp_binding_t *)arg, sizeof(request),
-EFAULT);
if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
if (!entry->bound) return -EINVAL;
@ -231,8 +232,8 @@ int drm_agp_bind(struct inode *inode, struct file *filp, unsigned int cmd,
int page;
if (!dev->agp->acquired || !drm_agp.bind_memory) return -EINVAL;
copy_from_user_ret(&request, (drm_agp_binding_t *)arg, sizeof(request),
-EFAULT);
if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
if (entry->bound) return -EINVAL;
@ -253,8 +254,8 @@ int drm_agp_free(struct inode *inode, struct file *filp, unsigned int cmd,
drm_agp_mem_t *entry;
if (!dev->agp->acquired) return -EINVAL;
copy_from_user_ret(&request, (drm_agp_buffer_t *)arg, sizeof(request),
-EFAULT);
if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
if (entry->bound) drm_unbind_agp(entry->memory);

View File

@ -137,7 +137,8 @@ int drm_getmagic(struct inode *inode, struct file *filp, unsigned int cmd,
}
DRM_DEBUG("%u\n", auth.magic);
copy_to_user_ret((drm_auth_t *)arg, &auth, sizeof(auth), -EFAULT);
if (copy_to_user((drm_auth_t *)arg, &auth, sizeof(auth)))
return -EFAULT;
return 0;
}
@ -149,7 +150,8 @@ int drm_authmagic(struct inode *inode, struct file *filp, unsigned int cmd,
drm_auth_t auth;
drm_file_t *file;
copy_from_user_ret(&auth, (drm_auth_t *)arg, sizeof(auth), -EFAULT);
if (copy_from_user(&auth, (drm_auth_t *)arg, sizeof(auth)))
return -EFAULT;
DRM_DEBUG("%u\n", auth.magic);
if ((file = drm_find_file(dev, auth.magic))) {
file->authenticated = 1;

View File

@ -133,12 +133,13 @@ int drm_addmap(struct inode *inode, struct file *filp, unsigned int cmd,
dev->maplist[dev->map_count-1] = map;
up(&dev->struct_sem);
copy_to_user_ret((drm_map_t *)arg, map, sizeof(*map), -EFAULT);
if (copy_to_user((drm_map_t *)arg, map, sizeof(*map)))
return -EFAULT;
if (map->type != _DRM_SHM) {
copy_to_user_ret(&((drm_map_t *)arg)->handle,
if (copy_to_user(&((drm_map_t *)arg)->handle,
&map->offset,
sizeof(map->offset),
-EFAULT);
sizeof(map->offset)))
return -EFAULT;
}
return 0;
}
@ -166,10 +167,10 @@ int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
count = request.count;
order = drm_order(request.size);
@ -295,10 +296,10 @@ int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = entry->buf_count;
request.size = size;
copy_to_user_ret((drm_buf_desc_t *)arg,
if (copy_to_user((drm_buf_desc_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
atomic_dec(&dev->buf_alloc);
return 0;
@ -324,10 +325,10 @@ int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_info_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) ++count;
@ -338,28 +339,26 @@ int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (request.count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) {
copy_to_user_ret(&request.list[count].count,
if (copy_to_user(&request.list[count].count,
&dma->bufs[i].buf_count,
sizeof(dma->bufs[0]
.buf_count),
-EFAULT);
copy_to_user_ret(&request.list[count].size,
.buf_count)) ||
copy_to_user(&request.list[count].size,
&dma->bufs[i].buf_size,
sizeof(dma->bufs[0].buf_size),
-EFAULT);
copy_to_user_ret(&request.list[count].low_mark,
sizeof(dma->bufs[0].buf_size)) ||
copy_to_user(&request.list[count].low_mark,
&dma->bufs[i]
.freelist.low_mark,
sizeof(dma->bufs[0]
.freelist.low_mark),
-EFAULT);
copy_to_user_ret(&request.list[count]
.freelist.low_mark)) ||
copy_to_user(&request.list[count]
.high_mark,
&dma->bufs[i]
.freelist.high_mark,
sizeof(dma->bufs[0]
.freelist.high_mark),
-EFAULT);
.freelist.high_mark)))
return -EFAULT;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
@ -372,10 +371,10 @@ int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
}
request.count = count;
copy_to_user_ret((drm_buf_info_t *)arg,
if (copy_to_user((drm_buf_info_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
return 0;
}
@ -392,10 +391,10 @@ int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
DRM_DEBUG("%d, %d, %d\n",
request.size, request.low_mark, request.high_mark);
@ -427,17 +426,17 @@ int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_free_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
DRM_DEBUG("%d\n", request.count);
for (i = 0; i < request.count; i++) {
copy_from_user_ret(&idx,
if (copy_from_user(&idx,
&request.list[i],
sizeof(idx),
-EFAULT);
sizeof(idx)))
return -EFAULT;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
@ -480,10 +479,10 @@ int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_map_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
if (request.count >= dma->buf_count) {
down(&current->mm->mmap_sem);
@ -529,10 +528,10 @@ done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
copy_to_user_ret((drm_buf_map_t *)arg,
if (copy_to_user((drm_buf_map_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
return retcode;
}

View File

@ -129,19 +129,21 @@ int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
copy_to_user_ret(&res.contexts[i],
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i),
-EFAULT);
sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
@ -153,7 +155,8 @@ int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = drm_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
@ -161,7 +164,8 @@ int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
}
drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@ -173,7 +177,8 @@ int drm_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_ctx_t ctx;
drm_queue_t *q;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
@ -206,7 +211,8 @@ int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_ctx_t ctx;
drm_queue_t *q;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
@ -223,7 +229,8 @@ int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
ctx.flags = q->flags;
atomic_dec(&q->use_count);
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@ -235,7 +242,8 @@ int drm_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return drm_context_switch(dev, dev->last_context, ctx.handle);
}
@ -247,7 +255,8 @@ int drm_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
drm_context_switch_complete(dev, ctx.handle);
@ -263,7 +272,8 @@ int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_queue_t *q;
drm_buf_t *buf;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;

View File

@ -486,14 +486,16 @@ static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
buf->pending);
}
buf->pid = current->pid;
copy_to_user_ret(&d->request_indices[i],
if (copy_to_user(&d->request_indices[i],
&buf->idx,
sizeof(buf->idx),
-EFAULT);
copy_to_user_ret(&d->request_sizes[i],
sizeof(buf->idx)))
return -EFAULT;
if (copy_to_user(&d->request_sizes[i],
&buf->total,
sizeof(buf->total),
-EFAULT);
sizeof(buf->total)))
return -EFAULT;
++d->granted_count;
}
return 0;

View File

@ -39,7 +39,8 @@ int drm_adddraw(struct inode *inode, struct file *filp, unsigned int cmd,
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
copy_to_user_ret((drm_draw_t *)arg, &draw, sizeof(draw), -EFAULT);
if (copy_to_user((drm_draw_t *)arg, &draw, sizeof(draw)))
return -EFAULT;
return 0;
}

View File

@ -176,7 +176,8 @@ ssize_t drm_read(struct file *filp, char *buf, size_t count, loff_t *off)
} else {
cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
}
copy_to_user_ret(buf, dev->buf_rp, cur, -EINVAL);
if (copy_to_user(buf, dev->buf_rp, cur))
return -EFAULT;
dev->buf_rp += cur;
if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
send -= cur;

View File

@ -586,7 +586,8 @@ int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
int retcode = 0;
drm_dma_t d;
copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
return -EFAULT;
DRM_DEBUG("%d %d: %d send, %d req\n",
current->pid, d.context, d.send_count, d.request_count);
@ -621,7 +622,8 @@ int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
return -EFAULT;
return retcode;
}
@ -710,7 +712,8 @@ int gamma_control(struct inode *inode, struct file *filp, unsigned int cmd,
drm_control_t ctl;
int retcode;
copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
return -EFAULT;
switch (ctl.func) {
case DRM_INST_HANDLER:
@ -742,7 +745,8 @@ int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",

View File

@ -42,7 +42,7 @@
#define GAMMA_NAME "gamma"
#define GAMMA_DESC "3dlabs GMX 2000"
#define GAMMA_DATE "20000719"
#define GAMMA_DATE "20000906"
#define GAMMA_MAJOR 1
#define GAMMA_MINOR 0
#define GAMMA_PATCHLEVEL 0
@ -405,17 +405,18 @@ int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = GAMMA_MAJOR;
@ -426,10 +427,10 @@ int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, GAMMA_DATE);
DRM_COPY(version.desc, GAMMA_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
@ -537,7 +538,8 @@ int gamma_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",

View File

@ -56,10 +56,10 @@ int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
count = request.count;
order = drm_order(request.size);
@ -155,10 +155,10 @@ int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = entry->buf_count;
request.size = size;
copy_to_user_ret((drm_buf_desc_t *)arg,
if (copy_to_user((drm_buf_desc_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
atomic_dec(&dev->buf_alloc);
dma->flags = _DRM_DMA_USE_AGP;
@ -170,10 +170,10 @@ int i810_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
{
drm_buf_desc_t request;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
if(request.flags & _DRM_AGP_BUFFER)
return i810_addbufs_agp(inode, filp, cmd, arg);
@ -201,10 +201,10 @@ int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_info_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) ++count;
@ -215,28 +215,26 @@ int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (request.count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) {
copy_to_user_ret(&request.list[count].count,
if (copy_to_user(&request.list[count].count,
&dma->bufs[i].buf_count,
sizeof(dma->bufs[0]
.buf_count),
-EFAULT);
copy_to_user_ret(&request.list[count].size,
.buf_count)) ||
copy_to_user(&request.list[count].size,
&dma->bufs[i].buf_size,
sizeof(dma->bufs[0].buf_size),
-EFAULT);
copy_to_user_ret(&request.list[count].low_mark,
sizeof(dma->bufs[0].buf_size)) ||
copy_to_user(&request.list[count].low_mark,
&dma->bufs[i]
.freelist.low_mark,
sizeof(dma->bufs[0]
.freelist.low_mark),
-EFAULT);
copy_to_user_ret(&request.list[count]
.freelist.low_mark)) ||
copy_to_user(&request.list[count]
.high_mark,
&dma->bufs[i]
.freelist.high_mark,
sizeof(dma->bufs[0]
.freelist.high_mark),
-EFAULT);
.freelist.high_mark)))
return -EFAULT;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
@ -249,10 +247,10 @@ int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
}
request.count = count;
copy_to_user_ret((drm_buf_info_t *)arg,
if (copy_to_user((drm_buf_info_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
return 0;
}
@ -269,10 +267,10 @@ int i810_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
DRM_DEBUG("%d, %d, %d\n",
request.size, request.low_mark, request.high_mark);
@ -304,17 +302,17 @@ int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_free_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
DRM_DEBUG("%d\n", request.count);
for (i = 0; i < request.count; i++) {
copy_from_user_ret(&idx,
if (copy_from_user(&idx,
&request.list[i],
sizeof(idx),
-EFAULT);
sizeof(idx)))
return -EFAULT;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);

View File

@ -103,19 +103,21 @@ int i810_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
copy_to_user_ret(&res.contexts[i],
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i),
-EFAULT);
sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
@ -126,7 +128,8 @@ int i810_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = i810_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = i810_alloc_queue(dev);
@ -137,7 +140,8 @@ int i810_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
return -ENOMEM;
}
DRM_DEBUG("%d\n", ctx.handle);
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@ -153,10 +157,12 @@ int i810_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@ -167,7 +173,8 @@ int i810_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return i810_context_switch(dev, dev->last_context, ctx.handle);
}
@ -179,7 +186,8 @@ int i810_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
i810_context_switch_complete(dev, ctx.handle);
@ -193,7 +201,8 @@ int i810_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if(ctx.handle != DRM_KERNEL_CONTEXT) {
drm_ctxbitmap_free(dev, ctx.handle);

View File

@ -490,8 +490,8 @@ int i810_dma_init(struct inode *inode, struct file *filp,
drm_i810_init_t init;
int retcode = 0;
copy_from_user_ret(&init, (drm_i810_init_t *)arg,
sizeof(init), -EFAULT);
if (copy_from_user(&init, (drm_i810_init_t *)arg, sizeof(init)))
return -EFAULT;
switch(init.func) {
case I810_INIT_DMA:
@ -1005,7 +1005,8 @@ int i810_control(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG( "i810_control\n");
copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
return -EFAULT;
switch (ctl.func) {
case DRM_INST_HANDLER:
@ -1178,7 +1179,8 @@ int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
int ret = 0;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -1275,8 +1277,8 @@ int i810_dma_vertex(struct inode *inode, struct file *filp,
dev_priv->sarea_priv;
drm_i810_vertex_t vertex;
copy_from_user_ret(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex),
-EFAULT);
if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_dma_vertex called without lock held\n");
@ -1307,8 +1309,8 @@ int i810_clear_bufs(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_i810_clear_t clear;
copy_from_user_ret(&clear, (drm_i810_clear_t *)arg, sizeof(clear),
-EFAULT);
if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_clear_bufs called without lock held\n");
@ -1365,7 +1367,8 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
dev_priv->sarea_priv;
DRM_DEBUG("getbuf\n");
copy_from_user_ret(&d, (drm_i810_dma_t *)arg, sizeof(d), -EFAULT);
if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i810_dma called without lock held\n");
@ -1379,7 +1382,8 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
current->pid, retcode, d.granted);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
return -EFAULT;
sarea_priv->last_dispatch = (int) hw_status[5];
return retcode;
@ -1404,14 +1408,16 @@ int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd,
return -EINVAL;
}
copy_from_user_ret(&d, (drm_i810_copy_t *)arg, sizeof(d), -EFAULT);
if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d)))
return -EFAULT;
if(d.idx > dma->buf_count) return -EINVAL;
buf = dma->buflist[ d.idx ];
buf_priv = buf->dev_private;
if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM;
copy_from_user_ret(buf_priv->virtual, d.address, d.used, -EFAULT);
if (copy_from_user(buf_priv->virtual, d.address, d.used))
return -EFAULT;
sarea_priv->last_dispatch = (int) hw_status[5];

View File

@ -35,7 +35,7 @@
#define I810_NAME "i810"
#define I810_DESC "Intel I810"
#define I810_DATE "20000719"
#define I810_DATE "20000906"
#define I810_MAJOR 1
#define I810_MINOR 1
#define I810_PATCHLEVEL 0
@ -428,17 +428,18 @@ int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = I810_MAJOR;
@ -449,10 +450,10 @@ int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, I810_DATE);
DRM_COPY(version.desc, I810_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
@ -616,7 +617,8 @@ int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",

View File

@ -38,13 +38,15 @@ int drm_irq_busid(struct inode *inode, struct file *filp, unsigned int cmd,
drm_irq_busid_t p;
struct pci_dev *dev;
copy_from_user_ret(&p, (drm_irq_busid_t *)arg, sizeof(p), -EFAULT);
if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
return -EFAULT;
dev = pci_find_slot(p.busnum, PCI_DEVFN(p.devnum, p.funcnum));
if (dev) p.irq = dev->irq;
else p.irq = 0;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
p.busnum, p.devnum, p.funcnum, p.irq);
copy_to_user_ret((drm_irq_busid_t *)arg, &p, sizeof(p), -EFAULT);
if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
return -EFAULT;
return 0;
}
@ -55,13 +57,15 @@ int drm_getunique(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_unique_t u;
copy_from_user_ret(&u, (drm_unique_t *)arg, sizeof(u), -EFAULT);
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
return -EFAULT;
if (u.unique_len >= dev->unique_len) {
copy_to_user_ret(u.unique, dev->unique, dev->unique_len,
-EFAULT);
if (copy_to_user(u.unique, dev->unique, dev->unique_len))
return -EFAULT;
}
u.unique_len = dev->unique_len;
copy_to_user_ret((drm_unique_t *)arg, &u, sizeof(u), -EFAULT);
if (copy_to_user((drm_unique_t *)arg, &u, sizeof(u)))
return -EFAULT;
return 0;
}
@ -72,15 +76,19 @@ int drm_setunique(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_unique_t u;
if (dev->unique_len || dev->unique) return -EBUSY;
if (dev->unique_len || dev->unique)
return -EBUSY;
copy_from_user_ret(&u, (drm_unique_t *)arg, sizeof(u), -EFAULT);
if (!u.unique_len) return -EINVAL;
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
return -EFAULT;
if (!u.unique_len)
return -EINVAL;
dev->unique_len = u.unique_len;
dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
copy_from_user_ret(dev->unique, u.unique, dev->unique_len,
-EFAULT);
if (copy_from_user(dev->unique, u.unique, dev->unique_len))
return -EFAULT;
dev->unique[dev->unique_len] = '\0';
dev->devname = drm_alloc(strlen(dev->name) + strlen(dev->unique) + 2,

View File

@ -218,7 +218,8 @@ int drm_finish(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("\n");
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
drm_flush_unblock(dev, lock.context, lock.flags);
return ret;

View File

@ -57,10 +57,10 @@ int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
count = request.count;
order = drm_order(request.size);
@ -173,10 +173,10 @@ int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = entry->buf_count;
request.size = size;
copy_to_user_ret((drm_buf_desc_t *)arg,
if (copy_to_user((drm_buf_desc_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
atomic_dec(&dev->buf_alloc);
@ -219,10 +219,10 @@ int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
count = request.count;
order = drm_order(request.size);
@ -348,10 +348,10 @@ int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = entry->buf_count;
request.size = size;
copy_to_user_ret((drm_buf_desc_t *)arg,
if (copy_to_user((drm_buf_desc_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
atomic_dec(&dev->buf_alloc);
return 0;
@ -362,10 +362,10 @@ int mga_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
{
drm_buf_desc_t request;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
if(request.flags & _DRM_AGP_BUFFER)
return mga_addbufs_agp(inode, filp, cmd, arg);
@ -393,10 +393,10 @@ int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_info_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) ++count;
@ -407,28 +407,26 @@ int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (request.count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) {
copy_to_user_ret(&request.list[count].count,
if (copy_to_user(&request.list[count].count,
&dma->bufs[i].buf_count,
sizeof(dma->bufs[0]
.buf_count),
-EFAULT);
copy_to_user_ret(&request.list[count].size,
.buf_count)) ||
copy_to_user(&request.list[count].size,
&dma->bufs[i].buf_size,
sizeof(dma->bufs[0].buf_size),
-EFAULT);
copy_to_user_ret(&request.list[count].low_mark,
sizeof(dma->bufs[0].buf_size)) ||
copy_to_user(&request.list[count].low_mark,
&dma->bufs[i]
.freelist.low_mark,
sizeof(dma->bufs[0]
.freelist.low_mark),
-EFAULT);
copy_to_user_ret(&request.list[count]
.freelist.low_mark)) ||
copy_to_user(&request.list[count]
.high_mark,
&dma->bufs[i]
.freelist.high_mark,
sizeof(dma->bufs[0]
.freelist.high_mark),
-EFAULT);
.freelist.high_mark)))
return -EFAULT;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
@ -441,10 +439,10 @@ int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
}
request.count = count;
copy_to_user_ret((drm_buf_info_t *)arg,
if (copy_to_user((drm_buf_info_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
return 0;
}
@ -461,10 +459,10 @@ int mga_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
DRM_DEBUG("%d, %d, %d\n",
request.size, request.low_mark, request.high_mark);
@ -496,17 +494,17 @@ int mga_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_free_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
DRM_DEBUG("%d\n", request.count);
for (i = 0; i < request.count; i++) {
copy_from_user_ret(&idx,
if (copy_from_user(&idx,
&request.list[i],
sizeof(idx),
-EFAULT);
sizeof(idx)))
return -EFAULT;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
@ -550,10 +548,10 @@ int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_map_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
DRM_DEBUG("mga_mapbufs\n");
DRM_DEBUG("dma->flags : %x\n", dma->flags);
@ -628,10 +626,10 @@ int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
copy_to_user_ret((drm_buf_map_t *)arg,
if (copy_to_user((drm_buf_map_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
DRM_DEBUG("retcode : %d\n", retcode);

View File

@ -103,19 +103,21 @@ int mga_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
copy_to_user_ret(&res.contexts[i],
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i),
-EFAULT);
sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
@ -126,7 +128,8 @@ int mga_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = mga_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = mga_alloc_queue(dev);
@ -137,7 +140,8 @@ int mga_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
return -ENOMEM;
}
DRM_DEBUG("%d\n", ctx.handle);
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@ -153,10 +157,12 @@ int mga_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@ -167,7 +173,8 @@ int mga_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return mga_context_switch(dev, dev->last_context, ctx.handle);
}
@ -179,7 +186,8 @@ int mga_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
mga_context_switch_complete(dev, ctx.handle);
@ -193,7 +201,8 @@ int mga_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if(ctx.handle != DRM_KERNEL_CONTEXT) {
drm_ctxbitmap_free(dev, ctx.handle);

View File

@ -82,19 +82,6 @@ static void mga_delay(void)
return;
}
#ifdef __i386__
void mga_flush_write_combine(void)
{
int xchangeDummy;
DRM_DEBUG("%s\n", __FUNCTION__);
__asm__ volatile(" push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy));
__asm__ volatile(" push %%eax ; push %%ebx ; push %%ecx ; push %%edx ;"
" movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ;"
" pop %%eax" : /* no outputs */ : /* no inputs */ );
}
#endif
/* These are two age tags that will never be sent to
* the hardware */
#define MGA_BUF_USED 0xffffffff
@ -429,9 +416,7 @@ void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
}
}
#ifdef __i386__
mga_flush_write_combine();
#endif
atomic_inc(&dev_priv->pending_bufs);
MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
@ -829,9 +814,7 @@ static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
* the status register will be correct
*/
#ifdef __i386__
mga_flush_write_combine();
#endif
MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
@ -857,7 +840,8 @@ int mga_dma_init(struct inode *inode, struct file *filp,
DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&init, (drm_mga_init_t *)arg, sizeof(init), -EFAULT);
if (copy_from_user(&init, (drm_mga_init_t *)arg, sizeof(init)))
return -EFAULT;
switch(init.func) {
case MGA_INIT_DMA:
@ -939,7 +923,8 @@ int mga_control(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_control_t ctl;
copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
return -EFAULT;
DRM_DEBUG("%s\n", __FUNCTION__);
@ -1026,7 +1011,8 @@ int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_lock_t lock;
DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -1104,7 +1090,8 @@ int mga_flush_ioctl(struct inode *inode, struct file *filp,
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("mga_flush_ioctl called without lock held\n");

View File

@ -36,7 +36,7 @@
#define MGA_NAME "mga"
#define MGA_DESC "Matrox g200/g400"
#define MGA_DATE "20000831"
#define MGA_DATE "20000906"
#define MGA_MAJOR 2
#define MGA_MINOR 0
#define MGA_PATCHLEVEL 0
@ -444,17 +444,18 @@ int mga_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = MGA_MAJOR;
@ -465,10 +466,10 @@ int mga_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, MGA_DATE);
DRM_COPY(version.desc, MGA_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
@ -633,7 +634,8 @@ int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",

View File

@ -129,7 +129,6 @@ extern int mga_dma_init(struct inode *inode, struct file *filp,
extern int mga_dma_cleanup(drm_device_t *dev);
extern int mga_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void mga_flush_write_combine(void);
extern unsigned int mga_create_sync_tag(drm_device_t *dev);
extern drm_buf_t *mga_freelist_get(drm_device_t *dev);
extern int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf);
@ -180,6 +179,7 @@ extern int mga_rmctx(struct inode *inode, struct file *filp,
extern int mga_context_switch(drm_device_t *dev, int old, int new);
extern int mga_context_switch_complete(drm_device_t *dev, int new);
#define mga_flush_write_combine() mb()
typedef enum {
TT_GENERAL,

View File

@ -38,13 +38,13 @@
* change these values
*/
#define MGAEMITCLIP_SIZE 10
#define MGAEMITCTX_SIZE 20
#define MGAG200EMITTEX_SIZE 20
#define MGAG400EMITTEX0_SIZE 30
#define MGAG400EMITTEX1_SIZE 25
#define MGAG400EMITPIPE_SIZE 55
#define MGAG200EMITPIPE_SIZE 15
#define MGAEMITCLIP_SIZE 10
#define MGAEMITCTX_SIZE 20
#define MGAG200EMITTEX_SIZE 20
#define MGAG400EMITTEX0_SIZE 30
#define MGAG400EMITTEX1_SIZE 25
#define MGAG400EMITPIPE_SIZE 55
#define MGAG200EMITPIPE_SIZE 15
#define MAX_STATE_SIZE ((MGAEMITCLIP_SIZE * MGA_NR_SAREA_CLIPRECTS) + \
MGAEMITCTX_SIZE + MGAG400EMITTEX0_SIZE + \
@ -170,9 +170,7 @@ static void mgaG400EmitTex0(drm_mga_private_t * dev_priv)
/* This takes a max of 30 dwords */
PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2]
| 0x00008000
);
PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | 0x00008000);
PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]);
PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]);
PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]);
@ -220,8 +218,8 @@ static void mgaG400EmitTex1(drm_mga_private_t * dev_priv, int source )
/* This takes 25 dwords */
PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | TMC_map1_enable |
0x00008000);
PRIMOUTREG(MGAREG_TEXCTL2,
regs[MGA_TEXREG_CTL2] | TMC_map1_enable | 0x00008000);
PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]);
PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]);
PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]);
@ -282,9 +280,7 @@ static void mgaG400EmitPipe(drm_mga_private_t * dev_priv)
PRIMOUTREG(MGAREG_DMAPAD, 0);
if (multitex) {
PRIMOUTREG(MGAREG_TEXCTL2, 0
| 0x00008000
);
PRIMOUTREG(MGAREG_TEXCTL2, 0 | 0x00008000);
PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0);
PRIMOUTREG(MGAREG_TEXCTL2, 0x80 | 0x00008000);
PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0);
@ -856,8 +852,8 @@ int mga_clear_bufs(struct inode *inode, struct file *filp,
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_clear_t clear;
copy_from_user_ret(&clear, (drm_mga_clear_t *) arg, sizeof(clear),
-EFAULT);
if (copy_from_user(&clear, (drm_mga_clear_t *) arg, sizeof(clear)))
return -EFAULT;
DRM_DEBUG("%s\n", __FUNCTION__);
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
@ -877,9 +873,7 @@ int mga_clear_bufs(struct inode *inode, struct file *filp,
clear.clear_color_mask,
clear.clear_depth_mask);
PRIMUPDATE(dev_priv);
#ifdef __i386__
mga_flush_write_combine();
#endif
mga_dma_schedule(dev, 1);
return 0;
}
@ -909,9 +903,7 @@ int mga_swap_bufs(struct inode *inode, struct file *filp,
PRIMUPDATE(dev_priv);
set_bit(MGA_BUF_SWAP_PENDING,
&dev_priv->current_prim->buffer_status);
#ifdef __i386__
mga_flush_write_combine();
#endif
mga_dma_schedule(dev, 1);
return 0;
}
@ -932,8 +924,8 @@ int mga_iload(struct inode *inode, struct file *filp,
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("Starting Iload\n");
copy_from_user_ret(&iload, (drm_mga_iload_t *) arg, sizeof(iload),
-EFAULT);
if (copy_from_user(&iload, (drm_mga_iload_t *) arg, sizeof(iload)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("mga_iload called without lock held\n");
@ -959,9 +951,7 @@ int mga_iload(struct inode *inode, struct file *filp,
AGEBUF(dev_priv, buf_priv);
buf_priv->discard = 1;
mga_freelist_put(dev, buf);
#ifdef __i386__
mga_flush_write_combine();
#endif
mga_dma_schedule(dev, 1);
return 0;
}
@ -979,8 +969,8 @@ int mga_vertex(struct inode *inode, struct file *filp,
drm_mga_vertex_t vertex;
DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&vertex, (drm_mga_vertex_t *) arg,
sizeof(vertex), -EFAULT);
if (copy_from_user(&vertex, (drm_mga_vertex_t *) arg, sizeof(vertex)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("mga_vertex called without lock held\n");
@ -1009,9 +999,7 @@ int mga_vertex(struct inode *inode, struct file *filp,
mga_dma_dispatch_vertex(dev, buf);
PRIMUPDATE(dev_priv);
#ifdef __i386__
mga_flush_write_combine();
#endif
mga_dma_schedule(dev, 1);
return 0;
}
@@ -1030,8 +1018,8 @@ int mga_indices(struct inode *inode, struct file *filp,
drm_mga_indices_t indices;
DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&indices, (drm_mga_indices_t *) arg,
sizeof(indices), -EFAULT);
if (copy_from_user(&indices, (drm_mga_indices_t *) arg, sizeof(indices)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("mga_indices called without lock held\n");
@@ -1058,9 +1046,7 @@ int mga_indices(struct inode *inode, struct file *filp,
mga_dma_dispatch_indices(dev, buf, indices.start, indices.end);
PRIMUPDATE(dev_priv);
#ifdef __i386__
mga_flush_write_combine();
#endif
mga_dma_schedule(dev, 1);
return 0;
}
@@ -1078,10 +1064,12 @@ static int mga_dma_get_buffers(drm_device_t * dev, drm_dma_t * d)
if (!buf)
break;
buf->pid = current->pid;
copy_to_user_ret(&d->request_indices[i],
&buf->idx, sizeof(buf->idx), -EFAULT);
copy_to_user_ret(&d->request_sizes[i],
&buf->total, sizeof(buf->total), -EFAULT);
if (copy_to_user(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return -EFAULT;
if (copy_to_user(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return -EFAULT;
++d->granted_count;
}
return 0;
@@ -1097,7 +1085,8 @@ int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd,
drm_dma_t d;
DRM_DEBUG("%s\n", __FUNCTION__);
copy_from_user_ret(&d, (drm_dma_t *) arg, sizeof(d), -EFAULT);
if (copy_from_user(&d, (drm_dma_t *) arg, sizeof(d)))
return -EFAULT;
DRM_DEBUG("%d %d: %d send, %d req\n",
current->pid, d.context, d.send_count, d.request_count);
@@ -1132,6 +1121,7 @@ int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
copy_to_user_ret((drm_dma_t *) arg, &d, sizeof(d), -EFAULT);
if (copy_to_user((drm_dma_t *) arg, &d, sizeof(d)))
return -EFAULT;
return retcode;
}
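
Every hunk above follows the same conversion: the old copy_from_user_ret()/copy_to_user_ret() macros hid an early return inside themselves, while the replacement calls copy_from_user()/copy_to_user() directly and returns -EFAULT when the copy comes back short. A minimal sketch of the resulting ioctl shape, using a hypothetical drm_foo_t argument rather than any real driver structure:

/* Sketch only: drm_foo_t and foo_ioctl() are illustrative names,
 * not part of the mga/r128/tdfx drivers. */
#include <linux/errno.h>
#include <asm/uaccess.h>        /* copy_from_user()/copy_to_user() on 2.4 */

typedef struct { int func; int value; } drm_foo_t;

static int foo_ioctl(unsigned long arg)
{
        drm_foo_t foo;

        /* copy_from_user() returns the number of bytes it could NOT copy,
         * so any nonzero result means a bad user pointer. */
        if (copy_from_user(&foo, (drm_foo_t *)arg, sizeof(foo)))
                return -EFAULT;

        foo.value = 0;          /* ...real work would go here... */

        if (copy_to_user((drm_foo_t *)arg, &foo, sizeof(foo)))
                return -EFAULT;
        return 0;
}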

View File

@@ -60,10 +60,10 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
count = request.count;
order = drm_order(request.size);
@@ -173,10 +173,10 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = entry->buf_count;
request.size = size;
copy_to_user_ret((drm_buf_desc_t *)arg,
if (copy_to_user((drm_buf_desc_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP;
@@ -195,10 +195,10 @@ int r128_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dev_priv || dev_priv->is_pci) return -EINVAL;
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
if (request.flags & _DRM_AGP_BUFFER)
@@ -234,10 +234,10 @@ int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
if (copy_from_user(&request,
(drm_buf_map_t *)arg,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
if (request.count >= dma->buf_count) {
if (dma->flags & _DRM_DMA_USE_AGP) {
@@ -300,10 +300,10 @@ int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
copy_to_user_ret((drm_buf_map_t *)arg,
if (copy_to_user((drm_buf_map_t *)arg,
&request,
sizeof(request),
-EFAULT);
sizeof(request)))
return -EFAULT;
return retcode;
}
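
Both addbufs paths above size their allocations through drm_order(request.size). A sketch of what a drm_order()-style helper computes, under the assumption that it returns the smallest order with (1UL << order) >= size; this is an illustration, not the DRM core's code:

/* Assumed semantics: smallest order such that (1UL << order) >= size. */
static int order_for_size(unsigned long size)
{
        int order = 0;
        unsigned long tmp;

        for (tmp = size >> 1; tmp; tmp >>= 1)
                order++;
        if (size & (size - 1))          /* not already a power of two */
                order++;
        return order;
}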

View File

@@ -103,19 +103,21 @@ int r128_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
copy_to_user_ret(&res.contexts[i],
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i),
-EFAULT);
sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
@@ -127,7 +129,8 @@ int r128_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = r128_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = r128_alloc_queue(dev);
@@ -139,7 +142,8 @@ int r128_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
return -ENOMEM;
}
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@@ -148,7 +152,8 @@ int r128_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
if (ctx.flags==_DRM_CONTEXT_PRESERVED)
r128_res_ctx.handle=ctx.handle;
return 0;
@@ -159,10 +164,12 @@ int r128_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
/* This is 0, because we don't hanlde any context flags */
ctx.flags = 0;
copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@@ -173,7 +180,8 @@ int r128_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return r128_context_switch(dev, dev->last_context, ctx.handle);
}
@@ -185,7 +193,8 @@ int r128_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
r128_context_switch_complete(dev, ctx.handle);
@@ -199,7 +208,8 @@ int r128_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
drm_ctxbitmap_free(dev, ctx.handle);
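
r128_resctx() above (like tdfx_resctx() later in this commit) reports the reserved context handles one element at a time and bails out with -EFAULT on the first failed copy. The pattern in isolation, with a plain int array standing in for drm_ctx_res_t:

/* Sketch only: 'handles' is a user-space array with room for
 * NR_RESERVED ints; NR_RESERVED stands in for DRM_RESERVED_CONTEXTS. */
#include <linux/errno.h>
#include <asm/uaccess.h>

#define NR_RESERVED 2

static int report_reserved(int *handles)
{
        int i;

        for (i = 0; i < NR_RESERVED; i++) {
                if (copy_to_user(&handles[i], &i, sizeof(i)))
                        return -EFAULT;
        }
        return 0;
}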

View File

@@ -68,26 +68,8 @@ int R128_READ_PLL(drm_device_t *dev, int addr)
return R128_READ(R128_CLOCK_CNTL_DATA);
}
#ifdef __i386__
static void r128_flush_write_combine(void)
{
int xchangeDummy;
#define r128_flush_write_combine() mb()
__asm__ volatile("push %%eax ;"
"xchg %%eax, %0 ;"
"pop %%eax" : : "m" (xchangeDummy));
__asm__ volatile("push %%eax ;"
"push %%ebx ;"
"push %%ecx ;"
"push %%edx ;"
"movl $0,%%eax ;"
"cpuid ;"
"pop %%edx ;"
"pop %%ecx ;"
"pop %%ebx ;"
"pop %%eax" : /* no outputs */ : /* no inputs */ );
}
#endif
static void r128_status(drm_device_t *dev)
{
@@ -213,8 +195,8 @@ int r128_init_cce(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_r128_init_t init;
copy_from_user_ret(&init, (drm_r128_init_t *)arg, sizeof(init),
-EFAULT);
if (copy_from_user(&init, (drm_r128_init_t *)arg, sizeof(init)))
return -EFAULT;
switch (init.func) {
case R128_INIT_CCE:
@@ -498,10 +480,8 @@ static int r128_submit_packets_ring_secure(drm_device_t *dev,
dev_priv->ring_start,
write * sizeof(u32));
#ifdef __i386__
/* Make sure WC cache has been flushed */
r128_flush_write_combine();
#endif
dev_priv->sarea_priv->ring_write = write;
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
@@ -603,10 +583,8 @@ static int r128_submit_packets_ring(drm_device_t *dev,
dev_priv->ring_start,
write * sizeof(u32));
#ifdef __i386__
/* Make sure WC cache has been flushed */
r128_flush_write_combine();
#endif
dev_priv->sarea_priv->ring_write = write;
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
@@ -686,8 +664,8 @@ int r128_submit_pkt(struct inode *inode, struct file *filp,
return -EINVAL;
}
copy_from_user_ret(&packet, (drm_r128_packet_t *)arg, sizeof(packet),
-EFAULT);
if (copy_from_user(&packet, (drm_r128_packet_t *)arg, sizeof(packet)))
return -EFAULT;
c = packet.count;
size = c * sizeof(*buffer);
@@ -702,7 +680,8 @@ int r128_submit_pkt(struct inode *inode, struct file *filp,
}
if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
copy_from_user_ret(buffer, packet.buffer, size, -EFAULT);
if (copy_from_user(buffer, packet.buffer, size))
return -EFAULT;
if (dev_priv->cce_secure)
ret = r128_submit_packets_ring_secure(dev, buffer, &c);
@@ -712,7 +691,8 @@ int r128_submit_pkt(struct inode *inode, struct file *filp,
c += left;
} else {
if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
copy_from_user_ret(buffer, packet.buffer, size, -EFAULT);
if (copy_from_user(buffer, packet.buffer, size))
return -EFAULT;
if (dev_priv->cce_secure)
ret = r128_submit_packets_pio_secure(dev, buffer, &c);
@@ -723,8 +703,8 @@ int r128_submit_pkt(struct inode *inode, struct file *filp,
kfree(buffer);
packet.count = c;
copy_to_user_ret((drm_r128_packet_t *)arg, &packet, sizeof(packet),
-EFAULT);
if (copy_to_user((drm_r128_packet_t *)arg, &packet, sizeof(packet)))
return -EFAULT;
if (ret) return ret;
else if (c > 0) return -EAGAIN;
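
r128_submit_pkt() stages the user's packets through a temporary kernel buffer: allocate packet.count dwords, copy_from_user() the payload, hand it to the ring or PIO submit path, free it, and turn any unconsumed dwords into -EAGAIN. A compact sketch of that staging flow; GFP_KERNEL is used here where the driver passes a 0 flags value, and the helper names are illustrative:

#include <linux/errno.h>
#include <linux/slab.h>                 /* kmalloc()/kfree() */
#include <linux/types.h>
#include <asm/uaccess.h>

/* 'consume' stands in for the ring/PIO submit paths, which may leave
 * some dwords unprocessed and update *count accordingly. */
static int stage_user_packet(const u32 *user_buf, int count,
                             int (*consume)(u32 *buf, int *count))
{
        size_t size = count * sizeof(u32);
        u32 *buffer;
        int ret;

        if ((buffer = kmalloc(size, GFP_KERNEL)) == NULL)
                return -ENOMEM;
        if (copy_from_user(buffer, user_buf, size)) {
                kfree(buffer);
                return -EFAULT;
        }

        ret = consume(buffer, &count);
        kfree(buffer);

        if (ret)
                return ret;
        return count > 0 ? -EAGAIN : 0;
}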
@@ -772,10 +752,8 @@ static int r128_send_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
r128_mark_vertbufs_done(dev);
}
#ifdef __i386__
/* Make sure WC cache has been flushed (if in PIO mode) */
if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine();
#endif
/* FIXME: Add support for sending vertex buffer to the CCE here
instead of in client code. The v->prim holds the primitive
@@ -863,14 +841,13 @@ static int r128_get_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
buf = r128_freelist_get(dev);
if (!buf) break;
buf->pid = current->pid;
copy_to_user_ret(&v->request_indices[i],
if (copy_to_user(&v->request_indices[i],
&buf->idx,
sizeof(buf->idx),
-EFAULT);
copy_to_user_ret(&v->request_sizes[i],
sizeof(buf->idx)) ||
copy_to_user(&v->request_sizes[i],
&buf->total,
sizeof(buf->total),
-EFAULT);
sizeof(buf->total)))
return -EFAULT;
++v->granted_count;
}
return 0;
@@ -897,7 +874,8 @@ int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd,
return -EINVAL;
}
copy_from_user_ret(&v, (drm_r128_vertex_t *)arg, sizeof(v), -EFAULT);
if (copy_from_user(&v, (drm_r128_vertex_t *)arg, sizeof(v)))
return -EFAULT;
DRM_DEBUG("%d: %d send, %d req\n",
current->pid, v.send_count, v.request_count);
@@ -924,7 +902,8 @@ int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, v.granted_count);
copy_to_user_ret((drm_r128_vertex_t *)arg, &v, sizeof(v), -EFAULT);
if (copy_to_user((drm_r128_vertex_t *)arg, &v, sizeof(v)))
return -EFAULT;
return retcode;
}
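
With r128_flush_write_combine() redefined above as a plain mb(), the #ifdef __i386__ guards around the flush sites can go away: the only requirement is that the queued ring contents drain out of the write-combining buffers before the new write pointer reaches the chip. A hedged sketch of that ordering, with illustrative names in place of the real dev_priv/R128_WRITE machinery:

#include <asm/system.h>                 /* mb() on 2.4-era kernels */

#define FLUSH_WC()      mb()            /* mirrors the new r128_flush_write_combine() */

/* Illustrative stand-ins for dev_priv->sarea_priv->ring_write and
 * R128_WRITE(R128_PM4_BUFFER_DL_WPTR, ...). */
struct ring_state {
        volatile int *shared_wptr;      /* write pointer visible to clients */
};

static void kick_ring(struct ring_state *ring, int write, void (*poke_wptr)(int))
{
        FLUSH_WC();                     /* drain WC buffers: the queued commands
                                         * must be visible before the doorbell */
        *ring->shared_wptr = write;
        poke_wptr(write);               /* hardware starts fetching from here */
}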

View File

@@ -35,7 +35,7 @@
#define R128_NAME "r128"
#define R128_DESC "ATI Rage 128"
#define R128_DATE "20000719"
#define R128_DATE "20000906"
#define R128_MAJOR 1
#define R128_MINOR 0
#define R128_PATCHLEVEL 0
@@ -420,17 +420,18 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = R128_MAJOR;
@@ -441,10 +442,10 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, R128_DATE);
DRM_COPY(version.desc, R128_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
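
The reworked DRM_COPY above does two jobs per string: it clamps the copy to the buffer length the caller passed in, and it writes the full strlen() back through name##_len so user space can detect truncation. The same logic as a standalone helper; copy_version_string() is an illustrative name, not an r128/tdfx function:

#include <linux/errno.h>
#include <linux/string.h>
#include <asm/uaccess.h>

/* 'to' is a user pointer, 'to_len' is the kernel-side length field of the
 * version struct that gets copied back to the caller afterwards. */
static int copy_version_string(char *to, int *to_len, const char *value)
{
        int len = strlen(value);

        if (len > *to_len)
                len = *to_len;          /* never overrun the caller's buffer */
        *to_len = strlen(value);        /* report the full length back */
        if (len && to) {
                if (copy_to_user(to, value, len))
                        return -EFAULT;
        }
        return 0;
}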
@@ -559,7 +560,8 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@@ -664,7 +666,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->sigdata.context = lock.context;
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
@@ -699,7 +700,8 @@ int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@@ -728,7 +730,6 @@ int r128_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
current->priority = DEF_PRIORITY;
}
#endif
unblock_all_signals();
return 0;
}

View File

@@ -105,19 +105,21 @@ int tdfx_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
copy_to_user_ret(&res.contexts[i],
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i),
-EFAULT);
sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
@@ -129,7 +131,8 @@ int tdfx_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = tdfx_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = tdfx_alloc_queue(dev);
@@ -141,7 +144,8 @@ int tdfx_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
return -ENOMEM;
}
copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@@ -150,7 +154,8 @@ int tdfx_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
if (ctx.flags==_DRM_CONTEXT_PRESERVED)
tdfx_res_ctx.handle=ctx.handle;
return 0;
@@ -161,10 +166,12 @@ int tdfx_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
{
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
/* This is 0, because we don't hanlde any context flags */
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@@ -175,7 +182,8 @@ int tdfx_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return tdfx_context_switch(dev, dev->last_context, ctx.handle);
}
@@ -187,7 +195,8 @@ int tdfx_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
tdfx_context_switch_complete(dev, ctx.handle);
@@ -201,7 +210,8 @@ int tdfx_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
drm_ctxbitmap_free(dev, ctx.handle);

View File

@@ -36,7 +36,7 @@
#define TDFX_NAME "tdfx"
#define TDFX_DESC "3dfx Banshee/Voodoo3+"
#define TDFX_DATE "20000719"
#define TDFX_DATE "20000906"
#define TDFX_MAJOR 1
#define TDFX_MINOR 0
#define TDFX_PATCHLEVEL 0
@@ -379,17 +379,18 @@ int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
drm_version_t version;
int len;
copy_from_user_ret(&version,
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = TDFX_MAJOR;
@@ -400,10 +401,10 @@ int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_COPY(version.date, TDFX_DATE);
DRM_COPY(version.desc, TDFX_DESC);
copy_to_user_ret((drm_version_t *)arg,
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
sizeof(version)))
return -EFAULT;
return 0;
}
@@ -518,7 +519,8 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@@ -615,7 +617,6 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
#endif
if (!ret) {
#if LINUX_VERSION_CODE >= 0x020400 /* KERNEL_VERSION(2,4,0) */
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
@@ -624,7 +625,7 @@ int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
dev->sigdata.context = lock.context;
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
#endif
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
@@ -659,7 +660,8 @@ int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
drm_device_t *dev = priv->dev;
drm_lock_t lock;
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@@ -689,8 +691,6 @@ int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
}
#endif
#if LINUX_VERSION_CODE >= 0x020400 /* KERNEL_VERSION(2,4,0) */
unblock_all_signals();
#endif
return 0;
}
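
tdfx_lock()/tdfx_unlock() end up matching the r128 path above: once the heavyweight lock is held, the stop-style signals are routed through a notifier via block_all_signals(), unlock undoes it with unblock_all_signals(), and the LINUX_VERSION_CODE guards around the calls are dropped. A hedged sketch of that bracket; the notifier convention shown (return 0 to hold the signal while the lock is owned) is an assumption about the 2.4 API, and the driver's real notifier is drm_notifier() in the DRM core:

#include <linux/sched.h>                /* block_all_signals()/unblock_all_signals() */
#include <linux/signal.h>

/* Illustrative notifier, not a copy of drm_notifier(). */
static int hold_signals_while_locked(void *priv)
{
        int *lock_held = priv;

        return *lock_held ? 0 : 1;      /* assumed: 0 means "hold the signal" */
}

static void enter_locked_section(int *lock_held, sigset_t *mask)
{
        sigemptyset(mask);
        sigaddset(mask, SIGSTOP);
        sigaddset(mask, SIGTSTP);
        *lock_held = 1;
        block_all_signals(hold_signals_while_locked, lock_held, mask);
}

static void leave_locked_section(int *lock_held)
{
        *lock_held = 0;
        unblock_all_signals();
}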