author     Eric Anholt <eric@anholt.net>           2007-09-02 22:06:45 -0400
committer  Dave Airlie <airlied@optimus.(none)>    2007-10-14 20:38:20 -0400
commit     c153f45f9b7e30289157bba3ff5682291df16caa (patch)
tree       33f21e1ebd83ec548751f3d490afe6230ab99972 /drivers/char/drm/drm_bufs.c
parent     b589ee5943a9610ebaea6e4e3433f2ae4d812b0b (diff)
drm: Replace DRM_IOCTL_ARGS with (dev, data, file_priv) and remove DRM_DEVICE.
The data is now in kernel space, copied in/out as appropriate according to the
ioctl flags. This results in DRM_COPY_{TO,FROM}_USER going away, and error
paths to deal with those failures. This also means that XFree86 4.2.0 support
for i810 DRM is lost.
Signed-off-by: Dave Airlie <airlied@linux.ie>
Diffstat (limited to 'drivers/char/drm/drm_bufs.c')
-rw-r--r--    drivers/char/drm/drm_bufs.c    165
1 file changed, 57 insertions, 108 deletions
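
The conversion is easiest to see on a single handler. The sketch below is illustrative rather than verbatim (the _old/_new suffixes and the trimmed bodies are mine, not part of the patch): with the new calling convention the ioctl dispatcher hands the handler a kernel-space copy of the argument struct in data, so the per-handler copy_from_user()/copy_to_user() calls and their error paths disappear, and results are returned simply by writing through the pointer.

/* Old-style handler: digs the device out of file_priv and does its own
 * copying between user and kernel space. */
int drm_addmap_ioctl_old(struct inode *inode, struct drm_file *file_priv,
                         unsigned int cmd, unsigned long arg)
{
        struct drm_device *dev = file_priv->head->dev;
        struct drm_map map;

        if (copy_from_user(&map, (void __user *)arg, sizeof(map)))
                return -EFAULT;
        /* ... operate on &map ... */
        if (copy_to_user((void __user *)arg, &map, sizeof(map)))
                return -EFAULT;
        return 0;
}

/* New-style handler: the dispatcher has already copied the argument into
 * kernel memory (and copies it back out for ioctls whose flags say so),
 * so the handler just uses the data pointer directly. */
int drm_addmap_ioctl_new(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_map *map = data;

        /* ... operate on map; changes are copied back out by the caller ... */
        return 0;
}
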
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index e7253874fa8f..856774fbe025 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -332,34 +332,24 @@ int drm_addmap(struct drm_device * dev, unsigned int offset,

 EXPORT_SYMBOL(drm_addmap);

-int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv,
-                     unsigned int cmd, unsigned long arg)
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
 {
-        struct drm_device *dev = file_priv->head->dev;
-        struct drm_map map;
+        struct drm_map *map = data;
         struct drm_map_list *maplist;
-        struct drm_map __user *argp = (void __user *)arg;
         int err;

-        if (copy_from_user(&map, argp, sizeof(map))) {
-                return -EFAULT;
-        }
-
-        if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
+        if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
                 return -EPERM;

-        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
-                              &maplist);
+        err = drm_addmap_core(dev, map->offset, map->size, map->type,
+                              map->flags, &maplist);

         if (err)
                 return err;

-        if (copy_to_user(argp, maplist->map, sizeof(struct drm_map)))
-                return -EFAULT;
-
         /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
-        if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
-                return -EFAULT;
+        map->handle = (void *)(unsigned long)maplist->user_token;
         return 0;
 }

@@ -449,23 +439,18 @@ int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
  * gets used by drivers that the server doesn't need to care about. This seems
  * unlikely.
  */
-int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv,
-                    unsigned int cmd, unsigned long arg)
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
 {
-        struct drm_device *dev = file_priv->head->dev;
-        struct drm_map request;
+        struct drm_map *request = data;
         drm_local_map_t *map = NULL;
         struct drm_map_list *r_list;
         int ret;

-        if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) {
-                return -EFAULT;
-        }
-
         mutex_lock(&dev->struct_mutex);
         list_for_each_entry(r_list, &dev->maplist, head) {
                 if (r_list->map &&
-                    r_list->user_token == (unsigned long)request.handle &&
+                    r_list->user_token == (unsigned long)request->handle &&
                     r_list->map->flags & _DRM_REMOVABLE) {
                         map = r_list->map;
                         break;
@@ -1280,37 +1265,27 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
  * PCI memory respectively.
  */
-int drm_addbufs(struct inode *inode, struct drm_file *file_priv,
-                unsigned int cmd, unsigned long arg)
+int drm_addbufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
 {
-        struct drm_buf_desc request;
-        struct drm_device *dev = file_priv->head->dev;
+        struct drm_buf_desc *request = data;
         int ret;

         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                 return -EINVAL;

-        if (copy_from_user(&request, (struct drm_buf_desc __user *) arg,
-                           sizeof(request)))
-                return -EFAULT;
-
 #if __OS_HAS_AGP
-        if (request.flags & _DRM_AGP_BUFFER)
-                ret = drm_addbufs_agp(dev, &request);
+        if (request->flags & _DRM_AGP_BUFFER)
+                ret = drm_addbufs_agp(dev, request);
         else
 #endif
-        if (request.flags & _DRM_SG_BUFFER)
-                ret = drm_addbufs_sg(dev, &request);
-        else if (request.flags & _DRM_FB_BUFFER)
-                ret = drm_addbufs_fb(dev, &request);
+        if (request->flags & _DRM_SG_BUFFER)
+                ret = drm_addbufs_sg(dev, request);
+        else if (request->flags & _DRM_FB_BUFFER)
+                ret = drm_addbufs_fb(dev, request);
         else
-                ret = drm_addbufs_pci(dev, &request);
+                ret = drm_addbufs_pci(dev, request);

-        if (ret == 0) {
-                if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
-                        ret = -EFAULT;
-                }
-        }
         return ret;
 }

@@ -1331,13 +1306,11 @@ int drm_addbufs(struct inode *inode, struct drm_file *file_priv,
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
-int drm_infobufs(struct inode *inode, struct drm_file *file_priv,
-                 unsigned int cmd, unsigned long arg)
+int drm_infobufs(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
 {
-        struct drm_device *dev = file_priv->head->dev;
         struct drm_device_dma *dma = dev->dma;
-        struct drm_buf_info request;
-        struct drm_buf_info __user *argp = (void __user *)arg;
+        struct drm_buf_info *request = data;
         int i;
         int count;

@@ -1355,9 +1328,6 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv,
         ++dev->buf_use;         /* Can't allocate more after this call */
         spin_unlock(&dev->count_lock);

-        if (copy_from_user(&request, argp, sizeof(request)))
-                return -EFAULT;
-
         for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                 if (dma->bufs[i].buf_count)
                         ++count;
@@ -1365,11 +1335,11 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv,

         DRM_DEBUG("count = %d\n", count);

-        if (request.count >= count) {
+        if (request->count >= count) {
                 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                         if (dma->bufs[i].buf_count) {
                                 struct drm_buf_desc __user *to =
-                                    &request.list[count];
+                                    &request->list[count];
                                 struct drm_buf_entry *from = &dma->bufs[i];
                                 struct drm_freelist *list = &dma->bufs[i].freelist;
                                 if (copy_to_user(&to->count,
@@ -1396,10 +1366,7 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv,
                         }
                 }
         }
-        request.count = count;
-
-        if (copy_to_user(argp, &request, sizeof(request)))
-                return -EFAULT;
+        request->count = count;

         return 0;
 }
@@ -1418,12 +1385,11 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv,
  *
  * \note This ioctl is deprecated and mostly never used.
  */
-int drm_markbufs(struct inode *inode, struct drm_file *file_priv,
-                 unsigned int cmd, unsigned long arg)
+int drm_markbufs(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
 {
-        struct drm_device *dev = file_priv->head->dev;
         struct drm_device_dma *dma = dev->dma;
-        struct drm_buf_desc request;
+        struct drm_buf_desc *request = data;
         int order;
         struct drm_buf_entry *entry;

@@ -1433,24 +1399,20 @@ int drm_markbufs(struct inode *inode, struct drm_file *file_priv,
         if (!dma)
                 return -EINVAL;

-        if (copy_from_user(&request,
-                           (struct drm_buf_desc __user *) arg, sizeof(request)))
-                return -EFAULT;
-
         DRM_DEBUG("%d, %d, %d\n",
-                  request.size, request.low_mark, request.high_mark);
-        order = drm_order(request.size);
+                  request->size, request->low_mark, request->high_mark);
+        order = drm_order(request->size);
         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                 return -EINVAL;
         entry = &dma->bufs[order];

-        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
+        if (request->low_mark < 0 || request->low_mark > entry->buf_count)
                 return -EINVAL;
-        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
+        if (request->high_mark < 0 || request->high_mark > entry->buf_count)
                 return -EINVAL;

-        entry->freelist.low_mark = request.low_mark;
-        entry->freelist.high_mark = request.high_mark;
+        entry->freelist.low_mark = request->low_mark;
+        entry->freelist.high_mark = request->high_mark;

         return 0;
 }
@@ -1467,12 +1429,11 @@ int drm_markbufs(struct inode *inode, struct drm_file *file_priv,
  * Calls free_buffer() for each used buffer.
  * This function is primarily used for debugging.
  */
-int drm_freebufs(struct inode *inode, struct drm_file *file_priv,
-                 unsigned int cmd, unsigned long arg)
+int drm_freebufs(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
 {
-        struct drm_device *dev = file_priv->head->dev;
         struct drm_device_dma *dma = dev->dma;
-        struct drm_buf_free request;
+        struct drm_buf_free *request = data;
         int i;
         int idx;
         struct drm_buf *buf;
@@ -1483,13 +1444,9 @@ int drm_freebufs(struct inode *inode, struct drm_file *file_priv,
         if (!dma)
                 return -EINVAL;

-        if (copy_from_user(&request,
-                           (struct drm_buf_free __user *) arg, sizeof(request)))
-                return -EFAULT;
-
-        DRM_DEBUG("%d\n", request.count);
-        for (i = 0; i < request.count; i++) {
-                if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
+        DRM_DEBUG("%d\n", request->count);
+        for (i = 0; i < request->count; i++) {
+                if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
                         return -EFAULT;
                 if (idx < 0 || idx >= dma->buf_count) {
                         DRM_ERROR("Index %d (of %d max)\n",
@@ -1522,17 +1479,15 @@ int drm_freebufs(struct inode *inode, struct drm_file *file_priv,
  * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
  * drm_mmap_dma().
  */
-int drm_mapbufs(struct inode *inode, struct drm_file *file_priv,
-                unsigned int cmd, unsigned long arg)
+int drm_mapbufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
 {
-        struct drm_device *dev = file_priv->head->dev;
         struct drm_device_dma *dma = dev->dma;
-        struct drm_buf_map __user *argp = (void __user *)arg;
         int retcode = 0;
         const int zero = 0;
         unsigned long virtual;
         unsigned long address;
-        struct drm_buf_map request;
+        struct drm_buf_map *request = data;
         int i;

         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1549,10 +1504,7 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv,
         dev->buf_use++;         /* Can't allocate more after this call */
         spin_unlock(&dev->count_lock);

-        if (copy_from_user(&request, argp, sizeof(request)))
-                return -EFAULT;
-
-        if (request.count >= dma->buf_count) {
+        if (request->count >= dma->buf_count) {
                 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                     || (drm_core_check_feature(dev, DRIVER_SG)
                         && (dma->flags & _DRM_DMA_USE_SG))
@@ -1565,11 +1517,11 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv,
                         retcode = -EINVAL;
                         goto done;
                 }
-
                 down_write(&current->mm->mmap_sem);
                 virtual = do_mmap(file_priv->filp, 0, map->size,
                                   PROT_READ | PROT_WRITE,
-                                  MAP_SHARED, token);
+                                  MAP_SHARED,
+                                  token);
                 up_write(&current->mm->mmap_sem);
         } else {
                 down_write(&current->mm->mmap_sem);
@@ -1583,28 +1535,28 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv,
                 retcode = (signed long)virtual;
                 goto done;
         }
-        request.virtual = (void __user *)virtual;
+        request->virtual = (void __user *)virtual;

         for (i = 0; i < dma->buf_count; i++) {
-                if (copy_to_user(&request.list[i].idx,
+                if (copy_to_user(&request->list[i].idx,
                                  &dma->buflist[i]->idx,
-                                 sizeof(request.list[0].idx))) {
+                                 sizeof(request->list[0].idx))) {
                         retcode = -EFAULT;
                         goto done;
                 }
-                if (copy_to_user(&request.list[i].total,
+                if (copy_to_user(&request->list[i].total,
                                  &dma->buflist[i]->total,
-                                 sizeof(request.list[0].total))) {
+                                 sizeof(request->list[0].total))) {
                         retcode = -EFAULT;
                         goto done;
                 }
-                if (copy_to_user(&request.list[i].used,
+                if (copy_to_user(&request->list[i].used,
                                  &zero, sizeof(zero))) {
                         retcode = -EFAULT;
                         goto done;
                 }
                 address = virtual + dma->buflist[i]->offset;    /* *** */
-                if (copy_to_user(&request.list[i].address,
+                if (copy_to_user(&request->list[i].address,
                                  &address, sizeof(address))) {
                         retcode = -EFAULT;
                         goto done;
@@ -1612,11 +1564,8 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv,
                 }
         }
       done:
-        request.count = dma->buf_count;
-        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
-
-        if (copy_to_user(argp, &request, sizeof(request)))
-                return -EFAULT;
+        request->count = dma->buf_count;
+        DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

         return retcode;
 }