Diffstat (limited to 'drivers/char/drm/drm_bufs.c')
-rw-r--r--	drivers/char/drm/drm_bufs.c | 80
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 1db12dcb6802..e2637b4d51de 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -255,14 +255,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 	memset(list, 0, sizeof(*list));
 	list->map = map;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	list_add(&list->head, &dev->maplist->head);
 	/* Assign a 32-bit handle */
-	/* We do it here so that dev->struct_sem protects the increment */
+	/* We do it here so that dev->struct_mutex protects the increment */
 	list->user_token = HandleID(map->type == _DRM_SHM
 				    ? (unsigned long)map->handle
 				    : map->offset, dev);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	*maplist = list;
 	return 0;
@@ -392,9 +392,9 @@ int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
 {
 	int ret;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	ret = drm_rmmap_locked(dev, map);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
@@ -423,7 +423,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
 		return -EFAULT;
 	}
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	list_for_each(list, &dev->maplist->head) {
 		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
 
@@ -439,7 +439,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
 	 * find anything.
 	 */
	if (list == (&dev->maplist->head)) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
@@ -448,13 +448,13 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
 
 	/* Register and framebuffer maps are permanent */
 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
 
 	ret = drm_rmmap_locked(dev, map);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
@@ -566,16 +566,16 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM; /* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
@@ -583,7 +583,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -616,7 +616,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
@@ -638,7 +638,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -656,7 +656,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;
@@ -722,16 +722,16 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM; /* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
@@ -739,7 +739,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -750,7 +750,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 	if (!entry->seglist) {
 		drm_free(entry->buflist,
 			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -766,7 +766,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
 		drm_free(entry->seglist,
 			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -790,7 +790,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 			drm_free(temp_pagelist,
 				 (dma->page_count + (count << page_order))
 				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
@@ -831,7 +831,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 					  (count << page_order))
 					 * sizeof(*dma->pagelist),
 					 DRM_MEM_PAGES);
-				up(&dev->struct_sem);
+				mutex_unlock(&dev->struct_mutex);
 				atomic_dec(&dev->buf_alloc);
 				return -ENOMEM;
 			}
@@ -853,7 +853,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 		drm_free(temp_pagelist,
 			 (dma->page_count + (count << page_order))
 			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -878,7 +878,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 	dma->page_count += entry->seg_count << page_order;
 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;
@@ -948,16 +948,16 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM; /* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
@@ -965,7 +965,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -999,7 +999,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
@@ -1022,7 +1022,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -1040,7 +1040,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;
@@ -1110,16 +1110,16 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM; /* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
@@ -1127,7 +1127,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -1160,7 +1160,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
@@ -1182,7 +1182,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -1200,7 +1200,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;
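Every hunk above is the same mechanical substitution: the semaphore dev->struct_sem, used in this file only as a plain lock via down()/up(), becomes the mutex dev->struct_mutex taken with mutex_lock()/mutex_unlock(), and the code inside each critical section is left untouched. A minimal sketch of the pattern, assuming kernel context; the helper function below is hypothetical and not taken from drm_bufs.c:

#include <linux/mutex.h>

static int example_struct_mutex_section(drm_device_t *dev)
{
	/* was: down(&dev->struct_sem); */
	mutex_lock(&dev->struct_mutex);

	/* ... manipulate dev->maplist / dma->bufs under the lock ... */

	/* was: up(&dev->struct_sem); */
	mutex_unlock(&dev->struct_mutex);
	return 0;
}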