aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/sg.c
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2006-03-22 03:08:30 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-03-22 10:54:01 -0500
commitf9aed0e2537174b95908f48b6052ae37196c9390 (patch)
tree0b5cd42f36f08086f0258a753203b840123c9301 /drivers/scsi/sg.c
parenta6f563db09c54c80d80e9013182dc512a5e53d0f (diff)
[PATCH] sg: use compound pages
sg increments the refcount of constituent pages in its higher order memory allocations when they are about to be mapped by userspace. This is done so the subsequent get_page/put_page when doing the mapping and unmapping does not free the page. Move over to the preferred way, that is, using compound pages instead. This fixes a whole class of possible obscure bugs where a get_user_pages on a constituent page may outlast the user mappings or even the driver. Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Hugh Dickins <hugh@veritas.com> Cc: Douglas Gilbert <dougg@torque.net> Cc: James Bottomley <James.Bottomley@steeleye.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/scsi/sg.c')
-rw-r--r--  drivers/scsi/sg.c  37
1 file changed, 3 insertions(+), 34 deletions(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5a0a19322d01..0e0ca8fc7318 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1140,32 +1140,6 @@ sg_fasync(int fd, struct file *filp, int mode)
1140 return (retval < 0) ? retval : 0; 1140 return (retval < 0) ? retval : 0;
1141} 1141}
1142 1142
1143/* When startFinish==1 increments page counts for pages other than the
1144 first of scatter gather elements obtained from alloc_pages().
1145 When startFinish==0 decrements ... */
1146static void
1147sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
1148{
1149 struct scatterlist *sg = rsv_schp->buffer;
1150 struct page *page;
1151 int k, m;
1152
1153 SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
1154 startFinish, rsv_schp->k_use_sg));
1155 /* N.B. correction _not_ applied to base page of each allocation */
1156 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
1157 for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) {
1158 page = sg->page;
1159 if (startFinish)
1160 get_page(page);
1161 else {
1162 if (page_count(page) > 0)
1163 __put_page(page);
1164 }
1165 }
1166 }
1167}
1168
1169static struct page * 1143static struct page *
1170sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) 1144sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1171{ 1145{
@@ -1237,10 +1211,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1237 sa += len; 1211 sa += len;
1238 } 1212 }
1239 1213
1240 if (0 == sfp->mmap_called) { 1214 sfp->mmap_called = 1;
1241 sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
1242 sfp->mmap_called = 1;
1243 }
1244 vma->vm_flags |= VM_RESERVED; 1215 vma->vm_flags |= VM_RESERVED;
1245 vma->vm_private_data = sfp; 1216 vma->vm_private_data = sfp;
1246 vma->vm_ops = &sg_mmap_vm_ops; 1217 vma->vm_ops = &sg_mmap_vm_ops;
@@ -2395,8 +2366,6 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2395 SCSI_LOG_TIMEOUT(6, 2366 SCSI_LOG_TIMEOUT(6,
2396 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", 2367 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2397 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); 2368 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2398 if (sfp->mmap_called)
2399 sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */
2400 sg_remove_scat(&sfp->reserve); 2369 sg_remove_scat(&sfp->reserve);
2401 } 2370 }
2402 sfp->parentdp = NULL; 2371 sfp->parentdp = NULL;
@@ -2478,9 +2447,9 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2478 return resp; 2447 return resp;
2479 2448
2480 if (lowDma) 2449 if (lowDma)
2481 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN; 2450 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2482 else 2451 else
2483 page_mask = GFP_ATOMIC | __GFP_NOWARN; 2452 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2484 2453
2485 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz; 2454 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2486 order++, a_size <<= 1) ; 2455 order++, a_size <<= 1) ;