| author | Jeff Dike <jdike@addtoit.com> | 2007-07-16 02:38:47 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-16 12:05:38 -0400 |
| commit | 0a6d3a2a3813e7b25267366cfbf9a4a4698dd1c2 (patch) | |
| tree | 98b0c20ad616258dae150a04307ce3a2ad90adf1 /arch/um | |
| parent | 7ff9057db7edeb210fd5c00314c52154922d8868 (diff) | |
uml: fix request->sector update
It is theoretically possible for a request to finish and be freed between the time its io_thread_req is written to the I/O thread and the time the request's sector count is updated. In that case, the update dereferences a freed pointer.
To avoid this, I delay the update until the next sg segment is processed, when the request pointer is known to be valid.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
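The pattern can be seen in isolation in the following userspace sketch (hypothetical names, not the kernel driver code): `hand_off_segment()` stands in for writing the io_thread_req to the I/O thread, after which the request may be completed and freed at any moment, so the sector update for a segment is carried over to the top of the next loop iteration, where the request is still known to be live.

```c
/*
 * Minimal userspace sketch of the pattern (hypothetical names, not the
 * kernel code).  hand_off_segment() models writing an io_thread_req to
 * the I/O thread; once it returns, the request may be completed and
 * freed by that thread at any time.
 */
#include <stddef.h>

struct request { unsigned long long sector; };
struct segment { size_t length; };

static void hand_off_segment(struct request *req, struct segment *sg)
{
	/* Stand-in for os_write_file(thread_fd, ...); after this point the
	 * other thread owns the work and may free the request. */
	(void)req;
	(void)sg;
}

static void submit_segments(struct request *req, struct segment *sg,
			    size_t nsegs)
{
	size_t last_sectors = 0;

	for (size_t i = 0; i < nsegs; i++) {
		/* Apply the previous segment's sectors here, while the
		 * request pointer is still known to be valid. */
		req->sector += last_sectors;

		last_sectors = sg[i].length >> 9;
		hand_off_segment(req, &sg[i]);
		/* Do NOT touch req here: it may already be freed. */
	}
}

int main(void)
{
	struct request req = { .sector = 0 };
	struct segment sgs[2] = { { .length = 4096 }, { .length = 8192 } };

	submit_segments(&req, sgs, 2);
	return 0;
}
```

After the last segment has been handed off, the leftover last_sectors value is simply dropped: nothing needs the sector field any more, so the possibly stale request pointer is never touched.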
Diffstat (limited to 'arch/um')
-rw-r--r-- | arch/um/drivers/ubd_kern.c | 6 |
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 2e09f162c42f..0947f2e20045 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1083,7 +1083,7 @@ static void do_ubd_request(request_queue_t *q)
 {
 	struct io_thread_req *io_req;
 	struct request *req;
-	int n;
+	int n, last_sectors;
 
 	while(1){
 		struct ubd *dev = q->queuedata;
@@ -1099,9 +1099,11 @@ static void do_ubd_request(request_queue_t *q)
 		}
 
 		req = dev->request;
+		last_sectors = 0;
 		while(dev->start_sg < dev->end_sg){
 			struct scatterlist *sg = &dev->sg[dev->start_sg];
 
+			req->sector += last_sectors;
 			io_req = kmalloc(sizeof(struct io_thread_req),
 					 GFP_ATOMIC);
 			if(io_req == NULL){
@@ -1113,6 +1115,7 @@ static void do_ubd_request(request_queue_t *q)
 				       (unsigned long long) req->sector << 9,
 				       sg->offset, sg->length, sg->page);
 
+			last_sectors = sg->length >> 9;
 			n = os_write_file(thread_fd, &io_req,
 					  sizeof(struct io_thread_req *));
 			if(n != sizeof(struct io_thread_req *)){
@@ -1124,7 +1127,6 @@ static void do_ubd_request(request_queue_t *q)
 				return;
 			}
 
-			req->sector += sg->length >> 9;
 			dev->start_sg++;
 		}
 		dev->end_sg = 0;