about summary refs log tree commit diff stats
path: root/drivers/block
diff options
context:
space:
mode:
author: Nisheeth Bhat <nisheeth.bhat@intel.com> 2011-09-29 10:10:10 -0400
committer: Matthew Wilcox <matthew.r.wilcox@intel.com> 2011-11-04 15:53:04 -0400
commit0d1bc9125890426b52ca2de6abedd32e31722e5c (patch)
treec62cff35187c87c8a2b1f6dafdfc577cba94c137 /drivers/block
parentbc5fc7e4b22ca855902aba02b28c96f09b446407 (diff)
Fix calculation of number of pages in a PRP List
The existing calculation underestimated the number of pages required as it did not take into account the pointer at the end of each page. The replacement calculation may overestimate the number of pages required if the last page in the PRP List is entirely full. By using ->npages as a counter as we fill in the pages, we ensure that we don't try to free a page that was never allocated.

Signed-off-by: Nisheeth Bhat <nisheeth.bhat@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/nvme.c14
1 files changed, 7 insertions, 7 deletions
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index b77894a7585..3afdc750aaa 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -265,7 +265,7 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
265} 265}
266 266
267struct nvme_prps { 267struct nvme_prps {
268 int npages; 268 int npages; /* 0 means small pool in use */
269 dma_addr_t first_dma; 269 dma_addr_t first_dma;
270 __le64 *list[0]; 270 __le64 *list[0];
271}; 271};
@@ -347,7 +347,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
347 int offset = offset_in_page(dma_addr); 347 int offset = offset_in_page(dma_addr);
348 __le64 *prp_list; 348 __le64 *prp_list;
349 dma_addr_t prp_dma; 349 dma_addr_t prp_dma;
350 int nprps, npages, i, prp_page; 350 int nprps, npages, i;
351 struct nvme_prps *prps = NULL; 351 struct nvme_prps *prps = NULL;
352 352
353 cmd->prp1 = cpu_to_le64(dma_addr); 353 cmd->prp1 = cpu_to_le64(dma_addr);
@@ -370,20 +370,20 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
370 } 370 }
371 371
372 nprps = DIV_ROUND_UP(length, PAGE_SIZE); 372 nprps = DIV_ROUND_UP(length, PAGE_SIZE);
373 npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE); 373 npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
374 prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp); 374 prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
375 if (!prps) { 375 if (!prps) {
376 cmd->prp2 = cpu_to_le64(dma_addr); 376 cmd->prp2 = cpu_to_le64(dma_addr);
377 *len = (*len - length) + PAGE_SIZE; 377 *len = (*len - length) + PAGE_SIZE;
378 return prps; 378 return prps;
379 } 379 }
380 prp_page = 0; 380
381 if (nprps <= (256 / 8)) { 381 if (nprps <= (256 / 8)) {
382 pool = dev->prp_small_pool; 382 pool = dev->prp_small_pool;
383 prps->npages = 0; 383 prps->npages = 0;
384 } else { 384 } else {
385 pool = dev->prp_page_pool; 385 pool = dev->prp_page_pool;
386 prps->npages = npages; 386 prps->npages = 1;
387 } 387 }
388 388
389 prp_list = dma_pool_alloc(pool, gfp, &prp_dma); 389 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
@@ -393,7 +393,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
393 kfree(prps); 393 kfree(prps);
394 return NULL; 394 return NULL;
395 } 395 }
396 prps->list[prp_page++] = prp_list; 396 prps->list[0] = prp_list;
397 prps->first_dma = prp_dma; 397 prps->first_dma = prp_dma;
398 cmd->prp2 = cpu_to_le64(prp_dma); 398 cmd->prp2 = cpu_to_le64(prp_dma);
399 i = 0; 399 i = 0;
@@ -405,7 +405,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
405 *len = (*len - length); 405 *len = (*len - length);
406 return prps; 406 return prps;
407 } 407 }
408 prps->list[prp_page++] = prp_list; 408 prps->list[prps->npages++] = prp_list;
409 prp_list[0] = old_prp_list[i - 1]; 409 prp_list[0] = old_prp_list[i - 1];
410 old_prp_list[i - 1] = cpu_to_le64(prp_dma); 410 old_prp_list[i - 1] = cpu_to_le64(prp_dma);
411 i = 1; 411 i = 1;