author	Mike Snitzer <snitzer@redhat.com>	2011-11-23 04:59:13 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-11-23 04:59:13 -0500
commit	5151412dd4338b273afdb107c3772528e9e67d92
tree	ce88b8d64d903cf99b2ee3f5c66af143d23b0bc6 /block
parent	b4bbb02934e4511d9083f15c23e90703482e84ad
block: initialize request_queue's numa node during
struct request_queue is allocated with __GFP_ZERO so its "node" field is
zero before initialization.  This causes an oops if node 0 is offline in
the page allocator because its zonelists are not initialized.  From Dave
Young's dmesg:

	SRAT: Node 1 PXM 2 0-d0000000
	SRAT: Node 1 PXM 2 100000000-330000000
	SRAT: Node 0 PXM 1 330000000-630000000
	Initmem setup node 1 0000000000000000-000000000affb000
	...
	Built 1 zonelists in Node order, mobility grouping on.
	...
	BUG: unable to handle kernel paging request at 0000000000001c08
	IP: [<ffffffff8111c355>] __alloc_pages_nodemask+0xb5/0x870

and __alloc_pages_nodemask+0xb5 translates to a NULL pointer on
zonelist->_zonerefs.

The fix is to initialize q->node at the time of allocation so the
correct node is passed to the slab allocator later.

Since blk_init_allocated_queue_node() is no longer needed, merge it
with blk_init_allocated_queue().

[rientjes@google.com: changelog, initializing q->node]
Cc: stable@vger.kernel.org [2.6.37+]
Reported-by: Dave Young <dyoung@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Tested-by: Dave Young <dyoung@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
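For illustration, a minimal userspace sketch of the failure mode and the
fix follows; fake_queue, alloc_on_node() and alloc_queue() are invented
stand-ins, not kernel API.  calloc() plays the role of __GFP_ZERO: every
field of the fresh structure reads as zero, so an uninitialized NUMA-node
field is indistinguishable from an explicit request for node 0.  Recording
the caller's node right after allocation (or a -1 "any node" sentinel, as
the kernel's NUMA_NO_NODE is defined) keeps later allocations from being
steered at a node that may be offline.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct request_queue; "node" mirrors q->node. */
struct fake_queue {
	int node;	/* NUMA node to allocate from; -1 means "any node" */
	/* ... other fields ... */
};

/* Stand-in for the slab allocator; node 0 is pretend-offline here. */
static void *alloc_on_node(size_t size, int node)
{
	if (node == 0) {
		/* In the kernel this was an oops: node 0 had no zonelists. */
		fprintf(stderr, "BUG: allocation on offline node 0\n");
		return NULL;
	}
	return malloc(size);	/* this sketch otherwise ignores the hint */
}

static struct fake_queue *alloc_queue(int node_id)
{
	/* calloc() plays the role of __GFP_ZERO: all fields start at 0. */
	struct fake_queue *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	/*
	 * The fix: record the caller's node immediately.  Without this
	 * line q->node stays 0 and later allocations target node 0.
	 */
	q->node = node_id;
	return q;
}

int main(void)
{
	struct fake_queue *q = alloc_queue(1);	/* caller wants node 1 */
	void *rq;

	if (!q)
		return 1;
	/* Later, request structures are carved out on q->node. */
	rq = alloc_on_node(64, q->node);
	printf("allocated from node %d: %s\n", q->node, rq ? "ok" : "failed");
	free(rq);
	free(q);
	return 0;
}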
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	14
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c80cd3..20d69f6beb6b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -467,6 +467,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	q->backing_dev_info.name = "block";
+	q->node = node_id;
 
 	err = bdi_init(&q->backing_dev_info);
 	if (err) {
@@ -551,7 +552,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 		return NULL;
 
-	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
 		blk_cleanup_queue(uninit_q);
 
@@ -563,18 +564,9 @@ struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 			 spinlock_t *lock)
 {
-	return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-			      spinlock_t *lock, int node_id)
-{
 	if (!q)
 		return NULL;
 
-	q->node = node_id;
 	if (blk_init_free_list(q))
 		return NULL;
 
@@ -604,7 +596,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
 	return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {