author     Dave Airlie <airlied@redhat.com>  2015-05-20 02:23:53 -0400
committer  Dave Airlie <airlied@redhat.com>  2015-05-20 02:23:53 -0400
commit     bdcddf95e82b1c4e370fc1196b1f4f50f775dab4 (patch)
tree       ef2af2b3faee1f8e8287ca45d265809f56fbd0f6 /block
parent     91d9f9856f91c82ac6289a0fff65dd12cfa07e34 (diff)
parent     e26081808edadfd257c6c9d81014e3b25e9a6118 (diff)
Backmerge v4.1-rc4 into drm-next

We picked up a silent conflict in amdkfd with drm-fixes and drm-next,
backmerge v4.1-rc5 and fix the conflicts

Signed-off-by: Dave Airlie <airlied@redhat.com>

Conflicts:
        drivers/gpu/drm/drm_irq.c
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c   |  2
-rw-r--r--  block/blk-mq.c     | 60
-rw-r--r--  block/blk-sysfs.c  |  2
-rw-r--r--  block/bounce.c     |  2
-rw-r--r--  block/elevator.c   |  6
5 files changed, 40 insertions(+), 32 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fd154b94447a..7871603f0a29 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q)
         q->queue_lock = &q->__queue_lock;
         spin_unlock_irq(lock);
 
+        bdi_destroy(&q->backing_dev_info);
+
         /* @q is and will stay empty, shutdown and put */
         blk_put_queue(q);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ade8a2d1b0aa..e68b71b85a7e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv)
                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
                 mod_timer(&q->timeout, data.next);
         } else {
-                queue_for_each_hw_ctx(q, hctx, i)
-                        blk_mq_tag_idle(hctx);
+                queue_for_each_hw_ctx(q, hctx, i) {
+                        /* the hctx may be unmapped, so check it here */
+                        if (blk_mq_hw_queue_mapped(hctx))
+                                blk_mq_tag_idle(hctx);
+                }
         }
 }
 
@@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
                 spin_lock(&hctx->lock);
                 list_splice(&rq_list, &hctx->dispatch);
                 spin_unlock(&hctx->lock);
+                /*
+                 * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
+                 * it's possible the queue is stopped and restarted again
+                 * before this. Queue restart will dispatch requests. And since
+                 * requests in rq_list aren't added into hctx->dispatch yet,
+                 * the requests in rq_list might get lost.
+                 *
+                 * blk_mq_run_hw_queue() already checks the STOPPED bit
+                 **/
+                blk_mq_run_hw_queue(hctx, true);
         }
 }
 
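A note on the hunk above: when the driver returns BUSY, the leftover requests are spliced back onto hctx->dispatch, but a concurrent stop/restart of the queue may already have completed its dispatch pass before the splice, so nothing would ever pick those requests up again. Re-running the queue after the splice closes that window. Below is a minimal user-space sketch of the pattern; struct hctx and run_hw_queue() are simplified stand-ins invented for illustration, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct hctx {
        bool stopped;      /* models the BLK_MQ_S_STOPPED bit */
        int  dispatch_len; /* models the hctx->dispatch list  */
};

/* Models blk_mq_run_hw_queue(): a no-op while the queue is stopped. */
static void run_hw_queue(struct hctx *h)
{
        if (h->stopped)
                return;
        printf("dispatching %d requeued request(s)\n", h->dispatch_len);
        h->dispatch_len = 0;
}

int main(void)
{
        struct hctx h = { .stopped = false, .dispatch_len = 0 };

        /* Driver returned BUSY: three requests spliced back to dispatch. */
        h.dispatch_len = 3;

        /*
         * A queue restart that ran before the splice saw an empty
         * dispatch list; without this extra run the requests are lost.
         * The call is safe here because it checks the STOPPED bit itself.
         */
        run_hw_queue(&h);
        return 0;
}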
@@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
         return NOTIFY_OK;
 }
 
-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
-{
-        struct request_queue *q = hctx->queue;
-        struct blk_mq_tag_set *set = q->tag_set;
-
-        if (set->tags[hctx->queue_num])
-                return NOTIFY_OK;
-
-        set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
-        if (!set->tags[hctx->queue_num])
-                return NOTIFY_STOP;
-
-        hctx->tags = set->tags[hctx->queue_num];
-        return NOTIFY_OK;
-}
-
 static int blk_mq_hctx_notify(void *data, unsigned long action,
                               unsigned int cpu)
 {
@@ -1594,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
 
         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                 return blk_mq_hctx_cpu_offline(hctx, cpu);
-        else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-                return blk_mq_hctx_cpu_online(hctx, cpu);
+
+        /*
+         * In case of CPU online, tags may be reallocated
+         * in blk_mq_map_swqueue() after mapping is updated.
+         */
 
         return NOTIFY_OK;
 }
@@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
         unsigned int i;
         struct blk_mq_hw_ctx *hctx;
         struct blk_mq_ctx *ctx;
+        struct blk_mq_tag_set *set = q->tag_set;
 
         queue_for_each_hw_ctx(q, hctx, i) {
                 cpumask_clear(hctx->cpumask);
@@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                  * disable it and free the request entries.
                  */
                 if (!hctx->nr_ctx) {
-                        struct blk_mq_tag_set *set = q->tag_set;
-
                         if (set->tags[i]) {
                                 blk_mq_free_rq_map(set, set->tags[i], i);
                                 set->tags[i] = NULL;
-                                hctx->tags = NULL;
                         }
+                        hctx->tags = NULL;
                         continue;
                 }
 
+                /* unmapped hw queue can be remapped after CPU topo changed */
+                if (!set->tags[i])
+                        set->tags[i] = blk_mq_init_rq_map(set, i);
+                hctx->tags = set->tags[i];
+                WARN_ON(!hctx->tags);
+
                 /*
                  * Set the map size to the number of mapped software queues.
                  * This is more accurate and more efficient than looping
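The two hunks above replace the CPU_ONLINE notifier path: instead of allocating tags from the hotplug callback, blk_mq_map_swqueue() now frees the tag map when a hw queue loses all of its CPUs and lazily re-allocates it when a topology change maps the queue again. A rough user-space sketch of that allocate-on-remap pattern follows; the array size and the init_rq_map() helper are invented for illustration.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_HW_QUEUES 4

static int *tags[NR_HW_QUEUES]; /* models set->tags[] */

/* Stand-in for blk_mq_init_rq_map(): allocate a per-queue tag map. */
static int *init_rq_map(unsigned int i)
{
        (void)i; /* per-queue sizing omitted in this mock */
        return calloc(64, sizeof(int));
}

/* Models the relevant part of blk_mq_map_swqueue() for one hw queue. */
static void map_swqueue(unsigned int i, unsigned int nr_ctx)
{
        if (!nr_ctx) {          /* queue has no mapped software queues */
                free(tags[i]);
                tags[i] = NULL;
                return;
        }
        if (!tags[i])           /* remapped after a CPU topology change */
                tags[i] = init_rq_map(i);
        assert(tags[i]);        /* mirrors the WARN_ON() in the hunk */
}

int main(void)
{
        map_swqueue(0, 2); /* mapped: tag map allocated      */
        map_swqueue(0, 0); /* all CPUs gone: tag map freed   */
        map_swqueue(0, 1); /* CPU came back: allocated again */
        printf("tags[0] is %s\n", tags[0] ? "allocated" : "NULL");
        return 0;
}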
@@ -2090,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
          */
         list_for_each_entry(q, &all_q_list, all_q_node)
                 blk_mq_freeze_queue_start(q);
-        list_for_each_entry(q, &all_q_list, all_q_node)
+        list_for_each_entry(q, &all_q_list, all_q_node) {
                 blk_mq_freeze_queue_wait(q);
 
+                /*
+                 * timeout handler can't touch hw queue during the
+                 * reinitialization
+                 */
+                del_timer_sync(&q->timeout);
+        }
+
         list_for_each_entry(q, &all_q_list, all_q_node)
                 blk_mq_queue_reinit(q);
 
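The del_timer_sync() added above is the usual quiesce idiom: it not only deactivates the timeout timer but also waits for a handler already running on another CPU to finish, so the reinitialization cannot race with blk_mq_rq_timer() walking the hw queues. Below is a user-space analog of that ordering, using a thread in place of the timer; all names are illustrative, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool timer_armed = true; /* models the pending timer */
static int hw_queues_valid = 1;

/* Plays the role of the timeout handler touching hw queues. */
static void *timeout_handler(void *arg)
{
        (void)arg;
        while (atomic_load(&timer_armed)) {
                if (!hw_queues_valid)
                        fprintf(stderr, "raced with reinit!\n");
                usleep(1000);
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, timeout_handler, NULL);
        usleep(10000);

        /* "del_timer_sync()": disarm, then wait for the handler to exit. */
        atomic_store(&timer_armed, false);
        pthread_join(t, NULL);

        hw_queues_valid = 0; /* now safe: reinitialize the hw queues */
        hw_queues_valid = 1;
        puts("reinit finished without racing the handler");
        return 0;
}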
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index faaf36ade7eb..2b8fd302f677 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
 
         blk_trace_shutdown(q);
 
-        bdi_destroy(&q->backing_dev_info);
-
         ida_simple_remove(&blk_queue_ida, q->id);
         call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
diff --git a/block/bounce.c b/block/bounce.c
index ab21ba203d5c..ed9dd8067120 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -221,8 +221,8 @@ bounce:
                 if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
                         continue;
 
-                inc_zone_page_state(to->bv_page, NR_BOUNCE);
                 to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+                inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
                 if (rw == WRITE) {
                         char *vto, *vfrom;
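The bounce.c hunk reorders two lines so that inc_zone_page_state() runs after mempool_alloc() replaces to->bv_page: the NR_BOUNCE increment then lands on the newly allocated bounce page rather than on the original page, matching the decrement done later on the bounce page. A toy illustration of why the order matters; struct page here is a one-field mock, not the kernel's.

#include <stdio.h>

struct page { int nr_bounce; }; /* mock: per-page stand-in for the zone counter */

static void inc_zone_page_state(struct page *p) { p->nr_bounce++; }
static void dec_zone_page_state(struct page *p) { p->nr_bounce--; }

int main(void)
{
        struct page original = {0}, bounce = {0};
        struct page *bv_page = &original;

        bv_page = &bounce;            /* the mempool_alloc() of the bounce page */
        inc_zone_page_state(bv_page); /* increment now hits the bounce page...  */
        dec_zone_page_state(&bounce); /* ...which the later decrement also hits */

        /*
         * With the old order the increment hit 'original', leaving the
         * counters at +1/-1 instead of the balanced 0/0 printed here.
         */
        printf("original=%d bounce=%d\n", original.nr_bounce, bounce.nr_bounce);
        return 0;
}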
diff --git a/block/elevator.c b/block/elevator.c
index 59794d0d38e3..8985038f398c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 
         eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
         if (unlikely(!eq))
-                goto err;
+                return NULL;
 
         eq->type = e;
         kobject_init(&eq->kobj, &elv_ktype);
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
         hash_init(eq->hash);
 
         return eq;
-err:
-        kfree(eq);
-        elevator_put(e);
-        return NULL;
 }
 EXPORT_SYMBOL(elevator_alloc);
 
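The elevator.c hunks drop the err: label entirely: kfree() on the just-failed allocation was a no-op, and releasing the elevator module reference here appears to have doubled up with the release that callers of elevator_alloc() perform on their own error paths. Returning NULL directly leaves reference ownership with the caller. A toy refcount sketch of that ownership rule; struct module and the helpers are mocks written for this note.

#include <stdio.h>

struct module { int refs; }; /* mock module refcount */

static void put_ref(struct module *m)
{
        if (--m->refs < 0)
                fprintf(stderr, "BUG: reference released twice\n");
}

/* Models elevator_alloc() when kzalloc_node() fails. */
static void *alloc_fails(struct module *m, int old_behaviour)
{
        if (old_behaviour)
                put_ref(m); /* old err: path dropped a ref it didn't own */
        return NULL;        /* new path: just report the failure */
}

int main(void)
{
        struct module m = { .refs = 1 };

        if (!alloc_fails(&m, 0))
                put_ref(&m); /* the caller's error path drops the reference */

        printf("refs=%d\n", m.refs); /* 0: balanced */
        return 0;
}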