author	Arnd Bergmann <arnd@arndb.de>	2012-07-21 17:22:35 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2012-07-21 17:22:35 -0400
commit	2d05bf6ef7391ac3642ab62bc3a649fdb32731fe (patch)
tree	a8671f0eafa885b4f3ad05204e3ed642b6acab59 /block
parent	9c63cd5a955ce8a3de1776a9e4b6b89c69b2a09e (diff)
parent	8c0383b8f6a8012b107cd62eaa5a556586d8ea56 (diff)
Merge branch 'samsung/defconfig' into next/defconfig
From Kukjin Kim <kgene.kim@samsung.com>:

This adds a new exynos_defconfig for DT-based configuration of exynos4 and exynos5 together. The old exynos4_defconfig will be kept for non-DT builds for a while, and we will then try to move to exynos_defconfig for DT only.

* samsung/defconfig:
  ARM: exynos_defconfig: enable more platforms in defconfig

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
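As a rough illustration only (the option names below are assumptions based on the exynos Kconfig of that era, not taken from this commit), a combined exynos_defconfig covering both SoC families would enable options along these lines:

    # hypothetical excerpt: build support for both exynos4 and exynos5
    CONFIG_ARCH_EXYNOS=y
    CONFIG_ARCH_EXYNOS4=y
    CONFIG_ARCH_EXYNOS5=y
    # device-tree board support for each family
    CONFIG_MACH_EXYNOS4_DT=y
    CONFIG_MACH_EXYNOS5_DT=y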
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	9
-rw-r--r--	block/blk-core.c	25
-rw-r--r--	block/blk-timeout.c	41
-rw-r--r--	block/cfq-iosched.c	30
-rw-r--r--	block/scsi_ioctl.c	5
5 files changed, 43 insertions(+), 67 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 02cf6335e9bd..e7dee617358e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 
 		blkg->pd[i] = pd;
 		pd->blkg = blkg;
-	}
-
-	/* invoke per-policy init */
-	for (i = 0; i < BLKCG_MAX_POLS; i++) {
-		struct blkcg_policy *pol = blkcg_policy[i];
 
+		/* invoke per-policy init */
 		if (blkcg_policy_enabled(blkg->q, pol))
 			pol->pd_init_fn(blkg);
 	}
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
-	struct request_queue *q = blkg->q;
 	struct blkcg *blkcg = blkg->blkcg;
 
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
 
 	/* Something wrong if we are trying to remove same group twice */
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c923a7aeb56..93eb3e4f88ce 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+	int i;
+
 	while (true) {
 		bool drain = false;
-		int i;
 
 		spin_lock_irq(q->queue_lock);
 
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			break;
 		msleep(10);
 	}
+
+	/*
+	 * With queue marked dead, any woken up waiter will fail the
+	 * allocation path, so the wakeup chaining is lost and we're
+	 * left with hung waiters. We need to wake up those waiters.
+	 */
+	if (q->request_fn) {
+		spin_lock_irq(q->queue_lock);
+		for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+			wake_up_all(&q->rq.wait[i]);
+		spin_unlock_irq(q->queue_lock);
+	}
 }
 
 /**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	/* mark @q DEAD, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
 	spin_lock_irq(lock);
 
 	/*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-	if (q->queue_lock != &q->__queue_lock)
-		q->queue_lock = &q->__queue_lock;
-
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
+	spin_lock_irq(lock);
+	if (q->queue_lock != &q->__queue_lock)
+		q->queue_lock = &q->__queue_lock;
+	spin_unlock_irq(lock);
+
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 780354888958..6e4744cbfb56 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
 	mod_timer(&q->timeout, expiry);
 }
 
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue:	pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
-	unsigned long flags;
-	struct request *rq, *tmp;
-	LIST_HEAD(list);
-
-	/*
-	 * Not a request based block device, nothing to abort
-	 */
-	if (!q->request_fn)
-		return;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	elv_abort_queue(q);
-
-	/*
-	 * Splice entries to local list, to avoid deadlocking if entries
-	 * get readded to the timeout list by error handling
-	 */
-	list_splice_init(&q->timeout_list, &list);
-
-	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
-		blk_abort_request(rq);
-
-	/*
-	 * Occasionally, blk_abort_request() will return without
-	 * deleting the element from the list. Make sure we add those back
-	 * instead of leaving them on the local stack list.
-	 */
-	list_splice(&list, &q->timeout_list);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 673c977cc2bf..fb52df9744f5 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -17,8 +17,6 @@
 #include "blk.h"
 #include "blk-cgroup.h"
 
-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
 /*
  * tunables
  */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
 	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 }
 
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
-	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 {
 	return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
 	return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
 	kfree(cfqd->root_group);
 #endif
-	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
 	kfree(cfqd);
 }
 
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	if (!cfq_group_idle)
 		cfq_group_idle = 1;
-#else
-	cfq_group_idle = 0;
-#endif
 
 	ret = blkcg_policy_register(&blkcg_policy_cfq);
 	if (ret)
 		return ret;
+#else
+	cfq_group_idle = 0;
+#endif
 
+	ret = -ENOMEM;
 	cfq_pool = KMEM_CACHE(cfq_queue, 0);
 	if (!cfq_pool)
 		goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
 err_free_pool:
 	kmem_cache_destroy(cfq_pool);
 err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 	blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
 	return ret;
 }
 
 static void __exit cfq_exit(void)
 {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 	blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
 	elv_unregister(&iosched_cfq);
 	kmem_cache_destroy(cfq_pool);
 }
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 260fa80ef575..9a87daa6f4fb 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
 		break;
 	}
 
+	if (capable(CAP_SYS_RAWIO))
+		return 0;
+
 	/* In particular, rule out all resets and host-specific ioctls. */
 	printk_ratelimited(KERN_WARNING
 			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
 
-	return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+	return -ENOIOCTLCMD;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
 