From b9d9c82b4d081feb464f62dfc786c8621d09ecd2 Mon Sep 17 00:00:00 2001
From: Kay Sievers
Date: Thu, 15 Jun 2006 15:31:56 +0200
Subject: [PATCH] Driver core: add generic "subsystem" link to all devices

Like the SUBSYSTEM= key we find in the environment of the uevent, this
creates a generic "subsystem" link in sysfs for every device. Userspace
usually doesn't care at all whether it's a "class" or a "bus" device.
This provides a unified way to determine the subsystem of a device,
regardless of the way the driver core has created it.

Signed-off-by: Kay Sievers
Signed-off-by: Greg Kroah-Hartman
---
 block/genhd.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

(limited to 'block')

diff --git a/block/genhd.c b/block/genhd.c
index 5a8d3bf02f..8d7339511e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,8 +17,7 @@
 #include
 #include

-static struct subsystem block_subsys;
-
+struct subsystem block_subsys;
 static DEFINE_MUTEX(block_subsys_lock);

 /*
@@ -511,9 +510,7 @@ static struct kset_uevent_ops block_uevent_ops = {
 	.uevent		= block_uevent,
 };

-/* declare block_subsys. */
-static decl_subsys(block, &ktype_block, &block_uevent_ops);
-
+decl_subsys(block, &ktype_block, &block_uevent_ops);

 /*
  * aggregate disk stat collector. Uses the same stats that the sysfs
-- cgit v1.2.2

From 626ab0e69d376fa07599af669af8ba92d58e87c1 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Fri, 23 Jun 2006 02:05:55 -0700
Subject: [PATCH] list: use list_replace_init() instead of list_splice_init()

list_splice_init(list, head) does unneeded work if it is already known
that list_empty(head) == 1. We can use list_replace_init() instead.

Signed-off-by: Oleg Nesterov
Acked-by: David S. Miller
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 block/ll_rw_blk.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'block')

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4..465b54312c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3359,12 +3359,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *cpu_list;
-	LIST_HEAD(local_list);
+	struct list_head *cpu_list, local_list;

 	local_irq_disable();
 	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_splice_init(cpu_list, &local_list);
+	list_replace_init(cpu_list, &local_list);
 	local_irq_enable();

 	while (!list_empty(&local_list)) {
-- cgit v1.2.2
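For readers following along, here is a small illustrative sketch (not part of the patch) of the pattern blk_done_softirq() uses: detach a producer list into a local list with interrupts disabled, then process it. The pending/drain_pending/handle names are made up for the example. The point is that list_replace_init() lets the destination stay uninitialized because it is overwritten wholesale, which is exactly why the patch can drop the LIST_HEAD() initializer.

/* Illustrative only -- mirrors the blk_done_softirq() pattern above. */
#include <linux/list.h>
#include <linux/interrupt.h>

static LIST_HEAD(pending);		/* assumed to be filled elsewhere with irqs disabled */

static void drain_pending(void (*handle)(struct list_head *))
{
	struct list_head local;		/* no LIST_HEAD(): list_replace_init() overwrites it */

	local_irq_disable();
	/*
	 * list_splice_init(&pending, &local) would also work, but it does
	 * extra pointer fixups for the (known empty) destination.
	 * list_replace_init() simply hands pending's nodes to local and
	 * reinitializes pending.
	 */
	list_replace_init(&pending, &local);
	local_irq_enable();

	while (!list_empty(&local)) {
		struct list_head *entry = local.next;

		list_del_init(entry);
		handle(entry);
	}
}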
From bae386f7884aa3720cc7880b36a41a1d2b9c327b Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Mon, 24 Apr 2006 21:12:59 +0200
Subject: [PATCH] iosched: use hlist for request hashtable

Use hlist instead of list_head for the request hashtable in
deadline-iosched and as-iosched. This also lets us drop the flag that
tracked whether a request was hashed.

Signed-off-by: Akinobu Mita
Signed-off-by: Jens Axboe
---
 block/as-iosched.c       | 45 +++++++++++++++++++--------------------------
 block/deadline-iosched.c | 39 ++++++++++++++++-----------------------
 2 files changed, 35 insertions(+), 49 deletions(-)

(limited to 'block')

diff --git a/block/as-iosched.c b/block/as-iosched.c
index 0c750393be..9b13d72ffe 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -96,7 +96,7 @@ struct as_data {
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct list_head *hash;		/* request hash */
+	struct hlist_head *hash;	/* request hash */

 	unsigned long exit_prob;	/* probability a task will exit while
 					   being waited on */
@@ -165,8 +165,7 @@ struct as_rq {
 	/*
 	 * request hash, key is the ending offset (for back merge lookup)
 	 */
-	struct list_head hash;
-	unsigned int on_hash;
+	struct hlist_node hash;

 	/*
 	 * expire fifo
@@ -282,17 +281,15 @@ static const int as_hash_shift = 6;
 #define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
 #define AS_HASH_ENTRIES		(1 << as_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)	list_entry((ptr), struct as_rq, hash)

 static inline void __as_del_arq_hash(struct as_rq *arq)
 {
-	arq->on_hash = 0;
-	list_del_init(&arq->hash);
+	hlist_del_init(&arq->hash);
 }

 static inline void as_del_arq_hash(struct as_rq *arq)
 {
-	if (arq->on_hash)
+	if (!hlist_unhashed(&arq->hash))
 		__as_del_arq_hash(arq);
 }

@@ -300,10 +297,9 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;

-	BUG_ON(arq->on_hash);
+	BUG_ON(!hlist_unhashed(&arq->hash));

-	arq->on_hash = 1;
-	list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
+	hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
 }

 /*
@@ -312,31 +308,29 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
+	struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];

-	if (!arq->on_hash) {
+	if (hlist_unhashed(&arq->hash)) {
 		WARN_ON(1);
 		return;
 	}

-	if (arq->hash.prev != head) {
-		list_del(&arq->hash);
-		list_add(&arq->hash, head);
+	if (&arq->hash != head->first) {
+		hlist_del(&arq->hash);
+		hlist_add_head(&arq->hash, head);
 	}
 }

 static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 {
-	struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
-	struct list_head *entry, *next = hash_list->next;
+	struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
+	struct hlist_node *entry, *next;
+	struct as_rq *arq;

-	while ((entry = next) != hash_list) {
-		struct as_rq *arq = list_entry_hash(entry);
+	hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
 		struct request *__rq = arq->request;

-		next = entry->next;
-
-		BUG_ON(!arq->on_hash);
+		BUG_ON(hlist_unhashed(&arq->hash));

 		if (!rq_mergeable(__rq)) {
 			as_del_arq_hash(arq);
@@ -1601,8 +1595,7 @@ static int as_set_request(request_queue_t *q, struct request *rq,
 		arq->request = rq;
 		arq->state = AS_RQ_PRESCHED;
 		arq->io_context = NULL;
-		INIT_LIST_HEAD(&arq->hash);
-		arq->on_hash = 0;
+		INIT_HLIST_NODE(&arq->hash);
 		INIT_LIST_HEAD(&arq->fifo);
 		rq->elevator_private = arq;
 		return 0;
@@ -1662,7 +1655,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)

 	ad->q = q; /* Identify what queue the data belongs to */

-	ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
+	ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
 				GFP_KERNEL, q->node);
 	if (!ad->hash) {
 		kfree(ad);
@@ -1684,7 +1677,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_WORK(&ad->antic_work, as_work_handler, q);

 	for (i = 0; i < AS_HASH_ENTRIES; i++)
-		INIT_LIST_HEAD(&ad->hash[i]);
+		INIT_HLIST_HEAD(&ad->hash[i]);

 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c94de8e12f..e5bccaaed5 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -30,8 +30,7 @@ static const int deadline_hash_shift = 5;
 #define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
 #define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
-#define ON_HASH(drq)		(drq)->on_hash
+#define ON_HASH(drq)		(!hlist_unhashed(&(drq)->hash))

 struct deadline_data {
 	/*
@@ -48,7 +47,7 @@ struct deadline_data {
 	 * next in sort order. read, write or both are NULL
 	 */
 	struct deadline_rq *next_drq[2];
-	struct list_head *hash;		/* request hash */
+	struct hlist_head *hash;	/* request hash */
 	unsigned int batching;		/* number of sequential requests made */
 	sector_t last_sector;		/* head position */
 	unsigned int starved;		/* times reads have starved writes */
@@ -79,8 +78,7 @@ struct deadline_rq {
 	/*
 	 * request hash, key is the ending offset (for back merge lookup)
 	 */
-	struct list_head hash;
-	char on_hash;
+	struct hlist_node hash;

 	/*
 	 * expire fifo
@@ -100,8 +98,7 @@ static kmem_cache_t *drq_pool;
  */
 static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
 {
-	drq->on_hash = 0;
-	list_del_init(&drq->hash);
+	hlist_del_init(&drq->hash);
 }

 static inline void deadline_del_drq_hash(struct deadline_rq *drq)
@@ -117,8 +114,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)

 	BUG_ON(ON_HASH(drq));

-	drq->on_hash = 1;
-	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
+	hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
 }

 /*
@@ -128,26 +124,24 @@ static inline void
 deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 {
 	struct request *rq = drq->request;
-	struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
+	struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

-	if (ON_HASH(drq) && drq->hash.prev != head) {
-		list_del(&drq->hash);
-		list_add(&drq->hash, head);
+	if (ON_HASH(drq) && &drq->hash != head->first) {
+		hlist_del(&drq->hash);
+		hlist_add_head(&drq->hash, head);
 	}
 }

 static struct request *
 deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
 {
-	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
-	struct list_head *entry, *next = hash_list->next;
+	struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+	struct hlist_node *entry, *next;
+	struct deadline_rq *drq;

-	while ((entry = next) != hash_list) {
-		struct deadline_rq *drq = list_entry_hash(entry);
+	hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
 		struct request *__rq = drq->request;

-		next = entry->next;
-
 		BUG_ON(!ON_HASH(drq));

 		if (!rq_mergeable(__rq)) {
@@ -625,7 +619,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 		return NULL;
 	memset(dd, 0, sizeof(*dd));

-	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+	dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
 				GFP_KERNEL, q->node);
 	if (!dd->hash) {
 		kfree(dd);
@@ -641,7 +635,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 	}

 	for (i = 0; i < DL_HASH_ENTRIES; i++)
-		INIT_LIST_HEAD(&dd->hash[i]);
+		INIT_HLIST_HEAD(&dd->hash[i]);

 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -677,8 +671,7 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 		RB_CLEAR(&drq->rb_node);
 		drq->request = rq;

-		INIT_LIST_HEAD(&drq->hash);
-		drq->on_hash = 0;
+		INIT_HLIST_NODE(&drq->hash);

 		INIT_LIST_HEAD(&drq->fifo);
-- cgit v1.2.2
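As an aside for readers less familiar with hlist, a minimal sketch of the pattern the patch switches to is shown below. It is illustrative only (the toy_* names are made up, not scheduler code); the point is that an hlist_head is a single pointer, so the bucket array is half the size of a list_head array, and hlist_unhashed() on an initialized node replaces the separate on_hash flag.

/* Illustrative sketch only -- not from as-iosched.c or deadline-iosched.c. */
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>

#define TOY_HASH_SHIFT		6
#define TOY_HASH_ENTRIES	(1 << TOY_HASH_SHIFT)

struct toy_rq {
	struct hlist_node hash;		/* hlist_unhashed() replaces an on_hash flag */
	sector_t key;
};

static struct hlist_head toy_hash[TOY_HASH_ENTRIES];	/* zeroed == all buckets empty */

/* callers are assumed to INIT_HLIST_NODE(&trq->hash) when allocating a toy_rq */

static void toy_hash_add(struct toy_rq *trq)
{
	hlist_add_head(&trq->hash, &toy_hash[hash_long(trq->key, TOY_HASH_SHIFT)]);
}

static void toy_hash_del(struct toy_rq *trq)
{
	if (!hlist_unhashed(&trq->hash))
		hlist_del_init(&trq->hash);
}

static struct toy_rq *toy_hash_find(sector_t key)
{
	struct hlist_head *head = &toy_hash[hash_long(key, TOY_HASH_SHIFT)];
	struct hlist_node *node;
	struct toy_rq *trq;

	hlist_for_each_entry(trq, node, head, hash)
		if (trq->key == key)
			return trq;

	return NULL;
}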
From a038e2536472b4dd932399b5277e65f188811de5 Mon Sep 17 00:00:00 2001
From: Paolo 'Blaisorblade' Giarrusso
Date: Mon, 5 Jun 2006 12:09:01 +0200
Subject: [PATCH] blk_start_queue() must be called with irq disabled - add warning

The queue lock can be taken from interrupt context, so it must always be
taken with irq-disabling primitives. Some primitives already verify this.
blk_start_queue() is called under this lock, so interrupts must be
disabled when calling it.

Also document this requirement clearly in blk_init_queue(), where the
queue spinlock is set.

Signed-off-by: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Jens Axboe
---
 block/ll_rw_blk.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'block')

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 465b54312c..17c42ddd31 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1663,6 +1663,8 @@ static void blk_unplug_timeout(unsigned long data)
 **/
 void blk_start_queue(request_queue_t *q)
 {
+	WARN_ON(!irqs_disabled());
+
 	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);

 	/*
@@ -1878,7 +1880,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  * get dealt with eventually.
  *
  * The queue spin lock must be held while manipulating the requests on the
- * request queue.
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
  *
  * Function returns a pointer to the initialized request queue, or NULL if
  * it didn't succeed.
-- cgit v1.2.2
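By way of illustration (not part of the patch), a driver restarting a stopped queue from process context would typically do so under the queue lock with interrupts disabled, which also keeps the new WARN_ON() quiet. A minimal sketch, assuming a conventional driver setup:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical driver helper; the "example_" name is made up. */
static void example_restart_queue(request_queue_t *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}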
From acf421755593f7d7bd9352d57eda796c6eb4fa43 Mon Sep 17 00:00:00 2001
From: Dave Jones
Date: Mon, 12 Jun 2006 14:20:58 +0200
Subject: [PATCH] remove dead code from elevator switching

We already drop the refcount in elevator_exit(), and as we're setting
'e' to NULL, we'll never take that branch anyway. Finally, as 'e' is a
local var that isn't referenced afterwards, setting it to NULL is
pointless.

Signed-off-by: Dave Jones
Signed-off-by: Jens Axboe
---
 block/elevator.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'block')

diff --git a/block/elevator.c b/block/elevator.c
index a0afdd317c..d00b283f31 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -850,12 +850,9 @@ fail_register:
 	 * one again (along with re-adding the sysfs dir)
 	 */
 	elevator_exit(e);
-	e = NULL;
 	q->elevator = old_elevator;
 	elv_register_queue(q);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	if (e)
-		kobject_put(&e->kobj);
 	return 0;
 }
-- cgit v1.2.2

From 271f18f102c789f59644bb6c53a69da1df72b2f4 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 13 Jun 2006 08:08:38 +0200
Subject: [PATCH] cfq-iosched: Don't set the queue batching limits

We cannot update these limits if the user changes nr_requests, so don't
set them in the first place. The gains are pretty questionable as well.
The batching loss has been shown to decrease throughput.

Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 48 +++---------------------------------------------
 1 file changed, 3 insertions(+), 45 deletions(-)

(limited to 'block')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e2e6ad0a15..c88f161d3f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -123,8 +123,6 @@ struct cfq_data {
 	 */
 	struct hlist_head *crq_hash;

-	unsigned int max_queued;
-
 	mempool_t *crq_pool;

 	int rq_in_driver;
@@ -1910,7 +1908,6 @@ static inline int
 __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		struct task_struct *task, int rw)
 {
-#if 1
 	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
 	    !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
@@ -1918,39 +1915,6 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}

 	return ELV_MQUEUE_MAY;
-#else
-	if (!cfqq || task->flags & PF_MEMALLOC)
-		return ELV_MQUEUE_MAY;
-	if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
-		if (cfq_cfqq_wait_request(cfqq))
-			return ELV_MQUEUE_MUST;
-
-		/*
-		 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
-		 * can quickly flood the queue with writes from a single task
-		 */
-		if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
-			cfq_mark_cfqq_must_alloc_slice(cfqq);
-			return ELV_MQUEUE_MUST;
-		}
-
-		return ELV_MQUEUE_MAY;
-	}
-	if (cfq_class_idle(cfqq))
-		return ELV_MQUEUE_NO;
-	if (cfqq->allocated[rw] >= cfqd->max_queued) {
-		struct io_context *ioc = get_io_context(GFP_ATOMIC);
-		int ret = ELV_MQUEUE_NO;
-
-		if (ioc && ioc->nr_batch_requests)
-			ret = ELV_MQUEUE_MAY;
-
-		put_io_context(ioc);
-		return ret;
-	}
-
-	return ELV_MQUEUE_MAY;
-#endif
 }

 static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
@@ -1979,16 +1943,13 @@ static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
 static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct request_list *rl = &q->rq;

-	if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+	if (unlikely(cfqd->rq_starved)) {
+		struct request_list *rl = &q->rq;
+
 		smp_mb();
 		if (waitqueue_active(&rl->wait[READ]))
 			wake_up(&rl->wait[READ]);
-	}
-
-	if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
-		smp_mb();
 		if (waitqueue_active(&rl->wait[WRITE]))
 			wake_up(&rl->wait[WRITE]);
 	}
@@ -2278,9 +2239,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)

 	cfqd->queue = q;

-	cfqd->max_queued = q->nr_requests / 4;
-	q->nr_batching = cfq_queued;
-
 	init_timer(&cfqd->idle_slice_timer);
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
-- cgit v1.2.2
From b31dc66a54ad986b6b73bdc49c8efc17cbad1833 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 13 Jun 2006 08:26:10 +0200
Subject: [PATCH] Kill PF_SYNCWRITE flag

A process flag to indicate whether we are doing sync io is incredibly
ugly. It also causes performance problems when one does a lot of async
io and then proceeds to sync it. Part of the io will go out as async,
and the other part as sync. This causes a disconnect between the
previously submitted io and the synced io. For io schedulers such as
CFQ, this causes lost merges and suboptimal scheduling behaviour.

Remove PF_SYNCWRITE completely from the fsync/msync paths, and let the
O_DIRECT path just directly indicate that the writes are sync by using
WRITE_SYNC instead.

Signed-off-by: Jens Axboe
---
 block/as-iosched.c  | 2 +-
 block/cfq-iosched.c | 4 +---
 block/ll_rw_blk.c   | 3 +++
 3 files changed, 5 insertions(+), 4 deletions(-)

(limited to 'block')

diff --git a/block/as-iosched.c b/block/as-iosched.c
index 9b13d72ffe..56c99fa037 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1339,7 +1339,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;

 	if (rq_data_dir(arq->request) == READ
-			|| current->flags&PF_SYNCWRITE)
+			|| (arq->request->flags & REQ_RW_SYNC))
 		arq->is_sync = 1;
 	else
 		arq->is_sync = 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c88f161d3f..4c4e9cc3ae 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -277,8 +277,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi
 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key,
 				       struct task_struct *tsk, gfp_t gfp_mask);
-#define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
-
 /*
  * lots of deadline iosched dupes, can be abstracted later...
  */
@@ -334,7 +332,7 @@ static int cfq_queue_empty(request_queue_t *q)

 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
 {
-	if (rw == READ || process_sync(task))
+	if (rw == READ || rw == WRITE_SYNC)
 		return task->pid;

 	return CFQ_KEY_ASYNC;
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 17c42ddd31..2270bb4513 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2827,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	if (unlikely(bio_barrier(bio)))
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

+	if (bio_sync(bio))
+		req->flags |= REQ_RW_SYNC;
+
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
 	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
-- cgit v1.2.2
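A short illustrative fragment (not from the patch) of what "directly indicating sync writes" looks like from the submitter's side: passing WRITE_SYNC to submit_bio() sets the bio's sync bit, which init_request_from_bio() above then turns into REQ_RW_SYNC on the request, so the io schedulers treat it as synchronous without any process flag.

#include <linux/fs.h>
#include <linux/bio.h>

/* Hypothetical submitter; the bio is assumed to be fully set up already. */
static void example_submit_sync_write(struct bio *bio)
{
	submit_bio(WRITE_SYNC, bio);	/* WRITE plus the sync hint, instead of PF_SYNCWRITE */
}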
From b17fd9bceb99610f6dc7998c9a4ed6b71520be2b Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 19 Jun 2006 10:06:48 +0200
Subject: [PATCH] Make CFQ the default IO scheduler

Signed-off-by: Jens Axboe
---
 block/Kconfig.iosched | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'block')

diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index f3b7753aac..48d090e266 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -40,7 +40,7 @@ config IOSCHED_CFQ

 choice
 	prompt "Default I/O scheduler"
-	default DEFAULT_AS
+	default DEFAULT_CFQ
 	help
 	  Select the I/O scheduler which will be used by default for all
 	  block devices.
-- cgit v1.2.2

From 35e6077cb16f93517ba5a51ba849b186d2474d60 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 14 Jun 2006 09:10:45 +0200
Subject: [PATCH] cfq-iosched: correctly set ioprio on both targets

Patch originally from Vasily Tarasov

If process 2 sets the io priority of process 1 via the sys_ioprio_set()
system call (as ionice does), cfq_init_prio_data() applies the priority
of process 2 (current) to the queue of process 1 and clears the flag
that marks an ioprio change. Process 1 then ends up running with the
priority of process 2.

Instead of calling cfq_init_prio_data() on an io-priority change, only
mark the queue as having a changed priority. Every time a new request
comes in, the cfq scheduler checks this flag and automatically switches
the queue to its new priority.

Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'block')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4c4e9cc3ae..84b75f88c2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1388,10 +1388,9 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 			}
 		}
 		cfqq = cic->cfqq[SYNC];
-		if (cfqq) {
+		if (cfqq)
 			cfq_mark_cfqq_prio_changed(cfqq);
-			cfq_init_prio_data(cfqq);
-		}
+
 		spin_unlock(cfqd->queue->queue_lock);
 	}
 }
-- cgit v1.2.2
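Conceptually, the lazy-update pattern the patch switches to looks like the sketch below. It is illustrative only, with made-up toy_* names rather than CFQ internals: the priority change merely marks the queue, and the new value is read from the owning task the next time that task queues a request.

#include <linux/sched.h>

struct toy_queue {
	unsigned short ioprio;
	unsigned int prio_changed:1;
};

/* May run in the context of *another* task (e.g. ionice), so don't read current here. */
static void toy_queue_mark_prio_changed(struct toy_queue *tq)
{
	tq->prio_changed = 1;
}

/* Runs when the owning task queues a request, so owner->ioprio is the right value. */
static void toy_queue_apply_prio(struct toy_queue *tq, struct task_struct *owner)
{
	if (tq->prio_changed) {
		tq->ioprio = owner->ioprio;
		tq->prio_changed = 0;
	}
}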
From caaa5f9f0a75d1dc5e812e69afdbb8720e077fd3 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 16 Jun 2006 11:23:00 +0200
Subject: [PATCH] cfq-iosched: many performance fixes

This is a collection of patches that greatly improve CFQ performance
in some circumstances.

- Change the idling logic to only kick in after a request is done and
  we are deciding what to do. Before, the idling included the request
  service time, so it was hard to adjust. Now it's true think/idle time.

- Take advantage of TCQ/NCQ/queueing for seeky sync workloads, but keep
  it in control for sync and sequential (or close to) workloads.

- Expire queues immediately and move on to other busy queues, if we are
  not going to idle after the current one finishes.

- Don't rearm the idle timer if there are no busy queues. Just leave the
  system idle.

Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 116 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 76 insertions(+), 40 deletions(-)

(limited to 'block')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 84b75f88c2..13c4793fdf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -26,7 +26,7 @@ static const int cfq_back_penalty = 2;	/* penalty of a backwards seek */
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 70;
+static int cfq_slice_idle = HZ / 125;

 #define CFQ_IDLE_GRACE		(HZ / 10)
 #define CFQ_SLICE_SCALE		(5)
@@ -906,6 +906,8 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 	return cfqq;
 }

+#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+
 static int
 cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -939,7 +941,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * fair distribution of slice time for a process doing back-to-back
 	 * seeks. so allow a little bit of time for him to submit a new rq
 	 */
-	if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
 		sl = 2;

 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
@@ -1038,8 +1040,10 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 */
 	if (!RB_EMPTY(&cfqq->sort_list))
 		goto keep_queue;
-	else if (cfq_cfqq_class_sync(cfqq) &&
-		 time_before(now, cfqq->slice_end)) {
+	else if (cfq_cfqq_dispatched(cfqq)) {
+		cfqq = NULL;
+		goto keep_queue;
+	} else if (cfq_cfqq_class_sync(cfqq)) {
 		if (cfq_arm_slice_timer(cfqd, cfqq))
 			return NULL;
 	}
@@ -1088,8 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	} while (dispatched < max_dispatch);

 	/*
-	 * if slice end isn't set yet, set it. if at least one request was
-	 * sync, use the sync time slice value
+	 * if slice end isn't set yet, set it.
 	 */
 	if (!cfqq->slice_end)
 		cfq_set_prio_slice(cfqd, cfqq);
@@ -1100,7 +1103,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 */
 	if ((!cfq_cfqq_sync(cfqq) &&
 	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))
+	    cfq_class_idle(cfqq) ||
+	    !cfq_cfqq_idle_window(cfqq))
 		cfq_slice_expired(cfqd, 0);

 	return dispatched;
@@ -1109,10 +1113,11 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static int
 cfq_forced_dispatch_cfqqs(struct list_head *list)
 {
-	int dispatched = 0;
 	struct cfq_queue *cfqq, *next;
 	struct cfq_rq *crq;
+	int dispatched;
+
+	dispatched = 0;
 	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
 		while ((crq = cfqq->next_crq)) {
 			cfq_dispatch_insert(cfqq->cfqd->queue, crq);
@@ -1120,6 +1125,7 @@ cfq_forced_dispatch_cfqqs(struct list_head *list)
 		}
 		BUG_ON(!list_empty(&cfqq->fifo));
 	}
+
 	return dispatched;
 }

@@ -1146,7 +1152,8 @@ static int
 cfq_dispatch_requests(request_queue_t *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq, *prev_cfqq;
+	int dispatched;

 	if (!cfqd->busy_queues)
 		return 0;
@@ -1154,10 +1161,17 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 	if (unlikely(force))
 		return cfq_forced_dispatch(cfqd);

-	cfqq = cfq_select_queue(cfqd);
-	if (cfqq) {
+	dispatched = 0;
+	prev_cfqq = NULL;
+	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
 		int max_dispatch;

+		/*
+		 * Don't repeat dispatch from the previous queue.
+		 */
+		if (prev_cfqq == cfqq)
+			break;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
@@ -1166,10 +1180,19 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 		if (cfq_class_idle(cfqq))
 			max_dispatch = 1;

-		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+
+		/*
+		 * If the dispatch cfqq has idling enabled and is still
+		 * the active queue, break out.
+		 */
+		if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
+			break;
+
+		prev_cfqq = cfqq;
 	}

-	return 0;
+	return dispatched;
 }

 /*
@@ -1375,24 +1398,28 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
 	struct cfq_queue *cfqq;
-	if (cfqd) {
-		spin_lock(cfqd->queue->queue_lock);
-		cfqq = cic->cfqq[ASYNC];
-		if (cfqq) {
-			struct cfq_queue *new_cfqq;
-			new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
-						 cic->ioc->task, GFP_ATOMIC);
-			if (new_cfqq) {
-				cic->cfqq[ASYNC] = new_cfqq;
-				cfq_put_queue(cfqq);
-			}
-		}
-		cfqq = cic->cfqq[SYNC];
-		if (cfqq)
-			cfq_mark_cfqq_prio_changed(cfqq);
-		spin_unlock(cfqd->queue->queue_lock);
+	if (unlikely(!cfqd))
+		return;
+
+	spin_lock(cfqd->queue->queue_lock);
+
+	cfqq = cic->cfqq[ASYNC];
+	if (cfqq) {
+		struct cfq_queue *new_cfqq;
+		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+					 GFP_ATOMIC);
+		if (new_cfqq) {
+			cic->cfqq[ASYNC] = new_cfqq;
+			cfq_put_queue(cfqq);
+		}
 	}
+
+	cfqq = cic->cfqq[SYNC];
+	if (cfqq)
+		cfq_mark_cfqq_prio_changed(cfqq);
+
+	spin_unlock(cfqd->queue->queue_lock);
 }

 /*
@@ -1461,8 +1488,7 @@ retry:
 		 * set ->slice_left to allow preemption for a new process
 		 */
 		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
-		if (!cfqd->hw_tag)
-			cfq_mark_cfqq_idle_window(cfqq);
+		cfq_mark_cfqq_idle_window(cfqq);
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_init_prio_data(cfqq);
 	}
@@ -1653,7 +1679,8 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	int enable_idle = cfq_cfqq_idle_window(cfqq);

-	if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
+	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
+	    (cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1683,7 +1710,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return 0;

 	if (!cfqq)
-		return 1;
+		return 0;

 	if (cfq_class_idle(cfqq))
 		return 1;
@@ -1715,7 +1742,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;

 	cfqq->slice_end = cfqq->slice_left + jiffies;
-	__cfq_slice_expired(cfqd, cfqq, 1);
+	cfq_slice_expired(cfqd, 1);
 	__cfq_set_active_queue(cfqd, cfqq);
 }

@@ -1834,11 +1861,23 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 			cfqq->service_last = now;
 			cfq_resort_rr_list(cfqq, 0);
 		}
-		cfq_schedule_dispatch(cfqd);
 	}

-	if (cfq_crq_is_sync(crq))
+	if (sync)
 		crq->io_context->last_end_request = now;
+
+	/*
+	 * If this is the active queue, check if it needs to be expired,
+	 * or if we want to idle in case it has no pending requests.
+	 */
+	if (cfqd->active_queue == cfqq) {
+		if (time_after(now, cfqq->slice_end))
+			cfq_slice_expired(cfqd, 0);
+		else if (sync && RB_EMPTY(&cfqq->sort_list)) {
+			if (!cfq_arm_slice_timer(cfqd, cfqq))
+				cfq_schedule_dispatch(cfqd);
+		}
+	}
 }

 static struct request *
@@ -2106,11 +2145,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 		 * only expire and reinvoke request handler, if there are
 		 * other queues with pending requests
 		 */
-		if (!cfqd->busy_queues) {
-			cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
-			add_timer(&cfqd->idle_slice_timer);
+		if (!cfqd->busy_queues)
 			goto out_cont;
-		}

 		/*
 		 * not expired and it has a request pending, let it dispatch
-- cgit v1.2.2

From fd61af0384014ca29428ace7c17a978b755aeddd Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 16 Jun 2006 15:35:39 +0200
Subject: [PATCH] cfq-iosched: rq update fixes

- Remember to set ->last_sector so that the cfq_choose_req() logic
  works correctly.

- Remove redundant call to cfq_choose_req()

Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'block')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 13c4793fdf..940364edf2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -952,11 +952,15 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct request *rq;

 	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
 	cfq_remove_request(crq->request);
 	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
 	elv_dispatch_sort(q, crq->request);
+
+	rq = list_entry(q->queue_head.prev, struct request, queuelist);
+	cfqd->last_sector = rq->sector + rq->nr_sectors;
 }

 /*
@@ -1767,11 +1771,7 @@ static void
 cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 struct cfq_rq *crq)
 {
-	struct cfq_io_context *cic;
-
-	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
-
-	cic = crq->io_context;
+	struct cfq_io_context *cic = crq->io_context;

 	/*
 	 * we never wait for an async request and we don't allow preemption
-- cgit v1.2.2

From dd67d051529387f6e44d22d1d5540ef281965fdd Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 21 Jun 2006 09:36:18 +0200
Subject: [PATCH] rbtree: support functions used by the io schedulers

The io schedulers all duplicate macros for checking for an empty rb root
and/or node, and for clearing a node. So put those in rbtree.h.
Signed-off-by: Jens Axboe
---
 block/as-iosched.c       | 17 +++++++----------
 block/cfq-iosched.c      | 22 ++++++++--------------
 block/deadline-iosched.c | 13 +++++--------
 3 files changed, 20 insertions(+), 32 deletions(-)

(limited to 'block')

diff --git a/block/as-iosched.c b/block/as-iosched.c
index 56c99fa037..1ec5df4667 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -347,9 +347,6 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 /*
  * rb tree support functions
  */
-#define RB_EMPTY(root)		((root)->rb_node == NULL)
-#define ON_RB(node)		(rb_parent(node) != node)
-#define RB_CLEAR(node)		(rb_set_parent(node, node))
 #define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
 #define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
 #define rq_rb_key(rq)		(rq)->sector
@@ -418,13 +415,13 @@ static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)

 static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
-	if (!ON_RB(&arq->rb_node)) {
+	if (!RB_EMPTY_NODE(&arq->rb_node)) {
 		WARN_ON(1);
 		return;
 	}

 	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-	RB_CLEAR(&arq->rb_node);
+	RB_CLEAR_NODE(&arq->rb_node);
 }

 static struct request *
@@ -545,7 +542,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
 	struct rb_node *rbprev = rb_prev(&last->rb_node);
 	struct as_rq *arq_next, *arq_prev;

-	BUG_ON(!ON_RB(&last->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&last->rb_node));

 	if (rbprev)
 		arq_prev = rb_entry_arq(rbprev);
@@ -1122,7 +1119,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	struct request *rq = arq->request;
 	const int data_dir = arq->is_sync;

-	BUG_ON(!ON_RB(&arq->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&arq->rb_node));

 	as_antic_stop(ad);
 	ad->antic_status = ANTIC_OFF;
@@ -1247,7 +1244,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
 	 */

 	if (reads) {
-		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
+		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));

 		if (writes && ad->batch_data_dir == REQ_SYNC)
 			/*
@@ -1271,7 +1268,7 @@ static int as_dispatch_request(request_queue_t *q, int force)

 	if (writes) {
 dispatch_writes:
-		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
+		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));

 		if (ad->batch_data_dir == REQ_SYNC) {
 			ad->changed_batch = 1;
@@ -1591,7 +1588,7 @@ static int as_set_request(request_queue_t *q, struct request *rq,

 	if (arq) {
 		memset(arq, 0, sizeof(*arq));
-		RB_CLEAR(&arq->rb_node);
+		RB_CLEAR_NODE(&arq->rb_node);
 		arq->request = rq;
 		arq->state = AS_RQ_PRESCHED;
 		arq->io_context = NULL;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 940364edf2..e25223e147 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -60,11 +60,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
 /*
  * rb-tree defines
  */
-#define RB_EMPTY(node)		((node)->rb_node == NULL)
-#define RB_CLEAR(node)		do {	\
-	memset(node, 0, sizeof(*node)); \
-} while (0)
-#define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
 #define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)		(rq)->sector

@@ -559,7 +554,7 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)

 	rb_erase(&crq->rb_node, &cfqq->sort_list);

-	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 		cfq_del_cfqq_rr(cfqd, cfqq);
 }

@@ -914,7 +909,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	struct cfq_io_context *cic;
 	unsigned long sl;

-	WARN_ON(!RB_EMPTY(&cfqq->sort_list));
+	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 	WARN_ON(cfqq != cfqd->active_queue);

 	/*
@@ -1042,7 +1037,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * if queue has requests, dispatch one. if not, check if
 	 * enough slice is left to wait for one
 	 */
-	if (!RB_EMPTY(&cfqq->sort_list))
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 		goto keep_queue;
 	else if (cfq_cfqq_dispatched(cfqq)) {
 		cfqq = NULL;
 		goto keep_queue;
@@ -1066,7 +1061,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	int dispatched = 0;

-	BUG_ON(RB_EMPTY(&cfqq->sort_list));
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

 	do {
 		struct cfq_rq *crq;
@@ -1090,7 +1085,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			cfqd->active_cic = crq->io_context;
 		}

-		if (RB_EMPTY(&cfqq->sort_list))
+		if (RB_EMPTY_ROOT(&cfqq->sort_list))
 			break;

 	} while (dispatched < max_dispatch);
@@ -1480,7 +1475,6 @@ retry:

 		INIT_HLIST_NODE(&cfqq->cfq_hash);
 		INIT_LIST_HEAD(&cfqq->cfq_list);
-		RB_CLEAR_ROOT(&cfqq->sort_list);
 		INIT_LIST_HEAD(&cfqq->fifo);

 		cfqq->key = key;
@@ -1873,7 +1867,7 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 	if (cfqd->active_queue == cfqq) {
 		if (time_after(now, cfqq->slice_end))
 			cfq_slice_expired(cfqd, 0);
-		else if (sync && RB_EMPTY(&cfqq->sort_list)) {
+		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
 			if (!cfq_arm_slice_timer(cfqd, cfqq))
 				cfq_schedule_dispatch(cfqd);
 		}
@@ -2059,7 +2053,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
 	if (crq) {
-		RB_CLEAR(&crq->rb_node);
+		RB_CLEAR_NODE(&crq->rb_node);
 		crq->rb_key = 0;
 		crq->request = rq;
 		INIT_HLIST_NODE(&crq->hash);
@@ -2151,7 +2145,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 		/*
 		 * not expired and it has a request pending, let it dispatch
 		 */
-		if (!RB_EMPTY(&cfqq->sort_list)) {
+		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
 			cfq_mark_cfqq_must_dispatch(cfqq);
 			goto out_kick;
 		}
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index e5bccaaed5..4469dd8462 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -159,9 +159,6 @@ deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
 /*
  * rb tree support functions
  */
-#define RB_EMPTY(root)		((root)->rb_node == NULL)
-#define ON_RB(node)		(rb_parent(node) != node)
-#define RB_CLEAR(node)		(rb_set_parent(node, node))
 #define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
 #define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
 #define rq_rb_key(rq)		(rq)->sector
@@ -220,9 +217,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
 	}

-	BUG_ON(!ON_RB(&drq->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
 	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-	RB_CLEAR(&drq->rb_node);
+	RB_CLEAR_NODE(&drq->rb_node);
 }

 static struct request *
@@ -496,7 +493,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
 	 */

 	if (reads) {
-		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
+		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

 		if (writes && (dd->starved++ >= dd->writes_starved))
 			goto dispatch_writes;
@@ -512,7 +509,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)

 	if (writes) {
 dispatch_writes:
-		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
+		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

 		dd->starved = 0;

@@ -668,7 +665,7 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	drq = mempool_alloc(dd->drq_pool, gfp_mask);
 	if (drq) {
 		memset(drq, 0, sizeof(*drq));
-		RB_CLEAR(&drq->rb_node);
+		RB_CLEAR_NODE(&drq->rb_node);
 		drq->request = rq;

 		INIT_HLIST_NODE(&drq->hash);
-- cgit v1.2.2
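For context, a small illustrative sketch (made-up toy_* names, not scheduler code) of the shared helpers the patch consolidates into rbtree.h: RB_CLEAR_NODE() marks a node as not being on a tree, and RB_EMPTY_ROOT() tests whether a tree has any nodes at all, so each scheduler no longer needs its own RB_CLEAR()/RB_EMPTY()/ON_RB() copies.

/* Illustrative only -- a toy sorted tree using the shared rbtree.h helpers. */
#include <linux/rbtree.h>
#include <linux/types.h>

struct toy_node {
	struct rb_node rb;
	sector_t key;
};

static void toy_node_init(struct toy_node *tn)
{
	RB_CLEAR_NODE(&tn->rb);			/* "not on any tree" marker */
}

static void toy_erase(struct rb_root *root, struct toy_node *tn)
{
	rb_erase(&tn->rb, root);
	RB_CLEAR_NODE(&tn->rb);			/* same pattern the schedulers use after rb_erase() */
}

static struct toy_node *toy_first(struct rb_root *root)
{
	if (RB_EMPTY_ROOT(root))		/* replaces the per-scheduler RB_EMPTY() macros */
		return NULL;

	return rb_entry(rb_first(root), struct toy_node, rb);
}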
From 8269730b389d4793348d521140f9c76fb1828249 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Wed, 21 Jun 2006 14:48:09 +0200
Subject: [BLOCK] Fix bounce limit address check

Do a safer check for when to enable DMA. Currently we enable ISA DMA
for cases that do not need it, resulting in OOM conditions when
ZONE_DMA runs out of space.

Signed-off-by: Jens Axboe
---
 block/ll_rw_blk.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'block')

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 2270bb4513..0603ab2f36 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -638,7 +638,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
-- cgit v1.2.2
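To see where this check matters, here is a hypothetical driver-side fragment (not from the patch): drivers describe their DMA reach through blk_queue_bounce_limit(), and the fix above keeps a limit such as BLK_BOUNCE_HIGH from accidentally being treated as an ISA-style restriction that drains ZONE_DMA.

#include <linux/blkdev.h>

/* Hypothetical queue setup; the "example_" name is made up. */
static void example_set_bounce_limit(request_queue_t *q, int can_dma_highmem)
{
	if (can_dma_highmem)
		blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);	/* no bouncing needed */
	else
		blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);	/* bounce only highmem pages */
}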