Diffstat (limited to 'block')
-rw-r--r--	block/as-iosched.c	8
-rw-r--r--	block/cfq-iosched.c	13
-rw-r--r--	block/elevator.c	11
-rw-r--r--	block/ll_rw_blk.c	13
-rw-r--r--	block/noop-iosched.c	2
5 files changed, 25 insertions, 22 deletions
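
Every change below applies the same idiom: an allocation tied to a request queue moves from the generic allocator to its node-aware variant, passing q->node (the NUMA node the queue was set up for) so the memory ends up local to that node. A minimal illustrative sketch of the transformation, using a hypothetical per-queue structure; my_data and my_init_queue are illustrative names, not from this patch:

	struct my_data {
		struct list_head queue;		/* hypothetical per-queue state */
	};

	static void *my_init_queue(request_queue_t *q)
	{
		struct my_data *d;

		/* before: d = kmalloc(sizeof(*d), GFP_KERNEL); */
		d = kmalloc_node(sizeof(*d), GFP_KERNEL, q->node);
		if (!d)
			return NULL;
		INIT_LIST_HEAD(&d->queue);
		return d;			/* allocated on the queue's home node */
	}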
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 8e1fef1eafc9..f6dc95489316 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -210,9 +210,9 @@ static struct as_io_context *alloc_as_io_context(void)
  * If the current task has no AS IO context then create one and initialise it.
  * Then take a ref on the task's io context and return it.
  */
-static struct io_context *as_get_io_context(void)
+static struct io_context *as_get_io_context(int node)
 {
-	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
 	if (ioc && !ioc->aic) {
 		ioc->aic = alloc_as_io_context();
 		if (!ioc->aic) {
@@ -1148,7 +1148,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 
 	data_dir = rq_is_sync(rq);
 
-	rq->elevator_private = as_get_io_context();
+	rq->elevator_private = as_get_io_context(q->node);
 
 	if (RQ_IOC(rq)) {
 		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
@@ -1292,7 +1292,7 @@ static int as_may_queue(request_queue_t *q, int rw)
 	struct io_context *ioc;
 	if (ad->antic_status == ANTIC_WAIT_REQ ||
 			ad->antic_status == ANTIC_WAIT_NEXT) {
-		ioc = as_get_io_context();
+		ioc = as_get_io_context(q->node);
 		if (ad->io_context == ioc)
 			ret = ELV_MQUEUE_MUST;
 		put_io_context(ioc);
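
Since as_get_io_context() now takes the node explicitly, each caller in the anticipatory scheduler passes q->node from the queue it is servicing. A hedged usage sketch mirroring the as_may_queue() change above (the surrounding code is condensed, not quoted from the patch):

	/* anywhere the request queue is at hand */
	struct io_context *ioc = as_get_io_context(q->node);
	if (ioc) {
		/* as_get_io_context() returned with a reference held */
		put_io_context(ioc);
	}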
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 85f1d87e86d4..0452108a932e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1148,8 +1148,9 @@ static void cfq_exit_io_context(struct io_context *ioc)
 static struct cfq_io_context *
 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
-	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
+	struct cfq_io_context *cic;
 
+	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
 	if (cic) {
 		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
@@ -1277,11 +1278,11 @@ retry:
 			 * free memory.
 			 */
 			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask|__GFP_NOFAIL);
+			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
 			spin_lock_irq(cfqd->queue->queue_lock);
 			goto retry;
 		} else {
-			cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
 			if (!cfqq)
 				goto out;
 		}
@@ -1407,7 +1408,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
-	ioc = get_io_context(gfp_mask);
+	ioc = get_io_context(gfp_mask, cfqd->queue->node);
 	if (!ioc)
 		return NULL;
 
@@ -1955,7 +1956,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	struct cfq_data *cfqd;
 	int i;
 
-	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
 	if (!cfqd)
 		return NULL;
 
@@ -1970,7 +1971,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&cfqd->empty_list);
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
-	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
 	if (!cfqd->cfq_hash)
 		goto out_free;
 
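
Slab-backed objects get the same treatment through kmem_cache_alloc_node(). A minimal sketch of that pattern, assuming a cache my_pool created elsewhere with kmem_cache_create() (illustrative, not part of the patch):

	struct my_obj *obj;

	/* prefer the queue's home node for the allocation */
	obj = kmem_cache_alloc_node(my_pool, GFP_NOIO, cfqd->queue->node);
	if (obj)
		memset(obj, 0, sizeof(*obj));	/* the _node variant does not zero */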
diff --git a/block/elevator.c b/block/elevator.c
index 788d2d81994c..e643291793a4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -161,12 +161,12 @@ __setup("elevator=", elevator_setup);
 
 static struct kobj_type elv_ktype;
 
-static elevator_t *elevator_alloc(struct elevator_type *e)
+static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
 {
 	elevator_t *eq;
 	int i;
 
-	eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
 	if (unlikely(!eq))
 		goto err;
 
@@ -178,7 +178,8 @@ static elevator_t *elevator_alloc(struct elevator_type *e)
 	eq->kobj.ktype = &elv_ktype;
 	mutex_init(&eq->sysfs_lock);
 
-	eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
+	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+					GFP_KERNEL, q->node);
 	if (!eq->hash)
 		goto err;
 
@@ -224,7 +225,7 @@ int elevator_init(request_queue_t *q, char *name)
 		e = elevator_get("noop");
 	}
 
-	eq = elevator_alloc(e);
+	eq = elevator_alloc(q, e);
 	if (!eq)
 		return -ENOMEM;
 
@@ -987,7 +988,7 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	/*
 	 * Allocate new elevator
 	 */
-	e = elevator_alloc(new_e);
+	e = elevator_alloc(q, new_e);
 	if (!e)
 		return 0;
 
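
elevator_alloc() gains the queue argument solely so it can reach q->node, and both call sites are converted in lockstep. A condensed caller-side sketch (error handling as in elevator_init() above):

	elevator_t *eq = elevator_alloc(q, e);	/* eq and eq->hash land on q->node */
	if (!eq)
		return -ENOMEM;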
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4b7b4461e8d6..c6dfa889206c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,6 +39,7 @@ static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
+static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 /*
  * For the allocated request tables
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[rw]+1 >= q->nr_requests) {
-			ioc = current_io_context(GFP_ATOMIC);
+			ioc = current_io_context(GFP_ATOMIC, q->node);
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -2234,7 +2235,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 	 * up to a big batch of them for a small period time.
 	 * See ioc_batching, ioc_set_batching
 	 */
-	ioc = current_io_context(GFP_NOIO);
+	ioc = current_io_context(GFP_NOIO, q->node);
 	ioc_set_batching(q, ioc);
 
 	spin_lock_irq(q->queue_lock);
@@ -3641,7 +3642,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(gfp_t gfp_flags)
+static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
 	struct task_struct *tsk = current;
 	struct io_context *ret;
@@ -3650,7 +3651,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 	if (likely(ret))
 		return ret;
 
-	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
 		ret->task = current;
@@ -3674,10 +3675,10 @@ EXPORT_SYMBOL(current_io_context);
  *
  * This is always called in the context of the task which submitted the I/O.
  */
-struct io_context *get_io_context(gfp_t gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
 	struct io_context *ret;
-	ret = current_io_context(gfp_flags);
+	ret = current_io_context(gfp_flags, node);
 	if (likely(ret))
 		atomic_inc(&ret->refcount);
 	return ret;
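
With current_io_context() made static and both helpers taking a node, get_io_context(gfp_flags, node) is the interface the schedulers use, passing the node of the queue being serviced. A hedged caller sketch (condensed from the as-iosched usage above):

	/* take a reference on current's io_context, allocating it on the
	 * queue's node if it does not exist yet */
	struct io_context *ioc = get_io_context(GFP_ATOMIC, q->node);
	if (ioc) {
		/* ... use ioc ... */
		put_io_context(ioc);
	}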
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 56a7c620574f..79af43179421 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -69,7 +69,7 @@ static void *noop_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct noop_data *nd;
 
-	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
+	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
 	if (!nd)
 		return NULL;
 	INIT_LIST_HEAD(&nd->queue);