Diffstat (limited to 'drivers')
 drivers/block/as-iosched.c                  | 327
 drivers/block/cfq-iosched.c                 | 372
 drivers/block/deadline-iosched.c            | 125
 drivers/block/elevator.c                    | 345
 drivers/block/ll_rw_blk.c                   | 193
 drivers/block/loop.c                        |   2
 drivers/block/noop-iosched.c                |  48
 drivers/block/rd.c                          |   2
 drivers/char/n_tty.c                        |   2
 drivers/ieee1394/eth1394.c                  |   2
 drivers/infiniband/hw/mthca/mthca_cmd.c     |   2
 drivers/infiniband/hw/mthca/mthca_cmd.h     |   2
 drivers/infiniband/hw/mthca/mthca_memfree.c |   2
 drivers/infiniband/hw/mthca/mthca_memfree.h |   2
 drivers/md/bitmap.c                         |   2
 drivers/md/dm-crypt.c                       |   2
 drivers/net/cassini.c                       |   4
 drivers/net/lance.c                         |   4
 drivers/net/myri_sbus.c                     |   2
 drivers/net/myri_sbus.h                     |   2
 drivers/net/sunbmac.c                       |   3
 drivers/net/sunbmac.h                       |   2
 drivers/parisc/ccio-dma.c                   |   2
 drivers/parisc/sba_iommu.c                  |   2
 drivers/s390/net/fsm.c                      |   2
 drivers/s390/net/fsm.h                      |   2
 drivers/scsi/eata.c                         |   2
 drivers/scsi/hosts.c                        |   3
 drivers/scsi/lpfc/lpfc_mem.c                |   2
 drivers/scsi/osst.c                         |   6
 drivers/scsi/qla2xxx/qla_gbl.h              |   4
 drivers/scsi/qla2xxx/qla_init.c             |   2
 drivers/scsi/qla2xxx/qla_rscn.c             |   2
 drivers/scsi/scsi.c                         |   8
 drivers/scsi/scsi_ioctl.c                   |   3
 drivers/scsi/scsi_lib.c                     |   2
 drivers/scsi/sg.c                           |   2
 drivers/scsi/st.c                           |   6
 drivers/usb/core/buffer.c                   |   2
 drivers/usb/core/hcd.c                      |   2
 drivers/usb/core/hcd.h                      |   8
 drivers/usb/core/message.c                  |   2
 drivers/usb/core/urb.c                      |   4
 drivers/usb/core/usb.c                      |   2
 drivers/usb/gadget/dummy_hcd.c              |   8
 drivers/usb/gadget/ether.c                  |  22
 drivers/usb/gadget/goku_udc.c               |   6
 drivers/usb/gadget/lh7a40x_udc.c            |  12
 drivers/usb/gadget/net2280.c                |   6
 drivers/usb/gadget/omap_udc.c               |   6
 drivers/usb/gadget/pxa2xx_udc.c             |   6
 drivers/usb/gadget/serial.c                 |  16
 drivers/usb/gadget/zero.c                   |   8
 drivers/usb/host/ehci-hcd.c                 |   2
 drivers/usb/host/ehci-mem.c                 |   6
 drivers/usb/host/ehci-q.c                   |   6
 drivers/usb/host/ehci-sched.c               |  14
 drivers/usb/host/isp116x-hcd.c              |   2
 drivers/usb/host/ohci-hcd.c                 |   2
 drivers/usb/host/ohci-mem.c                 |   4
 drivers/usb/host/sl811-hcd.c                |   2
 drivers/usb/host/uhci-q.c                   |   2
 drivers/usb/misc/uss720.c                   |   6
 drivers/usb/net/asix.c                      |   2
 drivers/usb/net/gl620a.c                    |   2
 drivers/usb/net/kaweth.c                    |   6
 drivers/usb/net/net1080.c                   |   2
 drivers/usb/net/rndis_host.c                |   2
 drivers/usb/net/usbnet.c                    |   2
 drivers/usb/net/usbnet.h                    |   2
 drivers/usb/net/zaurus.c                    |   2
 drivers/usb/net/zd1201.c                    |   2
 72 files changed, 597 insertions(+), 1078 deletions(-)
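
Note: the io scheduler conversions below share one pattern. The per-scheduler "dispatch" list that aliased q->queue_head is gone; the elevator core now owns the dispatch queue, and a scheduler keeps requests on private lists until the core calls its new elevator_dispatch_fn hook. The old next_req/insert_req/remove_req/requeue_req hooks are replaced by dispatch, add_req, activate and deactivate. A minimal sketch of the new contract follows (illustrative only; the fifo_* names and struct are not part of this patch, while elv_dispatch_add_tail() and the hook signatures are):

struct fifo_data {
	struct list_head queue;		/* scheduler-private FIFO */
};

static int fifo_dispatch(request_queue_t *q, int force)
{
	struct fifo_data *fd = q->elevator->elevator_data;

	if (!list_empty(&fd->queue)) {
		struct request *rq = list_entry_rq(fd->queue.next);

		/* move one request from the private list to the
		 * core-owned dispatch queue */
		list_del_init(&rq->queuelist);
		elv_dispatch_add_tail(q, rq);
		return 1;
	}

	return 0;
}

static void fifo_add_request(request_queue_t *q, struct request *rq)
{
	struct fifo_data *fd = q->elevator->elevator_data;

	list_add_tail(&rq->queuelist, &fd->queue);
}

static int fifo_queue_empty(request_queue_t *q)
{
	struct fifo_data *fd = q->elevator->elevator_data;

	/* only the private list counts; the core tracks q->queue_head */
	return list_empty(&fd->queue);
}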
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 95c0a3690b0f..4081c36c8c19 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -98,7 +98,6 @@ struct as_data {
 
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 
 	unsigned long exit_prob;	/* probability a task will exit while
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void)
 	return ioc;
 }
 
+static void as_put_io_context(struct as_rq *arq)
+{
+	struct as_io_context *aic;
+
+	if (unlikely(!arq->io_context))
+		return;
+
+	aic = arq->io_context->aic;
+
+	if (arq->is_sync == REQ_SYNC && aic) {
+		spin_lock(&aic->lock);
+		set_bit(AS_TASK_IORUNNING, &aic->state);
+		aic->last_end_request = jiffies;
+		spin_unlock(&aic->lock);
+	}
+
+	put_io_context(arq->io_context);
+}
+
 /*
  * the back merge hash support functions
  */
@@ -261,14 +279,6 @@ static inline void as_del_arq_hash(struct as_rq *arq)
 	__as_del_arq_hash(arq);
 }
 
-static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
-{
-	as_del_arq_hash(arq);
-
-	if (q->last_merge == arq->request)
-		q->last_merge = NULL;
-}
-
 static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
@@ -312,7 +322,7 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 		BUG_ON(!arq->on_hash);
 
 		if (!rq_mergeable(__rq)) {
-			as_remove_merge_hints(ad->q, arq);
+			as_del_arq_hash(arq);
 			continue;
 		}
 
@@ -950,23 +960,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 
 	WARN_ON(!list_empty(&rq->queuelist));
 
-	if (arq->state == AS_RQ_PRESCHED) {
-		WARN_ON(arq->io_context);
-		goto out;
-	}
-
-	if (arq->state == AS_RQ_MERGED)
-		goto out_ioc;
-
 	if (arq->state != AS_RQ_REMOVED) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 		goto out;
 	}
 
-	if (!blk_fs_request(rq))
-		goto out;
-
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
@@ -1001,21 +1000,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 		}
 	}
 
-out_ioc:
-	if (!arq->io_context)
-		goto out;
-
-	if (arq->is_sync == REQ_SYNC) {
-		struct as_io_context *aic = arq->io_context->aic;
-		if (aic) {
-			spin_lock(&aic->lock);
-			set_bit(AS_TASK_IORUNNING, &aic->state);
-			aic->last_end_request = jiffies;
-			spin_unlock(&aic->lock);
-		}
-	}
-
-	put_io_context(arq->io_context);
+	as_put_io_context(arq);
 out:
 	arq->state = AS_RQ_POSTSCHED;
 }
@@ -1047,73 +1032,11 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
 	list_del_init(&arq->fifo);
-	as_remove_merge_hints(q, arq);
+	as_del_arq_hash(arq);
 	as_del_arq_rb(ad, arq);
 }
 
 /*
- * as_remove_dispatched_request is called to remove a request which has gone
- * to the dispatch list.
- */
-static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-	struct as_io_context *aic;
-
-	if (!arq) {
-		WARN_ON(1);
-		return;
-	}
-
-	WARN_ON(arq->state != AS_RQ_DISPATCHED);
-	WARN_ON(ON_RB(&arq->rb_node));
-	if (arq->io_context && arq->io_context->aic) {
-		aic = arq->io_context->aic;
-		if (aic) {
-			WARN_ON(!atomic_read(&aic->nr_dispatched));
-			atomic_dec(&aic->nr_dispatched);
-		}
-	}
-}
-
-/*
- * as_remove_request is called when a driver has finished with a request.
- * This should be only called for dispatched requests, but for some reason
- * a POWER4 box running hwscan it does not.
- */
-static void as_remove_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-
-	if (unlikely(arq->state == AS_RQ_NEW))
-		goto out;
-
-	if (ON_RB(&arq->rb_node)) {
-		if (arq->state != AS_RQ_QUEUED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		/*
-		 * We'll lose the aliased request(s) here. I don't think this
-		 * will ever happen, but if it does, hopefully someone will
-		 * report it.
-		 */
-		WARN_ON(!list_empty(&rq->queuelist));
-		as_remove_queued_request(q, rq);
-	} else {
-		if (arq->state != AS_RQ_DISPATCHED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		as_remove_dispatched_request(q, rq);
-	}
-out:
-	arq->state = AS_RQ_REMOVED;
-}
-
-/*
  * as_fifo_expired returns 0 if there are no expired reads on the fifo,
  * 1 otherwise. It is ratelimited so that we only perform the check once per
  * `fifo_expire' interval. Otherwise a large number of expired requests
@@ -1165,7 +1088,6 @@ static inline int as_batch_expired(struct as_data *ad)
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	struct list_head *insert;
 	const int data_dir = arq->is_sync;
 
 	BUG_ON(!ON_RB(&arq->rb_node));
@@ -1198,13 +1120,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	/*
 	 * take it off the sort and fifo list, add to dispatch queue
 	 */
-	insert = ad->dispatch->prev;
-
 	while (!list_empty(&rq->queuelist)) {
 		struct request *__rq = list_entry_rq(rq->queuelist.next);
 		struct as_rq *__arq = RQ_DATA(__rq);
 
-		list_move_tail(&__rq->queuelist, ad->dispatch);
+		list_del(&__rq->queuelist);
+
+		elv_dispatch_add_tail(ad->q, __rq);
 
 		if (__arq->io_context && __arq->io_context->aic)
 			atomic_inc(&__arq->io_context->aic->nr_dispatched);
@@ -1218,7 +1140,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	as_remove_queued_request(ad->q, rq);
 	WARN_ON(arq->state != AS_RQ_QUEUED);
 
-	list_add(&rq->queuelist, insert);
+	elv_dispatch_sort(ad->q, rq);
+
 	arq->state = AS_RQ_DISPATCHED;
 	if (arq->io_context && arq->io_context->aic)
 		atomic_inc(&arq->io_context->aic->nr_dispatched);
@@ -1230,12 +1153,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(struct as_data *ad)
+static int as_dispatch_request(request_queue_t *q, int force)
 {
+	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
 	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
 
+	if (unlikely(force)) {
+		/*
+		 * Forced dispatch, accounting is useless.  Reset
+		 * accounting states and dump fifo_lists.  Note that
+		 * batch_data_dir is reset to REQ_SYNC to avoid
+		 * screwing write batch accounting as write batch
+		 * accounting occurs on W->R transition.
+		 */
+		int dispatched = 0;
+
+		ad->batch_data_dir = REQ_SYNC;
+		ad->changed_batch = 0;
+		ad->new_batch = 0;
+
+		while (ad->next_arq[REQ_SYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+		while (ad->next_arq[REQ_ASYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+		return dispatched;
+	}
+
 	/* Signal that the write batch was uncontended, so we can't time it */
 	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
 		if (ad->current_write_count == 0 || !writes)
@@ -1359,20 +1312,6 @@ fifo_expired:
 	return 1;
 }
 
-static struct request *as_next_request(request_queue_t *q)
-{
-	struct as_data *ad = q->elevator->elevator_data;
-	struct request *rq = NULL;
-
-	/*
-	 * if there are still requests on the dispatch queue, grab the first
-	 */
-	if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
-		rq = list_entry_rq(ad->dispatch->next);
-
-	return rq;
-}
-
 /*
  * Add arq to a list behind alias
  */
@@ -1404,17 +1343,25 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alias)
 	/*
 	 * Don't want to have to handle merges.
 	 */
-	as_remove_merge_hints(ad->q, arq);
+	as_del_arq_hash(arq);
 }
 
 /*
  * add arq to rbtree and fifo
  */
-static void as_add_request(struct as_data *ad, struct as_rq *arq)
+static void as_add_request(request_queue_t *q, struct request *rq)
 {
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
 	struct as_rq *alias;
 	int data_dir;
 
+	if (arq->state != AS_RQ_PRESCHED) {
+		printk("arq->state: %d\n", arq->state);
+		WARN_ON(1);
+	}
+	arq->state = AS_RQ_NEW;
+
 	if (rq_data_dir(arq->request) == READ
 			|| current->flags&PF_SYNCWRITE)
 		arq->is_sync = 1;
@@ -1437,12 +1384,8 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 	arq->expires = jiffies + ad->fifo_expire[data_dir];
 	list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
 
-	if (rq_mergeable(arq->request)) {
+	if (rq_mergeable(arq->request))
 		as_add_arq_hash(ad, arq);
-
-		if (!ad->q->last_merge)
-			ad->q->last_merge = arq->request;
-	}
 
 	as_update_arq(ad, arq); /* keep state machine up to date */
 
 	} else {
@@ -1463,96 +1406,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 	arq->state = AS_RQ_QUEUED;
 }
 
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state == AS_RQ_REMOVED) {
-			arq->state = AS_RQ_DISPATCHED;
-			if (arq->io_context && arq->io_context->aic)
-				atomic_inc(&arq->io_context->aic->nr_dispatched);
-		}
-	} else
-		WARN_ON(blk_fs_request(rq)
-			&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
-
-	/* Stop anticipating - let this request get through */
-	as_antic_stop(ad);
-}
-
-/*
- * requeue the request. The request has not been completed, nor is it a
- * new request, so don't touch accounting.
- */
-static void as_requeue_request(request_queue_t *q, struct request *rq)
-{
-	as_deactivate_request(q, rq);
-	list_add(&rq->queuelist, &q->queue_head);
-}
-
-/*
- * Account a request that is inserted directly onto the dispatch queue.
- * arq->io_context->aic->nr_dispatched should not need to be incremented
- * because only new requests should come through here: requeues go through
- * our explicit requeue handler.
- */
-static void as_account_queued_request(struct as_data *ad, struct request *rq)
-{
-	if (blk_fs_request(rq)) {
-		struct as_rq *arq = RQ_DATA(rq);
-		arq->state = AS_RQ_DISPATCHED;
-		ad->nr_dispatched++;
-	}
+	WARN_ON(arq->state != AS_RQ_DISPATCHED);
+	arq->state = AS_RQ_REMOVED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_dec(&arq->io_context->aic->nr_dispatched);
 }
 
-static void
-as_insert_request(request_queue_t *q, struct request *rq, int where)
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state != AS_RQ_PRESCHED) {
-			printk("arq->state: %d\n", arq->state);
-			WARN_ON(1);
-		}
-		arq->state = AS_RQ_NEW;
-	}
-
-	/* barriers must flush the reorder queue */
-	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-			&& where == ELEVATOR_INSERT_SORT)) {
-		WARN_ON(1);
-		where = ELEVATOR_INSERT_BACK;
-	}
-
-	switch (where) {
-	case ELEVATOR_INSERT_BACK:
-		while (ad->next_arq[REQ_SYNC])
-			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-
-		while (ad->next_arq[REQ_ASYNC])
-			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-
-		list_add_tail(&rq->queuelist, ad->dispatch);
-		as_account_queued_request(ad, rq);
-		as_antic_stop(ad);
-		break;
-	case ELEVATOR_INSERT_FRONT:
-		list_add(&rq->queuelist, ad->dispatch);
-		as_account_queued_request(ad, rq);
-		as_antic_stop(ad);
-		break;
-	case ELEVATOR_INSERT_SORT:
-		BUG_ON(!blk_fs_request(rq));
-		as_add_request(ad, arq);
-		break;
-	default:
-		BUG();
-		return;
-	}
+	WARN_ON(arq->state != AS_RQ_REMOVED);
+	arq->state = AS_RQ_DISPATCHED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_inc(&arq->io_context->aic->nr_dispatched);
 }
 
 /*
@@ -1565,12 +1436,8 @@ static int as_queue_empty(request_queue_t *q)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 
-	if (!list_empty(&ad->fifo_list[REQ_ASYNC])
-		|| !list_empty(&ad->fifo_list[REQ_SYNC])
-		|| !list_empty(ad->dispatch))
-		return 0;
-
-	return 1;
+	return list_empty(&ad->fifo_list[REQ_ASYNC])
+		&& list_empty(&ad->fifo_list[REQ_SYNC]);
 }
 
 static struct request *
@@ -1608,15 +1475,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	int ret;
 
 	/*
-	 * try last_merge to avoid going to hash
-	 */
-	ret = elv_try_last_merge(q, bio);
-	if (ret != ELEVATOR_NO_MERGE) {
-		__rq = q->last_merge;
-		goto out_insert;
-	}
-
-	/*
 	 * see if the merge hash can satisfy a back merge
 	 */
 	__rq = as_find_arq_hash(ad, bio->bi_sector);
@@ -1644,9 +1502,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
 	return ELEVATOR_NO_MERGE;
 out:
-	if (rq_mergeable(__rq))
-		q->last_merge = __rq;
-out_insert:
 	if (ret) {
 		if (rq_mergeable(__rq))
 			as_hot_arq_hash(ad, RQ_DATA(__rq));
@@ -1693,9 +1548,6 @@ static void as_merged_request(request_queue_t *q, struct request *req)
 	 * behind the disk head. We currently don't bother adjusting.
 	 */
 	}
-
-	if (arq->on_hash)
-		q->last_merge = req;
 }
 
 static void
@@ -1763,6 +1615,7 @@ as_merged_requests(request_queue_t *q, struct request *req,
 	 * kill knowledge of next, this one is a goner
 	 */
 	as_remove_queued_request(q, next);
+	as_put_io_context(anext);
 
 	anext->state = AS_RQ_MERGED;
 }
@@ -1782,7 +1635,7 @@ static void as_work_handler(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (as_next_request(q))
+	if (!as_queue_empty(q))
 		q->request_fn(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -1797,7 +1650,9 @@ static void as_put_request(request_queue_t *q, struct request *rq)
 		return;
 	}
 
-	if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
+	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
+		     arq->state != AS_RQ_PRESCHED &&
+		     arq->state != AS_RQ_MERGED)) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 	}
@@ -1807,7 +1662,7 @@ static void as_put_request(request_queue_t *q, struct request *rq)
 }
 
 static int as_set_request(request_queue_t *q, struct request *rq,
-			  struct bio *bio, int gfp_mask)
+			  struct bio *bio, gfp_t gfp_mask)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
@@ -1907,7 +1762,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
 	ad->sort_list[REQ_SYNC] = RB_ROOT;
 	ad->sort_list[REQ_ASYNC] = RB_ROOT;
-	ad->dispatch = &q->queue_head;
 	ad->fifo_expire[REQ_SYNC] = default_read_expire;
 	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
 	ad->antic_expire = default_antic_expire;
@@ -2072,10 +1926,9 @@ static struct elevator_type iosched_as = {
 		.elevator_merge_fn =		as_merge,
 		.elevator_merged_fn =		as_merged_request,
 		.elevator_merge_req_fn =	as_merged_requests,
-		.elevator_next_req_fn =		as_next_request,
-		.elevator_add_req_fn =		as_insert_request,
-		.elevator_remove_req_fn =	as_remove_request,
-		.elevator_requeue_req_fn =	as_requeue_request,
+		.elevator_dispatch_fn =		as_dispatch_request,
+		.elevator_add_req_fn =		as_add_request,
+		.elevator_activate_req_fn =	as_activate_request,
 		.elevator_deactivate_req_fn =	as_deactivate_request,
 		.elevator_queue_empty_fn =	as_queue_empty,
 		.elevator_completed_req_fn =	as_completed_request,
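
Note on the as-iosched conversion: as_insert_request(), as_remove_request() and the requeue handler are gone; the ELEVATOR_INSERT_BACK/FRONT cases are handled by the core, and forced dispatch arrives through as_dispatch_request(q, 1). The arq state machine is instead driven by the new activate/deactivate hooks. A hedged sketch of when they fire, reconstructed from this series (the authoritative caller is the elevator.c part of the patch, outside this drivers/ listing; cannot_start() is hypothetical):

	struct request *rq = elv_next_request(q);
	/*
	 * When q->queue_head runs empty, the core calls
	 * ->elevator_dispatch_fn to refill it; the first time the
	 * driver sees rq it calls ->elevator_activate_req_fn, which
	 * for AS moves the arq to AS_RQ_REMOVED and drops
	 * aic->nr_dispatched.
	 */
	if (cannot_start(rq))
		/*
		 * A driver requeue goes through the core, which calls
		 * ->elevator_deactivate_req_fn; AS flips the arq back
		 * to AS_RQ_DISPATCHED and re-takes the accounting.
		 */
		elv_requeue_request(q, rq);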
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index cd056e7e64ec..94690e4d41e0 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -84,7 +84,6 @@ static int cfq_max_depth = 2;
 	(node)->rb_left = NULL;	\
 } while (0)
 #define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
-#define ON_RB(node)		((node)->rb_color != RB_NONE)
 #define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)		(rq)->sector
 
@@ -271,10 +270,7 @@ CFQ_CFQQ_FNS(expired);
 #undef CFQ_CFQQ_FNS
 
 enum cfq_rq_state_flags {
-	CFQ_CRQ_FLAG_in_flight = 0,
-	CFQ_CRQ_FLAG_in_driver,
-	CFQ_CRQ_FLAG_is_sync,
-	CFQ_CRQ_FLAG_requeued,
+	CFQ_CRQ_FLAG_is_sync = 0,
 };
 
 #define CFQ_CRQ_FNS(name)						\
@@ -291,14 +287,11 @@ static inline int cfq_crq_##name(const struct cfq_rq *crq) \
 	return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;	\
 }
 
-CFQ_CRQ_FNS(in_flight);
-CFQ_CRQ_FNS(in_driver);
 CFQ_CRQ_FNS(is_sync);
-CFQ_CRQ_FNS(requeued);
 #undef CFQ_CRQ_FNS
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
-static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
+static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static void cfq_put_cfqd(struct cfq_data *cfqd);
 
 #define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
@@ -311,14 +304,6 @@ static inline void cfq_del_crq_hash(struct cfq_rq *crq)
 	hlist_del_init(&crq->hash);
 }
 
-static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
-{
-	cfq_del_crq_hash(crq);
-
-	if (q->last_merge == crq->request)
-		q->last_merge = NULL;
-}
-
 static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
 {
 	const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
@@ -347,18 +332,13 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 	return NULL;
 }
 
-static inline int cfq_pending_requests(struct cfq_data *cfqd)
-{
-	return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
-}
-
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
-	if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+	if (!cfqd->rq_in_driver && cfqd->busy_queues)
 		kblockd_schedule_work(&cfqd->unplug_work);
 }
 
@@ -366,7 +346,7 @@ static int cfq_queue_empty(request_queue_t *q)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
-	return !cfq_pending_requests(cfqd);
+	return !cfqd->busy_queues;
 }
 
 /*
@@ -386,11 +366,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
 	if (crq2 == NULL)
 		return crq1;
 
-	if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2))
-		return crq1;
-	else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1))
-		return crq2;
-
 	if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
 		return crq1;
 	else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
@@ -461,10 +436,7 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
 	struct rb_node *rbnext, *rbprev;
 
-	rbnext = NULL;
-	if (ON_RB(&last->rb_node))
-		rbnext = rb_next(&last->rb_node);
-	if (!rbnext) {
+	if (!(rbnext = rb_next(&last->rb_node))) {
 		rbnext = rb_first(&cfqq->sort_list);
 		if (rbnext == &last->rb_node)
 			rbnext = NULL;
@@ -545,13 +517,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
  * the pending list according to last request service
  */
 static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
 
-	cfq_resort_rr_list(cfqq, requeue);
+	cfq_resort_rr_list(cfqq, 0);
 }
 
 static inline void
@@ -571,22 +543,19 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 {
 	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_data *cfqd = cfqq->cfqd;
+	const int sync = cfq_crq_is_sync(crq);
 
-	if (ON_RB(&crq->rb_node)) {
-		struct cfq_data *cfqd = cfqq->cfqd;
-		const int sync = cfq_crq_is_sync(crq);
+	BUG_ON(!cfqq->queued[sync]);
+	cfqq->queued[sync]--;
 
-		BUG_ON(!cfqq->queued[sync]);
-		cfqq->queued[sync]--;
+	cfq_update_next_crq(crq);
 
-		cfq_update_next_crq(crq);
+	rb_erase(&crq->rb_node, &cfqq->sort_list);
+	RB_CLEAR_COLOR(&crq->rb_node);
 
-		rb_erase(&crq->rb_node, &cfqq->sort_list);
-		RB_CLEAR_COLOR(&crq->rb_node);
-
-		if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
-			cfq_del_cfqq_rr(cfqd, cfqq);
-	}
+	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static struct cfq_rq *
@@ -627,12 +596,12 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 	 * if that happens, put the alias on the dispatch list
 	 */
 	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
-		cfq_dispatch_sort(cfqd->queue, __alias);
+		cfq_dispatch_insert(cfqd->queue, __alias);
 
 	rb_insert_color(&crq->rb_node, &cfqq->sort_list);
 
 	if (!cfq_cfqq_on_rr(cfqq))
-		cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));
+		cfq_add_cfqq_rr(cfqd, cfqq);
 
 	/*
 	 * check if this request is a better next-serve candidate
@@ -643,10 +612,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 static inline void
 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
 {
-	if (ON_RB(&crq->rb_node)) {
-		rb_erase(&crq->rb_node, &cfqq->sort_list);
-		cfqq->queued[cfq_crq_is_sync(crq)]--;
-	}
+	rb_erase(&crq->rb_node, &cfqq->sort_list);
+	cfqq->queued[cfq_crq_is_sync(crq)]--;
 
 	cfq_add_crq_rb(crq);
 }
@@ -676,49 +643,28 @@ out:
 	return NULL;
 }
 
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_rq *crq = RQ_DATA(rq);
-
-	if (crq) {
-		struct cfq_queue *cfqq = crq->cfq_queue;
-
-		if (cfq_crq_in_driver(crq)) {
-			cfq_clear_crq_in_driver(crq);
-			WARN_ON(!cfqd->rq_in_driver);
-			cfqd->rq_in_driver--;
-		}
-		if (cfq_crq_in_flight(crq)) {
-			const int sync = cfq_crq_is_sync(crq);
 
-			cfq_clear_crq_in_flight(crq);
-			WARN_ON(!cfqq->on_dispatch[sync]);
-			cfqq->on_dispatch[sync]--;
-		}
-		cfq_mark_crq_requeued(crq);
-	}
+	cfqd->rq_in_driver++;
 }
 
-/*
- * make sure the service time gets corrected on reissue of this request
- */
-static void cfq_requeue_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	cfq_deactivate_request(q, rq);
-	list_add(&rq->queuelist, &q->queue_head);
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+
+	WARN_ON(!cfqd->rq_in_driver);
+	cfqd->rq_in_driver--;
 }
 
-static void cfq_remove_request(request_queue_t *q, struct request *rq)
+static void cfq_remove_request(struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
 
-	if (crq) {
-		list_del_init(&rq->queuelist);
-		cfq_del_crq_rb(crq);
-		cfq_remove_merge_hints(q, crq);
-
-	}
+	list_del_init(&rq->queuelist);
+	cfq_del_crq_rb(crq);
+	cfq_del_crq_hash(crq);
 }
 
 static int
@@ -728,12 +674,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	struct request *__rq;
 	int ret;
 
-	ret = elv_try_last_merge(q, bio);
-	if (ret != ELEVATOR_NO_MERGE) {
-		__rq = q->last_merge;
-		goto out_insert;
-	}
-
 	__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 		ret = ELEVATOR_BACK_MERGE;
@@ -748,8 +688,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
 	return ELEVATOR_NO_MERGE;
 out:
-	q->last_merge = __rq;
-out_insert:
 	*req = __rq;
 	return ret;
 }
@@ -762,14 +700,12 @@ static void cfq_merged_request(request_queue_t *q, struct request *req)
 	cfq_del_crq_hash(crq);
 	cfq_add_crq_hash(cfqd, crq);
 
-	if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+	if (rq_rb_key(req) != crq->rb_key) {
 		struct cfq_queue *cfqq = crq->cfq_queue;
 
 		cfq_update_next_crq(crq);
 		cfq_reposition_crq_rb(cfqq, crq);
 	}
-
-	q->last_merge = req;
 }
 
 static void
@@ -785,7 +721,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	    time_before(next->start_time, rq->start_time))
 		list_move(&rq->queuelist, &next->queuelist);
 
-	cfq_remove_request(q, next);
+	cfq_remove_request(next);
 }
 
 static inline void
@@ -992,53 +928,15 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	return 1;
 }
 
-/*
- * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
- * this function sector sorts the selected request to minimize seeks. we start
- * at cfqd->last_sector, not 0.
- */
-static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = crq->cfq_queue;
-	struct list_head *head = &q->queue_head, *entry = head;
-	struct request *__rq;
-	sector_t last;
-
-	list_del(&crq->request->queuelist);
-
-	last = cfqd->last_sector;
-	list_for_each_entry_reverse(__rq, head, queuelist) {
-		struct cfq_rq *__crq = RQ_DATA(__rq);
-
-		if (blk_barrier_rq(__rq))
-			break;
-		if (!blk_fs_request(__rq))
-			break;
-		if (cfq_crq_requeued(__crq))
-			break;
-
-		if (__rq->sector <= crq->request->sector)
-			break;
-		if (__rq->sector > last && crq->request->sector < last) {
-			last = crq->request->sector + crq->request->nr_sectors;
-			break;
-		}
-		entry = &__rq->queuelist;
-	}
-
-	cfqd->last_sector = last;
 
 	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
-
-	cfq_del_crq_rb(crq);
-	cfq_remove_merge_hints(q, crq);
-
-	cfq_mark_crq_in_flight(crq);
-	cfq_clear_crq_requeued(crq);
-
+	cfq_remove_request(crq->request);
 	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
-	list_add_tail(&crq->request->queuelist, entry);
+	elv_dispatch_sort(q, crq->request);
 }
 
 /*
@@ -1159,7 +1057,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		/*
 		 * finally, insert request into driver dispatch list
 		 */
-		cfq_dispatch_sort(cfqd->queue, crq);
+		cfq_dispatch_insert(cfqd->queue, crq);
 
 		cfqd->dispatch_slice++;
 		dispatched++;
@@ -1194,7 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static int
-cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
+cfq_dispatch_requests(request_queue_t *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq;
@@ -1204,12 +1102,25 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
 
 	cfqq = cfq_select_queue(cfqd, force);
 	if (cfqq) {
+		int max_dispatch;
+
+		/*
+		 * if idle window is disabled, allow queue buildup
+		 */
+		if (!cfq_cfqq_idle_window(cfqq) &&
+		    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+			return 0;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
 
-		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
+		if (!force) {
+			max_dispatch = cfqd->cfq_quantum;
+			if (cfq_class_idle(cfqq))
+				max_dispatch = 1;
+		} else
+			max_dispatch = INT_MAX;
 
 		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
 	}
@@ -1217,93 +1128,6 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
 	return 0;
 }
 
-static inline void cfq_account_dispatch(struct cfq_rq *crq)
-{
-	struct cfq_queue *cfqq = crq->cfq_queue;
-	struct cfq_data *cfqd = cfqq->cfqd;
-
-	if (unlikely(!blk_fs_request(crq->request)))
-		return;
-
-	/*
-	 * accounted bit is necessary since some drivers will call
-	 * elv_next_request() many times for the same request (eg ide)
-	 */
-	if (cfq_crq_in_driver(crq))
-		return;
-
-	cfq_mark_crq_in_driver(crq);
-	cfqd->rq_in_driver++;
-}
-
-static inline void
-cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
-	struct cfq_data *cfqd = cfqq->cfqd;
-	unsigned long now;
-
-	if (!cfq_crq_in_driver(crq))
-		return;
-
-	now = jiffies;
-
-	WARN_ON(!cfqd->rq_in_driver);
-	cfqd->rq_in_driver--;
-
-	if (!cfq_class_idle(cfqq))
-		cfqd->last_end_request = now;
-
-	if (!cfq_cfqq_dispatched(cfqq)) {
-		if (cfq_cfqq_on_rr(cfqq)) {
-			cfqq->service_last = now;
-			cfq_resort_rr_list(cfqq, 0);
-		}
-		if (cfq_cfqq_expired(cfqq)) {
-			__cfq_slice_expired(cfqd, cfqq, 0);
-			cfq_schedule_dispatch(cfqd);
-		}
-	}
-
-	if (cfq_crq_is_sync(crq))
-		crq->io_context->last_end_request = now;
-}
-
-static struct request *cfq_next_request(request_queue_t *q)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct request *rq;
-
-	if (!list_empty(&q->queue_head)) {
-		struct cfq_rq *crq;
-dispatch:
-		rq = list_entry_rq(q->queue_head.next);
-
-		crq = RQ_DATA(rq);
-		if (crq) {
-			struct cfq_queue *cfqq = crq->cfq_queue;
-
-			/*
-			 * if idle window is disabled, allow queue buildup
-			 */
-			if (!cfq_crq_in_driver(crq) &&
-			    !cfq_cfqq_idle_window(cfqq) &&
-			    !blk_barrier_rq(rq) &&
-			    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
-				return NULL;
-
-			cfq_remove_merge_hints(q, crq);
-			cfq_account_dispatch(crq);
-		}
-
-		return rq;
-	}
-
-	if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
-		goto dispatch;
-
-	return NULL;
-}
-
 /*
  * task holds one reference to the queue, dropped when task exits. each crq
  * in-flight on this queue also holds a reference, dropped when crq is freed.
@@ -1422,7 +1246,7 @@ static void cfq_exit_io_context(struct cfq_io_context *cic)
 }
 
 static struct cfq_io_context *
-cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
+cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
 	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
@@ -1517,7 +1341,7 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
-	      int gfp_mask)
+	      gfp_t gfp_mask)
 {
 	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1578,7 +1402,7 @@ out:
  * cfqq, so we don't need to worry about it disappearing
  */
 static struct cfq_io_context *
-cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
+cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 {
 	struct io_context *ioc = NULL;
 	struct cfq_io_context *cic;
@@ -1816,8 +1640,9 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
+static void cfq_insert_request(request_queue_t *q, struct request *rq)
 {
+	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_rq *crq = RQ_DATA(rq);
 	struct cfq_queue *cfqq = crq->cfq_queue;
 
@@ -1827,66 +1652,43 @@ static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
 
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
-	if (rq_mergeable(rq)) {
+	if (rq_mergeable(rq))
 		cfq_add_crq_hash(cfqd, crq);
 
-		if (!cfqd->queue->last_merge)
-			cfqd->queue->last_merge = rq;
-	}
-
 	cfq_crq_enqueued(cfqd, cfqq, crq);
 }
 
-static void
-cfq_insert_request(request_queue_t *q, struct request *rq, int where)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	switch (where) {
-	case ELEVATOR_INSERT_BACK:
-		while (cfq_dispatch_requests(q, INT_MAX, 1))
-			;
-		list_add_tail(&rq->queuelist, &q->queue_head);
-		/*
-		 * If we were idling with pending requests on
-		 * inactive cfqqs, force dispatching will
-		 * remove the idle timer and the queue won't
-		 * be kicked by __make_request() afterward.
-		 * Kick it here.
-		 */
-		cfq_schedule_dispatch(cfqd);
-		break;
-	case ELEVATOR_INSERT_FRONT:
-		list_add(&rq->queuelist, &q->queue_head);
-		break;
-	case ELEVATOR_INSERT_SORT:
-		BUG_ON(!blk_fs_request(rq));
-		cfq_enqueue(cfqd, rq);
-		break;
-	default:
-		printk("%s: bad insert point %d\n", __FUNCTION__,where);
-		return;
-	}
-}
-
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_data *cfqd = cfqq->cfqd;
+	const int sync = cfq_crq_is_sync(crq);
+	unsigned long now;
 
-	if (unlikely(!blk_fs_request(rq)))
-		return;
+	now = jiffies;
 
-	cfqq = crq->cfq_queue;
+	WARN_ON(!cfqd->rq_in_driver);
+	WARN_ON(!cfqq->on_dispatch[sync]);
+	cfqd->rq_in_driver--;
+	cfqq->on_dispatch[sync]--;
 
-	if (cfq_crq_in_flight(crq)) {
-		const int sync = cfq_crq_is_sync(crq);
+	if (!cfq_class_idle(cfqq))
+		cfqd->last_end_request = now;
 
-		WARN_ON(!cfqq->on_dispatch[sync]);
-		cfqq->on_dispatch[sync]--;
+	if (!cfq_cfqq_dispatched(cfqq)) {
+		if (cfq_cfqq_on_rr(cfqq)) {
+			cfqq->service_last = now;
+			cfq_resort_rr_list(cfqq, 0);
+		}
+		if (cfq_cfqq_expired(cfqq)) {
+			__cfq_slice_expired(cfqd, cfqq, 0);
+			cfq_schedule_dispatch(cfqd);
+		}
 	}
 
-	cfq_account_completion(cfqq, crq);
+	if (cfq_crq_is_sync(crq))
+		crq->io_context->last_end_request = now;
 }
 
 static struct request *
@@ -2075,7 +1877,7 @@ static void cfq_put_request(request_queue_t *q, struct request *rq)
  */
 static int
 cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-		int gfp_mask)
+		gfp_t gfp_mask)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
@@ -2118,9 +1920,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 		INIT_HLIST_NODE(&crq->hash);
 		crq->cfq_queue = cfqq;
 		crq->io_context = cic;
-		cfq_clear_crq_in_flight(crq);
-		cfq_clear_crq_in_driver(crq);
-		cfq_clear_crq_requeued(crq);
 
 		if (rw == READ || process_sync(tsk))
 			cfq_mark_crq_is_sync(crq);
@@ -2201,7 +2000,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 	 * only expire and reinvoke request handler, if there are
 	 * other queues with pending requests
 	 */
-	if (!cfq_pending_requests(cfqd)) {
+	if (!cfqd->busy_queues) {
 		cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
 		add_timer(&cfqd->idle_slice_timer);
 		goto out_cont;
@@ -2576,10 +2375,9 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn =		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
-		.elevator_next_req_fn =		cfq_next_request,
+		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
-		.elevator_remove_req_fn =	cfq_remove_request,
-		.elevator_requeue_req_fn =	cfq_requeue_request,
+		.elevator_activate_req_fn =	cfq_activate_request,
 		.elevator_deactivate_req_fn =	cfq_deactivate_request,
 		.elevator_queue_empty_fn =	cfq_queue_empty,
 		.elevator_completed_req_fn =	cfq_completed_request,
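
Note on the cfq conversion: the hand-rolled cfq_dispatch_sort() walk of q->queue_head is replaced by the core helper elv_dispatch_sort(), which as-iosched uses as well, and the in_flight/in_driver/requeued crq flags collapse into plain rq_in_driver/on_dispatch counters maintained from the activate/deactivate/completed hooks. For reference, a hedged sketch of what such a boundary-aware insertion sort looks like (a reconstruction for illustration only; the authoritative helper lives in the elevator.c part of this series, and details such as the q->end_sector field may differ):

void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	sector_t boundary = q->end_sector;	/* current head position */
	struct list_head *entry;

	/* scan backwards for the first slot that keeps the dispatch
	 * queue sector-sorted without passing barriers or requests the
	 * driver has already started */
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (pos->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}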
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index 52a3ae5289a0..7929471d7df7 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -50,7 +50,6 @@ struct deadline_data {
 	 * next in sort order. read, write or both are NULL
 	 */
 	struct deadline_rq *next_drq[2];
-	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 	unsigned int batching;		/* number of sequential requests made */
 	sector_t last_sector;		/* head position */
@@ -113,15 +112,6 @@ static inline void deadline_del_drq_hash(struct deadline_rq *drq)
 	__deadline_del_drq_hash(drq);
 }
 
-static void
-deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
-{
-	deadline_del_drq_hash(drq);
-
-	if (q->last_merge == drq->request)
-		q->last_merge = NULL;
-}
-
 static inline void
 deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 {
@@ -239,10 +229,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 		dd->next_drq[data_dir] = rb_entry_drq(rbnext);
 	}
 
-	if (ON_RB(&drq->rb_node)) {
-		rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-		RB_CLEAR(&drq->rb_node);
-	}
+	BUG_ON(!ON_RB(&drq->rb_node));
+	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+	RB_CLEAR(&drq->rb_node);
 }
 
 static struct request *
@@ -286,7 +275,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir)
 /*
  * add drq to rbtree and fifo
  */
-static inline void
+static void
 deadline_add_request(struct request_queue *q, struct request *rq)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
@@ -301,12 +290,8 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 	drq->expires = jiffies + dd->fifo_expire[data_dir];
 	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
 
-	if (rq_mergeable(rq)) {
+	if (rq_mergeable(rq))
 		deadline_add_drq_hash(dd, drq);
-
-		if (!q->last_merge)
-			q->last_merge = rq;
-	}
 }
 
 /*
@@ -315,14 +300,11 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
 	struct deadline_rq *drq = RQ_DATA(rq);
+	struct deadline_data *dd = q->elevator->elevator_data;
 
-	if (drq) {
-		struct deadline_data *dd = q->elevator->elevator_data;
-
-		list_del_init(&drq->fifo);
-		deadline_remove_merge_hints(q, drq);
-		deadline_del_drq_rb(dd, drq);
-	}
+	list_del_init(&drq->fifo);
+	deadline_del_drq_rb(dd, drq);
+	deadline_del_drq_hash(drq);
 }
 
 static int
@@ -333,15 +315,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	int ret;
 
 	/*
-	 * try last_merge to avoid going to hash
-	 */
-	ret = elv_try_last_merge(q, bio);
-	if (ret != ELEVATOR_NO_MERGE) {
-		__rq = q->last_merge;
-		goto out_insert;
-	}
-
-	/*
 	 * see if the merge hash can satisfy a back merge
 	 */
 	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
@@ -373,8 +346,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
 	return ELEVATOR_NO_MERGE;
 out:
-	q->last_merge = __rq;
-out_insert:
 	if (ret)
 		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
 	*req = __rq;
@@ -399,8 +370,6 @@ static void deadline_merged_request(request_queue_t *q, struct request *req)
 		deadline_del_drq_rb(dd, drq);
 		deadline_add_drq_rb(dd, drq);
 	}
-
-	q->last_merge = req;
 }
 
 static void
@@ -452,7 +421,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
 	request_queue_t *q = drq->request->q;
 
 	deadline_remove_request(q, drq->request);
-	list_add_tail(&drq->request->queuelist, dd->dispatch);
+	elv_dispatch_add_tail(q, drq->request);
 }
 
 /*
@@ -502,8 +471,9 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
  * deadline_dispatch_requests selects the best request according to
  * read/write expire, fifo_batch, etc
  */
-static int deadline_dispatch_requests(struct deadline_data *dd)
+static int deadline_dispatch_requests(request_queue_t *q, int force)
 {
+	struct deadline_data *dd = q->elevator->elevator_data;
 	const int reads = !list_empty(&dd->fifo_list[READ]);
 	const int writes = !list_empty(&dd->fifo_list[WRITE]);
 	struct deadline_rq *drq;
@@ -597,65 +567,12 @@ dispatch_request:
597 return 1; 567 return 1;
598} 568}
599 569
600static struct request *deadline_next_request(request_queue_t *q)
601{
602 struct deadline_data *dd = q->elevator->elevator_data;
603 struct request *rq;
604
605 /*
606 * if there are still requests on the dispatch queue, grab the first one
607 */
608 if (!list_empty(dd->dispatch)) {
609dispatch:
610 rq = list_entry_rq(dd->dispatch->next);
611 return rq;
612 }
613
614 if (deadline_dispatch_requests(dd))
615 goto dispatch;
616
617 return NULL;
618}
619
620static void
621deadline_insert_request(request_queue_t *q, struct request *rq, int where)
622{
623 struct deadline_data *dd = q->elevator->elevator_data;
624
625 /* barriers must flush the reorder queue */
626 if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
627 && where == ELEVATOR_INSERT_SORT))
628 where = ELEVATOR_INSERT_BACK;
629
630 switch (where) {
631 case ELEVATOR_INSERT_BACK:
632 while (deadline_dispatch_requests(dd))
633 ;
634 list_add_tail(&rq->queuelist, dd->dispatch);
635 break;
636 case ELEVATOR_INSERT_FRONT:
637 list_add(&rq->queuelist, dd->dispatch);
638 break;
639 case ELEVATOR_INSERT_SORT:
640 BUG_ON(!blk_fs_request(rq));
641 deadline_add_request(q, rq);
642 break;
643 default:
644 printk("%s: bad insert point %d\n", __FUNCTION__,where);
645 return;
646 }
647}
648
649static int deadline_queue_empty(request_queue_t *q) 570static int deadline_queue_empty(request_queue_t *q)
650{ 571{
651 struct deadline_data *dd = q->elevator->elevator_data; 572 struct deadline_data *dd = q->elevator->elevator_data;
652 573
653 if (!list_empty(&dd->fifo_list[WRITE]) 574 return list_empty(&dd->fifo_list[WRITE])
654 || !list_empty(&dd->fifo_list[READ]) 575 && list_empty(&dd->fifo_list[READ]);
655 || !list_empty(dd->dispatch))
656 return 0;
657
658 return 1;
659} 576}
660 577
661static struct request * 578static struct request *
@@ -733,7 +650,6 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
733 INIT_LIST_HEAD(&dd->fifo_list[WRITE]); 650 INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
734 dd->sort_list[READ] = RB_ROOT; 651 dd->sort_list[READ] = RB_ROOT;
735 dd->sort_list[WRITE] = RB_ROOT; 652 dd->sort_list[WRITE] = RB_ROOT;
736 dd->dispatch = &q->queue_head;
737 dd->fifo_expire[READ] = read_expire; 653 dd->fifo_expire[READ] = read_expire;
738 dd->fifo_expire[WRITE] = write_expire; 654 dd->fifo_expire[WRITE] = write_expire;
739 dd->writes_starved = writes_starved; 655 dd->writes_starved = writes_starved;
@@ -748,15 +664,13 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
748 struct deadline_data *dd = q->elevator->elevator_data; 664 struct deadline_data *dd = q->elevator->elevator_data;
749 struct deadline_rq *drq = RQ_DATA(rq); 665 struct deadline_rq *drq = RQ_DATA(rq);
750 666
751 if (drq) { 667 mempool_free(drq, dd->drq_pool);
752 mempool_free(drq, dd->drq_pool); 668 rq->elevator_private = NULL;
753 rq->elevator_private = NULL;
754 }
755} 669}
756 670
757static int 671static int
758deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, 672deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
759 int gfp_mask) 673 gfp_t gfp_mask)
760{ 674{
761 struct deadline_data *dd = q->elevator->elevator_data; 675 struct deadline_data *dd = q->elevator->elevator_data;
762 struct deadline_rq *drq; 676 struct deadline_rq *drq;
@@ -917,9 +831,8 @@ static struct elevator_type iosched_deadline = {
917 .elevator_merge_fn = deadline_merge, 831 .elevator_merge_fn = deadline_merge,
918 .elevator_merged_fn = deadline_merged_request, 832 .elevator_merged_fn = deadline_merged_request,
919 .elevator_merge_req_fn = deadline_merged_requests, 833 .elevator_merge_req_fn = deadline_merged_requests,
920 .elevator_next_req_fn = deadline_next_request, 834 .elevator_dispatch_fn = deadline_dispatch_requests,
921 .elevator_add_req_fn = deadline_insert_request, 835 .elevator_add_req_fn = deadline_add_request,
922 .elevator_remove_req_fn = deadline_remove_request,
923 .elevator_queue_empty_fn = deadline_queue_empty, 836 .elevator_queue_empty_fn = deadline_queue_empty,
924 .elevator_former_req_fn = deadline_former_request, 837 .elevator_former_req_fn = deadline_former_request,
925 .elevator_latter_req_fn = deadline_latter_request, 838 .elevator_latter_req_fn = deadline_latter_request,
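The deadline conversion above shows the shape of the new interface: the scheduler no longer owns a dispatch list or implements elevator_next_req_fn; instead, its elevator_dispatch_fn moves requests onto the generic q->queue_head and reports whether it moved any. A minimal sketch of a FIFO-only scheduler against this contract (the myfifo_* names and struct are hypothetical, for illustration only):

	struct myfifo_data {
		struct list_head fifo;		/* requests in arrival order */
	};

	/* called by the block core when q->queue_head runs empty (force == 0)
	 * or when everything must be flushed out, e.g. for a barrier (force == 1) */
	static int myfifo_dispatch(request_queue_t *q, int force)
	{
		struct myfifo_data *fd = q->elevator->elevator_data;
		struct request *rq;

		if (list_empty(&fd->fifo))
			return 0;		/* nothing moved */

		/* move the oldest request to the generic dispatch queue */
		rq = list_entry_rq(fd->fifo.next);
		list_del_init(&rq->queuelist);
		elv_dispatch_add_tail(q, rq);
		return 1;
	}

	static void myfifo_add_request(request_queue_t *q, struct request *rq)
	{
		struct myfifo_data *fd = q->elevator->elevator_data;

		list_add_tail(&rq->queuelist, &fd->fifo);
	}

The return value is what lets the core drive the scheduler: __elv_add_request() below loops on elevator_dispatch_fn(q, 1) until it returns zero before a back insertion, and __elv_next_request() calls it with force == 0 whenever q->queue_head is empty.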
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 98f0126a2deb..55621d5c5774 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/compiler.h> 36#include <linux/compiler.h>
37#include <linux/delay.h>
37 38
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39 40
@@ -83,21 +84,11 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
83} 84}
84EXPORT_SYMBOL(elv_try_merge); 85EXPORT_SYMBOL(elv_try_merge);
85 86
86inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
87{
88 if (q->last_merge)
89 return elv_try_merge(q->last_merge, bio);
90
91 return ELEVATOR_NO_MERGE;
92}
93EXPORT_SYMBOL(elv_try_last_merge);
94
95static struct elevator_type *elevator_find(const char *name) 87static struct elevator_type *elevator_find(const char *name)
96{ 88{
97 struct elevator_type *e = NULL; 89 struct elevator_type *e = NULL;
98 struct list_head *entry; 90 struct list_head *entry;
99 91
100 spin_lock_irq(&elv_list_lock);
101 list_for_each(entry, &elv_list) { 92 list_for_each(entry, &elv_list) {
102 struct elevator_type *__e; 93 struct elevator_type *__e;
103 94
@@ -108,7 +99,6 @@ static struct elevator_type *elevator_find(const char *name)
108 break; 99 break;
109 } 100 }
110 } 101 }
111 spin_unlock_irq(&elv_list_lock);
112 102
113 return e; 103 return e;
114} 104}
@@ -120,12 +110,15 @@ static void elevator_put(struct elevator_type *e)
120 110
121static struct elevator_type *elevator_get(const char *name) 111static struct elevator_type *elevator_get(const char *name)
122{ 112{
123 struct elevator_type *e = elevator_find(name); 113 struct elevator_type *e;
124 114
125 if (!e) 115 spin_lock_irq(&elv_list_lock);
126 return NULL; 116
127 if (!try_module_get(e->elevator_owner)) 117 e = elevator_find(name);
128 return NULL; 118 if (e && !try_module_get(e->elevator_owner))
119 e = NULL;
120
121 spin_unlock_irq(&elv_list_lock);
129 122
130 return e; 123 return e;
131} 124}
@@ -139,8 +132,6 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
139 eq->ops = &e->ops; 132 eq->ops = &e->ops;
140 eq->elevator_type = e; 133 eq->elevator_type = e;
141 134
142 INIT_LIST_HEAD(&q->queue_head);
143 q->last_merge = NULL;
144 q->elevator = eq; 135 q->elevator = eq;
145 136
146 if (eq->ops->elevator_init_fn) 137 if (eq->ops->elevator_init_fn)
@@ -153,11 +144,15 @@ static char chosen_elevator[16];
153 144
154static void elevator_setup_default(void) 145static void elevator_setup_default(void)
155{ 146{
147 struct elevator_type *e;
148
156 /* 149 /*
157 * check if default is set and exists 150 * check if default is set and exists
158 */ 151 */
159 if (chosen_elevator[0] && elevator_find(chosen_elevator)) 152 if (chosen_elevator[0] && (e = elevator_get(chosen_elevator))) {
153 elevator_put(e);
160 return; 154 return;
155 }
161 156
162#if defined(CONFIG_IOSCHED_AS) 157#if defined(CONFIG_IOSCHED_AS)
163 strcpy(chosen_elevator, "anticipatory"); 158 strcpy(chosen_elevator, "anticipatory");
@@ -186,6 +181,11 @@ int elevator_init(request_queue_t *q, char *name)
186 struct elevator_queue *eq; 181 struct elevator_queue *eq;
187 int ret = 0; 182 int ret = 0;
188 183
184 INIT_LIST_HEAD(&q->queue_head);
185 q->last_merge = NULL;
186 q->end_sector = 0;
187 q->boundary_rq = NULL;
188
189 elevator_setup_default(); 189 elevator_setup_default();
190 190
191 if (!name) 191 if (!name)
@@ -220,9 +220,52 @@ void elevator_exit(elevator_t *e)
220 kfree(e); 220 kfree(e);
221} 221}
222 222
223/*
 224 * Sort-insert rq into the dispatch queue of q, ordered by sector
 225 * relative to the current dispatch boundary (q->end_sector). Queue
 226 * lock must be held on entry. To be used by specific elevators.
227 */
228void elv_dispatch_sort(request_queue_t *q, struct request *rq)
229{
230 sector_t boundary;
231 struct list_head *entry;
232
233 if (q->last_merge == rq)
234 q->last_merge = NULL;
235
236 boundary = q->end_sector;
237
238 list_for_each_prev(entry, &q->queue_head) {
239 struct request *pos = list_entry_rq(entry);
240
241 if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
242 break;
243 if (rq->sector >= boundary) {
244 if (pos->sector < boundary)
245 continue;
246 } else {
247 if (pos->sector >= boundary)
248 break;
249 }
250 if (rq->sector >= pos->sector)
251 break;
252 }
253
254 list_add(&rq->queuelist, entry);
255}
256
223int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) 257int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
224{ 258{
225 elevator_t *e = q->elevator; 259 elevator_t *e = q->elevator;
260 int ret;
261
262 if (q->last_merge) {
263 ret = elv_try_merge(q->last_merge, bio);
264 if (ret != ELEVATOR_NO_MERGE) {
265 *req = q->last_merge;
266 return ret;
267 }
268 }
226 269
227 if (e->ops->elevator_merge_fn) 270 if (e->ops->elevator_merge_fn)
228 return e->ops->elevator_merge_fn(q, req, bio); 271 return e->ops->elevator_merge_fn(q, req, bio);
@@ -236,6 +279,8 @@ void elv_merged_request(request_queue_t *q, struct request *rq)
236 279
237 if (e->ops->elevator_merged_fn) 280 if (e->ops->elevator_merged_fn)
238 e->ops->elevator_merged_fn(q, rq); 281 e->ops->elevator_merged_fn(q, rq);
282
283 q->last_merge = rq;
239} 284}
240 285
241void elv_merge_requests(request_queue_t *q, struct request *rq, 286void elv_merge_requests(request_queue_t *q, struct request *rq,
@@ -243,20 +288,13 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
243{ 288{
244 elevator_t *e = q->elevator; 289 elevator_t *e = q->elevator;
245 290
246 if (q->last_merge == next)
247 q->last_merge = NULL;
248
249 if (e->ops->elevator_merge_req_fn) 291 if (e->ops->elevator_merge_req_fn)
250 e->ops->elevator_merge_req_fn(q, rq, next); 292 e->ops->elevator_merge_req_fn(q, rq, next);
293
294 q->last_merge = rq;
251} 295}
252 296
253/* 297void elv_requeue_request(request_queue_t *q, struct request *rq)
254 * For careful internal use by the block layer. Essentially the same as
255 * a requeue in that it tells the io scheduler that this request is not
256 * active in the driver or hardware anymore, but we don't want the request
257 * added back to the scheduler. Function is not exported.
258 */
259void elv_deactivate_request(request_queue_t *q, struct request *rq)
260{ 298{
261 elevator_t *e = q->elevator; 299 elevator_t *e = q->elevator;
262 300
@@ -264,19 +302,14 @@ void elv_deactivate_request(request_queue_t *q, struct request *rq)
264 * it already went through dequeue, we need to decrement the 302 * it already went through dequeue, we need to decrement the
265 * in_flight count again 303 * in_flight count again
266 */ 304 */
267 if (blk_account_rq(rq)) 305 if (blk_account_rq(rq)) {
268 q->in_flight--; 306 q->in_flight--;
307 if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
308 e->ops->elevator_deactivate_req_fn(q, rq);
309 }
269 310
270 rq->flags &= ~REQ_STARTED; 311 rq->flags &= ~REQ_STARTED;
271 312
272 if (e->ops->elevator_deactivate_req_fn)
273 e->ops->elevator_deactivate_req_fn(q, rq);
274}
275
276void elv_requeue_request(request_queue_t *q, struct request *rq)
277{
278 elv_deactivate_request(q, rq);
279
280 /* 313 /*
281 * if this is the flush, requeue the original instead and drop the flush 314 * if this is the flush, requeue the original instead and drop the flush
282 */ 315 */
@@ -285,31 +318,27 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
285 rq = rq->end_io_data; 318 rq = rq->end_io_data;
286 } 319 }
287 320
288 /* 321 __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
289 * the request is prepped and may have some resources allocated.
290 * allowing unprepped requests to pass this one may cause resource
291 * deadlock. turn on softbarrier.
292 */
293 rq->flags |= REQ_SOFTBARRIER;
294
295 /*
296 * if iosched has an explicit requeue hook, then use that. otherwise
297 * just put the request at the front of the queue
298 */
299 if (q->elevator->ops->elevator_requeue_req_fn)
300 q->elevator->ops->elevator_requeue_req_fn(q, rq);
301 else
302 __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
303} 322}
304 323
305void __elv_add_request(request_queue_t *q, struct request *rq, int where, 324void __elv_add_request(request_queue_t *q, struct request *rq, int where,
306 int plug) 325 int plug)
307{ 326{
308 /* 327 if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
309 * barriers implicitly indicate back insertion 328 /*
310 */ 329 * barriers implicitly indicate back insertion
311 if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) && 330 */
312 where == ELEVATOR_INSERT_SORT) 331 if (where == ELEVATOR_INSERT_SORT)
332 where = ELEVATOR_INSERT_BACK;
333
334 /*
335 * this request is scheduling boundary, update end_sector
336 */
337 if (blk_fs_request(rq)) {
338 q->end_sector = rq_end_sector(rq);
339 q->boundary_rq = rq;
340 }
341 } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
313 where = ELEVATOR_INSERT_BACK; 342 where = ELEVATOR_INSERT_BACK;
314 343
315 if (plug) 344 if (plug)
@@ -317,23 +346,54 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
317 346
318 rq->q = q; 347 rq->q = q;
319 348
320 if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) { 349 switch (where) {
321 q->elevator->ops->elevator_add_req_fn(q, rq, where); 350 case ELEVATOR_INSERT_FRONT:
351 rq->flags |= REQ_SOFTBARRIER;
322 352
323 if (blk_queue_plugged(q)) { 353 list_add(&rq->queuelist, &q->queue_head);
324 int nrq = q->rq.count[READ] + q->rq.count[WRITE] 354 break;
325 - q->in_flight;
326 355
327 if (nrq >= q->unplug_thresh) 356 case ELEVATOR_INSERT_BACK:
328 __generic_unplug_device(q); 357 rq->flags |= REQ_SOFTBARRIER;
329 } 358
330 } else 359 while (q->elevator->ops->elevator_dispatch_fn(q, 1))
360 ;
361 list_add_tail(&rq->queuelist, &q->queue_head);
331 /* 362 /*
332 * if drain is set, store the request "locally". when the drain 363 * We kick the queue here for the following reasons.
333 * is finished, the requests will be handed ordered to the io 364 * - The elevator might have returned NULL previously
334 * scheduler 365 * to delay requests and is returning them now. As the
 366 * queue wasn't empty before this request, ll_rw_blk
 367 * won't run the queue on return, resulting in a hang.
368 * - Usually, back inserted requests won't be merged
369 * with anything. There's no point in delaying queue
370 * processing.
335 */ 371 */
336 list_add_tail(&rq->queuelist, &q->drain_list); 372 blk_remove_plug(q);
373 q->request_fn(q);
374 break;
375
376 case ELEVATOR_INSERT_SORT:
377 BUG_ON(!blk_fs_request(rq));
378 rq->flags |= REQ_SORTED;
379 q->elevator->ops->elevator_add_req_fn(q, rq);
380 if (q->last_merge == NULL && rq_mergeable(rq))
381 q->last_merge = rq;
382 break;
383
384 default:
385 printk(KERN_ERR "%s: bad insertion point %d\n",
386 __FUNCTION__, where);
387 BUG();
388 }
389
390 if (blk_queue_plugged(q)) {
391 int nrq = q->rq.count[READ] + q->rq.count[WRITE]
392 - q->in_flight;
393
394 if (nrq >= q->unplug_thresh)
395 __generic_unplug_device(q);
396 }
337} 397}
338 398
339void elv_add_request(request_queue_t *q, struct request *rq, int where, 399void elv_add_request(request_queue_t *q, struct request *rq, int where,
@@ -348,13 +408,19 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
348 408
349static inline struct request *__elv_next_request(request_queue_t *q) 409static inline struct request *__elv_next_request(request_queue_t *q)
350{ 410{
351 struct request *rq = q->elevator->ops->elevator_next_req_fn(q); 411 struct request *rq;
412
413 if (unlikely(list_empty(&q->queue_head) &&
414 !q->elevator->ops->elevator_dispatch_fn(q, 0)))
415 return NULL;
416
417 rq = list_entry_rq(q->queue_head.next);
352 418
353 /* 419 /*
354 * if this is a barrier write and the device has to issue a 420 * if this is a barrier write and the device has to issue a
355 * flush sequence to support it, check how far we are 421 * flush sequence to support it, check how far we are
356 */ 422 */
357 if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) { 423 if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
358 BUG_ON(q->ordered == QUEUE_ORDERED_NONE); 424 BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
359 425
360 if (q->ordered == QUEUE_ORDERED_FLUSH && 426 if (q->ordered == QUEUE_ORDERED_FLUSH &&
@@ -371,15 +437,30 @@ struct request *elv_next_request(request_queue_t *q)
371 int ret; 437 int ret;
372 438
373 while ((rq = __elv_next_request(q)) != NULL) { 439 while ((rq = __elv_next_request(q)) != NULL) {
374 /* 440 if (!(rq->flags & REQ_STARTED)) {
375 * just mark as started even if we don't start it, a request 441 elevator_t *e = q->elevator;
376 * that has been delayed should not be passed by new incoming
377 * requests
378 */
379 rq->flags |= REQ_STARTED;
380 442
381 if (rq == q->last_merge) 443 /*
382 q->last_merge = NULL; 444 * This is the first time the device driver
445 * sees this request (possibly after
446 * requeueing). Notify IO scheduler.
447 */
448 if (blk_sorted_rq(rq) &&
449 e->ops->elevator_activate_req_fn)
450 e->ops->elevator_activate_req_fn(q, rq);
451
452 /*
453 * just mark as started even if we don't start
454 * it, a request that has been delayed should
455 * not be passed by new incoming requests
456 */
457 rq->flags |= REQ_STARTED;
458 }
459
460 if (!q->boundary_rq || q->boundary_rq == rq) {
461 q->end_sector = rq_end_sector(rq);
462 q->boundary_rq = NULL;
463 }
383 464
384 if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn) 465 if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
385 break; 466 break;
@@ -391,9 +472,9 @@ struct request *elv_next_request(request_queue_t *q)
391 /* 472 /*
392 * the request may have been (partially) prepped. 473 * the request may have been (partially) prepped.
393 * we need to keep this request in the front to 474 * we need to keep this request in the front to
394 * avoid resource deadlock. turn on softbarrier. 475 * avoid resource deadlock. REQ_STARTED will
476 * prevent other fs requests from passing this one.
395 */ 477 */
396 rq->flags |= REQ_SOFTBARRIER;
397 rq = NULL; 478 rq = NULL;
398 break; 479 break;
399 } else if (ret == BLKPREP_KILL) { 480 } else if (ret == BLKPREP_KILL) {
@@ -416,42 +497,32 @@ struct request *elv_next_request(request_queue_t *q)
416 return rq; 497 return rq;
417} 498}
418 499
419void elv_remove_request(request_queue_t *q, struct request *rq) 500void elv_dequeue_request(request_queue_t *q, struct request *rq)
420{ 501{
421 elevator_t *e = q->elevator; 502 BUG_ON(list_empty(&rq->queuelist));
503
504 list_del_init(&rq->queuelist);
422 505
423 /* 506 /*
424 * the time frame between a request being removed from the lists 507 * the time frame between a request being removed from the lists
425 * and it being freed is accounted as io that is in progress at 508 * and it being freed is accounted as io that is in progress at
426 * the driver side. note that we only account requests that the 509 * the driver side.
427 * driver has seen (REQ_STARTED set), to avoid false accounting
428 * for request-request merges
429 */ 510 */
430 if (blk_account_rq(rq)) 511 if (blk_account_rq(rq))
431 q->in_flight++; 512 q->in_flight++;
432
433 /*
434 * the main clearing point for q->last_merge is on retrieval of
435 * request by driver (it calls elv_next_request()), but it _can_
436 * also happen here if a request is added to the queue but later
437 * deleted without ever being given to driver (merged with another
438 * request).
439 */
440 if (rq == q->last_merge)
441 q->last_merge = NULL;
442
443 if (e->ops->elevator_remove_req_fn)
444 e->ops->elevator_remove_req_fn(q, rq);
445} 513}
446 514
447int elv_queue_empty(request_queue_t *q) 515int elv_queue_empty(request_queue_t *q)
448{ 516{
449 elevator_t *e = q->elevator; 517 elevator_t *e = q->elevator;
450 518
519 if (!list_empty(&q->queue_head))
520 return 0;
521
451 if (e->ops->elevator_queue_empty_fn) 522 if (e->ops->elevator_queue_empty_fn)
452 return e->ops->elevator_queue_empty_fn(q); 523 return e->ops->elevator_queue_empty_fn(q);
453 524
454 return list_empty(&q->queue_head); 525 return 1;
455} 526}
456 527
457struct request *elv_latter_request(request_queue_t *q, struct request *rq) 528struct request *elv_latter_request(request_queue_t *q, struct request *rq)
@@ -487,7 +558,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
487} 558}
488 559
489int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio, 560int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
490 int gfp_mask) 561 gfp_t gfp_mask)
491{ 562{
492 elevator_t *e = q->elevator; 563 elevator_t *e = q->elevator;
493 564
@@ -523,11 +594,11 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
523 /* 594 /*
524 * request is released from the driver, io must be done 595 * request is released from the driver, io must be done
525 */ 596 */
526 if (blk_account_rq(rq)) 597 if (blk_account_rq(rq)) {
527 q->in_flight--; 598 q->in_flight--;
528 599 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
529 if (e->ops->elevator_completed_req_fn) 600 e->ops->elevator_completed_req_fn(q, rq);
530 e->ops->elevator_completed_req_fn(q, rq); 601 }
531} 602}
532 603
533int elv_register_queue(struct request_queue *q) 604int elv_register_queue(struct request_queue *q)
@@ -555,10 +626,9 @@ void elv_unregister_queue(struct request_queue *q)
555 626
556int elv_register(struct elevator_type *e) 627int elv_register(struct elevator_type *e)
557{ 628{
629 spin_lock_irq(&elv_list_lock);
558 if (elevator_find(e->elevator_name)) 630 if (elevator_find(e->elevator_name))
559 BUG(); 631 BUG();
560
561 spin_lock_irq(&elv_list_lock);
562 list_add_tail(&e->list, &elv_list); 632 list_add_tail(&e->list, &elv_list);
563 spin_unlock_irq(&elv_list_lock); 633 spin_unlock_irq(&elv_list_lock);
564 634
@@ -582,25 +652,36 @@ EXPORT_SYMBOL_GPL(elv_unregister);
582 * switch to new_e io scheduler. be careful not to introduce deadlocks - 652 * switch to new_e io scheduler. be careful not to introduce deadlocks -
583 * we don't free the old io scheduler before we have allocated what we 653 * we don't free the old io scheduler before we have allocated what we
584 * need for the new one. this way we have a chance of going back to the old 654 * need for the new one. this way we have a chance of going back to the old
585 * one, if the new one fails init for some reason. we also do an intermediate 655 * one, if the new one fails init for some reason.
586 * switch to noop to ensure safety with stack-allocated requests, since they
587 * don't originate from the block layer allocator. noop is safe here, because
588 * it never needs to touch the elevator itself for completion events. DRAIN
589 * flags will make sure we don't touch it for additions either.
590 */ 656 */
591static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) 657static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
592{ 658{
593 elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL); 659 elevator_t *old_elevator, *e;
594 struct elevator_type *noop_elevator = NULL;
595 elevator_t *old_elevator;
596 660
661 /*
662 * Allocate new elevator
663 */
664 e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
597 if (!e) 665 if (!e)
598 goto error; 666 goto error;
599 667
600 /* 668 /*
601 * first step, drain requests from the block freelist 669 * Turn on BYPASS and drain all requests w/ elevator private data
602 */ 670 */
603 blk_wait_queue_drained(q, 0); 671 spin_lock_irq(q->queue_lock);
672
673 set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
674
675 while (q->elevator->ops->elevator_dispatch_fn(q, 1))
676 ;
677
678 while (q->rq.elvpriv) {
679 spin_unlock_irq(q->queue_lock);
680 msleep(10);
681 spin_lock_irq(q->queue_lock);
682 }
683
684 spin_unlock_irq(q->queue_lock);
604 685
605 /* 686 /*
606 * unregister old elevator data 687 * unregister old elevator data
@@ -609,18 +690,6 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
609 old_elevator = q->elevator; 690 old_elevator = q->elevator;
610 691
611 /* 692 /*
612 * next step, switch to noop since it uses no private rq structures
613 * and doesn't allocate any memory for anything. then wait for any
614 * non-fs requests in-flight
615 */
616 noop_elevator = elevator_get("noop");
617 spin_lock_irq(q->queue_lock);
618 elevator_attach(q, noop_elevator, e);
619 spin_unlock_irq(q->queue_lock);
620
621 blk_wait_queue_drained(q, 1);
622
623 /*
624 * attach and start new elevator 693 * attach and start new elevator
625 */ 694 */
626 if (elevator_attach(q, new_e, e)) 695 if (elevator_attach(q, new_e, e))
@@ -630,11 +699,10 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
630 goto fail_register; 699 goto fail_register;
631 700
632 /* 701 /*
633 * finally exit old elevator and start queue again 702 * finally exit old elevator and turn off BYPASS.
634 */ 703 */
635 elevator_exit(old_elevator); 704 elevator_exit(old_elevator);
636 blk_finish_queue_drain(q); 705 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
637 elevator_put(noop_elevator);
638 return; 706 return;
639 707
640fail_register: 708fail_register:
@@ -643,13 +711,13 @@ fail_register:
643 * one again (along with re-adding the sysfs dir) 711 * one again (along with re-adding the sysfs dir)
644 */ 712 */
645 elevator_exit(e); 713 elevator_exit(e);
714 e = NULL;
646fail: 715fail:
647 q->elevator = old_elevator; 716 q->elevator = old_elevator;
648 elv_register_queue(q); 717 elv_register_queue(q);
649 blk_finish_queue_drain(q); 718 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
719 kfree(e);
650error: 720error:
651 if (noop_elevator)
652 elevator_put(noop_elevator);
653 elevator_put(new_e); 721 elevator_put(new_e);
654 printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name); 722 printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
655} 723}
@@ -701,11 +769,12 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
701 return len; 769 return len;
702} 770}
703 771
772EXPORT_SYMBOL(elv_dispatch_sort);
704EXPORT_SYMBOL(elv_add_request); 773EXPORT_SYMBOL(elv_add_request);
705EXPORT_SYMBOL(__elv_add_request); 774EXPORT_SYMBOL(__elv_add_request);
706EXPORT_SYMBOL(elv_requeue_request); 775EXPORT_SYMBOL(elv_requeue_request);
707EXPORT_SYMBOL(elv_next_request); 776EXPORT_SYMBOL(elv_next_request);
708EXPORT_SYMBOL(elv_remove_request); 777EXPORT_SYMBOL(elv_dequeue_request);
709EXPORT_SYMBOL(elv_queue_empty); 778EXPORT_SYMBOL(elv_queue_empty);
710EXPORT_SYMBOL(elv_completed_request); 779EXPORT_SYMBOL(elv_completed_request);
711EXPORT_SYMBOL(elevator_exit); 780EXPORT_SYMBOL(elevator_exit);
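elv_dispatch_sort() implements a one-way elevator: the dispatch queue stays in ascending sector order starting from q->end_sector, requests below that boundary sort after the ones above it, and the backwards scan never crosses a started or barrier request. The following user-space toy applies the same comparison to bare sector numbers (the sectors and boundary are made-up values, and the request-flag checks are omitted):

	#include <stdio.h>
	#include <string.h>

	#define MAX_REQS 16

	/* insert sector s into list[0..*n), keeping one-way elevator order
	 * around `boundary`; mirrors the tail-to-head scan in elv_dispatch_sort() */
	static void insert_sorted(unsigned long *list, int *n,
				  unsigned long s, unsigned long boundary)
	{
		int i;

		for (i = *n - 1; i >= 0; i--) {
			unsigned long pos = list[i];

			if (s >= boundary) {
				if (pos < boundary)
					continue;	/* skip wrapped requests */
			} else {
				if (pos >= boundary)
					break;		/* stay behind the wrap point */
			}
			if (s >= pos)
				break;
		}
		memmove(&list[i + 2], &list[i + 1],
			(*n - i - 1) * sizeof(*list));
		list[i + 1] = s;
		(*n)++;
	}

	int main(void)
	{
		unsigned long list[MAX_REQS];
		unsigned long in[] = { 120, 40, 160, 10, 80 };
		int n = 0, i;

		for (i = 0; i < 5; i++)
			insert_sorted(list, &n, in[i], 100);
		for (i = 0; i < n; i++)
			printf("%lu ", list[i]);
		printf("\n");	/* prints: 120 160 10 40 80 */
		return 0;
	}

With a boundary of 100 the head keeps seeking forward (120, 160), then wraps and sweeps forward again (10, 40, 80), which is why the insertion point is found by scanning from the tail.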
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index baedac522945..0af73512b9a8 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -263,8 +263,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
263 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); 263 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
264 264
265 blk_queue_activity_fn(q, NULL, NULL); 265 blk_queue_activity_fn(q, NULL, NULL);
266
267 INIT_LIST_HEAD(&q->drain_list);
268} 266}
269 267
270EXPORT_SYMBOL(blk_queue_make_request); 268EXPORT_SYMBOL(blk_queue_make_request);
@@ -353,6 +351,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
353 struct request *rq = flush_rq->end_io_data; 351 struct request *rq = flush_rq->end_io_data;
354 request_queue_t *q = rq->q; 352 request_queue_t *q = rq->q;
355 353
354 elv_completed_request(q, flush_rq);
355
356 rq->flags |= REQ_BAR_PREFLUSH; 356 rq->flags |= REQ_BAR_PREFLUSH;
357 357
358 if (!flush_rq->errors) 358 if (!flush_rq->errors)
@@ -369,6 +369,8 @@ static void blk_post_flush_end_io(struct request *flush_rq)
369 struct request *rq = flush_rq->end_io_data; 369 struct request *rq = flush_rq->end_io_data;
370 request_queue_t *q = rq->q; 370 request_queue_t *q = rq->q;
371 371
372 elv_completed_request(q, flush_rq);
373
372 rq->flags |= REQ_BAR_POSTFLUSH; 374 rq->flags |= REQ_BAR_POSTFLUSH;
373 375
374 q->end_flush_fn(q, flush_rq); 376 q->end_flush_fn(q, flush_rq);
@@ -408,8 +410,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
408 if (!list_empty(&rq->queuelist)) 410 if (!list_empty(&rq->queuelist))
409 blkdev_dequeue_request(rq); 411 blkdev_dequeue_request(rq);
410 412
411 elv_deactivate_request(q, rq);
412
413 flush_rq->end_io_data = rq; 413 flush_rq->end_io_data = rq;
414 flush_rq->end_io = blk_pre_flush_end_io; 414 flush_rq->end_io = blk_pre_flush_end_io;
415 415
@@ -1040,6 +1040,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
1040static char *rq_flags[] = { 1040static char *rq_flags[] = {
1041 "REQ_RW", 1041 "REQ_RW",
1042 "REQ_FAILFAST", 1042 "REQ_FAILFAST",
1043 "REQ_SORTED",
1043 "REQ_SOFTBARRIER", 1044 "REQ_SOFTBARRIER",
1044 "REQ_HARDBARRIER", 1045 "REQ_HARDBARRIER",
1045 "REQ_CMD", 1046 "REQ_CMD",
@@ -1047,6 +1048,7 @@ static char *rq_flags[] = {
1047 "REQ_STARTED", 1048 "REQ_STARTED",
1048 "REQ_DONTPREP", 1049 "REQ_DONTPREP",
1049 "REQ_QUEUED", 1050 "REQ_QUEUED",
1051 "REQ_ELVPRIV",
1050 "REQ_PC", 1052 "REQ_PC",
1051 "REQ_BLOCK_PC", 1053 "REQ_BLOCK_PC",
1052 "REQ_SENSE", 1054 "REQ_SENSE",
@@ -1637,9 +1639,9 @@ static int blk_init_free_list(request_queue_t *q)
1637 1639
1638 rl->count[READ] = rl->count[WRITE] = 0; 1640 rl->count[READ] = rl->count[WRITE] = 0;
1639 rl->starved[READ] = rl->starved[WRITE] = 0; 1641 rl->starved[READ] = rl->starved[WRITE] = 0;
1642 rl->elvpriv = 0;
1640 init_waitqueue_head(&rl->wait[READ]); 1643 init_waitqueue_head(&rl->wait[READ]);
1641 init_waitqueue_head(&rl->wait[WRITE]); 1644 init_waitqueue_head(&rl->wait[WRITE]);
1642 init_waitqueue_head(&rl->drain);
1643 1645
1644 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, 1646 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
1645 mempool_free_slab, request_cachep, q->node); 1647 mempool_free_slab, request_cachep, q->node);
@@ -1652,13 +1654,13 @@ static int blk_init_free_list(request_queue_t *q)
1652 1654
1653static int __make_request(request_queue_t *, struct bio *); 1655static int __make_request(request_queue_t *, struct bio *);
1654 1656
1655request_queue_t *blk_alloc_queue(int gfp_mask) 1657request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
1656{ 1658{
1657 return blk_alloc_queue_node(gfp_mask, -1); 1659 return blk_alloc_queue_node(gfp_mask, -1);
1658} 1660}
1659EXPORT_SYMBOL(blk_alloc_queue); 1661EXPORT_SYMBOL(blk_alloc_queue);
1660 1662
1661request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id) 1663request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1662{ 1664{
1663 request_queue_t *q; 1665 request_queue_t *q;
1664 1666
@@ -1782,12 +1784,14 @@ EXPORT_SYMBOL(blk_get_queue);
1782 1784
1783static inline void blk_free_request(request_queue_t *q, struct request *rq) 1785static inline void blk_free_request(request_queue_t *q, struct request *rq)
1784{ 1786{
1785 elv_put_request(q, rq); 1787 if (rq->flags & REQ_ELVPRIV)
1788 elv_put_request(q, rq);
1786 mempool_free(rq, q->rq.rq_pool); 1789 mempool_free(rq, q->rq.rq_pool);
1787} 1790}
1788 1791
1789static inline struct request * 1792static inline struct request *
1790blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) 1793blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
1794 int priv, gfp_t gfp_mask)
1791{ 1795{
1792 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 1796 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
1793 1797
@@ -1800,11 +1804,15 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
1800 */ 1804 */
1801 rq->flags = rw; 1805 rq->flags = rw;
1802 1806
1803 if (!elv_set_request(q, rq, bio, gfp_mask)) 1807 if (priv) {
1804 return rq; 1808 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
1809 mempool_free(rq, q->rq.rq_pool);
1810 return NULL;
1811 }
1812 rq->flags |= REQ_ELVPRIV;
1813 }
1805 1814
1806 mempool_free(rq, q->rq.rq_pool); 1815 return rq;
1807 return NULL;
1808} 1816}
1809 1817
1810/* 1818/*
@@ -1860,22 +1868,18 @@ static void __freed_request(request_queue_t *q, int rw)
1860 * A request has just been released. Account for it, update the full and 1868 * A request has just been released. Account for it, update the full and
1861 * congestion status, wake up any waiters. Called under q->queue_lock. 1869 * congestion status, wake up any waiters. Called under q->queue_lock.
1862 */ 1870 */
1863static void freed_request(request_queue_t *q, int rw) 1871static void freed_request(request_queue_t *q, int rw, int priv)
1864{ 1872{
1865 struct request_list *rl = &q->rq; 1873 struct request_list *rl = &q->rq;
1866 1874
1867 rl->count[rw]--; 1875 rl->count[rw]--;
1876 if (priv)
1877 rl->elvpriv--;
1868 1878
1869 __freed_request(q, rw); 1879 __freed_request(q, rw);
1870 1880
1871 if (unlikely(rl->starved[rw ^ 1])) 1881 if (unlikely(rl->starved[rw ^ 1]))
1872 __freed_request(q, rw ^ 1); 1882 __freed_request(q, rw ^ 1);
1873
1874 if (!rl->count[READ] && !rl->count[WRITE]) {
1875 smp_mb();
1876 if (unlikely(waitqueue_active(&rl->drain)))
1877 wake_up(&rl->drain);
1878 }
1879} 1883}
1880 1884
1881#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) 1885#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
@@ -1885,14 +1889,12 @@ static void freed_request(request_queue_t *q, int rw)
1885 * Returns !NULL on success, with queue_lock *not held*. 1889 * Returns !NULL on success, with queue_lock *not held*.
1886 */ 1890 */
1887static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, 1891static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
1888 int gfp_mask) 1892 gfp_t gfp_mask)
1889{ 1893{
1890 struct request *rq = NULL; 1894 struct request *rq = NULL;
1891 struct request_list *rl = &q->rq; 1895 struct request_list *rl = &q->rq;
1892 struct io_context *ioc = current_io_context(GFP_ATOMIC); 1896 struct io_context *ioc = current_io_context(GFP_ATOMIC);
1893 1897 int priv;
1894 if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
1895 goto out;
1896 1898
1897 if (rl->count[rw]+1 >= q->nr_requests) { 1899 if (rl->count[rw]+1 >= q->nr_requests) {
1898 /* 1900 /*
@@ -1937,9 +1939,14 @@ get_rq:
1937 rl->starved[rw] = 0; 1939 rl->starved[rw] = 0;
1938 if (rl->count[rw] >= queue_congestion_on_threshold(q)) 1940 if (rl->count[rw] >= queue_congestion_on_threshold(q))
1939 set_queue_congested(q, rw); 1941 set_queue_congested(q, rw);
1942
1943 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
1944 if (priv)
1945 rl->elvpriv++;
1946
1940 spin_unlock_irq(q->queue_lock); 1947 spin_unlock_irq(q->queue_lock);
1941 1948
1942 rq = blk_alloc_request(q, rw, bio, gfp_mask); 1949 rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
1943 if (!rq) { 1950 if (!rq) {
1944 /* 1951 /*
1945 * Allocation failed presumably due to memory. Undo anything 1952 * Allocation failed presumably due to memory. Undo anything
@@ -1949,7 +1956,7 @@ get_rq:
1949 * wait queue, but this is pretty rare. 1956 * wait queue, but this is pretty rare.
1950 */ 1957 */
1951 spin_lock_irq(q->queue_lock); 1958 spin_lock_irq(q->queue_lock);
1952 freed_request(q, rw); 1959 freed_request(q, rw, priv);
1953 1960
1954 /* 1961 /*
1955 * in the very unlikely event that allocation failed and no 1962 * in the very unlikely event that allocation failed and no
@@ -2019,7 +2026,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
2019 return rq; 2026 return rq;
2020} 2027}
2021 2028
2022struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask) 2029struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
2023{ 2030{
2024 struct request *rq; 2031 struct request *rq;
2025 2032
@@ -2251,7 +2258,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
2251 * @gfp_mask: memory allocation flags 2258 * @gfp_mask: memory allocation flags
2252 */ 2259 */
2253int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, 2260int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
2254 unsigned int len, unsigned int gfp_mask) 2261 unsigned int len, gfp_t gfp_mask)
2255{ 2262{
2256 struct bio *bio; 2263 struct bio *bio;
2257 2264
@@ -2433,13 +2440,15 @@ void disk_round_stats(struct gendisk *disk)
2433{ 2440{
2434 unsigned long now = jiffies; 2441 unsigned long now = jiffies;
2435 2442
2436 __disk_stat_add(disk, time_in_queue, 2443 if (now == disk->stamp)
2437 disk->in_flight * (now - disk->stamp)); 2444 return;
2438 disk->stamp = now;
2439 2445
2440 if (disk->in_flight) 2446 if (disk->in_flight) {
2441 __disk_stat_add(disk, io_ticks, (now - disk->stamp_idle)); 2447 __disk_stat_add(disk, time_in_queue,
2442 disk->stamp_idle = now; 2448 disk->in_flight * (now - disk->stamp));
2449 __disk_stat_add(disk, io_ticks, (now - disk->stamp));
2450 }
2451 disk->stamp = now;
2443} 2452}
2444 2453
2445/* 2454/*
@@ -2454,6 +2463,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
2454 if (unlikely(--req->ref_count)) 2463 if (unlikely(--req->ref_count))
2455 return; 2464 return;
2456 2465
2466 elv_completed_request(q, req);
2467
2457 req->rq_status = RQ_INACTIVE; 2468 req->rq_status = RQ_INACTIVE;
2458 req->rl = NULL; 2469 req->rl = NULL;
2459 2470
@@ -2463,26 +2474,25 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
2463 */ 2474 */
2464 if (rl) { 2475 if (rl) {
2465 int rw = rq_data_dir(req); 2476 int rw = rq_data_dir(req);
2466 2477 int priv = req->flags & REQ_ELVPRIV;
2467 elv_completed_request(q, req);
2468 2478
2469 BUG_ON(!list_empty(&req->queuelist)); 2479 BUG_ON(!list_empty(&req->queuelist));
2470 2480
2471 blk_free_request(q, req); 2481 blk_free_request(q, req);
2472 freed_request(q, rw); 2482 freed_request(q, rw, priv);
2473 } 2483 }
2474} 2484}
2475 2485
2476void blk_put_request(struct request *req) 2486void blk_put_request(struct request *req)
2477{ 2487{
2488 unsigned long flags;
2489 request_queue_t *q = req->q;
2490
2478 /* 2491 /*
2479 * if req->rl isn't set, this request didn't originate from the 2492 * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
2480 * block layer, so it's safe to just disregard it 2493 * following if (q) test.
2481 */ 2494 */
2482 if (req->rl) { 2495 if (q) {
2483 unsigned long flags;
2484 request_queue_t *q = req->q;
2485
2486 spin_lock_irqsave(q->queue_lock, flags); 2496 spin_lock_irqsave(q->queue_lock, flags);
2487 __blk_put_request(q, req); 2497 __blk_put_request(q, req);
2488 spin_unlock_irqrestore(q->queue_lock, flags); 2498 spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2797,97 +2807,6 @@ static inline void blk_partition_remap(struct bio *bio)
2797 } 2807 }
2798} 2808}
2799 2809
2800void blk_finish_queue_drain(request_queue_t *q)
2801{
2802 struct request_list *rl = &q->rq;
2803 struct request *rq;
2804 int requeued = 0;
2805
2806 spin_lock_irq(q->queue_lock);
2807 clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
2808
2809 while (!list_empty(&q->drain_list)) {
2810 rq = list_entry_rq(q->drain_list.next);
2811
2812 list_del_init(&rq->queuelist);
2813 elv_requeue_request(q, rq);
2814 requeued++;
2815 }
2816
2817 if (requeued)
2818 q->request_fn(q);
2819
2820 spin_unlock_irq(q->queue_lock);
2821
2822 wake_up(&rl->wait[0]);
2823 wake_up(&rl->wait[1]);
2824 wake_up(&rl->drain);
2825}
2826
2827static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch)
2828{
2829 int wait = rl->count[READ] + rl->count[WRITE];
2830
2831 if (dispatch)
2832 wait += !list_empty(&q->queue_head);
2833
2834 return wait;
2835}
2836
2837/*
2838 * We rely on the fact that only requests allocated through blk_alloc_request()
2839 * have io scheduler private data structures associated with them. Any other
2840 * type of request (allocated on stack or through kmalloc()) should not go
2841 * to the io scheduler core, but be attached to the queue head instead.
2842 */
2843void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch)
2844{
2845 struct request_list *rl = &q->rq;
2846 DEFINE_WAIT(wait);
2847
2848 spin_lock_irq(q->queue_lock);
2849 set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
2850
2851 while (wait_drain(q, rl, wait_dispatch)) {
2852 prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
2853
2854 if (wait_drain(q, rl, wait_dispatch)) {
2855 __generic_unplug_device(q);
2856 spin_unlock_irq(q->queue_lock);
2857 io_schedule();
2858 spin_lock_irq(q->queue_lock);
2859 }
2860
2861 finish_wait(&rl->drain, &wait);
2862 }
2863
2864 spin_unlock_irq(q->queue_lock);
2865}
2866
2867/*
2868 * block waiting for the io scheduler being started again.
2869 */
2870static inline void block_wait_queue_running(request_queue_t *q)
2871{
2872 DEFINE_WAIT(wait);
2873
2874 while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
2875 struct request_list *rl = &q->rq;
2876
2877 prepare_to_wait_exclusive(&rl->drain, &wait,
2878 TASK_UNINTERRUPTIBLE);
2879
2880 /*
2881 * re-check the condition. avoids using prepare_to_wait()
2882 * in the fast path (queue is running)
2883 */
2884 if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
2885 io_schedule();
2886
2887 finish_wait(&rl->drain, &wait);
2888 }
2889}
2890
2891static void handle_bad_sector(struct bio *bio) 2810static void handle_bad_sector(struct bio *bio)
2892{ 2811{
2893 char b[BDEVNAME_SIZE]; 2812 char b[BDEVNAME_SIZE];
@@ -2983,8 +2902,6 @@ end_io:
2983 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) 2902 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
2984 goto end_io; 2903 goto end_io;
2985 2904
2986 block_wait_queue_running(q);
2987
2988 /* 2905 /*
2989 * If this device has partitions, remap block n 2906 * If this device has partitions, remap block n
2990 * of partition p to block n+start(p) of the disk. 2907 * of partition p to block n+start(p) of the disk.
@@ -3393,7 +3310,7 @@ void exit_io_context(void)
3393 * but since the current task itself holds a reference, the context can be 3310 * but since the current task itself holds a reference, the context can be
3394 * used in general code, so long as it stays within `current` context. 3311 * used in general code, so long as it stays within `current` context.
3395 */ 3312 */
3396struct io_context *current_io_context(int gfp_flags) 3313struct io_context *current_io_context(gfp_t gfp_flags)
3397{ 3314{
3398 struct task_struct *tsk = current; 3315 struct task_struct *tsk = current;
3399 struct io_context *ret; 3316 struct io_context *ret;
@@ -3424,7 +3341,7 @@ EXPORT_SYMBOL(current_io_context);
3424 * 3341 *
3425 * This is always called in the context of the task which submitted the I/O. 3342 * This is always called in the context of the task which submitted the I/O.
3426 */ 3343 */
3427struct io_context *get_io_context(int gfp_flags) 3344struct io_context *get_io_context(gfp_t gfp_flags)
3428{ 3345{
3429 struct io_context *ret; 3346 struct io_context *ret;
3430 ret = current_io_context(gfp_flags); 3347 ret = current_io_context(gfp_flags);
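The disk_round_stats() hunk above is a behavioral fix, not a cleanup: the old code kept two timestamps (stamp for time_in_queue, stamp_idle for io_ticks) that were advanced unconditionally and could drift apart, while the rewrite collapses them into a single stamp, returns early when the jiffy hasn't changed, and charges both counters only while requests are actually in flight. A user-space rendering of the fixed accounting (the struct and sample values are made up for illustration):

	#include <stdio.h>

	struct toy_disk {
		unsigned long time_in_queue;	/* jiffies, weighted by queue depth */
		unsigned long io_ticks;		/* jiffies with I/O in flight */
		unsigned long stamp;		/* last time we accounted */
		unsigned int in_flight;
	};

	/* mirrors the fixed disk_round_stats() */
	static void round_stats(struct toy_disk *d, unsigned long now)
	{
		if (now == d->stamp)
			return;			/* same jiffy: nothing to charge */
		if (d->in_flight) {
			d->time_in_queue += d->in_flight * (now - d->stamp);
			d->io_ticks += now - d->stamp;
		}
		d->stamp = now;
	}

	int main(void)
	{
		struct toy_disk d = { 0, 0, 100, 2 };

		round_stats(&d, 103);	/* 2 requests in flight for 3 jiffies */
		round_stats(&d, 103);	/* same jiffy again: no double charge */
		d.in_flight = 0;
		round_stats(&d, 110);	/* idle stretch: stamp moves, nothing charged */
		printf("time_in_queue=%lu io_ticks=%lu\n",
		       d.time_in_queue, d.io_ticks);	/* 6 and 3 */
		return 0;
	}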
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b35e08876dd4..96c664af8d06 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -881,7 +881,7 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
881static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) 881static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
882{ 882{
883 struct file *filp = lo->lo_backing_file; 883 struct file *filp = lo->lo_backing_file;
884 int gfp = lo->old_gfp_mask; 884 gfp_t gfp = lo->old_gfp_mask;
885 885
886 if (lo->lo_state != Lo_bound) 886 if (lo->lo_state != Lo_bound)
887 return -ENXIO; 887 return -ENXIO;
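The loop.c hunk above and most of the remaining hunks in this series are the same mechanical change: variables and parameters carrying allocation flags move from int (or unsigned int) to gfp_t. The payoff is static checking; once the typedef carries a sparse annotation (__nocast in this era of the tree, later __bitwise), sparse can flag a bare integer or the wrong kind of flag word passed where allocation flags are expected. A condensed sketch of the pattern, using a hypothetical helper:

	#include <linux/gfp.h>
	#include <linux/slab.h>

	/* before: static void *buf_alloc(size_t len, int flags); */
	static void *buf_alloc(size_t len, gfp_t flags)
	{
		return kmalloc(len, flags);
	}

	/* buf_alloc(64, GFP_KERNEL) is fine; buf_alloc(64, 1) now draws a
	 * sparse warning instead of silently becoming whatever that bit means */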
diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c
index b1730b62c37e..f56b8edb06e4 100644
--- a/drivers/block/noop-iosched.c
+++ b/drivers/block/noop-iosched.c
@@ -7,57 +7,19 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/init.h> 8#include <linux/init.h>
9 9
10/* 10static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
11 * See if we can find a request that this buffer can be coalesced with.
12 */
13static int elevator_noop_merge(request_queue_t *q, struct request **req,
14 struct bio *bio)
15{
16 int ret;
17
18 ret = elv_try_last_merge(q, bio);
19 if (ret != ELEVATOR_NO_MERGE)
20 *req = q->last_merge;
21
22 return ret;
23}
24
25static void elevator_noop_merge_requests(request_queue_t *q, struct request *req,
26 struct request *next)
27{
28 list_del_init(&next->queuelist);
29}
30
31static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
32 int where)
33{ 11{
34 if (where == ELEVATOR_INSERT_FRONT) 12 elv_dispatch_add_tail(q, rq);
35 list_add(&rq->queuelist, &q->queue_head);
36 else
37 list_add_tail(&rq->queuelist, &q->queue_head);
38
39 /*
40 * new merges must not precede this barrier
41 */
42 if (rq->flags & REQ_HARDBARRIER)
43 q->last_merge = NULL;
44 else if (!q->last_merge)
45 q->last_merge = rq;
46} 13}
47 14
48static struct request *elevator_noop_next_request(request_queue_t *q) 15static int elevator_noop_dispatch(request_queue_t *q, int force)
49{ 16{
50 if (!list_empty(&q->queue_head)) 17 return 0;
51 return list_entry_rq(q->queue_head.next);
52
53 return NULL;
54} 18}
55 19
56static struct elevator_type elevator_noop = { 20static struct elevator_type elevator_noop = {
57 .ops = { 21 .ops = {
58 .elevator_merge_fn = elevator_noop_merge, 22 .elevator_dispatch_fn = elevator_noop_dispatch,
59 .elevator_merge_req_fn = elevator_noop_merge_requests,
60 .elevator_next_req_fn = elevator_noop_next_request,
61 .elevator_add_req_fn = elevator_noop_add_request, 23 .elevator_add_req_fn = elevator_noop_add_request,
62 }, 24 },
63 .elevator_name = "noop", 25 .elevator_name = "noop",
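The noop rewrite above also shows how much merging logic became generic: elevator_noop_merge() could be deleted because elv_merge() now tries q->last_merge itself before consulting the elevator, and last_merge is maintained centrally in elv_merged_request() and elv_merge_requests(). Callers keep the same pattern; roughly what __make_request() does (a sketch, not a verbatim quote of ll_rw_blk.c):

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
	case ELEVATOR_BACK_MERGE:
		/* append bio's data to req, update sector counts */
		break;
	case ELEVATOR_FRONT_MERGE:
		/* prepend bio, pull req's start sector back */
		break;
	default:
		/* ELEVATOR_NO_MERGE: fall through and allocate
		 * a fresh request with get_request() */
		break;
	}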
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 145c1fbffe01..68c60a5bcdab 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -348,7 +348,7 @@ static int rd_open(struct inode *inode, struct file *filp)
348 struct block_device *bdev = inode->i_bdev; 348 struct block_device *bdev = inode->i_bdev;
349 struct address_space *mapping; 349 struct address_space *mapping;
350 unsigned bsize; 350 unsigned bsize;
351 int gfp_mask; 351 gfp_t gfp_mask;
352 352
353 inode = igrab(bdev->bd_inode); 353 inode = igrab(bdev->bd_inode);
354 rd_bdev[unit] = bdev; 354 rd_bdev[unit] = bdev;
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index c9bdf544ed2c..c556f4d3ccd7 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -62,7 +62,7 @@
62 62
63static inline unsigned char *alloc_buf(void) 63static inline unsigned char *alloc_buf(void)
64{ 64{
65 unsigned int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 65 gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
66 66
67 if (PAGE_SIZE != N_TTY_BUF_SIZE) 67 if (PAGE_SIZE != N_TTY_BUF_SIZE)
68 return kmalloc(N_TTY_BUF_SIZE, prio); 68 return kmalloc(N_TTY_BUF_SIZE, prio);
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 4802bbbb6dc9..c9e92d85c893 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1630,7 +1630,7 @@ static void ether1394_complete_cb(void *__ptask)
1630/* Transmit a packet (called by kernel) */ 1630/* Transmit a packet (called by kernel) */
1631static int ether1394_tx (struct sk_buff *skb, struct net_device *dev) 1631static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1632{ 1632{
1633 int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 1633 gfp_t kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
1634 struct eth1394hdr *eth; 1634 struct eth1394hdr *eth;
1635 struct eth1394_priv *priv = netdev_priv(dev); 1635 struct eth1394_priv *priv = netdev_priv(dev);
1636 int proto; 1636 int proto;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f6a8ac026557..378646b5a1b8 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -524,7 +524,7 @@ void mthca_cmd_use_polling(struct mthca_dev *dev)
524} 524}
525 525
526struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, 526struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
527 unsigned int gfp_mask) 527 gfp_t gfp_mask)
528{ 528{
529 struct mthca_mailbox *mailbox; 529 struct mthca_mailbox *mailbox;
530 530
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 65f976a13e02..18175bec84c2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -248,7 +248,7 @@ void mthca_cmd_event(struct mthca_dev *dev, u16 token,
248 u8 status, u64 out_param); 248 u8 status, u64 out_param);
249 249
250struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, 250struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
251 unsigned int gfp_mask); 251 gfp_t gfp_mask);
252void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox); 252void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
253 253
254int mthca_SYS_EN(struct mthca_dev *dev, u8 *status); 254int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 7bd7a4bec7b4..9ad8b3b6cfef 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -82,7 +82,7 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
82} 82}
83 83
84struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, 84struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
85 unsigned int gfp_mask) 85 gfp_t gfp_mask)
86{ 86{
87 struct mthca_icm *icm; 87 struct mthca_icm *icm;
88 struct mthca_icm_chunk *chunk = NULL; 88 struct mthca_icm_chunk *chunk = NULL;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index bafa51544aa3..29433f295253 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -77,7 +77,7 @@ struct mthca_icm_iter {
77struct mthca_dev; 77struct mthca_dev;
78 78
79struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, 79struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
80 unsigned int gfp_mask); 80 gfp_t gfp_mask);
81void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm); 81void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm);
82 82
83struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, 83struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2fba2bbe72d8..01654fcabc52 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -91,7 +91,7 @@ int bitmap_active(struct bitmap *bitmap)
91 91
92#define WRITE_POOL_SIZE 256 92#define WRITE_POOL_SIZE 256
93/* mempool for queueing pending writes on the bitmap file */ 93/* mempool for queueing pending writes on the bitmap file */
94static void *write_pool_alloc(unsigned int gfp_flags, void *data) 94static void *write_pool_alloc(gfp_t gfp_flags, void *data)
95{ 95{
96 return kmalloc(sizeof(struct page_list), gfp_flags); 96 return kmalloc(sizeof(struct page_list), gfp_flags);
97} 97}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index b6148f6f7836..28c1a628621f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -331,7 +331,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
331{ 331{
332 struct bio *bio; 332 struct bio *bio;
333 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 333 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
334 int gfp_mask = GFP_NOIO | __GFP_HIGHMEM; 334 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
335 unsigned int i; 335 unsigned int i;
336 336
337 /* 337 /*
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 2e617424d3fb..50f43dbf31ae 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -489,7 +489,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
489/* local page allocation routines for the receive buffers. jumbo pages 489/* local page allocation routines for the receive buffers. jumbo pages
490 * require at least 8K contiguous and 8K aligned buffers. 490 * require at least 8K contiguous and 8K aligned buffers.
491 */ 491 */
492static cas_page_t *cas_page_alloc(struct cas *cp, const int flags) 492static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
493{ 493{
494 cas_page_t *page; 494 cas_page_t *page;
495 495
@@ -561,7 +561,7 @@ static void cas_spare_free(struct cas *cp)
561} 561}
562 562
563/* replenish spares if needed */ 563/* replenish spares if needed */
564static void cas_spare_recover(struct cas *cp, const int flags) 564static void cas_spare_recover(struct cas *cp, const gfp_t flags)
565{ 565{
566 struct list_head list, *elem, *tmp; 566 struct list_head list, *elem, *tmp;
567 int needed, i; 567 int needed, i;
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index b4929beb33b2..1d75ca0bb939 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -298,7 +298,7 @@ enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_
298static unsigned char lance_need_isa_bounce_buffers = 1; 298static unsigned char lance_need_isa_bounce_buffers = 1;
299 299
300static int lance_open(struct net_device *dev); 300static int lance_open(struct net_device *dev);
301static void lance_init_ring(struct net_device *dev, int mode); 301static void lance_init_ring(struct net_device *dev, gfp_t mode);
302static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev); 302static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
303static int lance_rx(struct net_device *dev); 303static int lance_rx(struct net_device *dev);
304static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs); 304static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
@@ -846,7 +846,7 @@ lance_purge_ring(struct net_device *dev)
846 846
847/* Initialize the LANCE Rx and Tx rings. */ 847/* Initialize the LANCE Rx and Tx rings. */
848static void 848static void
849lance_init_ring(struct net_device *dev, int gfp) 849lance_init_ring(struct net_device *dev, gfp_t gfp)
850{ 850{
851 struct lance_private *lp = dev->priv; 851 struct lance_private *lp = dev->priv;
852 int i; 852 int i;
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index f0996ce5c268..6c86dca62e2a 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -277,7 +277,7 @@ static void myri_init_rings(struct myri_eth *mp, int from_irq)
277 struct recvq __iomem *rq = mp->rq; 277 struct recvq __iomem *rq = mp->rq;
278 struct myri_rxd __iomem *rxd = &rq->myri_rxd[0]; 278 struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
279 struct net_device *dev = mp->dev; 279 struct net_device *dev = mp->dev;
280 int gfp_flags = GFP_KERNEL; 280 gfp_t gfp_flags = GFP_KERNEL;
281 int i; 281 int i;
282 282
283 if (from_irq || in_interrupt()) 283 if (from_irq || in_interrupt())
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h
index 9391e55a5e92..47722f708a41 100644
--- a/drivers/net/myri_sbus.h
+++ b/drivers/net/myri_sbus.h
@@ -296,7 +296,7 @@ struct myri_eth {
296/* We use this to acquire receive skb's that we can DMA directly into. */ 296/* We use this to acquire receive skb's that we can DMA directly into. */
297#define ALIGNED_RX_SKB_ADDR(addr) \ 297#define ALIGNED_RX_SKB_ADDR(addr) \
298 ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr)) 298 ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
299static inline struct sk_buff *myri_alloc_skb(unsigned int length, int gfp_flags) 299static inline struct sk_buff *myri_alloc_skb(unsigned int length, gfp_t gfp_flags)
300{ 300{
301 struct sk_buff *skb; 301 struct sk_buff *skb;
302 302
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index f88f5e32b714..cfaf47c63c58 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -214,7 +214,8 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
214{ 214{
215 struct bmac_init_block *bb = bp->bmac_block; 215 struct bmac_init_block *bb = bp->bmac_block;
216 struct net_device *dev = bp->dev; 216 struct net_device *dev = bp->dev;
217 int i, gfp_flags = GFP_KERNEL; 217 int i;
218 gfp_t gfp_flags = GFP_KERNEL;
218 219
219 if (from_irq || in_interrupt()) 220 if (from_irq || in_interrupt())
220 gfp_flags = GFP_ATOMIC; 221 gfp_flags = GFP_ATOMIC;
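[Note: the sunbmac and myri_sbus hunks show why the flags reach so many helpers: the value is chosen once from context and then handed down the call chain. A minimal sketch of that selection pattern, assuming only the standard alloc_skb() and in_interrupt():]

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>

/* sketch of the driver pattern above: pick the flags by context,
 * then pass them on typed as gfp_t rather than int */
static struct sk_buff *rx_skb_alloc(unsigned int len, int from_irq)
{
	gfp_t gfp_flags = GFP_KERNEL;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;	/* may not sleep here */

	return alloc_skb(len, gfp_flags);
}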
diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h
index 5674003fc38a..b0dbc5187143 100644
--- a/drivers/net/sunbmac.h
+++ b/drivers/net/sunbmac.h
@@ -339,7 +339,7 @@ struct bigmac {
339#define ALIGNED_RX_SKB_ADDR(addr) \ 339#define ALIGNED_RX_SKB_ADDR(addr) \
340 ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr)) 340 ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
341 341
342static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, int gfp_flags) 342static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags)
343{ 343{
344 struct sk_buff *skb; 344 struct sk_buff *skb;
345 345
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 0e98a9d9834c..a3bd91a61827 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -836,7 +836,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
836 * This function implements the pci_alloc_consistent function. 836 * This function implements the pci_alloc_consistent function.
837 */ 837 */
838static void * 838static void *
839ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, int flag) 839ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
840{ 840{
841 void *ret; 841 void *ret;
842#if 0 842#if 0
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 82ea68b55df4..bd8b3e5a5cd7 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -986,7 +986,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
986 * See Documentation/DMA-mapping.txt 986 * See Documentation/DMA-mapping.txt
987 */ 987 */
988static void *sba_alloc_consistent(struct device *hwdev, size_t size, 988static void *sba_alloc_consistent(struct device *hwdev, size_t size,
989 dma_addr_t *dma_handle, int gfp) 989 dma_addr_t *dma_handle, gfp_t gfp)
990{ 990{
991 void *ret; 991 void *ret;
992 992
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index fa09440d82e5..38f50b7129a2 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -16,7 +16,7 @@ MODULE_LICENSE("GPL");
16 16
17fsm_instance * 17fsm_instance *
18init_fsm(char *name, const char **state_names, const char **event_names, int nr_states, 18init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
19 int nr_events, const fsm_node *tmpl, int tmpl_len, int order) 19 int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
20{ 20{
21 int i; 21 int i;
22 fsm_instance *this; 22 fsm_instance *this;
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index f9a011001eb6..1b8a7e7c34f3 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -110,7 +110,7 @@ extern fsm_instance *
110init_fsm(char *name, const char **state_names, 110init_fsm(char *name, const char **state_names,
111 const char **event_names, 111 const char **event_names,
112 int nr_states, int nr_events, const fsm_node *tmpl, 112 int nr_states, int nr_events, const fsm_node *tmpl,
113 int tmpl_len, int order); 113 int tmpl_len, gfp_t order);
114 114
115/** 115/**
116 * Releases an FSM 116 * Releases an FSM
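[Note: init_fsm()'s flags argument keeps the unfortunate name order, which reads like a page order rather than allocation flags; the gfp_t annotation is what now keeps the two from being confused silently. A small sketch of the distinction, assuming the standard kmalloc() and __get_free_pages() signatures:]

#include <linux/gfp.h>
#include <linux/slab.h>

/* sketch: 'flags' and 'order' are both integer-sized, but only
 * the first is a gfp_t, so sparse can flag a swapped call */
static void *flags_vs_order(size_t size, gfp_t flags, unsigned int order)
{
	unsigned long pages = __get_free_pages(flags, order);

	if (pages)
		free_pages(pages, order);

	return kmalloc(size, flags);
}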
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index c10e45b94b62..3d13fdee4fc2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1357,7 +1357,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
1357 1357
1358 for (i = 0; i < shost->can_queue; i++) { 1358 for (i = 0; i < shost->can_queue; i++) {
1359 size_t sz = shost->sg_tablesize *sizeof(struct sg_list); 1359 size_t sz = shost->sg_tablesize *sizeof(struct sg_list);
1360 unsigned int gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC; 1360 gfp_t gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC;
1361 ha->cp[i].sglist = kmalloc(sz, gfp_mask); 1361 ha->cp[i].sglist = kmalloc(sz, gfp_mask);
1362 if (!ha->cp[i].sglist) { 1362 if (!ha->cp[i].sglist) {
1363 printk 1363 printk
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 02fe371b0ab8..f24d84538fd5 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -287,7 +287,8 @@ static void scsi_host_dev_release(struct device *dev)
287struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) 287struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
288{ 288{
289 struct Scsi_Host *shost; 289 struct Scsi_Host *shost;
290 int gfp_mask = GFP_KERNEL, rval; 290 gfp_t gfp_mask = GFP_KERNEL;
291 int rval;
291 292
292 if (sht->unchecked_isa_dma && privsize) 293 if (sht->unchecked_isa_dma && privsize)
293 gfp_mask |= __GFP_DMA; 294 gfp_mask |= __GFP_DMA;
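[Note: the eata.c and hosts.c hunks show the other constraint on the typedef: masks are still composed with bitwise OR, here mixing a base of GFP_KERNEL or GFP_ATOMIC with __GFP_DMA for ISA-reachable memory. A minimal sketch of the same composition:]

#include <linux/gfp.h>
#include <linux/slab.h>

/* sketch mirroring hosts.c above: build the mask up from a base,
 * keeping it typed gfp_t throughout */
static void *isa_safe_alloc(size_t size, int unchecked_isa_dma)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (unchecked_isa_dma)
		gfp_mask |= __GFP_DMA;

	return kmalloc(size, gfp_mask);
}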
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 0aba13ceaacf..352df47bcaca 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -39,7 +39,7 @@
39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
40 40
41static void * 41static void *
42lpfc_pool_kmalloc(unsigned int gfp_flags, void *data) 42lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
43{ 43{
44 return kmalloc((unsigned long)data, gfp_flags); 44 return kmalloc((unsigned long)data, gfp_flags);
45} 45}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 3f2f2464fa63..af1133104b3f 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5146,7 +5146,8 @@ static long osst_compat_ioctl(struct file * file, unsigned int cmd_in, unsigned
5146/* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */ 5146/* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */
5147static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg ) 5147static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg )
5148{ 5148{
5149 int i, priority; 5149 int i;
5150 gfp_t priority;
5150 struct osst_buffer *tb; 5151 struct osst_buffer *tb;
5151 5152
5152 if (from_initialization) 5153 if (from_initialization)
@@ -5178,7 +5179,8 @@ static struct osst_buffer * new_tape_buffer( int from_initialization, int need_d
5178/* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */ 5179/* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */
5179static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma) 5180static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
5180{ 5181{
5181 int segs, nbr, max_segs, b_size, priority, order, got; 5182 int segs, nbr, max_segs, b_size, order, got;
5183 gfp_t priority;
5182 5184
5183 if (STbuffer->buffer_size >= OS_FRAME_SIZE) 5185 if (STbuffer->buffer_size >= OS_FRAME_SIZE)
5184 return 1; 5186 return 1;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1ed32e7b5472..e451941ad81d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -52,7 +52,7 @@ extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
52extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *); 52extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *);
53extern int qla24xx_load_risc_hotplug(scsi_qla_host_t *, uint32_t *); 53extern int qla24xx_load_risc_hotplug(scsi_qla_host_t *, uint32_t *);
54 54
55extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, int); 55extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
56 56
57extern int qla2x00_loop_resync(scsi_qla_host_t *); 57extern int qla2x00_loop_resync(scsi_qla_host_t *);
58 58
@@ -277,7 +277,7 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *);
277/* 277/*
278 * Global Function Prototypes in qla_rscn.c source file. 278 * Global Function Prototypes in qla_rscn.c source file.
279 */ 279 */
280extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, int); 280extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, gfp_t);
281extern int qla2x00_handle_port_rscn(scsi_qla_host_t *, uint32_t, fc_port_t *, 281extern int qla2x00_handle_port_rscn(scsi_qla_host_t *, uint32_t, fc_port_t *,
282 int); 282 int);
283extern void qla2x00_process_iodesc(scsi_qla_host_t *, struct mbx_entry *); 283extern void qla2x00_process_iodesc(scsi_qla_host_t *, struct mbx_entry *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 23d095d3817b..fbb6feee40cf 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1685,7 +1685,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1685 * Returns a pointer to the allocated fcport, or NULL, if none available. 1685 * Returns a pointer to the allocated fcport, or NULL, if none available.
1686 */ 1686 */
1687fc_port_t * 1687fc_port_t *
1688qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags) 1688qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1689{ 1689{
1690 fc_port_t *fcport; 1690 fc_port_t *fcport;
1691 1691
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index 1eba98828636..7534efcc8918 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -1066,7 +1066,7 @@ qla2x00_send_login_iocb_cb(scsi_qla_host_t *ha, struct io_descriptor *iodesc,
1066 * Returns a pointer to the allocated RSCN fcport, or NULL, if none available. 1066 * Returns a pointer to the allocated RSCN fcport, or NULL, if none available.
1067 */ 1067 */
1068fc_port_t * 1068fc_port_t *
1069qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, int flags) 1069qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, gfp_t flags)
1070{ 1070{
1071 fc_port_t *fcport; 1071 fc_port_t *fcport;
1072 1072
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1f0ebabf6d47..a5711d545d71 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(scsi_device_types);
130 * Returns: Pointer to request block. 130 * Returns: Pointer to request block.
131 */ 131 */
132struct scsi_request *scsi_allocate_request(struct scsi_device *sdev, 132struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
133 int gfp_mask) 133 gfp_t gfp_mask)
134{ 134{
135 const int offset = ALIGN(sizeof(struct scsi_request), 4); 135 const int offset = ALIGN(sizeof(struct scsi_request), 4);
136 const int size = offset + sizeof(struct request); 136 const int size = offset + sizeof(struct request);
@@ -196,7 +196,7 @@ struct scsi_host_cmd_pool {
196 unsigned int users; 196 unsigned int users;
197 char *name; 197 char *name;
198 unsigned int slab_flags; 198 unsigned int slab_flags;
199 unsigned int gfp_mask; 199 gfp_t gfp_mask;
200}; 200};
201 201
202static struct scsi_host_cmd_pool scsi_cmd_pool = { 202static struct scsi_host_cmd_pool scsi_cmd_pool = {
@@ -213,7 +213,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
213static DECLARE_MUTEX(host_cmd_pool_mutex); 213static DECLARE_MUTEX(host_cmd_pool_mutex);
214 214
215static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, 215static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
216 int gfp_mask) 216 gfp_t gfp_mask)
217{ 217{
218 struct scsi_cmnd *cmd; 218 struct scsi_cmnd *cmd;
219 219
@@ -245,7 +245,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
245 * 245 *
246 * Returns: The allocated scsi command structure. 246 * Returns: The allocated scsi command structure.
247 */ 247 */
248struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask) 248struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
249{ 249{
250 struct scsi_cmnd *cmd; 250 struct scsi_cmnd *cmd;
251 251
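[Note: the scsi_host_cmd_pool hunk is the one structural change in this file: the flags live in a struct member rather than a parameter, so the member type itself becomes gfp_t and the annotation follows the value into every later allocation. A reduced sketch of that shape; the names here are illustrative, not the driver's:]

#include <linux/gfp.h>
#include <linux/slab.h>

/* sketch: a pool that remembers its allocation flags; typing the
 * member as gfp_t keeps later kmalloc() calls checked too */
struct example_cmd_pool {
	unsigned int slab_flags;
	gfp_t gfp_mask;		/* was 'unsigned int' before this change */
};

static void *example_pool_alloc(struct example_cmd_pool *pool, size_t size)
{
	return kmalloc(size, pool->gfp_mask);
}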
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index de7f98cc38fe..6a3f6aae8a97 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -205,7 +205,8 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
205 unsigned int inlen, outlen, cmdlen; 205 unsigned int inlen, outlen, cmdlen;
206 unsigned int needed, buf_needed; 206 unsigned int needed, buf_needed;
207 int timeout, retries, result; 207 int timeout, retries, result;
208 int data_direction, gfp_mask = GFP_KERNEL; 208 int data_direction;
209 gfp_t gfp_mask = GFP_KERNEL;
209 210
210 if (!sic) 211 if (!sic)
211 return -EINVAL; 212 return -EINVAL;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0074f28c37b2..3ff538809786 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -677,7 +677,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
677 return NULL; 677 return NULL;
678} 678}
679 679
680static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask) 680static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
681{ 681{
682 struct scsi_host_sg_pool *sgp; 682 struct scsi_host_sg_pool *sgp;
683 struct scatterlist *sgl; 683 struct scatterlist *sgl;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ad94367df430..fd56b7ec88b6 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2644,7 +2644,7 @@ static char *
2644sg_page_malloc(int rqSz, int lowDma, int *retSzp) 2644sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2645{ 2645{
2646 char *resp = NULL; 2646 char *resp = NULL;
2647 int page_mask; 2647 gfp_t page_mask;
2648 int order, a_size; 2648 int order, a_size;
2649 int resSz = rqSz; 2649 int resSz = rqSz;
2650 2650
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d001c046551b..927d700f0073 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3577,7 +3577,8 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
3577static struct st_buffer * 3577static struct st_buffer *
3578 new_tape_buffer(int from_initialization, int need_dma, int max_sg) 3578 new_tape_buffer(int from_initialization, int need_dma, int max_sg)
3579{ 3579{
3580 int i, priority, got = 0, segs = 0; 3580 int i, got = 0, segs = 0;
3581 gfp_t priority;
3581 struct st_buffer *tb; 3582 struct st_buffer *tb;
3582 3583
3583 if (from_initialization) 3584 if (from_initialization)
@@ -3610,7 +3611,8 @@ static struct st_buffer *
3610/* Try to allocate enough space in the tape buffer */ 3611/* Try to allocate enough space in the tape buffer */
3611static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma) 3612static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
3612{ 3613{
3613 int segs, nbr, max_segs, b_size, priority, order, got; 3614 int segs, nbr, max_segs, b_size, order, got;
3615 gfp_t priority;
3614 3616
3615 if (new_size <= STbuffer->buffer_size) 3617 if (new_size <= STbuffer->buffer_size)
3616 return 1; 3618 return 1;
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index fc15b4acc8af..57e800ac3cee 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -106,7 +106,7 @@ void hcd_buffer_destroy (struct usb_hcd *hcd)
106void *hcd_buffer_alloc ( 106void *hcd_buffer_alloc (
107 struct usb_bus *bus, 107 struct usb_bus *bus,
108 size_t size, 108 size_t size,
109 unsigned mem_flags, 109 gfp_t mem_flags,
110 dma_addr_t *dma 110 dma_addr_t *dma
111) 111)
112{ 112{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 1017a97a418b..ff19d64041b5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1112,7 +1112,7 @@ static void urb_unlink (struct urb *urb)
1112 * expects usb_submit_urb() to have sanity checked and conditioned all 1112 * expects usb_submit_urb() to have sanity checked and conditioned all
1113 * inputs in the urb 1113 * inputs in the urb
1114 */ 1114 */
1115static int hcd_submit_urb (struct urb *urb, unsigned mem_flags) 1115static int hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
1116{ 1116{
1117 int status; 1117 int status;
1118 struct usb_hcd *hcd = urb->dev->bus->hcpriv; 1118 struct usb_hcd *hcd = urb->dev->bus->hcpriv;
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index ac451fa7e4d2..1f1ed6211af8 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -142,12 +142,12 @@ struct hcd_timeout { /* timeouts we allocate */
142 142
143struct usb_operations { 143struct usb_operations {
144 int (*get_frame_number) (struct usb_device *usb_dev); 144 int (*get_frame_number) (struct usb_device *usb_dev);
145 int (*submit_urb) (struct urb *urb, unsigned mem_flags); 145 int (*submit_urb) (struct urb *urb, gfp_t mem_flags);
146 int (*unlink_urb) (struct urb *urb, int status); 146 int (*unlink_urb) (struct urb *urb, int status);
147 147
148 /* allocate dma-consistent buffer for URB_DMA_NOMAPPING */ 148 /* allocate dma-consistent buffer for URB_DMA_NOMAPPING */
149 void *(*buffer_alloc)(struct usb_bus *bus, size_t size, 149 void *(*buffer_alloc)(struct usb_bus *bus, size_t size,
150 unsigned mem_flags, 150 gfp_t mem_flags,
151 dma_addr_t *dma); 151 dma_addr_t *dma);
152 void (*buffer_free)(struct usb_bus *bus, size_t size, 152 void (*buffer_free)(struct usb_bus *bus, size_t size,
153 void *addr, dma_addr_t dma); 153 void *addr, dma_addr_t dma);
@@ -200,7 +200,7 @@ struct hc_driver {
200 int (*urb_enqueue) (struct usb_hcd *hcd, 200 int (*urb_enqueue) (struct usb_hcd *hcd,
201 struct usb_host_endpoint *ep, 201 struct usb_host_endpoint *ep,
202 struct urb *urb, 202 struct urb *urb,
203 unsigned mem_flags); 203 gfp_t mem_flags);
204 int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb); 204 int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb);
205 205
206 /* hw synch, freeing endpoint resources that urb_dequeue can't */ 206 /* hw synch, freeing endpoint resources that urb_dequeue can't */
@@ -247,7 +247,7 @@ int hcd_buffer_create (struct usb_hcd *hcd);
247void hcd_buffer_destroy (struct usb_hcd *hcd); 247void hcd_buffer_destroy (struct usb_hcd *hcd);
248 248
249void *hcd_buffer_alloc (struct usb_bus *bus, size_t size, 249void *hcd_buffer_alloc (struct usb_bus *bus, size_t size,
250 unsigned mem_flags, dma_addr_t *dma); 250 gfp_t mem_flags, dma_addr_t *dma);
251void hcd_buffer_free (struct usb_bus *bus, size_t size, 251void hcd_buffer_free (struct usb_bus *bus, size_t size,
252 void *addr, dma_addr_t dma); 252 void *addr, dma_addr_t dma);
253 253
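[Note: hcd.h is where the change fans out: submit_urb, buffer_alloc and urb_enqueue are function pointers, so every host controller driver touched later in this diff (dummy_hcd, ehci, ohci, uhci, sl811, isp116x) has to switch in the same patch or its ops assignment no longer matches the prototype cleanly. A reduced sketch of that constraint, with hypothetical names:]

#include <linux/gfp.h>

/* sketch: once the ops table takes gfp_t, an implementation still
 * declared with 'unsigned mem_flags' draws a sparse warning at the
 * assignment below instead of slipping through untyped */
struct example_ops {
	int (*submit)(void *ctx, gfp_t mem_flags);
};

static int example_submit(void *ctx, gfp_t mem_flags)
{
	(void)ctx;
	(void)mem_flags;
	return 0;
}

static const struct example_ops example_hcd_ops = {
	.submit = example_submit,
};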
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index f1fb67fe22a8..f9a81e84dbdf 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -321,7 +321,7 @@ int usb_sg_init (
321 struct scatterlist *sg, 321 struct scatterlist *sg,
322 int nents, 322 int nents,
323 size_t length, 323 size_t length,
324 unsigned mem_flags 324 gfp_t mem_flags
325) 325)
326{ 326{
327 int i; 327 int i;
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index c846fefb7386..b32898e0a27d 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -60,7 +60,7 @@ void usb_init_urb(struct urb *urb)
60 * 60 *
61 * The driver must call usb_free_urb() when it is finished with the urb. 61 * The driver must call usb_free_urb() when it is finished with the urb.
62 */ 62 */
63struct urb *usb_alloc_urb(int iso_packets, unsigned mem_flags) 63struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
64{ 64{
65 struct urb *urb; 65 struct urb *urb;
66 66
@@ -224,7 +224,7 @@ struct urb * usb_get_urb(struct urb *urb)
224 * GFP_NOIO, unless b) or c) apply 224 * GFP_NOIO, unless b) or c) apply
225 * 225 *
226 */ 226 */
227int usb_submit_urb(struct urb *urb, unsigned mem_flags) 227int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
228{ 228{
229 int pipe, temp, max; 229 int pipe, temp, max;
230 struct usb_device *dev; 230 struct usb_device *dev;
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 7d131509e419..4c57f3f649ed 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -1147,7 +1147,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
1147void *usb_buffer_alloc ( 1147void *usb_buffer_alloc (
1148 struct usb_device *dev, 1148 struct usb_device *dev,
1149 size_t size, 1149 size_t size,
1150 unsigned mem_flags, 1150 gfp_t mem_flags,
1151 dma_addr_t *dma 1151 dma_addr_t *dma
1152) 1152)
1153{ 1153{
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 583db7c38cf1..8d9d8ee89554 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -470,7 +470,7 @@ static int dummy_disable (struct usb_ep *_ep)
470} 470}
471 471
472static struct usb_request * 472static struct usb_request *
473dummy_alloc_request (struct usb_ep *_ep, unsigned mem_flags) 473dummy_alloc_request (struct usb_ep *_ep, gfp_t mem_flags)
474{ 474{
475 struct dummy_ep *ep; 475 struct dummy_ep *ep;
476 struct dummy_request *req; 476 struct dummy_request *req;
@@ -507,7 +507,7 @@ dummy_alloc_buffer (
507 struct usb_ep *_ep, 507 struct usb_ep *_ep,
508 unsigned bytes, 508 unsigned bytes,
509 dma_addr_t *dma, 509 dma_addr_t *dma,
510 unsigned mem_flags 510 gfp_t mem_flags
511) { 511) {
512 char *retval; 512 char *retval;
513 struct dummy_ep *ep; 513 struct dummy_ep *ep;
@@ -541,7 +541,7 @@ fifo_complete (struct usb_ep *ep, struct usb_request *req)
541 541
542static int 542static int
543dummy_queue (struct usb_ep *_ep, struct usb_request *_req, 543dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
544 unsigned mem_flags) 544 gfp_t mem_flags)
545{ 545{
546 struct dummy_ep *ep; 546 struct dummy_ep *ep;
547 struct dummy_request *req; 547 struct dummy_request *req;
@@ -999,7 +999,7 @@ static int dummy_urb_enqueue (
999 struct usb_hcd *hcd, 999 struct usb_hcd *hcd,
1000 struct usb_host_endpoint *ep, 1000 struct usb_host_endpoint *ep,
1001 struct urb *urb, 1001 struct urb *urb,
1002 unsigned mem_flags 1002 gfp_t mem_flags
1003) { 1003) {
1004 struct dummy *dum; 1004 struct dummy *dum;
1005 struct urbp *urbp; 1005 struct urbp *urbp;
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 49459e33e952..f1024e804d5c 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -945,11 +945,11 @@ config_buf (enum usb_device_speed speed,
945 945
946/*-------------------------------------------------------------------------*/ 946/*-------------------------------------------------------------------------*/
947 947
948static void eth_start (struct eth_dev *dev, unsigned gfp_flags); 948static void eth_start (struct eth_dev *dev, gfp_t gfp_flags);
949static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags); 949static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags);
950 950
951static int 951static int
952set_ether_config (struct eth_dev *dev, unsigned gfp_flags) 952set_ether_config (struct eth_dev *dev, gfp_t gfp_flags)
953{ 953{
954 int result = 0; 954 int result = 0;
955 struct usb_gadget *gadget = dev->gadget; 955 struct usb_gadget *gadget = dev->gadget;
@@ -1081,7 +1081,7 @@ static void eth_reset_config (struct eth_dev *dev)
1081 * that returns config descriptors, and altsetting code. 1081 * that returns config descriptors, and altsetting code.
1082 */ 1082 */
1083static int 1083static int
1084eth_set_config (struct eth_dev *dev, unsigned number, unsigned gfp_flags) 1084eth_set_config (struct eth_dev *dev, unsigned number, gfp_t gfp_flags)
1085{ 1085{
1086 int result = 0; 1086 int result = 0;
1087 struct usb_gadget *gadget = dev->gadget; 1087 struct usb_gadget *gadget = dev->gadget;
@@ -1598,7 +1598,7 @@ static void defer_kevent (struct eth_dev *dev, int flag)
1598static void rx_complete (struct usb_ep *ep, struct usb_request *req); 1598static void rx_complete (struct usb_ep *ep, struct usb_request *req);
1599 1599
1600static int 1600static int
1601rx_submit (struct eth_dev *dev, struct usb_request *req, unsigned gfp_flags) 1601rx_submit (struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
1602{ 1602{
1603 struct sk_buff *skb; 1603 struct sk_buff *skb;
1604 int retval = -ENOMEM; 1604 int retval = -ENOMEM;
@@ -1724,7 +1724,7 @@ clean:
1724} 1724}
1725 1725
1726static int prealloc (struct list_head *list, struct usb_ep *ep, 1726static int prealloc (struct list_head *list, struct usb_ep *ep,
1727 unsigned n, unsigned gfp_flags) 1727 unsigned n, gfp_t gfp_flags)
1728{ 1728{
1729 unsigned i; 1729 unsigned i;
1730 struct usb_request *req; 1730 struct usb_request *req;
@@ -1763,7 +1763,7 @@ extra:
1763 return 0; 1763 return 0;
1764} 1764}
1765 1765
1766static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags) 1766static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
1767{ 1767{
1768 int status; 1768 int status;
1769 1769
@@ -1779,7 +1779,7 @@ fail:
1779 return status; 1779 return status;
1780} 1780}
1781 1781
1782static void rx_fill (struct eth_dev *dev, unsigned gfp_flags) 1782static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
1783{ 1783{
1784 struct usb_request *req; 1784 struct usb_request *req;
1785 unsigned long flags; 1785 unsigned long flags;
@@ -1962,7 +1962,7 @@ drop:
1962 * normally just one notification will be queued. 1962 * normally just one notification will be queued.
1963 */ 1963 */
1964 1964
1965static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, unsigned); 1965static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, gfp_t);
1966static void eth_req_free (struct usb_ep *ep, struct usb_request *req); 1966static void eth_req_free (struct usb_ep *ep, struct usb_request *req);
1967 1967
1968static void 1968static void
@@ -2024,7 +2024,7 @@ static int rndis_control_ack (struct net_device *net)
2024 2024
2025#endif /* RNDIS */ 2025#endif /* RNDIS */
2026 2026
2027static void eth_start (struct eth_dev *dev, unsigned gfp_flags) 2027static void eth_start (struct eth_dev *dev, gfp_t gfp_flags)
2028{ 2028{
2029 DEBUG (dev, "%s\n", __FUNCTION__); 2029 DEBUG (dev, "%s\n", __FUNCTION__);
2030 2030
@@ -2092,7 +2092,7 @@ static int eth_stop (struct net_device *net)
2092/*-------------------------------------------------------------------------*/ 2092/*-------------------------------------------------------------------------*/
2093 2093
2094static struct usb_request * 2094static struct usb_request *
2095eth_req_alloc (struct usb_ep *ep, unsigned size, unsigned gfp_flags) 2095eth_req_alloc (struct usb_ep *ep, unsigned size, gfp_t gfp_flags)
2096{ 2096{
2097 struct usb_request *req; 2097 struct usb_request *req;
2098 2098
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index eaab26f4ed37..b0f3cd63e3b9 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -269,7 +269,7 @@ static int goku_ep_disable(struct usb_ep *_ep)
269/*-------------------------------------------------------------------------*/ 269/*-------------------------------------------------------------------------*/
270 270
271static struct usb_request * 271static struct usb_request *
272goku_alloc_request(struct usb_ep *_ep, unsigned gfp_flags) 272goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
273{ 273{
274 struct goku_request *req; 274 struct goku_request *req;
275 275
@@ -327,7 +327,7 @@ goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
327 */ 327 */
328static void * 328static void *
329goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes, 329goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
330 dma_addr_t *dma, unsigned gfp_flags) 330 dma_addr_t *dma, gfp_t gfp_flags)
331{ 331{
332 void *retval; 332 void *retval;
333 struct goku_ep *ep; 333 struct goku_ep *ep;
@@ -789,7 +789,7 @@ finished:
789/*-------------------------------------------------------------------------*/ 789/*-------------------------------------------------------------------------*/
790 790
791static int 791static int
792goku_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags) 792goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
793{ 793{
794 struct goku_request *req; 794 struct goku_request *req;
795 struct goku_ep *ep; 795 struct goku_ep *ep;
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
index 4842577789c9..012d1e5f1524 100644
--- a/drivers/usb/gadget/lh7a40x_udc.c
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -71,13 +71,13 @@ static char *state_names[] = {
71static int lh7a40x_ep_enable(struct usb_ep *ep, 71static int lh7a40x_ep_enable(struct usb_ep *ep,
72 const struct usb_endpoint_descriptor *); 72 const struct usb_endpoint_descriptor *);
73static int lh7a40x_ep_disable(struct usb_ep *ep); 73static int lh7a40x_ep_disable(struct usb_ep *ep);
74static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, int); 74static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, gfp_t);
75static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *); 75static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *);
76static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned, dma_addr_t *, 76static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned, dma_addr_t *,
77 int); 77 gfp_t);
78static void lh7a40x_free_buffer(struct usb_ep *ep, void *, dma_addr_t, 78static void lh7a40x_free_buffer(struct usb_ep *ep, void *, dma_addr_t,
79 unsigned); 79 unsigned);
80static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, int); 80static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, gfp_t);
81static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *); 81static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
82static int lh7a40x_set_halt(struct usb_ep *ep, int); 82static int lh7a40x_set_halt(struct usb_ep *ep, int);
83static int lh7a40x_fifo_status(struct usb_ep *ep); 83static int lh7a40x_fifo_status(struct usb_ep *ep);
@@ -1106,7 +1106,7 @@ static int lh7a40x_ep_disable(struct usb_ep *_ep)
1106} 1106}
1107 1107
1108static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, 1108static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep,
1109 unsigned gfp_flags) 1109 gfp_t gfp_flags)
1110{ 1110{
1111 struct lh7a40x_request *req; 1111 struct lh7a40x_request *req;
1112 1112
@@ -1134,7 +1134,7 @@ static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req)
1134} 1134}
1135 1135
1136static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned bytes, 1136static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned bytes,
1137 dma_addr_t * dma, unsigned gfp_flags) 1137 dma_addr_t * dma, gfp_t gfp_flags)
1138{ 1138{
1139 char *retval; 1139 char *retval;
1140 1140
@@ -1158,7 +1158,7 @@ static void lh7a40x_free_buffer(struct usb_ep *ep, void *buf, dma_addr_t dma,
1158 * NOTE: Sets INDEX register 1158 * NOTE: Sets INDEX register
1159 */ 1159 */
1160static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req, 1160static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req,
1161 unsigned gfp_flags) 1161 gfp_t gfp_flags)
1162{ 1162{
1163 struct lh7a40x_request *req; 1163 struct lh7a40x_request *req;
1164 struct lh7a40x_ep *ep; 1164 struct lh7a40x_ep *ep;
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 477fab2e74d1..c32e1f7476da 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -376,7 +376,7 @@ static int net2280_disable (struct usb_ep *_ep)
376/*-------------------------------------------------------------------------*/ 376/*-------------------------------------------------------------------------*/
377 377
378static struct usb_request * 378static struct usb_request *
379net2280_alloc_request (struct usb_ep *_ep, unsigned gfp_flags) 379net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
380{ 380{
381 struct net2280_ep *ep; 381 struct net2280_ep *ep;
382 struct net2280_request *req; 382 struct net2280_request *req;
@@ -463,7 +463,7 @@ net2280_alloc_buffer (
463 struct usb_ep *_ep, 463 struct usb_ep *_ep,
464 unsigned bytes, 464 unsigned bytes,
465 dma_addr_t *dma, 465 dma_addr_t *dma,
466 unsigned gfp_flags 466 gfp_t gfp_flags
467) 467)
468{ 468{
469 void *retval; 469 void *retval;
@@ -897,7 +897,7 @@ done (struct net2280_ep *ep, struct net2280_request *req, int status)
897/*-------------------------------------------------------------------------*/ 897/*-------------------------------------------------------------------------*/
898 898
899static int 899static int
900net2280_queue (struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags) 900net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
901{ 901{
902 struct net2280_request *req; 902 struct net2280_request *req;
903 struct net2280_ep *ep; 903 struct net2280_ep *ep;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index ff5533e69560..287c5900fb13 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -269,7 +269,7 @@ static int omap_ep_disable(struct usb_ep *_ep)
269/*-------------------------------------------------------------------------*/ 269/*-------------------------------------------------------------------------*/
270 270
271static struct usb_request * 271static struct usb_request *
272omap_alloc_request(struct usb_ep *ep, unsigned gfp_flags) 272omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
273{ 273{
274 struct omap_req *req; 274 struct omap_req *req;
275 275
@@ -298,7 +298,7 @@ omap_alloc_buffer(
298 struct usb_ep *_ep, 298 struct usb_ep *_ep,
299 unsigned bytes, 299 unsigned bytes,
300 dma_addr_t *dma, 300 dma_addr_t *dma,
301 unsigned gfp_flags 301 gfp_t gfp_flags
302) 302)
303{ 303{
304 void *retval; 304 void *retval;
@@ -937,7 +937,7 @@ static void dma_channel_release(struct omap_ep *ep)
937/*-------------------------------------------------------------------------*/ 937/*-------------------------------------------------------------------------*/
938 938
939static int 939static int
940omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags) 940omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
941{ 941{
942 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); 942 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
943 struct omap_req *req = container_of(_req, struct omap_req, req); 943 struct omap_req *req = container_of(_req, struct omap_req, req);
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 73f8c9404156..6e545393cfff 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -332,7 +332,7 @@ static int pxa2xx_ep_disable (struct usb_ep *_ep)
332 * pxa2xx_ep_alloc_request - allocate a request data structure 332 * pxa2xx_ep_alloc_request - allocate a request data structure
333 */ 333 */
334static struct usb_request * 334static struct usb_request *
335pxa2xx_ep_alloc_request (struct usb_ep *_ep, unsigned gfp_flags) 335pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
336{ 336{
337 struct pxa2xx_request *req; 337 struct pxa2xx_request *req;
338 338
@@ -367,7 +367,7 @@ pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
367 */ 367 */
368static void * 368static void *
369pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes, 369pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
370 dma_addr_t *dma, unsigned gfp_flags) 370 dma_addr_t *dma, gfp_t gfp_flags)
371{ 371{
372 char *retval; 372 char *retval;
373 373
@@ -874,7 +874,7 @@ done:
874/*-------------------------------------------------------------------------*/ 874/*-------------------------------------------------------------------------*/
875 875
876static int 876static int
877pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags) 877pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
878{ 878{
879 struct pxa2xx_request *req; 879 struct pxa2xx_request *req;
880 struct pxa2xx_ep *ep; 880 struct pxa2xx_ep *ep;
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index c925d9222f53..b35ac6d334f8 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -300,18 +300,18 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
300 u8 type, unsigned int index, int is_otg); 300 u8 type, unsigned int index, int is_otg);
301 301
302static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len, 302static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len,
303 unsigned kmalloc_flags); 303 gfp_t kmalloc_flags);
304static void gs_free_req(struct usb_ep *ep, struct usb_request *req); 304static void gs_free_req(struct usb_ep *ep, struct usb_request *req);
305 305
306static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len, 306static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len,
307 unsigned kmalloc_flags); 307 gfp_t kmalloc_flags);
308static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req); 308static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req);
309 309
310static int gs_alloc_ports(struct gs_dev *dev, unsigned kmalloc_flags); 310static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags);
311static void gs_free_ports(struct gs_dev *dev); 311static void gs_free_ports(struct gs_dev *dev);
312 312
313/* circular buffer */ 313/* circular buffer */
314static struct gs_buf *gs_buf_alloc(unsigned int size, unsigned kmalloc_flags); 314static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags);
315static void gs_buf_free(struct gs_buf *gb); 315static void gs_buf_free(struct gs_buf *gb);
316static void gs_buf_clear(struct gs_buf *gb); 316static void gs_buf_clear(struct gs_buf *gb);
317static unsigned int gs_buf_data_avail(struct gs_buf *gb); 317static unsigned int gs_buf_data_avail(struct gs_buf *gb);
@@ -2091,7 +2091,7 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
2091 * usb_request or NULL if there is an error. 2091 * usb_request or NULL if there is an error.
2092 */ 2092 */
2093static struct usb_request * 2093static struct usb_request *
2094gs_alloc_req(struct usb_ep *ep, unsigned int len, unsigned kmalloc_flags) 2094gs_alloc_req(struct usb_ep *ep, unsigned int len, gfp_t kmalloc_flags)
2095{ 2095{
2096 struct usb_request *req; 2096 struct usb_request *req;
2097 2097
@@ -2132,7 +2132,7 @@ static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
2132 * endpoint, buffer len, and kmalloc flags. 2132 * endpoint, buffer len, and kmalloc flags.
2133 */ 2133 */
2134static struct gs_req_entry * 2134static struct gs_req_entry *
2135gs_alloc_req_entry(struct usb_ep *ep, unsigned len, unsigned kmalloc_flags) 2135gs_alloc_req_entry(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
2136{ 2136{
2137 struct gs_req_entry *req; 2137 struct gs_req_entry *req;
2138 2138
@@ -2173,7 +2173,7 @@ static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req)
2173 * 2173 *
2174 * The device lock is normally held when calling this function. 2174 * The device lock is normally held when calling this function.
2175 */ 2175 */
2176static int gs_alloc_ports(struct gs_dev *dev, unsigned kmalloc_flags) 2176static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags)
2177{ 2177{
2178 int i; 2178 int i;
2179 struct gs_port *port; 2179 struct gs_port *port;
@@ -2255,7 +2255,7 @@ static void gs_free_ports(struct gs_dev *dev)
2255 * 2255 *
2256 * Allocate a circular buffer and all associated memory. 2256 * Allocate a circular buffer and all associated memory.
2257 */ 2257 */
2258static struct gs_buf *gs_buf_alloc(unsigned int size, unsigned kmalloc_flags) 2258static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
2259{ 2259{
2260 struct gs_buf *gb; 2260 struct gs_buf *gb;
2261 2261
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 6890e773b2a2..ec9c424f1d97 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -612,7 +612,7 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
612} 612}
613 613
614static struct usb_request * 614static struct usb_request *
615source_sink_start_ep (struct usb_ep *ep, unsigned gfp_flags) 615source_sink_start_ep (struct usb_ep *ep, gfp_t gfp_flags)
616{ 616{
617 struct usb_request *req; 617 struct usb_request *req;
618 int status; 618 int status;
@@ -640,7 +640,7 @@ source_sink_start_ep (struct usb_ep *ep, unsigned gfp_flags)
640} 640}
641 641
642static int 642static int
643set_source_sink_config (struct zero_dev *dev, unsigned gfp_flags) 643set_source_sink_config (struct zero_dev *dev, gfp_t gfp_flags)
644{ 644{
645 int result = 0; 645 int result = 0;
646 struct usb_ep *ep; 646 struct usb_ep *ep;
@@ -744,7 +744,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
744} 744}
745 745
746static int 746static int
747set_loopback_config (struct zero_dev *dev, unsigned gfp_flags) 747set_loopback_config (struct zero_dev *dev, gfp_t gfp_flags)
748{ 748{
749 int result = 0; 749 int result = 0;
750 struct usb_ep *ep; 750 struct usb_ep *ep;
@@ -845,7 +845,7 @@ static void zero_reset_config (struct zero_dev *dev)
845 * by limiting configuration choices (like the pxa2xx). 845 * by limiting configuration choices (like the pxa2xx).
846 */ 846 */
847static int 847static int
848zero_set_config (struct zero_dev *dev, unsigned number, unsigned gfp_flags) 848zero_set_config (struct zero_dev *dev, unsigned number, gfp_t gfp_flags)
849{ 849{
850 int result = 0; 850 int result = 0;
851 struct usb_gadget *gadget = dev->gadget; 851 struct usb_gadget *gadget = dev->gadget;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b948ffd94f45..f5eb9e7b5b18 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -983,7 +983,7 @@ static int ehci_urb_enqueue (
983 struct usb_hcd *hcd, 983 struct usb_hcd *hcd,
984 struct usb_host_endpoint *ep, 984 struct usb_host_endpoint *ep,
985 struct urb *urb, 985 struct urb *urb,
986 unsigned mem_flags 986 gfp_t mem_flags
987) { 987) {
988 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 988 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
989 struct list_head qtd_list; 989 struct list_head qtd_list;
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 5c38ad869485..91c2ab43cbcc 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -45,7 +45,7 @@ static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma)
45 INIT_LIST_HEAD (&qtd->qtd_list); 45 INIT_LIST_HEAD (&qtd->qtd_list);
46} 46}
47 47
48static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags) 48static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
49{ 49{
50 struct ehci_qtd *qtd; 50 struct ehci_qtd *qtd;
51 dma_addr_t dma; 51 dma_addr_t dma;
@@ -79,7 +79,7 @@ static void qh_destroy (struct kref *kref)
79 dma_pool_free (ehci->qh_pool, qh, qh->qh_dma); 79 dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
80} 80}
81 81
82static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags) 82static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
83{ 83{
84 struct ehci_qh *qh; 84 struct ehci_qh *qh;
85 dma_addr_t dma; 85 dma_addr_t dma;
@@ -161,7 +161,7 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci)
161} 161}
162 162
163/* remember to add cleanup code (above) if you add anything here */ 163/* remember to add cleanup code (above) if you add anything here */
164static int ehci_mem_init (struct ehci_hcd *ehci, int flags) 164static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
165{ 165{
166 int i; 166 int i;
167 167
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 940d38ca7d91..5bb872c3496d 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -477,7 +477,7 @@ qh_urb_transaction (
477 struct ehci_hcd *ehci, 477 struct ehci_hcd *ehci,
478 struct urb *urb, 478 struct urb *urb,
479 struct list_head *head, 479 struct list_head *head,
480 int flags 480 gfp_t flags
481) { 481) {
482 struct ehci_qtd *qtd, *qtd_prev; 482 struct ehci_qtd *qtd, *qtd_prev;
483 dma_addr_t buf; 483 dma_addr_t buf;
@@ -629,7 +629,7 @@ static struct ehci_qh *
629qh_make ( 629qh_make (
630 struct ehci_hcd *ehci, 630 struct ehci_hcd *ehci,
631 struct urb *urb, 631 struct urb *urb,
632 int flags 632 gfp_t flags
633) { 633) {
634 struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); 634 struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
635 u32 info1 = 0, info2 = 0; 635 u32 info1 = 0, info2 = 0;
@@ -906,7 +906,7 @@ submit_async (
906 struct usb_host_endpoint *ep, 906 struct usb_host_endpoint *ep,
907 struct urb *urb, 907 struct urb *urb,
908 struct list_head *qtd_list, 908 struct list_head *qtd_list,
909 unsigned mem_flags 909 gfp_t mem_flags
910) { 910) {
911 struct ehci_qtd *qtd; 911 struct ehci_qtd *qtd;
912 int epnum; 912 int epnum;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index ccc7300baa6d..f0c8aa1ccd5d 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -589,7 +589,7 @@ static int intr_submit (
589 struct usb_host_endpoint *ep, 589 struct usb_host_endpoint *ep,
590 struct urb *urb, 590 struct urb *urb,
591 struct list_head *qtd_list, 591 struct list_head *qtd_list,
592 unsigned mem_flags 592 gfp_t mem_flags
593) { 593) {
594 unsigned epnum; 594 unsigned epnum;
595 unsigned long flags; 595 unsigned long flags;
@@ -634,7 +634,7 @@ done:
634/* ehci_iso_stream ops work with both ITD and SITD */ 634/* ehci_iso_stream ops work with both ITD and SITD */
635 635
636static struct ehci_iso_stream * 636static struct ehci_iso_stream *
637iso_stream_alloc (unsigned mem_flags) 637iso_stream_alloc (gfp_t mem_flags)
638{ 638{
639 struct ehci_iso_stream *stream; 639 struct ehci_iso_stream *stream;
640 640
@@ -851,7 +851,7 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
851/* ehci_iso_sched ops can be ITD-only or SITD-only */ 851/* ehci_iso_sched ops can be ITD-only or SITD-only */
852 852
853static struct ehci_iso_sched * 853static struct ehci_iso_sched *
854iso_sched_alloc (unsigned packets, unsigned mem_flags) 854iso_sched_alloc (unsigned packets, gfp_t mem_flags)
855{ 855{
856 struct ehci_iso_sched *iso_sched; 856 struct ehci_iso_sched *iso_sched;
857 int size = sizeof *iso_sched; 857 int size = sizeof *iso_sched;
@@ -924,7 +924,7 @@ itd_urb_transaction (
924 struct ehci_iso_stream *stream, 924 struct ehci_iso_stream *stream,
925 struct ehci_hcd *ehci, 925 struct ehci_hcd *ehci,
926 struct urb *urb, 926 struct urb *urb,
927 unsigned mem_flags 927 gfp_t mem_flags
928) 928)
929{ 929{
930 struct ehci_itd *itd; 930 struct ehci_itd *itd;
@@ -1418,7 +1418,7 @@ itd_complete (
1418/*-------------------------------------------------------------------------*/ 1418/*-------------------------------------------------------------------------*/
1419 1419
1420static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, 1420static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
1421 unsigned mem_flags) 1421 gfp_t mem_flags)
1422{ 1422{
1423 int status = -EINVAL; 1423 int status = -EINVAL;
1424 unsigned long flags; 1424 unsigned long flags;
@@ -1529,7 +1529,7 @@ sitd_urb_transaction (
1529 struct ehci_iso_stream *stream, 1529 struct ehci_iso_stream *stream,
1530 struct ehci_hcd *ehci, 1530 struct ehci_hcd *ehci,
1531 struct urb *urb, 1531 struct urb *urb,
1532 unsigned mem_flags 1532 gfp_t mem_flags
1533) 1533)
1534{ 1534{
1535 struct ehci_sitd *sitd; 1535 struct ehci_sitd *sitd;
@@ -1779,7 +1779,7 @@ sitd_complete (
1779 1779
1780 1780
1781static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, 1781static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
1782 unsigned mem_flags) 1782 gfp_t mem_flags)
1783{ 1783{
1784 int status = -EINVAL; 1784 int status = -EINVAL;
1785 unsigned long flags; 1785 unsigned long flags;
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index e142056b0d2c..2548d94fcd72 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -694,7 +694,7 @@ static int balance(struct isp116x *isp116x, u16 period, u16 load)
694 694
695static int isp116x_urb_enqueue(struct usb_hcd *hcd, 695static int isp116x_urb_enqueue(struct usb_hcd *hcd,
696 struct usb_host_endpoint *hep, struct urb *urb, 696 struct usb_host_endpoint *hep, struct urb *urb,
697 unsigned mem_flags) 697 gfp_t mem_flags)
698{ 698{
699 struct isp116x *isp116x = hcd_to_isp116x(hcd); 699 struct isp116x *isp116x = hcd_to_isp116x(hcd);
700 struct usb_device *udev = urb->dev; 700 struct usb_device *udev = urb->dev;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 67c1aa5eb1c1..f8da8c7af7c6 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -180,7 +180,7 @@ static int ohci_urb_enqueue (
180 struct usb_hcd *hcd, 180 struct usb_hcd *hcd,
181 struct usb_host_endpoint *ep, 181 struct usb_host_endpoint *ep,
182 struct urb *urb, 182 struct urb *urb,
183 unsigned mem_flags 183 gfp_t mem_flags
184) { 184) {
185 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 185 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
186 struct ed *ed; 186 struct ed *ed;
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index fd3c4d3714bd..9fb83dfb1eb4 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -84,7 +84,7 @@ dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
84 84
85/* TDs ... */ 85/* TDs ... */
86static struct td * 86static struct td *
87td_alloc (struct ohci_hcd *hc, unsigned mem_flags) 87td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
88{ 88{
89 dma_addr_t dma; 89 dma_addr_t dma;
90 struct td *td; 90 struct td *td;
@@ -118,7 +118,7 @@ td_free (struct ohci_hcd *hc, struct td *td)
118 118
119/* EDs ... */ 119/* EDs ... */
120static struct ed * 120static struct ed *
121ed_alloc (struct ohci_hcd *hc, unsigned mem_flags) 121ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
122{ 122{
123 dma_addr_t dma; 123 dma_addr_t dma;
124 struct ed *ed; 124 struct ed *ed;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index d42a15d10a46..cad858575cea 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -818,7 +818,7 @@ static int sl811h_urb_enqueue(
818 struct usb_hcd *hcd, 818 struct usb_hcd *hcd,
819 struct usb_host_endpoint *hep, 819 struct usb_host_endpoint *hep,
820 struct urb *urb, 820 struct urb *urb,
821 unsigned mem_flags 821 gfp_t mem_flags
822) { 822) {
823 struct sl811 *sl811 = hcd_to_sl811(hcd); 823 struct sl811 *sl811 = hcd_to_sl811(hcd);
824 struct usb_device *udev = urb->dev; 824 struct usb_device *udev = urb->dev;
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index ea0d168a8c67..4e0fbe2c1a9a 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1164,7 +1164,7 @@ static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1164 1164
1165static int uhci_urb_enqueue(struct usb_hcd *hcd, 1165static int uhci_urb_enqueue(struct usb_hcd *hcd,
1166 struct usb_host_endpoint *ep, 1166 struct usb_host_endpoint *ep,
1167 struct urb *urb, unsigned mem_flags) 1167 struct urb *urb, gfp_t mem_flags)
1168{ 1168{
1169 int ret; 1169 int ret;
1170 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 1170 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 03fb70ef2eb3..0592cb5e6c4d 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -137,7 +137,7 @@ static void async_complete(struct urb *urb, struct pt_regs *ptregs)
137 137
138static struct uss720_async_request *submit_async_request(struct parport_uss720_private *priv, 138static struct uss720_async_request *submit_async_request(struct parport_uss720_private *priv,
139 __u8 request, __u8 requesttype, __u16 value, __u16 index, 139 __u8 request, __u8 requesttype, __u16 value, __u16 index,
140 unsigned int mem_flags) 140 gfp_t mem_flags)
141{ 141{
142 struct usb_device *usbdev; 142 struct usb_device *usbdev;
143 struct uss720_async_request *rq; 143 struct uss720_async_request *rq;
@@ -204,7 +204,7 @@ static unsigned int kill_all_async_requests_priv(struct parport_uss720_private *
204 204
205/* --------------------------------------------------------------------- */ 205/* --------------------------------------------------------------------- */
206 206
207static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, unsigned int mem_flags) 207static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, gfp_t mem_flags)
208{ 208{
209 struct parport_uss720_private *priv; 209 struct parport_uss720_private *priv;
210 struct uss720_async_request *rq; 210 struct uss720_async_request *rq;
@@ -238,7 +238,7 @@ static int get_1284_register(struct parport *pp, unsigned char reg, unsigned cha
238 return -EIO; 238 return -EIO;
239} 239}
240 240
241static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, unsigned int mem_flags) 241static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, gfp_t mem_flags)
242{ 242{
243 struct parport_uss720_private *priv; 243 struct parport_uss720_private *priv;
244 struct uss720_async_request *rq; 244 struct uss720_async_request *rq;
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c
index 861f00a43750..252a34fbb42c 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/usb/net/asix.c
@@ -753,7 +753,7 @@ static int ax88772_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
753} 753}
754 754
755static struct sk_buff *ax88772_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 755static struct sk_buff *ax88772_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
756 unsigned flags) 756 gfp_t flags)
757{ 757{
758 int padlen; 758 int padlen;
759 int headroom = skb_headroom(skb); 759 int headroom = skb_headroom(skb);
diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c
index c8763ae33c73..c0f263b202a6 100644
--- a/drivers/usb/net/gl620a.c
+++ b/drivers/usb/net/gl620a.c
@@ -301,7 +301,7 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
301} 301}
302 302
303static struct sk_buff * 303static struct sk_buff *
304genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags) 304genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
305{ 305{
306 int padlen; 306 int padlen;
307 int length = skb->len; 307 int length = skb->len;
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index e04b0ce3611a..c82655d3d448 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -477,13 +477,13 @@ static int kaweth_reset(struct kaweth_device *kaweth)
477} 477}
478 478
479static void kaweth_usb_receive(struct urb *, struct pt_regs *regs); 479static void kaweth_usb_receive(struct urb *, struct pt_regs *regs);
480static int kaweth_resubmit_rx_urb(struct kaweth_device *, unsigned); 480static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t);
481 481
482/**************************************************************** 482/****************************************************************
483 int_callback 483 int_callback
484*****************************************************************/ 484*****************************************************************/
485 485
486static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, int mf) 486static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf)
487{ 487{
488 int status; 488 int status;
489 489
@@ -550,7 +550,7 @@ static void kaweth_resubmit_tl(void *d)
550 * kaweth_resubmit_rx_urb 550 * kaweth_resubmit_rx_urb
551 ****************************************************************/ 551 ****************************************************************/
552static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth, 552static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth,
553 unsigned mem_flags) 553 gfp_t mem_flags)
554{ 554{
555 int result; 555 int result;
556 556
diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c
index a4309c4a491b..cee55f8cf64f 100644
--- a/drivers/usb/net/net1080.c
+++ b/drivers/usb/net/net1080.c
@@ -500,7 +500,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
500} 500}
501 501
502static struct sk_buff * 502static struct sk_buff *
503net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags) 503net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
504{ 504{
505 int padlen; 505 int padlen;
506 struct sk_buff *skb2; 506 struct sk_buff *skb2;
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index 2ed2e5fb7778..b5a925dc1beb 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -517,7 +517,7 @@ static int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
517} 517}
518 518
519static struct sk_buff * 519static struct sk_buff *
520rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags) 520rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
521{ 521{
522 struct rndis_data_hdr *hdr; 522 struct rndis_data_hdr *hdr;
523 struct sk_buff *skb2; 523 struct sk_buff *skb2;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 6c460918d54f..fce81d738933 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
288 288
289static void rx_complete (struct urb *urb, struct pt_regs *regs); 289static void rx_complete (struct urb *urb, struct pt_regs *regs);
290 290
291static void rx_submit (struct usbnet *dev, struct urb *urb, unsigned flags) 291static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
292{ 292{
293 struct sk_buff *skb; 293 struct sk_buff *skb;
294 struct skb_data *entry; 294 struct skb_data *entry;
diff --git a/drivers/usb/net/usbnet.h b/drivers/usb/net/usbnet.h
index 7aa0abd1a9bd..89fc4958eecf 100644
--- a/drivers/usb/net/usbnet.h
+++ b/drivers/usb/net/usbnet.h
@@ -107,7 +107,7 @@ struct driver_info {
107 107
108 /* fixup tx packet (add framing) */ 108 /* fixup tx packet (add framing) */
109 struct sk_buff *(*tx_fixup)(struct usbnet *dev, 109 struct sk_buff *(*tx_fixup)(struct usbnet *dev,
110 struct sk_buff *skb, unsigned flags); 110 struct sk_buff *skb, gfp_t flags);
111 111
112 /* for new devices, use the descriptor-reading code instead */ 112 /* for new devices, use the descriptor-reading code instead */
113 int in; /* rx endpoint */ 113 int in; /* rx endpoint */
diff --git a/drivers/usb/net/zaurus.c b/drivers/usb/net/zaurus.c
index ee3b892aeabc..5d4b7d55b097 100644
--- a/drivers/usb/net/zaurus.c
+++ b/drivers/usb/net/zaurus.c
@@ -62,7 +62,7 @@
62 */ 62 */
63 63
64static struct sk_buff * 64static struct sk_buff *
65zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags) 65zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
66{ 66{
67 int padlen; 67 int padlen;
68 struct sk_buff *skb2; 68 struct sk_buff *skb2;
diff --git a/drivers/usb/net/zd1201.c b/drivers/usb/net/zd1201.c
index c4e479ee926a..2f52261c7cc1 100644
--- a/drivers/usb/net/zd1201.c
+++ b/drivers/usb/net/zd1201.c
@@ -521,7 +521,7 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int
521 int reqlen; 521 int reqlen;
522 char seq=0; 522 char seq=0;
523 struct urb *urb; 523 struct urb *urb;
524 unsigned int gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC; 524 gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;
525 525
526 len += 4; /* first 4 are for header */ 526 len += 4; /* first 4 are for header */
527 527
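[Note: zd1201's wait ? GFP_NOIO : GFP_ATOMIC closes the section with the third common flag choice: a path that may sleep but must not let the allocator start new I/O to reclaim memory. A final sketch of that selection:]

#include <linux/gfp.h>
#include <linux/slab.h>

/* sketch: GFP_NOIO sleeps but forbids I/O during reclaim, for
 * allocations made underneath an I/O path; GFP_ATOMIC is for
 * callers that cannot sleep at all */
static void *ctrl_buf_alloc(size_t len, int wait)
{
	gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;

	return kmalloc(len, gfp_mask);
}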