Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/aoe/aoechr.c        |   10
-rw-r--r--  drivers/block/as-iosched.c        |   27
-rw-r--r--  drivers/block/cciss.c             |   67
-rw-r--r--  drivers/block/cciss.h             |    4
-rw-r--r--  drivers/block/cfq-iosched.c       | 2097
-rw-r--r--  drivers/block/deadline-iosched.c  |   15
-rw-r--r--  drivers/block/elevator.c          |    9
-rw-r--r--  drivers/block/genhd.c             |   27
-rw-r--r--  drivers/block/ioctl.c             |   74
-rw-r--r--  drivers/block/ll_rw_blk.c         |  346
-rw-r--r--  drivers/block/loop.c              |   81
-rw-r--r--  drivers/block/paride/pg.c         |   14
-rw-r--r--  drivers/block/paride/pt.c         |   20
-rw-r--r--  drivers/block/pktcdvd.c           |   39
-rw-r--r--  drivers/block/swim3.c             |   10
-rw-r--r--  drivers/block/sx8.c               |   11
-rw-r--r--  drivers/block/ub.c                |  213
17 files changed, 2023 insertions, 1041 deletions
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 14aeca3e2e8c..45a243096187 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -36,7 +36,7 @@ static int emsgs_head_idx, emsgs_tail_idx;
36 | static struct semaphore emsgs_sema; | 36 | static struct semaphore emsgs_sema; |
37 | static spinlock_t emsgs_lock; | 37 | static spinlock_t emsgs_lock; |
38 | static int nblocked_emsgs_readers; | 38 | static int nblocked_emsgs_readers; |
39 | static struct class_simple *aoe_class; | 39 | static struct class *aoe_class; |
40 | static struct aoe_chardev chardevs[] = { | 40 | static struct aoe_chardev chardevs[] = { |
41 | { MINOR_ERR, "err" }, | 41 | { MINOR_ERR, "err" }, |
42 | { MINOR_DISCOVER, "discover" }, | 42 | { MINOR_DISCOVER, "discover" }, |
@@ -218,13 +218,13 @@ aoechr_init(void)
218 | } | 218 | } |
219 | sema_init(&emsgs_sema, 0); | 219 | sema_init(&emsgs_sema, 0); |
220 | spin_lock_init(&emsgs_lock); | 220 | spin_lock_init(&emsgs_lock); |
221 | aoe_class = class_simple_create(THIS_MODULE, "aoe"); | 221 | aoe_class = class_create(THIS_MODULE, "aoe"); |
222 | if (IS_ERR(aoe_class)) { | 222 | if (IS_ERR(aoe_class)) { |
223 | unregister_chrdev(AOE_MAJOR, "aoechr"); | 223 | unregister_chrdev(AOE_MAJOR, "aoechr"); |
224 | return PTR_ERR(aoe_class); | 224 | return PTR_ERR(aoe_class); |
225 | } | 225 | } |
226 | for (i = 0; i < ARRAY_SIZE(chardevs); ++i) | 226 | for (i = 0; i < ARRAY_SIZE(chardevs); ++i) |
227 | class_simple_device_add(aoe_class, | 227 | class_device_create(aoe_class, |
228 | MKDEV(AOE_MAJOR, chardevs[i].minor), | 228 | MKDEV(AOE_MAJOR, chardevs[i].minor), |
229 | NULL, chardevs[i].name); | 229 | NULL, chardevs[i].name); |
230 | 230 | ||
@@ -237,8 +237,8 @@ aoechr_exit(void)
237 | int i; | 237 | int i; |
238 | 238 | ||
239 | for (i = 0; i < ARRAY_SIZE(chardevs); ++i) | 239 | for (i = 0; i < ARRAY_SIZE(chardevs); ++i) |
240 | class_simple_device_remove(MKDEV(AOE_MAJOR, chardevs[i].minor)); | 240 | class_device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor)); |
241 | class_simple_destroy(aoe_class); | 241 | class_destroy(aoe_class); |
242 | unregister_chrdev(AOE_MAJOR, "aoechr"); | 242 | unregister_chrdev(AOE_MAJOR, "aoechr"); |
243 | } | 243 | } |
244 | 244 | ||
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index a9575bb58a5e..95c0a3690b0f 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -1806,7 +1806,8 @@ static void as_put_request(request_queue_t *q, struct request *rq)
1806 | rq->elevator_private = NULL; | 1806 | rq->elevator_private = NULL; |
1807 | } | 1807 | } |
1808 | 1808 | ||
1809 | static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask) | 1809 | static int as_set_request(request_queue_t *q, struct request *rq, |
1810 | struct bio *bio, int gfp_mask) | ||
1810 | { | 1811 | { |
1811 | struct as_data *ad = q->elevator->elevator_data; | 1812 | struct as_data *ad = q->elevator->elevator_data; |
1812 | struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); | 1813 | struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); |
@@ -1827,7 +1828,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
1827 | return 1; | 1828 | return 1; |
1828 | } | 1829 | } |
1829 | 1830 | ||
1830 | static int as_may_queue(request_queue_t *q, int rw) | 1831 | static int as_may_queue(request_queue_t *q, int rw, struct bio *bio) |
1831 | { | 1832 | { |
1832 | int ret = ELV_MQUEUE_MAY; | 1833 | int ret = ELV_MQUEUE_MAY; |
1833 | struct as_data *ad = q->elevator->elevator_data; | 1834 | struct as_data *ad = q->elevator->elevator_data; |
@@ -1871,20 +1872,22 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
1871 | if (!arq_pool) | 1872 | if (!arq_pool) |
1872 | return -ENOMEM; | 1873 | return -ENOMEM; |
1873 | 1874 | ||
1874 | ad = kmalloc(sizeof(*ad), GFP_KERNEL); | 1875 | ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node); |
1875 | if (!ad) | 1876 | if (!ad) |
1876 | return -ENOMEM; | 1877 | return -ENOMEM; |
1877 | memset(ad, 0, sizeof(*ad)); | 1878 | memset(ad, 0, sizeof(*ad)); |
1878 | 1879 | ||
1879 | ad->q = q; /* Identify what queue the data belongs to */ | 1880 | ad->q = q; /* Identify what queue the data belongs to */ |
1880 | 1881 | ||
1881 | ad->hash = kmalloc(sizeof(struct list_head)*AS_HASH_ENTRIES,GFP_KERNEL); | 1882 | ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES, |
1883 | GFP_KERNEL, q->node); | ||
1882 | if (!ad->hash) { | 1884 | if (!ad->hash) { |
1883 | kfree(ad); | 1885 | kfree(ad); |
1884 | return -ENOMEM; | 1886 | return -ENOMEM; |
1885 | } | 1887 | } |
1886 | 1888 | ||
1887 | ad->arq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, arq_pool); | 1889 | ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, |
1890 | mempool_free_slab, arq_pool, q->node); | ||
1888 | if (!ad->arq_pool) { | 1891 | if (!ad->arq_pool) { |
1889 | kfree(ad->hash); | 1892 | kfree(ad->hash); |
1890 | kfree(ad); | 1893 | kfree(ad); |
@@ -1932,23 +1935,15 @@ struct as_fs_entry {
1932 | static ssize_t | 1935 | static ssize_t |
1933 | as_var_show(unsigned int var, char *page) | 1936 | as_var_show(unsigned int var, char *page) |
1934 | { | 1937 | { |
1935 | var = (var * 1000) / HZ; | ||
1936 | return sprintf(page, "%d\n", var); | 1938 | return sprintf(page, "%d\n", var); |
1937 | } | 1939 | } |
1938 | 1940 | ||
1939 | static ssize_t | 1941 | static ssize_t |
1940 | as_var_store(unsigned long *var, const char *page, size_t count) | 1942 | as_var_store(unsigned long *var, const char *page, size_t count) |
1941 | { | 1943 | { |
1942 | unsigned long tmp; | ||
1943 | char *p = (char *) page; | 1944 | char *p = (char *) page; |
1944 | 1945 | ||
1945 | tmp = simple_strtoul(p, &p, 10); | 1946 | *var = simple_strtoul(p, &p, 10); |
1946 | if (tmp != 0) { | ||
1947 | tmp = (tmp * HZ) / 1000; | ||
1948 | if (tmp == 0) | ||
1949 | tmp = 1; | ||
1950 | } | ||
1951 | *var = tmp; | ||
1952 | return count; | 1947 | return count; |
1953 | } | 1948 | } |
1954 | 1949 | ||
@@ -2044,7 +2039,7 @@ as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2044 | struct as_fs_entry *entry = to_as(attr); | 2039 | struct as_fs_entry *entry = to_as(attr); |
2045 | 2040 | ||
2046 | if (!entry->show) | 2041 | if (!entry->show) |
2047 | return 0; | 2042 | return -EIO; |
2048 | 2043 | ||
2049 | return entry->show(e->elevator_data, page); | 2044 | return entry->show(e->elevator_data, page); |
2050 | } | 2045 | } |
@@ -2057,7 +2052,7 @@ as_attr_store(struct kobject *kobj, struct attribute *attr,
2057 | struct as_fs_entry *entry = to_as(attr); | 2052 | struct as_fs_entry *entry = to_as(attr); |
2058 | 2053 | ||
2059 | if (!entry->store) | 2054 | if (!entry->store) |
2060 | return -EINVAL; | 2055 | return -EIO; |
2061 | 2056 | ||
2062 | return entry->store(e->elevator_data, page, length); | 2057 | return entry->store(e->elevator_data, page, length); |
2063 | } | 2058 | } |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index abde27027c06..418b1469d75d 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1,6 +1,6 @@
1 | /* | 1 | /* |
2 | * Disk Array driver for HP SA 5xxx and 6xxx Controllers | 2 | * Disk Array driver for HP SA 5xxx and 6xxx Controllers |
3 | * Copyright 2000, 2002 Hewlett-Packard Development Company, L.P. | 3 | * Copyright 2000, 2005 Hewlett-Packard Development Company, L.P. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -54,7 +54,7 @@
54 | MODULE_AUTHOR("Hewlett-Packard Company"); | 54 | MODULE_AUTHOR("Hewlett-Packard Company"); |
55 | MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6"); | 55 | MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6"); |
56 | MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" | 56 | MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" |
57 | " SA6i P600 P800 E400"); | 57 | " SA6i P600 P800 E400 E300"); |
58 | MODULE_LICENSE("GPL"); | 58 | MODULE_LICENSE("GPL"); |
59 | 59 | ||
60 | #include "cciss_cmd.h" | 60 | #include "cciss_cmd.h" |
@@ -85,8 +85,10 @@ static const struct pci_device_id cciss_pci_device_id[] = {
85 | 0x103C, 0x3225, 0, 0, 0}, | 85 | 0x103C, 0x3225, 0, 0, 0}, |
86 | { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB, | 86 | { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB, |
87 | 0x103c, 0x3223, 0, 0, 0}, | 87 | 0x103c, 0x3223, 0, 0, 0}, |
88 | { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB, | 88 | { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, |
89 | 0x103c, 0x3231, 0, 0, 0}, | 89 | 0x103c, 0x3231, 0, 0, 0}, |
90 | { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, | ||
91 | 0x103c, 0x3233, 0, 0, 0}, | ||
90 | {0,} | 92 | {0,} |
91 | }; | 93 | }; |
92 | MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); | 94 | MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); |
@@ -110,6 +112,7 @@ static struct board_type products[] = {
110 | { 0x3225103C, "Smart Array P600", &SA5_access}, | 112 | { 0x3225103C, "Smart Array P600", &SA5_access}, |
111 | { 0x3223103C, "Smart Array P800", &SA5_access}, | 113 | { 0x3223103C, "Smart Array P800", &SA5_access}, |
112 | { 0x3231103C, "Smart Array E400", &SA5_access}, | 114 | { 0x3231103C, "Smart Array E400", &SA5_access}, |
115 | { 0x3233103C, "Smart Array E300", &SA5_access}, | ||
113 | }; | 116 | }; |
114 | 117 | ||
115 | /* How long to wait (in millesconds) for board to go into simple mode */ | 118 | /* How long to wait (in millesconds) for board to go into simple mode */ |
@@ -635,6 +638,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
635 | cciss_pci_info_struct pciinfo; | 638 | cciss_pci_info_struct pciinfo; |
636 | 639 | ||
637 | if (!arg) return -EINVAL; | 640 | if (!arg) return -EINVAL; |
641 | pciinfo.domain = pci_domain_nr(host->pdev->bus); | ||
638 | pciinfo.bus = host->pdev->bus->number; | 642 | pciinfo.bus = host->pdev->bus->number; |
639 | pciinfo.dev_fn = host->pdev->devfn; | 643 | pciinfo.dev_fn = host->pdev->devfn; |
640 | pciinfo.board_id = host->board_id; | 644 | pciinfo.board_id = host->board_id; |
@@ -782,18 +786,10 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
782 | 786 | ||
783 | case CCISS_GETLUNINFO: { | 787 | case CCISS_GETLUNINFO: { |
784 | LogvolInfo_struct luninfo; | 788 | LogvolInfo_struct luninfo; |
785 | int i; | ||
786 | 789 | ||
787 | luninfo.LunID = drv->LunID; | 790 | luninfo.LunID = drv->LunID; |
788 | luninfo.num_opens = drv->usage_count; | 791 | luninfo.num_opens = drv->usage_count; |
789 | luninfo.num_parts = 0; | 792 | luninfo.num_parts = 0; |
790 | /* count partitions 1 to 15 with sizes > 0 */ | ||
791 | for (i = 0; i < MAX_PART - 1; i++) { | ||
792 | if (!disk->part[i]) | ||
793 | continue; | ||
794 | if (disk->part[i]->nr_sects != 0) | ||
795 | luninfo.num_parts++; | ||
796 | } | ||
797 | if (copy_to_user(argp, &luninfo, | 793 | if (copy_to_user(argp, &luninfo, |
798 | sizeof(LogvolInfo_struct))) | 794 | sizeof(LogvolInfo_struct))) |
799 | return -EFAULT; | 795 | return -EFAULT; |
@@ -1139,7 +1135,7 @@ static int revalidate_allvol(ctlr_info_t *host)
1139 | /* this is for the online array utilities */ | 1135 | /* this is for the online array utilities */ |
1140 | if (!drv->heads && i) | 1136 | if (!drv->heads && i) |
1141 | continue; | 1137 | continue; |
1142 | blk_queue_hardsect_size(host->queue, drv->block_size); | 1138 | blk_queue_hardsect_size(drv->queue, drv->block_size); |
1143 | set_capacity(disk, drv->nr_blocks); | 1139 | set_capacity(disk, drv->nr_blocks); |
1144 | add_disk(disk); | 1140 | add_disk(disk); |
1145 | } | 1141 | } |
@@ -1695,7 +1691,7 @@ static int cciss_revalidate(struct gendisk *disk)
1695 | cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size); | 1691 | cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size); |
1696 | cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv); | 1692 | cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv); |
1697 | 1693 | ||
1698 | blk_queue_hardsect_size(h->queue, drv->block_size); | 1694 | blk_queue_hardsect_size(drv->queue, drv->block_size); |
1699 | set_capacity(disk, drv->nr_blocks); | 1695 | set_capacity(disk, drv->nr_blocks); |
1700 | 1696 | ||
1701 | kfree(size_buff); | 1697 | kfree(size_buff); |
@@ -2252,12 +2248,12 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2252 | * them up. We will also keep track of the next queue to run so | 2248 | * them up. We will also keep track of the next queue to run so |
2253 | * that every queue gets a chance to be started first. | 2249 | * that every queue gets a chance to be started first. |
2254 | */ | 2250 | */ |
2255 | for (j=0; j < NWD; j++){ | 2251 | for (j=0; j < h->highest_lun + 1; j++){ |
2256 | int curr_queue = (start_queue + j) % NWD; | 2252 | int curr_queue = (start_queue + j) % (h->highest_lun + 1); |
2257 | /* make sure the disk has been added and the drive is real | 2253 | /* make sure the disk has been added and the drive is real |
2258 | * because this can be called from the middle of init_one. | 2254 | * because this can be called from the middle of init_one. |
2259 | */ | 2255 | */ |
2260 | if(!(h->gendisk[curr_queue]->queue) || | 2256 | if(!(h->drv[curr_queue].queue) || |
2261 | !(h->drv[curr_queue].heads)) | 2257 | !(h->drv[curr_queue].heads)) |
2262 | continue; | 2258 | continue; |
2263 | blk_start_queue(h->gendisk[curr_queue]->queue); | 2259 | blk_start_queue(h->gendisk[curr_queue]->queue); |
@@ -2268,14 +2264,14 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2268 | if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) | 2264 | if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) |
2269 | { | 2265 | { |
2270 | if (curr_queue == start_queue){ | 2266 | if (curr_queue == start_queue){ |
2271 | h->next_to_run = (start_queue + 1) % NWD; | 2267 | h->next_to_run = (start_queue + 1) % (h->highest_lun + 1); |
2272 | goto cleanup; | 2268 | goto cleanup; |
2273 | } else { | 2269 | } else { |
2274 | h->next_to_run = curr_queue; | 2270 | h->next_to_run = curr_queue; |
2275 | goto cleanup; | 2271 | goto cleanup; |
2276 | } | 2272 | } |
2277 | } else { | 2273 | } else { |
2278 | curr_queue = (curr_queue + 1) % NWD; | 2274 | curr_queue = (curr_queue + 1) % (h->highest_lun + 1); |
2279 | } | 2275 | } |
2280 | } | 2276 | } |
2281 | 2277 | ||
@@ -2283,7 +2279,6 @@ cleanup:
2283 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 2279 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); |
2284 | return IRQ_HANDLED; | 2280 | return IRQ_HANDLED; |
2285 | } | 2281 | } |
2286 | |||
2287 | /* | 2282 | /* |
2288 | * We cannot read the structure directly, for portablity we must use | 2283 | * We cannot read the structure directly, for portablity we must use |
2289 | * the io functions. | 2284 | * the io functions. |
@@ -2793,13 +2788,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
2793 | } | 2788 | } |
2794 | 2789 | ||
2795 | spin_lock_init(&hba[i]->lock); | 2790 | spin_lock_init(&hba[i]->lock); |
2796 | q = blk_init_queue(do_cciss_request, &hba[i]->lock); | ||
2797 | if (!q) | ||
2798 | goto clean4; | ||
2799 | |||
2800 | q->backing_dev_info.ra_pages = READ_AHEAD; | ||
2801 | hba[i]->queue = q; | ||
2802 | q->queuedata = hba[i]; | ||
2803 | 2791 | ||
2804 | /* Initialize the pdev driver private data. | 2792 | /* Initialize the pdev driver private data. |
2805 | have it point to hba[i]. */ | 2793 | have it point to hba[i]. */ |
@@ -2821,6 +2809,20 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
2821 | 2809 | ||
2822 | cciss_procinit(i); | 2810 | cciss_procinit(i); |
2823 | 2811 | ||
2812 | for(j=0; j < NWD; j++) { /* mfm */ | ||
2813 | drive_info_struct *drv = &(hba[i]->drv[j]); | ||
2814 | struct gendisk *disk = hba[i]->gendisk[j]; | ||
2815 | |||
2816 | q = blk_init_queue(do_cciss_request, &hba[i]->lock); | ||
2817 | if (!q) { | ||
2818 | printk(KERN_ERR | ||
2819 | "cciss: unable to allocate queue for disk %d\n", | ||
2820 | j); | ||
2821 | break; | ||
2822 | } | ||
2823 | drv->queue = q; | ||
2824 | |||
2825 | q->backing_dev_info.ra_pages = READ_AHEAD; | ||
2824 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); | 2826 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); |
2825 | 2827 | ||
2826 | /* This is a hardware imposed limit. */ | 2828 | /* This is a hardware imposed limit. */ |
@@ -2831,26 +2833,23 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
2831 | 2833 | ||
2832 | blk_queue_max_sectors(q, 512); | 2834 | blk_queue_max_sectors(q, 512); |
2833 | 2835 | ||
2834 | 2836 | q->queuedata = hba[i]; | |
2835 | for(j=0; j<NWD; j++) { | ||
2836 | drive_info_struct *drv = &(hba[i]->drv[j]); | ||
2837 | struct gendisk *disk = hba[i]->gendisk[j]; | ||
2838 | |||
2839 | sprintf(disk->disk_name, "cciss/c%dd%d", i, j); | 2837 | sprintf(disk->disk_name, "cciss/c%dd%d", i, j); |
2840 | sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j); | 2838 | sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j); |
2841 | disk->major = hba[i]->major; | 2839 | disk->major = hba[i]->major; |
2842 | disk->first_minor = j << NWD_SHIFT; | 2840 | disk->first_minor = j << NWD_SHIFT; |
2843 | disk->fops = &cciss_fops; | 2841 | disk->fops = &cciss_fops; |
2844 | disk->queue = hba[i]->queue; | 2842 | disk->queue = q; |
2845 | disk->private_data = drv; | 2843 | disk->private_data = drv; |
2846 | /* we must register the controller even if no disks exist */ | 2844 | /* we must register the controller even if no disks exist */ |
2847 | /* this is for the online array utilities */ | 2845 | /* this is for the online array utilities */ |
2848 | if(!drv->heads && j) | 2846 | if(!drv->heads && j) |
2849 | continue; | 2847 | continue; |
2850 | blk_queue_hardsect_size(hba[i]->queue, drv->block_size); | 2848 | blk_queue_hardsect_size(q, drv->block_size); |
2851 | set_capacity(disk, drv->nr_blocks); | 2849 | set_capacity(disk, drv->nr_blocks); |
2852 | add_disk(disk); | 2850 | add_disk(disk); |
2853 | } | 2851 | } |
2852 | |||
2854 | return(1); | 2853 | return(1); |
2855 | 2854 | ||
2856 | clean4: | 2855 | clean4: |
@@ -2916,10 +2915,10 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
2916 | for (j = 0; j < NWD; j++) { | 2915 | for (j = 0; j < NWD; j++) { |
2917 | struct gendisk *disk = hba[i]->gendisk[j]; | 2916 | struct gendisk *disk = hba[i]->gendisk[j]; |
2918 | if (disk->flags & GENHD_FL_UP) | 2917 | if (disk->flags & GENHD_FL_UP) |
2918 | blk_cleanup_queue(disk->queue); | ||
2919 | del_gendisk(disk); | 2919 | del_gendisk(disk); |
2920 | } | 2920 | } |
2921 | 2921 | ||
2922 | blk_cleanup_queue(hba[i]->queue); | ||
2923 | pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), | 2922 | pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), |
2924 | hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); | 2923 | hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); |
2925 | pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct), | 2924 | pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct), |
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 8fb19206eddb..566587d0a500 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -29,6 +29,7 @@ typedef struct _drive_info_struct
29 | { | 29 | { |
30 | __u32 LunID; | 30 | __u32 LunID; |
31 | int usage_count; | 31 | int usage_count; |
32 | struct request_queue *queue; | ||
32 | sector_t nr_blocks; | 33 | sector_t nr_blocks; |
33 | int block_size; | 34 | int block_size; |
34 | int heads; | 35 | int heads; |
@@ -72,7 +73,6 @@ struct ctlr_info
72 | unsigned int maxQsinceinit; | 73 | unsigned int maxQsinceinit; |
73 | unsigned int maxSG; | 74 | unsigned int maxSG; |
74 | spinlock_t lock; | 75 | spinlock_t lock; |
75 | struct request_queue *queue; | ||
76 | 76 | ||
77 | //* pointers to command and error info pool */ | 77 | //* pointers to command and error info pool */ |
78 | CommandList_struct *cmd_pool; | 78 | CommandList_struct *cmd_pool; |
@@ -260,7 +260,7 @@ struct board_type {
260 | struct access_method *access; | 260 | struct access_method *access; |
261 | }; | 261 | }; |
262 | 262 | ||
263 | #define CCISS_LOCK(i) (hba[i]->queue->queue_lock) | 263 | #define CCISS_LOCK(i) (&hba[i]->lock) |
264 | 264 | ||
265 | #endif /* CCISS_H */ | 265 | #endif /* CCISS_H */ |
266 | 266 | ||
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 2210bacad56a..cd056e7e64ec 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -21,22 +21,34 @@
21 | #include <linux/hash.h> | 21 | #include <linux/hash.h> |
22 | #include <linux/rbtree.h> | 22 | #include <linux/rbtree.h> |
23 | #include <linux/mempool.h> | 23 | #include <linux/mempool.h> |
24 | 24 | #include <linux/ioprio.h> | |
25 | static unsigned long max_elapsed_crq; | 25 | #include <linux/writeback.h> |
26 | static unsigned long max_elapsed_dispatch; | ||
27 | 26 | ||
28 | /* | 27 | /* |
29 | * tunables | 28 | * tunables |
30 | */ | 29 | */ |
31 | static int cfq_quantum = 4; /* max queue in one round of service */ | 30 | static int cfq_quantum = 4; /* max queue in one round of service */ |
32 | static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/ | 31 | static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/ |
33 | static int cfq_service = HZ; /* period over which service is avg */ | 32 | static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; |
34 | static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */ | ||
35 | static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */ | ||
36 | static int cfq_fifo_rate = HZ / 8; /* fifo expiry rate */ | ||
37 | static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ | 33 | static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ |
38 | static int cfq_back_penalty = 2; /* penalty of a backwards seek */ | 34 | static int cfq_back_penalty = 2; /* penalty of a backwards seek */ |
39 | 35 | ||
36 | static int cfq_slice_sync = HZ / 10; | ||
37 | static int cfq_slice_async = HZ / 25; | ||
38 | static int cfq_slice_async_rq = 2; | ||
39 | static int cfq_slice_idle = HZ / 100; | ||
40 | |||
41 | #define CFQ_IDLE_GRACE (HZ / 10) | ||
42 | #define CFQ_SLICE_SCALE (5) | ||
43 | |||
44 | #define CFQ_KEY_ASYNC (0) | ||
45 | #define CFQ_KEY_ANY (0xffff) | ||
46 | |||
47 | /* | ||
48 | * disable queueing at the driver/hardware level | ||
49 | */ | ||
50 | static int cfq_max_depth = 2; | ||
51 | |||
40 | /* | 52 | /* |
41 | * for the hash of cfqq inside the cfqd | 53 | * for the hash of cfqq inside the cfqd |
42 | */ | 54 | */ |
@@ -55,6 +67,7 @@ static int cfq_back_penalty = 2;	/* penalty of a backwards seek */
55 | #define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash) | 67 | #define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash) |
56 | 68 | ||
57 | #define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list) | 69 | #define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list) |
70 | #define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) | ||
58 | 71 | ||
59 | #define RQ_DATA(rq) (rq)->elevator_private | 72 | #define RQ_DATA(rq) (rq)->elevator_private |
60 | 73 | ||
@@ -75,78 +88,110 @@ static int cfq_back_penalty = 2;	/* penalty of a backwards seek */
75 | #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) | 88 | #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) |
76 | #define rq_rb_key(rq) (rq)->sector | 89 | #define rq_rb_key(rq) (rq)->sector |
77 | 90 | ||
78 | /* | ||
79 | * threshold for switching off non-tag accounting | ||
80 | */ | ||
81 | #define CFQ_MAX_TAG (4) | ||
82 | |||
83 | /* | ||
84 | * sort key types and names | ||
85 | */ | ||
86 | enum { | ||
87 | CFQ_KEY_PGID, | ||
88 | CFQ_KEY_TGID, | ||
89 | CFQ_KEY_UID, | ||
90 | CFQ_KEY_GID, | ||
91 | CFQ_KEY_LAST, | ||
92 | }; | ||
93 | |||
94 | static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL }; | ||
95 | |||
96 | static kmem_cache_t *crq_pool; | 91 | static kmem_cache_t *crq_pool; |
97 | static kmem_cache_t *cfq_pool; | 92 | static kmem_cache_t *cfq_pool; |
98 | static kmem_cache_t *cfq_ioc_pool; | 93 | static kmem_cache_t *cfq_ioc_pool; |
99 | 94 | ||
95 | #define CFQ_PRIO_LISTS IOPRIO_BE_NR | ||
96 | #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) | ||
97 | #define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) | ||
98 | #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) | ||
99 | |||
100 | #define ASYNC (0) | ||
101 | #define SYNC (1) | ||
102 | |||
103 | #define cfq_cfqq_dispatched(cfqq) \ | ||
104 | ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC]) | ||
105 | |||
106 | #define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC) | ||
107 | |||
108 | #define cfq_cfqq_sync(cfqq) \ | ||
109 | (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC]) | ||
110 | |||
111 | /* | ||
112 | * Per block device queue structure | ||
113 | */ | ||
100 | struct cfq_data { | 114 | struct cfq_data { |
101 | struct list_head rr_list; | 115 | atomic_t ref; |
116 | request_queue_t *queue; | ||
117 | |||
118 | /* | ||
119 | * rr list of queues with requests and the count of them | ||
120 | */ | ||
121 | struct list_head rr_list[CFQ_PRIO_LISTS]; | ||
122 | struct list_head busy_rr; | ||
123 | struct list_head cur_rr; | ||
124 | struct list_head idle_rr; | ||
125 | unsigned int busy_queues; | ||
126 | |||
127 | /* | ||
128 | * non-ordered list of empty cfqq's | ||
129 | */ | ||
102 | struct list_head empty_list; | 130 | struct list_head empty_list; |
103 | 131 | ||
132 | /* | ||
133 | * cfqq lookup hash | ||
134 | */ | ||
104 | struct hlist_head *cfq_hash; | 135 | struct hlist_head *cfq_hash; |
105 | struct hlist_head *crq_hash; | ||
106 | 136 | ||
107 | /* queues on rr_list (ie they have pending requests */ | 137 | /* |
108 | unsigned int busy_queues; | 138 | * global crq hash for all queues |
139 | */ | ||
140 | struct hlist_head *crq_hash; | ||
109 | 141 | ||
110 | unsigned int max_queued; | 142 | unsigned int max_queued; |
111 | 143 | ||
112 | atomic_t ref; | 144 | mempool_t *crq_pool; |
113 | 145 | ||
114 | int key_type; | 146 | int rq_in_driver; |
115 | 147 | ||
116 | mempool_t *crq_pool; | 148 | /* |
149 | * schedule slice state info | ||
150 | */ | ||
151 | /* | ||
152 | * idle window management | ||
153 | */ | ||
154 | struct timer_list idle_slice_timer; | ||
155 | struct work_struct unplug_work; | ||
117 | 156 | ||
118 | request_queue_t *queue; | 157 | struct cfq_queue *active_queue; |
158 | struct cfq_io_context *active_cic; | ||
159 | int cur_prio, cur_end_prio; | ||
160 | unsigned int dispatch_slice; | ||
161 | |||
162 | struct timer_list idle_class_timer; | ||
119 | 163 | ||
120 | sector_t last_sector; | 164 | sector_t last_sector; |
165 | unsigned long last_end_request; | ||
121 | 166 | ||
122 | int rq_in_driver; | 167 | unsigned int rq_starved; |
123 | 168 | ||
124 | /* | 169 | /* |
125 | * tunables, see top of file | 170 | * tunables, see top of file |
126 | */ | 171 | */ |
127 | unsigned int cfq_quantum; | 172 | unsigned int cfq_quantum; |
128 | unsigned int cfq_queued; | 173 | unsigned int cfq_queued; |
129 | unsigned int cfq_fifo_expire_r; | 174 | unsigned int cfq_fifo_expire[2]; |
130 | unsigned int cfq_fifo_expire_w; | ||
131 | unsigned int cfq_fifo_batch_expire; | ||
132 | unsigned int cfq_back_penalty; | 175 | unsigned int cfq_back_penalty; |
133 | unsigned int cfq_back_max; | 176 | unsigned int cfq_back_max; |
134 | unsigned int find_best_crq; | 177 | unsigned int cfq_slice[2]; |
135 | 178 | unsigned int cfq_slice_async_rq; | |
136 | unsigned int cfq_tagged; | 179 | unsigned int cfq_slice_idle; |
180 | unsigned int cfq_max_depth; | ||
137 | }; | 181 | }; |
138 | 182 | ||
183 | /* | ||
184 | * Per process-grouping structure | ||
185 | */ | ||
139 | struct cfq_queue { | 186 | struct cfq_queue { |
140 | /* reference count */ | 187 | /* reference count */ |
141 | atomic_t ref; | 188 | atomic_t ref; |
142 | /* parent cfq_data */ | 189 | /* parent cfq_data */ |
143 | struct cfq_data *cfqd; | 190 | struct cfq_data *cfqd; |
144 | /* hash of mergeable requests */ | 191 | /* cfqq lookup hash */ |
145 | struct hlist_node cfq_hash; | 192 | struct hlist_node cfq_hash; |
146 | /* hash key */ | 193 | /* hash key */ |
147 | unsigned long key; | 194 | unsigned int key; |
148 | /* whether queue is on rr (or empty) list */ | ||
149 | int on_rr; | ||
150 | /* on either rr or empty list of cfqd */ | 195 | /* on either rr or empty list of cfqd */ |
151 | struct list_head cfq_list; | 196 | struct list_head cfq_list; |
152 | /* sorted list of pending requests */ | 197 | /* sorted list of pending requests */ |
@@ -158,21 +203,22 @@ struct cfq_queue {
158 | /* currently allocated requests */ | 203 | /* currently allocated requests */ |
159 | int allocated[2]; | 204 | int allocated[2]; |
160 | /* fifo list of requests in sort_list */ | 205 | /* fifo list of requests in sort_list */ |
161 | struct list_head fifo[2]; | 206 | struct list_head fifo; |
162 | /* last time fifo expired */ | ||
163 | unsigned long last_fifo_expire; | ||
164 | 207 | ||
165 | int key_type; | 208 | unsigned long slice_start; |
209 | unsigned long slice_end; | ||
210 | unsigned long slice_left; | ||
211 | unsigned long service_last; | ||
166 | 212 | ||
167 | unsigned long service_start; | 213 | /* number of requests that are on the dispatch list */ |
168 | unsigned long service_used; | 214 | int on_dispatch[2]; |
169 | 215 | ||
170 | unsigned int max_rate; | 216 | /* io prio of this group */ |
217 | unsigned short ioprio, org_ioprio; | ||
218 | unsigned short ioprio_class, org_ioprio_class; | ||
171 | 219 | ||
172 | /* number of requests that have been handed to the driver */ | 220 | /* various state flags, see below */ |
173 | int in_flight; | 221 | unsigned int flags; |
174 | /* number of currently allocated requests */ | ||
175 | int alloc_limit[2]; | ||
176 | }; | 222 | }; |
177 | 223 | ||
178 | struct cfq_rq { | 224 | struct cfq_rq { |
@@ -184,42 +230,78 @@ struct cfq_rq {
184 | struct cfq_queue *cfq_queue; | 230 | struct cfq_queue *cfq_queue; |
185 | struct cfq_io_context *io_context; | 231 | struct cfq_io_context *io_context; |
186 | 232 | ||
187 | unsigned long service_start; | 233 | unsigned int crq_flags; |
188 | unsigned long queue_start; | 234 | }; |
235 | |||
236 | enum cfqq_state_flags { | ||
237 | CFQ_CFQQ_FLAG_on_rr = 0, | ||
238 | CFQ_CFQQ_FLAG_wait_request, | ||
239 | CFQ_CFQQ_FLAG_must_alloc, | ||
240 | CFQ_CFQQ_FLAG_must_alloc_slice, | ||
241 | CFQ_CFQQ_FLAG_must_dispatch, | ||
242 | CFQ_CFQQ_FLAG_fifo_expire, | ||
243 | CFQ_CFQQ_FLAG_idle_window, | ||
244 | CFQ_CFQQ_FLAG_prio_changed, | ||
245 | CFQ_CFQQ_FLAG_expired, | ||
246 | }; | ||
189 | 247 | ||
190 | unsigned int in_flight : 1; | 248 | #define CFQ_CFQQ_FNS(name) \ |
191 | unsigned int accounted : 1; | 249 | static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ |
192 | unsigned int is_sync : 1; | 250 | { \ |
193 | unsigned int is_write : 1; | 251 | cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ |
252 | } \ | ||
253 | static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ | ||
254 | { \ | ||
255 | cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ | ||
256 | } \ | ||
257 | static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ | ||
258 | { \ | ||
259 | return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ | ||
260 | } | ||
261 | |||
262 | CFQ_CFQQ_FNS(on_rr); | ||
263 | CFQ_CFQQ_FNS(wait_request); | ||
264 | CFQ_CFQQ_FNS(must_alloc); | ||
265 | CFQ_CFQQ_FNS(must_alloc_slice); | ||
266 | CFQ_CFQQ_FNS(must_dispatch); | ||
267 | CFQ_CFQQ_FNS(fifo_expire); | ||
268 | CFQ_CFQQ_FNS(idle_window); | ||
269 | CFQ_CFQQ_FNS(prio_changed); | ||
270 | CFQ_CFQQ_FNS(expired); | ||
271 | #undef CFQ_CFQQ_FNS | ||
272 | |||
273 | enum cfq_rq_state_flags { | ||
274 | CFQ_CRQ_FLAG_in_flight = 0, | ||
275 | CFQ_CRQ_FLAG_in_driver, | ||
276 | CFQ_CRQ_FLAG_is_sync, | ||
277 | CFQ_CRQ_FLAG_requeued, | ||
194 | }; | 278 | }; |
195 | 279 | ||
196 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long); | 280 | #define CFQ_CRQ_FNS(name) \ |
281 | static inline void cfq_mark_crq_##name(struct cfq_rq *crq) \ | ||
282 | { \ | ||
283 | crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name); \ | ||
284 | } \ | ||
285 | static inline void cfq_clear_crq_##name(struct cfq_rq *crq) \ | ||
286 | { \ | ||
287 | crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name); \ | ||
288 | } \ | ||
289 | static inline int cfq_crq_##name(const struct cfq_rq *crq) \ | ||
290 | { \ | ||
291 | return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \ | ||
292 | } | ||
293 | |||
294 | CFQ_CRQ_FNS(in_flight); | ||
295 | CFQ_CRQ_FNS(in_driver); | ||
296 | CFQ_CRQ_FNS(is_sync); | ||
297 | CFQ_CRQ_FNS(requeued); | ||
298 | #undef CFQ_CRQ_FNS | ||
299 | |||
300 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); | ||
197 | static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *); | 301 | static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *); |
198 | static void cfq_update_next_crq(struct cfq_rq *); | ||
199 | static void cfq_put_cfqd(struct cfq_data *cfqd); | 302 | static void cfq_put_cfqd(struct cfq_data *cfqd); |
200 | 303 | ||
201 | /* | 304 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) |
202 | * what the fairness is based on (ie how processes are grouped and | ||
203 | * differentiated) | ||
204 | */ | ||
205 | static inline unsigned long | ||
206 | cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk) | ||
207 | { | ||
208 | /* | ||
209 | * optimize this so that ->key_type is the offset into the struct | ||
210 | */ | ||
211 | switch (cfqd->key_type) { | ||
212 | case CFQ_KEY_PGID: | ||
213 | return process_group(tsk); | ||
214 | default: | ||
215 | case CFQ_KEY_TGID: | ||
216 | return tsk->tgid; | ||
217 | case CFQ_KEY_UID: | ||
218 | return tsk->uid; | ||
219 | case CFQ_KEY_GID: | ||
220 | return tsk->gid; | ||
221 | } | ||
222 | } | ||
223 | 305 | ||
224 | /* | 306 | /* |
225 | * lots of deadline iosched dupes, can be abstracted later... | 307 | * lots of deadline iosched dupes, can be abstracted later... |
@@ -235,16 +317,12 @@ static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
235 | 317 | ||
236 | if (q->last_merge == crq->request) | 318 | if (q->last_merge == crq->request) |
237 | q->last_merge = NULL; | 319 | q->last_merge = NULL; |
238 | |||
239 | cfq_update_next_crq(crq); | ||
240 | } | 320 | } |
241 | 321 | ||
242 | static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) | 322 | static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) |
243 | { | 323 | { |
244 | const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); | 324 | const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); |
245 | 325 | ||
246 | BUG_ON(!hlist_unhashed(&crq->hash)); | ||
247 | |||
248 | hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]); | 326 | hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]); |
249 | } | 327 | } |
250 | 328 | ||
@@ -257,8 +335,6 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
257 | struct cfq_rq *crq = list_entry_hash(entry); | 335 | struct cfq_rq *crq = list_entry_hash(entry); |
258 | struct request *__rq = crq->request; | 336 | struct request *__rq = crq->request; |
259 | 337 | ||
260 | BUG_ON(hlist_unhashed(&crq->hash)); | ||
261 | |||
262 | if (!rq_mergeable(__rq)) { | 338 | if (!rq_mergeable(__rq)) { |
263 | cfq_del_crq_hash(crq); | 339 | cfq_del_crq_hash(crq); |
264 | continue; | 340 | continue; |
@@ -271,6 +347,28 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
271 | return NULL; | 347 | return NULL; |
272 | } | 348 | } |
273 | 349 | ||
350 | static inline int cfq_pending_requests(struct cfq_data *cfqd) | ||
351 | { | ||
352 | return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues; | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * scheduler run of queue, if there are requests pending and no one in the | ||
357 | * driver that will restart queueing | ||
358 | */ | ||
359 | static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) | ||
360 | { | ||
361 | if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd)) | ||
362 | kblockd_schedule_work(&cfqd->unplug_work); | ||
363 | } | ||
364 | |||
365 | static int cfq_queue_empty(request_queue_t *q) | ||
366 | { | ||
367 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
368 | |||
369 | return !cfq_pending_requests(cfqd); | ||
370 | } | ||
371 | |||
274 | /* | 372 | /* |
275 | * Lifted from AS - choose which of crq1 and crq2 that is best served now. | 373 | * Lifted from AS - choose which of crq1 and crq2 that is best served now. |
276 | * We choose the request that is closest to the head right now. Distance | 374 | * We choose the request that is closest to the head right now. Distance |
@@ -288,35 +386,21 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
288 | if (crq2 == NULL) | 386 | if (crq2 == NULL) |
289 | return crq1; | 387 | return crq1; |
290 | 388 | ||
389 | if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2)) | ||
390 | return crq1; | ||
391 | else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1)) | ||
392 | return crq2; | ||
393 | |||
394 | if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2)) | ||
395 | return crq1; | ||
396 | else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1)) | ||
397 | return crq2; | ||
398 | |||
291 | s1 = crq1->request->sector; | 399 | s1 = crq1->request->sector; |
292 | s2 = crq2->request->sector; | 400 | s2 = crq2->request->sector; |
293 | 401 | ||
294 | last = cfqd->last_sector; | 402 | last = cfqd->last_sector; |
295 | 403 | ||
296 | #if 0 | ||
297 | if (!list_empty(&cfqd->queue->queue_head)) { | ||
298 | struct list_head *entry = &cfqd->queue->queue_head; | ||
299 | unsigned long distance = ~0UL; | ||
300 | struct request *rq; | ||
301 | |||
302 | while ((entry = entry->prev) != &cfqd->queue->queue_head) { | ||
303 | rq = list_entry_rq(entry); | ||
304 | |||
305 | if (blk_barrier_rq(rq)) | ||
306 | break; | ||
307 | |||
308 | if (distance < abs(s1 - rq->sector + rq->nr_sectors)) { | ||
309 | distance = abs(s1 - rq->sector +rq->nr_sectors); | ||
310 | last = rq->sector + rq->nr_sectors; | ||
311 | } | ||
312 | if (distance < abs(s2 - rq->sector + rq->nr_sectors)) { | ||
313 | distance = abs(s2 - rq->sector +rq->nr_sectors); | ||
314 | last = rq->sector + rq->nr_sectors; | ||
315 | } | ||
316 | } | ||
317 | } | ||
318 | #endif | ||
319 | |||
320 | /* | 404 | /* |
321 | * by definition, 1KiB is 2 sectors | 405 | * by definition, 1KiB is 2 sectors |
322 | */ | 406 | */ |
@@ -377,11 +461,14 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
377 | struct cfq_rq *crq_next = NULL, *crq_prev = NULL; | 461 | struct cfq_rq *crq_next = NULL, *crq_prev = NULL; |
378 | struct rb_node *rbnext, *rbprev; | 462 | struct rb_node *rbnext, *rbprev; |
379 | 463 | ||
380 | if (!ON_RB(&last->rb_node)) | 464 | rbnext = NULL; |
381 | return NULL; | 465 | if (ON_RB(&last->rb_node)) |
382 | 466 | rbnext = rb_next(&last->rb_node); | |
383 | if ((rbnext = rb_next(&last->rb_node)) == NULL) | 467 | if (!rbnext) { |
384 | rbnext = rb_first(&cfqq->sort_list); | 468 | rbnext = rb_first(&cfqq->sort_list); |
469 | if (rbnext == &last->rb_node) | ||
470 | rbnext = NULL; | ||
471 | } | ||
385 | 472 | ||
386 | rbprev = rb_prev(&last->rb_node); | 473 | rbprev = rb_prev(&last->rb_node); |
387 | 474 | ||
@@ -401,67 +488,53 @@ static void cfq_update_next_crq(struct cfq_rq *crq)
401 | cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq); | 488 | cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq); |
402 | } | 489 | } |
403 | 490 | ||
404 | static int cfq_check_sort_rr_list(struct cfq_queue *cfqq) | 491 | static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted) |
405 | { | 492 | { |
406 | struct list_head *head = &cfqq->cfqd->rr_list; | 493 | struct cfq_data *cfqd = cfqq->cfqd; |
407 | struct list_head *next, *prev; | 494 | struct list_head *list, *entry; |
408 | |||
409 | /* | ||
410 | * list might still be ordered | ||
411 | */ | ||
412 | next = cfqq->cfq_list.next; | ||
413 | if (next != head) { | ||
414 | struct cfq_queue *cnext = list_entry_cfqq(next); | ||
415 | 495 | ||
416 | if (cfqq->service_used > cnext->service_used) | 496 | BUG_ON(!cfq_cfqq_on_rr(cfqq)); |
417 | return 1; | ||
418 | } | ||
419 | 497 | ||
420 | prev = cfqq->cfq_list.prev; | 498 | list_del(&cfqq->cfq_list); |
421 | if (prev != head) { | ||
422 | struct cfq_queue *cprev = list_entry_cfqq(prev); | ||
423 | 499 | ||
424 | if (cfqq->service_used < cprev->service_used) | 500 | if (cfq_class_rt(cfqq)) |
425 | return 1; | 501 | list = &cfqd->cur_rr; |
502 | else if (cfq_class_idle(cfqq)) | ||
503 | list = &cfqd->idle_rr; | ||
504 | else { | ||
505 | /* | ||
506 | * if cfqq has requests in flight, don't allow it to be | ||
507 | * found in cfq_set_active_queue before it has finished them. | ||
508 | * this is done to increase fairness between a process that | ||
509 | * has lots of io pending vs one that only generates one | ||
510 | * sporadically or synchronously | ||
511 | */ | ||
512 | if (cfq_cfqq_dispatched(cfqq)) | ||
513 | list = &cfqd->busy_rr; | ||
514 | else | ||
515 | list = &cfqd->rr_list[cfqq->ioprio]; | ||
426 | } | 516 | } |
427 | 517 | ||
428 | return 0; | 518 | /* |
429 | } | 519 | * if queue was preempted, just add to front to be fair. busy_rr |
430 | 520 | * isn't sorted. | |
431 | static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue) | 521 | */ |
432 | { | 522 | if (preempted || list == &cfqd->busy_rr) { |
433 | struct list_head *entry = &cfqq->cfqd->rr_list; | 523 | list_add(&cfqq->cfq_list, list); |
434 | |||
435 | if (!cfqq->on_rr) | ||
436 | return; | ||
437 | if (!new_queue && !cfq_check_sort_rr_list(cfqq)) | ||
438 | return; | 524 | return; |
439 | 525 | } | |
440 | list_del(&cfqq->cfq_list); | ||
441 | 526 | ||
442 | /* | 527 | /* |
443 | * sort by our mean service_used, sub-sort by in-flight requests | 528 | * sort by when queue was last serviced |
444 | */ | 529 | */ |
445 | while ((entry = entry->prev) != &cfqq->cfqd->rr_list) { | 530 | entry = list; |
531 | while ((entry = entry->prev) != list) { | ||
446 | struct cfq_queue *__cfqq = list_entry_cfqq(entry); | 532 | struct cfq_queue *__cfqq = list_entry_cfqq(entry); |
447 | 533 | ||
448 | if (cfqq->service_used > __cfqq->service_used) | 534 | if (!__cfqq->service_last) |
535 | break; | ||
536 | if (time_before(__cfqq->service_last, cfqq->service_last)) | ||
449 | break; | 537 | break; |
450 | else if (cfqq->service_used == __cfqq->service_used) { | ||
451 | struct list_head *prv; | ||
452 | |||
453 | while ((prv = entry->prev) != &cfqq->cfqd->rr_list) { | ||
454 | __cfqq = list_entry_cfqq(prv); | ||
455 | |||
456 | WARN_ON(__cfqq->service_used > cfqq->service_used); | ||
457 | if (cfqq->service_used != __cfqq->service_used) | ||
458 | break; | ||
459 | if (cfqq->in_flight > __cfqq->in_flight) | ||
460 | break; | ||
461 | |||
462 | entry = prv; | ||
463 | } | ||
464 | } | ||
465 | } | 538 | } |
466 | 539 | ||
467 | list_add(&cfqq->cfq_list, entry); | 540 | list_add(&cfqq->cfq_list, entry); |
@@ -469,28 +542,24 @@ static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
469 | 542 | ||
470 | /* | 543 | /* |
471 | * add to busy list of queues for service, trying to be fair in ordering | 544 | * add to busy list of queues for service, trying to be fair in ordering |
472 | * the pending list according to requests serviced | 545 | * the pending list according to last request service |
473 | */ | 546 | */ |
474 | static inline void | 547 | static inline void |
475 | cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 548 | cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue) |
476 | { | 549 | { |
477 | /* | 550 | BUG_ON(cfq_cfqq_on_rr(cfqq)); |
478 | * it's currently on the empty list | 551 | cfq_mark_cfqq_on_rr(cfqq); |
479 | */ | ||
480 | cfqq->on_rr = 1; | ||
481 | cfqd->busy_queues++; | 552 | cfqd->busy_queues++; |
482 | 553 | ||
483 | if (time_after(jiffies, cfqq->service_start + cfq_service)) | 554 | cfq_resort_rr_list(cfqq, requeue); |
484 | cfqq->service_used >>= 3; | ||
485 | |||
486 | cfq_sort_rr_list(cfqq, 1); | ||
487 | } | 555 | } |
488 | 556 | ||
489 | static inline void | 557 | static inline void |
490 | cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 558 | cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
491 | { | 559 | { |
560 | BUG_ON(!cfq_cfqq_on_rr(cfqq)); | ||
561 | cfq_clear_cfqq_on_rr(cfqq); | ||
492 | list_move(&cfqq->cfq_list, &cfqd->empty_list); | 562 | list_move(&cfqq->cfq_list, &cfqd->empty_list); |
493 | cfqq->on_rr = 0; | ||
494 | 563 | ||
495 | BUG_ON(!cfqd->busy_queues); | 564 | BUG_ON(!cfqd->busy_queues); |
496 | cfqd->busy_queues--; | 565 | cfqd->busy_queues--; |
@@ -505,16 +574,17 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
505 | 574 | ||
506 | if (ON_RB(&crq->rb_node)) { | 575 | if (ON_RB(&crq->rb_node)) { |
507 | struct cfq_data *cfqd = cfqq->cfqd; | 576 | struct cfq_data *cfqd = cfqq->cfqd; |
577 | const int sync = cfq_crq_is_sync(crq); | ||
508 | 578 | ||
509 | BUG_ON(!cfqq->queued[crq->is_sync]); | 579 | BUG_ON(!cfqq->queued[sync]); |
580 | cfqq->queued[sync]--; | ||
510 | 581 | ||
511 | cfq_update_next_crq(crq); | 582 | cfq_update_next_crq(crq); |
512 | 583 | ||
513 | cfqq->queued[crq->is_sync]--; | ||
514 | rb_erase(&crq->rb_node, &cfqq->sort_list); | 584 | rb_erase(&crq->rb_node, &cfqq->sort_list); |
515 | RB_CLEAR_COLOR(&crq->rb_node); | 585 | RB_CLEAR_COLOR(&crq->rb_node); |
516 | 586 | ||
517 | if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr) | 587 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) |
518 | cfq_del_cfqq_rr(cfqd, cfqq); | 588 | cfq_del_cfqq_rr(cfqd, cfqq); |
519 | } | 589 | } |
520 | } | 590 | } |
@@ -550,7 +620,7 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
550 | struct cfq_rq *__alias; | 620 | struct cfq_rq *__alias; |
551 | 621 | ||
552 | crq->rb_key = rq_rb_key(rq); | 622 | crq->rb_key = rq_rb_key(rq); |
553 | cfqq->queued[crq->is_sync]++; | 623 | cfqq->queued[cfq_crq_is_sync(crq)]++; |
554 | 624 | ||
555 | /* | 625 | /* |
556 | * looks a little odd, but the first insert might return an alias. | 626 | * looks a little odd, but the first insert might return an alias. |
@@ -561,8 +631,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
561 | 631 | ||
562 | rb_insert_color(&crq->rb_node, &cfqq->sort_list); | 632 | rb_insert_color(&crq->rb_node, &cfqq->sort_list); |
563 | 633 | ||
564 | if (!cfqq->on_rr) | 634 | if (!cfq_cfqq_on_rr(cfqq)) |
565 | cfq_add_cfqq_rr(cfqd, cfqq); | 635 | cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq)); |
566 | 636 | ||
567 | /* | 637 | /* |
568 | * check if this request is a better next-serve candidate | 638 | * check if this request is a better next-serve candidate |
@@ -575,17 +645,16 @@ cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
575 | { | 645 | { |
576 | if (ON_RB(&crq->rb_node)) { | 646 | if (ON_RB(&crq->rb_node)) { |
577 | rb_erase(&crq->rb_node, &cfqq->sort_list); | 647 | rb_erase(&crq->rb_node, &cfqq->sort_list); |
578 | cfqq->queued[crq->is_sync]--; | 648 | cfqq->queued[cfq_crq_is_sync(crq)]--; |
579 | } | 649 | } |
580 | 650 | ||
581 | cfq_add_crq_rb(crq); | 651 | cfq_add_crq_rb(crq); |
582 | } | 652 | } |
583 | 653 | ||
584 | static struct request * | 654 | static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) |
585 | cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) | 655 | |
586 | { | 656 | { |
587 | const unsigned long key = cfq_hash_key(cfqd, current); | 657 | struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY); |
588 | struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key); | ||
589 | struct rb_node *n; | 658 | struct rb_node *n; |
590 | 659 | ||
591 | if (!cfqq) | 660 | if (!cfqq) |
@@ -609,20 +678,25 @@ out:
609 | 678 | ||
610 | static void cfq_deactivate_request(request_queue_t *q, struct request *rq) | 679 | static void cfq_deactivate_request(request_queue_t *q, struct request *rq) |
611 | { | 680 | { |
681 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
612 | struct cfq_rq *crq = RQ_DATA(rq); | 682 | struct cfq_rq *crq = RQ_DATA(rq); |
613 | 683 | ||
614 | if (crq) { | 684 | if (crq) { |
615 | struct cfq_queue *cfqq = crq->cfq_queue; | 685 | struct cfq_queue *cfqq = crq->cfq_queue; |
616 | 686 | ||
617 | if (cfqq->cfqd->cfq_tagged) { | 687 | if (cfq_crq_in_driver(crq)) { |
618 | cfqq->service_used--; | 688 | cfq_clear_crq_in_driver(crq); |
619 | cfq_sort_rr_list(cfqq, 0); | 689 | WARN_ON(!cfqd->rq_in_driver); |
690 | cfqd->rq_in_driver--; | ||
620 | } | 691 | } |
692 | if (cfq_crq_in_flight(crq)) { | ||
693 | const int sync = cfq_crq_is_sync(crq); | ||
621 | 694 | ||
622 | if (crq->accounted) { | 695 | cfq_clear_crq_in_flight(crq); |
623 | crq->accounted = 0; | 696 | WARN_ON(!cfqq->on_dispatch[sync]); |
624 | cfqq->cfqd->rq_in_driver--; | 697 | cfqq->on_dispatch[sync]--; |
625 | } | 698 | } |
699 | cfq_mark_crq_requeued(crq); | ||
626 | } | 700 | } |
627 | } | 701 | } |
628 | 702 | ||
@@ -640,11 +714,10 @@ static void cfq_remove_request(request_queue_t *q, struct request *rq)
640 | struct cfq_rq *crq = RQ_DATA(rq); | 714 | struct cfq_rq *crq = RQ_DATA(rq); |
641 | 715 | ||
642 | if (crq) { | 716 | if (crq) { |
643 | cfq_remove_merge_hints(q, crq); | ||
644 | list_del_init(&rq->queuelist); | 717 | list_del_init(&rq->queuelist); |
718 | cfq_del_crq_rb(crq); | ||
719 | cfq_remove_merge_hints(q, crq); | ||
645 | 720 | ||
646 | if (crq->cfq_queue) | ||
647 | cfq_del_crq_rb(crq); | ||
648 | } | 721 | } |
649 | } | 722 | } |
650 | 723 | ||
@@ -662,21 +735,15 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
662 | } | 735 | } |
663 | 736 | ||
664 | __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); | 737 | __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); |
665 | if (__rq) { | 738 | if (__rq && elv_rq_merge_ok(__rq, bio)) { |
666 | BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector); | 739 | ret = ELEVATOR_BACK_MERGE; |
667 | 740 | goto out; | |
668 | if (elv_rq_merge_ok(__rq, bio)) { | ||
669 | ret = ELEVATOR_BACK_MERGE; | ||
670 | goto out; | ||
671 | } | ||
672 | } | 741 | } |
673 | 742 | ||
674 | __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); | 743 | __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); |
675 | if (__rq) { | 744 | if (__rq && elv_rq_merge_ok(__rq, bio)) { |
676 | if (elv_rq_merge_ok(__rq, bio)) { | 745 | ret = ELEVATOR_FRONT_MERGE; |
677 | ret = ELEVATOR_FRONT_MERGE; | 746 | goto out; |
678 | goto out; | ||
679 | } | ||
680 | } | 747 | } |
681 | 748 | ||
682 | return ELEVATOR_NO_MERGE; | 749 | return ELEVATOR_NO_MERGE; |
@@ -709,20 +776,220 @@ static void
709 | cfq_merged_requests(request_queue_t *q, struct request *rq, | 776 | cfq_merged_requests(request_queue_t *q, struct request *rq, |
710 | struct request *next) | 777 | struct request *next) |
711 | { | 778 | { |
712 | struct cfq_rq *crq = RQ_DATA(rq); | ||
713 | struct cfq_rq *cnext = RQ_DATA(next); | ||
714 | |||
715 | cfq_merged_request(q, rq); | 779 | cfq_merged_request(q, rq); |
716 | 780 | ||
717 | if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { | 781 | /* |
718 | if (time_before(cnext->queue_start, crq->queue_start)) { | 782 | * reposition in fifo if next is older than rq |
719 | list_move(&rq->queuelist, &next->queuelist); | 783 | */ |
720 | crq->queue_start = cnext->queue_start; | 784 | if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && |
785 | time_before(next->start_time, rq->start_time)) | ||
786 | list_move(&rq->queuelist, &next->queuelist); | ||
787 | |||
788 | cfq_remove_request(q, next); | ||
789 | } | ||
790 | |||
791 | static inline void | ||
792 | __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
793 | { | ||
794 | if (cfqq) { | ||
795 | /* | ||
796 | * stop potential idle class queues waiting service | ||
797 | */ | ||
798 | del_timer(&cfqd->idle_class_timer); | ||
799 | |||
800 | cfqq->slice_start = jiffies; | ||
801 | cfqq->slice_end = 0; | ||
802 | cfqq->slice_left = 0; | ||
803 | cfq_clear_cfqq_must_alloc_slice(cfqq); | ||
804 | cfq_clear_cfqq_fifo_expire(cfqq); | ||
805 | cfq_clear_cfqq_expired(cfqq); | ||
806 | } | ||
807 | |||
808 | cfqd->active_queue = cfqq; | ||
809 | } | ||
810 | |||
811 | /* | ||
812 | * 0 | ||
813 | * 0,1 | ||
814 | * 0,1,2 | ||
815 | * 0,1,2,3 | ||
816 | * 0,1,2,3,4 | ||
817 | * 0,1,2,3,4,5 | ||
818 | * 0,1,2,3,4,5,6 | ||
819 | * 0,1,2,3,4,5,6,7 | ||
820 | */ | ||
821 | static int cfq_get_next_prio_level(struct cfq_data *cfqd) | ||
822 | { | ||
823 | int prio, wrap; | ||
824 | |||
825 | prio = -1; | ||
826 | wrap = 0; | ||
827 | do { | ||
828 | int p; | ||
829 | |||
830 | for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) { | ||
831 | if (!list_empty(&cfqd->rr_list[p])) { | ||
832 | prio = p; | ||
833 | break; | ||
834 | } | ||
835 | } | ||
836 | |||
837 | if (prio != -1) | ||
838 | break; | ||
839 | cfqd->cur_prio = 0; | ||
840 | if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) { | ||
841 | cfqd->cur_end_prio = 0; | ||
842 | if (wrap) | ||
843 | break; | ||
844 | wrap = 1; | ||
721 | } | 845 | } |
846 | } while (1); | ||
847 | |||
848 | if (unlikely(prio == -1)) | ||
849 | return -1; | ||
850 | |||
851 | BUG_ON(prio >= CFQ_PRIO_LISTS); | ||
852 | |||
853 | list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr); | ||
854 | |||
855 | cfqd->cur_prio = prio + 1; | ||
856 | if (cfqd->cur_prio > cfqd->cur_end_prio) { | ||
857 | cfqd->cur_end_prio = cfqd->cur_prio; | ||
858 | cfqd->cur_prio = 0; | ||
859 | } | ||
860 | if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) { | ||
861 | cfqd->cur_prio = 0; | ||
862 | cfqd->cur_end_prio = 0; | ||
722 | } | 863 | } |
723 | 864 | ||
724 | cfq_update_next_crq(cnext); | 865 | return prio; |
725 | cfq_remove_request(q, next); | 866 | } |
867 | |||
868 | static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) | ||
869 | { | ||
870 | struct cfq_queue *cfqq; | ||
871 | |||
872 | /* | ||
873 | * if current queue is expired but not done with its requests yet, | ||
874 | * wait for that to happen | ||
875 | */ | ||
876 | if ((cfqq = cfqd->active_queue) != NULL) { | ||
877 | if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq)) | ||
878 | return NULL; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * if current list is non-empty, grab first entry. if it is empty, | ||
883 | * get next prio level and grab first entry then if any are spliced | ||
884 | */ | ||
885 | if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) | ||
886 | cfqq = list_entry_cfqq(cfqd->cur_rr.next); | ||
887 | |||
888 | /* | ||
889 | * if we have idle queues and no rt or be queues had pending | ||
890 | * requests, either allow immediate service if the grace period | ||
891 | * has passed or arm the idle grace timer | ||
892 | */ | ||
893 | if (!cfqq && !list_empty(&cfqd->idle_rr)) { | ||
894 | unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE; | ||
895 | |||
896 | if (time_after_eq(jiffies, end)) | ||
897 | cfqq = list_entry_cfqq(cfqd->idle_rr.next); | ||
898 | else | ||
899 | mod_timer(&cfqd->idle_class_timer, end); | ||
900 | } | ||
901 | |||
902 | __cfq_set_active_queue(cfqd, cfqq); | ||
903 | return cfqq; | ||
904 | } | ||
905 | |||
906 | /* | ||
907 | * current cfqq expired its slice (or was too idle), select new one | ||
908 | */ | ||
909 | static void | ||
910 | __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
911 | int preempted) | ||
912 | { | ||
913 | unsigned long now = jiffies; | ||
914 | |||
915 | if (cfq_cfqq_wait_request(cfqq)) | ||
916 | del_timer(&cfqd->idle_slice_timer); | ||
917 | |||
918 | if (!preempted && !cfq_cfqq_dispatched(cfqq)) | ||
919 | cfqq->service_last = now; | ||
920 | |||
921 | cfq_clear_cfqq_must_dispatch(cfqq); | ||
922 | cfq_clear_cfqq_wait_request(cfqq); | ||
923 | |||
924 | /* | ||
925 | * store what was left of this slice, if the queue idled out | ||
926 | * or was preempted | ||
927 | */ | ||
928 | if (time_after(now, cfqq->slice_end)) | ||
929 | cfqq->slice_left = now - cfqq->slice_end; | ||
930 | else | ||
931 | cfqq->slice_left = 0; | ||
932 | |||
933 | if (cfq_cfqq_on_rr(cfqq)) | ||
934 | cfq_resort_rr_list(cfqq, preempted); | ||
935 | |||
936 | if (cfqq == cfqd->active_queue) | ||
937 | cfqd->active_queue = NULL; | ||
938 | |||
939 | if (cfqd->active_cic) { | ||
940 | put_io_context(cfqd->active_cic->ioc); | ||
941 | cfqd->active_cic = NULL; | ||
942 | } | ||
943 | |||
944 | cfqd->dispatch_slice = 0; | ||
945 | } | ||
946 | |||
947 | static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted) | ||
948 | { | ||
949 | struct cfq_queue *cfqq = cfqd->active_queue; | ||
950 | |||
951 | if (cfqq) { | ||
952 | /* | ||
953 | * use deferred expiry, if there are requests in progress as | ||
954 | * not to disturb the slice of the next queue | ||
955 | */ | ||
956 | if (cfq_cfqq_dispatched(cfqq)) | ||
957 | cfq_mark_cfqq_expired(cfqq); | ||
958 | else | ||
959 | __cfq_slice_expired(cfqd, cfqq, preempted); | ||
960 | } | ||
961 | } | ||
962 | |||
963 | static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
964 | |||
965 | { | ||
966 | WARN_ON(!RB_EMPTY(&cfqq->sort_list)); | ||
967 | WARN_ON(cfqq != cfqd->active_queue); | ||
968 | |||
969 | /* | ||
970 | * idle is disabled, either manually or by past process history | ||
971 | */ | ||
972 | if (!cfqd->cfq_slice_idle) | ||
973 | return 0; | ||
974 | if (!cfq_cfqq_idle_window(cfqq)) | ||
975 | return 0; | ||
976 | /* | ||
977 | * task has exited, don't wait | ||
978 | */ | ||
979 | if (cfqd->active_cic && !cfqd->active_cic->ioc->task) | ||
980 | return 0; | ||
981 | |||
982 | cfq_mark_cfqq_must_dispatch(cfqq); | ||
983 | cfq_mark_cfqq_wait_request(cfqq); | ||
984 | |||
985 | if (!timer_pending(&cfqd->idle_slice_timer)) { | ||
986 | unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); | ||
987 | |||
988 | cfqd->idle_slice_timer.expires = jiffies + slice_left; | ||
989 | add_timer(&cfqd->idle_slice_timer); | ||
990 | } | ||
991 | |||
992 | return 1; | ||
726 | } | 993 | } |
727 | 994 | ||
728 | /* | 995 | /* |
@@ -738,31 +1005,40 @@ static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq) | |||
738 | struct request *__rq; | 1005 | struct request *__rq; |
739 | sector_t last; | 1006 | sector_t last; |
740 | 1007 | ||
741 | cfq_del_crq_rb(crq); | ||
742 | cfq_remove_merge_hints(q, crq); | ||
743 | list_del(&crq->request->queuelist); | 1008 | list_del(&crq->request->queuelist); |
744 | 1009 | ||
745 | last = cfqd->last_sector; | 1010 | last = cfqd->last_sector; |
746 | while ((entry = entry->prev) != head) { | 1011 | list_for_each_entry_reverse(__rq, head, queuelist) { |
747 | __rq = list_entry_rq(entry); | 1012 | struct cfq_rq *__crq = RQ_DATA(__rq); |
748 | 1013 | ||
749 | if (blk_barrier_rq(crq->request)) | 1014 | if (blk_barrier_rq(__rq)) |
750 | break; | 1015 | break; |
751 | if (!blk_fs_request(crq->request)) | 1016 | if (!blk_fs_request(__rq)) |
1017 | break; | ||
1018 | if (cfq_crq_requeued(__crq)) | ||
752 | break; | 1019 | break; |
753 | 1020 | ||
754 | if (crq->request->sector > __rq->sector) | 1021 | if (__rq->sector <= crq->request->sector) |
755 | break; | 1022 | break; |
756 | if (__rq->sector > last && crq->request->sector < last) { | 1023 | if (__rq->sector > last && crq->request->sector < last) { |
757 | last = crq->request->sector; | 1024 | last = crq->request->sector + crq->request->nr_sectors; |
758 | break; | 1025 | break; |
759 | } | 1026 | } |
1027 | entry = &__rq->queuelist; | ||
760 | } | 1028 | } |
761 | 1029 | ||
762 | cfqd->last_sector = last; | 1030 | cfqd->last_sector = last; |
763 | crq->in_flight = 1; | 1031 | |
764 | cfqq->in_flight++; | 1032 | cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); |
765 | list_add(&crq->request->queuelist, entry); | 1033 | |
1034 | cfq_del_crq_rb(crq); | ||
1035 | cfq_remove_merge_hints(q, crq); | ||
1036 | |||
1037 | cfq_mark_crq_in_flight(crq); | ||
1038 | cfq_clear_crq_requeued(crq); | ||
1039 | |||
1040 | cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; | ||
1041 | list_add_tail(&crq->request->queuelist, entry); | ||
766 | } | 1042 | } |
767 | 1043 | ||
768 | /* | 1044 | /* |
@@ -771,173 +1047,225 @@ static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq) | |||
771 | static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq) | 1047 | static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq) |
772 | { | 1048 | { |
773 | struct cfq_data *cfqd = cfqq->cfqd; | 1049 | struct cfq_data *cfqd = cfqq->cfqd; |
774 | const int reads = !list_empty(&cfqq->fifo[0]); | 1050 | struct request *rq; |
775 | const int writes = !list_empty(&cfqq->fifo[1]); | ||
776 | unsigned long now = jiffies; | ||
777 | struct cfq_rq *crq; | 1051 | struct cfq_rq *crq; |
778 | 1052 | ||
779 | if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire)) | 1053 | if (cfq_cfqq_fifo_expire(cfqq)) |
780 | return NULL; | 1054 | return NULL; |
781 | 1055 | ||
782 | crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist)); | 1056 | if (!list_empty(&cfqq->fifo)) { |
783 | if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) { | 1057 | int fifo = cfq_cfqq_class_sync(cfqq); |
784 | cfqq->last_fifo_expire = now; | ||
785 | return crq; | ||
786 | } | ||
787 | 1058 | ||
788 | crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist)); | 1059 | crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next)); |
789 | if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) { | 1060 | rq = crq->request; |
790 | cfqq->last_fifo_expire = now; | 1061 | if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) { |
791 | return crq; | 1062 | cfq_mark_cfqq_fifo_expire(cfqq); |
1063 | return crq; | ||
1064 | } | ||
792 | } | 1065 | } |
793 | 1066 | ||
794 | return NULL; | 1067 | return NULL; |
795 | } | 1068 | } |
796 | 1069 | ||
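With the single per-queue FIFO above, expiry is judged against rq->start_time using a class-specific timeout (cfq_fifo_expire[] indexed by the queue's sync/async class), and the fifo_expire flag limits it to one FIFO expiry per slice. The core timeout test in isolation, as a plain C sketch with made-up parameter names:

    /*
     * The oldest request "expires" once it has waited longer than its
     * class's allowance. The signed subtraction mimics the wrap-safe
     * comparison done by time_after().
     */
    static int fifo_request_expired(unsigned long now, unsigned long queued_at,
                                    unsigned long expire_async,
                                    unsigned long expire_sync, int is_sync)
    {
            unsigned long timeout = is_sync ? expire_sync : expire_async;

            return (long)(now - (queued_at + timeout)) > 0;
    }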
797 | /* | 1070 | /* |
798 | * dispatch a single request from given queue | 1071 | * Scale schedule slice based on io priority. Use the sync time slice only |
1072 | * if a queue is marked sync and has sync io queued. A sync queue with async | ||
1073 | * io only should not get the full sync slice length. | ||
799 | */ | 1074 | */ |
1075 | static inline int | ||
1076 | cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
1077 | { | ||
1078 | const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)]; | ||
1079 | |||
1080 | WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); | ||
1081 | |||
1082 | return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio)); | ||
1083 | } | ||
1084 | |||
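The formula above anchors priority 4 at exactly the base slice and moves in steps of base_slice/CFQ_SLICE_SCALE to either side of it. A worked example under assumed values (a base slice of 100 and a scale of 4 are for illustration only, not taken from this hunk):

    /*
     * slice(prio) = base + (base / scale) * (4 - prio)
     * With base_slice = 100 and scale = 4 the step is 25, so:
     *   prio 0 -> 200, prio 4 -> 100, prio 7 -> 25
     */
    static inline int prio_to_slice(int base_slice, int scale, int ioprio)
    {
            return base_slice + (base_slice / scale) * (4 - ioprio);
    }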
800 | static inline void | 1085 | static inline void |
801 | cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd, | 1086 | cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
802 | struct cfq_queue *cfqq) | ||
803 | { | 1087 | { |
804 | struct cfq_rq *crq; | 1088 | cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; |
1089 | } | ||
1090 | |||
1091 | static inline int | ||
1092 | cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
1093 | { | ||
1094 | const int base_rq = cfqd->cfq_slice_async_rq; | ||
1095 | |||
1096 | WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); | ||
1097 | |||
1098 | return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); | ||
1099 | } | ||
1100 | |||
1101 | /* | ||
1102 | * get next queue for service | ||
1103 | */ | ||
1104 | static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force) | ||
1105 | { | ||
1106 | unsigned long now = jiffies; | ||
1107 | struct cfq_queue *cfqq; | ||
1108 | |||
1109 | cfqq = cfqd->active_queue; | ||
1110 | if (!cfqq) | ||
1111 | goto new_queue; | ||
1112 | |||
1113 | if (cfq_cfqq_expired(cfqq)) | ||
1114 | goto new_queue; | ||
805 | 1115 | ||
806 | /* | 1116 | /* |
807 | * follow expired path, else get first next available | 1117 | * slice has expired |
808 | */ | 1118 | */ |
809 | if ((crq = cfq_check_fifo(cfqq)) == NULL) { | 1119 | if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end)) |
810 | if (cfqd->find_best_crq) | 1120 | goto expire; |
811 | crq = cfqq->next_crq; | ||
812 | else | ||
813 | crq = rb_entry_crq(rb_first(&cfqq->sort_list)); | ||
814 | } | ||
815 | |||
816 | cfqd->last_sector = crq->request->sector + crq->request->nr_sectors; | ||
817 | 1121 | ||
818 | /* | 1122 | /* |
819 | * finally, insert request into driver list | 1123 | * if queue has requests, dispatch one. if not, check if |
1124 | * enough slice is left to wait for one | ||
820 | */ | 1125 | */ |
821 | cfq_dispatch_sort(q, crq); | 1126 | if (!RB_EMPTY(&cfqq->sort_list)) |
1127 | goto keep_queue; | ||
1128 | else if (!force && cfq_cfqq_class_sync(cfqq) && | ||
1129 | time_before(now, cfqq->slice_end)) { | ||
1130 | if (cfq_arm_slice_timer(cfqd, cfqq)) | ||
1131 | return NULL; | ||
1132 | } | ||
1133 | |||
1134 | expire: | ||
1135 | cfq_slice_expired(cfqd, 0); | ||
1136 | new_queue: | ||
1137 | cfqq = cfq_set_active_queue(cfqd); | ||
1138 | keep_queue: | ||
1139 | return cfqq; | ||
822 | } | 1140 | } |
823 | 1141 | ||
824 | static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch) | 1142 | static int |
1143 | __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
1144 | int max_dispatch) | ||
825 | { | 1145 | { |
826 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1146 | int dispatched = 0; |
827 | struct cfq_queue *cfqq; | ||
828 | struct list_head *entry, *tmp; | ||
829 | int queued, busy_queues, first_round; | ||
830 | 1147 | ||
831 | if (list_empty(&cfqd->rr_list)) | 1148 | BUG_ON(RB_EMPTY(&cfqq->sort_list)); |
832 | return 0; | ||
833 | 1149 | ||
834 | queued = 0; | 1150 | do { |
835 | first_round = 1; | 1151 | struct cfq_rq *crq; |
836 | restart: | ||
837 | busy_queues = 0; | ||
838 | list_for_each_safe(entry, tmp, &cfqd->rr_list) { | ||
839 | cfqq = list_entry_cfqq(entry); | ||
840 | 1152 | ||
841 | BUG_ON(RB_EMPTY(&cfqq->sort_list)); | 1153 | /* |
1154 | * follow expired path, else get first next available | ||
1155 | */ | ||
1156 | if ((crq = cfq_check_fifo(cfqq)) == NULL) | ||
1157 | crq = cfqq->next_crq; | ||
842 | 1158 | ||
843 | /* | 1159 | /* |
844 | * first round of queueing, only select from queues that | 1160 | * finally, insert request into driver dispatch list |
845 | * don't already have io in-flight | ||
846 | */ | 1161 | */ |
847 | if (first_round && cfqq->in_flight) | 1162 | cfq_dispatch_sort(cfqd->queue, crq); |
848 | continue; | ||
849 | 1163 | ||
850 | cfq_dispatch_request(q, cfqd, cfqq); | 1164 | cfqd->dispatch_slice++; |
1165 | dispatched++; | ||
851 | 1166 | ||
852 | if (!RB_EMPTY(&cfqq->sort_list)) | 1167 | if (!cfqd->active_cic) { |
853 | busy_queues++; | 1168 | atomic_inc(&crq->io_context->ioc->refcount); |
1169 | cfqd->active_cic = crq->io_context; | ||
1170 | } | ||
854 | 1171 | ||
855 | queued++; | 1172 | if (RB_EMPTY(&cfqq->sort_list)) |
856 | } | 1173 | break; |
1174 | |||
1175 | } while (dispatched < max_dispatch); | ||
1176 | |||
1177 | /* | ||
1178 | * if slice end isn't set yet, set it. if at least one request was | ||
1179 | * sync, use the sync time slice value | ||
1180 | */ | ||
1181 | if (!cfqq->slice_end) | ||
1182 | cfq_set_prio_slice(cfqd, cfqq); | ||
1183 | |||
1184 | /* | ||
1185 | * expire an async queue immediately if it has used up its slice. idle | ||
1186 | * queues always expire after 1 dispatch round. | ||
1187 | */ | ||
1188 | if ((!cfq_cfqq_sync(cfqq) && | ||
1189 | cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || | ||
1190 | cfq_class_idle(cfqq)) | ||
1191 | cfq_slice_expired(cfqd, 0); | ||
1192 | |||
1193 | return dispatched; | ||
1194 | } | ||
1195 | |||
1196 | static int | ||
1197 | cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) | ||
1198 | { | ||
1199 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1200 | struct cfq_queue *cfqq; | ||
1201 | |||
1202 | if (!cfqd->busy_queues) | ||
1203 | return 0; | ||
1204 | |||
1205 | cfqq = cfq_select_queue(cfqd, force); | ||
1206 | if (cfqq) { | ||
1207 | cfq_clear_cfqq_must_dispatch(cfqq); | ||
1208 | cfq_clear_cfqq_wait_request(cfqq); | ||
1209 | del_timer(&cfqd->idle_slice_timer); | ||
857 | 1210 | ||
858 | if ((queued < max_dispatch) && (busy_queues || first_round)) { | 1211 | if (cfq_class_idle(cfqq)) |
859 | first_round = 0; | 1212 | max_dispatch = 1; |
860 | goto restart; | 1213 | |
1214 | return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); | ||
861 | } | 1215 | } |
862 | 1216 | ||
863 | return queued; | 1217 | return 0; |
864 | } | 1218 | } |
865 | 1219 | ||
866 | static inline void cfq_account_dispatch(struct cfq_rq *crq) | 1220 | static inline void cfq_account_dispatch(struct cfq_rq *crq) |
867 | { | 1221 | { |
868 | struct cfq_queue *cfqq = crq->cfq_queue; | 1222 | struct cfq_queue *cfqq = crq->cfq_queue; |
869 | struct cfq_data *cfqd = cfqq->cfqd; | 1223 | struct cfq_data *cfqd = cfqq->cfqd; |
870 | unsigned long now, elapsed; | ||
871 | 1224 | ||
872 | if (!blk_fs_request(crq->request)) | 1225 | if (unlikely(!blk_fs_request(crq->request))) |
873 | return; | 1226 | return; |
874 | 1227 | ||
875 | /* | 1228 | /* |
876 | * accounted bit is necessary since some drivers will call | 1229 | * accounted bit is necessary since some drivers will call |
877 | * elv_next_request() many times for the same request (eg ide) | 1230 | * elv_next_request() many times for the same request (eg ide) |
878 | */ | 1231 | */ |
879 | if (crq->accounted) | 1232 | if (cfq_crq_in_driver(crq)) |
880 | return; | 1233 | return; |
881 | 1234 | ||
882 | now = jiffies; | 1235 | cfq_mark_crq_in_driver(crq); |
883 | if (cfqq->service_start == ~0UL) | 1236 | cfqd->rq_in_driver++; |
884 | cfqq->service_start = now; | ||
885 | |||
886 | /* | ||
887 | * on drives with tagged command queueing, command turn-around time | ||
888 | * doesn't necessarily reflect the time spent processing this very | ||
889 | * command inside the drive. so do the accounting differently there, | ||
890 | * by just sorting on the number of requests | ||
891 | */ | ||
892 | if (cfqd->cfq_tagged) { | ||
893 | if (time_after(now, cfqq->service_start + cfq_service)) { | ||
894 | cfqq->service_start = now; | ||
895 | cfqq->service_used /= 10; | ||
896 | } | ||
897 | |||
898 | cfqq->service_used++; | ||
899 | cfq_sort_rr_list(cfqq, 0); | ||
900 | } | ||
901 | |||
902 | elapsed = now - crq->queue_start; | ||
903 | if (elapsed > max_elapsed_dispatch) | ||
904 | max_elapsed_dispatch = elapsed; | ||
905 | |||
906 | crq->accounted = 1; | ||
907 | crq->service_start = now; | ||
908 | |||
909 | if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) { | ||
910 | cfqq->cfqd->cfq_tagged = 1; | ||
911 | printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG); | ||
912 | } | ||
913 | } | 1237 | } |
914 | 1238 | ||
915 | static inline void | 1239 | static inline void |
916 | cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq) | 1240 | cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq) |
917 | { | 1241 | { |
918 | struct cfq_data *cfqd = cfqq->cfqd; | 1242 | struct cfq_data *cfqd = cfqq->cfqd; |
1243 | unsigned long now; | ||
919 | 1244 | ||
920 | if (!crq->accounted) | 1245 | if (!cfq_crq_in_driver(crq)) |
921 | return; | 1246 | return; |
922 | 1247 | ||
1248 | now = jiffies; | ||
1249 | |||
923 | WARN_ON(!cfqd->rq_in_driver); | 1250 | WARN_ON(!cfqd->rq_in_driver); |
924 | cfqd->rq_in_driver--; | 1251 | cfqd->rq_in_driver--; |
925 | 1252 | ||
926 | if (!cfqd->cfq_tagged) { | 1253 | if (!cfq_class_idle(cfqq)) |
927 | unsigned long now = jiffies; | 1254 | cfqd->last_end_request = now; |
928 | unsigned long duration = now - crq->service_start; | ||
929 | 1255 | ||
930 | if (time_after(now, cfqq->service_start + cfq_service)) { | 1256 | if (!cfq_cfqq_dispatched(cfqq)) { |
931 | cfqq->service_start = now; | 1257 | if (cfq_cfqq_on_rr(cfqq)) { |
932 | cfqq->service_used >>= 3; | 1258 | cfqq->service_last = now; |
1259 | cfq_resort_rr_list(cfqq, 0); | ||
1260 | } | ||
1261 | if (cfq_cfqq_expired(cfqq)) { | ||
1262 | __cfq_slice_expired(cfqd, cfqq, 0); | ||
1263 | cfq_schedule_dispatch(cfqd); | ||
933 | } | 1264 | } |
934 | |||
935 | cfqq->service_used += duration; | ||
936 | cfq_sort_rr_list(cfqq, 0); | ||
937 | |||
938 | if (duration > max_elapsed_crq) | ||
939 | max_elapsed_crq = duration; | ||
940 | } | 1265 | } |
1266 | |||
1267 | if (cfq_crq_is_sync(crq)) | ||
1268 | crq->io_context->last_end_request = now; | ||
941 | } | 1269 | } |
942 | 1270 | ||
943 | static struct request *cfq_next_request(request_queue_t *q) | 1271 | static struct request *cfq_next_request(request_queue_t *q) |
@@ -950,7 +1278,19 @@ static struct request *cfq_next_request(request_queue_t *q) | |||
950 | dispatch: | 1278 | dispatch: |
951 | rq = list_entry_rq(q->queue_head.next); | 1279 | rq = list_entry_rq(q->queue_head.next); |
952 | 1280 | ||
953 | if ((crq = RQ_DATA(rq)) != NULL) { | 1281 | crq = RQ_DATA(rq); |
1282 | if (crq) { | ||
1283 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
1284 | |||
1285 | /* | ||
1286 | * if idle window is disabled, allow queue buildup | ||
1287 | */ | ||
1288 | if (!cfq_crq_in_driver(crq) && | ||
1289 | !cfq_cfqq_idle_window(cfqq) && | ||
1290 | !blk_barrier_rq(rq) && | ||
1291 | cfqd->rq_in_driver >= cfqd->cfq_max_depth) | ||
1292 | return NULL; | ||
1293 | |||
954 | cfq_remove_merge_hints(q, crq); | 1294 | cfq_remove_merge_hints(q, crq); |
955 | cfq_account_dispatch(crq); | 1295 | cfq_account_dispatch(crq); |
956 | } | 1296 | } |
@@ -958,7 +1298,7 @@ dispatch: | |||
958 | return rq; | 1298 | return rq; |
959 | } | 1299 | } |
960 | 1300 | ||
961 | if (cfq_dispatch_requests(q, cfqd->cfq_quantum)) | 1301 | if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0)) |
962 | goto dispatch; | 1302 | goto dispatch; |
963 | 1303 | ||
964 | return NULL; | 1304 | return NULL; |
@@ -972,13 +1312,21 @@ dispatch: | |||
972 | */ | 1312 | */ |
973 | static void cfq_put_queue(struct cfq_queue *cfqq) | 1313 | static void cfq_put_queue(struct cfq_queue *cfqq) |
974 | { | 1314 | { |
975 | BUG_ON(!atomic_read(&cfqq->ref)); | 1315 | struct cfq_data *cfqd = cfqq->cfqd; |
1316 | |||
1317 | BUG_ON(atomic_read(&cfqq->ref) <= 0); | ||
976 | 1318 | ||
977 | if (!atomic_dec_and_test(&cfqq->ref)) | 1319 | if (!atomic_dec_and_test(&cfqq->ref)) |
978 | return; | 1320 | return; |
979 | 1321 | ||
980 | BUG_ON(rb_first(&cfqq->sort_list)); | 1322 | BUG_ON(rb_first(&cfqq->sort_list)); |
981 | BUG_ON(cfqq->on_rr); | 1323 | BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); |
1324 | BUG_ON(cfq_cfqq_on_rr(cfqq)); | ||
1325 | |||
1326 | if (unlikely(cfqd->active_queue == cfqq)) { | ||
1327 | __cfq_slice_expired(cfqd, cfqq, 0); | ||
1328 | cfq_schedule_dispatch(cfqd); | ||
1329 | } | ||
982 | 1330 | ||
983 | cfq_put_cfqd(cfqq->cfqd); | 1331 | cfq_put_cfqd(cfqq->cfqd); |
984 | 1332 | ||
@@ -991,15 +1339,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq) | |||
991 | } | 1339 | } |
992 | 1340 | ||
993 | static inline struct cfq_queue * | 1341 | static inline struct cfq_queue * |
994 | __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval) | 1342 | __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio, |
1343 | const int hashval) | ||
995 | { | 1344 | { |
996 | struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; | 1345 | struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; |
997 | struct hlist_node *entry, *next; | 1346 | struct hlist_node *entry, *next; |
998 | 1347 | ||
999 | hlist_for_each_safe(entry, next, hash_list) { | 1348 | hlist_for_each_safe(entry, next, hash_list) { |
1000 | struct cfq_queue *__cfqq = list_entry_qhash(entry); | 1349 | struct cfq_queue *__cfqq = list_entry_qhash(entry); |
1350 | const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio); | ||
1001 | 1351 | ||
1002 | if (__cfqq->key == key) | 1352 | if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) |
1003 | return __cfqq; | 1353 | return __cfqq; |
1004 | } | 1354 | } |
1005 | 1355 | ||
@@ -1007,94 +1357,220 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval) | |||
1007 | } | 1357 | } |
1008 | 1358 | ||
1009 | static struct cfq_queue * | 1359 | static struct cfq_queue * |
1010 | cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key) | 1360 | cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio) |
1011 | { | 1361 | { |
1012 | return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT)); | 1362 | return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT)); |
1013 | } | 1363 | } |
1014 | 1364 | ||
1015 | static inline void | 1365 | static void cfq_free_io_context(struct cfq_io_context *cic) |
1016 | cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq, | ||
1017 | struct cfq_io_context *cic) | ||
1018 | { | 1366 | { |
1019 | unsigned long hashkey = cfq_hash_key(cfqd, current); | 1367 | struct cfq_io_context *__cic; |
1020 | unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT); | 1368 | struct list_head *entry, *next; |
1021 | struct cfq_queue *__cfqq; | ||
1022 | unsigned long flags; | ||
1023 | |||
1024 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); | ||
1025 | 1369 | ||
1026 | hlist_del(&(*cfqq)->cfq_hash); | 1370 | list_for_each_safe(entry, next, &cic->list) { |
1027 | 1371 | __cic = list_entry(entry, struct cfq_io_context, list); | |
1028 | __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval); | 1372 | kmem_cache_free(cfq_ioc_pool, __cic); |
1029 | if (!__cfqq || __cfqq == *cfqq) { | ||
1030 | __cfqq = *cfqq; | ||
1031 | hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); | ||
1032 | __cfqq->key_type = cfqd->key_type; | ||
1033 | } else { | ||
1034 | atomic_inc(&__cfqq->ref); | ||
1035 | cic->cfqq = __cfqq; | ||
1036 | cfq_put_queue(*cfqq); | ||
1037 | *cfqq = __cfqq; | ||
1038 | } | 1373 | } |
1039 | 1374 | ||
1040 | cic->cfqq = __cfqq; | 1375 | kmem_cache_free(cfq_ioc_pool, cic); |
1041 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | ||
1042 | } | 1376 | } |
1043 | 1377 | ||
1044 | static void cfq_free_io_context(struct cfq_io_context *cic) | 1378 | /* |
1379 | * Called with interrupts disabled | ||
1380 | */ | ||
1381 | static void cfq_exit_single_io_context(struct cfq_io_context *cic) | ||
1045 | { | 1382 | { |
1046 | kmem_cache_free(cfq_ioc_pool, cic); | 1383 | struct cfq_data *cfqd = cic->cfqq->cfqd; |
1384 | request_queue_t *q = cfqd->queue; | ||
1385 | |||
1386 | WARN_ON(!irqs_disabled()); | ||
1387 | |||
1388 | spin_lock(q->queue_lock); | ||
1389 | |||
1390 | if (unlikely(cic->cfqq == cfqd->active_queue)) { | ||
1391 | __cfq_slice_expired(cfqd, cic->cfqq, 0); | ||
1392 | cfq_schedule_dispatch(cfqd); | ||
1393 | } | ||
1394 | |||
1395 | cfq_put_queue(cic->cfqq); | ||
1396 | cic->cfqq = NULL; | ||
1397 | spin_unlock(q->queue_lock); | ||
1047 | } | 1398 | } |
1048 | 1399 | ||
1049 | /* | 1400 | /* |
1050 | * locking hierarchy is: io_context lock -> queue locks | 1401 | * Another task may update the task cic list, if it is doing a queue lookup |
1402 | * on its behalf. cfq_cic_lock excludes such concurrent updates | ||
1051 | */ | 1403 | */ |
1052 | static void cfq_exit_io_context(struct cfq_io_context *cic) | 1404 | static void cfq_exit_io_context(struct cfq_io_context *cic) |
1053 | { | 1405 | { |
1054 | struct cfq_queue *cfqq = cic->cfqq; | 1406 | struct cfq_io_context *__cic; |
1055 | struct list_head *entry = &cic->list; | 1407 | struct list_head *entry; |
1056 | request_queue_t *q; | ||
1057 | unsigned long flags; | 1408 | unsigned long flags; |
1058 | 1409 | ||
1410 | local_irq_save(flags); | ||
1411 | |||
1059 | /* | 1412 | /* |
1060 | * put the reference this task is holding to the various queues | 1413 | * put the reference this task is holding to the various queues |
1061 | */ | 1414 | */ |
1062 | spin_lock_irqsave(&cic->ioc->lock, flags); | 1415 | list_for_each(entry, &cic->list) { |
1063 | while ((entry = cic->list.next) != &cic->list) { | ||
1064 | struct cfq_io_context *__cic; | ||
1065 | |||
1066 | __cic = list_entry(entry, struct cfq_io_context, list); | 1416 | __cic = list_entry(entry, struct cfq_io_context, list); |
1067 | list_del(entry); | 1417 | cfq_exit_single_io_context(__cic); |
1068 | |||
1069 | q = __cic->cfqq->cfqd->queue; | ||
1070 | spin_lock(q->queue_lock); | ||
1071 | cfq_put_queue(__cic->cfqq); | ||
1072 | spin_unlock(q->queue_lock); | ||
1073 | } | 1418 | } |
1074 | 1419 | ||
1075 | q = cfqq->cfqd->queue; | 1420 | cfq_exit_single_io_context(cic); |
1076 | spin_lock(q->queue_lock); | 1421 | local_irq_restore(flags); |
1077 | cfq_put_queue(cfqq); | ||
1078 | spin_unlock(q->queue_lock); | ||
1079 | |||
1080 | cic->cfqq = NULL; | ||
1081 | spin_unlock_irqrestore(&cic->ioc->lock, flags); | ||
1082 | } | 1422 | } |
1083 | 1423 | ||
1084 | static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags) | 1424 | static struct cfq_io_context * |
1425 | cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask) | ||
1085 | { | 1426 | { |
1086 | struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags); | 1427 | struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); |
1087 | 1428 | ||
1088 | if (cic) { | 1429 | if (cic) { |
1089 | cic->dtor = cfq_free_io_context; | ||
1090 | cic->exit = cfq_exit_io_context; | ||
1091 | INIT_LIST_HEAD(&cic->list); | 1430 | INIT_LIST_HEAD(&cic->list); |
1092 | cic->cfqq = NULL; | 1431 | cic->cfqq = NULL; |
1432 | cic->key = NULL; | ||
1433 | cic->last_end_request = jiffies; | ||
1434 | cic->ttime_total = 0; | ||
1435 | cic->ttime_samples = 0; | ||
1436 | cic->ttime_mean = 0; | ||
1437 | cic->dtor = cfq_free_io_context; | ||
1438 | cic->exit = cfq_exit_io_context; | ||
1093 | } | 1439 | } |
1094 | 1440 | ||
1095 | return cic; | 1441 | return cic; |
1096 | } | 1442 | } |
1097 | 1443 | ||
1444 | static void cfq_init_prio_data(struct cfq_queue *cfqq) | ||
1445 | { | ||
1446 | struct task_struct *tsk = current; | ||
1447 | int ioprio_class; | ||
1448 | |||
1449 | if (!cfq_cfqq_prio_changed(cfqq)) | ||
1450 | return; | ||
1451 | |||
1452 | ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio); | ||
1453 | switch (ioprio_class) { | ||
1454 | default: | ||
1455 | printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); | ||
1456 | case IOPRIO_CLASS_NONE: | ||
1457 | /* | ||
1458 | * no prio set, place us in the middle of the BE classes | ||
1459 | */ | ||
1460 | cfqq->ioprio = task_nice_ioprio(tsk); | ||
1461 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | ||
1462 | break; | ||
1463 | case IOPRIO_CLASS_RT: | ||
1464 | cfqq->ioprio = task_ioprio(tsk); | ||
1465 | cfqq->ioprio_class = IOPRIO_CLASS_RT; | ||
1466 | break; | ||
1467 | case IOPRIO_CLASS_BE: | ||
1468 | cfqq->ioprio = task_ioprio(tsk); | ||
1469 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | ||
1470 | break; | ||
1471 | case IOPRIO_CLASS_IDLE: | ||
1472 | cfqq->ioprio_class = IOPRIO_CLASS_IDLE; | ||
1473 | cfqq->ioprio = 7; | ||
1474 | cfq_clear_cfqq_idle_window(cfqq); | ||
1475 | break; | ||
1476 | } | ||
1477 | |||
1478 | /* | ||
1479 | * keep track of original prio settings in case we have to temporarily | ||
1480 | * elevate the priority of this queue | ||
1481 | */ | ||
1482 | cfqq->org_ioprio = cfqq->ioprio; | ||
1483 | cfqq->org_ioprio_class = cfqq->ioprio_class; | ||
1484 | |||
1485 | if (cfq_cfqq_on_rr(cfqq)) | ||
1486 | cfq_resort_rr_list(cfqq, 0); | ||
1487 | |||
1488 | cfq_clear_cfqq_prio_changed(cfqq); | ||
1489 | } | ||
1490 | |||
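cfq_init_prio_data() keys off IOPRIO_PRIO_CLASS(tsk->ioprio), and the default case deliberately falls through into IOPRIO_CLASS_NONE so an unknown class is logged and then treated as "no priority set". The class/level packing those macros assume is sketched below; the 13-bit split and the macro names here are assumptions for illustration, not taken from this patch.

    /* Assumed ioprio encoding: class in the top bits, per-class level below. */
    #define PRIO_CLASS_SHIFT        13
    #define PRIO_CLASS(ioprio)      ((ioprio) >> PRIO_CLASS_SHIFT)
    #define PRIO_DATA(ioprio)       ((ioprio) & ((1 << PRIO_CLASS_SHIFT) - 1))
    #define PRIO_VALUE(class, data) (((class) << PRIO_CLASS_SHIFT) | (data))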
1491 | static inline void changed_ioprio(struct cfq_queue *cfqq) | ||
1492 | { | ||
1493 | if (cfqq) { | ||
1494 | struct cfq_data *cfqd = cfqq->cfqd; | ||
1495 | |||
1496 | spin_lock(cfqd->queue->queue_lock); | ||
1497 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1498 | cfq_init_prio_data(cfqq); | ||
1499 | spin_unlock(cfqd->queue->queue_lock); | ||
1500 | } | ||
1501 | } | ||
1502 | |||
1503 | /* | ||
1504 | * callback from sys_ioprio_set, irqs are disabled | ||
1505 | */ | ||
1506 | static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) | ||
1507 | { | ||
1508 | struct cfq_io_context *cic = ioc->cic; | ||
1509 | |||
1510 | changed_ioprio(cic->cfqq); | ||
1511 | |||
1512 | list_for_each_entry(cic, &cic->list, list) | ||
1513 | changed_ioprio(cic->cfqq); | ||
1514 | |||
1515 | return 0; | ||
1516 | } | ||
1517 | |||
1518 | static struct cfq_queue * | ||
1519 | cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, | ||
1520 | int gfp_mask) | ||
1521 | { | ||
1522 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); | ||
1523 | struct cfq_queue *cfqq, *new_cfqq = NULL; | ||
1524 | |||
1525 | retry: | ||
1526 | cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); | ||
1527 | |||
1528 | if (!cfqq) { | ||
1529 | if (new_cfqq) { | ||
1530 | cfqq = new_cfqq; | ||
1531 | new_cfqq = NULL; | ||
1532 | } else if (gfp_mask & __GFP_WAIT) { | ||
1533 | spin_unlock_irq(cfqd->queue->queue_lock); | ||
1534 | new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); | ||
1535 | spin_lock_irq(cfqd->queue->queue_lock); | ||
1536 | goto retry; | ||
1537 | } else { | ||
1538 | cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); | ||
1539 | if (!cfqq) | ||
1540 | goto out; | ||
1541 | } | ||
1542 | |||
1543 | memset(cfqq, 0, sizeof(*cfqq)); | ||
1544 | |||
1545 | INIT_HLIST_NODE(&cfqq->cfq_hash); | ||
1546 | INIT_LIST_HEAD(&cfqq->cfq_list); | ||
1547 | RB_CLEAR_ROOT(&cfqq->sort_list); | ||
1548 | INIT_LIST_HEAD(&cfqq->fifo); | ||
1549 | |||
1550 | cfqq->key = key; | ||
1551 | hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); | ||
1552 | atomic_set(&cfqq->ref, 0); | ||
1553 | cfqq->cfqd = cfqd; | ||
1554 | atomic_inc(&cfqd->ref); | ||
1555 | cfqq->service_last = 0; | ||
1556 | /* | ||
1557 | * set ->slice_left to allow preemption for a new process | ||
1558 | */ | ||
1559 | cfqq->slice_left = 2 * cfqd->cfq_slice_idle; | ||
1560 | cfq_mark_cfqq_idle_window(cfqq); | ||
1561 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1562 | cfq_init_prio_data(cfqq); | ||
1563 | } | ||
1564 | |||
1565 | if (new_cfqq) | ||
1566 | kmem_cache_free(cfq_pool, new_cfqq); | ||
1567 | |||
1568 | atomic_inc(&cfqq->ref); | ||
1569 | out: | ||
1570 | WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); | ||
1571 | return cfqq; | ||
1572 | } | ||
1573 | |||
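cfq_get_queue() above follows the usual pattern for a blocking allocation under a spinlock: drop the queue lock, allocate with __GFP_WAIT, retake the lock, redo the hash lookup, and discard the freshly allocated queue if another context raced in and created it first. A generic userspace analogue of the same pattern (pthread mutex and made-up names, purely illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct obj { int key; struct obj *next; };

    static struct obj *table;                   /* protected by table_lock */
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct obj *lookup(int key)
    {
            struct obj *o;

            for (o = table; o; o = o->next)
                    if (o->key == key)
                            return o;
            return NULL;
    }

    /* caller holds table_lock on entry and on return */
    static struct obj *find_or_create(int key)
    {
            struct obj *o, *new_obj = NULL;

    retry:
            o = lookup(key);
            if (!o) {
                    if (new_obj) {
                            o = new_obj;        /* use the pre-allocated one */
                            new_obj = NULL;
                    } else {
                            /*
                             * the allocation may sleep: drop the lock around
                             * it, then retake it and redo the lookup, since
                             * another thread may have inserted the object
                             */
                            pthread_mutex_unlock(&table_lock);
                            new_obj = calloc(1, sizeof(*new_obj));
                            pthread_mutex_lock(&table_lock);
                            if (!new_obj)
                                    return NULL;
                            goto retry;
                    }
                    o->key = key;
                    o->next = table;
                    table = o;
            }

            if (new_obj)
                    free(new_obj);              /* lost the race, discard */

            return o;
    }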
1098 | /* | 1574 | /* |
1099 | * Setup general io context and cfq io context. There can be several cfq | 1575 | * Setup general io context and cfq io context. There can be several cfq |
1100 | * io contexts per general io context, if this process is doing io to more | 1576 | * io contexts per general io context, if this process is doing io to more |
@@ -1102,39 +1578,39 @@ static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags) | |||
1102 | * cfqq, so we don't need to worry about it disappearing | 1578 | * cfqq, so we don't need to worry about it disappearing |
1103 | */ | 1579 | */ |
1104 | static struct cfq_io_context * | 1580 | static struct cfq_io_context * |
1105 | cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags) | 1581 | cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask) |
1106 | { | 1582 | { |
1107 | struct cfq_data *cfqd = (*cfqq)->cfqd; | 1583 | struct io_context *ioc = NULL; |
1108 | struct cfq_queue *__cfqq = *cfqq; | ||
1109 | struct cfq_io_context *cic; | 1584 | struct cfq_io_context *cic; |
1110 | struct io_context *ioc; | ||
1111 | 1585 | ||
1112 | might_sleep_if(gfp_flags & __GFP_WAIT); | 1586 | might_sleep_if(gfp_mask & __GFP_WAIT); |
1113 | 1587 | ||
1114 | ioc = get_io_context(gfp_flags); | 1588 | ioc = get_io_context(gfp_mask); |
1115 | if (!ioc) | 1589 | if (!ioc) |
1116 | return NULL; | 1590 | return NULL; |
1117 | 1591 | ||
1118 | if ((cic = ioc->cic) == NULL) { | 1592 | if ((cic = ioc->cic) == NULL) { |
1119 | cic = cfq_alloc_io_context(gfp_flags); | 1593 | cic = cfq_alloc_io_context(cfqd, gfp_mask); |
1120 | 1594 | ||
1121 | if (cic == NULL) | 1595 | if (cic == NULL) |
1122 | goto err; | 1596 | goto err; |
1123 | 1597 | ||
1598 | /* | ||
1599 | * manually increment generic io_context usage count, it | ||
1600 | * cannot go away since we are already holding one ref to it | ||
1601 | */ | ||
1124 | ioc->cic = cic; | 1602 | ioc->cic = cic; |
1603 | ioc->set_ioprio = cfq_ioc_set_ioprio; | ||
1125 | cic->ioc = ioc; | 1604 | cic->ioc = ioc; |
1126 | cic->cfqq = __cfqq; | 1605 | cic->key = cfqd; |
1127 | atomic_inc(&__cfqq->ref); | 1606 | atomic_inc(&cfqd->ref); |
1128 | } else { | 1607 | } else { |
1129 | struct cfq_io_context *__cic; | 1608 | struct cfq_io_context *__cic; |
1130 | unsigned long flags; | ||
1131 | 1609 | ||
1132 | /* | 1610 | /* |
1133 | * since the first cic on the list is actually the head | 1611 | * the first cic on the list is actually the head itself |
1134 | * itself, need to check this here or we'll duplicate an | ||
1135 | * cic per ioc for no reason | ||
1136 | */ | 1612 | */ |
1137 | if (cic->cfqq == __cfqq) | 1613 | if (cic->key == cfqd) |
1138 | goto out; | 1614 | goto out; |
1139 | 1615 | ||
1140 | /* | 1616 | /* |
@@ -1142,152 +1618,255 @@ cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags) | |||
1142 | * should be ok here, the list will usually not be more than | 1618 | * should be ok here, the list will usually not be more than |
1143 | * 1 or a few entries long | 1619 | * 1 or a few entries long |
1144 | */ | 1620 | */ |
1145 | spin_lock_irqsave(&ioc->lock, flags); | ||
1146 | list_for_each_entry(__cic, &cic->list, list) { | 1621 | list_for_each_entry(__cic, &cic->list, list) { |
1147 | /* | 1622 | /* |
1148 | * this process is already holding a reference to | 1623 | * this process is already holding a reference to |
1149 | * this queue, so no need to get one more | 1624 | * this queue, so no need to get one more |
1150 | */ | 1625 | */ |
1151 | if (__cic->cfqq == __cfqq) { | 1626 | if (__cic->key == cfqd) { |
1152 | cic = __cic; | 1627 | cic = __cic; |
1153 | spin_unlock_irqrestore(&ioc->lock, flags); | ||
1154 | goto out; | 1628 | goto out; |
1155 | } | 1629 | } |
1156 | } | 1630 | } |
1157 | spin_unlock_irqrestore(&ioc->lock, flags); | ||
1158 | 1631 | ||
1159 | /* | 1632 | /* |
1160 | * nope, process doesn't have a cic associated with this | 1633 | * nope, process doesn't have a cic associated with this |
1161 | * cfqq yet. get a new one and add to list | 1634 | * cfqq yet. get a new one and add to list |
1162 | */ | 1635 | */ |
1163 | __cic = cfq_alloc_io_context(gfp_flags); | 1636 | __cic = cfq_alloc_io_context(cfqd, gfp_mask); |
1164 | if (__cic == NULL) | 1637 | if (__cic == NULL) |
1165 | goto err; | 1638 | goto err; |
1166 | 1639 | ||
1167 | __cic->ioc = ioc; | 1640 | __cic->ioc = ioc; |
1168 | __cic->cfqq = __cfqq; | 1641 | __cic->key = cfqd; |
1169 | atomic_inc(&__cfqq->ref); | 1642 | atomic_inc(&cfqd->ref); |
1170 | spin_lock_irqsave(&ioc->lock, flags); | ||
1171 | list_add(&__cic->list, &cic->list); | 1643 | list_add(&__cic->list, &cic->list); |
1172 | spin_unlock_irqrestore(&ioc->lock, flags); | ||
1173 | |||
1174 | cic = __cic; | 1644 | cic = __cic; |
1175 | *cfqq = __cfqq; | ||
1176 | } | 1645 | } |
1177 | 1646 | ||
1178 | out: | 1647 | out: |
1179 | /* | ||
1180 | * if key_type has been changed on the fly, we lazily rehash | ||
1181 | * each queue at lookup time | ||
1182 | */ | ||
1183 | if ((*cfqq)->key_type != cfqd->key_type) | ||
1184 | cfq_rehash_cfqq(cfqd, cfqq, cic); | ||
1185 | |||
1186 | return cic; | 1648 | return cic; |
1187 | err: | 1649 | err: |
1188 | put_io_context(ioc); | 1650 | put_io_context(ioc); |
1189 | return NULL; | 1651 | return NULL; |
1190 | } | 1652 | } |
1191 | 1653 | ||
1192 | static struct cfq_queue * | 1654 | static void |
1193 | __cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask) | 1655 | cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) |
1194 | { | 1656 | { |
1195 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); | 1657 | unsigned long elapsed, ttime; |
1196 | struct cfq_queue *cfqq, *new_cfqq = NULL; | ||
1197 | |||
1198 | retry: | ||
1199 | cfqq = __cfq_find_cfq_hash(cfqd, key, hashval); | ||
1200 | 1658 | ||
1201 | if (!cfqq) { | 1659 | /* |
1202 | if (new_cfqq) { | 1660 | * if this context already has stuff queued, thinktime is from |
1203 | cfqq = new_cfqq; | 1661 | * last queue not last end |
1204 | new_cfqq = NULL; | 1662 | */ |
1205 | } else { | 1663 | #if 0 |
1206 | spin_unlock_irq(cfqd->queue->queue_lock); | 1664 | if (time_after(cic->last_end_request, cic->last_queue)) |
1207 | new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); | 1665 | elapsed = jiffies - cic->last_end_request; |
1208 | spin_lock_irq(cfqd->queue->queue_lock); | 1666 | else |
1667 | elapsed = jiffies - cic->last_queue; | ||
1668 | #else | ||
1669 | elapsed = jiffies - cic->last_end_request; | ||
1670 | #endif | ||
1209 | 1671 | ||
1210 | if (!new_cfqq && !(gfp_mask & __GFP_WAIT)) | 1672 | ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); |
1211 | goto out; | ||
1212 | 1673 | ||
1213 | goto retry; | 1674 | cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; |
1214 | } | 1675 | cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; |
1676 | cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; | ||
1677 | } | ||
1215 | 1678 | ||
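The think-time bookkeeping above is a fixed-point exponentially weighted moving average: each update keeps 7/8 of the history and folds in 1/8 of the new sample, scaled by 256 so the mean survives integer arithmetic (the +128 rounds the final division to nearest). Starting from zero, ttime_samples steps through 32, 60, 84 and converges toward 256, so the sample_valid() threshold of 80 defined just below is first met after about three requests. A minimal standalone version of the update:

    /*
     * Fixed-point EWMA of per-process think time, mirroring the update above.
     */
    struct ttime_stats {
            unsigned long samples;  /* scaled count, converges toward 256 */
            unsigned long total;    /* scaled sum of think times */
            unsigned long mean;
    };

    static void ttime_update(struct ttime_stats *t, unsigned long ttime)
    {
            t->samples = (7 * t->samples + 256) / 8;
            t->total   = (7 * t->total + 256 * ttime) / 8;
            t->mean    = (t->total + 128) / t->samples;  /* round to nearest */
    }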
1216 | memset(cfqq, 0, sizeof(*cfqq)); | 1679 | #define sample_valid(samples) ((samples) > 80) |
1217 | 1680 | ||
1218 | INIT_HLIST_NODE(&cfqq->cfq_hash); | 1681 | /* |
1219 | INIT_LIST_HEAD(&cfqq->cfq_list); | 1682 | * Disable idle window if the process thinks too long or seeks so much that |
1220 | RB_CLEAR_ROOT(&cfqq->sort_list); | 1683 | * it doesn't matter |
1221 | INIT_LIST_HEAD(&cfqq->fifo[0]); | 1684 | */ |
1222 | INIT_LIST_HEAD(&cfqq->fifo[1]); | 1685 | static void |
1686 | cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
1687 | struct cfq_io_context *cic) | ||
1688 | { | ||
1689 | int enable_idle = cfq_cfqq_idle_window(cfqq); | ||
1223 | 1690 | ||
1224 | cfqq->key = key; | 1691 | if (!cic->ioc->task || !cfqd->cfq_slice_idle) |
1225 | hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); | 1692 | enable_idle = 0; |
1226 | atomic_set(&cfqq->ref, 0); | 1693 | else if (sample_valid(cic->ttime_samples)) { |
1227 | cfqq->cfqd = cfqd; | 1694 | if (cic->ttime_mean > cfqd->cfq_slice_idle) |
1228 | atomic_inc(&cfqd->ref); | 1695 | enable_idle = 0; |
1229 | cfqq->key_type = cfqd->key_type; | 1696 | else |
1230 | cfqq->service_start = ~0UL; | 1697 | enable_idle = 1; |
1231 | } | 1698 | } |
1232 | 1699 | ||
1233 | if (new_cfqq) | 1700 | if (enable_idle) |
1234 | kmem_cache_free(cfq_pool, new_cfqq); | 1701 | cfq_mark_cfqq_idle_window(cfqq); |
1702 | else | ||
1703 | cfq_clear_cfqq_idle_window(cfqq); | ||
1704 | } | ||
1235 | 1705 | ||
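cfq_update_idle_window() above disables idling outright when the task is gone or cfq_slice_idle is zero, trusts the measured mean think time once enough samples have accumulated, and otherwise leaves the previous setting alone. The same decision in isolation (the threshold of 80 comes from sample_valid() above; the function and parameter names here are made up):

    static int keep_idle_window(int has_task, unsigned long slice_idle,
                                unsigned long ttime_samples,
                                unsigned long ttime_mean, int currently_enabled)
    {
            if (!has_task || !slice_idle)
                    return 0;
            if (ttime_samples > 80)        /* enough history to trust the mean */
                    return ttime_mean <= slice_idle;
            return currently_enabled;      /* too few samples: keep prior state */
    }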
1236 | atomic_inc(&cfqq->ref); | 1706 | |
1237 | out: | 1707 | /* |
1238 | WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); | 1708 | * Check if new_cfqq should preempt the currently active queue. Return 0 for |
1239 | return cfqq; | 1709 | * no or if we aren't sure, a 1 will cause a preempt. |
1710 | */ | ||
1711 | static int | ||
1712 | cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, | ||
1713 | struct cfq_rq *crq) | ||
1714 | { | ||
1715 | struct cfq_queue *cfqq = cfqd->active_queue; | ||
1716 | |||
1717 | if (cfq_class_idle(new_cfqq)) | ||
1718 | return 0; | ||
1719 | |||
1720 | if (!cfqq) | ||
1721 | return 1; | ||
1722 | |||
1723 | if (cfq_class_idle(cfqq)) | ||
1724 | return 1; | ||
1725 | if (!cfq_cfqq_wait_request(new_cfqq)) | ||
1726 | return 0; | ||
1727 | /* | ||
1728 | * if it doesn't have slice left, forget it | ||
1729 | */ | ||
1730 | if (new_cfqq->slice_left < cfqd->cfq_slice_idle) | ||
1731 | return 0; | ||
1732 | if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq)) | ||
1733 | return 1; | ||
1734 | |||
1735 | return 0; | ||
1736 | } | ||
1737 | |||
1738 | /* | ||
1739 | * cfqq preempts the active queue. if we allowed preempt with no slice left, | ||
1740 | * let it have half of its nominal slice. | ||
1741 | */ | ||
1742 | static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
1743 | { | ||
1744 | struct cfq_queue *__cfqq, *next; | ||
1745 | |||
1746 | list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list) | ||
1747 | cfq_resort_rr_list(__cfqq, 1); | ||
1748 | |||
1749 | if (!cfqq->slice_left) | ||
1750 | cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2; | ||
1751 | |||
1752 | cfqq->slice_end = cfqq->slice_left + jiffies; | ||
1753 | __cfq_slice_expired(cfqd, cfqq, 1); | ||
1754 | __cfq_set_active_queue(cfqd, cfqq); | ||
1755 | } | ||
1756 | |||
1757 | /* | ||
1758 | * should really be a ll_rw_blk.c helper | ||
1759 | */ | ||
1760 | static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
1761 | { | ||
1762 | request_queue_t *q = cfqd->queue; | ||
1763 | |||
1764 | if (!blk_queue_plugged(q)) | ||
1765 | q->request_fn(q); | ||
1766 | else | ||
1767 | __generic_unplug_device(q); | ||
1768 | } | ||
1769 | |||
1770 | /* | ||
1771 | * Called when a new fs request (crq) is added (to cfqq). Check if there's | ||
1772 | * something we should do about it | ||
1773 | */ | ||
1774 | static void | ||
1775 | cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
1776 | struct cfq_rq *crq) | ||
1777 | { | ||
1778 | struct cfq_io_context *cic; | ||
1779 | |||
1780 | cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); | ||
1781 | |||
1782 | /* | ||
1783 | * we never wait for an async request and we don't allow preemption | ||
1784 | * of an async request. so just return early | ||
1785 | */ | ||
1786 | if (!cfq_crq_is_sync(crq)) | ||
1787 | return; | ||
1788 | |||
1789 | cic = crq->io_context; | ||
1790 | |||
1791 | cfq_update_io_thinktime(cfqd, cic); | ||
1792 | cfq_update_idle_window(cfqd, cfqq, cic); | ||
1793 | |||
1794 | cic->last_queue = jiffies; | ||
1795 | |||
1796 | if (cfqq == cfqd->active_queue) { | ||
1797 | /* | ||
1798 | * if we are waiting for a request for this queue, let it rip | ||
1799 | * immediately and flag that we must not expire this queue | ||
1800 | * just now | ||
1801 | */ | ||
1802 | if (cfq_cfqq_wait_request(cfqq)) { | ||
1803 | cfq_mark_cfqq_must_dispatch(cfqq); | ||
1804 | del_timer(&cfqd->idle_slice_timer); | ||
1805 | cfq_start_queueing(cfqd, cfqq); | ||
1806 | } | ||
1807 | } else if (cfq_should_preempt(cfqd, cfqq, crq)) { | ||
1808 | /* | ||
1809 | * not the active queue - expire current slice if it is | ||
1810 | * idle and has expired its mean thinktime or this new queue | ||
1811 | * has some old slice time left and is of higher priority | ||
1812 | */ | ||
1813 | cfq_preempt_queue(cfqd, cfqq); | ||
1814 | cfq_mark_cfqq_must_dispatch(cfqq); | ||
1815 | cfq_start_queueing(cfqd, cfqq); | ||
1816 | } | ||
1240 | } | 1817 | } |
1241 | 1818 | ||
1242 | static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq) | 1819 | static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq) |
1243 | { | 1820 | { |
1244 | crq->is_sync = 0; | 1821 | struct cfq_rq *crq = RQ_DATA(rq); |
1245 | if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE) | 1822 | struct cfq_queue *cfqq = crq->cfq_queue; |
1246 | crq->is_sync = 1; | 1823 | |
1824 | cfq_init_prio_data(cfqq); | ||
1247 | 1825 | ||
1248 | cfq_add_crq_rb(crq); | 1826 | cfq_add_crq_rb(crq); |
1249 | crq->queue_start = jiffies; | ||
1250 | 1827 | ||
1251 | list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]); | 1828 | list_add_tail(&rq->queuelist, &cfqq->fifo); |
1829 | |||
1830 | if (rq_mergeable(rq)) { | ||
1831 | cfq_add_crq_hash(cfqd, crq); | ||
1832 | |||
1833 | if (!cfqd->queue->last_merge) | ||
1834 | cfqd->queue->last_merge = rq; | ||
1835 | } | ||
1836 | |||
1837 | cfq_crq_enqueued(cfqd, cfqq, crq); | ||
1252 | } | 1838 | } |
1253 | 1839 | ||
1254 | static void | 1840 | static void |
1255 | cfq_insert_request(request_queue_t *q, struct request *rq, int where) | 1841 | cfq_insert_request(request_queue_t *q, struct request *rq, int where) |
1256 | { | 1842 | { |
1257 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1843 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1258 | struct cfq_rq *crq = RQ_DATA(rq); | ||
1259 | 1844 | ||
1260 | switch (where) { | 1845 | switch (where) { |
1261 | case ELEVATOR_INSERT_BACK: | 1846 | case ELEVATOR_INSERT_BACK: |
1262 | while (cfq_dispatch_requests(q, cfqd->cfq_quantum)) | 1847 | while (cfq_dispatch_requests(q, INT_MAX, 1)) |
1263 | ; | 1848 | ; |
1264 | list_add_tail(&rq->queuelist, &q->queue_head); | 1849 | list_add_tail(&rq->queuelist, &q->queue_head); |
1850 | /* | ||
1851 | * If we were idling with pending requests on | ||
1852 | * inactive cfqqs, force dispatching will | ||
1853 | * remove the idle timer and the queue won't | ||
1854 | * be kicked by __make_request() afterward. | ||
1855 | * Kick it here. | ||
1856 | */ | ||
1857 | cfq_schedule_dispatch(cfqd); | ||
1265 | break; | 1858 | break; |
1266 | case ELEVATOR_INSERT_FRONT: | 1859 | case ELEVATOR_INSERT_FRONT: |
1267 | list_add(&rq->queuelist, &q->queue_head); | 1860 | list_add(&rq->queuelist, &q->queue_head); |
1268 | break; | 1861 | break; |
1269 | case ELEVATOR_INSERT_SORT: | 1862 | case ELEVATOR_INSERT_SORT: |
1270 | BUG_ON(!blk_fs_request(rq)); | 1863 | BUG_ON(!blk_fs_request(rq)); |
1271 | cfq_enqueue(cfqd, crq); | 1864 | cfq_enqueue(cfqd, rq); |
1272 | break; | 1865 | break; |
1273 | default: | 1866 | default: |
1274 | printk("%s: bad insert point %d\n", __FUNCTION__,where); | 1867 | printk("%s: bad insert point %d\n", __FUNCTION__,where); |
1275 | return; | 1868 | return; |
1276 | } | 1869 | } |
1277 | |||
1278 | if (rq_mergeable(rq)) { | ||
1279 | cfq_add_crq_hash(cfqd, crq); | ||
1280 | |||
1281 | if (!q->last_merge) | ||
1282 | q->last_merge = rq; | ||
1283 | } | ||
1284 | } | ||
1285 | |||
1286 | static int cfq_queue_empty(request_queue_t *q) | ||
1287 | { | ||
1288 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1289 | |||
1290 | return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list); | ||
1291 | } | 1870 | } |
1292 | 1871 | ||
1293 | static void cfq_completed_request(request_queue_t *q, struct request *rq) | 1872 | static void cfq_completed_request(request_queue_t *q, struct request *rq) |
@@ -1300,9 +1879,11 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq) | |||
1300 | 1879 | ||
1301 | cfqq = crq->cfq_queue; | 1880 | cfqq = crq->cfq_queue; |
1302 | 1881 | ||
1303 | if (crq->in_flight) { | 1882 | if (cfq_crq_in_flight(crq)) { |
1304 | WARN_ON(!cfqq->in_flight); | 1883 | const int sync = cfq_crq_is_sync(crq); |
1305 | cfqq->in_flight--; | 1884 | |
1885 | WARN_ON(!cfqq->on_dispatch[sync]); | ||
1886 | cfqq->on_dispatch[sync]--; | ||
1306 | } | 1887 | } |
1307 | 1888 | ||
1308 | cfq_account_completion(cfqq, crq); | 1889 | cfq_account_completion(cfqq, crq); |
@@ -1332,51 +1913,136 @@ cfq_latter_request(request_queue_t *q, struct request *rq) | |||
1332 | return NULL; | 1913 | return NULL; |
1333 | } | 1914 | } |
1334 | 1915 | ||
1335 | static int cfq_may_queue(request_queue_t *q, int rw) | 1916 | /* |
1917 | * we temporarily boost lower priority queues if they are holding fs exclusive | ||
1918 | * resources. they are boosted to normal prio (CLASS_BE/4) | ||
1919 | */ | ||
1920 | static void cfq_prio_boost(struct cfq_queue *cfqq) | ||
1336 | { | 1921 | { |
1337 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1922 | const int ioprio_class = cfqq->ioprio_class; |
1338 | struct cfq_queue *cfqq; | 1923 | const int ioprio = cfqq->ioprio; |
1339 | int ret = ELV_MQUEUE_MAY; | ||
1340 | 1924 | ||
1341 | if (current->flags & PF_MEMALLOC) | 1925 | if (has_fs_excl()) { |
1342 | return ELV_MQUEUE_MAY; | 1926 | /* |
1927 | * boost idle prio on transactions that would lock out other | ||
1928 | * users of the filesystem | ||
1929 | */ | ||
1930 | if (cfq_class_idle(cfqq)) | ||
1931 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | ||
1932 | if (cfqq->ioprio > IOPRIO_NORM) | ||
1933 | cfqq->ioprio = IOPRIO_NORM; | ||
1934 | } else { | ||
1935 | /* | ||
1936 | * check if we need to unboost the queue | ||
1937 | */ | ||
1938 | if (cfqq->ioprio_class != cfqq->org_ioprio_class) | ||
1939 | cfqq->ioprio_class = cfqq->org_ioprio_class; | ||
1940 | if (cfqq->ioprio != cfqq->org_ioprio) | ||
1941 | cfqq->ioprio = cfqq->org_ioprio; | ||
1942 | } | ||
1343 | 1943 | ||
1344 | cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current)); | 1944 | /* |
1345 | if (cfqq) { | 1945 | * refile between round-robin lists if we moved the priority class |
1346 | int limit = cfqd->max_queued; | 1946 | */ |
1947 | if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) && | ||
1948 | cfq_cfqq_on_rr(cfqq)) | ||
1949 | cfq_resort_rr_list(cfqq, 0); | ||
1950 | } | ||
1347 | 1951 | ||
1348 | if (cfqq->allocated[rw] < cfqd->cfq_queued) | 1952 | static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) |
1349 | return ELV_MQUEUE_MUST; | 1953 | { |
1954 | if (rw == READ || process_sync(task)) | ||
1955 | return task->pid; | ||
1350 | 1956 | ||
1351 | if (cfqd->busy_queues) | 1957 | return CFQ_KEY_ASYNC; |
1352 | limit = q->nr_requests / cfqd->busy_queues; | 1958 | } |
1353 | 1959 | ||
1354 | if (limit < cfqd->cfq_queued) | 1960 | static inline int |
1355 | limit = cfqd->cfq_queued; | 1961 | __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
1356 | else if (limit > cfqd->max_queued) | 1962 | struct task_struct *task, int rw) |
1357 | limit = cfqd->max_queued; | 1963 | { |
1964 | #if 1 | ||
1965 | if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && | ||
1966 | !cfq_cfqq_must_alloc_slice(cfqq)) { | ||
1967 | cfq_mark_cfqq_must_alloc_slice(cfqq); | ||
1968 | return ELV_MQUEUE_MUST; | ||
1969 | } | ||
1358 | 1970 | ||
1359 | if (cfqq->allocated[rw] >= limit) { | 1971 | return ELV_MQUEUE_MAY; |
1360 | if (limit > cfqq->alloc_limit[rw]) | 1972 | #else |
1361 | cfqq->alloc_limit[rw] = limit; | 1973 | if (!cfqq || task->flags & PF_MEMALLOC) |
1974 | return ELV_MQUEUE_MAY; | ||
1975 | if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) { | ||
1976 | if (cfq_cfqq_wait_request(cfqq)) | ||
1977 | return ELV_MQUEUE_MUST; | ||
1362 | 1978 | ||
1363 | ret = ELV_MQUEUE_NO; | 1979 | /* |
1980 | * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we | ||
1981 | * can quickly flood the queue with writes from a single task | ||
1982 | */ | ||
1983 | if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) { | ||
1984 | cfq_mark_cfqq_must_alloc_slice(cfqq); | ||
1985 | return ELV_MQUEUE_MUST; | ||
1364 | } | 1986 | } |
1987 | |||
1988 | return ELV_MQUEUE_MAY; | ||
1365 | } | 1989 | } |
1990 | if (cfq_class_idle(cfqq)) | ||
1991 | return ELV_MQUEUE_NO; | ||
1992 | if (cfqq->allocated[rw] >= cfqd->max_queued) { | ||
1993 | struct io_context *ioc = get_io_context(GFP_ATOMIC); | ||
1994 | int ret = ELV_MQUEUE_NO; | ||
1366 | 1995 | ||
1367 | return ret; | 1996 | if (ioc && ioc->nr_batch_requests) |
1997 | ret = ELV_MQUEUE_MAY; | ||
1998 | |||
1999 | put_io_context(ioc); | ||
2000 | return ret; | ||
2001 | } | ||
2002 | |||
2003 | return ELV_MQUEUE_MAY; | ||
2004 | #endif | ||
2005 | } | ||
2006 | |||
2007 | static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) | ||
2008 | { | ||
2009 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
2010 | struct task_struct *tsk = current; | ||
2011 | struct cfq_queue *cfqq; | ||
2012 | |||
2013 | /* | ||
2014 | * don't force setup of a queue from here, as a call to may_queue | ||
2015 | * does not necessarily imply that a request actually will be queued. | ||
2016 | * so just lookup a possibly existing queue, or return 'may queue' | ||
2017 | * if that fails | ||
2018 | */ | ||
2019 | cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio); | ||
2020 | if (cfqq) { | ||
2021 | cfq_init_prio_data(cfqq); | ||
2022 | cfq_prio_boost(cfqq); | ||
2023 | |||
2024 | return __cfq_may_queue(cfqd, cfqq, tsk, rw); | ||
2025 | } | ||
2026 | |||
2027 | return ELV_MQUEUE_MAY; | ||
1368 | } | 2028 | } |
1369 | 2029 | ||
1370 | static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) | 2030 | static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) |
1371 | { | 2031 | { |
2032 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1372 | struct request_list *rl = &q->rq; | 2033 | struct request_list *rl = &q->rq; |
1373 | const int write = waitqueue_active(&rl->wait[WRITE]); | ||
1374 | const int read = waitqueue_active(&rl->wait[READ]); | ||
1375 | 2034 | ||
1376 | if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ]) | 2035 | if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) { |
1377 | wake_up(&rl->wait[READ]); | 2036 | smp_mb(); |
1378 | if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE]) | 2037 | if (waitqueue_active(&rl->wait[READ])) |
1379 | wake_up(&rl->wait[WRITE]); | 2038 | wake_up(&rl->wait[READ]); |
2039 | } | ||
2040 | |||
2041 | if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) { | ||
2042 | smp_mb(); | ||
2043 | if (waitqueue_active(&rl->wait[WRITE])) | ||
2044 | wake_up(&rl->wait[WRITE]); | ||
2045 | } | ||
1380 | } | 2046 | } |
1381 | 2047 | ||
1382 | /* | 2048 | /* |
@@ -1389,69 +2055,61 @@ static void cfq_put_request(request_queue_t *q, struct request *rq) | |||
1389 | 2055 | ||
1390 | if (crq) { | 2056 | if (crq) { |
1391 | struct cfq_queue *cfqq = crq->cfq_queue; | 2057 | struct cfq_queue *cfqq = crq->cfq_queue; |
2058 | const int rw = rq_data_dir(rq); | ||
1392 | 2059 | ||
1393 | BUG_ON(q->last_merge == rq); | 2060 | BUG_ON(!cfqq->allocated[rw]); |
1394 | BUG_ON(!hlist_unhashed(&crq->hash)); | 2061 | cfqq->allocated[rw]--; |
1395 | 2062 | ||
1396 | if (crq->io_context) | 2063 | put_io_context(crq->io_context->ioc); |
1397 | put_io_context(crq->io_context->ioc); | ||
1398 | |||
1399 | BUG_ON(!cfqq->allocated[crq->is_write]); | ||
1400 | cfqq->allocated[crq->is_write]--; | ||
1401 | 2064 | ||
1402 | mempool_free(crq, cfqd->crq_pool); | 2065 | mempool_free(crq, cfqd->crq_pool); |
1403 | rq->elevator_private = NULL; | 2066 | rq->elevator_private = NULL; |
1404 | 2067 | ||
1405 | smp_mb(); | ||
1406 | cfq_check_waiters(q, cfqq); | 2068 | cfq_check_waiters(q, cfqq); |
1407 | cfq_put_queue(cfqq); | 2069 | cfq_put_queue(cfqq); |
1408 | } | 2070 | } |
1409 | } | 2071 | } |
1410 | 2072 | ||
1411 | /* | 2073 | /* |
1412 | * Allocate cfq data structures associated with this request. A queue and | 2074 | * Allocate cfq data structures associated with this request. |
1413 | */ | 2075 | */ |
1414 | static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask) | 2076 | static int |
2077 | cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | ||
2078 | int gfp_mask) | ||
1415 | { | 2079 | { |
1416 | struct cfq_data *cfqd = q->elevator->elevator_data; | 2080 | struct cfq_data *cfqd = q->elevator->elevator_data; |
2081 | struct task_struct *tsk = current; | ||
1417 | struct cfq_io_context *cic; | 2082 | struct cfq_io_context *cic; |
1418 | const int rw = rq_data_dir(rq); | 2083 | const int rw = rq_data_dir(rq); |
1419 | struct cfq_queue *cfqq, *saved_cfqq; | 2084 | pid_t key = cfq_queue_pid(tsk, rw); |
2085 | struct cfq_queue *cfqq; | ||
1420 | struct cfq_rq *crq; | 2086 | struct cfq_rq *crq; |
1421 | unsigned long flags; | 2087 | unsigned long flags; |
1422 | 2088 | ||
1423 | might_sleep_if(gfp_mask & __GFP_WAIT); | 2089 | might_sleep_if(gfp_mask & __GFP_WAIT); |
1424 | 2090 | ||
2091 | cic = cfq_get_io_context(cfqd, key, gfp_mask); | ||
2092 | |||
1425 | spin_lock_irqsave(q->queue_lock, flags); | 2093 | spin_lock_irqsave(q->queue_lock, flags); |
1426 | 2094 | ||
1427 | cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask); | 2095 | if (!cic) |
1428 | if (!cfqq) | 2096 | goto queue_fail; |
1429 | goto out_lock; | 2097 | |
2098 | if (!cic->cfqq) { | ||
2099 | cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask); | ||
2100 | if (!cfqq) | ||
2101 | goto queue_fail; | ||
1430 | 2102 | ||
1431 | repeat: | 2103 | cic->cfqq = cfqq; |
1432 | if (cfqq->allocated[rw] >= cfqd->max_queued) | 2104 | } else |
1433 | goto out_lock; | 2105 | cfqq = cic->cfqq; |
1434 | 2106 | ||
1435 | cfqq->allocated[rw]++; | 2107 | cfqq->allocated[rw]++; |
2108 | cfq_clear_cfqq_must_alloc(cfqq); | ||
2109 | cfqd->rq_starved = 0; | ||
2110 | atomic_inc(&cfqq->ref); | ||
1436 | spin_unlock_irqrestore(q->queue_lock, flags); | 2111 | spin_unlock_irqrestore(q->queue_lock, flags); |
1437 | 2112 | ||
1438 | /* | ||
1439 | * if hashing type has changed, the cfq_queue might change here. | ||
1440 | */ | ||
1441 | saved_cfqq = cfqq; | ||
1442 | cic = cfq_get_io_context(&cfqq, gfp_mask); | ||
1443 | if (!cic) | ||
1444 | goto err; | ||
1445 | |||
1446 | /* | ||
1447 | * repeat allocation checks on queue change | ||
1448 | */ | ||
1449 | if (unlikely(saved_cfqq != cfqq)) { | ||
1450 | spin_lock_irqsave(q->queue_lock, flags); | ||
1451 | saved_cfqq->allocated[rw]--; | ||
1452 | goto repeat; | ||
1453 | } | ||
1454 | |||
1455 | crq = mempool_alloc(cfqd->crq_pool, gfp_mask); | 2113 | crq = mempool_alloc(cfqd->crq_pool, gfp_mask); |
1456 | if (crq) { | 2114 | if (crq) { |
1457 | RB_CLEAR(&crq->rb_node); | 2115 | RB_CLEAR(&crq->rb_node); |
@@ -1460,24 +2118,141 @@ repeat: | |||
1460 | INIT_HLIST_NODE(&crq->hash); | 2118 | INIT_HLIST_NODE(&crq->hash); |
1461 | crq->cfq_queue = cfqq; | 2119 | crq->cfq_queue = cfqq; |
1462 | crq->io_context = cic; | 2120 | crq->io_context = cic; |
1463 | crq->service_start = crq->queue_start = 0; | 2121 | cfq_clear_crq_in_flight(crq); |
1464 | crq->in_flight = crq->accounted = crq->is_sync = 0; | 2122 | cfq_clear_crq_in_driver(crq); |
1465 | crq->is_write = rw; | 2123 | cfq_clear_crq_requeued(crq); |
2124 | |||
2125 | if (rw == READ || process_sync(tsk)) | ||
2126 | cfq_mark_crq_is_sync(crq); | ||
2127 | else | ||
2128 | cfq_clear_crq_is_sync(crq); | ||
2129 | |||
1466 | rq->elevator_private = crq; | 2130 | rq->elevator_private = crq; |
1467 | cfqq->alloc_limit[rw] = 0; | ||
1468 | return 0; | 2131 | return 0; |
1469 | } | 2132 | } |
1470 | 2133 | ||
1471 | put_io_context(cic->ioc); | ||
1472 | err: | ||
1473 | spin_lock_irqsave(q->queue_lock, flags); | 2134 | spin_lock_irqsave(q->queue_lock, flags); |
1474 | cfqq->allocated[rw]--; | 2135 | cfqq->allocated[rw]--; |
2136 | if (!(cfqq->allocated[0] + cfqq->allocated[1])) | ||
2137 | cfq_mark_cfqq_must_alloc(cfqq); | ||
1475 | cfq_put_queue(cfqq); | 2138 | cfq_put_queue(cfqq); |
1476 | out_lock: | 2139 | queue_fail: |
2140 | if (cic) | ||
2141 | put_io_context(cic->ioc); | ||
2142 | /* | ||
2143 | * mark us rq allocation starved. we need to kickstart the process | ||
2144 | * ourselves if there are no pending requests that can do it for us. | ||
2145 | * that would be an extremely rare OOM situation | ||
2146 | */ | ||
2147 | cfqd->rq_starved = 1; | ||
2148 | cfq_schedule_dispatch(cfqd); | ||
1477 | spin_unlock_irqrestore(q->queue_lock, flags); | 2149 | spin_unlock_irqrestore(q->queue_lock, flags); |
1478 | return 1; | 2150 | return 1; |
1479 | } | 2151 | } |
1480 | 2152 | ||
2153 | static void cfq_kick_queue(void *data) | ||
2154 | { | ||
2155 | request_queue_t *q = data; | ||
2156 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
2157 | unsigned long flags; | ||
2158 | |||
2159 | spin_lock_irqsave(q->queue_lock, flags); | ||
2160 | |||
2161 | if (cfqd->rq_starved) { | ||
2162 | struct request_list *rl = &q->rq; | ||
2163 | |||
2164 | /* | ||
2165 | * we aren't guaranteed to get a request after this, but we | ||
2166 | * have to be opportunistic | ||
2167 | */ | ||
2168 | smp_mb(); | ||
2169 | if (waitqueue_active(&rl->wait[READ])) | ||
2170 | wake_up(&rl->wait[READ]); | ||
2171 | if (waitqueue_active(&rl->wait[WRITE])) | ||
2172 | wake_up(&rl->wait[WRITE]); | ||
2173 | } | ||
2174 | |||
2175 | blk_remove_plug(q); | ||
2176 | q->request_fn(q); | ||
2177 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2178 | } | ||
2179 | |||
2180 | /* | ||
2181 | * Timer running if the active_queue is currently idling inside its time slice | ||
2182 | */ | ||
2183 | static void cfq_idle_slice_timer(unsigned long data) | ||
2184 | { | ||
2185 | struct cfq_data *cfqd = (struct cfq_data *) data; | ||
2186 | struct cfq_queue *cfqq; | ||
2187 | unsigned long flags; | ||
2188 | |||
2189 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); | ||
2190 | |||
2191 | if ((cfqq = cfqd->active_queue) != NULL) { | ||
2192 | unsigned long now = jiffies; | ||
2193 | |||
2194 | /* | ||
2195 | * expired | ||
2196 | */ | ||
2197 | if (time_after(now, cfqq->slice_end)) | ||
2198 | goto expire; | ||
2199 | |||
2200 | /* | ||
2201 | * only expire and reinvoke request handler, if there are | ||
2202 | * other queues with pending requests | ||
2203 | */ | ||
2204 | if (!cfq_pending_requests(cfqd)) { | ||
2205 | cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); | ||
2206 | add_timer(&cfqd->idle_slice_timer); | ||
2207 | goto out_cont; | ||
2208 | } | ||
2209 | |||
2210 | /* | ||
2211 | * not expired and it has a request pending, let it dispatch | ||
2212 | */ | ||
2213 | if (!RB_EMPTY(&cfqq->sort_list)) { | ||
2214 | cfq_mark_cfqq_must_dispatch(cfqq); | ||
2215 | goto out_kick; | ||
2216 | } | ||
2217 | } | ||
2218 | expire: | ||
2219 | cfq_slice_expired(cfqd, 0); | ||
2220 | out_kick: | ||
2221 | cfq_schedule_dispatch(cfqd); | ||
2222 | out_cont: | ||
2223 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | ||
2224 | } | ||
2225 | |||
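Both timers gate their work on jiffies comparisons such as time_after(now, cfqq->slice_end). The kernel macro stays correct across counter wraparound by comparing through a signed subtraction rather than a plain `<`; a small standalone demonstration (the macro body matches the kernel's definition minus its type checking, the rest is scaffolding):

```c
/* Wraparound-safe time comparison, as used for the slice_end checks above. */
#include <stdio.h>

/* Same comparison as the kernel's time_after(a, b): true if a is after b,
 * even when the tick counter has wrapped between the two samples. */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long slice_end = (unsigned long)-10; /* 10 ticks before the wrap */
    unsigned long now       = 5;                  /* 15 ticks later, after it  */

    printf("naive now > slice_end: %d\n", now > slice_end);            /* 0: wrong   */
    printf("time_after(now, end) : %d\n", time_after(now, slice_end)); /* 1: correct */
    return 0;
}
```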
2226 | /* | ||
2227 | * Timer running if an idle class queue is waiting for service | ||
2228 | */ | ||
2229 | static void cfq_idle_class_timer(unsigned long data) | ||
2230 | { | ||
2231 | struct cfq_data *cfqd = (struct cfq_data *) data; | ||
2232 | unsigned long flags, end; | ||
2233 | |||
2234 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); | ||
2235 | |||
2236 | /* | ||
2237 | * race with a non-idle queue, reset timer | ||
2238 | */ | ||
2239 | end = cfqd->last_end_request + CFQ_IDLE_GRACE; | ||
2240 | if (!time_after_eq(jiffies, end)) { | ||
2241 | cfqd->idle_class_timer.expires = end; | ||
2242 | add_timer(&cfqd->idle_class_timer); | ||
2243 | } else | ||
2244 | cfq_schedule_dispatch(cfqd); | ||
2245 | |||
2246 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | ||
2247 | } | ||
2248 | |||
2249 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | ||
2250 | { | ||
2251 | del_timer_sync(&cfqd->idle_slice_timer); | ||
2252 | del_timer_sync(&cfqd->idle_class_timer); | ||
2253 | blk_sync_queue(cfqd->queue); | ||
2254 | } | ||
2255 | |||
1481 | static void cfq_put_cfqd(struct cfq_data *cfqd) | 2256 | static void cfq_put_cfqd(struct cfq_data *cfqd) |
1482 | { | 2257 | { |
1483 | request_queue_t *q = cfqd->queue; | 2258 | request_queue_t *q = cfqd->queue; |
@@ -1487,6 +2262,9 @@ static void cfq_put_cfqd(struct cfq_data *cfqd) | |||
1487 | 2262 | ||
1488 | blk_put_queue(q); | 2263 | blk_put_queue(q); |
1489 | 2264 | ||
2265 | cfq_shutdown_timer_wq(cfqd); | ||
2266 | q->elevator->elevator_data = NULL; | ||
2267 | |||
1490 | mempool_destroy(cfqd->crq_pool); | 2268 | mempool_destroy(cfqd->crq_pool); |
1491 | kfree(cfqd->crq_hash); | 2269 | kfree(cfqd->crq_hash); |
1492 | kfree(cfqd->cfq_hash); | 2270 | kfree(cfqd->cfq_hash); |
@@ -1495,7 +2273,10 @@ static void cfq_put_cfqd(struct cfq_data *cfqd) | |||
1495 | 2273 | ||
1496 | static void cfq_exit_queue(elevator_t *e) | 2274 | static void cfq_exit_queue(elevator_t *e) |
1497 | { | 2275 | { |
1498 | cfq_put_cfqd(e->elevator_data); | 2276 | struct cfq_data *cfqd = e->elevator_data; |
2277 | |||
2278 | cfq_shutdown_timer_wq(cfqd); | ||
2279 | cfq_put_cfqd(cfqd); | ||
1499 | } | 2280 | } |
1500 | 2281 | ||
1501 | static int cfq_init_queue(request_queue_t *q, elevator_t *e) | 2282 | static int cfq_init_queue(request_queue_t *q, elevator_t *e) |
@@ -1508,7 +2289,13 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
1508 | return -ENOMEM; | 2289 | return -ENOMEM; |
1509 | 2290 | ||
1510 | memset(cfqd, 0, sizeof(*cfqd)); | 2291 | memset(cfqd, 0, sizeof(*cfqd)); |
1511 | INIT_LIST_HEAD(&cfqd->rr_list); | 2292 | |
2293 | for (i = 0; i < CFQ_PRIO_LISTS; i++) | ||
2294 | INIT_LIST_HEAD(&cfqd->rr_list[i]); | ||
2295 | |||
2296 | INIT_LIST_HEAD(&cfqd->busy_rr); | ||
2297 | INIT_LIST_HEAD(&cfqd->cur_rr); | ||
2298 | INIT_LIST_HEAD(&cfqd->idle_rr); | ||
1512 | INIT_LIST_HEAD(&cfqd->empty_list); | 2299 | INIT_LIST_HEAD(&cfqd->empty_list); |
1513 | 2300 | ||
1514 | cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); | 2301 | cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); |
@@ -1533,24 +2320,32 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
1533 | cfqd->queue = q; | 2320 | cfqd->queue = q; |
1534 | atomic_inc(&q->refcnt); | 2321 | atomic_inc(&q->refcnt); |
1535 | 2322 | ||
1536 | /* | 2323 | cfqd->max_queued = q->nr_requests / 4; |
1537 | * just set it to some high value, we want anyone to be able to queue | ||
1538 | * some requests. fairness is handled differently | ||
1539 | */ | ||
1540 | q->nr_requests = 1024; | ||
1541 | cfqd->max_queued = q->nr_requests / 16; | ||
1542 | q->nr_batching = cfq_queued; | 2324 | q->nr_batching = cfq_queued; |
1543 | cfqd->key_type = CFQ_KEY_TGID; | 2325 | |
1544 | cfqd->find_best_crq = 1; | 2326 | init_timer(&cfqd->idle_slice_timer); |
2327 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; | ||
2328 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; | ||
2329 | |||
2330 | init_timer(&cfqd->idle_class_timer); | ||
2331 | cfqd->idle_class_timer.function = cfq_idle_class_timer; | ||
2332 | cfqd->idle_class_timer.data = (unsigned long) cfqd; | ||
2333 | |||
2334 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); | ||
2335 | |||
1545 | atomic_set(&cfqd->ref, 1); | 2336 | atomic_set(&cfqd->ref, 1); |
1546 | 2337 | ||
1547 | cfqd->cfq_queued = cfq_queued; | 2338 | cfqd->cfq_queued = cfq_queued; |
1548 | cfqd->cfq_quantum = cfq_quantum; | 2339 | cfqd->cfq_quantum = cfq_quantum; |
1549 | cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r; | 2340 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; |
1550 | cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w; | 2341 | cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; |
1551 | cfqd->cfq_fifo_batch_expire = cfq_fifo_rate; | ||
1552 | cfqd->cfq_back_max = cfq_back_max; | 2342 | cfqd->cfq_back_max = cfq_back_max; |
1553 | cfqd->cfq_back_penalty = cfq_back_penalty; | 2343 | cfqd->cfq_back_penalty = cfq_back_penalty; |
2344 | cfqd->cfq_slice[0] = cfq_slice_async; | ||
2345 | cfqd->cfq_slice[1] = cfq_slice_sync; | ||
2346 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; | ||
2347 | cfqd->cfq_slice_idle = cfq_slice_idle; | ||
2348 | cfqd->cfq_max_depth = cfq_max_depth; | ||
1554 | 2349 | ||
1555 | return 0; | 2350 | return 0; |
1556 | out_crqpool: | 2351 | out_crqpool: |
@@ -1595,7 +2390,6 @@ fail: | |||
1595 | return -ENOMEM; | 2390 | return -ENOMEM; |
1596 | } | 2391 | } |
1597 | 2392 | ||
1598 | |||
1599 | /* | 2393 | /* |
1600 | * sysfs parts below --> | 2394 | * sysfs parts below --> |
1601 | */ | 2395 | */ |
@@ -1620,45 +2414,6 @@ cfq_var_store(unsigned int *var, const char *page, size_t count) | |||
1620 | return count; | 2414 | return count; |
1621 | } | 2415 | } |
1622 | 2416 | ||
1623 | static ssize_t | ||
1624 | cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count) | ||
1625 | { | ||
1626 | max_elapsed_dispatch = max_elapsed_crq = 0; | ||
1627 | return count; | ||
1628 | } | ||
1629 | |||
1630 | static ssize_t | ||
1631 | cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count) | ||
1632 | { | ||
1633 | spin_lock_irq(cfqd->queue->queue_lock); | ||
1634 | if (!strncmp(page, "pgid", 4)) | ||
1635 | cfqd->key_type = CFQ_KEY_PGID; | ||
1636 | else if (!strncmp(page, "tgid", 4)) | ||
1637 | cfqd->key_type = CFQ_KEY_TGID; | ||
1638 | else if (!strncmp(page, "uid", 3)) | ||
1639 | cfqd->key_type = CFQ_KEY_UID; | ||
1640 | else if (!strncmp(page, "gid", 3)) | ||
1641 | cfqd->key_type = CFQ_KEY_GID; | ||
1642 | spin_unlock_irq(cfqd->queue->queue_lock); | ||
1643 | return count; | ||
1644 | } | ||
1645 | |||
1646 | static ssize_t | ||
1647 | cfq_read_key_type(struct cfq_data *cfqd, char *page) | ||
1648 | { | ||
1649 | ssize_t len = 0; | ||
1650 | int i; | ||
1651 | |||
1652 | for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) { | ||
1653 | if (cfqd->key_type == i) | ||
1654 | len += sprintf(page+len, "[%s] ", cfq_key_types[i]); | ||
1655 | else | ||
1656 | len += sprintf(page+len, "%s ", cfq_key_types[i]); | ||
1657 | } | ||
1658 | len += sprintf(page+len, "\n"); | ||
1659 | return len; | ||
1660 | } | ||
1661 | |||
1662 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | 2417 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
1663 | static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ | 2418 | static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ |
1664 | { \ | 2419 | { \ |
@@ -1669,12 +2424,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ | |||
1669 | } | 2424 | } |
1670 | SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); | 2425 | SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); |
1671 | SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); | 2426 | SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); |
1672 | SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1); | 2427 | SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); |
1673 | SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1); | 2428 | SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); |
1674 | SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1); | ||
1675 | SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0); | ||
1676 | SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); | 2429 | SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); |
1677 | SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); | 2430 | SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); |
2431 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); | ||
2432 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); | ||
2433 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | ||
2434 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); | ||
2435 | SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); | ||
1678 | #undef SHOW_FUNCTION | 2436 | #undef SHOW_FUNCTION |
1679 | 2437 | ||
1680 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 2438 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
@@ -1694,12 +2452,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \ | |||
1694 | } | 2452 | } |
1695 | STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); | 2453 | STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); |
1696 | STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); | 2454 | STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); |
1697 | STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1); | 2455 | STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); |
1698 | STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1); | 2456 | STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); |
1699 | STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1); | ||
1700 | STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0); | ||
1701 | STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); | 2457 | STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); |
1702 | STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); | 2458 | STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); |
2459 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); | ||
2460 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | ||
2461 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | ||
2462 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); | ||
2463 | STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); | ||
1703 | #undef STORE_FUNCTION | 2464 | #undef STORE_FUNCTION |
1704 | 2465 | ||
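The SHOW_FUNCTION/STORE_FUNCTION macros above stamp out one sysfs accessor pair per tunable, with __CONV selecting a jiffies-to-milliseconds conversion for the time-based ones. A user-space sketch of the same code-generation pattern; the HZ value, struct and tunable names here are invented for the example, and the conversion is written out inline rather than using the kernel helpers:

```c
/* Macro-generated show/store accessors, modeled on the sysfs glue above. */
#include <stdio.h>
#include <stdlib.h>

#define HZ 250  /* pretend tick rate for the ticks<->ms conversion */

struct tunables {
    unsigned int quantum;     /* plain count, no conversion */
    unsigned int slice_sync;  /* stored in ticks, shown in milliseconds */
};

#define SHOW_FUNCTION(name, field, conv)                         \
static int name(struct tunables *t, char *page, size_t len)      \
{                                                                 \
    unsigned int v = t->field;                                    \
    if (conv)                                                     \
        v = v * 1000 / HZ;   /* ticks -> ms */                    \
    return snprintf(page, len, "%u\n", v);                        \
}

#define STORE_FUNCTION(name, field, conv)                         \
static void name(struct tunables *t, const char *page)            \
{                                                                 \
    unsigned int v = (unsigned int)strtoul(page, NULL, 10);       \
    if (conv)                                                     \
        v = v * HZ / 1000;   /* ms -> ticks */                    \
    t->field = v;                                                 \
}

SHOW_FUNCTION(quantum_show, quantum, 0)
STORE_FUNCTION(quantum_store, quantum, 0)
SHOW_FUNCTION(slice_sync_show, slice_sync, 1)
STORE_FUNCTION(slice_sync_store, slice_sync, 1)

int main(void)
{
    struct tunables t = { .quantum = 4, .slice_sync = 25 }; /* 25 ticks = 100 ms */
    char buf[32];

    quantum_show(&t, buf, sizeof(buf));    printf("quantum:    %s", buf);
    slice_sync_show(&t, buf, sizeof(buf)); printf("slice_sync: %s", buf);
    slice_sync_store(&t, "200");           /* 200 ms -> 50 ticks */
    printf("stored slice_sync = %u ticks\n", t.slice_sync);
    return 0;
}
```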
1705 | static struct cfq_fs_entry cfq_quantum_entry = { | 2466 | static struct cfq_fs_entry cfq_quantum_entry = { |
@@ -1712,25 +2473,15 @@ static struct cfq_fs_entry cfq_queued_entry = { | |||
1712 | .show = cfq_queued_show, | 2473 | .show = cfq_queued_show, |
1713 | .store = cfq_queued_store, | 2474 | .store = cfq_queued_store, |
1714 | }; | 2475 | }; |
1715 | static struct cfq_fs_entry cfq_fifo_expire_r_entry = { | 2476 | static struct cfq_fs_entry cfq_fifo_expire_sync_entry = { |
1716 | .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, | 2477 | .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, |
1717 | .show = cfq_fifo_expire_r_show, | 2478 | .show = cfq_fifo_expire_sync_show, |
1718 | .store = cfq_fifo_expire_r_store, | 2479 | .store = cfq_fifo_expire_sync_store, |
1719 | }; | 2480 | }; |
1720 | static struct cfq_fs_entry cfq_fifo_expire_w_entry = { | 2481 | static struct cfq_fs_entry cfq_fifo_expire_async_entry = { |
1721 | .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR }, | 2482 | .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR }, |
1722 | .show = cfq_fifo_expire_w_show, | 2483 | .show = cfq_fifo_expire_async_show, |
1723 | .store = cfq_fifo_expire_w_store, | 2484 | .store = cfq_fifo_expire_async_store, |
1724 | }; | ||
1725 | static struct cfq_fs_entry cfq_fifo_batch_expire_entry = { | ||
1726 | .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR }, | ||
1727 | .show = cfq_fifo_batch_expire_show, | ||
1728 | .store = cfq_fifo_batch_expire_store, | ||
1729 | }; | ||
1730 | static struct cfq_fs_entry cfq_find_best_entry = { | ||
1731 | .attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR }, | ||
1732 | .show = cfq_find_best_show, | ||
1733 | .store = cfq_find_best_store, | ||
1734 | }; | 2485 | }; |
1735 | static struct cfq_fs_entry cfq_back_max_entry = { | 2486 | static struct cfq_fs_entry cfq_back_max_entry = { |
1736 | .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, | 2487 | .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, |
@@ -1742,27 +2493,44 @@ static struct cfq_fs_entry cfq_back_penalty_entry = { | |||
1742 | .show = cfq_back_penalty_show, | 2493 | .show = cfq_back_penalty_show, |
1743 | .store = cfq_back_penalty_store, | 2494 | .store = cfq_back_penalty_store, |
1744 | }; | 2495 | }; |
1745 | static struct cfq_fs_entry cfq_clear_elapsed_entry = { | 2496 | static struct cfq_fs_entry cfq_slice_sync_entry = { |
1746 | .attr = {.name = "clear_elapsed", .mode = S_IWUSR }, | 2497 | .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR }, |
1747 | .store = cfq_clear_elapsed, | 2498 | .show = cfq_slice_sync_show, |
2499 | .store = cfq_slice_sync_store, | ||
2500 | }; | ||
2501 | static struct cfq_fs_entry cfq_slice_async_entry = { | ||
2502 | .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR }, | ||
2503 | .show = cfq_slice_async_show, | ||
2504 | .store = cfq_slice_async_store, | ||
1748 | }; | 2505 | }; |
1749 | static struct cfq_fs_entry cfq_key_type_entry = { | 2506 | static struct cfq_fs_entry cfq_slice_async_rq_entry = { |
1750 | .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR }, | 2507 | .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR }, |
1751 | .show = cfq_read_key_type, | 2508 | .show = cfq_slice_async_rq_show, |
1752 | .store = cfq_set_key_type, | 2509 | .store = cfq_slice_async_rq_store, |
2510 | }; | ||
2511 | static struct cfq_fs_entry cfq_slice_idle_entry = { | ||
2512 | .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR }, | ||
2513 | .show = cfq_slice_idle_show, | ||
2514 | .store = cfq_slice_idle_store, | ||
2515 | }; | ||
2516 | static struct cfq_fs_entry cfq_max_depth_entry = { | ||
2517 | .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR }, | ||
2518 | .show = cfq_max_depth_show, | ||
2519 | .store = cfq_max_depth_store, | ||
1753 | }; | 2520 | }; |
1754 | 2521 | ||
1755 | static struct attribute *default_attrs[] = { | 2522 | static struct attribute *default_attrs[] = { |
1756 | &cfq_quantum_entry.attr, | 2523 | &cfq_quantum_entry.attr, |
1757 | &cfq_queued_entry.attr, | 2524 | &cfq_queued_entry.attr, |
1758 | &cfq_fifo_expire_r_entry.attr, | 2525 | &cfq_fifo_expire_sync_entry.attr, |
1759 | &cfq_fifo_expire_w_entry.attr, | 2526 | &cfq_fifo_expire_async_entry.attr, |
1760 | &cfq_fifo_batch_expire_entry.attr, | ||
1761 | &cfq_key_type_entry.attr, | ||
1762 | &cfq_find_best_entry.attr, | ||
1763 | &cfq_back_max_entry.attr, | 2527 | &cfq_back_max_entry.attr, |
1764 | &cfq_back_penalty_entry.attr, | 2528 | &cfq_back_penalty_entry.attr, |
1765 | &cfq_clear_elapsed_entry.attr, | 2529 | &cfq_slice_sync_entry.attr, |
2530 | &cfq_slice_async_entry.attr, | ||
2531 | &cfq_slice_async_rq_entry.attr, | ||
2532 | &cfq_slice_idle_entry.attr, | ||
2533 | &cfq_max_depth_entry.attr, | ||
1766 | NULL, | 2534 | NULL, |
1767 | }; | 2535 | }; |
1768 | 2536 | ||
@@ -1775,7 +2543,7 @@ cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
1775 | struct cfq_fs_entry *entry = to_cfq(attr); | 2543 | struct cfq_fs_entry *entry = to_cfq(attr); |
1776 | 2544 | ||
1777 | if (!entry->show) | 2545 | if (!entry->show) |
1778 | return 0; | 2546 | return -EIO; |
1779 | 2547 | ||
1780 | return entry->show(e->elevator_data, page); | 2548 | return entry->show(e->elevator_data, page); |
1781 | } | 2549 | } |
@@ -1788,7 +2556,7 @@ cfq_attr_store(struct kobject *kobj, struct attribute *attr, | |||
1788 | struct cfq_fs_entry *entry = to_cfq(attr); | 2556 | struct cfq_fs_entry *entry = to_cfq(attr); |
1789 | 2557 | ||
1790 | if (!entry->store) | 2558 | if (!entry->store) |
1791 | return -EINVAL; | 2559 | return -EIO; |
1792 | 2560 | ||
1793 | return entry->store(e->elevator_data, page, length); | 2561 | return entry->store(e->elevator_data, page, length); |
1794 | } | 2562 | } |
@@ -1832,21 +2600,46 @@ static int __init cfq_init(void) | |||
1832 | { | 2600 | { |
1833 | int ret; | 2601 | int ret; |
1834 | 2602 | ||
2603 | /* | ||
2604 | * could be 0 on HZ < 1000 setups | ||
2605 | */ | ||
2606 | if (!cfq_slice_async) | ||
2607 | cfq_slice_async = 1; | ||
2608 | if (!cfq_slice_idle) | ||
2609 | cfq_slice_idle = 1; | ||
2610 | |||
1835 | if (cfq_slab_setup()) | 2611 | if (cfq_slab_setup()) |
1836 | return -ENOMEM; | 2612 | return -ENOMEM; |
1837 | 2613 | ||
1838 | ret = elv_register(&iosched_cfq); | 2614 | ret = elv_register(&iosched_cfq); |
1839 | if (!ret) { | 2615 | if (ret) |
1840 | __module_get(THIS_MODULE); | 2616 | cfq_slab_kill(); |
1841 | return 0; | ||
1842 | } | ||
1843 | 2617 | ||
1844 | cfq_slab_kill(); | ||
1845 | return ret; | 2618 | return ret; |
1846 | } | 2619 | } |
1847 | 2620 | ||
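cfq_init() clamps cfq_slice_async and cfq_slice_idle to at least one tick because a small millisecond default converted with truncating integer arithmetic becomes 0 when HZ is below 1000, which would silently disable idling. A quick standalone illustration of where the 0 comes from (the 8 ms figure is an arbitrary example, not necessarily the real default):

```c
/* Why small millisecond defaults need an "at least 1 tick" clamp on HZ < 1000. */
#include <stdio.h>

static unsigned long ms_to_ticks(unsigned long ms, unsigned long hz)
{
    return ms * hz / 1000;   /* truncating conversion from ms to ticks */
}

int main(void)
{
    unsigned long hz_values[] = { 100, 250, 1000 };

    for (int i = 0; i < 3; i++) {
        unsigned long hz = hz_values[i];
        unsigned long ticks = ms_to_ticks(8, hz);   /* e.g. an 8 ms idle window */

        if (!ticks)
            ticks = 1;                              /* the clamp cfq_init applies */
        printf("HZ=%-4lu: 8 ms -> %lu tick(s)\n", hz, ticks);
    }
    return 0;
}
```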
1848 | static void __exit cfq_exit(void) | 2621 | static void __exit cfq_exit(void) |
1849 | { | 2622 | { |
2623 | struct task_struct *g, *p; | ||
2624 | unsigned long flags; | ||
2625 | |||
2626 | read_lock_irqsave(&tasklist_lock, flags); | ||
2627 | |||
2628 | /* | ||
2629 | * iterate each process in the system, removing our io_context | ||
2630 | */ | ||
2631 | do_each_thread(g, p) { | ||
2632 | struct io_context *ioc = p->io_context; | ||
2633 | |||
2634 | if (ioc && ioc->cic) { | ||
2635 | ioc->cic->exit(ioc->cic); | ||
2636 | cfq_free_io_context(ioc->cic); | ||
2637 | ioc->cic = NULL; | ||
2638 | } | ||
2639 | } while_each_thread(g, p); | ||
2640 | |||
2641 | read_unlock_irqrestore(&tasklist_lock, flags); | ||
2642 | |||
1850 | cfq_slab_kill(); | 2643 | cfq_slab_kill(); |
1851 | elv_unregister(&iosched_cfq); | 2644 | elv_unregister(&iosched_cfq); |
1852 | } | 2645 | } |
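cfq_exit() now walks every task under tasklist_lock, invokes the scheduler-specific exit hook on any attached io context and frees it, so the module can unload without leaving per-task pointers dangling. A user-space model of the same walk-and-detach pattern; the task list, context type and hook are all invented for the sketch:

```c
/* Walk a task list and tear down per-task scheduler contexts, modeled on
 * the cfq_exit() loop above. All types here are stand-ins, not kernel ones. */
#include <stdio.h>
#include <stdlib.h>

struct sched_ctx {
    void (*exit)(struct sched_ctx *ctx);  /* scheduler-specific exit hook */
    int id;
};

struct task {
    struct sched_ctx *ctx;   /* NULL if the task never did any I/O */
    struct task *next;
};

static void ctx_exit(struct sched_ctx *ctx)
{
    printf("exiting context %d\n", ctx->id);
}

static void teardown_all(struct task *head)
{
    for (struct task *t = head; t; t = t->next) {
        if (t->ctx) {
            t->ctx->exit(t->ctx);  /* let the owner drop its references */
            free(t->ctx);          /* then release the context itself   */
            t->ctx = NULL;         /* and leave nothing dangling behind */
        }
    }
}

int main(void)
{
    struct task tasks[3] = {
        { .next = &tasks[1] },
        { .next = &tasks[2] },
        { .next = NULL },
    };

    tasks[1].ctx = malloc(sizeof(*tasks[1].ctx));
    if (tasks[1].ctx) {
        tasks[1].ctx->exit = ctx_exit;
        tasks[1].ctx->id = 42;
    }

    teardown_all(&tasks[0]);
    return 0;
}
```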
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c index d63d34c671f7..ff5201e02153 100644 --- a/drivers/block/deadline-iosched.c +++ b/drivers/block/deadline-iosched.c | |||
@@ -711,18 +711,20 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e) | |||
711 | if (!drq_pool) | 711 | if (!drq_pool) |
712 | return -ENOMEM; | 712 | return -ENOMEM; |
713 | 713 | ||
714 | dd = kmalloc(sizeof(*dd), GFP_KERNEL); | 714 | dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node); |
715 | if (!dd) | 715 | if (!dd) |
716 | return -ENOMEM; | 716 | return -ENOMEM; |
717 | memset(dd, 0, sizeof(*dd)); | 717 | memset(dd, 0, sizeof(*dd)); |
718 | 718 | ||
719 | dd->hash = kmalloc(sizeof(struct list_head)*DL_HASH_ENTRIES,GFP_KERNEL); | 719 | dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES, |
720 | GFP_KERNEL, q->node); | ||
720 | if (!dd->hash) { | 721 | if (!dd->hash) { |
721 | kfree(dd); | 722 | kfree(dd); |
722 | return -ENOMEM; | 723 | return -ENOMEM; |
723 | } | 724 | } |
724 | 725 | ||
725 | dd->drq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, drq_pool); | 726 | dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, |
727 | mempool_free_slab, drq_pool, q->node); | ||
726 | if (!dd->drq_pool) { | 728 | if (!dd->drq_pool) { |
727 | kfree(dd->hash); | 729 | kfree(dd->hash); |
728 | kfree(dd); | 730 | kfree(dd); |
@@ -758,7 +760,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq) | |||
758 | } | 760 | } |
759 | 761 | ||
760 | static int | 762 | static int |
761 | deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask) | 763 | deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, |
764 | int gfp_mask) | ||
762 | { | 765 | { |
763 | struct deadline_data *dd = q->elevator->elevator_data; | 766 | struct deadline_data *dd = q->elevator->elevator_data; |
764 | struct deadline_rq *drq; | 767 | struct deadline_rq *drq; |
@@ -886,7 +889,7 @@ deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
886 | struct deadline_fs_entry *entry = to_deadline(attr); | 889 | struct deadline_fs_entry *entry = to_deadline(attr); |
887 | 890 | ||
888 | if (!entry->show) | 891 | if (!entry->show) |
889 | return 0; | 892 | return -EIO; |
890 | 893 | ||
891 | return entry->show(e->elevator_data, page); | 894 | return entry->show(e->elevator_data, page); |
892 | } | 895 | } |
@@ -899,7 +902,7 @@ deadline_attr_store(struct kobject *kobj, struct attribute *attr, | |||
899 | struct deadline_fs_entry *entry = to_deadline(attr); | 902 | struct deadline_fs_entry *entry = to_deadline(attr); |
900 | 903 | ||
901 | if (!entry->store) | 904 | if (!entry->store) |
902 | return -EINVAL; | 905 | return -EIO; |
903 | 906 | ||
904 | return entry->store(e->elevator_data, page, length); | 907 | return entry->store(e->elevator_data, page, length); |
905 | } | 908 | } |
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c index f831f08f839c..98f0126a2deb 100644 --- a/drivers/block/elevator.c +++ b/drivers/block/elevator.c | |||
@@ -486,12 +486,13 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq) | |||
486 | return NULL; | 486 | return NULL; |
487 | } | 487 | } |
488 | 488 | ||
489 | int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask) | 489 | int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio, |
490 | int gfp_mask) | ||
490 | { | 491 | { |
491 | elevator_t *e = q->elevator; | 492 | elevator_t *e = q->elevator; |
492 | 493 | ||
493 | if (e->ops->elevator_set_req_fn) | 494 | if (e->ops->elevator_set_req_fn) |
494 | return e->ops->elevator_set_req_fn(q, rq, gfp_mask); | 495 | return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask); |
495 | 496 | ||
496 | rq->elevator_private = NULL; | 497 | rq->elevator_private = NULL; |
497 | return 0; | 498 | return 0; |
@@ -505,12 +506,12 @@ void elv_put_request(request_queue_t *q, struct request *rq) | |||
505 | e->ops->elevator_put_req_fn(q, rq); | 506 | e->ops->elevator_put_req_fn(q, rq); |
506 | } | 507 | } |
507 | 508 | ||
508 | int elv_may_queue(request_queue_t *q, int rw) | 509 | int elv_may_queue(request_queue_t *q, int rw, struct bio *bio) |
509 | { | 510 | { |
510 | elevator_t *e = q->elevator; | 511 | elevator_t *e = q->elevator; |
511 | 512 | ||
512 | if (e->ops->elevator_may_queue_fn) | 513 | if (e->ops->elevator_may_queue_fn) |
513 | return e->ops->elevator_may_queue_fn(q, rw); | 514 | return e->ops->elevator_may_queue_fn(q, rw, bio); |
514 | 515 | ||
515 | return ELV_MQUEUE_MAY; | 516 | return ELV_MQUEUE_MAY; |
516 | } | 517 | } |
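The elevator.c hunks thread the submitting bio through elv_set_request() and elv_may_queue() so a scheduler can inspect it before a request even exists, while schedulers that leave the hook NULL still get a sane default answer. A condensed user-space sketch of that optional-hook dispatch (the names, numeric values and admission policy are illustrative):

```c
/* Optional-ops dispatch with a default result, mirroring elv_may_queue(). */
#include <stdio.h>

#define MQUEUE_MAY 1   /* demo value standing in for ELV_MQUEUE_MAY */

struct bio_stub { int sync; };
struct elevator_ops {
    /* NULL means "scheduler has no opinion", like a missing may_queue hook */
    int (*may_queue)(int rw, struct bio_stub *bio);
};

static int my_may_queue(int rw, struct bio_stub *bio)
{
    /* pretend scheduler: always admit sync bios, be stingy with async writes */
    return (bio && bio->sync) || rw == 0;
}

static int elv_may_queue_stub(struct elevator_ops *ops, int rw, struct bio_stub *bio)
{
    if (ops->may_queue)
        return ops->may_queue(rw, bio);
    return MQUEUE_MAY;          /* no hook: default to "may queue" */
}

int main(void)
{
    struct elevator_ops noop  = { NULL };
    struct elevator_ops fancy = { my_may_queue };
    struct bio_stub async_write = { 0 };

    printf("noop scheduler : %d\n", elv_may_queue_stub(&noop, 1, &async_write));  /* 1 */
    printf("fancy scheduler: %d\n", elv_may_queue_stub(&fancy, 1, &async_write)); /* 0 */
    return 0;
}
```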
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c index 8bbe01d4b487..47fd3659a061 100644 --- a/drivers/block/genhd.c +++ b/drivers/block/genhd.c | |||
@@ -40,7 +40,7 @@ static inline int major_to_index(int major) | |||
40 | 40 | ||
41 | #ifdef CONFIG_PROC_FS | 41 | #ifdef CONFIG_PROC_FS |
42 | /* get block device names in somewhat random order */ | 42 | /* get block device names in somewhat random order */ |
43 | int get_blkdev_list(char *p) | 43 | int get_blkdev_list(char *p, int used) |
44 | { | 44 | { |
45 | struct blk_major_name *n; | 45 | struct blk_major_name *n; |
46 | int i, len; | 46 | int i, len; |
@@ -49,10 +49,18 @@ int get_blkdev_list(char *p) | |||
49 | 49 | ||
50 | down(&block_subsys_sem); | 50 | down(&block_subsys_sem); |
51 | for (i = 0; i < ARRAY_SIZE(major_names); i++) { | 51 | for (i = 0; i < ARRAY_SIZE(major_names); i++) { |
52 | for (n = major_names[i]; n; n = n->next) | 52 | for (n = major_names[i]; n; n = n->next) { |
53 | /* | ||
54 | * If the current string plus the 5 extra characters | ||
55 | * in the line would run us off the page, then we're done | ||
56 | */ | ||
57 | if ((len + used + strlen(n->name) + 5) >= PAGE_SIZE) | ||
58 | goto page_full; | ||
53 | len += sprintf(p+len, "%3d %s\n", | 59 | len += sprintf(p+len, "%3d %s\n", |
54 | n->major, n->name); | 60 | n->major, n->name); |
61 | } | ||
55 | } | 62 | } |
63 | page_full: | ||
56 | up(&block_subsys_sem); | 64 | up(&block_subsys_sem); |
57 | 65 | ||
58 | return len; | 66 | return len; |
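get_blkdev_list() now stops appending entries once the next line could run past the single page it is given, budgeting the caller's already-used bytes plus a fixed 5-character allowance that presumably covers the "%3d " prefix and the newline. A standalone sketch of the same bounded-append pattern, with the page size shrunk so the cutoff actually triggers:

```c
/* Stop filling a fixed-size buffer before the next formatted line overflows,
 * modeled on the page_full check above. Sizes are scaled down for the demo. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 48   /* deliberately tiny so the limit is hit quickly */

int main(void)
{
    const char *names[] = { "ide0", "sd", "md", "floppy", "loop", "aoe", "cciss" };
    char page[PAGE_SIZE];
    int len = 0, used = 0;   /* 'used' = bytes already consumed by the caller */

    page[0] = '\0';
    for (int i = 0; i < 7; i++) {
        /* 5 = up-to-3-digit major + space + newline, as in the kernel check */
        if (len + used + (int)strlen(names[i]) + 5 >= PAGE_SIZE) {
            printf("-- page full, stopping before \"%s\" --\n", names[i]);
            break;
        }
        len += sprintf(page + len, "%3d %s\n", i + 1, names[i]);
    }
    fputs(page, stdout);
    printf("used %d of %d bytes\n", len, PAGE_SIZE);
    return 0;
}
```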
@@ -322,7 +330,7 @@ static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr, | |||
322 | struct gendisk *disk = to_disk(kobj); | 330 | struct gendisk *disk = to_disk(kobj); |
323 | struct disk_attribute *disk_attr = | 331 | struct disk_attribute *disk_attr = |
324 | container_of(attr,struct disk_attribute,attr); | 332 | container_of(attr,struct disk_attribute,attr); |
325 | ssize_t ret = 0; | 333 | ssize_t ret = -EIO; |
326 | 334 | ||
327 | if (disk_attr->show) | 335 | if (disk_attr->show) |
328 | ret = disk_attr->show(disk,page); | 336 | ret = disk_attr->show(disk,page); |
@@ -582,10 +590,16 @@ struct seq_operations diskstats_op = { | |||
582 | .show = diskstats_show | 590 | .show = diskstats_show |
583 | }; | 591 | }; |
584 | 592 | ||
585 | |||
586 | struct gendisk *alloc_disk(int minors) | 593 | struct gendisk *alloc_disk(int minors) |
587 | { | 594 | { |
588 | struct gendisk *disk = kmalloc(sizeof(struct gendisk), GFP_KERNEL); | 595 | return alloc_disk_node(minors, -1); |
596 | } | ||
597 | |||
598 | struct gendisk *alloc_disk_node(int minors, int node_id) | ||
599 | { | ||
600 | struct gendisk *disk; | ||
601 | |||
602 | disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id); | ||
589 | if (disk) { | 603 | if (disk) { |
590 | memset(disk, 0, sizeof(struct gendisk)); | 604 | memset(disk, 0, sizeof(struct gendisk)); |
591 | if (!init_disk_stats(disk)) { | 605 | if (!init_disk_stats(disk)) { |
@@ -594,7 +608,7 @@ struct gendisk *alloc_disk(int minors) | |||
594 | } | 608 | } |
595 | if (minors > 1) { | 609 | if (minors > 1) { |
596 | int size = (minors - 1) * sizeof(struct hd_struct *); | 610 | int size = (minors - 1) * sizeof(struct hd_struct *); |
597 | disk->part = kmalloc(size, GFP_KERNEL); | 611 | disk->part = kmalloc_node(size, GFP_KERNEL, node_id); |
598 | if (!disk->part) { | 612 | if (!disk->part) { |
599 | kfree(disk); | 613 | kfree(disk); |
600 | return NULL; | 614 | return NULL; |
@@ -610,6 +624,7 @@ struct gendisk *alloc_disk(int minors) | |||
610 | } | 624 | } |
611 | 625 | ||
612 | EXPORT_SYMBOL(alloc_disk); | 626 | EXPORT_SYMBOL(alloc_disk); |
627 | EXPORT_SYMBOL(alloc_disk_node); | ||
613 | 628 | ||
614 | struct kobject *get_disk(struct gendisk *disk) | 629 | struct kobject *get_disk(struct gendisk *disk) |
615 | { | 630 | { |
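alloc_disk() becomes a thin wrapper over a node-aware alloc_disk_node(), with -1 meaning "no NUMA preference"; blk_alloc_queue()/blk_init_queue() receive the same treatment later in this patch. A user-space sketch of keeping the old entry point while adding the node-qualified variant (plain malloc stands in for kmalloc_node(), and the struct is a stub):

```c
/* Old API preserved as a wrapper over a new node-aware variant,
 * in the style of alloc_disk()/alloc_disk_node(). */
#include <stdio.h>
#include <stdlib.h>

struct gendisk_stub { int minors; int node; };

/* New entry point: callers that know their NUMA node pass it in. */
static struct gendisk_stub *alloc_disk_node_stub(int minors, int node_id)
{
    struct gendisk_stub *d = malloc(sizeof(*d)); /* kmalloc_node() in the kernel */
    if (!d)
        return NULL;
    d->minors = minors;
    d->node = node_id;          /* -1 == no preference */
    return d;
}

/* Old entry point: unchanged signature, forwards with "no preference". */
static struct gendisk_stub *alloc_disk_stub(int minors)
{
    return alloc_disk_node_stub(minors, -1);
}

int main(void)
{
    struct gendisk_stub *a = alloc_disk_stub(16);
    struct gendisk_stub *b = alloc_disk_node_stub(16, 1);

    if (!a || !b)
        return 1;
    printf("legacy caller  -> node %d\n", a->node);
    printf("node-aware one -> node %d\n", b->node);
    free(a);
    free(b);
    return 0;
}
```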
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c index 6d7bcc9da9e7..6e278474f9a8 100644 --- a/drivers/block/ioctl.c +++ b/drivers/block/ioctl.c | |||
@@ -133,11 +133,9 @@ static int put_u64(unsigned long arg, u64 val) | |||
133 | return put_user(val, (u64 __user *)arg); | 133 | return put_user(val, (u64 __user *)arg); |
134 | } | 134 | } |
135 | 135 | ||
136 | int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd, | 136 | static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev, |
137 | unsigned long arg) | 137 | unsigned cmd, unsigned long arg) |
138 | { | 138 | { |
139 | struct block_device *bdev = inode->i_bdev; | ||
140 | struct gendisk *disk = bdev->bd_disk; | ||
141 | struct backing_dev_info *bdi; | 139 | struct backing_dev_info *bdi; |
142 | int ret, n; | 140 | int ret, n; |
143 | 141 | ||
@@ -190,36 +188,72 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd, | |||
190 | return put_ulong(arg, bdev->bd_inode->i_size >> 9); | 188 | return put_ulong(arg, bdev->bd_inode->i_size >> 9); |
191 | case BLKGETSIZE64: | 189 | case BLKGETSIZE64: |
192 | return put_u64(arg, bdev->bd_inode->i_size); | 190 | return put_u64(arg, bdev->bd_inode->i_size); |
191 | } | ||
192 | return -ENOIOCTLCMD; | ||
193 | } | ||
194 | |||
195 | static int blkdev_driver_ioctl(struct inode *inode, struct file *file, | ||
196 | struct gendisk *disk, unsigned cmd, unsigned long arg) | ||
197 | { | ||
198 | int ret; | ||
199 | if (disk->fops->unlocked_ioctl) | ||
200 | return disk->fops->unlocked_ioctl(file, cmd, arg); | ||
201 | |||
202 | if (disk->fops->ioctl) { | ||
203 | lock_kernel(); | ||
204 | ret = disk->fops->ioctl(inode, file, cmd, arg); | ||
205 | unlock_kernel(); | ||
206 | return ret; | ||
207 | } | ||
208 | |||
209 | return -ENOTTY; | ||
210 | } | ||
211 | |||
212 | int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd, | ||
213 | unsigned long arg) | ||
214 | { | ||
215 | struct block_device *bdev = inode->i_bdev; | ||
216 | struct gendisk *disk = bdev->bd_disk; | ||
217 | int ret, n; | ||
218 | |||
219 | switch(cmd) { | ||
193 | case BLKFLSBUF: | 220 | case BLKFLSBUF: |
194 | if (!capable(CAP_SYS_ADMIN)) | 221 | if (!capable(CAP_SYS_ADMIN)) |
195 | return -EACCES; | 222 | return -EACCES; |
196 | if (disk->fops->ioctl) { | 223 | |
197 | ret = disk->fops->ioctl(inode, file, cmd, arg); | 224 | ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg); |
198 | /* -EINVAL to handle old uncorrected drivers */ | 225 | /* -EINVAL to handle old uncorrected drivers */ |
199 | if (ret != -EINVAL && ret != -ENOTTY) | 226 | if (ret != -EINVAL && ret != -ENOTTY) |
200 | return ret; | 227 | return ret; |
201 | } | 228 | |
229 | lock_kernel(); | ||
202 | fsync_bdev(bdev); | 230 | fsync_bdev(bdev); |
203 | invalidate_bdev(bdev, 0); | 231 | invalidate_bdev(bdev, 0); |
232 | unlock_kernel(); | ||
204 | return 0; | 233 | return 0; |
234 | |||
205 | case BLKROSET: | 235 | case BLKROSET: |
206 | if (disk->fops->ioctl) { | 236 | ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg); |
207 | ret = disk->fops->ioctl(inode, file, cmd, arg); | 237 | /* -EINVAL to handle old uncorrected drivers */ |
208 | /* -EINVAL to handle old uncorrected drivers */ | 238 | if (ret != -EINVAL && ret != -ENOTTY) |
209 | if (ret != -EINVAL && ret != -ENOTTY) | 239 | return ret; |
210 | return ret; | ||
211 | } | ||
212 | if (!capable(CAP_SYS_ADMIN)) | 240 | if (!capable(CAP_SYS_ADMIN)) |
213 | return -EACCES; | 241 | return -EACCES; |
214 | if (get_user(n, (int __user *)(arg))) | 242 | if (get_user(n, (int __user *)(arg))) |
215 | return -EFAULT; | 243 | return -EFAULT; |
244 | lock_kernel(); | ||
216 | set_device_ro(bdev, n); | 245 | set_device_ro(bdev, n); |
246 | unlock_kernel(); | ||
217 | return 0; | 247 | return 0; |
218 | default: | ||
219 | if (disk->fops->ioctl) | ||
220 | return disk->fops->ioctl(inode, file, cmd, arg); | ||
221 | } | 248 | } |
222 | return -ENOTTY; | 249 | |
250 | lock_kernel(); | ||
251 | ret = blkdev_locked_ioctl(file, bdev, cmd, arg); | ||
252 | unlock_kernel(); | ||
253 | if (ret != -ENOIOCTLCMD) | ||
254 | return ret; | ||
255 | |||
256 | return blkdev_driver_ioctl(inode, file, disk, cmd, arg); | ||
223 | } | 257 | } |
224 | 258 | ||
225 | /* Most of the generic ioctls are handled in the normal fallback path. | 259 | /* Most of the generic ioctls are handled in the normal fallback path. |
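The ioctl rework splits blkdev_ioctl() into a BKL-protected generic layer, a driver dispatch that prefers ->unlocked_ioctl and only takes the big kernel lock for a legacy ->ioctl, and a fall-through keyed on -ENOIOCTLCMD. A compact user-space model of that dispatch chain; the lock is only indicated by a comment and the command numbers are made up:

```c
/* Layered ioctl dispatch modeled on blkdev_ioctl()/blkdev_driver_ioctl(). */
#include <stdio.h>

#define ENOIOCTLCMD 515   /* "generic layer doesn't handle this" */
#define ENOTTY       25   /* "nobody handles this" */

struct fops_stub {
    int (*unlocked_ioctl)(unsigned cmd);  /* preferred, lock-free path  */
    int (*ioctl)(unsigned cmd);           /* legacy path, needs the BKL */
};

static int driver_dispatch(const struct fops_stub *f, unsigned cmd)
{
    if (f->unlocked_ioctl)
        return f->unlocked_ioctl(cmd);
    if (f->ioctl) {
        /* lock_kernel()/unlock_kernel() would bracket this call in the real code */
        return f->ioctl(cmd);
    }
    return -ENOTTY;
}

static int generic_dispatch(unsigned cmd)
{
    if (cmd == 1)                 /* pretend cmd 1 is a generic block ioctl */
        return 0;
    return -ENOIOCTLCMD;          /* not ours: let the driver have a look */
}

static int top_level_ioctl(const struct fops_stub *f, unsigned cmd)
{
    int ret = generic_dispatch(cmd);
    if (ret != -ENOIOCTLCMD)
        return ret;
    return driver_dispatch(f, cmd);
}

static int legacy_driver_ioctl(unsigned cmd) { return cmd == 2 ? 0 : -ENOTTY; }

int main(void)
{
    struct fops_stub legacy = { .unlocked_ioctl = NULL, .ioctl = legacy_driver_ioctl };

    printf("generic cmd 1 -> %d\n", top_level_ioctl(&legacy, 1)); /* 0, generic path   */
    printf("driver  cmd 2 -> %d\n", top_level_ioctl(&legacy, 2)); /* 0, legacy ->ioctl */
    printf("unknown cmd 9 -> %d\n", top_level_ioctl(&legacy, 9)); /* -ENOTTY           */
    return 0;
}
```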
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index f6fda036b4ae..0c7599563b65 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/swap.h> | 29 | #include <linux/swap.h> |
30 | #include <linux/writeback.h> | 30 | #include <linux/writeback.h> |
31 | #include <linux/blkdev.h> | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * for max sense size | 34 | * for max sense size |
@@ -36,6 +37,7 @@ | |||
36 | 37 | ||
37 | static void blk_unplug_work(void *data); | 38 | static void blk_unplug_work(void *data); |
38 | static void blk_unplug_timeout(unsigned long data); | 39 | static void blk_unplug_timeout(unsigned long data); |
40 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); | ||
39 | 41 | ||
40 | /* | 42 | /* |
41 | * For the allocated request tables | 43 | * For the allocated request tables |
@@ -274,6 +276,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq) | |||
274 | rq->errors = 0; | 276 | rq->errors = 0; |
275 | rq->rq_status = RQ_ACTIVE; | 277 | rq->rq_status = RQ_ACTIVE; |
276 | rq->bio = rq->biotail = NULL; | 278 | rq->bio = rq->biotail = NULL; |
279 | rq->ioprio = 0; | ||
277 | rq->buffer = NULL; | 280 | rq->buffer = NULL; |
278 | rq->ref_count = 1; | 281 | rq->ref_count = 1; |
279 | rq->q = q; | 282 | rq->q = q; |
@@ -775,9 +778,9 @@ EXPORT_SYMBOL(blk_queue_free_tags); | |||
775 | static int | 778 | static int |
776 | init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) | 779 | init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) |
777 | { | 780 | { |
778 | int bits, i; | ||
779 | struct request **tag_index; | 781 | struct request **tag_index; |
780 | unsigned long *tag_map; | 782 | unsigned long *tag_map; |
783 | int nr_ulongs; | ||
781 | 784 | ||
782 | if (depth > q->nr_requests * 2) { | 785 | if (depth > q->nr_requests * 2) { |
783 | depth = q->nr_requests * 2; | 786 | depth = q->nr_requests * 2; |
@@ -789,24 +792,18 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) | |||
789 | if (!tag_index) | 792 | if (!tag_index) |
790 | goto fail; | 793 | goto fail; |
791 | 794 | ||
792 | bits = (depth / BLK_TAGS_PER_LONG) + 1; | 795 | nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; |
793 | tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC); | 796 | tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); |
794 | if (!tag_map) | 797 | if (!tag_map) |
795 | goto fail; | 798 | goto fail; |
796 | 799 | ||
797 | memset(tag_index, 0, depth * sizeof(struct request *)); | 800 | memset(tag_index, 0, depth * sizeof(struct request *)); |
798 | memset(tag_map, 0, bits * sizeof(unsigned long)); | 801 | memset(tag_map, 0, nr_ulongs * sizeof(unsigned long)); |
802 | tags->real_max_depth = depth; | ||
799 | tags->max_depth = depth; | 803 | tags->max_depth = depth; |
800 | tags->real_max_depth = bits * BITS_PER_LONG; | ||
801 | tags->tag_index = tag_index; | 804 | tags->tag_index = tag_index; |
802 | tags->tag_map = tag_map; | 805 | tags->tag_map = tag_map; |
803 | 806 | ||
804 | /* | ||
805 | * set the upper bits if the depth isn't a multiple of the word size | ||
806 | */ | ||
807 | for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++) | ||
808 | __set_bit(i, tag_map); | ||
809 | |||
810 | return 0; | 807 | return 0; |
811 | fail: | 808 | fail: |
812 | kfree(tag_index); | 809 | kfree(tag_index); |
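The rewritten init_tag_map() sizes the bitmap as ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG words and records real_max_depth = depth, so there are no surplus padding bits for the old pre-set loop to mask off. The rounding itself is the usual power-of-two ALIGN trick, shown standalone:

```c
/* Number of unsigned longs needed for a 'depth'-bit tag map, as computed by
 * the rewritten init_tag_map(). ALIGN matches the kernel's definition for
 * power-of-two alignments. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG   (CHAR_BIT * (int)sizeof(unsigned long))
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    int depths[] = { 1, 63, 64, 65, 256 };

    for (int i = 0; i < 5; i++) {
        int depth = depths[i];
        int nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
        int old_words = depth / BITS_PER_LONG + 1;   /* pre-patch sizing */

        printf("depth %3d: %d word(s) (old code allocated %d)\n",
               depth, nr_ulongs, old_words);
    }
    return 0;
}
```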
@@ -871,13 +868,16 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth) | |||
871 | struct blk_queue_tag *bqt = q->queue_tags; | 868 | struct blk_queue_tag *bqt = q->queue_tags; |
872 | struct request **tag_index; | 869 | struct request **tag_index; |
873 | unsigned long *tag_map; | 870 | unsigned long *tag_map; |
874 | int bits, max_depth; | 871 | int max_depth, nr_ulongs; |
875 | 872 | ||
876 | if (!bqt) | 873 | if (!bqt) |
877 | return -ENXIO; | 874 | return -ENXIO; |
878 | 875 | ||
879 | /* | 876 | /* |
880 | * don't bother sizing down | 877 | * if we already have a large enough real_max_depth, just |
878 | * adjust max_depth. *NOTE* as requests with tag value | ||
879 | * between new_depth and real_max_depth can be in-flight, tag | ||
880 | * map can not be shrunk blindly here. | ||
881 | */ | 881 | */ |
882 | if (new_depth <= bqt->real_max_depth) { | 882 | if (new_depth <= bqt->real_max_depth) { |
883 | bqt->max_depth = new_depth; | 883 | bqt->max_depth = new_depth; |
@@ -895,8 +895,8 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth) | |||
895 | return -ENOMEM; | 895 | return -ENOMEM; |
896 | 896 | ||
897 | memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *)); | 897 | memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *)); |
898 | bits = max_depth / BLK_TAGS_PER_LONG; | 898 | nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG; |
899 | memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long)); | 899 | memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long)); |
900 | 900 | ||
901 | kfree(tag_index); | 901 | kfree(tag_index); |
902 | kfree(tag_map); | 902 | kfree(tag_map); |
@@ -927,10 +927,15 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq) | |||
927 | BUG_ON(tag == -1); | 927 | BUG_ON(tag == -1); |
928 | 928 | ||
929 | if (unlikely(tag >= bqt->real_max_depth)) | 929 | if (unlikely(tag >= bqt->real_max_depth)) |
930 | /* | ||
931 | * This can happen after tag depth has been reduced. | ||
932 | * FIXME: how about a warning or info message here? | ||
933 | */ | ||
930 | return; | 934 | return; |
931 | 935 | ||
932 | if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) { | 936 | if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) { |
933 | printk("attempt to clear non-busy tag (%d)\n", tag); | 937 | printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", |
938 | __FUNCTION__, tag); | ||
934 | return; | 939 | return; |
935 | } | 940 | } |
936 | 941 | ||
@@ -939,7 +944,8 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq) | |||
939 | rq->tag = -1; | 944 | rq->tag = -1; |
940 | 945 | ||
941 | if (unlikely(bqt->tag_index[tag] == NULL)) | 946 | if (unlikely(bqt->tag_index[tag] == NULL)) |
942 | printk("tag %d is missing\n", tag); | 947 | printk(KERN_ERR "%s: tag %d is missing\n", |
948 | __FUNCTION__, tag); | ||
943 | 949 | ||
944 | bqt->tag_index[tag] = NULL; | 950 | bqt->tag_index[tag] = NULL; |
945 | bqt->busy--; | 951 | bqt->busy--; |
@@ -968,24 +974,20 @@ EXPORT_SYMBOL(blk_queue_end_tag); | |||
968 | int blk_queue_start_tag(request_queue_t *q, struct request *rq) | 974 | int blk_queue_start_tag(request_queue_t *q, struct request *rq) |
969 | { | 975 | { |
970 | struct blk_queue_tag *bqt = q->queue_tags; | 976 | struct blk_queue_tag *bqt = q->queue_tags; |
971 | unsigned long *map = bqt->tag_map; | 977 | int tag; |
972 | int tag = 0; | ||
973 | 978 | ||
974 | if (unlikely((rq->flags & REQ_QUEUED))) { | 979 | if (unlikely((rq->flags & REQ_QUEUED))) { |
975 | printk(KERN_ERR | 980 | printk(KERN_ERR |
976 | "request %p for device [%s] already tagged %d", | 981 | "%s: request %p for device [%s] already tagged %d", |
977 | rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); | 982 | __FUNCTION__, rq, |
983 | rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); | ||
978 | BUG(); | 984 | BUG(); |
979 | } | 985 | } |
980 | 986 | ||
981 | for (map = bqt->tag_map; *map == -1UL; map++) { | 987 | tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); |
982 | tag += BLK_TAGS_PER_LONG; | 988 | if (tag >= bqt->max_depth) |
983 | 989 | return 1; | |
984 | if (tag >= bqt->max_depth) | ||
985 | return 1; | ||
986 | } | ||
987 | 990 | ||
988 | tag += ffz(*map); | ||
989 | __set_bit(tag, bqt->tag_map); | 991 | __set_bit(tag, bqt->tag_map); |
990 | 992 | ||
991 | rq->flags |= REQ_QUEUED; | 993 | rq->flags |= REQ_QUEUED; |
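blk_queue_start_tag() now asks find_first_zero_bit() for a free tag bounded by max_depth instead of hand-walking the map a word at a time with ffz(). A simplified bit-at-a-time user-space version of the same allocation step; the real helper scans whole words at once, but the semantics (return the size when nothing is free) are the same:

```c
/* Allocate the lowest free tag from a bitmap, bounded by max_depth,
 * mirroring the find_first_zero_bit()/__set_bit() pair above. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * (int)sizeof(unsigned long))

/* Naive reference implementation; the kernel helper scans word by word. */
static int find_first_zero_bit_slow(const unsigned long *map, int size)
{
    for (int bit = 0; bit < size; bit++)
        if (!(map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG))))
            return bit;
    return size;   /* none free: return the limit, as the kernel helper does */
}

static int start_tag(unsigned long *map, int max_depth)
{
    int tag = find_first_zero_bit_slow(map, max_depth);

    if (tag >= max_depth)
        return -1;                                          /* queue full */
    map[tag / BITS_PER_LONG] |= 1UL << (tag % BITS_PER_LONG); /* __set_bit */
    return tag;
}

int main(void)
{
    unsigned long map[1] = { 0 };
    int max_depth = 4;

    for (int i = 0; i < 6; i++)
        printf("allocation %d -> tag %d\n", i, start_tag(map, max_depth));
    return 0;
}
```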
@@ -1021,7 +1023,8 @@ void blk_queue_invalidate_tags(request_queue_t *q) | |||
1021 | rq = list_entry_rq(tmp); | 1023 | rq = list_entry_rq(tmp); |
1022 | 1024 | ||
1023 | if (rq->tag == -1) { | 1025 | if (rq->tag == -1) { |
1024 | printk("bad tag found on list\n"); | 1026 | printk(KERN_ERR |
1027 | "%s: bad tag found on list\n", __FUNCTION__); | ||
1025 | list_del_init(&rq->queuelist); | 1028 | list_del_init(&rq->queuelist); |
1026 | rq->flags &= ~REQ_QUEUED; | 1029 | rq->flags &= ~REQ_QUEUED; |
1027 | } else | 1030 | } else |
@@ -1149,7 +1152,7 @@ new_hw_segment: | |||
1149 | } | 1152 | } |
1150 | 1153 | ||
1151 | 1154 | ||
1152 | int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, | 1155 | static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, |
1153 | struct bio *nxt) | 1156 | struct bio *nxt) |
1154 | { | 1157 | { |
1155 | if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) | 1158 | if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) |
@@ -1170,9 +1173,7 @@ int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, | |||
1170 | return 0; | 1173 | return 0; |
1171 | } | 1174 | } |
1172 | 1175 | ||
1173 | EXPORT_SYMBOL(blk_phys_contig_segment); | 1176 | static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, |
1174 | |||
1175 | int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, | ||
1176 | struct bio *nxt) | 1177 | struct bio *nxt) |
1177 | { | 1178 | { |
1178 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | 1179 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) |
@@ -1188,8 +1189,6 @@ int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, | |||
1188 | return 1; | 1189 | return 1; |
1189 | } | 1190 | } |
1190 | 1191 | ||
1191 | EXPORT_SYMBOL(blk_hw_contig_segment); | ||
1192 | |||
1193 | /* | 1192 | /* |
1194 | * map a request to scatterlist, return number of sg entries setup. Caller | 1193 | * map a request to scatterlist, return number of sg entries setup. Caller |
1195 | * must make sure sg can hold rq->nr_phys_segments entries | 1194 | * must make sure sg can hold rq->nr_phys_segments entries |
@@ -1359,8 +1358,8 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, | |||
1359 | static int ll_merge_requests_fn(request_queue_t *q, struct request *req, | 1358 | static int ll_merge_requests_fn(request_queue_t *q, struct request *req, |
1360 | struct request *next) | 1359 | struct request *next) |
1361 | { | 1360 | { |
1362 | int total_phys_segments = req->nr_phys_segments +next->nr_phys_segments; | 1361 | int total_phys_segments; |
1363 | int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; | 1362 | int total_hw_segments; |
1364 | 1363 | ||
1365 | /* | 1364 | /* |
1366 | * First check if either of the requests are re-queued | 1365 |
@@ -1370,7 +1369,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req, | |||
1370 | return 0; | 1369 | return 0; |
1371 | 1370 | ||
1372 | /* | 1371 | /* |
1373 | * Will it become to large? | 1372 | * Will it become too large? |
1374 | */ | 1373 | */ |
1375 | if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) | 1374 | if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) |
1376 | return 0; | 1375 | return 0; |
@@ -1451,17 +1450,13 @@ EXPORT_SYMBOL(blk_remove_plug); | |||
1451 | */ | 1450 | */ |
1452 | void __generic_unplug_device(request_queue_t *q) | 1451 | void __generic_unplug_device(request_queue_t *q) |
1453 | { | 1452 | { |
1454 | if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) | 1453 | if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))) |
1455 | return; | 1454 | return; |
1456 | 1455 | ||
1457 | if (!blk_remove_plug(q)) | 1456 | if (!blk_remove_plug(q)) |
1458 | return; | 1457 | return; |
1459 | 1458 | ||
1460 | /* | 1459 | q->request_fn(q); |
1461 | * was plugged, fire request_fn if queue has stuff to do | ||
1462 | */ | ||
1463 | if (elv_next_request(q)) | ||
1464 | q->request_fn(q); | ||
1465 | } | 1460 | } |
1466 | EXPORT_SYMBOL(__generic_unplug_device); | 1461 | EXPORT_SYMBOL(__generic_unplug_device); |
1467 | 1462 | ||
@@ -1646,7 +1641,8 @@ static int blk_init_free_list(request_queue_t *q) | |||
1646 | init_waitqueue_head(&rl->wait[WRITE]); | 1641 | init_waitqueue_head(&rl->wait[WRITE]); |
1647 | init_waitqueue_head(&rl->drain); | 1642 | init_waitqueue_head(&rl->drain); |
1648 | 1643 | ||
1649 | rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep); | 1644 | rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, |
1645 | mempool_free_slab, request_cachep, q->node); | ||
1650 | 1646 | ||
1651 | if (!rl->rq_pool) | 1647 | if (!rl->rq_pool) |
1652 | return -ENOMEM; | 1648 | return -ENOMEM; |
@@ -1658,8 +1654,15 @@ static int __make_request(request_queue_t *, struct bio *); | |||
1658 | 1654 | ||
1659 | request_queue_t *blk_alloc_queue(int gfp_mask) | 1655 | request_queue_t *blk_alloc_queue(int gfp_mask) |
1660 | { | 1656 | { |
1661 | request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask); | 1657 | return blk_alloc_queue_node(gfp_mask, -1); |
1658 | } | ||
1659 | EXPORT_SYMBOL(blk_alloc_queue); | ||
1660 | |||
1661 | request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id) | ||
1662 | { | ||
1663 | request_queue_t *q; | ||
1662 | 1664 | ||
1665 | q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id); | ||
1663 | if (!q) | 1666 | if (!q) |
1664 | return NULL; | 1667 | return NULL; |
1665 | 1668 | ||
@@ -1672,8 +1675,7 @@ request_queue_t *blk_alloc_queue(int gfp_mask) | |||
1672 | 1675 | ||
1673 | return q; | 1676 | return q; |
1674 | } | 1677 | } |
1675 | 1678 | EXPORT_SYMBOL(blk_alloc_queue_node); | |
1676 | EXPORT_SYMBOL(blk_alloc_queue); | ||
1677 | 1679 | ||
1678 | /** | 1680 | /** |
1679 | * blk_init_queue - prepare a request queue for use with a block device | 1681 | * blk_init_queue - prepare a request queue for use with a block device |
@@ -1706,13 +1708,22 @@ EXPORT_SYMBOL(blk_alloc_queue); | |||
1706 | * blk_init_queue() must be paired with a blk_cleanup_queue() call | 1708 | * blk_init_queue() must be paired with a blk_cleanup_queue() call |
1707 | * when the block device is deactivated (such as at module unload). | 1709 | * when the block device is deactivated (such as at module unload). |
1708 | **/ | 1710 | **/ |
1711 | |||
1709 | request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) | 1712 | request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) |
1710 | { | 1713 | { |
1711 | request_queue_t *q = blk_alloc_queue(GFP_KERNEL); | 1714 | return blk_init_queue_node(rfn, lock, -1); |
1715 | } | ||
1716 | EXPORT_SYMBOL(blk_init_queue); | ||
1717 | |||
1718 | request_queue_t * | ||
1719 | blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | ||
1720 | { | ||
1721 | request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id); | ||
1712 | 1722 | ||
1713 | if (!q) | 1723 | if (!q) |
1714 | return NULL; | 1724 | return NULL; |
1715 | 1725 | ||
1726 | q->node = node_id; | ||
1716 | if (blk_init_free_list(q)) | 1727 | if (blk_init_free_list(q)) |
1717 | goto out_init; | 1728 | goto out_init; |
1718 | 1729 | ||
@@ -1755,12 +1766,11 @@ out_init: | |||
1755 | kmem_cache_free(requestq_cachep, q); | 1766 | kmem_cache_free(requestq_cachep, q); |
1756 | return NULL; | 1767 | return NULL; |
1757 | } | 1768 | } |
1758 | 1769 | EXPORT_SYMBOL(blk_init_queue_node); | |
1759 | EXPORT_SYMBOL(blk_init_queue); | ||
1760 | 1770 | ||
1761 | int blk_get_queue(request_queue_t *q) | 1771 | int blk_get_queue(request_queue_t *q) |
1762 | { | 1772 | { |
1763 | if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { | 1773 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { |
1764 | atomic_inc(&q->refcnt); | 1774 | atomic_inc(&q->refcnt); |
1765 | return 0; | 1775 | return 0; |
1766 | } | 1776 | } |
@@ -1776,8 +1786,8 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq) | |||
1776 | mempool_free(rq, q->rq.rq_pool); | 1786 | mempool_free(rq, q->rq.rq_pool); |
1777 | } | 1787 | } |
1778 | 1788 | ||
1779 | static inline struct request *blk_alloc_request(request_queue_t *q, int rw, | 1789 | static inline struct request * |
1780 | int gfp_mask) | 1790 | blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) |
1781 | { | 1791 | { |
1782 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); | 1792 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); |
1783 | 1793 | ||
@@ -1790,7 +1800,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q, int rw, | |||
1790 | */ | 1800 | */ |
1791 | rq->flags = rw; | 1801 | rq->flags = rw; |
1792 | 1802 | ||
1793 | if (!elv_set_request(q, rq, gfp_mask)) | 1803 | if (!elv_set_request(q, rq, bio, gfp_mask)) |
1794 | return rq; | 1804 | return rq; |
1795 | 1805 | ||
1796 | mempool_free(rq, q->rq.rq_pool); | 1806 | mempool_free(rq, q->rq.rq_pool); |
@@ -1822,7 +1832,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) | |||
1822 | * is the behaviour we want though - once it gets a wakeup it should be given | 1832 | * is the behaviour we want though - once it gets a wakeup it should be given |
1823 | * a nice run. | 1833 | * a nice run. |
1824 | */ | 1834 | */ |
1825 | void ioc_set_batching(request_queue_t *q, struct io_context *ioc) | 1835 | static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) |
1826 | { | 1836 | { |
1827 | if (!ioc || ioc_batching(q, ioc)) | 1837 | if (!ioc || ioc_batching(q, ioc)) |
1828 | return; | 1838 | return; |
@@ -1839,7 +1849,6 @@ static void __freed_request(request_queue_t *q, int rw) | |||
1839 | clear_queue_congested(q, rw); | 1849 | clear_queue_congested(q, rw); |
1840 | 1850 | ||
1841 | if (rl->count[rw] + 1 <= q->nr_requests) { | 1851 | if (rl->count[rw] + 1 <= q->nr_requests) { |
1842 | smp_mb(); | ||
1843 | if (waitqueue_active(&rl->wait[rw])) | 1852 | if (waitqueue_active(&rl->wait[rw])) |
1844 | wake_up(&rl->wait[rw]); | 1853 | wake_up(&rl->wait[rw]); |
1845 | 1854 | ||
@@ -1871,18 +1880,20 @@ static void freed_request(request_queue_t *q, int rw) | |||
1871 | 1880 | ||
1872 | #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) | 1881 | #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) |
1873 | /* | 1882 | /* |
1874 | * Get a free request, queue_lock must not be held | 1883 | * Get a free request, queue_lock must be held. |
1884 | * Returns NULL on failure, with queue_lock held. | ||
1885 | * Returns !NULL on success, with queue_lock *not held*. | ||
1875 | */ | 1886 | */ |
1876 | static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) | 1887 | static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, |
1888 | int gfp_mask) | ||
1877 | { | 1889 | { |
1878 | struct request *rq = NULL; | 1890 | struct request *rq = NULL; |
1879 | struct request_list *rl = &q->rq; | 1891 | struct request_list *rl = &q->rq; |
1880 | struct io_context *ioc = get_io_context(gfp_mask); | 1892 | struct io_context *ioc = current_io_context(GFP_ATOMIC); |
1881 | 1893 | ||
1882 | if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) | 1894 | if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) |
1883 | goto out; | 1895 | goto out; |
1884 | 1896 | ||
1885 | spin_lock_irq(q->queue_lock); | ||
1886 | if (rl->count[rw]+1 >= q->nr_requests) { | 1897 | if (rl->count[rw]+1 >= q->nr_requests) { |
1887 | /* | 1898 | /* |
1888 | * The queue will fill after this allocation, so set it as | 1899 | * The queue will fill after this allocation, so set it as |
@@ -1896,7 +1907,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) | |||
1896 | } | 1907 | } |
1897 | } | 1908 | } |
1898 | 1909 | ||
1899 | switch (elv_may_queue(q, rw)) { | 1910 | switch (elv_may_queue(q, rw, bio)) { |
1900 | case ELV_MQUEUE_NO: | 1911 | case ELV_MQUEUE_NO: |
1901 | goto rq_starved; | 1912 | goto rq_starved; |
1902 | case ELV_MQUEUE_MAY: | 1913 | case ELV_MQUEUE_MAY: |
@@ -1910,18 +1921,25 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) | |||
1910 | * The queue is full and the allocating process is not a | 1921 | * The queue is full and the allocating process is not a |
1911 | * "batcher", and not exempted by the IO scheduler | 1922 | * "batcher", and not exempted by the IO scheduler |
1912 | */ | 1923 | */ |
1913 | spin_unlock_irq(q->queue_lock); | ||
1914 | goto out; | 1924 | goto out; |
1915 | } | 1925 | } |
1916 | 1926 | ||
1917 | get_rq: | 1927 | get_rq: |
1928 | /* | ||
1929 | * Only allow batching queuers to allocate up to 50% over the defined | ||
1930 | * limit of requests, otherwise we could have thousands of requests | ||
1931 | * allocated with any setting of ->nr_requests | ||
1932 | */ | ||
1933 | if (rl->count[rw] >= (3 * q->nr_requests / 2)) | ||
1934 | goto out; | ||
1935 | |||
1918 | rl->count[rw]++; | 1936 | rl->count[rw]++; |
1919 | rl->starved[rw] = 0; | 1937 | rl->starved[rw] = 0; |
1920 | if (rl->count[rw] >= queue_congestion_on_threshold(q)) | 1938 | if (rl->count[rw] >= queue_congestion_on_threshold(q)) |
1921 | set_queue_congested(q, rw); | 1939 | set_queue_congested(q, rw); |
1922 | spin_unlock_irq(q->queue_lock); | 1940 | spin_unlock_irq(q->queue_lock); |
1923 | 1941 | ||
1924 | rq = blk_alloc_request(q, rw, gfp_mask); | 1942 | rq = blk_alloc_request(q, rw, bio, gfp_mask); |
1925 | if (!rq) { | 1943 | if (!rq) { |
1926 | /* | 1944 | /* |
1927 | * Allocation failed presumably due to memory. Undo anything | 1945 | * Allocation failed presumably due to memory. Undo anything |
@@ -1944,7 +1962,6 @@ rq_starved: | |||
1944 | if (unlikely(rl->count[rw] == 0)) | 1962 | if (unlikely(rl->count[rw] == 0)) |
1945 | rl->starved[rw] = 1; | 1963 | rl->starved[rw] = 1; |
1946 | 1964 | ||
1947 | spin_unlock_irq(q->queue_lock); | ||
1948 | goto out; | 1965 | goto out; |
1949 | } | 1966 | } |
1950 | 1967 | ||
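The get_rq hunk above adds a hard ceiling: even a task inside a batching window may only push the pool to 3 * q->nr_requests / 2, i.e. 50% past the configured limit. A standalone sketch of the arithmetic (the helper name is ours, not the kernel's):

	static int request_pool_cap(int nr_requests)
	{
		/* 50% over the configured limit, e.g. 3 * 128 / 2 == 192 */
		return 3 * nr_requests / 2;
	}

With a typical nr_requests of 128 per direction, a batcher can therefore never drive the pool past 192 allocated requests.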
@@ -1954,31 +1971,35 @@ rq_starved: | |||
1954 | rq_init(q, rq); | 1971 | rq_init(q, rq); |
1955 | rq->rl = rl; | 1972 | rq->rl = rl; |
1956 | out: | 1973 | out: |
1957 | put_io_context(ioc); | ||
1958 | return rq; | 1974 | return rq; |
1959 | } | 1975 | } |
1960 | 1976 | ||
1961 | /* | 1977 | /* |
1962 | * No available requests for this queue, unplug the device and wait for some | 1978 | * No available requests for this queue, unplug the device and wait for some |
1963 | * requests to become available. | 1979 | * requests to become available. |
1980 | * | ||
1981 | * Called with q->queue_lock held, and returns with it unlocked. | ||
1964 | */ | 1982 | */ |
1965 | static struct request *get_request_wait(request_queue_t *q, int rw) | 1983 | static struct request *get_request_wait(request_queue_t *q, int rw, |
1984 | struct bio *bio) | ||
1966 | { | 1985 | { |
1967 | DEFINE_WAIT(wait); | ||
1968 | struct request *rq; | 1986 | struct request *rq; |
1969 | 1987 | ||
1970 | generic_unplug_device(q); | 1988 | rq = get_request(q, rw, bio, GFP_NOIO); |
1971 | do { | 1989 | while (!rq) { |
1990 | DEFINE_WAIT(wait); | ||
1972 | struct request_list *rl = &q->rq; | 1991 | struct request_list *rl = &q->rq; |
1973 | 1992 | ||
1974 | prepare_to_wait_exclusive(&rl->wait[rw], &wait, | 1993 | prepare_to_wait_exclusive(&rl->wait[rw], &wait, |
1975 | TASK_UNINTERRUPTIBLE); | 1994 | TASK_UNINTERRUPTIBLE); |
1976 | 1995 | ||
1977 | rq = get_request(q, rw, GFP_NOIO); | 1996 | rq = get_request(q, rw, bio, GFP_NOIO); |
1978 | 1997 | ||
1979 | if (!rq) { | 1998 | if (!rq) { |
1980 | struct io_context *ioc; | 1999 | struct io_context *ioc; |
1981 | 2000 | ||
2001 | __generic_unplug_device(q); | ||
2002 | spin_unlock_irq(q->queue_lock); | ||
1982 | io_schedule(); | 2003 | io_schedule(); |
1983 | 2004 | ||
1984 | /* | 2005 | /* |
@@ -1987,12 +2008,13 @@ static struct request *get_request_wait(request_queue_t *q, int rw) | |||
1987 | * up to a big batch of them for a small period time. | 2008 | * up to a big batch of them for a small period time. |
1988 | * See ioc_batching, ioc_set_batching | 2009 | * See ioc_batching, ioc_set_batching |
1989 | */ | 2010 | */ |
1990 | ioc = get_io_context(GFP_NOIO); | 2011 | ioc = current_io_context(GFP_NOIO); |
1991 | ioc_set_batching(q, ioc); | 2012 | ioc_set_batching(q, ioc); |
1992 | put_io_context(ioc); | 2013 | |
2014 | spin_lock_irq(q->queue_lock); | ||
1993 | } | 2015 | } |
1994 | finish_wait(&rl->wait[rw], &wait); | 2016 | finish_wait(&rl->wait[rw], &wait); |
1995 | } while (!rq); | 2017 | } |
1996 | 2018 | ||
1997 | return rq; | 2019 | return rq; |
1998 | } | 2020 | } |
@@ -2003,14 +2025,18 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask) | |||
2003 | 2025 | ||
2004 | BUG_ON(rw != READ && rw != WRITE); | 2026 | BUG_ON(rw != READ && rw != WRITE); |
2005 | 2027 | ||
2006 | if (gfp_mask & __GFP_WAIT) | 2028 | spin_lock_irq(q->queue_lock); |
2007 | rq = get_request_wait(q, rw); | 2029 | if (gfp_mask & __GFP_WAIT) { |
2008 | else | 2030 | rq = get_request_wait(q, rw, NULL); |
2009 | rq = get_request(q, rw, gfp_mask); | 2031 | } else { |
2032 | rq = get_request(q, rw, NULL, gfp_mask); | ||
2033 | if (!rq) | ||
2034 | spin_unlock_irq(q->queue_lock); | ||
2035 | } | ||
2036 | /* q->queue_lock is unlocked at this point */ | ||
2010 | 2037 | ||
2011 | return rq; | 2038 | return rq; |
2012 | } | 2039 | } |
2013 | |||
2014 | EXPORT_SYMBOL(blk_get_request); | 2040 | EXPORT_SYMBOL(blk_get_request); |
2015 | 2041 | ||
2016 | /** | 2042 | /** |
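Taken together, the hunks above invert the locking convention: get_request() and get_request_wait() are now entered with q->queue_lock held and return with it dropped on success, so blk_get_request() takes the lock itself. A caller-side sketch of the resulting pattern (the wrapper name is illustrative, not part of the patch):

	static struct request *example_alloc(request_queue_t *q, int rw, int gfp_mask)
	{
		struct request *rq;

		spin_lock_irq(q->queue_lock);
		if (gfp_mask & __GFP_WAIT) {
			/* Sleeps until a request frees up; returns with the lock dropped. */
			rq = get_request_wait(q, rw, NULL);
		} else {
			/* May fail; only the NULL return leaves the lock held. */
			rq = get_request(q, rw, NULL, gfp_mask);
			if (!rq)
				spin_unlock_irq(q->queue_lock);
		}
		return rq;	/* q->queue_lock is not held here */
	}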
@@ -2385,7 +2411,7 @@ int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, | |||
2385 | 2411 | ||
2386 | EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn); | 2412 | EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn); |
2387 | 2413 | ||
2388 | void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) | 2414 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) |
2389 | { | 2415 | { |
2390 | int rw = rq_data_dir(rq); | 2416 | int rw = rq_data_dir(rq); |
2391 | 2417 | ||
@@ -2467,7 +2493,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req) | |||
2467 | return; | 2493 | return; |
2468 | 2494 | ||
2469 | req->rq_status = RQ_INACTIVE; | 2495 | req->rq_status = RQ_INACTIVE; |
2470 | req->q = NULL; | ||
2471 | req->rl = NULL; | 2496 | req->rl = NULL; |
2472 | 2497 | ||
2473 | /* | 2498 | /* |
@@ -2596,6 +2621,8 @@ static int attempt_merge(request_queue_t *q, struct request *req, | |||
2596 | req->rq_disk->in_flight--; | 2621 | req->rq_disk->in_flight--; |
2597 | } | 2622 | } |
2598 | 2623 | ||
2624 | req->ioprio = ioprio_best(req->ioprio, next->ioprio); | ||
2625 | |||
2599 | __blk_put_request(q, next); | 2626 | __blk_put_request(q, next); |
2600 | return 1; | 2627 | return 1; |
2601 | } | 2628 | } |
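attempt_merge() now folds the victim's priority into the surviving request with ioprio_best(), which is defined outside this diff. A hypothetical stand-in for the semantics its use implies - a merge should never end up with a weaker priority than either input - might look like the sketch below; the real helper also has to compare the ioprio class bits, which this ignores.

	static unsigned short sketch_ioprio_best(unsigned short aprio, unsigned short bprio)
	{
		/* Assumption: numerically lower values mean stronger priority. */
		return aprio <= bprio ? aprio : bprio;
	}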
@@ -2644,25 +2671,17 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq) | |||
2644 | 2671 | ||
2645 | EXPORT_SYMBOL(blk_attempt_remerge); | 2672 | EXPORT_SYMBOL(blk_attempt_remerge); |
2646 | 2673 | ||
2647 | /* | ||
2648 | * Non-locking blk_attempt_remerge variant. | ||
2649 | */ | ||
2650 | void __blk_attempt_remerge(request_queue_t *q, struct request *rq) | ||
2651 | { | ||
2652 | attempt_back_merge(q, rq); | ||
2653 | } | ||
2654 | |||
2655 | EXPORT_SYMBOL(__blk_attempt_remerge); | ||
2656 | |||
2657 | static int __make_request(request_queue_t *q, struct bio *bio) | 2674 | static int __make_request(request_queue_t *q, struct bio *bio) |
2658 | { | 2675 | { |
2659 | struct request *req, *freereq = NULL; | 2676 | struct request *req; |
2660 | int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; | 2677 | int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; |
2678 | unsigned short prio; | ||
2661 | sector_t sector; | 2679 | sector_t sector; |
2662 | 2680 | ||
2663 | sector = bio->bi_sector; | 2681 | sector = bio->bi_sector; |
2664 | nr_sectors = bio_sectors(bio); | 2682 | nr_sectors = bio_sectors(bio); |
2665 | cur_nr_sectors = bio_cur_sectors(bio); | 2683 | cur_nr_sectors = bio_cur_sectors(bio); |
2684 | prio = bio_prio(bio); | ||
2666 | 2685 | ||
2667 | rw = bio_data_dir(bio); | 2686 | rw = bio_data_dir(bio); |
2668 | sync = bio_sync(bio); | 2687 | sync = bio_sync(bio); |
@@ -2677,19 +2696,14 @@ static int __make_request(request_queue_t *q, struct bio *bio) | |||
2677 | spin_lock_prefetch(q->queue_lock); | 2696 | spin_lock_prefetch(q->queue_lock); |
2678 | 2697 | ||
2679 | barrier = bio_barrier(bio); | 2698 | barrier = bio_barrier(bio); |
2680 | if (barrier && (q->ordered == QUEUE_ORDERED_NONE)) { | 2699 | if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) { |
2681 | err = -EOPNOTSUPP; | 2700 | err = -EOPNOTSUPP; |
2682 | goto end_io; | 2701 | goto end_io; |
2683 | } | 2702 | } |
2684 | 2703 | ||
2685 | again: | ||
2686 | spin_lock_irq(q->queue_lock); | 2704 | spin_lock_irq(q->queue_lock); |
2687 | 2705 | ||
2688 | if (elv_queue_empty(q)) { | 2706 | if (unlikely(barrier) || elv_queue_empty(q)) |
2689 | blk_plug_device(q); | ||
2690 | goto get_rq; | ||
2691 | } | ||
2692 | if (barrier) | ||
2693 | goto get_rq; | 2707 | goto get_rq; |
2694 | 2708 | ||
2695 | el_ret = elv_merge(q, &req, bio); | 2709 | el_ret = elv_merge(q, &req, bio); |
@@ -2703,6 +2717,7 @@ again: | |||
2703 | req->biotail->bi_next = bio; | 2717 | req->biotail->bi_next = bio; |
2704 | req->biotail = bio; | 2718 | req->biotail = bio; |
2705 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | 2719 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; |
2720 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
2706 | drive_stat_acct(req, nr_sectors, 0); | 2721 | drive_stat_acct(req, nr_sectors, 0); |
2707 | if (!attempt_back_merge(q, req)) | 2722 | if (!attempt_back_merge(q, req)) |
2708 | elv_merged_request(q, req); | 2723 | elv_merged_request(q, req); |
@@ -2727,45 +2742,30 @@ again: | |||
2727 | req->hard_cur_sectors = cur_nr_sectors; | 2742 | req->hard_cur_sectors = cur_nr_sectors; |
2728 | req->sector = req->hard_sector = sector; | 2743 | req->sector = req->hard_sector = sector; |
2729 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | 2744 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; |
2745 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
2730 | drive_stat_acct(req, nr_sectors, 0); | 2746 | drive_stat_acct(req, nr_sectors, 0); |
2731 | if (!attempt_front_merge(q, req)) | 2747 | if (!attempt_front_merge(q, req)) |
2732 | elv_merged_request(q, req); | 2748 | elv_merged_request(q, req); |
2733 | goto out; | 2749 | goto out; |
2734 | 2750 | ||
2735 | /* | 2751 | /* ELV_NO_MERGE: elevator says don't/can't merge. */ |
2736 | * elevator says don't/can't merge. get new request | ||
2737 | */ | ||
2738 | case ELEVATOR_NO_MERGE: | ||
2739 | break; | ||
2740 | |||
2741 | default: | 2752 | default: |
2742 | printk("elevator returned crap (%d)\n", el_ret); | 2753 | ; |
2743 | BUG(); | ||
2744 | } | 2754 | } |
2745 | 2755 | ||
2756 | get_rq: | ||
2746 | /* | 2757 | /* |
2747 | * Grab a free request from the freelist - if that is empty, check | 2758 | * Grab a free request. This might sleep but cannot fail. |
2748 | * if we are doing read ahead and abort instead of blocking for | 2759 | * Returns with the queue unlocked. |
2749 | * a free slot. | 2760 | */ |
2761 | req = get_request_wait(q, rw, bio); | ||
2762 | |||
2763 | /* | ||
2764 | * After dropping the lock and possibly sleeping here, our request | ||
2765 | * may now be mergeable after it had proven unmergeable (above). | ||
2766 | * We don't worry about that case for efficiency. It won't happen | ||
2767 | * often, and the elevators are able to handle it. | ||
2750 | */ | 2768 | */ |
2751 | get_rq: | ||
2752 | if (freereq) { | ||
2753 | req = freereq; | ||
2754 | freereq = NULL; | ||
2755 | } else { | ||
2756 | spin_unlock_irq(q->queue_lock); | ||
2757 | if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) { | ||
2758 | /* | ||
2759 | * READA bit set | ||
2760 | */ | ||
2761 | err = -EWOULDBLOCK; | ||
2762 | if (bio_rw_ahead(bio)) | ||
2763 | goto end_io; | ||
2764 | |||
2765 | freereq = get_request_wait(q, rw); | ||
2766 | } | ||
2767 | goto again; | ||
2768 | } | ||
2769 | 2769 | ||
2770 | req->flags |= REQ_CMD; | 2770 | req->flags |= REQ_CMD; |
2771 | 2771 | ||
@@ -2778,7 +2778,7 @@ get_rq: | |||
2778 | /* | 2778 | /* |
2779 | * REQ_BARRIER implies no merging, but lets make it explicit | 2779 | * REQ_BARRIER implies no merging, but lets make it explicit |
2780 | */ | 2780 | */ |
2781 | if (barrier) | 2781 | if (unlikely(barrier)) |
2782 | req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); | 2782 | req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); |
2783 | 2783 | ||
2784 | req->errors = 0; | 2784 | req->errors = 0; |
@@ -2790,13 +2790,15 @@ get_rq: | |||
2790 | req->buffer = bio_data(bio); /* see ->buffer comment above */ | 2790 | req->buffer = bio_data(bio); /* see ->buffer comment above */ |
2791 | req->waiting = NULL; | 2791 | req->waiting = NULL; |
2792 | req->bio = req->biotail = bio; | 2792 | req->bio = req->biotail = bio; |
2793 | req->ioprio = prio; | ||
2793 | req->rq_disk = bio->bi_bdev->bd_disk; | 2794 | req->rq_disk = bio->bi_bdev->bd_disk; |
2794 | req->start_time = jiffies; | 2795 | req->start_time = jiffies; |
2795 | 2796 | ||
2797 | spin_lock_irq(q->queue_lock); | ||
2798 | if (elv_queue_empty(q)) | ||
2799 | blk_plug_device(q); | ||
2796 | add_request(q, req); | 2800 | add_request(q, req); |
2797 | out: | 2801 | out: |
2798 | if (freereq) | ||
2799 | __blk_put_request(q, freereq); | ||
2800 | if (sync) | 2802 | if (sync) |
2801 | __generic_unplug_device(q); | 2803 | __generic_unplug_device(q); |
2802 | 2804 | ||
@@ -2818,7 +2820,7 @@ static inline void blk_partition_remap(struct bio *bio) | |||
2818 | if (bdev != bdev->bd_contains) { | 2820 | if (bdev != bdev->bd_contains) { |
2819 | struct hd_struct *p = bdev->bd_part; | 2821 | struct hd_struct *p = bdev->bd_part; |
2820 | 2822 | ||
2821 | switch (bio->bi_rw) { | 2823 | switch (bio_data_dir(bio)) { |
2822 | case READ: | 2824 | case READ: |
2823 | p->read_sectors += bio_sectors(bio); | 2825 | p->read_sectors += bio_sectors(bio); |
2824 | p->reads++; | 2826 | p->reads++; |
@@ -2837,6 +2839,7 @@ void blk_finish_queue_drain(request_queue_t *q) | |||
2837 | { | 2839 | { |
2838 | struct request_list *rl = &q->rq; | 2840 | struct request_list *rl = &q->rq; |
2839 | struct request *rq; | 2841 | struct request *rq; |
2842 | int requeued = 0; | ||
2840 | 2843 | ||
2841 | spin_lock_irq(q->queue_lock); | 2844 | spin_lock_irq(q->queue_lock); |
2842 | clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); | 2845 | clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); |
@@ -2845,9 +2848,13 @@ void blk_finish_queue_drain(request_queue_t *q) | |||
2845 | rq = list_entry_rq(q->drain_list.next); | 2848 | rq = list_entry_rq(q->drain_list.next); |
2846 | 2849 | ||
2847 | list_del_init(&rq->queuelist); | 2850 | list_del_init(&rq->queuelist); |
2848 | __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1); | 2851 | elv_requeue_request(q, rq); |
2852 | requeued++; | ||
2849 | } | 2853 | } |
2850 | 2854 | ||
2855 | if (requeued) | ||
2856 | q->request_fn(q); | ||
2857 | |||
2851 | spin_unlock_irq(q->queue_lock); | 2858 | spin_unlock_irq(q->queue_lock); |
2852 | 2859 | ||
2853 | wake_up(&rl->wait[0]); | 2860 | wake_up(&rl->wait[0]); |
@@ -2902,7 +2909,7 @@ static inline void block_wait_queue_running(request_queue_t *q) | |||
2902 | { | 2909 | { |
2903 | DEFINE_WAIT(wait); | 2910 | DEFINE_WAIT(wait); |
2904 | 2911 | ||
2905 | while (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) { | 2912 | while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) { |
2906 | struct request_list *rl = &q->rq; | 2913 | struct request_list *rl = &q->rq; |
2907 | 2914 | ||
2908 | prepare_to_wait_exclusive(&rl->drain, &wait, | 2915 | prepare_to_wait_exclusive(&rl->drain, &wait, |
@@ -3011,7 +3018,7 @@ end_io: | |||
3011 | goto end_io; | 3018 | goto end_io; |
3012 | } | 3019 | } |
3013 | 3020 | ||
3014 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) | 3021 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) |
3015 | goto end_io; | 3022 | goto end_io; |
3016 | 3023 | ||
3017 | block_wait_queue_running(q); | 3024 | block_wait_queue_running(q); |
@@ -3044,7 +3051,7 @@ void submit_bio(int rw, struct bio *bio) | |||
3044 | 3051 | ||
3045 | BIO_BUG_ON(!bio->bi_size); | 3052 | BIO_BUG_ON(!bio->bi_size); |
3046 | BIO_BUG_ON(!bio->bi_io_vec); | 3053 | BIO_BUG_ON(!bio->bi_io_vec); |
3047 | bio->bi_rw = rw; | 3054 | bio->bi_rw |= rw; |
3048 | if (rw & WRITE) | 3055 | if (rw & WRITE) |
3049 | mod_page_state(pgpgout, count); | 3056 | mod_page_state(pgpgout, count); |
3050 | else | 3057 | else |
@@ -3064,7 +3071,7 @@ void submit_bio(int rw, struct bio *bio) | |||
3064 | 3071 | ||
3065 | EXPORT_SYMBOL(submit_bio); | 3072 | EXPORT_SYMBOL(submit_bio); |
3066 | 3073 | ||
3067 | void blk_recalc_rq_segments(struct request *rq) | 3074 | static void blk_recalc_rq_segments(struct request *rq) |
3068 | { | 3075 | { |
3069 | struct bio *bio, *prevbio = NULL; | 3076 | struct bio *bio, *prevbio = NULL; |
3070 | int nr_phys_segs, nr_hw_segs; | 3077 | int nr_phys_segs, nr_hw_segs; |
@@ -3106,7 +3113,7 @@ void blk_recalc_rq_segments(struct request *rq) | |||
3106 | rq->nr_hw_segments = nr_hw_segs; | 3113 | rq->nr_hw_segments = nr_hw_segs; |
3107 | } | 3114 | } |
3108 | 3115 | ||
3109 | void blk_recalc_rq_sectors(struct request *rq, int nsect) | 3116 | static void blk_recalc_rq_sectors(struct request *rq, int nsect) |
3110 | { | 3117 | { |
3111 | if (blk_fs_request(rq)) { | 3118 | if (blk_fs_request(rq)) { |
3112 | rq->hard_sector += nsect; | 3119 | rq->hard_sector += nsect; |
@@ -3401,8 +3408,11 @@ void exit_io_context(void) | |||
3401 | struct io_context *ioc; | 3408 | struct io_context *ioc; |
3402 | 3409 | ||
3403 | local_irq_save(flags); | 3410 | local_irq_save(flags); |
3411 | task_lock(current); | ||
3404 | ioc = current->io_context; | 3412 | ioc = current->io_context; |
3405 | current->io_context = NULL; | 3413 | current->io_context = NULL; |
3414 | ioc->task = NULL; | ||
3415 | task_unlock(current); | ||
3406 | local_irq_restore(flags); | 3416 | local_irq_restore(flags); |
3407 | 3417 | ||
3408 | if (ioc->aic && ioc->aic->exit) | 3418 | if (ioc->aic && ioc->aic->exit) |
@@ -3415,53 +3425,49 @@ void exit_io_context(void) | |||
3415 | 3425 | ||
3416 | /* | 3426 | /* |
3417 | * If the current task has no IO context then create one and initialise it. | 3427 | * If the current task has no IO context then create one and initialise it. |
3418 | * If it does have a context, take a ref on it. | 3428 | * Otherwise, return its existing IO context. |
3419 | * | 3429 | * |
3420 | * This is always called in the context of the task which submitted the I/O. | 3430 | * This returned IO context doesn't have a specifically elevated refcount, |
3421 | * But weird things happen, so we disable local interrupts to ensure exclusive | 3431 | * but since the current task itself holds a reference, the context can be |
3422 | * access to *current. | 3432 | * used in general code, so long as it stays within `current` context. |
3423 | */ | 3433 | */ |
3424 | struct io_context *get_io_context(int gfp_flags) | 3434 | struct io_context *current_io_context(int gfp_flags) |
3425 | { | 3435 | { |
3426 | struct task_struct *tsk = current; | 3436 | struct task_struct *tsk = current; |
3427 | unsigned long flags; | ||
3428 | struct io_context *ret; | 3437 | struct io_context *ret; |
3429 | 3438 | ||
3430 | local_irq_save(flags); | ||
3431 | ret = tsk->io_context; | 3439 | ret = tsk->io_context; |
3432 | if (ret) | 3440 | if (likely(ret)) |
3433 | goto out; | 3441 | return ret; |
3434 | |||
3435 | local_irq_restore(flags); | ||
3436 | 3442 | ||
3437 | ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); | 3443 | ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); |
3438 | if (ret) { | 3444 | if (ret) { |
3439 | atomic_set(&ret->refcount, 1); | 3445 | atomic_set(&ret->refcount, 1); |
3440 | ret->pid = tsk->pid; | 3446 | ret->task = current; |
3447 | ret->set_ioprio = NULL; | ||
3441 | ret->last_waited = jiffies; /* doesn't matter... */ | 3448 | ret->last_waited = jiffies; /* doesn't matter... */ |
3442 | ret->nr_batch_requests = 0; /* because this is 0 */ | 3449 | ret->nr_batch_requests = 0; /* because this is 0 */ |
3443 | ret->aic = NULL; | 3450 | ret->aic = NULL; |
3444 | ret->cic = NULL; | 3451 | ret->cic = NULL; |
3445 | spin_lock_init(&ret->lock); | 3452 | tsk->io_context = ret; |
3446 | 3453 | } | |
3447 | local_irq_save(flags); | ||
3448 | 3454 | ||
3449 | /* | 3455 | return ret; |
3450 | * very unlikely, someone raced with us in setting up the task | 3456 | } |
3451 | * io context. free new context and just grab a reference. | 3457 | EXPORT_SYMBOL(current_io_context); |
3452 | */ | ||
3453 | if (!tsk->io_context) | ||
3454 | tsk->io_context = ret; | ||
3455 | else { | ||
3456 | kmem_cache_free(iocontext_cachep, ret); | ||
3457 | ret = tsk->io_context; | ||
3458 | } | ||
3459 | 3458 | ||
3460 | out: | 3459 | /* |
3460 | * If the current task has no IO context then create one and initialise it. | ||
3461 | * If it does have a context, take a ref on it. | ||
3462 | * | ||
3463 | * This is always called in the context of the task which submitted the I/O. | ||
3464 | */ | ||
3465 | struct io_context *get_io_context(int gfp_flags) | ||
3466 | { | ||
3467 | struct io_context *ret; | ||
3468 | ret = current_io_context(gfp_flags); | ||
3469 | if (likely(ret)) | ||
3461 | atomic_inc(&ret->refcount); | 3470 | atomic_inc(&ret->refcount); |
3462 | local_irq_restore(flags); | ||
3463 | } | ||
3464 | |||
3465 | return ret; | 3471 | return ret; |
3466 | } | 3472 | } |
3467 | EXPORT_SYMBOL(get_io_context); | 3473 | EXPORT_SYMBOL(get_io_context); |
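The refactor above splits the old helper in two: current_io_context() returns the submitting task's context without touching the refcount (the task's own reference keeps it alive), while get_io_context() keeps the refcounted behaviour by bumping the count on top of it. A hypothetical usage sketch, not code from this patch:

	static void touch_ioc_briefly(void)
	{
		/* Used and dropped inside the submitting task: no extra reference. */
		struct io_context *ioc = current_io_context(GFP_ATOMIC);

		if (ioc)
			ioc->last_waited = jiffies;
	}

	static struct io_context *stash_ioc(void)
	{
		/* Stored beyond `current`: take a reference, pair with put_io_context(). */
		return get_io_context(GFP_NOIO);
	}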
@@ -3670,7 +3676,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
3670 | 3676 | ||
3671 | q = container_of(kobj, struct request_queue, kobj); | 3677 | q = container_of(kobj, struct request_queue, kobj); |
3672 | if (!entry->show) | 3678 | if (!entry->show) |
3673 | return 0; | 3679 | return -EIO; |
3674 | 3680 | ||
3675 | return entry->show(q, page); | 3681 | return entry->show(q, page); |
3676 | } | 3682 | } |
@@ -3684,7 +3690,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, | |||
3684 | 3690 | ||
3685 | q = container_of(kobj, struct request_queue, kobj); | 3691 | q = container_of(kobj, struct request_queue, kobj); |
3686 | if (!entry->store) | 3692 | if (!entry->store) |
3687 | return -EINVAL; | 3693 | return -EIO; |
3688 | 3694 | ||
3689 | return entry->store(q, page, length); | 3695 | return entry->store(q, page, length); |
3690 | } | 3696 | } |
@@ -3694,7 +3700,7 @@ static struct sysfs_ops queue_sysfs_ops = { | |||
3694 | .store = queue_attr_store, | 3700 | .store = queue_attr_store, |
3695 | }; | 3701 | }; |
3696 | 3702 | ||
3697 | struct kobj_type queue_ktype = { | 3703 | static struct kobj_type queue_ktype = { |
3698 | .sysfs_ops = &queue_sysfs_ops, | 3704 | .sysfs_ops = &queue_sysfs_ops, |
3699 | .default_attrs = default_attrs, | 3705 | .default_attrs = default_attrs, |
3700 | }; | 3706 | }; |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 6f011d0d8e97..b35e08876dd4 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -472,17 +472,11 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
472 | */ | 472 | */ |
473 | static void loop_add_bio(struct loop_device *lo, struct bio *bio) | 473 | static void loop_add_bio(struct loop_device *lo, struct bio *bio) |
474 | { | 474 | { |
475 | unsigned long flags; | ||
476 | |||
477 | spin_lock_irqsave(&lo->lo_lock, flags); | ||
478 | if (lo->lo_biotail) { | 475 | if (lo->lo_biotail) { |
479 | lo->lo_biotail->bi_next = bio; | 476 | lo->lo_biotail->bi_next = bio; |
480 | lo->lo_biotail = bio; | 477 | lo->lo_biotail = bio; |
481 | } else | 478 | } else |
482 | lo->lo_bio = lo->lo_biotail = bio; | 479 | lo->lo_bio = lo->lo_biotail = bio; |
483 | spin_unlock_irqrestore(&lo->lo_lock, flags); | ||
484 | |||
485 | up(&lo->lo_bh_mutex); | ||
486 | } | 480 | } |
487 | 481 | ||
488 | /* | 482 | /* |
@@ -492,14 +486,12 @@ static struct bio *loop_get_bio(struct loop_device *lo) | |||
492 | { | 486 | { |
493 | struct bio *bio; | 487 | struct bio *bio; |
494 | 488 | ||
495 | spin_lock_irq(&lo->lo_lock); | ||
496 | if ((bio = lo->lo_bio)) { | 489 | if ((bio = lo->lo_bio)) { |
497 | if (bio == lo->lo_biotail) | 490 | if (bio == lo->lo_biotail) |
498 | lo->lo_biotail = NULL; | 491 | lo->lo_biotail = NULL; |
499 | lo->lo_bio = bio->bi_next; | 492 | lo->lo_bio = bio->bi_next; |
500 | bio->bi_next = NULL; | 493 | bio->bi_next = NULL; |
501 | } | 494 | } |
502 | spin_unlock_irq(&lo->lo_lock); | ||
503 | 495 | ||
504 | return bio; | 496 | return bio; |
505 | } | 497 | } |
@@ -509,35 +501,28 @@ static int loop_make_request(request_queue_t *q, struct bio *old_bio) | |||
509 | struct loop_device *lo = q->queuedata; | 501 | struct loop_device *lo = q->queuedata; |
510 | int rw = bio_rw(old_bio); | 502 | int rw = bio_rw(old_bio); |
511 | 503 | ||
512 | if (!lo) | 504 | if (rw == READA) |
513 | goto out; | 505 | rw = READ; |
506 | |||
507 | BUG_ON(!lo || (rw != READ && rw != WRITE)); | ||
514 | 508 | ||
515 | spin_lock_irq(&lo->lo_lock); | 509 | spin_lock_irq(&lo->lo_lock); |
516 | if (lo->lo_state != Lo_bound) | 510 | if (lo->lo_state != Lo_bound) |
517 | goto inactive; | 511 | goto out; |
518 | atomic_inc(&lo->lo_pending); | 512 | if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY))) |
519 | spin_unlock_irq(&lo->lo_lock); | 513 | goto out; |
520 | 514 | lo->lo_pending++; | |
521 | if (rw == WRITE) { | ||
522 | if (lo->lo_flags & LO_FLAGS_READ_ONLY) | ||
523 | goto err; | ||
524 | } else if (rw == READA) { | ||
525 | rw = READ; | ||
526 | } else if (rw != READ) { | ||
527 | printk(KERN_ERR "loop: unknown command (%x)\n", rw); | ||
528 | goto err; | ||
529 | } | ||
530 | loop_add_bio(lo, old_bio); | 515 | loop_add_bio(lo, old_bio); |
516 | spin_unlock_irq(&lo->lo_lock); | ||
517 | up(&lo->lo_bh_mutex); | ||
531 | return 0; | 518 | return 0; |
532 | err: | 519 | |
533 | if (atomic_dec_and_test(&lo->lo_pending)) | ||
534 | up(&lo->lo_bh_mutex); | ||
535 | out: | 520 | out: |
521 | if (lo->lo_pending == 0) | ||
522 | up(&lo->lo_bh_mutex); | ||
523 | spin_unlock_irq(&lo->lo_lock); | ||
536 | bio_io_error(old_bio, old_bio->bi_size); | 524 | bio_io_error(old_bio, old_bio->bi_size); |
537 | return 0; | 525 | return 0; |
538 | inactive: | ||
539 | spin_unlock_irq(&lo->lo_lock); | ||
540 | goto out; | ||
541 | } | 526 | } |
542 | 527 | ||
543 | /* | 528 | /* |
@@ -560,13 +545,11 @@ static void do_loop_switch(struct loop_device *, struct switch_request *); | |||
560 | 545 | ||
561 | static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio) | 546 | static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio) |
562 | { | 547 | { |
563 | int ret; | ||
564 | |||
565 | if (unlikely(!bio->bi_bdev)) { | 548 | if (unlikely(!bio->bi_bdev)) { |
566 | do_loop_switch(lo, bio->bi_private); | 549 | do_loop_switch(lo, bio->bi_private); |
567 | bio_put(bio); | 550 | bio_put(bio); |
568 | } else { | 551 | } else { |
569 | ret = do_bio_filebacked(lo, bio); | 552 | int ret = do_bio_filebacked(lo, bio); |
570 | bio_endio(bio, bio->bi_size, ret); | 553 | bio_endio(bio, bio->bi_size, ret); |
571 | } | 554 | } |
572 | } | 555 | } |
@@ -594,7 +577,7 @@ static int loop_thread(void *data) | |||
594 | set_user_nice(current, -20); | 577 | set_user_nice(current, -20); |
595 | 578 | ||
596 | lo->lo_state = Lo_bound; | 579 | lo->lo_state = Lo_bound; |
597 | atomic_inc(&lo->lo_pending); | 580 | lo->lo_pending = 1; |
598 | 581 | ||
599 | /* | 582 | /* |
600 | * up sem, we are running | 583 | * up sem, we are running |
@@ -602,26 +585,37 @@ static int loop_thread(void *data) | |||
602 | up(&lo->lo_sem); | 585 | up(&lo->lo_sem); |
603 | 586 | ||
604 | for (;;) { | 587 | for (;;) { |
605 | down_interruptible(&lo->lo_bh_mutex); | 588 | int pending; |
589 | |||
606 | /* | 590 | /* |
607 | * could be upped because of tear-down, not because of | 591 | * interruptible just to not contribute to load avg |
608 | * pending work | ||
609 | */ | 592 | */ |
610 | if (!atomic_read(&lo->lo_pending)) | 593 | if (down_interruptible(&lo->lo_bh_mutex)) |
594 | continue; | ||
595 | |||
596 | spin_lock_irq(&lo->lo_lock); | ||
597 | |||
598 | /* | ||
599 | * could be upped because of tear-down, not pending work | ||
600 | */ | ||
601 | if (unlikely(!lo->lo_pending)) { | ||
602 | spin_unlock_irq(&lo->lo_lock); | ||
611 | break; | 603 | break; |
604 | } | ||
612 | 605 | ||
613 | bio = loop_get_bio(lo); | 606 | bio = loop_get_bio(lo); |
614 | if (!bio) { | 607 | lo->lo_pending--; |
615 | printk("loop: missing bio\n"); | 608 | pending = lo->lo_pending; |
616 | continue; | 609 | spin_unlock_irq(&lo->lo_lock); |
617 | } | 610 | |
611 | BUG_ON(!bio); | ||
618 | loop_handle_bio(lo, bio); | 612 | loop_handle_bio(lo, bio); |
619 | 613 | ||
620 | /* | 614 | /* |
621 | * upped both for pending work and tear-down, lo_pending | 615 | * upped both for pending work and tear-down, lo_pending |
622 | * will hit zero then | 616 | * will hit zero then |
623 | */ | 617 | */ |
624 | if (atomic_dec_and_test(&lo->lo_pending)) | 618 | if (unlikely(!pending)) |
625 | break; | 619 | break; |
626 | } | 620 | } |
627 | 621 | ||
@@ -900,7 +894,8 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) | |||
900 | 894 | ||
901 | spin_lock_irq(&lo->lo_lock); | 895 | spin_lock_irq(&lo->lo_lock); |
902 | lo->lo_state = Lo_rundown; | 896 | lo->lo_state = Lo_rundown; |
903 | if (atomic_dec_and_test(&lo->lo_pending)) | 897 | lo->lo_pending--; |
898 | if (!lo->lo_pending) | ||
904 | up(&lo->lo_bh_mutex); | 899 | up(&lo->lo_bh_mutex); |
905 | spin_unlock_irq(&lo->lo_lock); | 900 | spin_unlock_irq(&lo->lo_lock); |
906 | 901 | ||
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index dbeb107bb971..84d8e291ed96 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c | |||
@@ -222,7 +222,7 @@ static int pg_identify(struct pg *dev, int log); | |||
222 | 222 | ||
223 | static char pg_scratch[512]; /* scratch block buffer */ | 223 | static char pg_scratch[512]; /* scratch block buffer */ |
224 | 224 | ||
225 | static struct class_simple *pg_class; | 225 | static struct class *pg_class; |
226 | 226 | ||
227 | /* kernel glue structures */ | 227 | /* kernel glue structures */ |
228 | 228 | ||
@@ -666,7 +666,7 @@ static int __init pg_init(void) | |||
666 | err = -1; | 666 | err = -1; |
667 | goto out; | 667 | goto out; |
668 | } | 668 | } |
669 | pg_class = class_simple_create(THIS_MODULE, "pg"); | 669 | pg_class = class_create(THIS_MODULE, "pg"); |
670 | if (IS_ERR(pg_class)) { | 670 | if (IS_ERR(pg_class)) { |
671 | err = PTR_ERR(pg_class); | 671 | err = PTR_ERR(pg_class); |
672 | goto out_chrdev; | 672 | goto out_chrdev; |
@@ -675,7 +675,7 @@ static int __init pg_init(void) | |||
675 | for (unit = 0; unit < PG_UNITS; unit++) { | 675 | for (unit = 0; unit < PG_UNITS; unit++) { |
676 | struct pg *dev = &devices[unit]; | 676 | struct pg *dev = &devices[unit]; |
677 | if (dev->present) { | 677 | if (dev->present) { |
678 | class_simple_device_add(pg_class, MKDEV(major, unit), | 678 | class_device_create(pg_class, MKDEV(major, unit), |
679 | NULL, "pg%u", unit); | 679 | NULL, "pg%u", unit); |
680 | err = devfs_mk_cdev(MKDEV(major, unit), | 680 | err = devfs_mk_cdev(MKDEV(major, unit), |
681 | S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u", | 681 | S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u", |
@@ -688,8 +688,8 @@ static int __init pg_init(void) | |||
688 | goto out; | 688 | goto out; |
689 | 689 | ||
690 | out_class: | 690 | out_class: |
691 | class_simple_device_remove(MKDEV(major, unit)); | 691 | class_device_destroy(pg_class, MKDEV(major, unit)); |
692 | class_simple_destroy(pg_class); | 692 | class_destroy(pg_class); |
693 | out_chrdev: | 693 | out_chrdev: |
694 | unregister_chrdev(major, "pg"); | 694 | unregister_chrdev(major, "pg"); |
695 | out: | 695 | out: |
@@ -703,11 +703,11 @@ static void __exit pg_exit(void) | |||
703 | for (unit = 0; unit < PG_UNITS; unit++) { | 703 | for (unit = 0; unit < PG_UNITS; unit++) { |
704 | struct pg *dev = &devices[unit]; | 704 | struct pg *dev = &devices[unit]; |
705 | if (dev->present) { | 705 | if (dev->present) { |
706 | class_simple_device_remove(MKDEV(major, unit)); | 706 | class_device_destroy(pg_class, MKDEV(major, unit)); |
707 | devfs_remove("pg/%u", unit); | 707 | devfs_remove("pg/%u", unit); |
708 | } | 708 | } |
709 | } | 709 | } |
710 | class_simple_destroy(pg_class); | 710 | class_destroy(pg_class); |
711 | devfs_remove("pg"); | 711 | devfs_remove("pg"); |
712 | unregister_chrdev(major, name); | 712 | unregister_chrdev(major, name); |
713 | 713 | ||
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 8fbd6922fe0d..5fe8ee86f095 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c | |||
@@ -242,7 +242,7 @@ static struct file_operations pt_fops = { | |||
242 | }; | 242 | }; |
243 | 243 | ||
244 | /* sysfs class support */ | 244 | /* sysfs class support */ |
245 | static struct class_simple *pt_class; | 245 | static struct class *pt_class; |
246 | 246 | ||
247 | static inline int status_reg(struct pi_adapter *pi) | 247 | static inline int status_reg(struct pi_adapter *pi) |
248 | { | 248 | { |
@@ -963,7 +963,7 @@ static int __init pt_init(void) | |||
963 | err = -1; | 963 | err = -1; |
964 | goto out; | 964 | goto out; |
965 | } | 965 | } |
966 | pt_class = class_simple_create(THIS_MODULE, "pt"); | 966 | pt_class = class_create(THIS_MODULE, "pt"); |
967 | if (IS_ERR(pt_class)) { | 967 | if (IS_ERR(pt_class)) { |
968 | err = PTR_ERR(pt_class); | 968 | err = PTR_ERR(pt_class); |
969 | goto out_chrdev; | 969 | goto out_chrdev; |
@@ -972,29 +972,29 @@ static int __init pt_init(void) | |||
972 | devfs_mk_dir("pt"); | 972 | devfs_mk_dir("pt"); |
973 | for (unit = 0; unit < PT_UNITS; unit++) | 973 | for (unit = 0; unit < PT_UNITS; unit++) |
974 | if (pt[unit].present) { | 974 | if (pt[unit].present) { |
975 | class_simple_device_add(pt_class, MKDEV(major, unit), | 975 | class_device_create(pt_class, MKDEV(major, unit), |
976 | NULL, "pt%d", unit); | 976 | NULL, "pt%d", unit); |
977 | err = devfs_mk_cdev(MKDEV(major, unit), | 977 | err = devfs_mk_cdev(MKDEV(major, unit), |
978 | S_IFCHR | S_IRUSR | S_IWUSR, | 978 | S_IFCHR | S_IRUSR | S_IWUSR, |
979 | "pt/%d", unit); | 979 | "pt/%d", unit); |
980 | if (err) { | 980 | if (err) { |
981 | class_simple_device_remove(MKDEV(major, unit)); | 981 | class_device_destroy(pt_class, MKDEV(major, unit)); |
982 | goto out_class; | 982 | goto out_class; |
983 | } | 983 | } |
984 | class_simple_device_add(pt_class, MKDEV(major, unit + 128), | 984 | class_device_create(pt_class, MKDEV(major, unit + 128), |
985 | NULL, "pt%dn", unit); | 985 | NULL, "pt%dn", unit); |
986 | err = devfs_mk_cdev(MKDEV(major, unit + 128), | 986 | err = devfs_mk_cdev(MKDEV(major, unit + 128), |
987 | S_IFCHR | S_IRUSR | S_IWUSR, | 987 | S_IFCHR | S_IRUSR | S_IWUSR, |
988 | "pt/%dn", unit); | 988 | "pt/%dn", unit); |
989 | if (err) { | 989 | if (err) { |
990 | class_simple_device_remove(MKDEV(major, unit + 128)); | 990 | class_device_destroy(pt_class, MKDEV(major, unit + 128)); |
991 | goto out_class; | 991 | goto out_class; |
992 | } | 992 | } |
993 | } | 993 | } |
994 | goto out; | 994 | goto out; |
995 | 995 | ||
996 | out_class: | 996 | out_class: |
997 | class_simple_destroy(pt_class); | 997 | class_destroy(pt_class); |
998 | out_chrdev: | 998 | out_chrdev: |
999 | unregister_chrdev(major, "pt"); | 999 | unregister_chrdev(major, "pt"); |
1000 | out: | 1000 | out: |
@@ -1006,12 +1006,12 @@ static void __exit pt_exit(void) | |||
1006 | int unit; | 1006 | int unit; |
1007 | for (unit = 0; unit < PT_UNITS; unit++) | 1007 | for (unit = 0; unit < PT_UNITS; unit++) |
1008 | if (pt[unit].present) { | 1008 | if (pt[unit].present) { |
1009 | class_simple_device_remove(MKDEV(major, unit)); | 1009 | class_device_destroy(pt_class, MKDEV(major, unit)); |
1010 | devfs_remove("pt/%d", unit); | 1010 | devfs_remove("pt/%d", unit); |
1011 | class_simple_device_remove(MKDEV(major, unit + 128)); | 1011 | class_device_destroy(pt_class, MKDEV(major, unit + 128)); |
1012 | devfs_remove("pt/%dn", unit); | 1012 | devfs_remove("pt/%dn", unit); |
1013 | } | 1013 | } |
1014 | class_simple_destroy(pt_class); | 1014 | class_destroy(pt_class); |
1015 | devfs_remove("pt"); | 1015 | devfs_remove("pt"); |
1016 | unregister_chrdev(major, name); | 1016 | unregister_chrdev(major, name); |
1017 | for (unit = 0; unit < PT_UNITS; unit++) | 1017 | for (unit = 0; unit < PT_UNITS; unit++) |
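pg.c and pt.c follow the same class_simple to class conversion as the other character drivers in this patch set. A condensed, hypothetical skeleton using only the calls visible in these hunks ("mydrv" and the major/unit parameters are placeholders):

	#include <linux/module.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static struct class *mydrv_class;

	static int mydrv_register_class(int major, int nunits)
	{
		int unit;

		mydrv_class = class_create(THIS_MODULE, "mydrv");
		if (IS_ERR(mydrv_class))
			return PTR_ERR(mydrv_class);

		for (unit = 0; unit < nunits; unit++)
			class_device_create(mydrv_class, MKDEV(major, unit),
					    NULL, "mydrv%d", unit);
		return 0;
	}

	static void mydrv_unregister_class(int major, int nunits)
	{
		int unit;

		for (unit = 0; unit < nunits; unit++)
			class_device_destroy(mydrv_class, MKDEV(major, unit));
		class_destroy(mydrv_class);
	}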
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index bc56770bcc90..7b838342f0a3 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -467,14 +467,12 @@ static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsign | |||
467 | * Queue a bio for processing by the low-level CD device. Must be called | 467 | * Queue a bio for processing by the low-level CD device. Must be called |
468 | * from process context. | 468 | * from process context. |
469 | */ | 469 | */ |
470 | static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read) | 470 | static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) |
471 | { | 471 | { |
472 | spin_lock(&pd->iosched.lock); | 472 | spin_lock(&pd->iosched.lock); |
473 | if (bio_data_dir(bio) == READ) { | 473 | if (bio_data_dir(bio) == READ) { |
474 | pkt_add_list_last(bio, &pd->iosched.read_queue, | 474 | pkt_add_list_last(bio, &pd->iosched.read_queue, |
475 | &pd->iosched.read_queue_tail); | 475 | &pd->iosched.read_queue_tail); |
476 | if (high_prio_read) | ||
477 | pd->iosched.high_prio_read = 1; | ||
478 | } else { | 476 | } else { |
479 | pkt_add_list_last(bio, &pd->iosched.write_queue, | 477 | pkt_add_list_last(bio, &pd->iosched.write_queue, |
480 | &pd->iosched.write_queue_tail); | 478 | &pd->iosched.write_queue_tail); |
@@ -490,15 +488,16 @@ static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_p | |||
490 | * requirements for CDRW drives: | 488 | * requirements for CDRW drives: |
491 | * - A cache flush command must be inserted before a read request if the | 489 | * - A cache flush command must be inserted before a read request if the |
492 | * previous request was a write. | 490 | * previous request was a write. |
493 | * - Switching between reading and writing is slow, so don't it more often | 491 | * - Switching between reading and writing is slow, so don't do it more often |
494 | * than necessary. | 492 | * than necessary. |
493 | * - Optimize for throughput at the expense of latency. This means that streaming | ||
494 | * writes will never be interrupted by a read, but if the drive has to seek | ||
495 | * before the next write, switch to reading instead if there are any pending | ||
496 | * read requests. | ||
495 | * - Set the read speed according to current usage pattern. When only reading | 497 | * - Set the read speed according to current usage pattern. When only reading |
496 | * from the device, it's best to use the highest possible read speed, but | 498 | * from the device, it's best to use the highest possible read speed, but |
497 | * when switching often between reading and writing, it's better to have the | 499 | * when switching often between reading and writing, it's better to have the |
498 | * same read and write speeds. | 500 | * same read and write speeds. |
499 | * - Reads originating from user space should have higher priority than reads | ||
500 | * originating from pkt_gather_data, because some process is usually waiting | ||
501 | * on reads of the first kind. | ||
502 | */ | 501 | */ |
503 | static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | 502 | static void pkt_iosched_process_queue(struct pktcdvd_device *pd) |
504 | { | 503 | { |
@@ -512,21 +511,24 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
512 | 511 | ||
513 | for (;;) { | 512 | for (;;) { |
514 | struct bio *bio; | 513 | struct bio *bio; |
515 | int reads_queued, writes_queued, high_prio_read; | 514 | int reads_queued, writes_queued; |
516 | 515 | ||
517 | spin_lock(&pd->iosched.lock); | 516 | spin_lock(&pd->iosched.lock); |
518 | reads_queued = (pd->iosched.read_queue != NULL); | 517 | reads_queued = (pd->iosched.read_queue != NULL); |
519 | writes_queued = (pd->iosched.write_queue != NULL); | 518 | writes_queued = (pd->iosched.write_queue != NULL); |
520 | if (!reads_queued) | ||
521 | pd->iosched.high_prio_read = 0; | ||
522 | high_prio_read = pd->iosched.high_prio_read; | ||
523 | spin_unlock(&pd->iosched.lock); | 519 | spin_unlock(&pd->iosched.lock); |
524 | 520 | ||
525 | if (!reads_queued && !writes_queued) | 521 | if (!reads_queued && !writes_queued) |
526 | break; | 522 | break; |
527 | 523 | ||
528 | if (pd->iosched.writing) { | 524 | if (pd->iosched.writing) { |
529 | if (high_prio_read || (!writes_queued && reads_queued)) { | 525 | int need_write_seek = 1; |
526 | spin_lock(&pd->iosched.lock); | ||
527 | bio = pd->iosched.write_queue; | ||
528 | spin_unlock(&pd->iosched.lock); | ||
529 | if (bio && (bio->bi_sector == pd->iosched.last_write)) | ||
530 | need_write_seek = 0; | ||
531 | if (need_write_seek && reads_queued) { | ||
530 | if (atomic_read(&pd->cdrw.pending_bios) > 0) { | 532 | if (atomic_read(&pd->cdrw.pending_bios) > 0) { |
531 | VPRINTK("pktcdvd: write, waiting\n"); | 533 | VPRINTK("pktcdvd: write, waiting\n"); |
532 | break; | 534 | break; |
@@ -559,8 +561,10 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
559 | 561 | ||
560 | if (bio_data_dir(bio) == READ) | 562 | if (bio_data_dir(bio) == READ) |
561 | pd->iosched.successive_reads += bio->bi_size >> 10; | 563 | pd->iosched.successive_reads += bio->bi_size >> 10; |
562 | else | 564 | else { |
563 | pd->iosched.successive_reads = 0; | 565 | pd->iosched.successive_reads = 0; |
566 | pd->iosched.last_write = bio->bi_sector + bio_sectors(bio); | ||
567 | } | ||
564 | if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { | 568 | if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { |
565 | if (pd->read_speed == pd->write_speed) { | 569 | if (pd->read_speed == pd->write_speed) { |
566 | pd->read_speed = MAX_SPEED; | 570 | pd->read_speed = MAX_SPEED; |
@@ -765,7 +769,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
765 | 769 | ||
766 | atomic_inc(&pkt->io_wait); | 770 | atomic_inc(&pkt->io_wait); |
767 | bio->bi_rw = READ; | 771 | bio->bi_rw = READ; |
768 | pkt_queue_bio(pd, bio, 0); | 772 | pkt_queue_bio(pd, bio); |
769 | frames_read++; | 773 | frames_read++; |
770 | } | 774 | } |
771 | 775 | ||
@@ -1062,7 +1066,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1062 | 1066 | ||
1063 | atomic_set(&pkt->io_wait, 1); | 1067 | atomic_set(&pkt->io_wait, 1); |
1064 | pkt->w_bio->bi_rw = WRITE; | 1068 | pkt->w_bio->bi_rw = WRITE; |
1065 | pkt_queue_bio(pd, pkt->w_bio, 0); | 1069 | pkt_queue_bio(pd, pkt->w_bio); |
1066 | } | 1070 | } |
1067 | 1071 | ||
1068 | static void pkt_finish_packet(struct packet_data *pkt, int uptodate) | 1072 | static void pkt_finish_packet(struct packet_data *pkt, int uptodate) |
@@ -1247,8 +1251,7 @@ static int kcdrwd(void *foobar) | |||
1247 | VPRINTK("kcdrwd: wake up\n"); | 1251 | VPRINTK("kcdrwd: wake up\n"); |
1248 | 1252 | ||
1249 | /* make swsusp happy with our thread */ | 1253 | /* make swsusp happy with our thread */ |
1250 | if (current->flags & PF_FREEZE) | 1254 | try_to_freeze(); |
1251 | refrigerator(PF_FREEZE); | ||
1252 | 1255 | ||
1253 | list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { | 1256 | list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { |
1254 | if (!pkt->sleep_time) | 1257 | if (!pkt->sleep_time) |
@@ -2120,7 +2123,7 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio) | |||
2120 | cloned_bio->bi_private = psd; | 2123 | cloned_bio->bi_private = psd; |
2121 | cloned_bio->bi_end_io = pkt_end_io_read_cloned; | 2124 | cloned_bio->bi_end_io = pkt_end_io_read_cloned; |
2122 | pd->stats.secs_r += bio->bi_size >> 9; | 2125 | pd->stats.secs_r += bio->bi_size >> 9; |
2123 | pkt_queue_bio(pd, cloned_bio, 1); | 2126 | pkt_queue_bio(pd, cloned_bio); |
2124 | return 0; | 2127 | return 0; |
2125 | } | 2128 | } |
2126 | 2129 | ||
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 5b09cf154ac7..e5f7494c00ee 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
@@ -253,7 +253,7 @@ static int floppy_revalidate(struct gendisk *disk); | |||
253 | static int swim3_add_device(struct device_node *swims); | 253 | static int swim3_add_device(struct device_node *swims); |
254 | int swim3_init(void); | 254 | int swim3_init(void); |
255 | 255 | ||
256 | #ifndef CONFIG_PMAC_PBOOK | 256 | #ifndef CONFIG_PMAC_MEDIABAY |
257 | #define check_media_bay(which, what) 1 | 257 | #define check_media_bay(which, what) 1 |
258 | #endif | 258 | #endif |
259 | 259 | ||
@@ -297,9 +297,11 @@ static void do_fd_request(request_queue_t * q) | |||
297 | int i; | 297 | int i; |
298 | for(i=0;i<floppy_count;i++) | 298 | for(i=0;i<floppy_count;i++) |
299 | { | 299 | { |
300 | #ifdef CONFIG_PMAC_MEDIABAY | ||
300 | if (floppy_states[i].media_bay && | 301 | if (floppy_states[i].media_bay && |
301 | check_media_bay(floppy_states[i].media_bay, MB_FD)) | 302 | check_media_bay(floppy_states[i].media_bay, MB_FD)) |
302 | continue; | 303 | continue; |
304 | #endif /* CONFIG_PMAC_MEDIABAY */ | ||
303 | start_request(&floppy_states[i]); | 305 | start_request(&floppy_states[i]); |
304 | } | 306 | } |
305 | sti(); | 307 | sti(); |
@@ -856,8 +858,10 @@ static int floppy_ioctl(struct inode *inode, struct file *filp, | |||
856 | if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) | 858 | if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) |
857 | return -EPERM; | 859 | return -EPERM; |
858 | 860 | ||
861 | #ifdef CONFIG_PMAC_MEDIABAY | ||
859 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) | 862 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) |
860 | return -ENXIO; | 863 | return -ENXIO; |
864 | #endif | ||
861 | 865 | ||
862 | switch (cmd) { | 866 | switch (cmd) { |
863 | case FDEJECT: | 867 | case FDEJECT: |
@@ -881,8 +885,10 @@ static int floppy_open(struct inode *inode, struct file *filp) | |||
881 | int n, err = 0; | 885 | int n, err = 0; |
882 | 886 | ||
883 | if (fs->ref_count == 0) { | 887 | if (fs->ref_count == 0) { |
888 | #ifdef CONFIG_PMAC_MEDIABAY | ||
884 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) | 889 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) |
885 | return -ENXIO; | 890 | return -ENXIO; |
891 | #endif | ||
886 | out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); | 892 | out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); |
887 | out_8(&sw->control_bic, 0xff); | 893 | out_8(&sw->control_bic, 0xff); |
888 | out_8(&sw->mode, 0x95); | 894 | out_8(&sw->mode, 0x95); |
@@ -967,8 +973,10 @@ static int floppy_revalidate(struct gendisk *disk) | |||
967 | struct swim3 __iomem *sw; | 973 | struct swim3 __iomem *sw; |
968 | int ret, n; | 974 | int ret, n; |
969 | 975 | ||
976 | #ifdef CONFIG_PMAC_MEDIABAY | ||
970 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) | 977 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) |
971 | return -ENXIO; | 978 | return -ENXIO; |
979 | #endif | ||
972 | 980 | ||
973 | sw = fs->swim3; | 981 | sw = fs->swim3; |
974 | grab_drive(fs, revalidating, 0); | 982 | grab_drive(fs, revalidating, 0); |
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 5ed3a6379452..d57007b92f77 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/time.h> | 27 | #include <linux/time.h> |
28 | #include <linux/hdreg.h> | 28 | #include <linux/hdreg.h> |
29 | #include <linux/dma-mapping.h> | ||
29 | #include <asm/io.h> | 30 | #include <asm/io.h> |
30 | #include <asm/semaphore.h> | 31 | #include <asm/semaphore.h> |
31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
@@ -1581,10 +1582,10 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1581 | if (rc) | 1582 | if (rc) |
1582 | goto err_out; | 1583 | goto err_out; |
1583 | 1584 | ||
1584 | #if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ | 1585 | #ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ |
1585 | rc = pci_set_dma_mask(pdev, 0xffffffffffffffffULL); | 1586 | rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK); |
1586 | if (!rc) { | 1587 | if (!rc) { |
1587 | rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); | 1588 | rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); |
1588 | if (rc) { | 1589 | if (rc) { |
1589 | printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n", | 1590 | printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n", |
1590 | pci_name(pdev)); | 1591 | pci_name(pdev)); |
@@ -1593,14 +1594,14 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1593 | pci_dac = 1; | 1594 | pci_dac = 1; |
1594 | } else { | 1595 | } else { |
1595 | #endif | 1596 | #endif |
1596 | rc = pci_set_dma_mask(pdev, 0xffffffffULL); | 1597 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); |
1597 | if (rc) { | 1598 | if (rc) { |
1598 | printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n", | 1599 | printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n", |
1599 | pci_name(pdev)); | 1600 | pci_name(pdev)); |
1600 | goto err_out_regions; | 1601 | goto err_out_regions; |
1601 | } | 1602 | } |
1602 | pci_dac = 0; | 1603 | pci_dac = 0; |
1603 | #if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ | 1604 | #ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ |
1604 | } | 1605 | } |
1605 | #endif | 1606 | #endif |
1606 | 1607 | ||
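The sx8 hunks replace the raw 0xffffffff... constants with DMA_64BIT_MASK/DMA_32BIT_MASK from the newly included <linux/dma-mapping.h>. A generic sketch of the same probe-time negotiation, with the IF_64BIT_DMA_IS_POSSIBLE conditional and error reporting trimmed (the function name is ours):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int negotiate_dma_masks(struct pci_dev *pdev, int *pci_dac)
	{
		if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			*pci_dac = 1;	/* 64-bit addressing available */
			return 0;
		}

		*pci_dac = 0;
		return pci_set_dma_mask(pdev, DMA_32BIT_MASK);	/* fall back to 32-bit */
	}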
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 19c5e59bcfa8..a026567f5d18 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -23,6 +23,7 @@ | |||
23 | * -- Exterminate P3 printks | 23 | * -- Exterminate P3 printks |
24 | * -- Resolve XXX's | 24 | * -- Resolve XXX's |
25 | * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=? | 25 | * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=? |
26 | * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring. | ||
26 | */ | 27 | */ |
27 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
28 | #include <linux/module.h> | 29 | #include <linux/module.h> |
@@ -38,6 +39,73 @@ | |||
38 | #define UB_MAJOR 180 | 39 | #define UB_MAJOR 180 |
39 | 40 | ||
40 | /* | 41 | /* |
42 | * The command state machine is the key model for understanding of this driver. | ||
43 | * | ||
44 | * The general rule is that all transitions are done towards the bottom | ||
45 | * of the diagram, thus preventing any loops. | ||
46 | * | ||
47 | * An exception to that is how the STAT state is handled. A counter allows it | ||
48 | * to be re-entered along the path marked with [C]. | ||
49 | * | ||
50 | * +--------+ | ||
51 | * ! INIT ! | ||
52 | * +--------+ | ||
53 | * ! | ||
54 | * ub_scsi_cmd_start fails ->--------------------------------------\ | ||
55 | * ! ! | ||
56 | * V ! | ||
57 | * +--------+ ! | ||
58 | * ! CMD ! ! | ||
59 | * +--------+ ! | ||
60 | * ! +--------+ ! | ||
61 | * was -EPIPE -->-------------------------------->! CLEAR ! ! | ||
62 | * ! +--------+ ! | ||
63 | * ! ! ! | ||
64 | * was error -->------------------------------------- ! --------->\ | ||
65 | * ! ! ! | ||
66 | * /--<-- cmd->dir == NONE ? ! ! | ||
67 | * ! ! ! ! | ||
68 | * ! V ! ! | ||
69 | * ! +--------+ ! ! | ||
70 | * ! ! DATA ! ! ! | ||
71 | * ! +--------+ ! ! | ||
72 | * ! ! +---------+ ! ! | ||
73 | * ! was -EPIPE -->--------------->! CLR2STS ! ! ! | ||
74 | * ! ! +---------+ ! ! | ||
75 | * ! ! ! ! ! | ||
76 | * ! ! was error -->---- ! --------->\ | ||
77 | * ! was error -->--------------------- ! ------------- ! --------->\ | ||
78 | * ! ! ! ! ! | ||
79 | * ! V ! ! ! | ||
80 | * \--->+--------+ ! ! ! | ||
81 | * ! STAT !<--------------------------/ ! ! | ||
82 | * /--->+--------+ ! ! | ||
83 | * ! ! ! ! | ||
84 | * [C] was -EPIPE -->-----------\ ! ! | ||
85 | * ! ! ! ! ! | ||
86 | * +<---- len == 0 ! ! ! | ||
87 | * ! ! ! ! ! | ||
88 | * ! was error -->--------------------------------------!---------->\ | ||
89 | * ! ! ! ! ! | ||
90 | * +<---- bad CSW ! ! ! | ||
91 | * +<---- bad tag ! ! ! | ||
92 | * ! ! V ! ! | ||
93 | * ! ! +--------+ ! ! | ||
94 | * ! ! ! CLRRS ! ! ! | ||
95 | * ! ! +--------+ ! ! | ||
96 | * ! ! ! ! ! | ||
97 | * \------- ! --------------------[C]--------\ ! ! | ||
98 | * ! ! ! ! | ||
99 | * cmd->error---\ +--------+ ! ! | ||
100 | * ! +--------------->! SENSE !<----------/ ! | ||
101 | * STAT_FAIL----/ +--------+ ! | ||
102 | * ! ! V | ||
103 | * ! V +--------+ | ||
104 | * \--------------------------------\--------------------->! DONE ! | ||
105 | * +--------+ | ||
106 | */ | ||
107 | |||
108 | /* | ||
41 | * Definitions which have to be scattered once we understand the layout better. | 109 | * Definitions which have to be scattered once we understand the layout better. |
42 | */ | 110 | */ |
43 | 111 | ||
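As the diagram notes, the only loop in the machine is the counted re-entry into STAT along the [C] edges. An illustrative fragment of that guard, grounded in the removed stat_count check further down rather than copied from the new ub_state_stat_counted() helper:

	static void sketch_stat_retry(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
	{
		if (++cmd->stat_count >= 4) {
			/* Give up on the CSW; fail the command instead of looping. */
			return;
		}
		ub_state_stat(sc, cmd);		/* issue another CSW read */
	}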
@@ -91,8 +159,6 @@ struct bulk_cs_wrap { | |||
91 | 159 | ||
92 | #define US_BULK_CS_WRAP_LEN 13 | 160 | #define US_BULK_CS_WRAP_LEN 13 |
93 | #define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */ | 161 | #define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */ |
94 | /* This is for Olympus Camedia digital cameras */ | ||
95 | #define US_BULK_CS_OLYMPUS_SIGN 0x55425355 /* spells out 'USBU' */ | ||
96 | #define US_BULK_STAT_OK 0 | 162 | #define US_BULK_STAT_OK 0 |
97 | #define US_BULK_STAT_FAIL 1 | 163 | #define US_BULK_STAT_FAIL 1 |
98 | #define US_BULK_STAT_PHASE 2 | 164 | #define US_BULK_STAT_PHASE 2 |
@@ -135,6 +201,7 @@ enum ub_scsi_cmd_state { | |||
135 | UB_CMDST_CLR2STS, /* Clearing before requesting status */ | 201 | UB_CMDST_CLR2STS, /* Clearing before requesting status */ |
136 | UB_CMDST_STAT, /* Status phase */ | 202 | UB_CMDST_STAT, /* Status phase */ |
137 | UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */ | 203 | UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */ |
204 | UB_CMDST_CLRRS, /* Clearing before retrying status */ | ||
138 | UB_CMDST_SENSE, /* Sending Request Sense */ | 205 | UB_CMDST_SENSE, /* Sending Request Sense */ |
139 | UB_CMDST_DONE /* Final state */ | 206 | UB_CMDST_DONE /* Final state */ |
140 | }; | 207 | }; |
@@ -146,6 +213,7 @@ static char *ub_scsi_cmd_stname[] = { | |||
146 | "c2s", | 213 | "c2s", |
147 | "sts", | 214 | "sts", |
148 | "clr", | 215 | "clr", |
216 | "crs", | ||
149 | "Sen", | 217 | "Sen", |
150 | "fin" | 218 | "fin" |
151 | }; | 219 | }; |
@@ -316,6 +384,7 @@ struct ub_dev { | |||
316 | struct urb work_urb; | 384 | struct urb work_urb; |
317 | struct timer_list work_timer; | 385 | struct timer_list work_timer; |
318 | int last_pipe; /* What might need clearing */ | 386 | int last_pipe; /* What might need clearing */ |
387 | __le32 signature; /* Learned signature */ | ||
319 | struct bulk_cb_wrap work_bcb; | 388 | struct bulk_cb_wrap work_bcb; |
320 | struct bulk_cs_wrap work_bcs; | 389 | struct bulk_cs_wrap work_bcs; |
321 | struct usb_ctrlrequest work_cr; | 390 | struct usb_ctrlrequest work_cr; |
@@ -339,8 +408,9 @@ static void ub_scsi_action(unsigned long _dev); | |||
339 | static void ub_scsi_dispatch(struct ub_dev *sc); | 408 | static void ub_scsi_dispatch(struct ub_dev *sc); |
340 | static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | 409 | static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
341 | static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); | 410 | static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); |
342 | static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | 411 | static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
343 | static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | 412 | static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
413 | static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | ||
344 | static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | 414 | static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
345 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | 415 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, |
346 | int stalled_pipe); | 416 | int stalled_pipe); |
@@ -430,7 +500,7 @@ static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |||
430 | } | 500 | } |
431 | } | 501 | } |
432 | 502 | ||
433 | static ssize_t ub_diag_show(struct device *dev, char *page) | 503 | static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, char *page) |
434 | { | 504 | { |
435 | struct usb_interface *intf; | 505 | struct usb_interface *intf; |
436 | struct ub_dev *sc; | 506 | struct ub_dev *sc; |
@@ -1085,6 +1155,28 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1085 | 1155 | ||
1086 | ub_state_stat(sc, cmd); | 1156 | ub_state_stat(sc, cmd); |
1087 | 1157 | ||
1158 | } else if (cmd->state == UB_CMDST_CLRRS) { | ||
1159 | if (urb->status == -EPIPE) { | ||
1160 | /* | ||
1161 | * STALL while clearing STALL. | ||
1162 | * The control pipe clears itself - nothing to do. | ||
1163 | * XXX Might try to reset the device here and retry. | ||
1164 | */ | ||
1165 | printk(KERN_NOTICE "%s: stall on control pipe\n", | ||
1166 | sc->name); | ||
1167 | goto Bad_End; | ||
1168 | } | ||
1169 | |||
1170 | /* | ||
1171 | * We ignore the result for the halt clear. | ||
1172 | */ | ||
1173 | |||
1174 | /* reset the endpoint toggle */ | ||
1175 | usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), | ||
1176 | usb_pipeout(sc->last_pipe), 0); | ||
1177 | |||
1178 | ub_state_stat_counted(sc, cmd); | ||
1179 | |||
1088 | } else if (cmd->state == UB_CMDST_CMD) { | 1180 | } else if (cmd->state == UB_CMDST_CMD) { |
1089 | if (urb->status == -EPIPE) { | 1181 | if (urb->status == -EPIPE) { |
1090 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); | 1182 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); |
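
The UB_CMDST_CLRRS branch added above has a simple shape: a stall while clearing the stall is fatal, any other outcome of the clear is ignored, the data toggle is reset, and the CSW read is retried along the counted path. A hedged user-space sketch of just that decision; the enum and function names are invented for the example:

    #include <errno.h>
    #include <stdio.h>

    enum next_step { STEP_FAIL, STEP_RETRY_CSW };

    /*
     * After the clear-halt issued so the CSW can be retried: a stall on
     * the control pipe itself is treated as fatal, while any other
     * completion falls through to resetting the data toggle and
     * re-reading the CSW along the counted path.
     */
    static enum next_step after_clear_for_csw_retry(int urb_status)
    {
            if (urb_status == -EPIPE)
                    return STEP_FAIL;
            /* the result of the clear itself is otherwise ignored */
            return STEP_RETRY_CSW;
    }

    int main(void)
    {
            printf("%d %d\n",
                   after_clear_for_csw_retry(-EPIPE),   /* 0: give up */
                   after_clear_for_csw_retry(0));       /* 1: re-read the CSW */
            return 0;
    }
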
@@ -1190,52 +1282,57 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1190 | */ | 1282 | */ |
1191 | goto Bad_End; | 1283 | goto Bad_End; |
1192 | } | 1284 | } |
1193 | cmd->state = UB_CMDST_CLEAR; | 1285 | |
1286 | /* | ||
1287 | * Having a stall when getting CSW is an error, so | ||
1288 | * make sure upper levels are not oblivious to it. | ||
1289 | */ | ||
1290 | cmd->error = -EIO; /* A cheap trick... */ | ||
1291 | |||
1292 | cmd->state = UB_CMDST_CLRRS; | ||
1194 | ub_cmdtr_state(sc, cmd); | 1293 | ub_cmdtr_state(sc, cmd); |
1195 | return; | 1294 | return; |
1196 | } | 1295 | } |
1296 | if (urb->status == -EOVERFLOW) { | ||
1297 | /* | ||
1298 | * XXX We are screwed here. Retrying is pointless, | ||
1299 | * because the pipelined data will not get in until | ||
1300 | * we read with a big enough buffer. We must reset XXX. | ||
1301 | */ | ||
1302 | goto Bad_End; | ||
1303 | } | ||
1197 | if (urb->status != 0) | 1304 | if (urb->status != 0) |
1198 | goto Bad_End; | 1305 | goto Bad_End; |
1199 | 1306 | ||
1200 | if (urb->actual_length == 0) { | 1307 | if (urb->actual_length == 0) { |
1201 | /* | 1308 | ub_state_stat_counted(sc, cmd); |
1202 | * Some broken devices add unnecessary zero-length | ||
1203 | * packets to the end of their data transfers. | ||
1204 | * Such packets show up as 0-length CSWs. If we | ||
1205 | * encounter such a thing, try to read the CSW again. | ||
1206 | */ | ||
1207 | if (++cmd->stat_count >= 4) { | ||
1208 | printk(KERN_NOTICE "%s: unable to get CSW\n", | ||
1209 | sc->name); | ||
1210 | goto Bad_End; | ||
1211 | } | ||
1212 | __ub_state_stat(sc, cmd); | ||
1213 | return; | 1309 | return; |
1214 | } | 1310 | } |
1215 | 1311 | ||
1216 | /* | 1312 | /* |
1217 | * Check the returned Bulk protocol status. | 1313 | * Check the returned Bulk protocol status. |
1314 | * The status block has to be validated first. | ||
1218 | */ | 1315 | */ |
1219 | 1316 | ||
1220 | bcs = &sc->work_bcs; | 1317 | bcs = &sc->work_bcs; |
1221 | rc = le32_to_cpu(bcs->Residue); | 1318 | |
1222 | if (rc != cmd->len - cmd->act_len) { | 1319 | if (sc->signature == cpu_to_le32(0)) { |
1223 | /* | 1320 | /* |
1224 | * It is all right to transfer less, the caller has | 1321 | * This is the first reply, so do not perform the check. |
1225 | * to check. But it's not all right if the device | 1322 | * Instead, remember the signature the device uses |
1226 | * counts disagree with our counts. | 1323 | * for future checks. But do not allow a nul signature. |
1227 | */ | 1324 | */ |
1228 | /* P3 */ printk("%s: resid %d len %d act %d\n", | 1325 | sc->signature = bcs->Signature; |
1229 | sc->name, rc, cmd->len, cmd->act_len); | 1326 | if (sc->signature == cpu_to_le32(0)) { |
1230 | goto Bad_End; | 1327 | ub_state_stat_counted(sc, cmd); |
1231 | } | 1328 | return; |
1232 | 1329 | } | |
1233 | #if 0 | 1330 | } else { |
1234 | if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) && | 1331 | if (bcs->Signature != sc->signature) { |
1235 | bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) { | 1332 | ub_state_stat_counted(sc, cmd); |
1236 | /* Windows ignores signatures, so do we. */ | 1333 | return; |
1334 | } | ||
1237 | } | 1335 | } |
1238 | #endif | ||
1239 | 1336 | ||
1240 | if (bcs->Tag != cmd->tag) { | 1337 | if (bcs->Tag != cmd->tag) { |
1241 | /* | 1338 | /* |
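
The reworked CSW check above no longer hard-codes US_BULK_CS_SIGN, which is why the Olympus 'USBU' definition could be dropped earlier in the patch: the first non-zero signature the device returns is remembered in sc->signature, and every later status block must carry the same value or the CSW is simply re-read. A rough user-space model of that learn-once-then-compare rule; the struct layout and helper name are simplified stand-ins, not the driver's types:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified bulk-only CSW header fields (little-endian on the wire). */
    struct csw {
            uint32_t signature;
            uint32_t tag;
            uint32_t residue;
            uint8_t  status;
    };

    /* Returns 1 if the CSW signature is acceptable, learning it on first use. */
    static int csw_signature_ok(uint32_t *learned, const struct csw *csw)
    {
            if (*learned == 0) {
                    if (csw->signature == 0)
                            return 0;               /* nul signature: re-read the CSW */
                    *learned = csw->signature;      /* the first reply sets the reference */
                    return 1;
            }
            return csw->signature == *learned;
    }

    int main(void)
    {
            uint32_t learned = 0;
            struct csw a = { 0x53425355, 1, 0, 0 }; /* 'USBS' */
            struct csw b = { 0x55425355, 2, 0, 0 }; /* 'USBU', Olympus-style */

            printf("%d\n", csw_signature_ok(&learned, &a)); /* 1: learned */
            printf("%d\n", csw_signature_ok(&learned, &a)); /* 1: matches */
            printf("%d\n", csw_signature_ok(&learned, &b)); /* 0: mismatch, re-read */
            return 0;
    }
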
@@ -1245,16 +1342,22 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1245 | * commands and reply to commands we timed out before. | 1342 | * commands and reply to commands we timed out before. |
1246 | * Without flushing these replies we loop forever. | 1343 | * Without flushing these replies we loop forever. |
1247 | */ | 1344 | */ |
1248 | if (++cmd->stat_count >= 4) { | 1345 | ub_state_stat_counted(sc, cmd); |
1249 | printk(KERN_NOTICE "%s: " | ||
1250 | "tag mismatch orig 0x%x reply 0x%x\n", | ||
1251 | sc->name, cmd->tag, bcs->Tag); | ||
1252 | goto Bad_End; | ||
1253 | } | ||
1254 | __ub_state_stat(sc, cmd); | ||
1255 | return; | 1346 | return; |
1256 | } | 1347 | } |
1257 | 1348 | ||
1349 | rc = le32_to_cpu(bcs->Residue); | ||
1350 | if (rc != cmd->len - cmd->act_len) { | ||
1351 | /* | ||
1352 | * It is all right to transfer less, the caller has | ||
1353 | * to check. But it's not all right if the device | ||
1354 | * counts disagree with our counts. | ||
1355 | */ | ||
1356 | /* P3 */ printk("%s: resid %d len %d act %d\n", | ||
1357 | sc->name, rc, cmd->len, cmd->act_len); | ||
1358 | goto Bad_End; | ||
1359 | } | ||
1360 | |||
1258 | switch (bcs->Status) { | 1361 | switch (bcs->Status) { |
1259 | case US_BULK_STAT_OK: | 1362 | case US_BULK_STAT_OK: |
1260 | break; | 1363 | break; |
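
The residue comparison was moved below the signature and tag checks, so it now runs only on a CSW that has already been accepted as genuine; the rule itself is unchanged: transferring less than requested is fine, but the residue the device reports must equal the requested length minus the bytes the host actually saw. A small self-contained illustration of that consistency check; the function name is invented for the example:

    #include <stdio.h>

    /*
     * A short transfer is acceptable, but the residue the device reports
     * must match the host's own accounting: requested minus actually seen.
     */
    static int residue_consistent(unsigned int requested, unsigned int actual,
                                  unsigned int reported_residue)
    {
            return reported_residue == requested - actual;
    }

    int main(void)
    {
            /* 4096 bytes requested, 512 arrived, device claims 3584 left over. */
            printf("%d\n", residue_consistent(4096, 512, 3584));    /* 1: accept */
            printf("%d\n", residue_consistent(4096, 512, 0));       /* 0: Bad_End */
            return 0;
    }
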
@@ -1272,6 +1375,10 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1272 | } | 1375 | } |
1273 | 1376 | ||
1274 | /* Not zeroing error to preserve a babble indicator */ | 1377 | /* Not zeroing error to preserve a babble indicator */ |
1378 | if (cmd->error != 0) { | ||
1379 | ub_state_sense(sc, cmd); | ||
1380 | return; | ||
1381 | } | ||
1275 | cmd->state = UB_CMDST_DONE; | 1382 | cmd->state = UB_CMDST_DONE; |
1276 | ub_cmdtr_state(sc, cmd); | 1383 | ub_cmdtr_state(sc, cmd); |
1277 | ub_cmdq_pop(sc); | 1384 | ub_cmdq_pop(sc); |
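
The four added lines above tie back to the "cheap trick" in the stalled-CSW path: by the time a well-formed CSW with US_BULK_STAT_OK arrives, cmd->error may already hold -EIO (or a babble indication from the data phase), and in that case the command now detours through REQUEST SENSE instead of being marked DONE. A tiny sketch of that decision; the names are placeholders:

    #include <stdio.h>

    /*
     * If an error was latched earlier (for example -EIO from a stalled CSW
     * read, or a babble indication), a "good" bulk status no longer finishes
     * the command; it is routed through REQUEST SENSE instead.
     */
    static const char *after_good_csw(int latched_error)
    {
            return latched_error != 0 ? "SENSE" : "DONE";
    }

    int main(void)
    {
            printf("%s\n", after_good_csw(0));      /* DONE */
            printf("%s\n", after_good_csw(-5));     /* SENSE: -EIO was latched */
            return 0;
    }
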
@@ -1310,7 +1417,7 @@ static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) | |||
1310 | * Factorization helper for the command state machine: | 1417 | * Factorization helper for the command state machine: |
1311 | * Submit a CSW read. | 1418 | * Submit a CSW read. |
1312 | */ | 1419 | */ |
1313 | static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | 1420 | static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) |
1314 | { | 1421 | { |
1315 | int rc; | 1422 | int rc; |
1316 | 1423 | ||
@@ -1328,11 +1435,12 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1328 | /* XXX Clear stalls */ | 1435 | /* XXX Clear stalls */ |
1329 | ub_complete(&sc->work_done); | 1436 | ub_complete(&sc->work_done); |
1330 | ub_state_done(sc, cmd, rc); | 1437 | ub_state_done(sc, cmd, rc); |
1331 | return; | 1438 | return -1; |
1332 | } | 1439 | } |
1333 | 1440 | ||
1334 | sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; | 1441 | sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; |
1335 | add_timer(&sc->work_timer); | 1442 | add_timer(&sc->work_timer); |
1443 | return 0; | ||
1336 | } | 1444 | } |
1337 | 1445 | ||
1338 | /* | 1446 | /* |
@@ -1341,7 +1449,9 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1341 | */ | 1449 | */ |
1342 | static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | 1450 | static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) |
1343 | { | 1451 | { |
1344 | __ub_state_stat(sc, cmd); | 1452 | |
1453 | if (__ub_state_stat(sc, cmd) != 0) | ||
1454 | return; | ||
1345 | 1455 | ||
1346 | cmd->stat_count = 0; | 1456 | cmd->stat_count = 0; |
1347 | cmd->state = UB_CMDST_STAT; | 1457 | cmd->state = UB_CMDST_STAT; |
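
__ub_state_stat() used to return void, so a failed CSW submission could still be followed by a state change in its callers; it now returns -1 on failure and 0 on success, and ub_state_stat() above (like ub_state_stat_counted() below) only advances to UB_CMDST_STAT when the read really went out. A toy model of that calling convention; every name here is a placeholder:

    #include <stdio.h>

    static const char *state = "cmd";

    /*
     * Models the new convention: return 0 when the CSW read was actually
     * submitted, -1 when submission failed and the error path has already
     * finished the command.
     */
    static int submit_csw_read(int simulate_failure)
    {
            return simulate_failure ? -1 : 0;
    }

    static void enter_stat_state(int simulate_failure)
    {
            if (submit_csw_read(simulate_failure) != 0)
                    return;         /* do not advance the state machine */
            state = "sts";
    }

    int main(void)
    {
            enter_stat_state(1);
            printf("%s\n", state);  /* still "cmd" */
            enter_stat_state(0);
            printf("%s\n", state);  /* "sts" */
            return 0;
    }
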
@@ -1350,6 +1460,25 @@ static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1350 | 1460 | ||
1351 | /* | 1461 | /* |
1352 | * Factorization helper for the command state machine: | 1462 | * Factorization helper for the command state machine: |
1463 | * Submit a CSW read and go to STAT state with counter (along [C] path). | ||
1464 | */ | ||
1465 | static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | ||
1466 | { | ||
1467 | |||
1468 | if (++cmd->stat_count >= 4) { | ||
1469 | ub_state_sense(sc, cmd); | ||
1470 | return; | ||
1471 | } | ||
1472 | |||
1473 | if (__ub_state_stat(sc, cmd) != 0) | ||
1474 | return; | ||
1475 | |||
1476 | cmd->state = UB_CMDST_STAT; | ||
1477 | ub_cmdtr_state(sc, cmd); | ||
1478 | } | ||
1479 | |||
1480 | /* | ||
1481 | * Factorization helper for the command state machine: | ||
1353 | * Submit a REQUEST SENSE and go to SENSE state. | 1482 | * Submit a REQUEST SENSE and go to SENSE state. |
1354 | */ | 1483 | */ |
1355 | static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | 1484 | static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) |
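
ub_state_stat_counted() centralizes the retry bookkeeping that was previously open-coded at each bad-CSW site: bump stat_count and, once it reaches 4, stop re-reading the CSW and fall back to REQUEST SENSE rather than the old hard Bad_End. A compact model of that bounded-retry policy; the constant and names are illustrative:

    #include <stdio.h>

    #define CSW_RETRY_LIMIT 4

    enum action { ACT_RETRY_CSW, ACT_REQUEST_SENSE };

    /*
     * Re-read the CSW along the counted [C] path at most CSW_RETRY_LIMIT
     * times, then give up and fall back to REQUEST SENSE instead of
     * failing the command outright.
     */
    static enum action next_action(int *stat_count)
    {
            if (++(*stat_count) >= CSW_RETRY_LIMIT)
                    return ACT_REQUEST_SENSE;
            return ACT_RETRY_CSW;
    }

    int main(void)
    {
            int stat_count = 0;
            int i;

            for (i = 0; i < 5; i++)
                    printf("attempt %d -> %s\n", i + 1,
                           next_action(&stat_count) == ACT_RETRY_CSW ?
                           "re-read CSW" : "REQUEST SENSE");
            return 0;
    }
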