Diffstat (limited to 'drivers/block')
 43 files changed, 3509 insertions(+), 2219 deletions(-)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index c5f22bb0a48e..4e2c367fec11 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -79,23 +79,28 @@ static int DAC960_open(struct block_device *bdev, fmode_t mode)
         struct gendisk *disk = bdev->bd_disk;
         DAC960_Controller_T *p = disk->queue->queuedata;
         int drive_nr = (long)disk->private_data;
+        int ret = -ENXIO;
 
+        lock_kernel();
         if (p->FirmwareType == DAC960_V1_Controller) {
                 if (p->V1.LogicalDriveInformation[drive_nr].
                     LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
-                        return -ENXIO;
+                        goto out;
         } else {
                 DAC960_V2_LogicalDeviceInfo_T *i =
                         p->V2.LogicalDeviceInformation[drive_nr];
                 if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
-                        return -ENXIO;
+                        goto out;
         }
 
         check_disk_change(bdev);
 
         if (!get_capacity(p->disks[drive_nr]))
-                return -ENXIO;
-        return 0;
+                goto out;
+        ret = 0;
+out:
+        unlock_kernel();
+        return ret;
 }
 
 static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
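
Note on the hunk above: it is the template for most of this series. The big kernel lock (BKL), which the VFS used to take around every block driver's ->open, ->release and ->locked_ioctl, is pushed down into each driver so it becomes visible, and later removable, per driver. The mechanical transformation is always the same: take the BKL on entry, turn every early return into a goto to a common exit, and drop the lock there. A minimal sketch of the shape, with hypothetical names (my_open and device_is_ready are placeholders, not from the patch):

#include <linux/smp_lock.h>     /* lock_kernel()/unlock_kernel() */

static int my_open(struct block_device *bdev, fmode_t mode)
{
        int ret = -ENXIO;

        lock_kernel();
        if (!device_is_ready(bdev))     /* hypothetical check */
                goto out;
        ret = 0;                        /* success path */
out:
        unlock_kernel();                /* every exit drops the BKL */
        return ret;
}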
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 77bfce52e9ca..de277689da61 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -76,6 +76,17 @@ config BLK_DEV_XD
 
           It's pretty unlikely that you have one of these: say N.
 
+config GDROM
+        tristate "SEGA Dreamcast GD-ROM drive"
+        depends on SH_DREAMCAST
+        help
+          A standard SEGA Dreamcast comes with a modified CD ROM drive called a
+          "GD-ROM" by SEGA to signify it is capable of reading special disks
+          with up to 1 GB of data. This drive will also read standard CD ROM
+          disks. Select this option to access any disks in your GD ROM drive.
+          Most users will want to say "Y" here.
+          You can also build this as a module which will be called gdrom.
+
 config PARIDE
         tristate "Parallel port IDE device support"
         depends on PARPORT_PC
@@ -103,17 +114,6 @@ config PARIDE
           "MicroSolutions backpack protocol", "DataStor Commuter protocol"
           etc.).
 
-config GDROM
-        tristate "SEGA Dreamcast GD-ROM drive"
-        depends on SH_DREAMCAST
-        help
-          A standard SEGA Dreamcast comes with a modified CD ROM drive called a
-          "GD-ROM" by SEGA to signify it is capable of reading special disks
-          with up to 1 GB of data. This drive will also read standard CD ROM
-          disks. Select this option to access any disks in your GD ROM drive.
-          Most users will want to say "Y" here.
-          You can also build this as a module which will be called gdrom.
-
 source "drivers/block/paride/Kconfig"
 
 config BLK_CPQ_DA
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0182a22c423a..76f114f0bba3 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -60,12 +60,14 @@
 #include <linux/hdreg.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
 #include <linux/amifdreg.h>
 #include <linux/amifd.h>
 #include <linux/buffer_head.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/interrupt.h>
+#include <linux/platform_device.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
@@ -1422,7 +1424,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
         return 0;
 }
 
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long param)
 {
         struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
@@ -1499,6 +1501,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
         return 0;
 }
 
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+                    unsigned int cmd, unsigned long param)
+{
+        int ret;
+
+        lock_kernel();
+        ret = fd_locked_ioctl(bdev, mode, cmd, param);
+        unlock_kernel();
+
+        return ret;
+}
+
 static void fd_probe(int dev)
 {
         unsigned long code;
@@ -1541,10 +1555,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
         int old_dev;
         unsigned long flags;
 
+        lock_kernel();
         old_dev = fd_device[drive];
 
-        if (fd_ref[drive] && old_dev != system)
+        if (fd_ref[drive] && old_dev != system) {
+                unlock_kernel();
                 return -EBUSY;
+        }
 
         if (mode & (FMODE_READ|FMODE_WRITE)) {
                 check_disk_change(bdev);
@@ -1557,8 +1574,10 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
                         fd_deselect (drive);
                         rel_fdc();
 
-                        if (wrprot)
+                        if (wrprot) {
+                                unlock_kernel();
                                 return -EROFS;
+                        }
                 }
         }
 
@@ -1575,6 +1594,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
         printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
                unit[drive].type->name, data_types[system].name);
 
+        unlock_kernel();
         return 0;
 }
 
@@ -1583,6 +1603,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
         struct amiga_floppy_struct *p = disk->private_data;
         int drive = p - unit;
 
+        lock_kernel();
         if (unit[drive].dirty == 1) {
                 del_timer (flush_track_timer + drive);
                 non_int_flush_track (drive);
@@ -1596,6 +1617,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
         /* the mod_use counter is handled this way */
         floppy_off (drive | 0x40000000);
 #endif
+        unlock_kernel();
         return 0;
 }
 
@@ -1637,7 +1659,7 @@ static const struct block_device_operations floppy_fops = {
         .owner          = THIS_MODULE,
         .open           = floppy_open,
         .release        = floppy_release,
-        .locked_ioctl   = fd_ioctl,
+        .ioctl          = fd_ioctl,
         .getgeo         = fd_getgeo,
         .media_changed  = amiga_floppy_change,
 };
@@ -1696,34 +1718,18 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
         return get_disk(unit[drive].gendisk);
 }
 
-static int __init amiga_floppy_init(void)
+static int __init amiga_floppy_probe(struct platform_device *pdev)
 {
         int i, ret;
 
-        if (!MACH_IS_AMIGA)
-                return -ENODEV;
-
-        if (!AMIGAHW_PRESENT(AMI_FLOPPY))
-                return -ENODEV;
-
         if (register_blkdev(FLOPPY_MAJOR,"fd"))
                 return -EBUSY;
 
-        /*
-         * We request DSKPTR, DSKLEN and DSKDATA only, because the other
-         * floppy registers are too spreaded over the custom register space
-         */
-        ret = -EBUSY;
-        if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) {
-                printk("fd: cannot get floppy registers\n");
-                goto out_blkdev;
-        }
-
         ret = -ENOMEM;
         if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
             NULL) {
                 printk("fd: cannot get chip mem buffer\n");
-                goto out_memregion;
+                goto out_blkdev;
         }
 
         ret = -EBUSY;
@@ -1792,18 +1798,13 @@ out_irq2:
         free_irq(IRQ_AMIGA_DSKBLK, NULL);
 out_irq:
         amiga_chip_free(raw_buf);
-out_memregion:
-        release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
 out_blkdev:
         unregister_blkdev(FLOPPY_MAJOR,"fd");
         return ret;
 }
 
-module_init(amiga_floppy_init);
-#ifdef MODULE
-
 #if 0 /* not safe to unload */
-void cleanup_module(void)
+static int __exit amiga_floppy_remove(struct platform_device *pdev)
 {
         int i;
 
@@ -1820,12 +1821,25 @@ void cleanup_module(void)
         custom.dmacon = DMAF_DISK; /* disable DMA */
         amiga_chip_free(raw_buf);
         blk_cleanup_queue(floppy_queue);
-        release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
         unregister_blkdev(FLOPPY_MAJOR, "fd");
 }
 #endif
 
-#else
+static struct platform_driver amiga_floppy_driver = {
+        .driver   = {
+                .name   = "amiga-floppy",
+                .owner  = THIS_MODULE,
+        },
+};
+
+static int __init amiga_floppy_init(void)
+{
+        return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe);
+}
+
+module_init(amiga_floppy_init);
+
+#ifndef MODULE
 static int __init amiga_floppy_setup (char *str)
 {
         int n;
@@ -1840,3 +1854,5 @@ static int __init amiga_floppy_setup (char *str)
 
 __setup("floppy=", amiga_floppy_setup);
 #endif
+
+MODULE_ALIAS("platform:amiga-floppy");
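
Besides the BKL pushdown, amiflop is converted from a bare module_init() that probed the hardware itself (the removed MACH_IS_AMIGA and AMIGAHW_PRESENT checks) into a platform driver, so the bus code decides whether the device exists and the driver only binds to it; the new MODULE_ALIAS lets udev load the module when the "amiga-floppy" platform device appears. A self-contained sketch of the same registration pattern (all names except the kernel APIs are illustrative):

#include <linux/module.h>
#include <linux/platform_device.h>

/* __init is allowed here because platform_driver_probe() guarantees the
 * probe runs exactly once, at registration; the driver can never rebind. */
static int __init example_probe(struct platform_device *pdev)
{
        return 0;       /* real resource setup would go here */
}

static struct platform_driver example_driver = {
        .driver = {
                .name  = "example-dev", /* must match the platform device */
                .owner = THIS_MODULE,
        },
};

static int __init example_init(void)
{
        /* Registers the driver and probes in one step; returns -ENODEV
         * if no matching device was registered by the platform code. */
        return platform_driver_probe(&example_driver, example_probe);
}
module_init(example_init);

MODULE_ALIAS("platform:example-dev");   /* enables udev autoloading */
MODULE_LICENSE("GPL");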
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 035cefe4045a..a946929735a5 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/genhd.h>
 #include <linux/netdevice.h>
+#include <linux/smp_lock.h>
 #include "aoe.h"
 
 static struct kmem_cache *buf_pool_cache;
@@ -124,13 +125,16 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
         struct aoedev *d = bdev->bd_disk->private_data;
         ulong flags;
 
+        lock_kernel();
         spin_lock_irqsave(&d->lock, flags);
         if (d->flags & DEVFL_UP) {
                 d->nopen++;
                 spin_unlock_irqrestore(&d->lock, flags);
+                unlock_kernel();
                 return 0;
         }
         spin_unlock_irqrestore(&d->lock, flags);
+        unlock_kernel();
         return -ENODEV;
 }
 
@@ -173,7 +177,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
                 BUG();
                 bio_endio(bio, -ENXIO);
                 return 0;
-        } else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+        } else if (bio->bi_rw & REQ_HARDBARRIER) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
         } else if (bio->bi_io_vec == NULL) {
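
The aoeblk_make_request() hunk is unrelated to the BKL work: it follows the 2.6.36 unification of the per-bio BIO_RW_* flags and the per-request REQ_* flags into a single REQ_* namespace, so barrier bios are now tested directly on bi_rw. A driver that cannot order writes keeps failing them with -EOPNOTSUPP so the submitter can retry without the barrier. A hedged sketch of that check in isolation (the function and device are placeholders):

/* Sketch: reject barrier bios in a driver with no ordering guarantees. */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
        if (bio->bi_rw & REQ_HARDBARRIER) {
                /* tell the caller to resubmit without the barrier flag */
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }
        /* ... normal I/O handling ... */
        bio_endio(bio, 0);
        return 0;
}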
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index e35cf59cbfde..aceb96476524 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -67,6 +67,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 
 #include <asm/atafd.h>
 #include <asm/atafdreg.h>
@@ -359,7 +360,7 @@ static void finish_fdc( void );
 static void finish_fdc_done( int dummy );
 static void setup_req_params( int drive );
 static void redo_fd_request( void);
-static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
                     cmd, unsigned long param);
 static void fd_probe( int drive );
 static int fd_test_drive_present( int drive );
@@ -1480,7 +1481,7 @@ void do_fd_request(struct request_queue * q)
         atari_enable_irq( IRQ_MFP_FDC );
 }
 
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long param)
 {
         struct gendisk *disk = bdev->bd_disk;
@@ -1665,6 +1666,17 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
         }
 }
 
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+                    unsigned int cmd, unsigned long arg)
+{
+        int ret;
+
+        lock_kernel();
+        ret = fd_locked_ioctl(bdev, mode, cmd, arg);
+        unlock_kernel();
+
+        return ret;
+}
 
 /* Initialize the 'unit' variable for drive 'drive' */
 
@@ -1838,24 +1850,36 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
         return 0;
 }
 
+static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+        int ret;
+
+        lock_kernel();
+        ret = floppy_open(bdev, mode);
+        unlock_kernel();
+
+        return ret;
+}
 
 static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
         struct atari_floppy_struct *p = disk->private_data;
+        lock_kernel();
         if (p->ref < 0)
                 p->ref = 0;
         else if (!p->ref--) {
                 printk(KERN_ERR "floppy_release with fd_ref == 0");
                 p->ref = 0;
         }
+        unlock_kernel();
         return 0;
 }
 
 static const struct block_device_operations floppy_fops = {
         .owner          = THIS_MODULE,
-        .open           = floppy_open,
+        .open           = floppy_unlocked_open,
         .release        = floppy_release,
-        .locked_ioctl   = fd_ioctl,
+        .ioctl          = fd_ioctl,
         .media_changed  = check_floppy_change,
         .revalidate_disk= floppy_revalidate,
 };
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 6081e81d5738..1c7f63792ff8 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -15,6 +15,7 @@
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/highmem.h>
+#include <linux/smp_lock.h>
 #include <linux/radix-tree.h>
 #include <linux/buffer_head.h> /* invalidate_bh_lrus() */
 #include <linux/slab.h>
@@ -133,6 +134,28 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
         return page;
 }
 
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+        struct page *page;
+        pgoff_t idx;
+
+        spin_lock(&brd->brd_lock);
+        idx = sector >> PAGE_SECTORS_SHIFT;
+        page = radix_tree_delete(&brd->brd_pages, idx);
+        spin_unlock(&brd->brd_lock);
+        if (page)
+                __free_page(page);
+}
+
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+        struct page *page;
+
+        page = brd_lookup_page(brd, sector);
+        if (page)
+                clear_highpage(page);
+}
+
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -189,6 +212,24 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
         return 0;
 }
 
+static void discard_from_brd(struct brd_device *brd,
+                        sector_t sector, size_t n)
+{
+        while (n >= PAGE_SIZE) {
+                /*
+                 * Don't want to actually discard pages here because
+                 * re-allocating the pages can result in writeback
+                 * deadlocks under heavy load.
+                 */
+                if (0)
+                        brd_free_page(brd, sector);
+                else
+                        brd_zero_page(brd, sector);
+                sector += PAGE_SIZE >> SECTOR_SHIFT;
+                n -= PAGE_SIZE;
+        }
+}
+
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
@@ -300,6 +341,12 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
                                                 get_capacity(bdev->bd_disk))
                 goto out;
 
+        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+                err = 0;
+                discard_from_brd(brd, sector, bio->bi_size);
+                goto out;
+        }
+
         rw = bio_rw(bio);
         if (rw == READA)
                 rw = READ;
@@ -320,7 +367,7 @@ out:
 }
 
 #ifdef CONFIG_BLK_DEV_XIP
-static int brd_direct_access (struct block_device *bdev, sector_t sector,
+static int brd_direct_access(struct block_device *bdev, sector_t sector,
                         void **kaddr, unsigned long *pfn)
 {
         struct brd_device *brd = bdev->bd_disk->private_data;
@@ -355,6 +402,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
          * ram device BLKFLSBUF has special semantics, we want to actually
          * release and destroy the ramdisk data.
          */
+        lock_kernel();
         mutex_lock(&bdev->bd_mutex);
         error = -EBUSY;
         if (bdev->bd_openers <= 1) {
@@ -371,13 +419,14 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
                 error = 0;
         }
         mutex_unlock(&bdev->bd_mutex);
+        unlock_kernel();
 
         return error;
 }
 
 static const struct block_device_operations brd_fops = {
         .owner =                THIS_MODULE,
-        .locked_ioctl =         brd_ioctl,
+        .ioctl =                brd_ioctl,
 #ifdef CONFIG_BLK_DEV_XIP
         .direct_access =        brd_direct_access,
 #endif
@@ -433,10 +482,15 @@ static struct brd_device *brd_alloc(int i)
         if (!brd->brd_queue)
                 goto out_free_dev;
         blk_queue_make_request(brd->brd_queue, brd_make_request);
-        blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
+        blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
         blk_queue_max_hw_sectors(brd->brd_queue, 1024);
         blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
+        brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
+        brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+        brd->brd_queue->limits.discard_zeroes_data = 1;
+        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
+
         disk = brd->brd_disk = alloc_disk(1 << part_shift);
         if (!disk)
                 goto out_free_queue;
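
Taken together, the brd hunks are a complete recipe for discard support in a bio-based driver of this era: advertise discard in the queue limits, catch REQ_DISCARD in the make_request function, and perform the per-page action. Here that action is zeroing rather than freeing, because re-allocating a freed page on the next write could deadlock when that write comes from writeback under memory pressure; that is what the odd-looking "if (0)" branch documents. A condensed sketch of the wiring (my_dev and my_discard are placeholders):

/* Sketch: minimal discard wiring for a bio-based driver, 2.6.36-era API. */
static int my_make_request(struct request_queue *q, struct bio *bio)
{
        struct my_dev *dev = q->queuedata;

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                /* bi_sector/bi_size give the range being discarded */
                my_discard(dev, bio->bi_sector, bio->bi_size);
                bio_endio(bio, 0);
                return 0;
        }
        /* ... regular read/write path ... */
        bio_endio(bio, 0);
        return 0;
}

static void my_setup_discard(struct request_queue *q)
{
        q->limits.discard_granularity = PAGE_SIZE; /* smallest unit honored */
        q->limits.max_discard_sectors = UINT_MAX;  /* no driver-side cap */
        q->limits.discard_zeroes_data = 1;         /* discards read back as zero */
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}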
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index eb5ff0531cfb..5e4fadcdece9 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -56,16 +56,14 @@
 #include <linux/kthread.h>
 
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
+#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)
 
 /* Embedded module documentation macros - see modules.h */
 MODULE_AUTHOR("Hewlett-Packard Company");
 MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
-MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
-                        " SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
-                        " Smart Array G2 Series SAS/SATA Controllers");
-MODULE_VERSION("3.6.20");
+MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
+MODULE_VERSION("3.6.26");
 MODULE_LICENSE("GPL");
 
 static int cciss_allow_hpsa;
@@ -107,6 +105,11 @@ static const struct pci_device_id cciss_pci_device_id[] = {
         {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
         {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
         {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
+        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
+        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
+        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
+        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
+        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
         {0,}
 };
 
@@ -146,6 +149,11 @@ static struct board_type products[] = {
         {0x3249103C, "Smart Array P812", &SA5_access},
         {0x324A103C, "Smart Array P712m", &SA5_access},
         {0x324B103C, "Smart Array P711m", &SA5_access},
+        {0x3250103C, "Smart Array", &SA5_access},
+        {0x3251103C, "Smart Array", &SA5_access},
+        {0x3252103C, "Smart Array", &SA5_access},
+        {0x3253103C, "Smart Array", &SA5_access},
+        {0x3254103C, "Smart Array", &SA5_access},
 };
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -167,9 +175,13 @@ static DEFINE_MUTEX(scan_mutex);
 static LIST_HEAD(scan_q);
 
 static void do_cciss_request(struct request_queue *q);
-static irqreturn_t do_cciss_intr(int irq, void *dev_id);
+static irqreturn_t do_cciss_intx(int irq, void *dev_id);
+static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
 static int cciss_open(struct block_device *bdev, fmode_t mode);
+static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
 static int cciss_release(struct gendisk *disk, fmode_t mode);
+static int do_ioctl(struct block_device *bdev, fmode_t mode,
+                    unsigned int cmd, unsigned long arg);
 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg);
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -179,25 +191,23 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
 static int deregister_disk(ctlr_info_t *h, int drv_index,
                            int clear_all, int via_ioctl);
 
-static void cciss_read_capacity(int ctlr, int logvol,
+static void cciss_read_capacity(ctlr_info_t *h, int logvol,
                         sector_t *total_size, unsigned int *block_size);
-static void cciss_read_capacity_16(int ctlr, int logvol,
+static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
                         sector_t *total_size, unsigned int *block_size);
-static void cciss_geometry_inquiry(int ctlr, int logvol,
+static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
                         sector_t total_size,
                         unsigned int block_size, InquiryData_struct *inq_buff,
                         drive_info_struct *drv);
-static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
-                                           __u32);
+static void __devinit cciss_interrupt_mode(ctlr_info_t *);
 static void start_io(ctlr_info_t *h);
-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
                            __u8 page_code, unsigned char scsi3addr[],
                            int cmd_type);
 static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
                                 int attempt_retry);
 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
 
-static void fail_all_cmds(unsigned long ctlr);
 static int add_to_scan_list(struct ctlr_info *h);
 static int scan_thread(void *data);
 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
@@ -205,11 +215,23 @@ static void cciss_hba_release(struct device *dev);
 static void cciss_device_release(struct device *dev);
 static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
 static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
+static inline u32 next_command(ctlr_info_t *h);
+static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
+        void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+        u64 *cfg_offset);
+static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+        unsigned long *memory_bar);
+
+
+/* performant mode helper functions */
+static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
+                        int *bucket_map);
+static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
 
 #ifdef CONFIG_PROC_FS
-static void cciss_procinit(int i);
+static void cciss_procinit(ctlr_info_t *h);
 #else
-static void cciss_procinit(int i)
+static void cciss_procinit(ctlr_info_t *h)
 {
 }
 #endif /* CONFIG_PROC_FS */
@@ -221,9 +243,9 @@ static int cciss_compat_ioctl(struct block_device *, fmode_t,
 
 static const struct block_device_operations cciss_fops = {
         .owner = THIS_MODULE,
-        .open = cciss_open,
+        .open = cciss_unlocked_open,
         .release = cciss_release,
-        .locked_ioctl = cciss_ioctl,
+        .ioctl = do_ioctl,
         .getgeo = cciss_getgeo,
 #ifdef CONFIG_COMPAT
         .compat_ioctl = cciss_compat_ioctl,
@@ -231,6 +253,16 @@ static const struct block_device_operations cciss_fops = {
         .revalidate_disk = cciss_revalidate,
 };
 
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
+{
+        if (likely(h->transMethod == CFGTBL_Trans_Performant))
+                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
 /*
  * Enqueuing and dequeuing functions for cmdlists.
  */
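
The tag encoding in set_performant_mode() is worth unpacking: the command's DMA bus address doubles as its tag, bit 0 marks a performant-mode ("pull") submission, and bits 3-1 carry the block-fetch-table entry for the command's scatter-gather count, telling the controller how many chunks of the command to fetch. Because commands are aligned, the low bits of busaddr are guaranteed free. A worked example with made-up values:

/* Illustration only: assume blockFetchTable[c->Header.SGList] == 2. */
u32 busaddr = 0x12340;                /* aligned command address, low bits 0 */
u32 fetch   = 2;                      /* blocks the controller should fetch  */

u32 tag = busaddr | 1 | (fetch << 1); /* bit 0 = pull model, bits 3-1 = fetch */
/* tag == 0x12345; low nibble 0101b decodes back to fetch = 010b, pull bit set */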
@@ -257,6 +289,20 @@ static inline void removeQ(CommandList_struct *c)
         hlist_del_init(&c->list);
 }
 
+static void enqueue_cmd_and_start_io(ctlr_info_t *h,
+        CommandList_struct *c)
+{
+        unsigned long flags;
+        set_performant_mode(h, c);
+        spin_lock_irqsave(&h->lock, flags);
+        addQ(&h->reqQ, c);
+        h->Qdepth++;
+        if (h->Qdepth > h->maxQsinceinit)
+                h->maxQsinceinit = h->Qdepth;
+        start_io(h);
+        spin_unlock_irqrestore(&h->lock, flags);
+}
+
 static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
         int nr_cmds)
 {
@@ -335,7 +381,7 @@ static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
         "UNKNOWN"
 };
-#define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1)
+#define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1)
 
 #ifdef CONFIG_PROC_FS
 
@@ -366,32 +412,31 @@ static void cciss_seq_show_header(struct seq_file *seq)
                 h->product_name,
                 (unsigned long)h->board_id,
                 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
-                h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
+                h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT],
                 h->num_luns,
                 h->Qdepth, h->commands_outstanding,
                 h->maxQsinceinit, h->max_outstanding, h->maxSG);
 
 #ifdef CONFIG_CISS_SCSI_TAPE
-        cciss_seq_tape_report(seq, h->ctlr);
+        cciss_seq_tape_report(seq, h);
 #endif /* CONFIG_CISS_SCSI_TAPE */
 }
 
 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
 {
         ctlr_info_t *h = seq->private;
-        unsigned ctlr = h->ctlr;
         unsigned long flags;
 
         /* prevent displaying bogus info during configuration
          * or deconfiguration of a logical volume
          */
-        spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+        spin_lock_irqsave(&h->lock, flags);
         if (h->busy_configuring) {
-                spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+                spin_unlock_irqrestore(&h->lock, flags);
                 return ERR_PTR(-EBUSY);
         }
         h->busy_configuring = 1;
-        spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+        spin_unlock_irqrestore(&h->lock, flags);
 
         if (*pos == 0)
                 cciss_seq_show_header(seq);
@@ -499,7 +544,7 @@ cciss_proc_write(struct file *file, const char __user *buf,
                 struct seq_file *seq = file->private_data;
                 ctlr_info_t *h = seq->private;
 
-                err = cciss_engage_scsi(h->ctlr);
+                err = cciss_engage_scsi(h);
                 if (err == 0)
                         err = length;
         } else
@@ -522,7 +567,7 @@ static const struct file_operations cciss_proc_fops = {
         .write = cciss_proc_write,
 };
 
-static void __devinit cciss_procinit(int i)
+static void __devinit cciss_procinit(ctlr_info_t *h)
 {
         struct proc_dir_entry *pde;
 
@@ -530,9 +575,9 @@ static void __devinit cciss_procinit(int i)
         proc_cciss = proc_mkdir("driver/cciss", NULL);
         if (!proc_cciss)
                 return;
-        pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
-                               S_IROTH, proc_cciss,
-                               &cciss_proc_fops, hba[i]);
+        pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+                               S_IROTH, proc_cciss,
+                               &cciss_proc_fops, h);
 }
 #endif /* CONFIG_PROC_FS */
 
@@ -565,12 +610,12 @@ static ssize_t dev_show_unique_id(struct device *dev,
         unsigned long flags;
         int ret = 0;
 
-        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+        spin_lock_irqsave(&h->lock, flags);
         if (h->busy_configuring)
                 ret = -EBUSY;
         else
                 memcpy(sn, drv->serial_no, sizeof(sn));
-        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+        spin_unlock_irqrestore(&h->lock, flags);
 
         if (ret)
                 return ret;
@@ -595,12 +640,12 @@ static ssize_t dev_show_vendor(struct device *dev,
         unsigned long flags;
         int ret = 0;
 
-        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+        spin_lock_irqsave(&h->lock, flags);
         if (h->busy_configuring)
                 ret = -EBUSY;
         else
                 memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
-        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+        spin_unlock_irqrestore(&h->lock, flags);
 
         if (ret)
                 return ret;
@@ -619,12 +664,12 @@ static ssize_t dev_show_model(struct device *dev,
         unsigned long flags;
         int ret = 0;
 
-        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+        spin_lock_irqsave(&h->lock, flags);
         if (h->busy_configuring)
                 ret = -EBUSY;
         else
                 memcpy(model, drv->model, MODEL_LEN + 1);
-        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+        spin_unlock_irqrestore(&h->lock, flags);
 
         if (ret)
                 return ret;
@@ -643,12 +688,12 @@ static ssize_t dev_show_rev(struct device *dev,
         unsigned long flags;
         int ret = 0;
 
-        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+        spin_lock_irqsave(&h->lock, flags);
         if (h->busy_configuring)
                 ret = -EBUSY;
         else
                 memcpy(rev, drv->rev, REV_LEN + 1);
-        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+        spin_unlock_irqrestore(&h->lock, flags);
 
         if (ret)
                 return ret;
@@ -665,17 +710,17 @@ static ssize_t cciss_show_lunid(struct device *dev,
         unsigned long flags;
         unsigned char lunid[8];
 
-        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+        spin_lock_irqsave(&h->lock, flags);
         if (h->busy_configuring) {
-                spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+                spin_unlock_irqrestore(&h->lock, flags);
                 return -EBUSY;
         }
         if (!drv->heads) {
-                spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+                spin_unlock_irqrestore(&h->lock, flags);
                 return -ENOTTY;
         }
         memcpy(lunid, drv->LunID, sizeof(lunid));
-        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+        spin_unlock_irqrestore(&h->lock, flags);
         return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                 lunid[0], lunid[1], lunid[2], lunid[3],
                 lunid[4], lunid[5], lunid[6], lunid[7]);
@@ -690,13 +735,13 @@ static ssize_t cciss_show_raid_level(struct device *dev,
         int raid;
         unsigned long flags;
 
-        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+        spin_lock_irqsave(&h->lock, flags);
         if (h->busy_configuring) {
-                spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+                spin_unlock_irqrestore(&h->lock, flags);
                 return -EBUSY;
         }
         raid = drv->raid_level;
-        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+        spin_unlock_irqrestore(&h->lock, flags);
         if (raid < 0 || raid > RAID_UNKNOWN)
                 raid = RAID_UNKNOWN;
 
@@ -713,13 +758,13 @@ static ssize_t cciss_show_usage_count(struct device *dev,
         unsigned long flags;
         int count;
 
-        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+        spin_lock_irqsave(&h->lock, flags);
         if (h->busy_configuring) {
-                spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+                spin_unlock_irqrestore(&h->lock, flags);
                 return -EBUSY;
         }
         count = drv->usage_count;
-        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+        spin_unlock_irqrestore(&h->lock, flags);
         return snprintf(buf, 20, "%d\n", count);
 }
 static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
@@ -864,60 +909,70 @@ static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
 /*
  * For operations that cannot sleep, a command block is allocated at init,
  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
- * which ones are free or in use. For operations that can wait for kmalloc
- * to possible sleep, this routine can be called with get_from_pool set to 0.
- * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
+ * which ones are free or in use.
  */
-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
+static CommandList_struct *cmd_alloc(ctlr_info_t *h)
 {
         CommandList_struct *c;
         int i;
         u64bit temp64;
         dma_addr_t cmd_dma_handle, err_dma_handle;
 
-        if (!get_from_pool) {
-                c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
-                        sizeof(CommandList_struct), &cmd_dma_handle);
-                if (c == NULL)
+        do {
+                i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+                if (i == h->nr_cmds)
                         return NULL;
-                memset(c, 0, sizeof(CommandList_struct));
+        } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
+                 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+        c = h->cmd_pool + i;
+        memset(c, 0, sizeof(CommandList_struct));
+        cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
+        c->err_info = h->errinfo_pool + i;
+        memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+        err_dma_handle = h->errinfo_pool_dhandle
+                + i * sizeof(ErrorInfo_struct);
+        h->nr_allocs++;
 
-                c->cmdindex = -1;
+        c->cmdindex = i;
 
-                c->err_info = (ErrorInfo_struct *)
-                    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
-                            &err_dma_handle);
+        INIT_HLIST_NODE(&c->list);
+        c->busaddr = (__u32) cmd_dma_handle;
+        temp64.val = (__u64) err_dma_handle;
+        c->ErrDesc.Addr.lower = temp64.val32.lower;
+        c->ErrDesc.Addr.upper = temp64.val32.upper;
+        c->ErrDesc.Len = sizeof(ErrorInfo_struct);
 
-                if (c->err_info == NULL) {
-                        pci_free_consistent(h->pdev,
-                                sizeof(CommandList_struct), c, cmd_dma_handle);
-                        return NULL;
-                }
-                memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-        } else {                /* get it out of the controllers pool */
-
-                do {
-                        i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-                        if (i == h->nr_cmds)
-                                return NULL;
-                } while (test_and_set_bit
-                         (i & (BITS_PER_LONG - 1),
-                          h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
-#ifdef CCISS_DEBUG
-                printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
-#endif
-                c = h->cmd_pool + i;
-                memset(c, 0, sizeof(CommandList_struct));
-                cmd_dma_handle = h->cmd_pool_dhandle
-                    + i * sizeof(CommandList_struct);
-                c->err_info = h->errinfo_pool + i;
-                memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-                err_dma_handle = h->errinfo_pool_dhandle
-                    + i * sizeof(ErrorInfo_struct);
-                h->nr_allocs++;
+        c->ctlr = h->ctlr;
+        return c;
+}
 
-                c->cmdindex = i;
+/* allocate a command using pci_alloc_consistent, used for ioctls,
+ * etc., not for the main i/o path.
+ */
+static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
+{
+        CommandList_struct *c;
+        u64bit temp64;
+        dma_addr_t cmd_dma_handle, err_dma_handle;
+
+        c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
+                sizeof(CommandList_struct), &cmd_dma_handle);
+        if (c == NULL)
+                return NULL;
+        memset(c, 0, sizeof(CommandList_struct));
+
+        c->cmdindex = -1;
+
+        c->err_info = (ErrorInfo_struct *)
+            pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
+                    &err_dma_handle);
+
+        if (c->err_info == NULL) {
+                pci_free_consistent(h->pdev,
+                        sizeof(CommandList_struct), c, cmd_dma_handle);
+                return NULL;
         }
+        memset(c->err_info, 0, sizeof(ErrorInfo_struct));
 
         INIT_HLIST_NODE(&c->list);
         c->busaddr = (__u32) cmd_dma_handle;
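
The allocator split above separates the two call sites that the old get_from_pool flag multiplexed: cmd_alloc() now always takes a slot from the preallocated pool and may be called from atomic context, while cmd_special_alloc() keeps the sleeping pci_alloc_consistent() path for ioctls. The pool claim needs no lock; it simply retries when two CPUs race for the same bit. A self-contained sketch of the idiom, using plain test_and_set_bit(i, bits), which is equivalent to the driver's open-coded word/offset split (pool size and types are illustrative):

#include <linux/bitops.h>

#define POOL_SIZE 64                            /* illustrative */

static unsigned long pool_bits[BITS_TO_LONGS(POOL_SIZE)];
static struct command { int index; } pool[POOL_SIZE];   /* stand-in type */

static struct command *pool_alloc(void)
{
        int i;

        do {
                i = find_first_zero_bit(pool_bits, POOL_SIZE);
                if (i == POOL_SIZE)
                        return NULL;            /* pool exhausted */
                /* retry if another CPU claimed bit i after we found it */
        } while (test_and_set_bit(i, pool_bits));
        pool[i].index = i;
        return &pool[i];
}

static void pool_free(struct command *c)
{
        clear_bit(c->index, pool_bits);         /* atomic, no lock needed */
}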
@@ -930,27 +985,26 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
         return c;
 }
 
-/*
- * Frees a command block that was previously allocated with cmd_alloc().
- */
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
 {
         int i;
+
+        i = c - h->cmd_pool;
+        clear_bit(i & (BITS_PER_LONG - 1),
+                  h->cmd_pool_bits + (i / BITS_PER_LONG));
+        h->nr_frees++;
+}
+
+static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
+{
         u64bit temp64;
 
-        if (!got_from_pool) {
-                temp64.val32.lower = c->ErrDesc.Addr.lower;
-                temp64.val32.upper = c->ErrDesc.Addr.upper;
-                pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
-                                    c->err_info, (dma_addr_t) temp64.val);
-                pci_free_consistent(h->pdev, sizeof(CommandList_struct),
-                                    c, (dma_addr_t) c->busaddr);
-        } else {
-                i = c - h->cmd_pool;
-                clear_bit(i & (BITS_PER_LONG - 1),
-                          h->cmd_pool_bits + (i / BITS_PER_LONG));
-                h->nr_frees++;
-        }
+        temp64.val32.lower = c->ErrDesc.Addr.lower;
+        temp64.val32.upper = c->ErrDesc.Addr.upper;
+        pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
+                c->err_info, (dma_addr_t) temp64.val);
+        pci_free_consistent(h->pdev, sizeof(CommandList_struct),
+                c, (dma_addr_t) c->busaddr);
 }
 
 static inline ctlr_info_t *get_host(struct gendisk *disk)
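
cmd_special_alloc()/cmd_special_free() lean on the PCI DMA API of this era: pci_alloc_consistent() hands back a CPU pointer plus a dma_addr_t the controller can use, and the free must repeat the same device, size and DMA handle. A hedged sketch of the pairing (the command struct is invented; current kernels would use dma_alloc_coherent() instead):

#include <linux/pci.h>
#include <linux/string.h>

struct example_cmd { u32 opcode; };     /* placeholder command layout */

static struct example_cmd *example_alloc(struct pci_dev *pdev,
                                         dma_addr_t *dma_handle)
{
        struct example_cmd *c;

        /* Coherent buffer: CPU stores become visible to the device
         * without explicit syncs; bus address returned in *dma_handle. */
        c = pci_alloc_consistent(pdev, sizeof(*c), dma_handle);
        if (c)
                memset(c, 0, sizeof(*c));       /* API does not zero it */
        return c;
}

static void example_free(struct pci_dev *pdev, struct example_cmd *c,
                         dma_addr_t dma_handle)
{
        /* must mirror the allocation exactly */
        pci_free_consistent(pdev, sizeof(*c), c, dma_handle);
}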
@@ -968,13 +1022,10 @@ static inline drive_info_struct *get_drv(struct gendisk *disk)
  */
 static int cciss_open(struct block_device *bdev, fmode_t mode)
 {
-        ctlr_info_t *host = get_host(bdev->bd_disk);
+        ctlr_info_t *h = get_host(bdev->bd_disk);
         drive_info_struct *drv = get_drv(bdev->bd_disk);
 
-#ifdef CCISS_DEBUG
-        printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
-#endif /* CCISS_DEBUG */
-
+        dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
         if (drv->busy_configuring)
                 return -EBUSY;
         /*
@@ -1000,29 +1051,39 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
                 return -EPERM;
         }
         drv->usage_count++;
-        host->usage_count++;
+        h->usage_count++;
         return 0;
 }
 
+static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+        int ret;
+
+        lock_kernel();
+        ret = cciss_open(bdev, mode);
+        unlock_kernel();
+
+        return ret;
+}
+
 /*
  * Close.  Sync first.
  */
 static int cciss_release(struct gendisk *disk, fmode_t mode)
 {
-        ctlr_info_t *host = get_host(disk);
-        drive_info_struct *drv = get_drv(disk);
-
-#ifdef CCISS_DEBUG
-        printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
-#endif /* CCISS_DEBUG */
+        ctlr_info_t *h;
+        drive_info_struct *drv;
 
+        lock_kernel();
+        h = get_host(disk);
+        drv = get_drv(disk);
+        dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
         drv->usage_count--;
-        host->usage_count--;
+        h->usage_count--;
+        unlock_kernel();
         return 0;
 }
 
-#ifdef CONFIG_COMPAT
-
 static int do_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned cmd, unsigned long arg)
 {
@@ -1033,6 +1094,8 @@ static int do_ioctl(struct block_device *bdev, fmode_t mode, | |||
1033 | return ret; | 1094 | return ret; |
1034 | } | 1095 | } |
1035 | 1096 | ||
1097 | #ifdef CONFIG_COMPAT | ||
1098 | |||
1036 | static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, | 1099 | static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, |
1037 | unsigned cmd, unsigned long arg); | 1100 | unsigned cmd, unsigned long arg); |
1038 | static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, | 1101 | static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, |
@@ -1163,11 +1226,11 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
1163 | return 0; | 1226 | return 0; |
1164 | } | 1227 | } |
1165 | 1228 | ||
1166 | static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c) | 1229 | static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c) |
1167 | { | 1230 | { |
1168 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && | 1231 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && |
1169 | c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) | 1232 | c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) |
1170 | (void)check_for_unit_attention(host, c); | 1233 | (void)check_for_unit_attention(h, c); |
1171 | } | 1234 | } |
1172 | /* | 1235 | /* |
1173 | * ioctl | 1236 | * ioctl |
@@ -1176,15 +1239,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1176 | unsigned int cmd, unsigned long arg) | 1239 | unsigned int cmd, unsigned long arg) |
1177 | { | 1240 | { |
1178 | struct gendisk *disk = bdev->bd_disk; | 1241 | struct gendisk *disk = bdev->bd_disk; |
1179 | ctlr_info_t *host = get_host(disk); | 1242 | ctlr_info_t *h = get_host(disk); |
1180 | drive_info_struct *drv = get_drv(disk); | 1243 | drive_info_struct *drv = get_drv(disk); |
1181 | int ctlr = host->ctlr; | ||
1182 | void __user *argp = (void __user *)arg; | 1244 | void __user *argp = (void __user *)arg; |
1183 | 1245 | ||
1184 | #ifdef CCISS_DEBUG | 1246 | dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n", |
1185 | printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg); | 1247 | cmd, arg); |
1186 | #endif /* CCISS_DEBUG */ | ||
1187 | |||
1188 | switch (cmd) { | 1248 | switch (cmd) { |
1189 | case CCISS_GETPCIINFO: | 1249 | case CCISS_GETPCIINFO: |
1190 | { | 1250 | { |
@@ -1192,10 +1252,10 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1192 | 1252 | ||
1193 | if (!arg) | 1253 | if (!arg) |
1194 | return -EINVAL; | 1254 | return -EINVAL; |
1195 | pciinfo.domain = pci_domain_nr(host->pdev->bus); | 1255 | pciinfo.domain = pci_domain_nr(h->pdev->bus); |
1196 | pciinfo.bus = host->pdev->bus->number; | 1256 | pciinfo.bus = h->pdev->bus->number; |
1197 | pciinfo.dev_fn = host->pdev->devfn; | 1257 | pciinfo.dev_fn = h->pdev->devfn; |
1198 | pciinfo.board_id = host->board_id; | 1258 | pciinfo.board_id = h->board_id; |
1199 | if (copy_to_user | 1259 | if (copy_to_user |
1200 | (argp, &pciinfo, sizeof(cciss_pci_info_struct))) | 1260 | (argp, &pciinfo, sizeof(cciss_pci_info_struct))) |
1201 | return -EFAULT; | 1261 | return -EFAULT; |
@@ -1207,9 +1267,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1207 | if (!arg) | 1267 | if (!arg) |
1208 | return -EINVAL; | 1268 | return -EINVAL; |
1209 | intinfo.delay = | 1269 | intinfo.delay = |
1210 | readl(&host->cfgtable->HostWrite.CoalIntDelay); | 1270 | readl(&h->cfgtable->HostWrite.CoalIntDelay); |
1211 | intinfo.count = | 1271 | intinfo.count = |
1212 | readl(&host->cfgtable->HostWrite.CoalIntCount); | 1272 | readl(&h->cfgtable->HostWrite.CoalIntCount); |
1213 | if (copy_to_user | 1273 | if (copy_to_user |
1214 | (argp, &intinfo, sizeof(cciss_coalint_struct))) | 1274 | (argp, &intinfo, sizeof(cciss_coalint_struct))) |
1215 | return -EFAULT; | 1275 | return -EFAULT; |
@@ -1229,26 +1289,23 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1229 | (&intinfo, argp, sizeof(cciss_coalint_struct))) | 1289 | (&intinfo, argp, sizeof(cciss_coalint_struct))) |
1230 | return -EFAULT; | 1290 | return -EFAULT; |
1231 | if ((intinfo.delay == 0) && (intinfo.count == 0)) | 1291 | if ((intinfo.delay == 0) && (intinfo.count == 0)) |
1232 | { | ||
1233 | // printk("cciss_ioctl: delay and count cannot be 0\n"); | ||
1234 | return -EINVAL; | 1292 | return -EINVAL; |
1235 | } | 1293 | spin_lock_irqsave(&h->lock, flags); |
1236 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | ||
1237 | /* Update the field, and then ring the doorbell */ | 1294 | /* Update the field, and then ring the doorbell */ |
1238 | writel(intinfo.delay, | 1295 | writel(intinfo.delay, |
1239 | &(host->cfgtable->HostWrite.CoalIntDelay)); | 1296 | &(h->cfgtable->HostWrite.CoalIntDelay)); |
1240 | writel(intinfo.count, | 1297 | writel(intinfo.count, |
1241 | &(host->cfgtable->HostWrite.CoalIntCount)); | 1298 | &(h->cfgtable->HostWrite.CoalIntCount)); |
1242 | writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); | 1299 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
1243 | 1300 | ||
1244 | for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { | 1301 | for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { |
1245 | if (!(readl(host->vaddr + SA5_DOORBELL) | 1302 | if (!(readl(h->vaddr + SA5_DOORBELL) |
1246 | & CFGTBL_ChangeReq)) | 1303 | & CFGTBL_ChangeReq)) |
1247 | break; | 1304 | break; |
1248 | /* delay and try again */ | 1305 | /* delay and try again */ |
1249 | udelay(1000); | 1306 | udelay(1000); |
1250 | } | 1307 | } |
1251 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1308 | spin_unlock_irqrestore(&h->lock, flags); |
1252 | if (i >= MAX_IOCTL_CONFIG_WAIT) | 1309 | if (i >= MAX_IOCTL_CONFIG_WAIT) |
1253 | return -EAGAIN; | 1310 | return -EAGAIN; |
1254 | return 0; | 1311 | return 0; |
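Both CCISS_SETINTINFO and CCISS_SETNODENAME follow the same config-table handshake under h->lock: update the field, ring the doorbell, then poll up to MAX_IOCTL_CONFIG_WAIT times for the controller to clear CFGTBL_ChangeReq, returning -EAGAIN on timeout. A self-contained sketch of that bounded poll; the fake doorbell that acks after three reads is invented for the example:

    #include <stdio.h>

    #define CFG_CHANGE_REQ 0x1u
    #define MAX_CONFIG_WAIT 1000

    static int polls; /* lets the fake controller ack after a few reads */

    static void ring_doorbell(void) { polls = 0; }
    static unsigned int read_doorbell(void)
    {
            return ++polls < 3 ? CFG_CHANGE_REQ : 0;
    }

    static int commit_config(void)
    {
            int i;

            ring_doorbell(); /* ask the controller to re-read the config table */
            for (i = 0; i < MAX_CONFIG_WAIT; i++) {
                    if (!(read_doorbell() & CFG_CHANGE_REQ))
                            break; /* controller has consumed the change */
                    /* the driver waits udelay(1000) between polls */
            }
            return i >= MAX_CONFIG_WAIT ? -1 : 0; /* -1 maps to -EAGAIN */
    }

    int main(void)
    {
            printf("commit_config: %d\n", commit_config());
            return 0;
    }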
@@ -1262,7 +1319,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1262 | return -EINVAL; | 1319 | return -EINVAL; |
1263 | for (i = 0; i < 16; i++) | 1320 | for (i = 0; i < 16; i++) |
1264 | NodeName[i] = | 1321 | NodeName[i] = |
1265 | readb(&host->cfgtable->ServerName[i]); | 1322 | readb(&h->cfgtable->ServerName[i]); |
1266 | if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) | 1323 | if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) |
1267 | return -EFAULT; | 1324 | return -EFAULT; |
1268 | return 0; | 1325 | return 0; |
@@ -1282,23 +1339,23 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1282 | (NodeName, argp, sizeof(NodeName_type))) | 1339 | (NodeName, argp, sizeof(NodeName_type))) |
1283 | return -EFAULT; | 1340 | return -EFAULT; |
1284 | 1341 | ||
1285 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1342 | spin_lock_irqsave(&h->lock, flags); |
1286 | 1343 | ||
1287 | /* Update the field, and then ring the doorbell */ | 1344 | /* Update the field, and then ring the doorbell */ |
1288 | for (i = 0; i < 16; i++) | 1345 | for (i = 0; i < 16; i++) |
1289 | writeb(NodeName[i], | 1346 | writeb(NodeName[i], |
1290 | &host->cfgtable->ServerName[i]); | 1347 | &h->cfgtable->ServerName[i]); |
1291 | 1348 | ||
1292 | writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); | 1349 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
1293 | 1350 | ||
1294 | for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { | 1351 | for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { |
1295 | if (!(readl(host->vaddr + SA5_DOORBELL) | 1352 | if (!(readl(h->vaddr + SA5_DOORBELL) |
1296 | & CFGTBL_ChangeReq)) | 1353 | & CFGTBL_ChangeReq)) |
1297 | break; | 1354 | break; |
1298 | /* delay and try again */ | 1355 | /* delay and try again */ |
1299 | udelay(1000); | 1356 | udelay(1000); |
1300 | } | 1357 | } |
1301 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1358 | spin_unlock_irqrestore(&h->lock, flags); |
1302 | if (i >= MAX_IOCTL_CONFIG_WAIT) | 1359 | if (i >= MAX_IOCTL_CONFIG_WAIT) |
1303 | return -EAGAIN; | 1360 | return -EAGAIN; |
1304 | return 0; | 1361 | return 0; |
@@ -1310,7 +1367,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1310 | 1367 | ||
1311 | if (!arg) | 1368 | if (!arg) |
1312 | return -EINVAL; | 1369 | return -EINVAL; |
1313 | heartbeat = readl(&host->cfgtable->HeartBeat); | 1370 | heartbeat = readl(&h->cfgtable->HeartBeat); |
1314 | if (copy_to_user | 1371 | if (copy_to_user |
1315 | (argp, &heartbeat, sizeof(Heartbeat_type))) | 1372 | (argp, &heartbeat, sizeof(Heartbeat_type))) |
1316 | return -EFAULT; | 1373 | return -EFAULT; |
@@ -1322,7 +1379,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1322 | 1379 | ||
1323 | if (!arg) | 1380 | if (!arg) |
1324 | return -EINVAL; | 1381 | return -EINVAL; |
1325 | BusTypes = readl(&host->cfgtable->BusTypes); | 1382 | BusTypes = readl(&h->cfgtable->BusTypes); |
1326 | if (copy_to_user | 1383 | if (copy_to_user |
1327 | (argp, &BusTypes, sizeof(BusTypes_type))) | 1384 | (argp, &BusTypes, sizeof(BusTypes_type))) |
1328 | return -EFAULT; | 1385 | return -EFAULT; |
@@ -1334,7 +1391,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1334 | 1391 | ||
1335 | if (!arg) | 1392 | if (!arg) |
1336 | return -EINVAL; | 1393 | return -EINVAL; |
1337 | memcpy(firmware, host->firm_ver, 4); | 1394 | memcpy(firmware, h->firm_ver, 4); |
1338 | 1395 | ||
1339 | if (copy_to_user | 1396 | if (copy_to_user |
1340 | (argp, firmware, sizeof(FirmwareVer_type))) | 1397 | (argp, firmware, sizeof(FirmwareVer_type))) |
@@ -1357,7 +1414,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1357 | case CCISS_DEREGDISK: | 1414 | case CCISS_DEREGDISK: |
1358 | case CCISS_REGNEWD: | 1415 | case CCISS_REGNEWD: |
1359 | case CCISS_REVALIDVOLS: | 1416 | case CCISS_REVALIDVOLS: |
1360 | return rebuild_lun_table(host, 0, 1); | 1417 | return rebuild_lun_table(h, 0, 1); |
1361 | 1418 | ||
1362 | case CCISS_GETLUNINFO:{ | 1419 | case CCISS_GETLUNINFO:{ |
1363 | LogvolInfo_struct luninfo; | 1420 | LogvolInfo_struct luninfo; |
@@ -1377,7 +1434,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1377 | CommandList_struct *c; | 1434 | CommandList_struct *c; |
1378 | char *buff = NULL; | 1435 | char *buff = NULL; |
1379 | u64bit temp64; | 1436 | u64bit temp64; |
1380 | unsigned long flags; | ||
1381 | DECLARE_COMPLETION_ONSTACK(wait); | 1437 | DECLARE_COMPLETION_ONSTACK(wait); |
1382 | 1438 | ||
1383 | if (!arg) | 1439 | if (!arg) |
@@ -1413,7 +1469,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1413 | } else { | 1469 | } else { |
1414 | memset(buff, 0, iocommand.buf_size); | 1470 | memset(buff, 0, iocommand.buf_size); |
1415 | } | 1471 | } |
1416 | if ((c = cmd_alloc(host, 0)) == NULL) { | 1472 | c = cmd_special_alloc(h); |
1473 | if (!c) { | ||
1417 | kfree(buff); | 1474 | kfree(buff); |
1418 | return -ENOMEM; | 1475 | return -ENOMEM; |
1419 | } | 1476 | } |
@@ -1439,7 +1496,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1439 | 1496 | ||
1440 | /* Fill in the scatter gather information */ | 1497 | /* Fill in the scatter gather information */ |
1441 | if (iocommand.buf_size > 0) { | 1498 | if (iocommand.buf_size > 0) { |
1442 | temp64.val = pci_map_single(host->pdev, buff, | 1499 | temp64.val = pci_map_single(h->pdev, buff, |
1443 | iocommand.buf_size, | 1500 | iocommand.buf_size, |
1444 | PCI_DMA_BIDIRECTIONAL); | 1501 | PCI_DMA_BIDIRECTIONAL); |
1445 | c->SG[0].Addr.lower = temp64.val32.lower; | 1502 | c->SG[0].Addr.lower = temp64.val32.lower; |
@@ -1449,30 +1506,24 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1449 | } | 1506 | } |
1450 | c->waiting = &wait; | 1507 | c->waiting = &wait; |
1451 | 1508 | ||
1452 | /* Put the request on the tail of the request queue */ | 1509 | enqueue_cmd_and_start_io(h, c); |
1453 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | ||
1454 | addQ(&host->reqQ, c); | ||
1455 | host->Qdepth++; | ||
1456 | start_io(host); | ||
1457 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | ||
1458 | |||
1459 | wait_for_completion(&wait); | 1510 | wait_for_completion(&wait); |
1460 | 1511 | ||
1461 | /* unlock the buffers from DMA */ | 1512 | /* unlock the buffers from DMA */ |
1462 | temp64.val32.lower = c->SG[0].Addr.lower; | 1513 | temp64.val32.lower = c->SG[0].Addr.lower; |
1463 | temp64.val32.upper = c->SG[0].Addr.upper; | 1514 | temp64.val32.upper = c->SG[0].Addr.upper; |
1464 | pci_unmap_single(host->pdev, (dma_addr_t) temp64.val, | 1515 | pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, |
1465 | iocommand.buf_size, | 1516 | iocommand.buf_size, |
1466 | PCI_DMA_BIDIRECTIONAL); | 1517 | PCI_DMA_BIDIRECTIONAL); |
1467 | 1518 | ||
1468 | check_ioctl_unit_attention(host, c); | 1519 | check_ioctl_unit_attention(h, c); |
1469 | 1520 | ||
1470 | /* Copy the error information out */ | 1521 | /* Copy the error information out */ |
1471 | iocommand.error_info = *(c->err_info); | 1522 | iocommand.error_info = *(c->err_info); |
1472 | if (copy_to_user | 1523 | if (copy_to_user |
1473 | (argp, &iocommand, sizeof(IOCTL_Command_struct))) { | 1524 | (argp, &iocommand, sizeof(IOCTL_Command_struct))) { |
1474 | kfree(buff); | 1525 | kfree(buff); |
1475 | cmd_free(host, c, 0); | 1526 | cmd_special_free(h, c); |
1476 | return -EFAULT; | 1527 | return -EFAULT; |
1477 | } | 1528 | } |
1478 | 1529 | ||
@@ -1481,12 +1532,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1481 | if (copy_to_user | 1532 | if (copy_to_user |
1482 | (iocommand.buf, buff, iocommand.buf_size)) { | 1533 | (iocommand.buf, buff, iocommand.buf_size)) { |
1483 | kfree(buff); | 1534 | kfree(buff); |
1484 | cmd_free(host, c, 0); | 1535 | cmd_special_free(h, c); |
1485 | return -EFAULT; | 1536 | return -EFAULT; |
1486 | } | 1537 | } |
1487 | } | 1538 | } |
1488 | kfree(buff); | 1539 | kfree(buff); |
1489 | cmd_free(host, c, 0); | 1540 | cmd_special_free(h, c); |
1490 | return 0; | 1541 | return 0; |
1491 | } | 1542 | } |
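The passthrough case above is the synchronous-command shape this patch factors out: a completion is declared on the stack, enqueue_cmd_and_start_io() replaces the open-coded lock/addQ/Qdepth++/start_io sequence, and the submitter sleeps in wait_for_completion() until the interrupt path signals it. A userspace sketch of that handoff, with a condition variable playing the completion and a thread playing the interrupt handler (all names here are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t cond;
            int done;
    };

    static struct completion wait_c = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    /* Interrupt-handler stand-in: completes the command. */
    static void *irq_path(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&wait_c.lock);
            wait_c.done = 1;
            pthread_cond_signal(&wait_c.cond);
            pthread_mutex_unlock(&wait_c.lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t irq;

            /* enqueue_cmd_and_start_io() equivalent: hand the command off */
            pthread_create(&irq, NULL, irq_path, NULL);

            /* wait_for_completion() equivalent: sleep until it is signalled */
            pthread_mutex_lock(&wait_c.lock);
            while (!wait_c.done)
                    pthread_cond_wait(&wait_c.cond, &wait_c.lock);
            pthread_mutex_unlock(&wait_c.lock);

            pthread_join(irq, NULL);
            printf("command completed\n");
            return 0;
    }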
1492 | case CCISS_BIG_PASSTHRU:{ | 1543 | case CCISS_BIG_PASSTHRU:{ |
@@ -1495,7 +1546,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1495 | unsigned char **buff = NULL; | 1546 | unsigned char **buff = NULL; |
1496 | int *buff_size = NULL; | 1547 | int *buff_size = NULL; |
1497 | u64bit temp64; | 1548 | u64bit temp64; |
1498 | unsigned long flags; | ||
1499 | BYTE sg_used = 0; | 1549 | BYTE sg_used = 0; |
1500 | int status = 0; | 1550 | int status = 0; |
1501 | int i; | 1551 | int i; |
@@ -1569,7 +1619,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1569 | data_ptr += sz; | 1619 | data_ptr += sz; |
1570 | sg_used++; | 1620 | sg_used++; |
1571 | } | 1621 | } |
1572 | if ((c = cmd_alloc(host, 0)) == NULL) { | 1622 | c = cmd_special_alloc(h); |
1623 | if (!c) { | ||
1573 | status = -ENOMEM; | 1624 | status = -ENOMEM; |
1574 | goto cleanup1; | 1625 | goto cleanup1; |
1575 | } | 1626 | } |
@@ -1588,10 +1639,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1588 | 1639 | ||
1589 | c->Request = ioc->Request; | 1640 | c->Request = ioc->Request; |
1590 | if (ioc->buf_size > 0) { | 1641 | if (ioc->buf_size > 0) { |
1591 | int i; | ||
1592 | for (i = 0; i < sg_used; i++) { | 1642 | for (i = 0; i < sg_used; i++) { |
1593 | temp64.val = | 1643 | temp64.val = |
1594 | pci_map_single(host->pdev, buff[i], | 1644 | pci_map_single(h->pdev, buff[i], |
1595 | buff_size[i], | 1645 | buff_size[i], |
1596 | PCI_DMA_BIDIRECTIONAL); | 1646 | PCI_DMA_BIDIRECTIONAL); |
1597 | c->SG[i].Addr.lower = | 1647 | c->SG[i].Addr.lower = |
@@ -1603,26 +1653,21 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1603 | } | 1653 | } |
1604 | } | 1654 | } |
1605 | c->waiting = &wait; | 1655 | c->waiting = &wait; |
1606 | /* Put the request on the tail of the request queue */ | 1656 | enqueue_cmd_and_start_io(h, c); |
1607 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | ||
1608 | addQ(&host->reqQ, c); | ||
1609 | host->Qdepth++; | ||
1610 | start_io(host); | ||
1611 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | ||
1612 | wait_for_completion(&wait); | 1657 | wait_for_completion(&wait); |
1613 | /* unlock the buffers from DMA */ | 1658 | /* unlock the buffers from DMA */ |
1614 | for (i = 0; i < sg_used; i++) { | 1659 | for (i = 0; i < sg_used; i++) { |
1615 | temp64.val32.lower = c->SG[i].Addr.lower; | 1660 | temp64.val32.lower = c->SG[i].Addr.lower; |
1616 | temp64.val32.upper = c->SG[i].Addr.upper; | 1661 | temp64.val32.upper = c->SG[i].Addr.upper; |
1617 | pci_unmap_single(host->pdev, | 1662 | pci_unmap_single(h->pdev, |
1618 | (dma_addr_t) temp64.val, buff_size[i], | 1663 | (dma_addr_t) temp64.val, buff_size[i], |
1619 | PCI_DMA_BIDIRECTIONAL); | 1664 | PCI_DMA_BIDIRECTIONAL); |
1620 | } | 1665 | } |
1621 | check_ioctl_unit_attention(host, c); | 1666 | check_ioctl_unit_attention(h, c); |
1622 | /* Copy the error information out */ | 1667 | /* Copy the error information out */ |
1623 | ioc->error_info = *(c->err_info); | 1668 | ioc->error_info = *(c->err_info); |
1624 | if (copy_to_user(argp, ioc, sizeof(*ioc))) { | 1669 | if (copy_to_user(argp, ioc, sizeof(*ioc))) { |
1625 | cmd_free(host, c, 0); | 1670 | cmd_special_free(h, c); |
1626 | status = -EFAULT; | 1671 | status = -EFAULT; |
1627 | goto cleanup1; | 1672 | goto cleanup1; |
1628 | } | 1673 | } |
@@ -1632,14 +1677,14 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1632 | for (i = 0; i < sg_used; i++) { | 1677 | for (i = 0; i < sg_used; i++) { |
1633 | if (copy_to_user | 1678 | if (copy_to_user |
1634 | (ptr, buff[i], buff_size[i])) { | 1679 | (ptr, buff[i], buff_size[i])) { |
1635 | cmd_free(host, c, 0); | 1680 | cmd_special_free(h, c); |
1636 | status = -EFAULT; | 1681 | status = -EFAULT; |
1637 | goto cleanup1; | 1682 | goto cleanup1; |
1638 | } | 1683 | } |
1639 | ptr += buff_size[i]; | 1684 | ptr += buff_size[i]; |
1640 | } | 1685 | } |
1641 | } | 1686 | } |
1642 | cmd_free(host, c, 0); | 1687 | cmd_special_free(h, c); |
1643 | status = 0; | 1688 | status = 0; |
1644 | cleanup1: | 1689 | cleanup1: |
1645 | if (buff) { | 1690 | if (buff) { |
@@ -1727,26 +1772,26 @@ static void cciss_check_queues(ctlr_info_t *h) | |||
1727 | 1772 | ||
1728 | static void cciss_softirq_done(struct request *rq) | 1773 | static void cciss_softirq_done(struct request *rq) |
1729 | { | 1774 | { |
1730 | CommandList_struct *cmd = rq->completion_data; | 1775 | CommandList_struct *c = rq->completion_data; |
1731 | ctlr_info_t *h = hba[cmd->ctlr]; | 1776 | ctlr_info_t *h = hba[c->ctlr]; |
1732 | SGDescriptor_struct *curr_sg = cmd->SG; | 1777 | SGDescriptor_struct *curr_sg = c->SG; |
1733 | unsigned long flags; | ||
1734 | u64bit temp64; | 1778 | u64bit temp64; |
1779 | unsigned long flags; | ||
1735 | int i, ddir; | 1780 | int i, ddir; |
1736 | int sg_index = 0; | 1781 | int sg_index = 0; |
1737 | 1782 | ||
1738 | if (cmd->Request.Type.Direction == XFER_READ) | 1783 | if (c->Request.Type.Direction == XFER_READ) |
1739 | ddir = PCI_DMA_FROMDEVICE; | 1784 | ddir = PCI_DMA_FROMDEVICE; |
1740 | else | 1785 | else |
1741 | ddir = PCI_DMA_TODEVICE; | 1786 | ddir = PCI_DMA_TODEVICE; |
1742 | 1787 | ||
1743 | /* command did not need to be retried */ | 1788 | /* command did not need to be retried */ |
1744 | /* unmap the DMA mapping for all the scatter gather elements */ | 1789 | /* unmap the DMA mapping for all the scatter gather elements */ |
1745 | for (i = 0; i < cmd->Header.SGList; i++) { | 1790 | for (i = 0; i < c->Header.SGList; i++) { |
1746 | if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { | 1791 | if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { |
1747 | cciss_unmap_sg_chain_block(h, cmd); | 1792 | cciss_unmap_sg_chain_block(h, c); |
1748 | /* Point to the next block */ | 1793 | /* Point to the next block */ |
1749 | curr_sg = h->cmd_sg_list[cmd->cmdindex]; | 1794 | curr_sg = h->cmd_sg_list[c->cmdindex]; |
1750 | sg_index = 0; | 1795 | sg_index = 0; |
1751 | } | 1796 | } |
1752 | temp64.val32.lower = curr_sg[sg_index].Addr.lower; | 1797 | temp64.val32.lower = curr_sg[sg_index].Addr.lower; |
@@ -1756,18 +1801,16 @@ static void cciss_softirq_done(struct request *rq) | |||
1756 | ++sg_index; | 1801 | ++sg_index; |
1757 | } | 1802 | } |
1758 | 1803 | ||
1759 | #ifdef CCISS_DEBUG | 1804 | dev_dbg(&h->pdev->dev, "Done with %p\n", rq); |
1760 | printk("Done with %p\n", rq); | ||
1761 | #endif /* CCISS_DEBUG */ | ||
1762 | 1805 | ||
1763 | /* set the residual count for pc requests */ | 1806 | /* set the residual count for pc requests */ |
1764 | if (blk_pc_request(rq)) | 1807 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) |
1765 | rq->resid_len = cmd->err_info->ResidualCnt; | 1808 | rq->resid_len = c->err_info->ResidualCnt; |
1766 | 1809 | ||
1767 | blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); | 1810 | blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); |
1768 | 1811 | ||
1769 | spin_lock_irqsave(&h->lock, flags); | 1812 | spin_lock_irqsave(&h->lock, flags); |
1770 | cmd_free(h, cmd, 1); | 1813 | cmd_free(h, c); |
1771 | cciss_check_queues(h); | 1814 | cciss_check_queues(h); |
1772 | spin_unlock_irqrestore(&h->lock, flags); | 1815 | spin_unlock_irqrestore(&h->lock, flags); |
1773 | } | 1816 | } |
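The unmap loop in cciss_softirq_done() has to follow scatter-gather chaining: when an entry's Ext field carries CCISS_SG_CHAIN, traversal hops to the spill-over block and resets the local index while the logical count from Header.SGList keeps running. The traversal on its own, with a simplified two-block layout (the entry fields and chain flag value are stand-ins, not the driver's real descriptor format):

    #include <stdio.h>

    #define SG_CHAIN 0x80000000u

    struct sg_entry {
            unsigned int len;
            unsigned int ext; /* SG_CHAIN marks a link to the next block */
    };

    static struct sg_entry chain_block[] = { { 512, 0 }, { 1024, 0 } };
    static struct sg_entry first_block[] = {
            { 4096, 0 }, { 2048, 0 }, { 0, SG_CHAIN }
    };

    int main(void)
    {
            struct sg_entry *curr = first_block;
            int sg_index = 0;
            int total = 4; /* Header.SGList: logical entries across both blocks */

            for (int i = 0; i < total; i++) {
                    if (curr[sg_index].ext == SG_CHAIN) {
                            curr = chain_block; /* hop to the spill-over block */
                            sg_index = 0;       /* and restart the local index */
                    }
                    printf("entry %d: len %u\n", i, curr[sg_index].len);
                    ++sg_index;
            }
            return 0;
    }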
@@ -1783,7 +1826,7 @@ static inline void log_unit_to_scsi3addr(ctlr_info_t *h, | |||
1783 | * via the inquiry page 0. Model, vendor, and rev are set to empty strings if | 1826 | * via the inquiry page 0. Model, vendor, and rev are set to empty strings if |
1784 | * they cannot be read. | 1827 | * they cannot be read. |
1785 | */ | 1828 | */ |
1786 | static void cciss_get_device_descr(int ctlr, int logvol, | 1829 | static void cciss_get_device_descr(ctlr_info_t *h, int logvol, |
1787 | char *vendor, char *model, char *rev) | 1830 | char *vendor, char *model, char *rev) |
1788 | { | 1831 | { |
1789 | int rc; | 1832 | int rc; |
@@ -1798,8 +1841,8 @@ static void cciss_get_device_descr(int ctlr, int logvol, | |||
1798 | if (!inq_buf) | 1841 | if (!inq_buf) |
1799 | return; | 1842 | return; |
1800 | 1843 | ||
1801 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 1844 | log_unit_to_scsi3addr(h, scsi3addr, logvol); |
1802 | rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, sizeof(*inq_buf), 0, | 1845 | rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0, |
1803 | scsi3addr, TYPE_CMD); | 1846 | scsi3addr, TYPE_CMD); |
1804 | if (rc == IO_OK) { | 1847 | if (rc == IO_OK) { |
1805 | memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); | 1848 | memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); |
@@ -1819,7 +1862,7 @@ static void cciss_get_device_descr(int ctlr, int logvol, | |||
1819 | * number cannot be had, for whatever reason, 16 bytes of 0xff | 1862 | * number cannot be had, for whatever reason, 16 bytes of 0xff |
1820 | * are returned instead. | 1863 | * are returned instead. |
1821 | */ | 1864 | */ |
1822 | static void cciss_get_serial_no(int ctlr, int logvol, | 1865 | static void cciss_get_serial_no(ctlr_info_t *h, int logvol, |
1823 | unsigned char *serial_no, int buflen) | 1866 | unsigned char *serial_no, int buflen) |
1824 | { | 1867 | { |
1825 | #define PAGE_83_INQ_BYTES 64 | 1868 | #define PAGE_83_INQ_BYTES 64 |
@@ -1834,8 +1877,8 @@ static void cciss_get_serial_no(int ctlr, int logvol, | |||
1834 | if (!buf) | 1877 | if (!buf) |
1835 | return; | 1878 | return; |
1836 | memset(serial_no, 0, buflen); | 1879 | memset(serial_no, 0, buflen); |
1837 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 1880 | log_unit_to_scsi3addr(h, scsi3addr, logvol); |
1838 | rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, | 1881 | rc = sendcmd_withirq(h, CISS_INQUIRY, buf, |
1839 | PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); | 1882 | PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); |
1840 | if (rc == IO_OK) | 1883 | if (rc == IO_OK) |
1841 | memcpy(serial_no, &buf[8], buflen); | 1884 | memcpy(serial_no, &buf[8], buflen); |
@@ -1901,10 +1944,9 @@ init_queue_failure: | |||
1901 | * is also the controller node. Any changes to disk 0 will show up on | 1944 | * is also the controller node. Any changes to disk 0 will show up on |
1902 | * the next reboot. | 1945 | * the next reboot. |
1903 | */ | 1946 | */ |
1904 | static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, | 1947 | static void cciss_update_drive_info(ctlr_info_t *h, int drv_index, |
1905 | int via_ioctl) | 1948 | int first_time, int via_ioctl) |
1906 | { | 1949 | { |
1907 | ctlr_info_t *h = hba[ctlr]; | ||
1908 | struct gendisk *disk; | 1950 | struct gendisk *disk; |
1909 | InquiryData_struct *inq_buff = NULL; | 1951 | InquiryData_struct *inq_buff = NULL; |
1910 | unsigned int block_size; | 1952 | unsigned int block_size; |
@@ -1921,16 +1963,16 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, | |||
1921 | 1963 | ||
1922 | /* testing to see if 16-byte CDBs are already being used */ | 1964 | /* testing to see if 16-byte CDBs are already being used */ |
1923 | if (h->cciss_read == CCISS_READ_16) { | 1965 | if (h->cciss_read == CCISS_READ_16) { |
1924 | cciss_read_capacity_16(h->ctlr, drv_index, | 1966 | cciss_read_capacity_16(h, drv_index, |
1925 | &total_size, &block_size); | 1967 | &total_size, &block_size); |
1926 | 1968 | ||
1927 | } else { | 1969 | } else { |
1928 | cciss_read_capacity(ctlr, drv_index, &total_size, &block_size); | 1970 | cciss_read_capacity(h, drv_index, &total_size, &block_size); |
1929 | /* if read_capacity returns all F's this volume is >2TB */ | 1971 | /* if read_capacity returns all F's this volume is >2TB */ |
1930 | /* in size so we switch to 16-byte CDB's for all */ | 1972 | /* in size so we switch to 16-byte CDB's for all */ |
1931 | /* read/write ops */ | 1973 | /* read/write ops */ |
1932 | if (total_size == 0xFFFFFFFFULL) { | 1974 | if (total_size == 0xFFFFFFFFULL) { |
1933 | cciss_read_capacity_16(ctlr, drv_index, | 1975 | cciss_read_capacity_16(h, drv_index, |
1934 | &total_size, &block_size); | 1976 | &total_size, &block_size); |
1935 | h->cciss_read = CCISS_READ_16; | 1977 | h->cciss_read = CCISS_READ_16; |
1936 | h->cciss_write = CCISS_WRITE_16; | 1978 | h->cciss_write = CCISS_WRITE_16; |
@@ -1940,14 +1982,14 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, | |||
1940 | } | 1982 | } |
1941 | } | 1983 | } |
1942 | 1984 | ||
1943 | cciss_geometry_inquiry(ctlr, drv_index, total_size, block_size, | 1985 | cciss_geometry_inquiry(h, drv_index, total_size, block_size, |
1944 | inq_buff, drvinfo); | 1986 | inq_buff, drvinfo); |
1945 | drvinfo->block_size = block_size; | 1987 | drvinfo->block_size = block_size; |
1946 | drvinfo->nr_blocks = total_size + 1; | 1988 | drvinfo->nr_blocks = total_size + 1; |
1947 | 1989 | ||
1948 | cciss_get_device_descr(ctlr, drv_index, drvinfo->vendor, | 1990 | cciss_get_device_descr(h, drv_index, drvinfo->vendor, |
1949 | drvinfo->model, drvinfo->rev); | 1991 | drvinfo->model, drvinfo->rev); |
1950 | cciss_get_serial_no(ctlr, drv_index, drvinfo->serial_no, | 1992 | cciss_get_serial_no(h, drv_index, drvinfo->serial_no, |
1951 | sizeof(drvinfo->serial_no)); | 1993 | sizeof(drvinfo->serial_no)); |
1952 | /* Save the lunid in case we deregister the disk, below. */ | 1994 | /* Save the lunid in case we deregister the disk, below. */ |
1953 | memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, | 1995 | memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, |
@@ -1972,10 +2014,10 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, | |||
1972 | * (unless it's the first disk (for the controller node). | 2014 | * (unless it's the first disk (for the controller node). |
1973 | */ | 2015 | */ |
1974 | if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { | 2016 | if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { |
1975 | printk(KERN_WARNING "disk %d has changed.\n", drv_index); | 2017 | dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index); |
1976 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | 2018 | spin_lock_irqsave(&h->lock, flags); |
1977 | h->drv[drv_index]->busy_configuring = 1; | 2019 | h->drv[drv_index]->busy_configuring = 1; |
1978 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 2020 | spin_unlock_irqrestore(&h->lock, flags); |
1979 | 2021 | ||
1980 | /* deregister_disk sets h->drv[drv_index]->queue = NULL | 2022 | /* deregister_disk sets h->drv[drv_index]->queue = NULL |
1981 | * which keeps the interrupt handler from starting | 2023 | * which keeps the interrupt handler from starting |
@@ -2025,8 +2067,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, | |||
2025 | if (cciss_add_disk(h, disk, drv_index) != 0) { | 2067 | if (cciss_add_disk(h, disk, drv_index) != 0) { |
2026 | cciss_free_gendisk(h, drv_index); | 2068 | cciss_free_gendisk(h, drv_index); |
2027 | cciss_free_drive_info(h, drv_index); | 2069 | cciss_free_drive_info(h, drv_index); |
2028 | printk(KERN_WARNING "cciss:%d could not update " | 2070 | dev_warn(&h->pdev->dev, "could not update disk %d\n", |
2029 | "disk %d\n", h->ctlr, drv_index); | 2071 | drv_index); |
2030 | --h->num_luns; | 2072 | --h->num_luns; |
2031 | } | 2073 | } |
2032 | } | 2074 | } |
@@ -2036,7 +2078,7 @@ freeret: | |||
2036 | kfree(drvinfo); | 2078 | kfree(drvinfo); |
2037 | return; | 2079 | return; |
2038 | mem_msg: | 2080 | mem_msg: |
2039 | printk(KERN_ERR "cciss: out of memory\n"); | 2081 | dev_err(&h->pdev->dev, "out of memory\n"); |
2040 | goto freeret; | 2082 | goto freeret; |
2041 | } | 2083 | } |
2042 | 2084 | ||
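The capacity probe in cciss_update_drive_info() encodes the >2TB rule: READ CAPACITY(10) saturates its 32-bit last-LBA field at 0xFFFFFFFF, and that sentinel switches the whole controller over to 16-byte CDBs before re-probing. The decision in isolation (the two read_capacity stubs fake the driver helpers, and the 16-byte result is an arbitrary sample):

    #include <stdint.h>
    #include <stdio.h>

    /* Stubs for cciss_read_capacity()/cciss_read_capacity_16(). */
    static uint64_t read_capacity10(void) { return 0xFFFFFFFFULL; } /* saturated */
    static uint64_t read_capacity16(void) { return 0x12A000000ULL; }

    int main(void)
    {
            int use_16byte_cdbs = 0;
            uint64_t total_size = read_capacity10();

            /* All F's from the 10-byte command means the volume is >2TB in
             * size, so re-probe with the 16-byte variant and switch all
             * read/write ops over, just as the driver does. */
            if (total_size == 0xFFFFFFFFULL) {
                    total_size = read_capacity16();
                    use_16byte_cdbs = 1;
            }

            /* nr_blocks is last LBA + 1, matching drvinfo->nr_blocks above */
            printf("blocks=%llu, 16-byte CDBs: %d\n",
                   (unsigned long long)(total_size + 1), use_16byte_cdbs);
            return 0;
    }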
@@ -2128,9 +2170,9 @@ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], | |||
2128 | h->gendisk[drv_index] = | 2170 | h->gendisk[drv_index] = |
2129 | alloc_disk(1 << NWD_SHIFT); | 2171 | alloc_disk(1 << NWD_SHIFT); |
2130 | if (!h->gendisk[drv_index]) { | 2172 | if (!h->gendisk[drv_index]) { |
2131 | printk(KERN_ERR "cciss%d: could not " | 2173 | dev_err(&h->pdev->dev, |
2132 | "allocate a new disk %d\n", | 2174 | "could not allocate a new disk %d\n", |
2133 | h->ctlr, drv_index); | 2175 | drv_index); |
2134 | goto err_free_drive_info; | 2176 | goto err_free_drive_info; |
2135 | } | 2177 | } |
2136 | } | 2178 | } |
@@ -2181,8 +2223,7 @@ static void cciss_add_controller_node(ctlr_info_t *h) | |||
2181 | cciss_free_gendisk(h, drv_index); | 2223 | cciss_free_gendisk(h, drv_index); |
2182 | cciss_free_drive_info(h, drv_index); | 2224 | cciss_free_drive_info(h, drv_index); |
2183 | error: | 2225 | error: |
2184 | printk(KERN_WARNING "cciss%d: could not " | 2226 | dev_warn(&h->pdev->dev, "could not add disk 0.\n"); |
2185 | "add disk 0.\n", h->ctlr); | ||
2186 | return; | 2227 | return; |
2187 | } | 2228 | } |
2188 | 2229 | ||
@@ -2197,7 +2238,6 @@ error: | |||
2197 | static int rebuild_lun_table(ctlr_info_t *h, int first_time, | 2238 | static int rebuild_lun_table(ctlr_info_t *h, int first_time, |
2198 | int via_ioctl) | 2239 | int via_ioctl) |
2199 | { | 2240 | { |
2200 | int ctlr = h->ctlr; | ||
2201 | int num_luns; | 2241 | int num_luns; |
2202 | ReportLunData_struct *ld_buff = NULL; | 2242 | ReportLunData_struct *ld_buff = NULL; |
2203 | int return_code; | 2243 | int return_code; |
@@ -2212,27 +2252,27 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, | |||
2212 | return -EPERM; | 2252 | return -EPERM; |
2213 | 2253 | ||
2214 | /* Set busy_configuring flag for this operation */ | 2254 | /* Set busy_configuring flag for this operation */ |
2215 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | 2255 | spin_lock_irqsave(&h->lock, flags); |
2216 | if (h->busy_configuring) { | 2256 | if (h->busy_configuring) { |
2217 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 2257 | spin_unlock_irqrestore(&h->lock, flags); |
2218 | return -EBUSY; | 2258 | return -EBUSY; |
2219 | } | 2259 | } |
2220 | h->busy_configuring = 1; | 2260 | h->busy_configuring = 1; |
2221 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 2261 | spin_unlock_irqrestore(&h->lock, flags); |
2222 | 2262 | ||
2223 | ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); | 2263 | ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); |
2224 | if (ld_buff == NULL) | 2264 | if (ld_buff == NULL) |
2225 | goto mem_msg; | 2265 | goto mem_msg; |
2226 | 2266 | ||
2227 | return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, | 2267 | return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff, |
2228 | sizeof(ReportLunData_struct), | 2268 | sizeof(ReportLunData_struct), |
2229 | 0, CTLR_LUNID, TYPE_CMD); | 2269 | 0, CTLR_LUNID, TYPE_CMD); |
2230 | 2270 | ||
2231 | if (return_code == IO_OK) | 2271 | if (return_code == IO_OK) |
2232 | listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); | 2272 | listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); |
2233 | else { /* reading number of logical volumes failed */ | 2273 | else { /* reading number of logical volumes failed */ |
2234 | printk(KERN_WARNING "cciss: report logical volume" | 2274 | dev_warn(&h->pdev->dev, |
2235 | " command failed\n"); | 2275 | "report logical volume command failed\n"); |
2236 | listlength = 0; | 2276 | listlength = 0; |
2237 | goto freeret; | 2277 | goto freeret; |
2238 | } | 2278 | } |
@@ -2240,7 +2280,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, | |||
2240 | num_luns = listlength / 8; /* 8 bytes per entry */ | 2280 | num_luns = listlength / 8; /* 8 bytes per entry */ |
2241 | if (num_luns > CISS_MAX_LUN) { | 2281 | if (num_luns > CISS_MAX_LUN) { |
2242 | num_luns = CISS_MAX_LUN; | 2282 | num_luns = CISS_MAX_LUN; |
2243 | printk(KERN_WARNING "cciss: more luns configured" | 2283 | dev_warn(&h->pdev->dev, "more luns configured" |
2244 | " on controller than can be handled by" | 2284 | " on controller than can be handled by" |
2245 | " this driver.\n"); | 2285 | " this driver.\n"); |
2246 | } | 2286 | } |
@@ -2271,9 +2311,9 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, | |||
2271 | } | 2311 | } |
2272 | if (!drv_found) { | 2312 | if (!drv_found) { |
2273 | /* Deregister it from the OS, it's gone. */ | 2313 | /* Deregister it from the OS, it's gone. */ |
2274 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | 2314 | spin_lock_irqsave(&h->lock, flags); |
2275 | h->drv[i]->busy_configuring = 1; | 2315 | h->drv[i]->busy_configuring = 1; |
2276 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 2316 | spin_unlock_irqrestore(&h->lock, flags); |
2277 | return_code = deregister_disk(h, i, 1, via_ioctl); | 2317 | return_code = deregister_disk(h, i, 1, via_ioctl); |
2278 | if (h->drv[i] != NULL) | 2318 | if (h->drv[i] != NULL) |
2279 | h->drv[i]->busy_configuring = 0; | 2319 | h->drv[i]->busy_configuring = 0; |
@@ -2312,8 +2352,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, | |||
2312 | if (drv_index == -1) | 2352 | if (drv_index == -1) |
2313 | goto freeret; | 2353 | goto freeret; |
2314 | } | 2354 | } |
2315 | cciss_update_drive_info(ctlr, drv_index, first_time, | 2355 | cciss_update_drive_info(h, drv_index, first_time, via_ioctl); |
2316 | via_ioctl); | ||
2317 | } /* end for */ | 2356 | } /* end for */ |
2318 | 2357 | ||
2319 | freeret: | 2358 | freeret: |
@@ -2325,7 +2364,7 @@ freeret: | |||
2325 | */ | 2364 | */ |
2326 | return -1; | 2365 | return -1; |
2327 | mem_msg: | 2366 | mem_msg: |
2328 | printk(KERN_ERR "cciss: out of memory\n"); | 2367 | dev_err(&h->pdev->dev, "out of memory\n"); |
2329 | h->busy_configuring = 0; | 2368 | h->busy_configuring = 0; |
2330 | goto freeret; | 2369 | goto freeret; |
2331 | } | 2370 | } |
@@ -2434,7 +2473,7 @@ static int deregister_disk(ctlr_info_t *h, int drv_index, | |||
2434 | 2473 | ||
2435 | /* if it was the last disk, find the new highest lun */ | 2474 | /* if it was the last disk, find the new highest lun */ |
2436 | if (clear_all && recalculate_highest_lun) { | 2475 | if (clear_all && recalculate_highest_lun) { |
2437 | int i, newhighest = -1; | 2476 | int newhighest = -1; |
2438 | for (i = 0; i <= h->highest_lun; i++) { | 2477 | for (i = 0; i <= h->highest_lun; i++) { |
2439 | /* if the disk has size > 0, it is available */ | 2478 | /* if the disk has size > 0, it is available */ |
2440 | if (h->drv[i] && h->drv[i]->heads) | 2479 | if (h->drv[i] && h->drv[i]->heads) |
@@ -2445,11 +2484,10 @@ static int deregister_disk(ctlr_info_t *h, int drv_index, | |||
2445 | return 0; | 2484 | return 0; |
2446 | } | 2485 | } |
2447 | 2486 | ||
2448 | static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, | 2487 | static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, |
2449 | size_t size, __u8 page_code, unsigned char *scsi3addr, | 2488 | size_t size, __u8 page_code, unsigned char *scsi3addr, |
2450 | int cmd_type) | 2489 | int cmd_type) |
2451 | { | 2490 | { |
2452 | ctlr_info_t *h = hba[ctlr]; | ||
2453 | u64bit buff_dma_handle; | 2491 | u64bit buff_dma_handle; |
2454 | int status = IO_OK; | 2492 | int status = IO_OK; |
2455 | 2493 | ||
@@ -2533,8 +2571,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, | |||
2533 | c->Request.Timeout = 0; | 2571 | c->Request.Timeout = 0; |
2534 | break; | 2572 | break; |
2535 | default: | 2573 | default: |
2536 | printk(KERN_WARNING | 2574 | dev_warn(&h->pdev->dev, "Unknown Command 0x%02x\n", cmd); |
2537 | "cciss%d: Unknown Command 0x%c\n", ctlr, cmd); | ||
2538 | return IO_ERROR; | 2575 | return IO_ERROR; |
2539 | } | 2576 | } |
2540 | } else if (cmd_type == TYPE_MSG) { | 2577 | } else if (cmd_type == TYPE_MSG) { |
@@ -2566,13 +2603,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, | |||
2566 | c->Request.CDB[0] = cmd; | 2603 | c->Request.CDB[0] = cmd; |
2567 | break; | 2604 | break; |
2568 | default: | 2605 | default: |
2569 | printk(KERN_WARNING | 2606 | dev_warn(&h->pdev->dev, |
2570 | "cciss%d: unknown message type %d\n", ctlr, cmd); | 2607 | "unknown message type %d\n", cmd); |
2571 | return IO_ERROR; | 2608 | return IO_ERROR; |
2572 | } | 2609 | } |
2573 | } else { | 2610 | } else { |
2574 | printk(KERN_WARNING | 2611 | dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); |
2575 | "cciss%d: unknown command type %d\n", ctlr, cmd_type); | ||
2576 | return IO_ERROR; | 2612 | return IO_ERROR; |
2577 | } | 2613 | } |
2578 | /* Fill in the scatter gather information */ | 2614 | /* Fill in the scatter gather information */ |
@@ -2600,15 +2636,14 @@ static int check_target_status(ctlr_info_t *h, CommandList_struct *c) | |||
2600 | default: | 2636 | default: |
2601 | if (check_for_unit_attention(h, c)) | 2637 | if (check_for_unit_attention(h, c)) |
2602 | return IO_NEEDS_RETRY; | 2638 | return IO_NEEDS_RETRY; |
2603 | printk(KERN_WARNING "cciss%d: cmd 0x%02x " | 2639 | dev_warn(&h->pdev->dev, "cmd 0x%02x " |
2604 | "check condition, sense key = 0x%02x\n", | 2640 | "check condition, sense key = 0x%02x\n", |
2605 | h->ctlr, c->Request.CDB[0], | 2641 | c->Request.CDB[0], c->err_info->SenseInfo[2]); |
2606 | c->err_info->SenseInfo[2]); | ||
2607 | } | 2642 | } |
2608 | break; | 2643 | break; |
2609 | default: | 2644 | default: |
2610 | printk(KERN_WARNING "cciss%d: cmd 0x%02x" | 2645 | dev_warn(&h->pdev->dev, "cmd 0x%02x" |
2611 | "scsi status = 0x%02x\n", h->ctlr, | 2646 | " scsi status = 0x%02x\n", |
2612 | c->Request.CDB[0], c->err_info->ScsiStatus); | 2647 | c->Request.CDB[0], c->err_info->ScsiStatus); |
2613 | break; | 2648 | break; |
2614 | } | 2649 | } |
@@ -2631,43 +2666,42 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c) | |||
2631 | /* expected for inquiry and report lun commands */ | 2666 | /* expected for inquiry and report lun commands */ |
2632 | break; | 2667 | break; |
2633 | case CMD_INVALID: | 2668 | case CMD_INVALID: |
2634 | printk(KERN_WARNING "cciss: cmd 0x%02x is " | 2669 | dev_warn(&h->pdev->dev, "cmd 0x%02x is " |
2635 | "reported invalid\n", c->Request.CDB[0]); | 2670 | "reported invalid\n", c->Request.CDB[0]); |
2636 | return_status = IO_ERROR; | 2671 | return_status = IO_ERROR; |
2637 | break; | 2672 | break; |
2638 | case CMD_PROTOCOL_ERR: | 2673 | case CMD_PROTOCOL_ERR: |
2639 | printk(KERN_WARNING "cciss: cmd 0x%02x has " | 2674 | dev_warn(&h->pdev->dev, "cmd 0x%02x has " |
2640 | "protocol error \n", c->Request.CDB[0]); | 2675 | "protocol error\n", c->Request.CDB[0]); |
2641 | return_status = IO_ERROR; | 2676 | return_status = IO_ERROR; |
2642 | break; | 2677 | break; |
2643 | case CMD_HARDWARE_ERR: | 2678 | case CMD_HARDWARE_ERR: |
2644 | printk(KERN_WARNING "cciss: cmd 0x%02x had " | 2679 | dev_warn(&h->pdev->dev, "cmd 0x%02x had " |
2645 | " hardware error\n", c->Request.CDB[0]); | 2680 | "hardware error\n", c->Request.CDB[0]); |
2646 | return_status = IO_ERROR; | 2681 | return_status = IO_ERROR; |
2647 | break; | 2682 | break; |
2648 | case CMD_CONNECTION_LOST: | 2683 | case CMD_CONNECTION_LOST: |
2649 | printk(KERN_WARNING "cciss: cmd 0x%02x had " | 2684 | dev_warn(&h->pdev->dev, "cmd 0x%02x had " |
2650 | "connection lost\n", c->Request.CDB[0]); | 2685 | "connection lost\n", c->Request.CDB[0]); |
2651 | return_status = IO_ERROR; | 2686 | return_status = IO_ERROR; |
2652 | break; | 2687 | break; |
2653 | case CMD_ABORTED: | 2688 | case CMD_ABORTED: |
2654 | printk(KERN_WARNING "cciss: cmd 0x%02x was " | 2689 | dev_warn(&h->pdev->dev, "cmd 0x%02x was " |
2655 | "aborted\n", c->Request.CDB[0]); | 2690 | "aborted\n", c->Request.CDB[0]); |
2656 | return_status = IO_ERROR; | 2691 | return_status = IO_ERROR; |
2657 | break; | 2692 | break; |
2658 | case CMD_ABORT_FAILED: | 2693 | case CMD_ABORT_FAILED: |
2659 | printk(KERN_WARNING "cciss: cmd 0x%02x reports " | 2694 | dev_warn(&h->pdev->dev, "cmd 0x%02x reports " |
2660 | "abort failed\n", c->Request.CDB[0]); | 2695 | "abort failed\n", c->Request.CDB[0]); |
2661 | return_status = IO_ERROR; | 2696 | return_status = IO_ERROR; |
2662 | break; | 2697 | break; |
2663 | case CMD_UNSOLICITED_ABORT: | 2698 | case CMD_UNSOLICITED_ABORT: |
2664 | printk(KERN_WARNING | 2699 | dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n", |
2665 | "cciss%d: unsolicited abort 0x%02x\n", h->ctlr, | ||
2666 | c->Request.CDB[0]); | 2700 | c->Request.CDB[0]); |
2667 | return_status = IO_NEEDS_RETRY; | 2701 | return_status = IO_NEEDS_RETRY; |
2668 | break; | 2702 | break; |
2669 | default: | 2703 | default: |
2670 | printk(KERN_WARNING "cciss: cmd 0x%02x returned " | 2704 | dev_warn(&h->pdev->dev, "cmd 0x%02x returned " |
2671 | "unknown status %x\n", c->Request.CDB[0], | 2705 | "unknown status %x\n", c->Request.CDB[0], |
2672 | c->err_info->CommandStatus); | 2706 | c->err_info->CommandStatus); |
2673 | return_status = IO_ERROR; | 2707 | return_status = IO_ERROR; |
@@ -2680,17 +2714,11 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, | |||
2680 | { | 2714 | { |
2681 | DECLARE_COMPLETION_ONSTACK(wait); | 2715 | DECLARE_COMPLETION_ONSTACK(wait); |
2682 | u64bit buff_dma_handle; | 2716 | u64bit buff_dma_handle; |
2683 | unsigned long flags; | ||
2684 | int return_status = IO_OK; | 2717 | int return_status = IO_OK; |
2685 | 2718 | ||
2686 | resend_cmd2: | 2719 | resend_cmd2: |
2687 | c->waiting = &wait; | 2720 | c->waiting = &wait; |
2688 | /* Put the request on the tail of the queue and send it */ | 2721 | enqueue_cmd_and_start_io(h, c); |
2689 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | ||
2690 | addQ(&h->reqQ, c); | ||
2691 | h->Qdepth++; | ||
2692 | start_io(h); | ||
2693 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
2694 | 2722 | ||
2695 | wait_for_completion(&wait); | 2723 | wait_for_completion(&wait); |
2696 | 2724 | ||
@@ -2701,7 +2729,7 @@ resend_cmd2: | |||
2701 | 2729 | ||
2702 | if (return_status == IO_NEEDS_RETRY && | 2730 | if (return_status == IO_NEEDS_RETRY && |
2703 | c->retry_count < MAX_CMD_RETRIES) { | 2731 | c->retry_count < MAX_CMD_RETRIES) { |
2704 | printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr, | 2732 | dev_warn(&h->pdev->dev, "retrying 0x%02x\n", |
2705 | c->Request.CDB[0]); | 2733 | c->Request.CDB[0]); |
2706 | c->retry_count++; | 2734 | c->retry_count++; |
2707 | /* erase the old error information */ | 2735 | /* erase the old error information */ |
@@ -2720,27 +2748,26 @@ command_done: | |||
2720 | return return_status; | 2748 | return return_status; |
2721 | } | 2749 | } |
2722 | 2750 | ||
2723 | static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, | 2751 | static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, |
2724 | __u8 page_code, unsigned char scsi3addr[], | 2752 | __u8 page_code, unsigned char scsi3addr[], |
2725 | int cmd_type) | 2753 | int cmd_type) |
2726 | { | 2754 | { |
2727 | ctlr_info_t *h = hba[ctlr]; | ||
2728 | CommandList_struct *c; | 2755 | CommandList_struct *c; |
2729 | int return_status; | 2756 | int return_status; |
2730 | 2757 | ||
2731 | c = cmd_alloc(h, 0); | 2758 | c = cmd_special_alloc(h); |
2732 | if (!c) | 2759 | if (!c) |
2733 | return -ENOMEM; | 2760 | return -ENOMEM; |
2734 | return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code, | 2761 | return_status = fill_cmd(h, c, cmd, buff, size, page_code, |
2735 | scsi3addr, cmd_type); | 2762 | scsi3addr, cmd_type); |
2736 | if (return_status == IO_OK) | 2763 | if (return_status == IO_OK) |
2737 | return_status = sendcmd_withirq_core(h, c, 1); | 2764 | return_status = sendcmd_withirq_core(h, c, 1); |
2738 | 2765 | ||
2739 | cmd_free(h, c, 0); | 2766 | cmd_special_free(h, c); |
2740 | return return_status; | 2767 | return return_status; |
2741 | } | 2768 | } |
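sendcmd_withirq_core() handles transient failures (unit attention, unsolicited abort) by zeroing the command's error record and jumping back to resend_cmd2 until MAX_CMD_RETRIES is spent. The same bounded-retry control flow without the goto; the submit stub that fails twice and then succeeds is invented for the example:

    #include <stdio.h>
    #include <string.h>

    enum { IO_OK, IO_ERROR, IO_NEEDS_RETRY };
    #define MAX_CMD_RETRIES 3

    struct err_info { int status; char sense[16]; };

    /* Stub: report a transient failure twice, then succeed. */
    static int submit_and_wait(int attempt, struct err_info *e)
    {
            (void)e;
            return attempt < 2 ? IO_NEEDS_RETRY : IO_OK;
    }

    int main(void)
    {
            struct err_info err = { 0 };
            int retry_count = 0;
            int status;

            for (;;) {
                    status = submit_and_wait(retry_count, &err);
                    if (status != IO_NEEDS_RETRY ||
                        retry_count >= MAX_CMD_RETRIES)
                            break;
                    retry_count++;
                    /* erase the old error information before resending */
                    memset(&err, 0, sizeof(err));
            }
            printf("final status %d after %d retries\n", status, retry_count);
            return 0;
    }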
2742 | 2769 | ||
2743 | static void cciss_geometry_inquiry(int ctlr, int logvol, | 2770 | static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, |
2744 | sector_t total_size, | 2771 | sector_t total_size, |
2745 | unsigned int block_size, | 2772 | unsigned int block_size, |
2746 | InquiryData_struct *inq_buff, | 2773 | InquiryData_struct *inq_buff, |
@@ -2751,13 +2778,13 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
2751 | unsigned char scsi3addr[8]; | 2778 | unsigned char scsi3addr[8]; |
2752 | 2779 | ||
2753 | memset(inq_buff, 0, sizeof(InquiryData_struct)); | 2780 | memset(inq_buff, 0, sizeof(InquiryData_struct)); |
2754 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 2781 | log_unit_to_scsi3addr(h, scsi3addr, logvol); |
2755 | return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff, | 2782 | return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, |
2756 | sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); | 2783 | sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); |
2757 | if (return_code == IO_OK) { | 2784 | if (return_code == IO_OK) { |
2758 | if (inq_buff->data_byte[8] == 0xFF) { | 2785 | if (inq_buff->data_byte[8] == 0xFF) { |
2759 | printk(KERN_WARNING | 2786 | dev_warn(&h->pdev->dev, |
2760 | "cciss: reading geometry failed, volume " | 2787 | "reading geometry failed, volume " |
2761 | "does not support reading geometry\n"); | 2788 | "does not support reading geometry\n"); |
2762 | drv->heads = 255; | 2789 | drv->heads = 255; |
2763 | drv->sectors = 32; /* Sectors per track */ | 2790 | drv->sectors = 32; /* Sectors per track */ |
@@ -2781,12 +2808,12 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
2781 | drv->cylinders = real_size; | 2808 | drv->cylinders = real_size; |
2782 | } | 2809 | } |
2783 | } else { /* Get geometry failed */ | 2810 | } else { /* Get geometry failed */ |
2784 | printk(KERN_WARNING "cciss: reading geometry failed\n"); | 2811 | dev_warn(&h->pdev->dev, "reading geometry failed\n"); |
2785 | } | 2812 | } |
2786 | } | 2813 | } |
2787 | 2814 | ||
2788 | static void | 2815 | static void |
2789 | cciss_read_capacity(int ctlr, int logvol, sector_t *total_size, | 2816 | cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, |
2790 | unsigned int *block_size) | 2817 | unsigned int *block_size) |
2791 | { | 2818 | { |
2792 | ReadCapdata_struct *buf; | 2819 | ReadCapdata_struct *buf; |
@@ -2795,25 +2822,25 @@ cciss_read_capacity(int ctlr, int logvol, sector_t *total_size, | |||
2795 | 2822 | ||
2796 | buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); | 2823 | buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); |
2797 | if (!buf) { | 2824 | if (!buf) { |
2798 | printk(KERN_WARNING "cciss: out of memory\n"); | 2825 | dev_warn(&h->pdev->dev, "out of memory\n"); |
2799 | return; | 2826 | return; |
2800 | } | 2827 | } |
2801 | 2828 | ||
2802 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 2829 | log_unit_to_scsi3addr(h, scsi3addr, logvol); |
2803 | return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, buf, | 2830 | return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf, |
2804 | sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); | 2831 | sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); |
2805 | if (return_code == IO_OK) { | 2832 | if (return_code == IO_OK) { |
2806 | *total_size = be32_to_cpu(*(__be32 *) buf->total_size); | 2833 | *total_size = be32_to_cpu(*(__be32 *) buf->total_size); |
2807 | *block_size = be32_to_cpu(*(__be32 *) buf->block_size); | 2834 | *block_size = be32_to_cpu(*(__be32 *) buf->block_size); |
2808 | } else { /* read capacity command failed */ | 2835 | } else { /* read capacity command failed */ |
2809 | printk(KERN_WARNING "cciss: read capacity failed\n"); | 2836 | dev_warn(&h->pdev->dev, "read capacity failed\n"); |
2810 | *total_size = 0; | 2837 | *total_size = 0; |
2811 | *block_size = BLOCK_SIZE; | 2838 | *block_size = BLOCK_SIZE; |
2812 | } | 2839 | } |
2813 | kfree(buf); | 2840 | kfree(buf); |
2814 | } | 2841 | } |
2815 | 2842 | ||
2816 | static void cciss_read_capacity_16(int ctlr, int logvol, | 2843 | static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, |
2817 | sector_t *total_size, unsigned int *block_size) | 2844 | sector_t *total_size, unsigned int *block_size) |
2818 | { | 2845 | { |
2819 | ReadCapdata_struct_16 *buf; | 2846 | ReadCapdata_struct_16 *buf; |
@@ -2822,23 +2849,23 @@ static void cciss_read_capacity_16(int ctlr, int logvol, | |||
2822 | 2849 | ||
2823 | buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); | 2850 | buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); |
2824 | if (!buf) { | 2851 | if (!buf) { |
2825 | printk(KERN_WARNING "cciss: out of memory\n"); | 2852 | dev_warn(&h->pdev->dev, "out of memory\n"); |
2826 | return; | 2853 | return; |
2827 | } | 2854 | } |
2828 | 2855 | ||
2829 | log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); | 2856 | log_unit_to_scsi3addr(h, scsi3addr, logvol); |
2830 | return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16, | 2857 | return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16, |
2831 | ctlr, buf, sizeof(ReadCapdata_struct_16), | 2858 | buf, sizeof(ReadCapdata_struct_16), |
2832 | 0, scsi3addr, TYPE_CMD); | 2859 | 0, scsi3addr, TYPE_CMD); |
2833 | if (return_code == IO_OK) { | 2860 | if (return_code == IO_OK) { |
2834 | *total_size = be64_to_cpu(*(__be64 *) buf->total_size); | 2861 | *total_size = be64_to_cpu(*(__be64 *) buf->total_size); |
2835 | *block_size = be32_to_cpu(*(__be32 *) buf->block_size); | 2862 | *block_size = be32_to_cpu(*(__be32 *) buf->block_size); |
2836 | } else { /* read capacity command failed */ | 2863 | } else { /* read capacity command failed */ |
2837 | printk(KERN_WARNING "cciss: read capacity failed\n"); | 2864 | dev_warn(&h->pdev->dev, "read capacity failed\n"); |
2838 | *total_size = 0; | 2865 | *total_size = 0; |
2839 | *block_size = BLOCK_SIZE; | 2866 | *block_size = BLOCK_SIZE; |
2840 | } | 2867 | } |
2841 | printk(KERN_INFO " blocks= %llu block_size= %d\n", | 2868 | dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n", |
2842 | (unsigned long long)*total_size+1, *block_size); | 2869 | (unsigned long long)*total_size+1, *block_size); |
2843 | kfree(buf); | 2870 | kfree(buf); |
2844 | } | 2871 | } |
@@ -2866,17 +2893,17 @@ static int cciss_revalidate(struct gendisk *disk) | |||
2866 | 2893 | ||
2867 | inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); | 2894 | inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); |
2868 | if (inq_buff == NULL) { | 2895 | if (inq_buff == NULL) { |
2869 | printk(KERN_WARNING "cciss: out of memory\n"); | 2896 | dev_warn(&h->pdev->dev, "out of memory\n"); |
2870 | return 1; | 2897 | return 1; |
2871 | } | 2898 | } |
2872 | if (h->cciss_read == CCISS_READ_10) { | 2899 | if (h->cciss_read == CCISS_READ_10) { |
2873 | cciss_read_capacity(h->ctlr, logvol, | 2900 | cciss_read_capacity(h, logvol, |
2874 | &total_size, &block_size); | 2901 | &total_size, &block_size); |
2875 | } else { | 2902 | } else { |
2876 | cciss_read_capacity_16(h->ctlr, logvol, | 2903 | cciss_read_capacity_16(h, logvol, |
2877 | &total_size, &block_size); | 2904 | &total_size, &block_size); |
2878 | } | 2905 | } |
2879 | cciss_geometry_inquiry(h->ctlr, logvol, total_size, block_size, | 2906 | cciss_geometry_inquiry(h, logvol, total_size, block_size, |
2880 | inq_buff, drv); | 2907 | inq_buff, drv); |
2881 | 2908 | ||
2882 | blk_queue_logical_block_size(drv->queue, drv->block_size); | 2909 | blk_queue_logical_block_size(drv->queue, drv->block_size); |
@@ -2910,7 +2937,7 @@ static void start_io(ctlr_info_t *h) | |||
2910 | c = hlist_entry(h->reqQ.first, CommandList_struct, list); | 2937 | c = hlist_entry(h->reqQ.first, CommandList_struct, list); |
2911 | /* can't do anything if fifo is full */ | 2938 | /* can't do anything if fifo is full */ |
2912 | if ((h->access.fifo_full(h))) { | 2939 | if ((h->access.fifo_full(h))) { |
2913 | printk(KERN_WARNING "cciss: fifo full\n"); | 2940 | dev_warn(&h->pdev->dev, "fifo full\n"); |
2914 | break; | 2941 | break; |
2915 | } | 2942 | } |
2916 | 2943 | ||
@@ -2926,7 +2953,7 @@ static void start_io(ctlr_info_t *h) | |||
2926 | } | 2953 | } |
2927 | } | 2954 | } |
2928 | 2955 | ||
2929 | /* Assumes that CCISS_LOCK(h->ctlr) is held. */ | 2956 | /* Assumes that h->lock is held. */ |
2930 | /* Zeros out the error record and then resends the command back */ | 2957 | /* Zeros out the error record and then resends the command back */ |
2931 | /* to the controller */ | 2958 | /* to the controller */ |
2932 | static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c) | 2959 | static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c) |
@@ -2967,7 +2994,7 @@ static inline int evaluate_target_status(ctlr_info_t *h, | |||
2967 | driver_byte = DRIVER_OK; | 2994 | driver_byte = DRIVER_OK; |
2968 | msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ | 2995 | msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ |
2969 | 2996 | ||
2970 | if (blk_pc_request(cmd->rq)) | 2997 | if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) |
2971 | host_byte = DID_PASSTHROUGH; | 2998 | host_byte = DID_PASSTHROUGH; |
2972 | else | 2999 | else |
2973 | host_byte = DID_OK; | 3000 | host_byte = DID_OK; |
@@ -2976,8 +3003,8 @@ static inline int evaluate_target_status(ctlr_info_t *h, | |||
2976 | host_byte, driver_byte); | 3003 | host_byte, driver_byte); |
2977 | 3004 | ||
2978 | if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { | 3005 | if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { |
2979 | if (!blk_pc_request(cmd->rq)) | 3006 | if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) |
2980 | printk(KERN_WARNING "cciss: cmd %p " | 3007 | dev_warn(&h->pdev->dev, "cmd %p " |
2981 | "has SCSI Status 0x%x\n", | 3008 | "has SCSI Status 0x%x\n", |
2982 | cmd, cmd->err_info->ScsiStatus); | 3009 | cmd, cmd->err_info->ScsiStatus); |
2983 | return error_value; | 3010 | return error_value; |
@@ -2986,17 +3013,19 @@ static inline int evaluate_target_status(ctlr_info_t *h, | |||
2986 | /* check the sense key */ | 3013 | /* check the sense key */ |
2987 | sense_key = 0xf & cmd->err_info->SenseInfo[2]; | 3014 | sense_key = 0xf & cmd->err_info->SenseInfo[2]; |
2988 | /* no status or recovered error */ | 3015 | /* no status or recovered error */ |
2989 | if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq)) | 3016 | if (((sense_key == 0x0) || (sense_key == 0x1)) && |
3017 | (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) | ||
2990 | error_value = 0; | 3018 | error_value = 0; |
2991 | 3019 | ||
2992 | if (check_for_unit_attention(h, cmd)) { | 3020 | if (check_for_unit_attention(h, cmd)) { |
2993 | *retry_cmd = !blk_pc_request(cmd->rq); | 3021 | *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); |
2994 | return 0; | 3022 | return 0; |
2995 | } | 3023 | } |
2996 | 3024 | ||
2997 | if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */ | 3025 | /* Not SG_IO or similar? */ |
3026 | if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { | ||
2998 | if (error_value != 0) | 3027 | if (error_value != 0) |
2999 | printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION" | 3028 | dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION" |
3000 | " sense key = 0x%x\n", cmd, sense_key); | 3029 | " sense key = 0x%x\n", cmd, sense_key); |
3001 | return error_value; | 3030 | return error_value; |
3002 | } | 3031 | } |
@@ -3036,90 +3065,97 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, | |||
3036 | rq->errors = evaluate_target_status(h, cmd, &retry_cmd); | 3065 | rq->errors = evaluate_target_status(h, cmd, &retry_cmd); |
3037 | break; | 3066 | break; |
3038 | case CMD_DATA_UNDERRUN: | 3067 | case CMD_DATA_UNDERRUN: |
3039 | if (blk_fs_request(cmd->rq)) { | 3068 | if (cmd->rq->cmd_type == REQ_TYPE_FS) { |
3040 | printk(KERN_WARNING "cciss: cmd %p has" | 3069 | dev_warn(&h->pdev->dev, "cmd %p has" |
3041 | " completed with data underrun " | 3070 | " completed with data underrun " |
3042 | "reported\n", cmd); | 3071 | "reported\n", cmd); |
3043 | cmd->rq->resid_len = cmd->err_info->ResidualCnt; | 3072 | cmd->rq->resid_len = cmd->err_info->ResidualCnt; |
3044 | } | 3073 | } |
3045 | break; | 3074 | break; |
3046 | case CMD_DATA_OVERRUN: | 3075 | case CMD_DATA_OVERRUN: |
3047 | if (blk_fs_request(cmd->rq)) | 3076 | if (cmd->rq->cmd_type == REQ_TYPE_FS) |
3048 | printk(KERN_WARNING "cciss: cmd %p has" | 3077 | dev_warn(&h->pdev->dev, "cmd %p has" |
3049 | " completed with data overrun " | 3078 | " completed with data overrun " |
3050 | "reported\n", cmd); | 3079 | "reported\n", cmd); |
3051 | break; | 3080 | break; |
3052 | case CMD_INVALID: | 3081 | case CMD_INVALID: |
3053 | printk(KERN_WARNING "cciss: cmd %p is " | 3082 | dev_warn(&h->pdev->dev, "cmd %p is " |
3054 | "reported invalid\n", cmd); | 3083 | "reported invalid\n", cmd); |
3055 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3084 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3056 | cmd->err_info->CommandStatus, DRIVER_OK, | 3085 | cmd->err_info->CommandStatus, DRIVER_OK, |
3057 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); | 3086 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3087 | DID_PASSTHROUGH : DID_ERROR); | ||
3058 | break; | 3088 | break; |
3059 | case CMD_PROTOCOL_ERR: | 3089 | case CMD_PROTOCOL_ERR: |
3060 | printk(KERN_WARNING "cciss: cmd %p has " | 3090 | dev_warn(&h->pdev->dev, "cmd %p has " |
3061 | "protocol error \n", cmd); | 3091 | "protocol error\n", cmd); |
3062 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3092 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3063 | cmd->err_info->CommandStatus, DRIVER_OK, | 3093 | cmd->err_info->CommandStatus, DRIVER_OK, |
3064 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); | 3094 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3095 | DID_PASSTHROUGH : DID_ERROR); | ||
3065 | break; | 3096 | break; |
3066 | case CMD_HARDWARE_ERR: | 3097 | case CMD_HARDWARE_ERR: |
3067 | printk(KERN_WARNING "cciss: cmd %p had " | 3098 | dev_warn(&h->pdev->dev, "cmd %p had " |
3068 | " hardware error\n", cmd); | 3099 | "hardware error\n", cmd); |
3069 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3100 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3070 | cmd->err_info->CommandStatus, DRIVER_OK, | 3101 | cmd->err_info->CommandStatus, DRIVER_OK, |
3071 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); | 3102 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3103 | DID_PASSTHROUGH : DID_ERROR); | ||
3072 | break; | 3104 | break; |
3073 | case CMD_CONNECTION_LOST: | 3105 | case CMD_CONNECTION_LOST: |
3074 | printk(KERN_WARNING "cciss: cmd %p had " | 3106 | dev_warn(&h->pdev->dev, "cmd %p had " |
3075 | "connection lost\n", cmd); | 3107 | "connection lost\n", cmd); |
3076 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3108 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3077 | cmd->err_info->CommandStatus, DRIVER_OK, | 3109 | cmd->err_info->CommandStatus, DRIVER_OK, |
3078 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); | 3110 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3111 | DID_PASSTHROUGH : DID_ERROR); | ||
3079 | break; | 3112 | break; |
3080 | case CMD_ABORTED: | 3113 | case CMD_ABORTED: |
3081 | printk(KERN_WARNING "cciss: cmd %p was " | 3114 | dev_warn(&h->pdev->dev, "cmd %p was " |
3082 | "aborted\n", cmd); | 3115 | "aborted\n", cmd); |
3083 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3116 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3084 | cmd->err_info->CommandStatus, DRIVER_OK, | 3117 | cmd->err_info->CommandStatus, DRIVER_OK, |
3085 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); | 3118 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3119 | DID_PASSTHROUGH : DID_ABORT); | ||
3086 | break; | 3120 | break; |
3087 | case CMD_ABORT_FAILED: | 3121 | case CMD_ABORT_FAILED: |
3088 | printk(KERN_WARNING "cciss: cmd %p reports " | 3122 | dev_warn(&h->pdev->dev, "cmd %p reports " |
3089 | "abort failed\n", cmd); | 3123 | "abort failed\n", cmd); |
3090 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3124 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3091 | cmd->err_info->CommandStatus, DRIVER_OK, | 3125 | cmd->err_info->CommandStatus, DRIVER_OK, |
3092 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); | 3126 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3127 | DID_PASSTHROUGH : DID_ERROR); | ||
3093 | break; | 3128 | break; |
3094 | case CMD_UNSOLICITED_ABORT: | 3129 | case CMD_UNSOLICITED_ABORT: |
3095 | printk(KERN_WARNING "cciss%d: unsolicited " | 3130 | dev_warn(&h->pdev->dev, "unsolicited " |
3096 | "abort %p\n", h->ctlr, cmd); | 3131 | "abort %p\n", cmd); |
3097 | if (cmd->retry_count < MAX_CMD_RETRIES) { | 3132 | if (cmd->retry_count < MAX_CMD_RETRIES) { |
3098 | retry_cmd = 1; | 3133 | retry_cmd = 1; |
3099 | printk(KERN_WARNING | 3134 | dev_warn(&h->pdev->dev, "retrying %p\n", cmd); |
3100 | "cciss%d: retrying %p\n", h->ctlr, cmd); | ||
3101 | cmd->retry_count++; | 3135 | cmd->retry_count++; |
3102 | } else | 3136 | } else |
3103 | printk(KERN_WARNING | 3137 | dev_warn(&h->pdev->dev, |
3104 | "cciss%d: %p retried too " | 3138 | "%p retried too many times\n", cmd); |
3105 | "many times\n", h->ctlr, cmd); | ||
3106 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3139 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3107 | cmd->err_info->CommandStatus, DRIVER_OK, | 3140 | cmd->err_info->CommandStatus, DRIVER_OK, |
3108 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); | 3141 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3142 | DID_PASSTHROUGH : DID_ABORT); | ||
3109 | break; | 3143 | break; |
3110 | case CMD_TIMEOUT: | 3144 | case CMD_TIMEOUT: |
3111 | printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd); | 3145 | dev_warn(&h->pdev->dev, "cmd %p timed out\n", cmd); |
3112 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3146 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3113 | cmd->err_info->CommandStatus, DRIVER_OK, | 3147 | cmd->err_info->CommandStatus, DRIVER_OK, |
3114 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); | 3148 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3149 | DID_PASSTHROUGH : DID_ERROR); | ||
3115 | break; | 3150 | break; |
3116 | default: | 3151 | default: |
3117 | printk(KERN_WARNING "cciss: cmd %p returned " | 3152 | dev_warn(&h->pdev->dev, "cmd %p returned " |
3118 | "unknown status %x\n", cmd, | 3153 | "unknown status %x\n", cmd, |
3119 | cmd->err_info->CommandStatus); | 3154 | cmd->err_info->CommandStatus); |
3120 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | 3155 | rq->errors = make_status_bytes(SAM_STAT_GOOD, |
3121 | cmd->err_info->CommandStatus, DRIVER_OK, | 3156 | cmd->err_info->CommandStatus, DRIVER_OK, |
3122 | blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); | 3157 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3158 | DID_PASSTHROUGH : DID_ERROR); | ||
3123 | } | 3159 | } |
3124 | 3160 | ||
3125 | after_error_processing: | 3161 | after_error_processing: |
@@ -3133,6 +3169,34 @@ after_error_processing: | |||
3133 | blk_complete_request(cmd->rq); | 3169 | blk_complete_request(cmd->rq); |
3134 | } | 3170 | } |
3135 | 3171 | ||
3172 | static inline u32 cciss_tag_contains_index(u32 tag) | ||
3173 | { | ||
3174 | #define DIRECT_LOOKUP_BIT 0x10 | ||
3175 | return tag & DIRECT_LOOKUP_BIT; | ||
3176 | } | ||
3177 | |||
3178 | static inline u32 cciss_tag_to_index(u32 tag) | ||
3179 | { | ||
3180 | #define DIRECT_LOOKUP_SHIFT 5 | ||
3181 | return tag >> DIRECT_LOOKUP_SHIFT; | ||
3182 | } | ||
3183 | |||
3184 | static inline u32 cciss_tag_discard_error_bits(u32 tag) | ||
3185 | { | ||
3186 | #define CCISS_ERROR_BITS 0x03 | ||
3187 | return tag & ~CCISS_ERROR_BITS; | ||
3188 | } | ||
3189 | |||
3190 | static inline void cciss_mark_tag_indexed(u32 *tag) | ||
3191 | { | ||
3192 | *tag |= DIRECT_LOOKUP_BIT; | ||
3193 | } | ||
3194 | |||
3195 | static inline void cciss_set_tag_index(u32 *tag, u32 index) | ||
3196 | { | ||
3197 | *tag |= (index << DIRECT_LOOKUP_SHIFT); | ||
3198 | } | ||
3199 | |||
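The five helpers above pin down the tag layout used for direct command lookup: the low two bits are reserved for controller error reporting, bit 4 (DIRECT_LOOKUP_BIT) marks a tag that carries a command-pool index, and the index itself lives at bit 5 and up. A minimal userspace sketch of the round trip, reusing the constants from the hunk; the test harness is illustrative only:

#include <assert.h>
#include <stdint.h>

#define DIRECT_LOOKUP_BIT   0x10
#define DIRECT_LOOKUP_SHIFT 5
#define CCISS_ERROR_BITS    0x03

int main(void)
{
	uint32_t tag = 0, raw;

	tag |= 42u << DIRECT_LOOKUP_SHIFT;	/* cciss_set_tag_index()    */
	tag |= DIRECT_LOOKUP_BIT;		/* cciss_mark_tag_indexed() */

	raw = tag | 0x01;	/* controller may set the low error bits */

	assert(raw & DIRECT_LOOKUP_BIT);		/* indexed tag     */
	assert(raw >> DIRECT_LOOKUP_SHIFT == 42);	/* index recovered */
	assert((raw & ~CCISS_ERROR_BITS) == tag);	/* errors stripped */
	return 0;
}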
3136 | /* | 3200 | /* |
3137 | * Get a request and submit it to the controller. | 3201 | * Get a request and submit it to the controller. |
3138 | */ | 3202 | */ |
@@ -3164,7 +3228,8 @@ static void do_cciss_request(struct request_queue *q) | |||
3164 | 3228 | ||
3165 | BUG_ON(creq->nr_phys_segments > h->maxsgentries); | 3229 | BUG_ON(creq->nr_phys_segments > h->maxsgentries); |
3166 | 3230 | ||
3167 | if ((c = cmd_alloc(h, 1)) == NULL) | 3231 | c = cmd_alloc(h); |
3232 | if (!c) | ||
3168 | goto full; | 3233 | goto full; |
3169 | 3234 | ||
3170 | blk_start_request(creq); | 3235 | blk_start_request(creq); |
@@ -3181,8 +3246,8 @@ static void do_cciss_request(struct request_queue *q) | |||
3181 | /* got command from pool, so use the command block index instead */ | 3246 | /* got command from pool, so use the command block index instead */ |
3182 | /* for direct lookups. */ | 3247 | /* for direct lookups. */ |
3183 | /* The first 2 bits are reserved for controller error reporting. */ | 3248 | /* The first 2 bits are reserved for controller error reporting. */ |
3184 | c->Header.Tag.lower = (c->cmdindex << 3); | 3249 | cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex); |
3185 | c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ | 3250 | cciss_mark_tag_indexed(&c->Header.Tag.lower); |
3186 | memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); | 3251 | memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); |
3187 | c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */ | 3252 | c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */ |
3188 | c->Request.Type.Type = TYPE_CMD; /* It is a command. */ | 3253 | c->Request.Type.Type = TYPE_CMD; /* It is a command. */ |
@@ -3193,11 +3258,8 @@ static void do_cciss_request(struct request_queue *q) | |||
3193 | c->Request.CDB[0] = | 3258 | c->Request.CDB[0] = |
3194 | (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; | 3259 | (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; |
3195 | start_blk = blk_rq_pos(creq); | 3260 | start_blk = blk_rq_pos(creq); |
3196 | #ifdef CCISS_DEBUG | 3261 | dev_dbg(&h->pdev->dev, "sector=%d nr_sectors=%d\n", |
3197 | printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", | ||
3198 | (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); | 3262 | (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); |
3199 | #endif /* CCISS_DEBUG */ | ||
3200 | |||
3201 | sg_init_table(tmp_sg, h->maxsgentries); | 3263 | sg_init_table(tmp_sg, h->maxsgentries); |
3202 | seg = blk_rq_map_sg(q, creq, tmp_sg); | 3264 | seg = blk_rq_map_sg(q, creq, tmp_sg); |
3203 | 3265 | ||
@@ -3237,17 +3299,18 @@ static void do_cciss_request(struct request_queue *q) | |||
3237 | if (seg > h->maxSG) | 3299 | if (seg > h->maxSG) |
3238 | h->maxSG = seg; | 3300 | h->maxSG = seg; |
3239 | 3301 | ||
3240 | #ifdef CCISS_DEBUG | 3302 | dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments " |
3241 | printk(KERN_DEBUG "cciss: Submitting %ld sectors in %d segments " | ||
3242 | "chained[%d]\n", | 3303 | "chained[%d]\n", |
3243 | blk_rq_sectors(creq), seg, chained); | 3304 | blk_rq_sectors(creq), seg, chained); |
3244 | #endif /* CCISS_DEBUG */ | ||
3245 | 3305 | ||
3246 | c->Header.SGList = c->Header.SGTotal = seg + chained; | 3306 | c->Header.SGTotal = seg + chained; |
3247 | if (seg > h->max_cmd_sgentries) | 3307 | if (seg <= h->max_cmd_sgentries) |
3308 | c->Header.SGList = c->Header.SGTotal; | ||
3309 | else | ||
3248 | c->Header.SGList = h->max_cmd_sgentries; | 3310 | c->Header.SGList = h->max_cmd_sgentries; |
3311 | set_performant_mode(h, c); | ||
3249 | 3312 | ||
3250 | if (likely(blk_fs_request(creq))) { | 3313 | if (likely(creq->cmd_type == REQ_TYPE_FS)) { |
3251 | if(h->cciss_read == CCISS_READ_10) { | 3314 | if(h->cciss_read == CCISS_READ_10) { |
3252 | c->Request.CDB[1] = 0; | 3315 | c->Request.CDB[1] = 0; |
3253 | c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ | 3316 | c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ |
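When a request's scatter-gather list no longer fits in the command block, the hunk above records the true element count (chain entry included) in SGTotal while capping SGList at max_cmd_sgentries, which tells the controller to follow a chain block for the remainder. A standalone sketch of that split, with local stand-ins for the h-> fields:

#include <stdint.h>

static void fill_sg_counts(unsigned int seg, unsigned int chained,
			   unsigned int max_cmd_sgentries,
			   uint16_t *sglist, uint16_t *sgtotal)
{
	/* SGTotal always advertises every element, chain entry included. */
	*sgtotal = seg + chained;
	/* SGList is capped at what the command block itself can hold. */
	if (seg <= max_cmd_sgentries)
		*sglist = *sgtotal;
	else
		*sglist = max_cmd_sgentries;
}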
@@ -3277,11 +3340,12 @@ static void do_cciss_request(struct request_queue *q) | |||
3277 | c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; | 3340 | c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; |
3278 | c->Request.CDB[14] = c->Request.CDB[15] = 0; | 3341 | c->Request.CDB[14] = c->Request.CDB[15] = 0; |
3279 | } | 3342 | } |
3280 | } else if (blk_pc_request(creq)) { | 3343 | } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { |
3281 | c->Request.CDBLen = creq->cmd_len; | 3344 | c->Request.CDBLen = creq->cmd_len; |
3282 | memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); | 3345 | memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); |
3283 | } else { | 3346 | } else { |
3284 | printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type); | 3347 | dev_warn(&h->pdev->dev, "bad request type %d\n", |
3348 | creq->cmd_type); | ||
3285 | BUG(); | 3349 | BUG(); |
3286 | } | 3350 | } |
3287 | 3351 | ||
@@ -3314,72 +3378,131 @@ static inline int interrupt_pending(ctlr_info_t *h) | |||
3314 | 3378 | ||
3315 | static inline long interrupt_not_for_us(ctlr_info_t *h) | 3379 | static inline long interrupt_not_for_us(ctlr_info_t *h) |
3316 | { | 3380 | { |
3317 | return (((h->access.intr_pending(h) == 0) || | 3381 | return ((h->access.intr_pending(h) == 0) || |
3318 | (h->interrupts_enabled == 0))); | 3382 | (h->interrupts_enabled == 0)); |
3319 | } | 3383 | } |
3320 | 3384 | ||
3321 | static irqreturn_t do_cciss_intr(int irq, void *dev_id) | 3385 | static inline int bad_tag(ctlr_info_t *h, u32 tag_index, |
3386 | u32 raw_tag) | ||
3322 | { | 3387 | { |
3323 | ctlr_info_t *h = dev_id; | 3388 | if (unlikely(tag_index >= h->nr_cmds)) { |
3389 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); | ||
3390 | return 1; | ||
3391 | } | ||
3392 | return 0; | ||
3393 | } | ||
3394 | |||
3395 | static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c, | ||
3396 | u32 raw_tag) | ||
3397 | { | ||
3398 | removeQ(c); | ||
3399 | if (likely(c->cmd_type == CMD_RWREQ)) | ||
3400 | complete_command(h, c, 0); | ||
3401 | else if (c->cmd_type == CMD_IOCTL_PEND) | ||
3402 | complete(c->waiting); | ||
3403 | #ifdef CONFIG_CISS_SCSI_TAPE | ||
3404 | else if (c->cmd_type == CMD_SCSI) | ||
3405 | complete_scsi_command(c, 0, raw_tag); | ||
3406 | #endif | ||
3407 | } | ||
3408 | |||
3409 | static inline u32 next_command(ctlr_info_t *h) | ||
3410 | { | ||
3411 | u32 a; | ||
3412 | |||
3413 | if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) | ||
3414 | return h->access.command_completed(h); | ||
3415 | |||
3416 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { | ||
3417 | a = *(h->reply_pool_head); /* Next cmd in ring buffer */ | ||
3418 | (h->reply_pool_head)++; | ||
3419 | h->commands_outstanding--; | ||
3420 | } else { | ||
3421 | a = FIFO_EMPTY; | ||
3422 | } | ||
3423 | /* Check for wraparound */ | ||
3424 | if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { | ||
3425 | h->reply_pool_head = h->reply_pool; | ||
3426 | h->reply_pool_wraparound ^= 1; | ||
3427 | } | ||
3428 | return a; | ||
3429 | } | ||
3430 | |||
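next_command() above drains the performant-mode reply ring without any hardware head or tail register: the controller toggles the low bit of every tag it writes on each pass through the buffer, and the consumer keeps a parity flag recording which pass it expects. A self-contained sketch of the consumer side; the struct and sentinel are local assumptions standing in for the ctlr_info_t fields:

#include <stdint.h>

#define FIFO_EMPTY 0xffffffffUL		/* assumed sentinel value */

struct reply_ring {
	uint64_t *pool;		/* h->reply_pool            */
	uint64_t *head;		/* h->reply_pool_head       */
	unsigned int max;	/* h->max_commands          */
	unsigned int wrap;	/* h->reply_pool_wraparound */
};

/* A slot holds a fresh tag when its low (cycle) bit matches the
 * expected parity; hitting the end of the pool resets the head and
 * flips the parity, exactly as in next_command() above. */
static uint64_t ring_pop(struct reply_ring *r)
{
	uint64_t a = FIFO_EMPTY;

	if ((*r->head & 1) == r->wrap)
		a = *r->head++;
	if (r->head == r->pool + r->max) {
		r->head = r->pool;
		r->wrap ^= 1;
	}
	return a;
}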
3431 | /* process completion of an indexed ("direct lookup") command */ | ||
3432 | static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) | ||
3433 | { | ||
3434 | u32 tag_index; | ||
3324 | CommandList_struct *c; | 3435 | CommandList_struct *c; |
3436 | |||
3437 | tag_index = cciss_tag_to_index(raw_tag); | ||
3438 | if (bad_tag(h, tag_index, raw_tag)) | ||
3439 | return next_command(h); | ||
3440 | c = h->cmd_pool + tag_index; | ||
3441 | finish_cmd(h, c, raw_tag); | ||
3442 | return next_command(h); | ||
3443 | } | ||
3444 | |||
3445 | /* process completion of a non-indexed command */ | ||
3446 | static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) | ||
3447 | { | ||
3448 | u32 tag; | ||
3449 | CommandList_struct *c = NULL; | ||
3450 | struct hlist_node *tmp; | ||
3451 | __u32 busaddr_masked, tag_masked; | ||
3452 | |||
3453 | tag = cciss_tag_discard_error_bits(raw_tag); | ||
3454 | hlist_for_each_entry(c, tmp, &h->cmpQ, list) { | ||
3455 | busaddr_masked = cciss_tag_discard_error_bits(c->busaddr); | ||
3456 | tag_masked = cciss_tag_discard_error_bits(tag); | ||
3457 | if (busaddr_masked == tag_masked) { | ||
3458 | finish_cmd(h, c, raw_tag); | ||
3459 | return next_command(h); | ||
3460 | } | ||
3461 | } | ||
3462 | bad_tag(h, h->nr_cmds + 1, raw_tag); | ||
3463 | return next_command(h); | ||
3464 | } | ||
3465 | |||
3466 | static irqreturn_t do_cciss_intx(int irq, void *dev_id) | ||
3467 | { | ||
3468 | ctlr_info_t *h = dev_id; | ||
3325 | unsigned long flags; | 3469 | unsigned long flags; |
3326 | __u32 a, a1, a2; | 3470 | u32 raw_tag; |
3327 | 3471 | ||
3328 | if (interrupt_not_for_us(h)) | 3472 | if (interrupt_not_for_us(h)) |
3329 | return IRQ_NONE; | 3473 | return IRQ_NONE; |
3330 | /* | 3474 | spin_lock_irqsave(&h->lock, flags); |
3331 | * If there are completed commands in the completion queue, | ||
3332 | * we had better do something about it. | ||
3333 | */ | ||
3334 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | ||
3335 | while (interrupt_pending(h)) { | 3475 | while (interrupt_pending(h)) { |
3336 | while ((a = get_next_completion(h)) != FIFO_EMPTY) { | 3476 | raw_tag = get_next_completion(h); |
3337 | a1 = a; | 3477 | while (raw_tag != FIFO_EMPTY) { |
3338 | if ((a & 0x04)) { | 3478 | if (cciss_tag_contains_index(raw_tag)) |
3339 | a2 = (a >> 3); | 3479 | raw_tag = process_indexed_cmd(h, raw_tag); |
3340 | if (a2 >= h->nr_cmds) { | 3480 | else |
3341 | printk(KERN_WARNING | 3481 | raw_tag = process_nonindexed_cmd(h, raw_tag); |
3342 | "cciss: controller cciss%d failed, stopping.\n", | ||
3343 | h->ctlr); | ||
3344 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
3345 | fail_all_cmds(h->ctlr); | ||
3346 | return IRQ_HANDLED; | ||
3347 | } | ||
3348 | |||
3349 | c = h->cmd_pool + a2; | ||
3350 | a = c->busaddr; | ||
3351 | |||
3352 | } else { | ||
3353 | struct hlist_node *tmp; | ||
3354 | |||
3355 | a &= ~3; | ||
3356 | c = NULL; | ||
3357 | hlist_for_each_entry(c, tmp, &h->cmpQ, list) { | ||
3358 | if (c->busaddr == a) | ||
3359 | break; | ||
3360 | } | ||
3361 | } | ||
3362 | /* | ||
3363 | * If we've found the command, take it off the | ||
3364 | * completion Q and free it | ||
3365 | */ | ||
3366 | if (c && c->busaddr == a) { | ||
3367 | removeQ(c); | ||
3368 | if (c->cmd_type == CMD_RWREQ) { | ||
3369 | complete_command(h, c, 0); | ||
3370 | } else if (c->cmd_type == CMD_IOCTL_PEND) { | ||
3371 | complete(c->waiting); | ||
3372 | } | ||
3373 | # ifdef CONFIG_CISS_SCSI_TAPE | ||
3374 | else if (c->cmd_type == CMD_SCSI) | ||
3375 | complete_scsi_command(c, 0, a1); | ||
3376 | # endif | ||
3377 | continue; | ||
3378 | } | ||
3379 | } | 3482 | } |
3380 | } | 3483 | } |
3484 | spin_unlock_irqrestore(&h->lock, flags); | ||
3485 | return IRQ_HANDLED; | ||
3486 | } | ||
3381 | 3487 | ||
3382 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 3488 | /* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never |
3489 | * check the interrupt pending register because it is not set. | ||
3490 | */ | ||
3491 | static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id) | ||
3492 | { | ||
3493 | ctlr_info_t *h = dev_id; | ||
3494 | unsigned long flags; | ||
3495 | u32 raw_tag; | ||
3496 | |||
3497 | spin_lock_irqsave(&h->lock, flags); | ||
3498 | raw_tag = get_next_completion(h); | ||
3499 | while (raw_tag != FIFO_EMPTY) { | ||
3500 | if (cciss_tag_contains_index(raw_tag)) | ||
3501 | raw_tag = process_indexed_cmd(h, raw_tag); | ||
3502 | else | ||
3503 | raw_tag = process_nonindexed_cmd(h, raw_tag); | ||
3504 | } | ||
3505 | spin_unlock_irqrestore(&h->lock, flags); | ||
3383 | return IRQ_HANDLED; | 3506 | return IRQ_HANDLED; |
3384 | } | 3507 | } |
3385 | 3508 | ||
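do_cciss_intx() and do_cciss_msix_intr() above differ only in the interrupt_pending() gate, which MSI/MSI-X hardware never asserts; the completion-draining core is identical in both. A hypothetical helper that the two loops reduce to (the driver keeps the loop inline; this only shows the shared shape):

static void drain_completions(ctlr_info_t *h)
{
	u32 raw_tag = get_next_completion(h);

	while (raw_tag != FIFO_EMPTY) {
		if (cciss_tag_contains_index(raw_tag))
			raw_tag = process_indexed_cmd(h, raw_tag);
		else
			raw_tag = process_nonindexed_cmd(h, raw_tag);
	}
}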
@@ -3511,18 +3634,17 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) | |||
3511 | 3634 | ||
3512 | switch (c->err_info->SenseInfo[12]) { | 3635 | switch (c->err_info->SenseInfo[12]) { |
3513 | case STATE_CHANGED: | 3636 | case STATE_CHANGED: |
3514 | printk(KERN_WARNING "cciss%d: a state change " | 3637 | dev_warn(&h->pdev->dev, "a state change " |
3515 | "detected, command retried\n", h->ctlr); | 3638 | "detected, command retried\n"); |
3516 | return 1; | 3639 | return 1; |
3517 | break; | 3640 | break; |
3518 | case LUN_FAILED: | 3641 | case LUN_FAILED: |
3519 | printk(KERN_WARNING "cciss%d: LUN failure " | 3642 | dev_warn(&h->pdev->dev, "LUN failure " |
3520 | "detected, action required\n", h->ctlr); | 3643 | "detected, action required\n"); |
3521 | return 1; | 3644 | return 1; |
3522 | break; | 3645 | break; |
3523 | case REPORT_LUNS_CHANGED: | 3646 | case REPORT_LUNS_CHANGED: |
3524 | printk(KERN_WARNING "cciss%d: report LUN data " | 3647 | dev_warn(&h->pdev->dev, "report LUN data changed\n"); |
3525 | "changed\n", h->ctlr); | ||
3526 | /* | 3648 | /* |
3527 | * Here, we could call add_to_scan_list and wake up the scan thread, | 3649 | * Here, we could call add_to_scan_list and wake up the scan thread, |
3528 | * except that it's quite likely that we will get more than one | 3650 | * except that it's quite likely that we will get more than one |
@@ -3542,19 +3664,18 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) | |||
3542 | return 1; | 3664 | return 1; |
3543 | break; | 3665 | break; |
3544 | case POWER_OR_RESET: | 3666 | case POWER_OR_RESET: |
3545 | printk(KERN_WARNING "cciss%d: a power on " | 3667 | dev_warn(&h->pdev->dev, |
3546 | "or device reset detected\n", h->ctlr); | 3668 | "a power on or device reset detected\n"); |
3547 | return 1; | 3669 | return 1; |
3548 | break; | 3670 | break; |
3549 | case UNIT_ATTENTION_CLEARED: | 3671 | case UNIT_ATTENTION_CLEARED: |
3550 | printk(KERN_WARNING "cciss%d: unit attention " | 3672 | dev_warn(&h->pdev->dev, |
3551 | "cleared by another initiator\n", h->ctlr); | 3673 | "unit attention cleared by another initiator\n"); |
3552 | return 1; | 3674 | return 1; |
3553 | break; | 3675 | break; |
3554 | default: | 3676 | default: |
3555 | printk(KERN_WARNING "cciss%d: unknown " | 3677 | dev_warn(&h->pdev->dev, "unknown unit attention detected\n"); |
3556 | "unit attention detected\n", h->ctlr); | 3678 | return 1; |
3557 | return 1; | ||
3558 | } | 3679 | } |
3559 | } | 3680 | } |
3560 | 3681 | ||
@@ -3563,39 +3684,41 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) | |||
3563 | * the io functions. | 3684 | * the io functions. |
3564 | * This is for debug only. | 3685 | * This is for debug only. |
3565 | */ | 3686 | */ |
3566 | #ifdef CCISS_DEBUG | 3687 | static void print_cfg_table(ctlr_info_t *h) |
3567 | static void print_cfg_table(CfgTable_struct *tb) | ||
3568 | { | 3688 | { |
3569 | int i; | 3689 | int i; |
3570 | char temp_name[17]; | 3690 | char temp_name[17]; |
3691 | CfgTable_struct *tb = h->cfgtable; | ||
3571 | 3692 | ||
3572 | printk("Controller Configuration information\n"); | 3693 | dev_dbg(&h->pdev->dev, "Controller Configuration information\n"); |
3573 | printk("------------------------------------\n"); | 3694 | dev_dbg(&h->pdev->dev, "------------------------------------\n"); |
3574 | for (i = 0; i < 4; i++) | 3695 | for (i = 0; i < 4; i++) |
3575 | temp_name[i] = readb(&(tb->Signature[i])); | 3696 | temp_name[i] = readb(&(tb->Signature[i])); |
3576 | temp_name[4] = '\0'; | 3697 | temp_name[4] = '\0'; |
3577 | printk(" Signature = %s\n", temp_name); | 3698 | dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name); |
3578 | printk(" Spec Number = %d\n", readl(&(tb->SpecValence))); | 3699 | dev_dbg(&h->pdev->dev, " Spec Number = %d\n", |
3579 | printk(" Transport methods supported = 0x%x\n", | 3700 | readl(&(tb->SpecValence))); |
3701 | dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n", | ||
3580 | readl(&(tb->TransportSupport))); | 3702 | readl(&(tb->TransportSupport))); |
3581 | printk(" Transport methods active = 0x%x\n", | 3703 | dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n", |
3582 | readl(&(tb->TransportActive))); | 3704 | readl(&(tb->TransportActive))); |
3583 | printk(" Requested transport Method = 0x%x\n", | 3705 | dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n", |
3584 | readl(&(tb->HostWrite.TransportRequest))); | 3706 | readl(&(tb->HostWrite.TransportRequest))); |
3585 | printk(" Coalesce Interrupt Delay = 0x%x\n", | 3707 | dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n", |
3586 | readl(&(tb->HostWrite.CoalIntDelay))); | 3708 | readl(&(tb->HostWrite.CoalIntDelay))); |
3587 | printk(" Coalesce Interrupt Count = 0x%x\n", | 3709 | dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n", |
3588 | readl(&(tb->HostWrite.CoalIntCount))); | 3710 | readl(&(tb->HostWrite.CoalIntCount))); |
3589 | printk(" Max outstanding commands = 0x%d\n", | 3711 | dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%d\n", |
3590 | readl(&(tb->CmdsOutMax))); | 3712 | readl(&(tb->CmdsOutMax))); |
3591 | printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes))); | 3713 | dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n", |
3714 | readl(&(tb->BusTypes))); | ||
3592 | for (i = 0; i < 16; i++) | 3715 | for (i = 0; i < 16; i++) |
3593 | temp_name[i] = readb(&(tb->ServerName[i])); | 3716 | temp_name[i] = readb(&(tb->ServerName[i])); |
3594 | temp_name[16] = '\0'; | 3717 | temp_name[16] = '\0'; |
3595 | printk(" Server Name = %s\n", temp_name); | 3718 | dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name); |
3596 | printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat))); | 3719 | dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n", |
3720 | readl(&(tb->HeartBeat))); | ||
3597 | } | 3721 | } |
3598 | #endif /* CCISS_DEBUG */ | ||
3599 | 3722 | ||
3600 | static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | 3723 | static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) |
3601 | { | 3724 | { |
@@ -3619,7 +3742,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |||
3619 | offset += 8; | 3742 | offset += 8; |
3620 | break; | 3743 | break; |
3621 | default: /* reserved in PCI 2.2 */ | 3744 | default: /* reserved in PCI 2.2 */ |
3622 | printk(KERN_WARNING | 3745 | dev_warn(&pdev->dev, |
3623 | "Base address is invalid\n"); | 3746 | "Base address is invalid\n"); |
3624 | return -1; | 3747 | return -1; |
3625 | break; | 3748 | break; |
@@ -3631,12 +3754,182 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |||
3631 | return -1; | 3754 | return -1; |
3632 | } | 3755 | } |
3633 | 3756 | ||
3757 | /* Fill in bucket_map[], given nsgs (the max number of | ||
3758 | * scatter gather elements supported) and bucket[], | ||
3759 | * which is an array of 8 integers. The bucket[] array | ||
3760 | * contains 8 different DMA transfer sizes (in 16 | ||
3761 | * byte increments) which the controller uses to fetch | ||
3762 | * commands. This function fills in bucket_map[], which | ||
3763 | * maps a given number of scatter gather elements to one of | ||
3764 | * the 8 DMA transfer sizes. The point of it is to allow the | ||
3765 | * controller to only do as much DMA as needed to fetch the | ||
3766 | * command, with the DMA transfer size encoded in the lower | ||
3767 | * bits of the command address. | ||
3768 | */ | ||
3769 | static void calc_bucket_map(int bucket[], int num_buckets, | ||
3770 | int nsgs, int *bucket_map) | ||
3771 | { | ||
3772 | int i, j, b, size; | ||
3773 | |||
3774 | /* even a command with 0 SGs requires 4 blocks */ | ||
3775 | #define MINIMUM_TRANSFER_BLOCKS 4 | ||
3776 | #define NUM_BUCKETS 8 | ||
3777 | /* Note, bucket_map must have nsgs+1 entries. */ | ||
3778 | for (i = 0; i <= nsgs; i++) { | ||
3779 | /* Compute size of a command with i SG entries */ | ||
3780 | size = i + MINIMUM_TRANSFER_BLOCKS; | ||
3781 | b = num_buckets; /* Assume the biggest bucket */ | ||
3782 | /* Find the bucket that is just big enough */ | ||
3783 | for (j = 0; j < 8; j++) { | ||
3784 | if (bucket[j] >= size) { | ||
3785 | b = j; | ||
3786 | break; | ||
3787 | } | ||
3788 | } | ||
3789 | /* for a command with i SG entries, use bucket b. */ | ||
3790 | bucket_map[i] = b; | ||
3791 | } | ||
3792 | } | ||
3793 | |||
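To make the mapping concrete, here is a sketch of calling calc_bucket_map() with the bft[] table from cciss_enter_performant_mode() below, assuming MAXSGENTRIES is 32 (the real value lives in the driver headers):

static void bucket_map_example(void)
{
	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, 32 + 4 };
	int bucket_map[32 + 1];		/* one slot per SG count, 0..nsgs */

	calc_bucket_map(bft, 8, 32, bucket_map);
	/* A command with 3 SG entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7
	 * sixteen-byte blocks; the smallest bucket holding at least 7 is
	 * bft[2] = 8, so bucket_map[3] == 2 and the controller fetches
	 * 8 * 16 = 128 bytes for that command. */
}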
3794 | static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h) | ||
3795 | { | ||
3796 | int i; | ||
3797 | |||
3798 | /* under certain very rare conditions, this can take a while. ||
3799 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | ||
3800 | * as we enter this code.) */ | ||
3801 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | ||
3802 | if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) | ||
3803 | break; | ||
3804 | msleep(10); | ||
3805 | } | ||
3806 | } | ||
3807 | |||
3808 | static __devinit void cciss_enter_performant_mode(ctlr_info_t *h) | ||
3809 | { | ||
3810 | /* This is a bit complicated. There are 8 registers on | ||
3811 | * the controller which we write to in order to tell it the 8 ||
3812 | * different sizes of commands there may be. It's a way of ||
3813 | * reducing the DMA done to fetch each command. Encoded into | ||
3814 | * each command's tag are 3 bits which communicate to the controller | ||
3815 | * which of the eight sizes that command fits within. The size of | ||
3816 | * each command depends on how many scatter gather entries there are. | ||
3817 | * Each SG entry requires 16 bytes. The eight registers are programmed | ||
3818 | * with the number of 16-byte blocks a command of that size requires. | ||
3819 | * The smallest command possible requires 5 such 16-byte blocks. ||
3820 | * The largest command possible requires MAXSGENTRIES + 4 16-byte ||
3821 | * blocks. Note, this only extends to the SG entries contained | ||
3822 | * within the command block, and does not extend to chained blocks | ||
3823 | * of SG elements. bft[] contains the eight values we write to | ||
3824 | * the registers. They are not evenly distributed, but have more | ||
3825 | * sizes for small commands, and fewer sizes for larger commands. | ||
3826 | */ | ||
3827 | __u32 trans_offset; | ||
3828 | int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; | ||
3829 | /* | ||
3830 | * 5 = 1 s/g entry or 4k | ||
3831 | * 6 = 2 s/g entry or 8k | ||
3832 | * 8 = 4 s/g entry or 16k | ||
3833 | * 10 = 6 s/g entry or 24k | ||
3834 | */ | ||
3835 | unsigned long register_value; | ||
3836 | BUILD_BUG_ON(28 > MAXSGENTRIES + 4); | ||
3837 | |||
3838 | h->reply_pool_wraparound = 1; /* spec: init to 1 */ | ||
3839 | |||
3840 | /* Controller spec: zero out this buffer. */ | ||
3841 | memset(h->reply_pool, 0, h->max_commands * sizeof(__u64)); | ||
3842 | h->reply_pool_head = h->reply_pool; | ||
3843 | |||
3844 | trans_offset = readl(&(h->cfgtable->TransMethodOffset)); | ||
3845 | calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries, | ||
3846 | h->blockFetchTable); | ||
3847 | writel(bft[0], &h->transtable->BlockFetch0); | ||
3848 | writel(bft[1], &h->transtable->BlockFetch1); | ||
3849 | writel(bft[2], &h->transtable->BlockFetch2); | ||
3850 | writel(bft[3], &h->transtable->BlockFetch3); | ||
3851 | writel(bft[4], &h->transtable->BlockFetch4); | ||
3852 | writel(bft[5], &h->transtable->BlockFetch5); | ||
3853 | writel(bft[6], &h->transtable->BlockFetch6); | ||
3854 | writel(bft[7], &h->transtable->BlockFetch7); | ||
3855 | |||
3856 | /* size of controller ring buffer */ | ||
3857 | writel(h->max_commands, &h->transtable->RepQSize); | ||
3858 | writel(1, &h->transtable->RepQCount); | ||
3859 | writel(0, &h->transtable->RepQCtrAddrLow32); | ||
3860 | writel(0, &h->transtable->RepQCtrAddrHigh32); | ||
3861 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); | ||
3862 | writel(0, &h->transtable->RepQAddr0High32); | ||
3863 | writel(CFGTBL_Trans_Performant, | ||
3864 | &(h->cfgtable->HostWrite.TransportRequest)); | ||
3865 | |||
3866 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | ||
3867 | cciss_wait_for_mode_change_ack(h); | ||
3868 | register_value = readl(&(h->cfgtable->TransportActive)); | ||
3869 | if (!(register_value & CFGTBL_Trans_Performant)) | ||
3870 | dev_warn(&h->pdev->dev, "unable to get board into" ||
3871 | " performant mode\n"); | ||
3872 | } | ||
3873 | |||
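set_performant_mode(), called from do_cciss_request() earlier, is not part of this hunk. Given the comment above (three size bits encoded into each command's tag), a plausible shape would be the following; the exact body is an assumption, not shown in this diff:

static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
{
	/* Assumed layout: the low bit flags a performant-mode submission,
	 * the next bits carry the block-fetch bucket chosen for this
	 * command's in-block SG count. */
	if (likely(h->transMethod == CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}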
3874 | static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h) | ||
3875 | { | ||
3876 | __u32 trans_support; | ||
3877 | |||
3878 | dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n"); | ||
3879 | /* Attempt to put controller into performant mode if supported */ | ||
3880 | /* Does board support performant mode? */ | ||
3881 | trans_support = readl(&(h->cfgtable->TransportSupport)); | ||
3882 | if (!(trans_support & PERFORMANT_MODE)) | ||
3883 | return; | ||
3884 | |||
3885 | dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n"); | ||
3886 | /* Performant mode demands commands on a 32-byte boundary; ||
3887 | * pci_alloc_consistent aligns on page boundaries already. ||
3888 | * Just need to check if divisible by 32. ||
3889 | */ | ||
3890 | if ((sizeof(CommandList_struct) % 32) != 0) { | ||
3891 | dev_warn(&h->pdev->dev, ||
3892 | "command size[%d] not divisible by 32, " ||
3893 | "no performant mode\n", ||
3894 | (int)sizeof(CommandList_struct)); ||
3895 | return; | ||
3896 | } | ||
3897 | |||
3898 | /* Performant mode ring buffer and supporting data structures */ | ||
3899 | h->reply_pool = (__u64 *)pci_alloc_consistent( | ||
3900 | h->pdev, h->max_commands * sizeof(__u64), | ||
3901 | &(h->reply_pool_dhandle)); | ||
3902 | |||
3903 | /* Need a block fetch table for performant mode */ | ||
3904 | h->blockFetchTable = kmalloc(((h->maxsgentries+1) * | ||
3905 | sizeof(__u32)), GFP_KERNEL); | ||
3906 | |||
3907 | if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) | ||
3908 | goto clean_up; | ||
3909 | |||
3910 | cciss_enter_performant_mode(h); | ||
3911 | |||
3912 | /* Change the access methods to the performant access methods */ | ||
3913 | h->access = SA5_performant_access; | ||
3914 | h->transMethod = CFGTBL_Trans_Performant; | ||
3915 | |||
3916 | return; | ||
3917 | clean_up: | ||
3918 | kfree(h->blockFetchTable); | ||
3919 | if (h->reply_pool) | ||
3920 | pci_free_consistent(h->pdev, | ||
3921 | h->max_commands * sizeof(__u64), | ||
3922 | h->reply_pool, | ||
3923 | h->reply_pool_dhandle); | ||
3924 | return; | ||
3925 | |||
3926 | } /* cciss_put_controller_into_performant_mode */ | ||
3927 | |||
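The % 32 test in cciss_put_controller_into_performant_mode() is a run-time guard over a compile-time constant, so the same invariant could be enforced at build time. A sketch of that alternative, not what the driver does:

static void __devinit cciss_check_cmdlist_alignment(void)
{
	/* Trips the build if CommandList_struct is not a multiple of 32
	 * bytes; would need to be called from the probe path so that the
	 * check is actually compiled in. */
	BUILD_BUG_ON(sizeof(CommandList_struct) % 32);
}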
3634 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on | 3928 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on |
3635 | * controllers that are capable. If not, we use IO-APIC mode. | 3929 | * controllers that are capable. If not, we use IO-APIC mode. |
3636 | */ | 3930 | */ |
3637 | 3931 | ||
3638 | static void __devinit cciss_interrupt_mode(ctlr_info_t *c, | 3932 | static void __devinit cciss_interrupt_mode(ctlr_info_t *h) |
3639 | struct pci_dev *pdev, __u32 board_id) | ||
3640 | { | 3933 | { |
3641 | #ifdef CONFIG_PCI_MSI | 3934 | #ifdef CONFIG_PCI_MSI |
3642 | int err; | 3935 | int err; |
@@ -3645,268 +3938,283 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c, | |||
3645 | }; | 3938 | }; |
3646 | 3939 | ||
3647 | /* Some boards advertise MSI but don't really support it */ | 3940 | /* Some boards advertise MSI but don't really support it */ |
3648 | if ((board_id == 0x40700E11) || | 3941 | if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || |
3649 | (board_id == 0x40800E11) || | 3942 | (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) |
3650 | (board_id == 0x40820E11) || (board_id == 0x40830E11)) | ||
3651 | goto default_int_mode; | 3943 | goto default_int_mode; |
3652 | 3944 | ||
3653 | if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { | 3945 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { |
3654 | err = pci_enable_msix(pdev, cciss_msix_entries, 4); | 3946 | err = pci_enable_msix(h->pdev, cciss_msix_entries, 4); |
3655 | if (!err) { | 3947 | if (!err) { |
3656 | c->intr[0] = cciss_msix_entries[0].vector; | 3948 | h->intr[0] = cciss_msix_entries[0].vector; |
3657 | c->intr[1] = cciss_msix_entries[1].vector; | 3949 | h->intr[1] = cciss_msix_entries[1].vector; |
3658 | c->intr[2] = cciss_msix_entries[2].vector; | 3950 | h->intr[2] = cciss_msix_entries[2].vector; |
3659 | c->intr[3] = cciss_msix_entries[3].vector; | 3951 | h->intr[3] = cciss_msix_entries[3].vector; |
3660 | c->msix_vector = 1; | 3952 | h->msix_vector = 1; |
3661 | return; | 3953 | return; |
3662 | } | 3954 | } |
3663 | if (err > 0) { | 3955 | if (err > 0) { |
3664 | printk(KERN_WARNING "cciss: only %d MSI-X vectors " | 3956 | dev_warn(&h->pdev->dev, |
3665 | "available\n", err); | 3957 | "only %d MSI-X vectors available\n", err); |
3666 | goto default_int_mode; | 3958 | goto default_int_mode; |
3667 | } else { | 3959 | } else { |
3668 | printk(KERN_WARNING "cciss: MSI-X init failed %d\n", | 3960 | dev_warn(&h->pdev->dev, |
3669 | err); | 3961 | "MSI-X init failed %d\n", err); |
3670 | goto default_int_mode; | 3962 | goto default_int_mode; |
3671 | } | 3963 | } |
3672 | } | 3964 | } |
3673 | if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) { | 3965 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { |
3674 | if (!pci_enable_msi(pdev)) { | 3966 | if (!pci_enable_msi(h->pdev)) |
3675 | c->msi_vector = 1; | 3967 | h->msi_vector = 1; |
3676 | } else { | 3968 | else |
3677 | printk(KERN_WARNING "cciss: MSI init failed\n"); | 3969 | dev_warn(&h->pdev->dev, "MSI init failed\n"); |
3678 | } | ||
3679 | } | 3970 | } |
3680 | default_int_mode: | 3971 | default_int_mode: |
3681 | #endif /* CONFIG_PCI_MSI */ | 3972 | #endif /* CONFIG_PCI_MSI */ |
3682 | /* if we get here we're going to use the default interrupt mode */ | 3973 | /* if we get here we're going to use the default interrupt mode */ |
3683 | c->intr[SIMPLE_MODE_INT] = pdev->irq; | 3974 | h->intr[PERF_MODE_INT] = h->pdev->irq; |
3684 | return; | 3975 | return; |
3685 | } | 3976 | } |
3686 | 3977 | ||
3687 | static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | 3978 | static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id) |
3688 | { | 3979 | { |
3689 | ushort subsystem_vendor_id, subsystem_device_id, command; | 3980 | int i; |
3690 | __u32 board_id, scratchpad = 0; | 3981 | u32 subsystem_vendor_id, subsystem_device_id; |
3691 | __u64 cfg_offset; | ||
3692 | __u32 cfg_base_addr; | ||
3693 | __u64 cfg_base_addr_index; | ||
3694 | int i, prod_index, err; | ||
3695 | 3982 | ||
3696 | subsystem_vendor_id = pdev->subsystem_vendor; | 3983 | subsystem_vendor_id = pdev->subsystem_vendor; |
3697 | subsystem_device_id = pdev->subsystem_device; | 3984 | subsystem_device_id = pdev->subsystem_device; |
3698 | board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | | 3985 | *board_id = ((subsystem_device_id << 16) & 0xffff0000) | |
3699 | subsystem_vendor_id); | 3986 | subsystem_vendor_id; |
3700 | 3987 | ||
3701 | for (i = 0; i < ARRAY_SIZE(products); i++) { | 3988 | for (i = 0; i < ARRAY_SIZE(products); i++) { |
3702 | /* Stand aside for hpsa driver on request */ | 3989 | /* Stand aside for hpsa driver on request */ |
3703 | if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY) | 3990 | if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY) |
3704 | return -ENODEV; | 3991 | return -ENODEV; |
3705 | if (board_id == products[i].board_id) | 3992 | if (*board_id == products[i].board_id) |
3706 | break; | 3993 | return i; |
3707 | } | ||
3708 | prod_index = i; | ||
3709 | if (prod_index == ARRAY_SIZE(products)) { | ||
3710 | dev_warn(&pdev->dev, | ||
3711 | "unrecognized board ID: 0x%08lx, ignoring.\n", | ||
3712 | (unsigned long) board_id); | ||
3713 | return -ENODEV; | ||
3714 | } | 3994 | } |
3995 | dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", | ||
3996 | *board_id); | ||
3997 | return -ENODEV; | ||
3998 | } | ||
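A worked example of the packing in cciss_lookup_board_id(): the first entry on the MSI blacklist in cciss_interrupt_mode() above corresponds to PCI subsystem vendor 0x0E11 and subsystem device 0x4070:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t vendor = 0x0E11, device = 0x4070;
	uint32_t board_id = ((device << 16) & 0xffff0000) | vendor;

	assert(board_id == 0x40700E11);	/* matches the blacklist entry */
	return 0;
}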
3715 | 3999 | ||
3716 | /* check to see if controller has been disabled */ | 4000 | static inline bool cciss_board_disabled(ctlr_info_t *h) |
3717 | /* BEFORE trying to enable it */ | 4001 | { |
3718 | (void)pci_read_config_word(pdev, PCI_COMMAND, &command); | 4002 | u16 command; |
3719 | if (!(command & 0x02)) { | ||
3720 | printk(KERN_WARNING | ||
3721 | "cciss: controller appears to be disabled\n"); | ||
3722 | return -ENODEV; | ||
3723 | } | ||
3724 | 4003 | ||
3725 | err = pci_enable_device(pdev); | 4004 | (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command); |
3726 | if (err) { | 4005 | return ((command & PCI_COMMAND_MEMORY) == 0); |
3727 | printk(KERN_ERR "cciss: Unable to Enable PCI device\n"); | 4006 | } |
3728 | return err; | ||
3729 | } | ||
3730 | 4007 | ||
3731 | err = pci_request_regions(pdev, "cciss"); | 4008 | static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, |
3732 | if (err) { | 4009 | unsigned long *memory_bar) |
3733 | printk(KERN_ERR "cciss: Cannot obtain PCI resources, " | 4010 | { |
3734 | "aborting\n"); | 4011 | int i; |
3735 | return err; | ||
3736 | } | ||
3737 | 4012 | ||
3738 | #ifdef CCISS_DEBUG | 4013 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
3739 | printk("command = %x\n", command); | 4014 | if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { |
3740 | printk("irq = %x\n", pdev->irq); | 4015 | /* addressing mode bits already removed */ |
3741 | printk("board_id = %x\n", board_id); | 4016 | *memory_bar = pci_resource_start(pdev, i); |
3742 | #endif /* CCISS_DEBUG */ | 4017 | dev_dbg(&pdev->dev, "memory BAR = %lx\n", |
4018 | *memory_bar); | ||
4019 | return 0; | ||
4020 | } | ||
4021 | dev_warn(&pdev->dev, "no memory BAR found\n"); | ||
4022 | return -ENODEV; | ||
4023 | } | ||
3743 | 4024 | ||
3744 | /* If the kernel supports MSI/MSI-X we will try to enable that functionality, | 4025 | static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h) |
3745 | * else we use the IO-APIC interrupt assigned to us by system ROM. | 4026 | { |
3746 | */ | 4027 | int i; |
3747 | cciss_interrupt_mode(c, pdev, board_id); | 4028 | u32 scratchpad; |
3748 | 4029 | ||
3749 | /* find the memory BAR */ | 4030 | for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) { |
3750 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 4031 | scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); |
3751 | if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) | 4032 | if (scratchpad == CCISS_FIRMWARE_READY) |
3752 | break; | 4033 | return 0; |
3753 | } | 4034 | msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); |
3754 | if (i == DEVICE_COUNT_RESOURCE) { | ||
3755 | printk(KERN_WARNING "cciss: No memory BAR found\n"); | ||
3756 | err = -ENODEV; | ||
3757 | goto err_out_free_res; | ||
3758 | } | 4035 | } |
4036 | dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); | ||
4037 | return -ENODEV; | ||
4038 | } | ||
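The removed open-coded loop polled the scratchpad once per 100 ms for up to 120 s. Assuming the new named constants preserve that budget, they would read as below; the values are inferred from the deleted code, not shown in this hunk:

#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS	100
#define CCISS_BOARD_READY_ITERATIONS \
	((120 * 1000) / CCISS_BOARD_READY_POLL_INTERVAL_MSECS)	/* 1200 */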
3759 | 4039 | ||
3760 | c->paddr = pci_resource_start(pdev, i); /* addressing mode bits | 4040 | static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev, |
3761 | * already removed | 4041 | void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, |
3762 | */ | 4042 | u64 *cfg_offset) |
4043 | { | ||
4044 | *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); | ||
4045 | *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); | ||
4046 | *cfg_base_addr &= (u32) 0x0000ffff; | ||
4047 | *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); | ||
4048 | if (*cfg_base_addr_index == -1) { | ||
4049 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, " | ||
4050 | "*cfg_base_addr = 0x%08x\n", *cfg_base_addr); | ||
4051 | return -ENODEV; | ||
4052 | } | ||
4053 | return 0; | ||
4054 | } | ||
3763 | 4055 | ||
3764 | #ifdef CCISS_DEBUG | 4056 | static int __devinit cciss_find_cfgtables(ctlr_info_t *h) |
3765 | printk("address 0 = %lx\n", c->paddr); | 4057 | { |
3766 | #endif /* CCISS_DEBUG */ | 4058 | u64 cfg_offset; |
3767 | c->vaddr = remap_pci_mem(c->paddr, 0x250); | 4059 | u32 cfg_base_addr; |
4060 | u64 cfg_base_addr_index; | ||
4061 | u32 trans_offset; | ||
4062 | int rc; | ||
3768 | 4063 | ||
3769 | /* Wait for the board to become ready. (PCI hotplug needs this.) | 4064 | rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, |
3770 | * We poll for up to 120 secs, once per 100ms. */ | 4065 | &cfg_base_addr_index, &cfg_offset); |
3771 | for (i = 0; i < 1200; i++) { | 4066 | if (rc) |
3772 | scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET); | 4067 | return rc; |
3773 | if (scratchpad == CCISS_FIRMWARE_READY) | 4068 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, |
3774 | break; | 4069 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); |
3775 | set_current_state(TASK_INTERRUPTIBLE); | 4070 | if (!h->cfgtable) |
3776 | schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */ | 4071 | return -ENOMEM; |
3777 | } | 4072 | /* Find performant mode table. */ |
3778 | if (scratchpad != CCISS_FIRMWARE_READY) { | 4073 | trans_offset = readl(&h->cfgtable->TransMethodOffset); |
3779 | printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); | 4074 | h->transtable = remap_pci_mem(pci_resource_start(h->pdev, |
3780 | err = -ENODEV; | 4075 | cfg_base_addr_index)+cfg_offset+trans_offset, |
3781 | goto err_out_free_res; | 4076 | sizeof(*h->transtable)); |
3782 | } | 4077 | if (!h->transtable) |
4078 | return -ENOMEM; | ||
4079 | return 0; | ||
4080 | } | ||
3783 | 4081 | ||
3784 | /* get the address index number */ | 4082 | static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) |
3785 | cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET); | 4083 | { |
3786 | cfg_base_addr &= (__u32) 0x0000ffff; | 4084 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); |
3787 | #ifdef CCISS_DEBUG | 4085 | if (h->max_commands < 16) { |
3788 | printk("cfg base address = %x\n", cfg_base_addr); | 4086 | dev_warn(&h->pdev->dev, "Controller reports " |
3789 | #endif /* CCISS_DEBUG */ | 4087 | "max supported commands of %d, an obvious lie. " |
3790 | cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); | 4088 | "Using 16. Ensure that firmware is up to date.\n", |
3791 | #ifdef CCISS_DEBUG | 4089 | h->max_commands); |
3792 | printk("cfg base address index = %llx\n", | 4090 | h->max_commands = 16; |
3793 | (unsigned long long)cfg_base_addr_index); | ||
3794 | #endif /* CCISS_DEBUG */ | ||
3795 | if (cfg_base_addr_index == -1) { | ||
3796 | printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n"); | ||
3797 | err = -ENODEV; | ||
3798 | goto err_out_free_res; | ||
3799 | } | 4091 | } |
4092 | } | ||
3800 | 4093 | ||
3801 | cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET); | 4094 | /* Interrogate the hardware for some limits: |
3802 | #ifdef CCISS_DEBUG | 4095 | * max commands, max SG elements without chaining, and with chaining, |
3803 | printk("cfg offset = %llx\n", (unsigned long long)cfg_offset); | 4096 | * SG chain block size, etc. |
3804 | #endif /* CCISS_DEBUG */ | 4097 | */ |
3805 | c->cfgtable = remap_pci_mem(pci_resource_start(pdev, | 4098 | static void __devinit cciss_find_board_params(ctlr_info_t *h) |
3806 | cfg_base_addr_index) + | 4099 | { |
3807 | cfg_offset, sizeof(CfgTable_struct)); | 4100 | cciss_get_max_perf_mode_cmds(h); |
3808 | c->board_id = board_id; | 4101 | h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ |
3809 | 4102 | h->maxsgentries = readl(&(h->cfgtable->MaxSGElements)); | |
3810 | #ifdef CCISS_DEBUG | ||
3811 | print_cfg_table(c->cfgtable); | ||
3812 | #endif /* CCISS_DEBUG */ | ||
3813 | |||
3814 | /* Some controllers support Zero Memory Raid (ZMR). | ||
3815 | * When configured in ZMR mode the number of supported | ||
3816 | * commands drops to 64. So instead of just setting an | ||
3817 | * arbitrary value we make the driver a little smarter. | ||
3818 | * We read the config table to tell us how many commands | ||
3819 | * are supported on the controller then subtract 4 to | ||
3820 | * leave a little room for ioctl calls. | ||
3821 | */ | ||
3822 | c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); | ||
3823 | c->maxsgentries = readl(&(c->cfgtable->MaxSGElements)); | ||
3824 | |||
3825 | /* | 4103 | /* |
3826 | * Limit native command to 32 s/g elements to save dma'able memory. | 4104 | * Limit in-command s/g elements to 32 save dma'able memory. |
3827 | * Howvever spec says if 0, use 31 | 4105 | * Howvever spec says if 0, use 31 |
3828 | */ | 4106 | */ |
3829 | 4107 | h->max_cmd_sgentries = 31; | |
3830 | c->max_cmd_sgentries = 31; | 4108 | if (h->maxsgentries > 512) { |
3831 | if (c->maxsgentries > 512) { | 4109 | h->max_cmd_sgentries = 32; |
3832 | c->max_cmd_sgentries = 32; | 4110 | h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1; |
3833 | c->chainsize = c->maxsgentries - c->max_cmd_sgentries + 1; | 4111 | h->maxsgentries--; /* save one for chain pointer */ |
3834 | c->maxsgentries -= 1; /* account for chain pointer */ | ||
3835 | } else { | 4112 | } else { |
3836 | c->maxsgentries = 31; /* Default to traditional value */ | 4113 | h->maxsgentries = 31; /* default to traditional values */ |
3837 | c->chainsize = 0; /* traditional */ | 4114 | h->chainsize = 0; |
3838 | } | 4115 | } |
4116 | } | ||
3839 | 4117 | ||
3840 | c->product_name = products[prod_index].product_name; | 4118 | static inline bool CISS_signature_present(ctlr_info_t *h) |
3841 | c->access = *(products[prod_index].access); | 4119 | { |
3842 | c->nr_cmds = c->max_commands - 4; | 4120 | if ((readb(&h->cfgtable->Signature[0]) != 'C') || |
3843 | if ((readb(&c->cfgtable->Signature[0]) != 'C') || | 4121 | (readb(&h->cfgtable->Signature[1]) != 'I') || |
3844 | (readb(&c->cfgtable->Signature[1]) != 'I') || | 4122 | (readb(&h->cfgtable->Signature[2]) != 'S') || |
3845 | (readb(&c->cfgtable->Signature[2]) != 'S') || | 4123 | (readb(&h->cfgtable->Signature[3]) != 'S')) { |
3846 | (readb(&c->cfgtable->Signature[3]) != 'S')) { | 4124 | dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); |
3847 | printk("Does not appear to be a valid CISS config table\n"); | 4125 | return false; |
3848 | err = -ENODEV; | ||
3849 | goto err_out_free_res; | ||
3850 | } | 4126 | } |
4127 | return true; | ||
4128 | } | ||
4129 | |||
4130 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ | ||
4131 | static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h) | ||
4132 | { | ||
3851 | #ifdef CONFIG_X86 | 4133 | #ifdef CONFIG_X86 |
3852 | { | 4134 | u32 prefetch; |
3853 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ | 4135 | |
3854 | __u32 prefetch; | 4136 | prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); |
3855 | prefetch = readl(&(c->cfgtable->SCSI_Prefetch)); | 4137 | prefetch |= 0x100; |
3856 | prefetch |= 0x100; | 4138 | writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); |
3857 | writel(prefetch, &(c->cfgtable->SCSI_Prefetch)); | ||
3858 | } | ||
3859 | #endif | 4139 | #endif |
4140 | } | ||
3860 | 4141 | ||
3861 | /* Disabling DMA prefetch and refetch for the P600. | 4142 | /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result |
3862 | * An ASIC bug may result in accesses to invalid memory addresses. | 4143 | * in a prefetch beyond physical memory. |
3863 | * We've disabled prefetch for some time now. Testing with XEN | 4144 | */ |
3864 | * kernels revealed a bug in the refetch if dom0 resides on a P600. | 4145 | static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h) |
3865 | */ | 4146 | { |
3866 | if(board_id == 0x3225103C) { | 4147 | u32 dma_prefetch; |
3867 | __u32 dma_prefetch; | 4148 | __u32 dma_refetch; |
3868 | __u32 dma_refetch; | 4149 | |
3869 | dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG); | 4150 | if (h->board_id != 0x3225103C) |
3870 | dma_prefetch |= 0x8000; | 4151 | return; |
3871 | writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG); | 4152 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); |
3872 | pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch); | 4153 | dma_prefetch |= 0x8000; |
3873 | dma_refetch |= 0x1; | 4154 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); |
3874 | pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch); | 4155 | pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch); |
4156 | dma_refetch |= 0x1; | ||
4157 | pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch); | ||
4158 | } | ||
4159 | |||
4160 | static int __devinit cciss_pci_init(ctlr_info_t *h) | ||
4161 | { | ||
4162 | int prod_index, err; | ||
4163 | |||
4164 | prod_index = cciss_lookup_board_id(h->pdev, &h->board_id); | ||
4165 | if (prod_index < 0) | ||
4166 | return -ENODEV; | ||
4167 | h->product_name = products[prod_index].product_name; | ||
4168 | h->access = *(products[prod_index].access); | ||
4169 | |||
4170 | if (cciss_board_disabled(h)) { | ||
4171 | dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); | ||
4172 | return -ENODEV; | ||
4173 | } | ||
4174 | err = pci_enable_device(h->pdev); | ||
4175 | if (err) { | ||
4176 | dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n"); | ||
4177 | return err; | ||
3875 | } | 4178 | } |
3876 | 4179 | ||
3877 | #ifdef CCISS_DEBUG | 4180 | err = pci_request_regions(h->pdev, "cciss"); |
3878 | printk("Trying to put board into Simple mode\n"); | 4181 | if (err) { |
3879 | #endif /* CCISS_DEBUG */ | 4182 | dev_warn(&h->pdev->dev, |
3880 | c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); | 4183 | "Cannot obtain PCI resources, aborting\n"); |
3881 | /* Update the field, and then ring the doorbell */ | 4184 | return err; |
3882 | writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest)); | 4185 | } |
3883 | writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL); | ||
3884 | 4186 | ||
3885 | /* under certain very rare conditions, this can take awhile. | 4187 | dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq); |
3886 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | 4188 | dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id); |
3887 | * as we enter this code.) */ | 4189 | |
3888 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | 4190 | /* If the kernel supports MSI/MSI-X we will try to enable that functionality, |
3889 | if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) | 4191 | * else we use the IO-APIC interrupt assigned to us by system ROM. |
3890 | break; | 4192 | */ |
3891 | /* delay and try again */ | 4193 | cciss_interrupt_mode(h); |
3892 | set_current_state(TASK_INTERRUPTIBLE); | 4194 | err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr); |
3893 | schedule_timeout(msecs_to_jiffies(1)); | 4195 | if (err) |
4196 | goto err_out_free_res; | ||
4197 | h->vaddr = remap_pci_mem(h->paddr, 0x250); | ||
4198 | if (!h->vaddr) { | ||
4199 | err = -ENOMEM; | ||
4200 | goto err_out_free_res; | ||
3894 | } | 4201 | } |
4202 | err = cciss_wait_for_board_ready(h); | ||
4203 | if (err) | ||
4204 | goto err_out_free_res; | ||
4205 | err = cciss_find_cfgtables(h); | ||
4206 | if (err) | ||
4207 | goto err_out_free_res; | ||
4208 | print_cfg_table(h); | ||
4209 | cciss_find_board_params(h); | ||
3895 | 4210 | ||
3896 | #ifdef CCISS_DEBUG | 4211 | if (!CISS_signature_present(h)) { |
3897 | printk(KERN_DEBUG "I counter got to %d %x\n", i, | ||
3898 | readl(c->vaddr + SA5_DOORBELL)); | ||
3899 | #endif /* CCISS_DEBUG */ | ||
3900 | #ifdef CCISS_DEBUG | ||
3901 | print_cfg_table(c->cfgtable); | ||
3902 | #endif /* CCISS_DEBUG */ | ||
3903 | |||
3904 | if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { | ||
3905 | printk(KERN_WARNING "cciss: unable to get board into" | ||
3906 | " simple mode\n"); | ||
3907 | err = -ENODEV; | 4212 | err = -ENODEV; |
3908 | goto err_out_free_res; | 4213 | goto err_out_free_res; |
3909 | } | 4214 | } |
4215 | cciss_enable_scsi_prefetch(h); | ||
4216 | cciss_p600_dma_prefetch_quirk(h); | ||
4217 | cciss_put_controller_into_performant_mode(h); | ||
3910 | return 0; | 4218 | return 0; |
3911 | 4219 | ||
3912 | err_out_free_res: | 4220 | err_out_free_res: |
@@ -3914,42 +4222,47 @@ err_out_free_res: | |||
3914 | * Deliberately omit pci_disable_device(): it does something nasty to | 4222 | * Deliberately omit pci_disable_device(): it does something nasty to |
3915 | * Smart Array controllers that pci_enable_device does not undo | 4223 | * Smart Array controllers that pci_enable_device does not undo |
3916 | */ | 4224 | */ |
3917 | pci_release_regions(pdev); | 4225 | if (h->transtable) |
4226 | iounmap(h->transtable); | ||
4227 | if (h->cfgtable) | ||
4228 | iounmap(h->cfgtable); | ||
4229 | if (h->vaddr) | ||
4230 | iounmap(h->vaddr); | ||
4231 | pci_release_regions(h->pdev); | ||
3918 | return err; | 4232 | return err; |
3919 | } | 4233 | } |
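Both cciss_pci_init() above and the kdump reset path further down lean on remap_pci_mem() for a CPU-visible mapping of the controller's memory BAR. That helper is outside this hunk; the following is a minimal sketch of the conventional shape of such a wrapper, inferred from the call sites (illustrative, not a verbatim excerpt):

	/* map 'size' bytes at bus address 'base', tolerating a
	 * base that is not page aligned */
	static void __iomem *remap_pci_mem(ulong base, ulong size)
	{
		ulong page_base = ((ulong) base) & PAGE_MASK;
		ulong page_offs = ((ulong) base) - page_base;
		void __iomem *page_remapped = ioremap(page_base,
						      page_offs + size);

		return page_remapped ? (page_remapped + page_offs) : NULL;
	}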
3920 | 4234 | ||
3921 | /* Function to find the first free pointer into our hba[] array | 4235 | /* Function to find the first free pointer into our hba[] array |
3922 | * Returns -1 if no free entries are left. | 4236 | * Returns -1 if no free entries are left. |
3923 | */ | 4237 | */ |
3924 | static int alloc_cciss_hba(void) | 4238 | static int alloc_cciss_hba(struct pci_dev *pdev) |
3925 | { | 4239 | { |
3926 | int i; | 4240 | int i; |
3927 | 4241 | ||
3928 | for (i = 0; i < MAX_CTLR; i++) { | 4242 | for (i = 0; i < MAX_CTLR; i++) { |
3929 | if (!hba[i]) { | 4243 | if (!hba[i]) { |
3930 | ctlr_info_t *p; | 4244 | ctlr_info_t *h; |
3931 | 4245 | ||
3932 | p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); | 4246 | h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); |
3933 | if (!p) | 4247 | if (!h) |
3934 | goto Enomem; | 4248 | goto Enomem; |
3935 | hba[i] = p; | 4249 | hba[i] = h; |
3936 | return i; | 4250 | return i; |
3937 | } | 4251 | } |
3938 | } | 4252 | } |
3939 | printk(KERN_WARNING "cciss: This driver supports a maximum" | 4253 | dev_warn(&pdev->dev, "This driver supports a maximum" |
3940 | " of %d controllers.\n", MAX_CTLR); | 4254 | " of %d controllers.\n", MAX_CTLR); |
3941 | return -1; | 4255 | return -1; |
3942 | Enomem: | 4256 | Enomem: |
3943 | printk(KERN_ERR "cciss: out of memory.\n"); | 4257 | dev_warn(&pdev->dev, "out of memory.\n"); |
3944 | return -1; | 4258 | return -1; |
3945 | } | 4259 | } |
3946 | 4260 | ||
3947 | static void free_hba(int n) | 4261 | static void free_hba(ctlr_info_t *h) |
3948 | { | 4262 | { |
3949 | ctlr_info_t *h = hba[n]; | ||
3950 | int i; | 4263 | int i; |
3951 | 4264 | ||
3952 | hba[n] = NULL; | 4265 | hba[h->ctlr] = NULL; |
3953 | for (i = 0; i < h->highest_lun + 1; i++) | 4266 | for (i = 0; i < h->highest_lun + 1; i++) |
3954 | if (h->gendisk[i] != NULL) | 4267 | if (h->gendisk[i] != NULL) |
3955 | put_disk(h->gendisk[i]); | 4268 | put_disk(h->gendisk[i]); |
@@ -4029,7 +4342,8 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u | |||
4029 | /* we leak the DMA buffer here ... no choice since the controller could | 4342 | /* we leak the DMA buffer here ... no choice since the controller could |
4030 | still complete the command. */ | 4343 | still complete the command. */ |
4031 | if (i == 10) { | 4344 | if (i == 10) { |
4032 | printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n", | 4345 | dev_err(&pdev->dev, |
4346 | "controller message %02x:%02x timed out\n", | ||
4033 | opcode, type); | 4347 | opcode, type); |
4034 | return -ETIMEDOUT; | 4348 | return -ETIMEDOUT; |
4035 | } | 4349 | } |
@@ -4037,12 +4351,12 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u | |||
4037 | pci_free_consistent(pdev, cmd_sz, cmd, paddr64); | 4351 | pci_free_consistent(pdev, cmd_sz, cmd, paddr64); |
4038 | 4352 | ||
4039 | if (tag & 2) { | 4353 | if (tag & 2) { |
4040 | printk(KERN_ERR "cciss: controller message %02x:%02x failed\n", | 4354 | dev_err(&pdev->dev, "controller message %02x:%02x failed\n", |
4041 | opcode, type); | 4355 | opcode, type); |
4042 | return -EIO; | 4356 | return -EIO; |
4043 | } | 4357 | } |
4044 | 4358 | ||
4045 | printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n", | 4359 | dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", |
4046 | opcode, type); | 4360 | opcode, type); |
4047 | return 0; | 4361 | return 0; |
4048 | } | 4362 | } |
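cciss_message() above polls for its tag ten times and then gives up with -ETIMEDOUT, deliberately leaking the DMA buffer since the controller may still write it. The cciss_noop() used by the post-reset polling below is not in this hunk; presumably it is a thin wrapper along these lines (the no-op opcode value is an assumption here, defined elsewhere in the driver):

	/* hypothetical wrapper: a no-op is just a controller message
	 * with the no-op opcode and type 0 */
	static int cciss_noop(struct pci_dev *pdev)
	{
		return cciss_message(pdev, 3, 0);	/* opcode 3 assumed */
	}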
@@ -4063,7 +4377,7 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev) | |||
4063 | if (pos) { | 4377 | if (pos) { |
4064 | pci_read_config_word(pdev, msi_control_reg(pos), &control); | 4378 | pci_read_config_word(pdev, msi_control_reg(pos), &control); |
4065 | if (control & PCI_MSI_FLAGS_ENABLE) { | 4379 | if (control & PCI_MSI_FLAGS_ENABLE) { |
4066 | printk(KERN_INFO "cciss: resetting MSI\n"); | 4380 | dev_info(&pdev->dev, "resetting MSI\n"); |
4067 | pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE); | 4381 | pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE); |
4068 | } | 4382 | } |
4069 | } | 4383 | } |
@@ -4072,7 +4386,7 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev) | |||
4072 | if (pos) { | 4386 | if (pos) { |
4073 | pci_read_config_word(pdev, msi_control_reg(pos), &control); | 4387 | pci_read_config_word(pdev, msi_control_reg(pos), &control); |
4074 | if (control & PCI_MSIX_FLAGS_ENABLE) { | 4388 | if (control & PCI_MSIX_FLAGS_ENABLE) { |
4075 | printk(KERN_INFO "cciss: resetting MSI-X\n"); | 4389 | dev_info(&pdev->dev, "resetting MSI-X\n"); |
4076 | pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE); | 4390 | pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE); |
4077 | } | 4391 | } |
4078 | } | 4392 | } |
@@ -4080,68 +4394,150 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev) | |||
4080 | return 0; | 4394 | return 0; |
4081 | } | 4395 | } |
4082 | 4396 | ||
4083 | /* This does a hard reset of the controller using PCI power management | 4397 | static int cciss_controller_hard_reset(struct pci_dev *pdev, |
4084 | * states. */ | 4398 | void * __iomem vaddr, bool use_doorbell) |
4085 | static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev) | ||
4086 | { | 4399 | { |
4087 | u16 pmcsr, saved_config_space[32]; | 4400 | u16 pmcsr; |
4088 | int i, pos; | 4401 | int pos; |
4089 | 4402 | ||
4090 | printk(KERN_INFO "cciss: using PCI PM to reset controller\n"); | 4403 | if (use_doorbell) { |
4404 | /* For everything after the P600, the PCI power state method | ||
4405 | * of resetting the controller doesn't work, so we have this | ||
4406 | * other way using the doorbell register. | ||
4407 | */ | ||
4408 | dev_info(&pdev->dev, "using doorbell to reset controller\n"); | ||
4409 | writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); | ||
4410 | msleep(1000); | ||
4411 | } else { /* Try to do it the PCI power state way */ | ||
4412 | |||
4413 | /* Quoting from the Open CISS Specification: "The Power | ||
4414 | * Management Control/Status Register (CSR) controls the power | ||
4415 | * state of the device. The normal operating state is D0, | ||
4416 | * CSR=00h. The software off state is D3, CSR=03h. To reset | ||
4417 | * the controller, place the interface device in D3 then to D0, | ||
4418 | * this causes a secondary PCI reset which will reset the | ||
4419 | * controller." */ | ||
4420 | |||
4421 | pos = pci_find_capability(pdev, PCI_CAP_ID_PM); | ||
4422 | if (pos == 0) { | ||
4423 | dev_err(&pdev->dev, | ||
4424 | "cciss_controller_hard_reset: " | ||
4425 | "PCI PM not supported\n"); | ||
4426 | return -ENODEV; | ||
4427 | } | ||
4428 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); | ||
4429 | /* enter the D3hot power management state */ | ||
4430 | pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); | ||
4431 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | ||
4432 | pmcsr |= PCI_D3hot; | ||
4433 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | ||
4091 | 4434 | ||
4092 | /* This is very nearly the same thing as | 4435 | msleep(500); |
4093 | 4436 | ||
4094 | pci_save_state(pci_dev); | 4437 | /* enter the D0 power management state */ |
4095 | pci_set_power_state(pci_dev, PCI_D3hot); | 4438 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
4096 | pci_set_power_state(pci_dev, PCI_D0); | 4439 | pmcsr |= PCI_D0; |
4097 | pci_restore_state(pci_dev); | 4440 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); |
4098 | 4441 | ||
4099 | but we can't use these nice canned kernel routines on | 4442 | msleep(500); |
4100 | kexec, because they also check the MSI/MSI-X state in PCI | 4443 | } |
4101 | configuration space and do the wrong thing when it is | 4444 | return 0; |
4102 | set/cleared. Also, the pci_save/restore_state functions | 4445 | } |
4103 | violate the ordering requirements for restoring the | ||
4104 | configuration space from the CCISS document (see the | ||
4105 | comment below). So we roll our own .... */ | ||
4106 | 4446 | ||
4107 | for (i = 0; i < 32; i++) | 4447 | /* This does a hard reset of the controller using PCI power management |
4108 | pci_read_config_word(pdev, 2*i, &saved_config_space[i]); | 4448 | * states or using the doorbell register. */ |
4449 | static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) | ||
4450 | { | ||
4451 | u16 saved_config_space[32]; | ||
4452 | u64 cfg_offset; | ||
4453 | u32 cfg_base_addr; | ||
4454 | u64 cfg_base_addr_index; | ||
4455 | void __iomem *vaddr; | ||
4456 | unsigned long paddr; | ||
4457 | u32 misc_fw_support, active_transport; | ||
4458 | int rc, i; | ||
4459 | CfgTable_struct __iomem *cfgtable; | ||
4460 | bool use_doorbell; | ||
4461 | u32 board_id; | ||
4462 | |||
4463 | /* For controllers as old as the P600, this is very nearly | ||
4464 | * the same thing as | ||
4465 | * | ||
4466 | * pci_save_state(pci_dev); | ||
4467 | * pci_set_power_state(pci_dev, PCI_D3hot); | ||
4468 | * pci_set_power_state(pci_dev, PCI_D0); | ||
4469 | * pci_restore_state(pci_dev); | ||
4470 | * | ||
4471 | * but we can't use these nice canned kernel routines on | ||
4472 | * kexec, because they also check the MSI/MSI-X state in PCI | ||
4473 | * configuration space and do the wrong thing when it is | ||
4474 | * set/cleared. Also, the pci_save/restore_state functions | ||
4475 | * violate the ordering requirements for restoring the | ||
4476 | * configuration space from the CCISS document (see the | ||
4477 | * comment below). So we roll our own .... | ||
4478 | * | ||
4479 | * For controllers newer than the P600, the pci power state | ||
4480 | * method of resetting doesn't work so we have another way | ||
4481 | * using the doorbell register. | ||
4482 | */ | ||
4109 | 4483 | ||
4110 | pos = pci_find_capability(pdev, PCI_CAP_ID_PM); | 4484 | /* Exclude 640x boards. These are two pci devices in one slot |
4111 | if (pos == 0) { | 4485 | * which share a battery backed cache module. One controls the |
4112 | printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n"); | 4486 | * cache, the other accesses the cache through the one that controls |
4487 | * it. If we reset the one controlling the cache, the other will | ||
4488 | * likely not be happy. Just forbid resetting this conjoined mess. | ||
4489 | */ | ||
4490 | cciss_lookup_board_id(pdev, &board_id); | ||
4491 | if (board_id == 0x409C0E11 || board_id == 0x409D0E11) { | ||
4492 | dev_warn(&pdev->dev, "Cannot reset Smart Array 640x " | ||
4493 | "due to shared cache module."); | ||
4113 | return -ENODEV; | 4494 | return -ENODEV; |
4114 | } | 4495 | } |
4115 | 4496 | ||
4116 | /* Quoting from the Open CISS Specification: "The Power | 4497 | for (i = 0; i < 32; i++) |
4117 | * Management Control/Status Register (CSR) controls the power | 4498 | pci_read_config_word(pdev, 2*i, &saved_config_space[i]); |
4118 | * state of the device. The normal operating state is D0, | 4499 | |
4119 | * CSR=00h. The software off state is D3, CSR=03h. To reset | 4500 | /* find the first memory BAR, so we can find the cfg table */ |
4120 | * the controller, place the interface device in D3 then to | 4501 | rc = cciss_pci_find_memory_BAR(pdev, &paddr); |
4121 | * D0, this causes a secondary PCI reset which will reset the | 4502 | if (rc) |
4122 | * controller." */ | 4503 | return rc; |
4504 | vaddr = remap_pci_mem(paddr, 0x250); | ||
4505 | if (!vaddr) | ||
4506 | return -ENOMEM; | ||
4123 | 4507 | ||
4124 | /* enter the D3hot power management state */ | 4508 | /* find cfgtable in order to check if reset via doorbell is supported */ |
4125 | pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); | 4509 | rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, |
4126 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 4510 | &cfg_base_addr_index, &cfg_offset); |
4127 | pmcsr |= PCI_D3hot; | 4511 | if (rc) |
4128 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | 4512 | goto unmap_vaddr; |
4513 | cfgtable = remap_pci_mem(pci_resource_start(pdev, | ||
4514 | cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); | ||
4515 | if (!cfgtable) { | ||
4516 | rc = -ENOMEM; | ||
4517 | goto unmap_vaddr; | ||
4518 | } | ||
4129 | 4519 | ||
4130 | schedule_timeout_uninterruptible(HZ >> 1); | 4520 | /* If reset via doorbell register is supported, use that. */ |
4521 | misc_fw_support = readl(&cfgtable->misc_fw_support); | ||
4522 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | ||
4131 | 4523 | ||
4132 | /* enter the D0 power management state */ | 4524 | /* The doorbell reset seems to cause lockups on some Smart |
4133 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 4525 | * Arrays (e.g. P410, P410i, maybe others). Until this is |
4134 | pmcsr |= PCI_D0; | 4526 | * fixed or at least isolated, avoid the doorbell reset. |
4135 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | 4527 | */ |
4528 | use_doorbell = 0; | ||
4136 | 4529 | ||
4137 | schedule_timeout_uninterruptible(HZ >> 1); | 4530 | rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); |
4531 | if (rc) | ||
4532 | goto unmap_cfgtable; | ||
4138 | 4533 | ||
4139 | /* Restore the PCI configuration space. The Open CISS | 4534 | /* Restore the PCI configuration space. The Open CISS |
4140 | * Specification says, "Restore the PCI Configuration | 4535 | * Specification says, "Restore the PCI Configuration |
4141 | * Registers, offsets 00h through 60h. It is important to | 4536 | * Registers, offsets 00h through 60h. It is important to |
4142 | * restore the command register, 16-bits at offset 04h, | 4537 | * restore the command register, 16-bits at offset 04h, |
4143 | * last. Do not restore the configuration status register, | 4538 | * last. Do not restore the configuration status register, |
4144 | * 16-bits at offset 06h." Note that the offset is 2*i. */ | 4539 | * 16-bits at offset 06h." Note that the offset is 2*i. |
4540 | */ | ||
4145 | for (i = 0; i < 32; i++) { | 4541 | for (i = 0; i < 32; i++) { |
4146 | if (i == 2 || i == 3) | 4542 | if (i == 2 || i == 3) |
4147 | continue; | 4543 | continue; |
@@ -4150,6 +4546,63 @@ static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev) | |||
4150 | wmb(); | 4546 | wmb(); |
4151 | pci_write_config_word(pdev, 4, saved_config_space[2]); | 4547 | pci_write_config_word(pdev, 4, saved_config_space[2]); |
4152 | 4548 | ||
4549 | /* Some devices (notably the HP Smart Array 5i Controller) | ||
4550 | need a little pause here */ | ||
4551 | msleep(CCISS_POST_RESET_PAUSE_MSECS); | ||
4552 | |||
4553 | /* Controller should be in simple mode at this point. If it's not, | ||
4554 | * it means we're on one of those controllers which doesn't support | ||
4555 | * the doorbell reset method and on which the PCI power management reset | ||
4556 | * method doesn't work (P800, for example.) | ||
4557 | * In those cases, don't try to proceed, as it generally doesn't work. | ||
4558 | */ | ||
4559 | active_transport = readl(&cfgtable->TransportActive); | ||
4560 | if (active_transport & PERFORMANT_MODE) { | ||
4561 | dev_warn(&pdev->dev, "Unable to successfully reset controller," | ||
4562 | " ignoring controller.\n"); | ||
4563 | rc = -ENODEV; | ||
4564 | } | ||
4565 | |||
4566 | unmap_cfgtable: | ||
4567 | iounmap(cfgtable); | ||
4568 | |||
4569 | unmap_vaddr: | ||
4570 | iounmap(vaddr); | ||
4571 | return rc; | ||
4572 | } | ||
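The restore loop above walks a save area of 32 16-bit words, so word i corresponds to config offset 2*i. Spelling out the two special cases named in the Open CISS quote, as a reading aid rather than new driver code:

	/* word 2 -> offset 0x04 = PCI_COMMAND: restored last, on purpose
	 * word 3 -> offset 0x06 = PCI_STATUS:  deliberately never restored */
	for (i = 0; i < 32; i++) {
		if (i == 2 || i == 3)
			continue;			/* skip command/status */
		pci_write_config_word(pdev, 2 * i, saved_config_space[i]);
	}
	wmb();
	pci_write_config_word(pdev, 4, saved_config_space[2]); /* command last */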
4573 | |||
4574 | static __devinit int cciss_init_reset_devices(struct pci_dev *pdev) | ||
4575 | { | ||
4576 | int rc, i; | ||
4577 | |||
4578 | if (!reset_devices) | ||
4579 | return 0; | ||
4580 | |||
4581 | /* Reset the controller with a PCI power-cycle or via doorbell */ | ||
4582 | rc = cciss_kdump_hard_reset_controller(pdev); | ||
4583 | |||
4584 | /* -ENOTSUPP here means we cannot reset the controller | ||
4585 | * but it's already (and still) up and running in | ||
4586 | * "performant mode". Or, it might be 640x, which can't reset | ||
4587 | * due to concerns about shared bbwc between 6402/6404 pair. | ||
4588 | */ | ||
4589 | if (rc == -ENOTSUPP) | ||
4590 | return 0; /* just try to do the kdump anyhow. */ | ||
4591 | if (rc) | ||
4592 | return -ENODEV; | ||
4593 | if (cciss_reset_msi(pdev)) | ||
4594 | return -ENODEV; | ||
4595 | |||
4596 | /* Now try to get the controller to respond to a no-op */ | ||
4597 | for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { | ||
4598 | if (cciss_noop(pdev) == 0) | ||
4599 | break; | ||
4600 | else | ||
4601 | dev_warn(&pdev->dev, "no-op failed%s\n", | ||
4602 | (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ? | ||
4603 | "; re-trying" : "")); | ||
4604 | msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS); | ||
4605 | } | ||
4153 | return 0; | 4606 | return 0; |
4154 | } | 4607 | } |
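cciss_init_reset_devices() is a no-op on a normal boot; it only fires in a crash (kdump) kernel, which sets the global reset_devices flag from its command line. A typical crash-kernel append line looks like this (illustrative; exact flags vary by distribution):

	console=ttyS0 irqpoll maxcpus=1 reset_devices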
4155 | 4608 | ||
@@ -4167,46 +4620,31 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4167 | int rc; | 4620 | int rc; |
4168 | int dac, return_code; | 4621 | int dac, return_code; |
4169 | InquiryData_struct *inq_buff; | 4622 | InquiryData_struct *inq_buff; |
4623 | ctlr_info_t *h; | ||
4170 | 4624 | ||
4171 | if (reset_devices) { | 4625 | rc = cciss_init_reset_devices(pdev); |
4172 | /* Reset the controller with a PCI power-cycle */ | 4626 | if (rc) |
4173 | if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev)) | 4627 | return rc; |
4174 | return -ENODEV; | 4628 | i = alloc_cciss_hba(pdev); |
4175 | |||
4176 | /* Now try to get the controller to respond to a no-op. Some | ||
4177 | devices (notably the HP Smart Array 5i Controller) need | ||
4178 | up to 30 seconds to respond. */ | ||
4179 | for (i=0; i<30; i++) { | ||
4180 | if (cciss_noop(pdev) == 0) | ||
4181 | break; | ||
4182 | |||
4183 | schedule_timeout_uninterruptible(HZ); | ||
4184 | } | ||
4185 | if (i == 30) { | ||
4186 | printk(KERN_ERR "cciss: controller seems dead\n"); | ||
4187 | return -EBUSY; | ||
4188 | } | ||
4189 | } | ||
4190 | |||
4191 | i = alloc_cciss_hba(); | ||
4192 | if (i < 0) | 4629 | if (i < 0) |
4193 | return -1; | 4630 | return -1; |
4194 | 4631 | ||
4195 | hba[i]->busy_initializing = 1; | 4632 | h = hba[i]; |
4196 | INIT_HLIST_HEAD(&hba[i]->cmpQ); | 4633 | h->pdev = pdev; |
4197 | INIT_HLIST_HEAD(&hba[i]->reqQ); | 4634 | h->busy_initializing = 1; |
4198 | mutex_init(&hba[i]->busy_shutting_down); | 4635 | INIT_HLIST_HEAD(&h->cmpQ); |
4636 | INIT_HLIST_HEAD(&h->reqQ); | ||
4637 | mutex_init(&h->busy_shutting_down); | ||
4199 | 4638 | ||
4200 | if (cciss_pci_init(hba[i], pdev) != 0) | 4639 | if (cciss_pci_init(h) != 0) |
4201 | goto clean_no_release_regions; | 4640 | goto clean_no_release_regions; |
4202 | 4641 | ||
4203 | sprintf(hba[i]->devname, "cciss%d", i); | 4642 | sprintf(h->devname, "cciss%d", i); |
4204 | hba[i]->ctlr = i; | 4643 | h->ctlr = i; |
4205 | hba[i]->pdev = pdev; | ||
4206 | 4644 | ||
4207 | init_completion(&hba[i]->scan_wait); | 4645 | init_completion(&h->scan_wait); |
4208 | 4646 | ||
4209 | if (cciss_create_hba_sysfs_entry(hba[i])) | 4647 | if (cciss_create_hba_sysfs_entry(h)) |
4210 | goto clean0; | 4648 | goto clean0; |
4211 | 4649 | ||
4212 | /* configure PCI DMA stuff */ | 4650 | /* configure PCI DMA stuff */ |
@@ -4215,7 +4653,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4215 | else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) | 4653 | else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) |
4216 | dac = 0; | 4654 | dac = 0; |
4217 | else { | 4655 | else { |
4218 | printk(KERN_ERR "cciss: no suitable DMA available\n"); | 4656 | dev_err(&h->pdev->dev, "no suitable DMA available\n"); |
4219 | goto clean1; | 4657 | goto clean1; |
4220 | } | 4658 | } |
4221 | 4659 | ||
@@ -4225,151 +4663,164 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4225 | * 8 controller support. | 4663 | * 8 controller support. |
4226 | */ | 4664 | */ |
4227 | if (i < MAX_CTLR_ORIG) | 4665 | if (i < MAX_CTLR_ORIG) |
4228 | hba[i]->major = COMPAQ_CISS_MAJOR + i; | 4666 | h->major = COMPAQ_CISS_MAJOR + i; |
4229 | rc = register_blkdev(hba[i]->major, hba[i]->devname); | 4667 | rc = register_blkdev(h->major, h->devname); |
4230 | if (rc == -EBUSY || rc == -EINVAL) { | 4668 | if (rc == -EBUSY || rc == -EINVAL) { |
4231 | printk(KERN_ERR | 4669 | dev_err(&h->pdev->dev, |
4232 | "cciss: Unable to get major number %d for %s " | 4670 | "Unable to get major number %d for %s " |
4233 | "on hba %d\n", hba[i]->major, hba[i]->devname, i); | 4671 | "on hba %d\n", h->major, h->devname, i); |
4234 | goto clean1; | 4672 | goto clean1; |
4235 | } else { | 4673 | } else { |
4236 | if (i >= MAX_CTLR_ORIG) | 4674 | if (i >= MAX_CTLR_ORIG) |
4237 | hba[i]->major = rc; | 4675 | h->major = rc; |
4238 | } | 4676 | } |
4239 | 4677 | ||
4240 | /* make sure the board interrupts are off */ | 4678 | /* make sure the board interrupts are off */ |
4241 | hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF); | 4679 | h->access.set_intr_mask(h, CCISS_INTR_OFF); |
4242 | if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr, | 4680 | if (h->msi_vector || h->msix_vector) { |
4243 | IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) { | 4681 | if (request_irq(h->intr[PERF_MODE_INT], |
4244 | printk(KERN_ERR "cciss: Unable to get irq %d for %s\n", | 4682 | do_cciss_msix_intr, |
4245 | hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname); | 4683 | IRQF_DISABLED, h->devname, h)) { |
4246 | goto clean2; | 4684 | dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", |
4685 | h->intr[PERF_MODE_INT], h->devname); | ||
4686 | goto clean2; | ||
4687 | } | ||
4688 | } else { | ||
4689 | if (request_irq(h->intr[PERF_MODE_INT], do_cciss_intx, | ||
4690 | IRQF_DISABLED, h->devname, h)) { | ||
4691 | dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", | ||
4692 | h->intr[PERF_MODE_INT], h->devname); | ||
4693 | goto clean2; | ||
4694 | } | ||
4247 | } | 4695 | } |
4248 | 4696 | ||
4249 | printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", | 4697 | dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", |
4250 | hba[i]->devname, pdev->device, pci_name(pdev), | 4698 | h->devname, pdev->device, pci_name(pdev), |
4251 | hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not"); | 4699 | h->intr[PERF_MODE_INT], dac ? "" : " not"); |
4252 | 4700 | ||
4253 | hba[i]->cmd_pool_bits = | 4701 | h->cmd_pool_bits = |
4254 | kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) | 4702 | kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) |
4255 | * sizeof(unsigned long), GFP_KERNEL); | 4703 | * sizeof(unsigned long), GFP_KERNEL); |
4256 | hba[i]->cmd_pool = (CommandList_struct *) | 4704 | h->cmd_pool = (CommandList_struct *) |
4257 | pci_alloc_consistent(hba[i]->pdev, | 4705 | pci_alloc_consistent(h->pdev, |
4258 | hba[i]->nr_cmds * sizeof(CommandList_struct), | 4706 | h->nr_cmds * sizeof(CommandList_struct), |
4259 | &(hba[i]->cmd_pool_dhandle)); | 4707 | &(h->cmd_pool_dhandle)); |
4260 | hba[i]->errinfo_pool = (ErrorInfo_struct *) | 4708 | h->errinfo_pool = (ErrorInfo_struct *) |
4261 | pci_alloc_consistent(hba[i]->pdev, | 4709 | pci_alloc_consistent(h->pdev, |
4262 | hba[i]->nr_cmds * sizeof(ErrorInfo_struct), | 4710 | h->nr_cmds * sizeof(ErrorInfo_struct), |
4263 | &(hba[i]->errinfo_pool_dhandle)); | 4711 | &(h->errinfo_pool_dhandle)); |
4264 | if ((hba[i]->cmd_pool_bits == NULL) | 4712 | if ((h->cmd_pool_bits == NULL) |
4265 | || (hba[i]->cmd_pool == NULL) | 4713 | || (h->cmd_pool == NULL) |
4266 | || (hba[i]->errinfo_pool == NULL)) { | 4714 | || (h->errinfo_pool == NULL)) { |
4267 | printk(KERN_ERR "cciss: out of memory"); | 4715 | dev_err(&h->pdev->dev, "out of memory"); |
4268 | goto clean4; | 4716 | goto clean4; |
4269 | } | 4717 | } |
4270 | 4718 | ||
4271 | /* Need space for temp scatter list */ | 4719 | /* Need space for temp scatter list */ |
4272 | hba[i]->scatter_list = kmalloc(hba[i]->max_commands * | 4720 | h->scatter_list = kmalloc(h->max_commands * |
4273 | sizeof(struct scatterlist *), | 4721 | sizeof(struct scatterlist *), |
4274 | GFP_KERNEL); | 4722 | GFP_KERNEL); |
4275 | for (k = 0; k < hba[i]->nr_cmds; k++) { | 4723 | if (!h->scatter_list) |
4276 | hba[i]->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * | 4724 | goto clean4; |
4277 | hba[i]->maxsgentries, | 4725 | |
4726 | for (k = 0; k < h->nr_cmds; k++) { | ||
4727 | h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * | ||
4728 | h->maxsgentries, | ||
4278 | GFP_KERNEL); | 4729 | GFP_KERNEL); |
4279 | if (hba[i]->scatter_list[k] == NULL) { | 4730 | if (h->scatter_list[k] == NULL) { |
4280 | printk(KERN_ERR "cciss%d: could not allocate " | 4731 | dev_err(&h->pdev->dev, |
4281 | "s/g lists\n", i); | 4732 | "could not allocate s/g lists\n"); |
4282 | goto clean4; | 4733 | goto clean4; |
4283 | } | 4734 | } |
4284 | } | 4735 | } |
4285 | hba[i]->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[i], | 4736 | h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, |
4286 | hba[i]->chainsize, hba[i]->nr_cmds); | 4737 | h->chainsize, h->nr_cmds); |
4287 | if (!hba[i]->cmd_sg_list && hba[i]->chainsize > 0) | 4738 | if (!h->cmd_sg_list && h->chainsize > 0) |
4288 | goto clean4; | 4739 | goto clean4; |
4289 | 4740 | ||
4290 | spin_lock_init(&hba[i]->lock); | 4741 | spin_lock_init(&h->lock); |
4291 | 4742 | ||
4292 | /* Initialize the pdev driver private data. | 4743 | /* Initialize the pdev driver private data. |
4293 | have it point to hba[i]. */ | 4744 | have it point to h. */ |
4294 | pci_set_drvdata(pdev, hba[i]); | 4745 | pci_set_drvdata(pdev, h); |
4295 | /* command and error info recs zeroed out before | 4746 | /* command and error info recs zeroed out before |
4296 | they are used */ | 4747 | they are used */ |
4297 | memset(hba[i]->cmd_pool_bits, 0, | 4748 | memset(h->cmd_pool_bits, 0, |
4298 | DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) | 4749 | DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) |
4299 | * sizeof(unsigned long)); | 4750 | * sizeof(unsigned long)); |
4300 | 4751 | ||
4301 | hba[i]->num_luns = 0; | 4752 | h->num_luns = 0; |
4302 | hba[i]->highest_lun = -1; | 4753 | h->highest_lun = -1; |
4303 | for (j = 0; j < CISS_MAX_LUN; j++) { | 4754 | for (j = 0; j < CISS_MAX_LUN; j++) { |
4304 | hba[i]->drv[j] = NULL; | 4755 | h->drv[j] = NULL; |
4305 | hba[i]->gendisk[j] = NULL; | 4756 | h->gendisk[j] = NULL; |
4306 | } | 4757 | } |
4307 | 4758 | ||
4308 | cciss_scsi_setup(i); | 4759 | cciss_scsi_setup(h); |
4309 | 4760 | ||
4310 | /* Turn the interrupts on so we can service requests */ | 4761 | /* Turn the interrupts on so we can service requests */ |
4311 | hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); | 4762 | h->access.set_intr_mask(h, CCISS_INTR_ON); |
4312 | 4763 | ||
4313 | /* Get the firmware version */ | 4764 | /* Get the firmware version */ |
4314 | inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); | 4765 | inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); |
4315 | if (inq_buff == NULL) { | 4766 | if (inq_buff == NULL) { |
4316 | printk(KERN_ERR "cciss: out of memory\n"); | 4767 | dev_err(&h->pdev->dev, "out of memory\n"); |
4317 | goto clean4; | 4768 | goto clean4; |
4318 | } | 4769 | } |
4319 | 4770 | ||
4320 | return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff, | 4771 | return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, |
4321 | sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); | 4772 | sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); |
4322 | if (return_code == IO_OK) { | 4773 | if (return_code == IO_OK) { |
4323 | hba[i]->firm_ver[0] = inq_buff->data_byte[32]; | 4774 | h->firm_ver[0] = inq_buff->data_byte[32]; |
4324 | hba[i]->firm_ver[1] = inq_buff->data_byte[33]; | 4775 | h->firm_ver[1] = inq_buff->data_byte[33]; |
4325 | hba[i]->firm_ver[2] = inq_buff->data_byte[34]; | 4776 | h->firm_ver[2] = inq_buff->data_byte[34]; |
4326 | hba[i]->firm_ver[3] = inq_buff->data_byte[35]; | 4777 | h->firm_ver[3] = inq_buff->data_byte[35]; |
4327 | } else { /* send command failed */ | 4778 | } else { /* send command failed */ |
4328 | printk(KERN_WARNING "cciss: unable to determine firmware" | 4779 | dev_warn(&h->pdev->dev, "unable to determine firmware" |
4329 | " version of controller\n"); | 4780 | " version of controller\n"); |
4330 | } | 4781 | } |
4331 | kfree(inq_buff); | 4782 | kfree(inq_buff); |
4332 | 4783 | ||
4333 | cciss_procinit(i); | 4784 | cciss_procinit(h); |
4334 | 4785 | ||
4335 | hba[i]->cciss_max_sectors = 8192; | 4786 | h->cciss_max_sectors = 8192; |
4336 | 4787 | ||
4337 | rebuild_lun_table(hba[i], 1, 0); | 4788 | rebuild_lun_table(h, 1, 0); |
4338 | hba[i]->busy_initializing = 0; | 4789 | h->busy_initializing = 0; |
4339 | return 1; | 4790 | return 1; |
4340 | 4791 | ||
4341 | clean4: | 4792 | clean4: |
4342 | kfree(hba[i]->cmd_pool_bits); | 4793 | kfree(h->cmd_pool_bits); |
4343 | /* Free up sg elements */ | 4794 | /* Free up sg elements */ |
4344 | for (k = 0; k < hba[i]->nr_cmds; k++) | 4795 | for (k-- ; k >= 0; k--) |
4345 | kfree(hba[i]->scatter_list[k]); | 4796 | kfree(h->scatter_list[k]); |
4346 | kfree(hba[i]->scatter_list); | 4797 | kfree(h->scatter_list); |
4347 | cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds); | 4798 | cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); |
4348 | if (hba[i]->cmd_pool) | 4799 | if (h->cmd_pool) |
4349 | pci_free_consistent(hba[i]->pdev, | 4800 | pci_free_consistent(h->pdev, |
4350 | hba[i]->nr_cmds * sizeof(CommandList_struct), | 4801 | h->nr_cmds * sizeof(CommandList_struct), |
4351 | hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); | 4802 | h->cmd_pool, h->cmd_pool_dhandle); |
4352 | if (hba[i]->errinfo_pool) | 4803 | if (h->errinfo_pool) |
4353 | pci_free_consistent(hba[i]->pdev, | 4804 | pci_free_consistent(h->pdev, |
4354 | hba[i]->nr_cmds * sizeof(ErrorInfo_struct), | 4805 | h->nr_cmds * sizeof(ErrorInfo_struct), |
4355 | hba[i]->errinfo_pool, | 4806 | h->errinfo_pool, |
4356 | hba[i]->errinfo_pool_dhandle); | 4807 | h->errinfo_pool_dhandle); |
4357 | free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]); | 4808 | free_irq(h->intr[PERF_MODE_INT], h); |
4358 | clean2: | 4809 | clean2: |
4359 | unregister_blkdev(hba[i]->major, hba[i]->devname); | 4810 | unregister_blkdev(h->major, h->devname); |
4360 | clean1: | 4811 | clean1: |
4361 | cciss_destroy_hba_sysfs_entry(hba[i]); | 4812 | cciss_destroy_hba_sysfs_entry(h); |
4362 | clean0: | 4813 | clean0: |
4363 | pci_release_regions(pdev); | 4814 | pci_release_regions(pdev); |
4364 | clean_no_release_regions: | 4815 | clean_no_release_regions: |
4365 | hba[i]->busy_initializing = 0; | 4816 | h->busy_initializing = 0; |
4366 | 4817 | ||
4367 | /* | 4818 | /* |
4368 | * Deliberately omit pci_disable_device(): it does something nasty to | 4819 | * Deliberately omit pci_disable_device(): it does something nasty to |
4369 | * Smart Array controllers that pci_enable_device does not undo | 4820 | * Smart Array controllers that pci_enable_device does not undo |
4370 | */ | 4821 | */ |
4371 | pci_set_drvdata(pdev, NULL); | 4822 | pci_set_drvdata(pdev, NULL); |
4372 | free_hba(i); | 4823 | free_hba(h); |
4373 | return -1; | 4824 | return -1; |
4374 | } | 4825 | } |
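One subtle fix in the clean4 path above: scatter_list is allocated with kmalloc(), so on a partial failure the old cleanup, which freed all nr_cmds slots, would kfree() uninitialized pointers. The new "for (k-- ; k >= 0; k--)" form is the standard partial-unwind idiom; in isolation (names here are illustrative):

	for (k = 0; k < n; k++) {
		p[k] = kmalloc(sz, GFP_KERNEL);
		if (!p[k])
			goto unwind;
	}
	return 0;
unwind:
	while (--k >= 0)	/* free only what was actually allocated */
		kfree(p[k]);
	return -ENOMEM;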
4375 | 4826 | ||
@@ -4382,55 +4833,51 @@ static void cciss_shutdown(struct pci_dev *pdev) | |||
4382 | h = pci_get_drvdata(pdev); | 4833 | h = pci_get_drvdata(pdev); |
4383 | flush_buf = kzalloc(4, GFP_KERNEL); | 4834 | flush_buf = kzalloc(4, GFP_KERNEL); |
4384 | if (!flush_buf) { | 4835 | if (!flush_buf) { |
4385 | printk(KERN_WARNING | 4836 | dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n"); |
4386 | "cciss:%d cache not flushed, out of memory.\n", | ||
4387 | h->ctlr); | ||
4388 | return; | 4837 | return; |
4389 | } | 4838 | } |
4390 | /* write all data in the battery backed cache to disk */ | 4839 | /* write all data in the battery backed cache to disk */ |
4391 | memset(flush_buf, 0, 4); | 4840 | memset(flush_buf, 0, 4); |
4392 | return_code = sendcmd_withirq(CCISS_CACHE_FLUSH, h->ctlr, flush_buf, | 4841 | return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, |
4393 | 4, 0, CTLR_LUNID, TYPE_CMD); | 4842 | 4, 0, CTLR_LUNID, TYPE_CMD); |
4394 | kfree(flush_buf); | 4843 | kfree(flush_buf); |
4395 | if (return_code != IO_OK) | 4844 | if (return_code != IO_OK) |
4396 | printk(KERN_WARNING "cciss%d: Error flushing cache\n", | 4845 | dev_warn(&h->pdev->dev, "Error flushing cache\n"); |
4397 | h->ctlr); | ||
4398 | h->access.set_intr_mask(h, CCISS_INTR_OFF); | 4846 | h->access.set_intr_mask(h, CCISS_INTR_OFF); |
4399 | free_irq(h->intr[2], h); | 4847 | free_irq(h->intr[PERF_MODE_INT], h); |
4400 | } | 4848 | } |
4401 | 4849 | ||
4402 | static void __devexit cciss_remove_one(struct pci_dev *pdev) | 4850 | static void __devexit cciss_remove_one(struct pci_dev *pdev) |
4403 | { | 4851 | { |
4404 | ctlr_info_t *tmp_ptr; | 4852 | ctlr_info_t *h; |
4405 | int i, j; | 4853 | int i, j; |
4406 | 4854 | ||
4407 | if (pci_get_drvdata(pdev) == NULL) { | 4855 | if (pci_get_drvdata(pdev) == NULL) { |
4408 | printk(KERN_ERR "cciss: Unable to remove device \n"); | 4856 | dev_err(&pdev->dev, "Unable to remove device\n"); |
4409 | return; | 4857 | return; |
4410 | } | 4858 | } |
4411 | 4859 | ||
4412 | tmp_ptr = pci_get_drvdata(pdev); | 4860 | h = pci_get_drvdata(pdev); |
4413 | i = tmp_ptr->ctlr; | 4861 | i = h->ctlr; |
4414 | if (hba[i] == NULL) { | 4862 | if (hba[i] == NULL) { |
4415 | printk(KERN_ERR "cciss: device appears to " | 4863 | dev_err(&pdev->dev, "device appears to already be removed\n"); |
4416 | "already be removed \n"); | ||
4417 | return; | 4864 | return; |
4418 | } | 4865 | } |
4419 | 4866 | ||
4420 | mutex_lock(&hba[i]->busy_shutting_down); | 4867 | mutex_lock(&h->busy_shutting_down); |
4421 | 4868 | ||
4422 | remove_from_scan_list(hba[i]); | 4869 | remove_from_scan_list(h); |
4423 | remove_proc_entry(hba[i]->devname, proc_cciss); | 4870 | remove_proc_entry(h->devname, proc_cciss); |
4424 | unregister_blkdev(hba[i]->major, hba[i]->devname); | 4871 | unregister_blkdev(h->major, h->devname); |
4425 | 4872 | ||
4426 | /* remove it from the disk list */ | 4873 | /* remove it from the disk list */ |
4427 | for (j = 0; j < CISS_MAX_LUN; j++) { | 4874 | for (j = 0; j < CISS_MAX_LUN; j++) { |
4428 | struct gendisk *disk = hba[i]->gendisk[j]; | 4875 | struct gendisk *disk = h->gendisk[j]; |
4429 | if (disk) { | 4876 | if (disk) { |
4430 | struct request_queue *q = disk->queue; | 4877 | struct request_queue *q = disk->queue; |
4431 | 4878 | ||
4432 | if (disk->flags & GENHD_FL_UP) { | 4879 | if (disk->flags & GENHD_FL_UP) { |
4433 | cciss_destroy_ld_sysfs_entry(hba[i], j, 1); | 4880 | cciss_destroy_ld_sysfs_entry(h, j, 1); |
4434 | del_gendisk(disk); | 4881 | del_gendisk(disk); |
4435 | } | 4882 | } |
4436 | if (q) | 4883 | if (q) |
@@ -4439,39 +4886,41 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
4439 | } | 4886 | } |
4440 | 4887 | ||
4441 | #ifdef CONFIG_CISS_SCSI_TAPE | 4888 | #ifdef CONFIG_CISS_SCSI_TAPE |
4442 | cciss_unregister_scsi(i); /* unhook from SCSI subsystem */ | 4889 | cciss_unregister_scsi(h); /* unhook from SCSI subsystem */ |
4443 | #endif | 4890 | #endif |
4444 | 4891 | ||
4445 | cciss_shutdown(pdev); | 4892 | cciss_shutdown(pdev); |
4446 | 4893 | ||
4447 | #ifdef CONFIG_PCI_MSI | 4894 | #ifdef CONFIG_PCI_MSI |
4448 | if (hba[i]->msix_vector) | 4895 | if (h->msix_vector) |
4449 | pci_disable_msix(hba[i]->pdev); | 4896 | pci_disable_msix(h->pdev); |
4450 | else if (hba[i]->msi_vector) | 4897 | else if (h->msi_vector) |
4451 | pci_disable_msi(hba[i]->pdev); | 4898 | pci_disable_msi(h->pdev); |
4452 | #endif /* CONFIG_PCI_MSI */ | 4899 | #endif /* CONFIG_PCI_MSI */ |
4453 | 4900 | ||
4454 | iounmap(hba[i]->vaddr); | 4901 | iounmap(h->transtable); |
4902 | iounmap(h->cfgtable); | ||
4903 | iounmap(h->vaddr); | ||
4455 | 4904 | ||
4456 | pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct), | 4905 | pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), |
4457 | hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); | 4906 | h->cmd_pool, h->cmd_pool_dhandle); |
4458 | pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), | 4907 | pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct), |
4459 | hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); | 4908 | h->errinfo_pool, h->errinfo_pool_dhandle); |
4460 | kfree(hba[i]->cmd_pool_bits); | 4909 | kfree(h->cmd_pool_bits); |
4461 | /* Free up sg elements */ | 4910 | /* Free up sg elements */ |
4462 | for (j = 0; j < hba[i]->nr_cmds; j++) | 4911 | for (j = 0; j < h->nr_cmds; j++) |
4463 | kfree(hba[i]->scatter_list[j]); | 4912 | kfree(h->scatter_list[j]); |
4464 | kfree(hba[i]->scatter_list); | 4913 | kfree(h->scatter_list); |
4465 | cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds); | 4914 | cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); |
4466 | /* | 4915 | /* |
4467 | * Deliberately omit pci_disable_device(): it does something nasty to | 4916 | * Deliberately omit pci_disable_device(): it does something nasty to |
4468 | * Smart Array controllers that pci_enable_device does not undo | 4917 | * Smart Array controllers that pci_enable_device does not undo |
4469 | */ | 4918 | */ |
4470 | pci_release_regions(pdev); | 4919 | pci_release_regions(pdev); |
4471 | pci_set_drvdata(pdev, NULL); | 4920 | pci_set_drvdata(pdev, NULL); |
4472 | cciss_destroy_hba_sysfs_entry(hba[i]); | 4921 | cciss_destroy_hba_sysfs_entry(h); |
4473 | mutex_unlock(&hba[i]->busy_shutting_down); | 4922 | mutex_unlock(&h->busy_shutting_down); |
4474 | free_hba(i); | 4923 | free_hba(h); |
4475 | } | 4924 | } |
4476 | 4925 | ||
4477 | static struct pci_driver cciss_pci_driver = { | 4926 | static struct pci_driver cciss_pci_driver = { |
@@ -4496,7 +4945,6 @@ static int __init cciss_init(void) | |||
4496 | * array of them, the size must be a multiple of 8 bytes. | 4945 | * array of them, the size must be a multiple of 8 bytes. |
4497 | */ | 4946 | */ |
4498 | BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); | 4947 | BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); |
4499 | |||
4500 | printk(KERN_INFO DRIVER_NAME "\n"); | 4948 | printk(KERN_INFO DRIVER_NAME "\n"); |
4501 | 4949 | ||
4502 | err = bus_register(&cciss_bus_type); | 4950 | err = bus_register(&cciss_bus_type); |
@@ -4533,8 +4981,8 @@ static void __exit cciss_cleanup(void) | |||
4533 | /* double check that all controller entries have been removed */ | 4981 | /* double check that all controller entries have been removed */ |
4534 | for (i = 0; i < MAX_CTLR; i++) { | 4982 | for (i = 0; i < MAX_CTLR; i++) { |
4535 | if (hba[i] != NULL) { | 4983 | if (hba[i] != NULL) { |
4536 | printk(KERN_WARNING "cciss: had to remove" | 4984 | dev_warn(&hba[i]->pdev->dev, |
4537 | " controller %d\n", i); | 4985 | "had to remove controller\n"); |
4538 | cciss_remove_one(hba[i]->pdev); | 4986 | cciss_remove_one(hba[i]->pdev); |
4539 | } | 4987 | } |
4540 | } | 4988 | } |
@@ -4543,46 +4991,5 @@ static void __exit cciss_cleanup(void) | |||
4543 | bus_unregister(&cciss_bus_type); | 4991 | bus_unregister(&cciss_bus_type); |
4544 | } | 4992 | } |
4545 | 4993 | ||
4546 | static void fail_all_cmds(unsigned long ctlr) | ||
4547 | { | ||
4548 | /* If we get here, the board is apparently dead. */ | ||
4549 | ctlr_info_t *h = hba[ctlr]; | ||
4550 | CommandList_struct *c; | ||
4551 | unsigned long flags; | ||
4552 | |||
4553 | printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr); | ||
4554 | h->alive = 0; /* the controller apparently died... */ | ||
4555 | |||
4556 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | ||
4557 | |||
4558 | pci_disable_device(h->pdev); /* Make sure it is really dead. */ | ||
4559 | |||
4560 | /* move everything off the request queue onto the completed queue */ | ||
4561 | while (!hlist_empty(&h->reqQ)) { | ||
4562 | c = hlist_entry(h->reqQ.first, CommandList_struct, list); | ||
4563 | removeQ(c); | ||
4564 | h->Qdepth--; | ||
4565 | addQ(&h->cmpQ, c); | ||
4566 | } | ||
4567 | |||
4568 | /* Now, fail everything on the completed queue with a HW error */ | ||
4569 | while (!hlist_empty(&h->cmpQ)) { | ||
4570 | c = hlist_entry(h->cmpQ.first, CommandList_struct, list); | ||
4571 | removeQ(c); | ||
4572 | if (c->cmd_type != CMD_MSG_STALE) | ||
4573 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; | ||
4574 | if (c->cmd_type == CMD_RWREQ) { | ||
4575 | complete_command(h, c, 0); | ||
4576 | } else if (c->cmd_type == CMD_IOCTL_PEND) | ||
4577 | complete(c->waiting); | ||
4578 | #ifdef CONFIG_CISS_SCSI_TAPE | ||
4579 | else if (c->cmd_type == CMD_SCSI) | ||
4580 | complete_scsi_command(c, 0, 0); | ||
4581 | #endif | ||
4582 | } | ||
4583 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | ||
4584 | return; | ||
4585 | } | ||
4586 | |||
4587 | module_init(cciss_init); | 4994 | module_init(cciss_init); |
4588 | module_exit(cciss_cleanup); | 4995 | module_exit(cciss_cleanup); |
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index c5d411174db0..ae340ffc8f81 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h | |||
@@ -25,7 +25,7 @@ struct access_method { | |||
25 | void (*submit_command)(ctlr_info_t *h, CommandList_struct *c); | 25 | void (*submit_command)(ctlr_info_t *h, CommandList_struct *c); |
26 | void (*set_intr_mask)(ctlr_info_t *h, unsigned long val); | 26 | void (*set_intr_mask)(ctlr_info_t *h, unsigned long val); |
27 | unsigned long (*fifo_full)(ctlr_info_t *h); | 27 | unsigned long (*fifo_full)(ctlr_info_t *h); |
28 | unsigned long (*intr_pending)(ctlr_info_t *h); | 28 | bool (*intr_pending)(ctlr_info_t *h); |
29 | unsigned long (*command_completed)(ctlr_info_t *h); | 29 | unsigned long (*command_completed)(ctlr_info_t *h); |
30 | }; | 30 | }; |
31 | typedef struct _drive_info_struct | 31 | typedef struct _drive_info_struct |
@@ -85,8 +85,8 @@ struct ctlr_info | |||
85 | int max_cmd_sgentries; | 85 | int max_cmd_sgentries; |
86 | SGDescriptor_struct **cmd_sg_list; | 86 | SGDescriptor_struct **cmd_sg_list; |
87 | 87 | ||
88 | # define DOORBELL_INT 0 | 88 | # define PERF_MODE_INT 0 |
89 | # define PERF_MODE_INT 1 | 89 | # define DOORBELL_INT 1 |
90 | # define SIMPLE_MODE_INT 2 | 90 | # define SIMPLE_MODE_INT 2 |
91 | # define MEMQ_MODE_INT 3 | 91 | # define MEMQ_MODE_INT 3 |
92 | unsigned int intr[4]; | 92 | unsigned int intr[4]; |
@@ -137,10 +137,27 @@ struct ctlr_info | |||
137 | struct list_head scan_list; | 137 | struct list_head scan_list; |
138 | struct completion scan_wait; | 138 | struct completion scan_wait; |
139 | struct device dev; | 139 | struct device dev; |
140 | /* | ||
141 | * Performant mode tables. | ||
142 | */ | ||
143 | u32 trans_support; | ||
144 | u32 trans_offset; | ||
145 | struct TransTable_struct *transtable; | ||
146 | unsigned long transMethod; | ||
147 | |||
148 | /* | ||
149 | * Performant mode completion buffer | ||
150 | */ | ||
151 | u64 *reply_pool; | ||
152 | dma_addr_t reply_pool_dhandle; | ||
153 | u64 *reply_pool_head; | ||
154 | size_t reply_pool_size; | ||
155 | unsigned char reply_pool_wraparound; | ||
156 | u32 *blockFetchTable; | ||
140 | }; | 157 | }; |
141 | 158 | ||
142 | /* Defining the diffent access_menthods */ | 159 | /* Defining the different access_methods |
143 | /* | 160 | * |
144 | * Memory mapped FIFO interface (SMART 53xx cards) | 161 | * Memory mapped FIFO interface (SMART 53xx cards) |
145 | */ | 162 | */ |
146 | #define SA5_DOORBELL 0x20 | 163 | #define SA5_DOORBELL 0x20 |
@@ -159,19 +176,47 @@ struct ctlr_info | |||
159 | #define SA5B_INTR_PENDING 0x04 | 176 | #define SA5B_INTR_PENDING 0x04 |
160 | #define FIFO_EMPTY 0xffffffff | 177 | #define FIFO_EMPTY 0xffffffff |
161 | #define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ | 178 | #define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ |
179 | /* Perf. mode flags */ | ||
180 | #define SA5_PERF_INTR_PENDING 0x04 | ||
181 | #define SA5_PERF_INTR_OFF 0x05 | ||
182 | #define SA5_OUTDB_STATUS_PERF_BIT 0x01 | ||
183 | #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 | ||
184 | #define SA5_OUTDB_CLEAR 0xA0 | ||
185 | #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 | ||
186 | #define SA5_OUTDB_STATUS 0x9C | ||
187 | |||
162 | 188 | ||
163 | #define CISS_ERROR_BIT 0x02 | 189 | #define CISS_ERROR_BIT 0x02 |
164 | 190 | ||
165 | #define CCISS_INTR_ON 1 | 191 | #define CCISS_INTR_ON 1 |
166 | #define CCISS_INTR_OFF 0 | 192 | #define CCISS_INTR_OFF 0 |
193 | |||
194 | |||
195 | /* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board | ||
196 | * to become ready, in seconds, before giving up on it. | ||
197 | * CCISS_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait | ||
198 | * between polling the board to see if it is ready, in | ||
199 | * milliseconds. CCISS_BOARD_READY_ITERATIONS is derived | ||
200 | * from the above. | ||
201 | */ | ||
202 | #define CCISS_BOARD_READY_WAIT_SECS (120) | ||
203 | #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) | ||
204 | #define CCISS_BOARD_READY_ITERATIONS \ | ||
205 | ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ | ||
206 | CCISS_BOARD_READY_POLL_INTERVAL_MSECS) | ||
207 | #define CCISS_POST_RESET_PAUSE_MSECS (3000) | ||
208 | #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) | ||
209 | #define CCISS_POST_RESET_NOOP_RETRIES (12) | ||
210 | |||
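Plugging the values above into the derivation: 120 seconds of patience at one poll every 100 ms gives

	CCISS_BOARD_READY_ITERATIONS = (120 * 1000) / 100 = 1200 polls

and the post-reset no-op handshake adds up to 12 retries at 1-second intervals on top of the initial 3-second pause, i.e. roughly 15 seconds worst case before the reset path gives up.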
167 | /* | 211 | /* |
168 | Send the command to the hardware | 212 | Send the command to the hardware |
169 | */ | 213 | */ |
170 | static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) | 214 | static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) |
171 | { | 215 | { |
172 | #ifdef CCISS_DEBUG | 216 | #ifdef CCISS_DEBUG |
173 | printk("Sending %x - down to controller\n", c->busaddr ); | 217 | printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n", |
174 | #endif /* CCISS_DEBUG */ | 218 | h->ctlr, c->busaddr); |
219 | #endif /* CCISS_DEBUG */ | ||
175 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); | 220 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); |
176 | h->commands_outstanding++; | 221 | h->commands_outstanding++; |
177 | if ( h->commands_outstanding > h->max_outstanding) | 222 | if ( h->commands_outstanding > h->max_outstanding) |
@@ -214,6 +259,20 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val) | |||
214 | h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); | 259 | h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); |
215 | } | 260 | } |
216 | } | 261 | } |
262 | |||
263 | /* Performant mode intr_mask */ | ||
264 | static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val) | ||
265 | { | ||
266 | if (val) { /* turn on interrupts */ | ||
267 | h->interrupts_enabled = 1; | ||
268 | writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); | ||
269 | } else { | ||
270 | h->interrupts_enabled = 0; | ||
271 | writel(SA5_PERF_INTR_OFF, | ||
272 | h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); | ||
273 | } | ||
274 | } | ||
275 | |||
217 | /* | 276 | /* |
218 | * Returns true if fifo is full. | 277 | * Returns true if fifo is full. |
219 | * | 278 | * |
@@ -250,10 +309,44 @@ static unsigned long SA5_completed(ctlr_info_t *h) | |||
250 | return ( register_value); | 309 | return ( register_value); |
251 | 310 | ||
252 | } | 311 | } |
312 | |||
313 | /* Performant mode command completed */ | ||
314 | static unsigned long SA5_performant_completed(ctlr_info_t *h) | ||
315 | { | ||
316 | unsigned long register_value = FIFO_EMPTY; | ||
317 | |||
318 | /* flush the controller write of the reply queue by reading | ||
319 | * outbound doorbell status register. | ||
320 | */ | ||
321 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | ||
322 | /* msi auto clears the interrupt pending bit. */ | ||
323 | if (!(h->msi_vector || h->msix_vector)) { | ||
324 | writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); | ||
325 | /* Do a read in order to flush the write to the controller | ||
326 | * (as per spec.) | ||
327 | */ | ||
328 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | ||
329 | } | ||
330 | |||
331 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { | ||
332 | register_value = *(h->reply_pool_head); | ||
333 | (h->reply_pool_head)++; | ||
334 | h->commands_outstanding--; | ||
335 | } else { | ||
336 | register_value = FIFO_EMPTY; | ||
337 | } | ||
338 | /* Check for wraparound */ | ||
339 | if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { | ||
340 | h->reply_pool_head = h->reply_pool; | ||
341 | h->reply_pool_wraparound ^= 1; | ||
342 | } | ||
343 | |||
344 | return register_value; | ||
345 | } | ||
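A consumer drains SA5_performant_completed() until it reports FIFO_EMPTY; here is a minimal sketch of the interrupt-handler side, where finish_cmd() stands in for a hypothetical tag-lookup-and-complete helper not shown in this diff:

	unsigned long tag;

	while ((tag = h->access.command_completed(h)) != FIFO_EMPTY)
		finish_cmd(h, tag);	/* hypothetical: map tag back to its
					 * command, then complete it */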
253 | /* | 346 | /* |
254 | * Returns true if an interrupt is pending. | 347 | * Returns true if an interrupt is pending. |
255 | */ | 348 | */ |
256 | static unsigned long SA5_intr_pending(ctlr_info_t *h) | 349 | static bool SA5_intr_pending(ctlr_info_t *h) |
257 | { | 350 | { |
258 | unsigned long register_value = | 351 | unsigned long register_value = |
259 | readl(h->vaddr + SA5_INTR_STATUS); | 352 | readl(h->vaddr + SA5_INTR_STATUS); |
@@ -268,7 +361,7 @@ static unsigned long SA5_intr_pending(ctlr_info_t *h) | |||
268 | /* | 361 | /* |
269 | * Returns true if an interrupt is pending. | 362 | * Returns true if an interrupt is pending. |
270 | */ | 363 | */ |
271 | static unsigned long SA5B_intr_pending(ctlr_info_t *h) | 364 | static bool SA5B_intr_pending(ctlr_info_t *h) |
272 | { | 365 | { |
273 | unsigned long register_value = | 366 | unsigned long register_value = |
274 | readl(h->vaddr + SA5_INTR_STATUS); | 367 | readl(h->vaddr + SA5_INTR_STATUS); |
@@ -280,6 +373,20 @@ static unsigned long SA5B_intr_pending(ctlr_info_t *h) | |||
280 | return 0 ; | 373 | return 0 ; |
281 | } | 374 | } |
282 | 375 | ||
376 | static bool SA5_performant_intr_pending(ctlr_info_t *h) | ||
377 | { | ||
378 | unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); | ||
379 | |||
380 | if (!register_value) | ||
381 | return false; | ||
382 | |||
383 | if (h->msi_vector || h->msix_vector) | ||
384 | return true; | ||
385 | |||
386 | /* Read outbound doorbell to flush */ | ||
387 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | ||
388 | return register_value & SA5_OUTDB_STATUS_PERF_BIT; | ||
389 | } | ||
283 | 390 | ||
284 | static struct access_method SA5_access = { | 391 | static struct access_method SA5_access = { |
285 | SA5_submit_command, | 392 | SA5_submit_command, |
@@ -297,6 +404,14 @@ static struct access_method SA5B_access = { | |||
297 | SA5_completed, | 404 | SA5_completed, |
298 | }; | 405 | }; |
299 | 406 | ||
407 | static struct access_method SA5_performant_access = { | ||
408 | SA5_submit_command, | ||
409 | SA5_performant_intr_mask, | ||
410 | SA5_fifo_full, | ||
411 | SA5_performant_intr_pending, | ||
412 | SA5_performant_completed, | ||
413 | }; | ||
414 | |||
300 | struct board_type { | 415 | struct board_type { |
301 | __u32 board_id; | 416 | __u32 board_id; |
302 | char *product_name; | 417 | char *product_name; |
@@ -304,6 +419,4 @@ struct board_type { | |||
304 | int nr_cmds; /* Max cmds this kind of ctlr can handle. */ | 419 | int nr_cmds; /* Max cmds this kind of ctlr can handle. */ |
305 | }; | 420 | }; |
306 | 421 | ||
307 | #define CCISS_LOCK(i) (&hba[i]->lock) | ||
308 | |||
309 | #endif /* CCISS_H */ | 422 | #endif /* CCISS_H */ |
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index e624ff959cb6..eb060f1b00b6 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h | |||
@@ -52,8 +52,10 @@ | |||
52 | /* Configuration Table */ | 52 | /* Configuration Table */ |
53 | #define CFGTBL_ChangeReq 0x00000001l | 53 | #define CFGTBL_ChangeReq 0x00000001l |
54 | #define CFGTBL_AccCmds 0x00000001l | 54 | #define CFGTBL_AccCmds 0x00000001l |
55 | #define DOORBELL_CTLR_RESET 0x00000004l | ||
55 | 56 | ||
56 | #define CFGTBL_Trans_Simple 0x00000002l | 57 | #define CFGTBL_Trans_Simple 0x00000002l |
58 | #define CFGTBL_Trans_Performant 0x00000004l | ||
57 | 59 | ||
58 | #define CFGTBL_BusType_Ultra2 0x00000001l | 60 | #define CFGTBL_BusType_Ultra2 0x00000001l |
59 | #define CFGTBL_BusType_Ultra3 0x00000002l | 61 | #define CFGTBL_BusType_Ultra3 0x00000002l |
@@ -173,12 +175,15 @@ typedef struct _SGDescriptor_struct { | |||
173 | * PAD_64 can be adjusted independently as needed for 32-bit | 175 | * PAD_64 can be adjusted independently as needed for 32-bit |
174 | * and 64-bits systems. | 176 | * and 64-bits systems. |
175 | */ | 177 | */ |
176 | #define COMMANDLIST_ALIGNMENT (8) | 178 | #define COMMANDLIST_ALIGNMENT (32) |
177 | #define IS_64_BIT ((sizeof(long) - 4)/4) | 179 | #define IS_64_BIT ((sizeof(long) - 4)/4) |
178 | #define IS_32_BIT (!IS_64_BIT) | 180 | #define IS_32_BIT (!IS_64_BIT) |
179 | #define PAD_32 (0) | 181 | #define PAD_32 (0) |
180 | #define PAD_64 (4) | 182 | #define PAD_64 (4) |
181 | #define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) | 183 | #define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) |
184 | #define DIRECT_LOOKUP_BIT 0x10 | ||
185 | #define DIRECT_LOOKUP_SHIFT 5 | ||
186 | |||
182 | typedef struct _CommandList_struct { | 187 | typedef struct _CommandList_struct { |
183 | CommandListHeader_struct Header; | 188 | CommandListHeader_struct Header; |
184 | RequestBlock_struct Request; | 189 | RequestBlock_struct Request; |
@@ -195,7 +200,7 @@ typedef struct _CommandList_struct { | |||
195 | struct completion *waiting; | 200 | struct completion *waiting; |
196 | int retry_count; | 201 | int retry_count; |
197 | void * scsi_cmd; | 202 | void * scsi_cmd; |
198 | char pad[PADSIZE]; | 203 | char pad[PADSIZE]; |
199 | } CommandList_struct; | 204 | } CommandList_struct; |
200 | 205 | ||
201 | /* Configuration Table Structure */ | 206 | /* Configuration Table Structure */ |
@@ -209,12 +214,15 @@ typedef struct _HostWrite_struct { | |||
209 | typedef struct _CfgTable_struct { | 214 | typedef struct _CfgTable_struct { |
210 | BYTE Signature[4]; | 215 | BYTE Signature[4]; |
211 | DWORD SpecValence; | 216 | DWORD SpecValence; |
217 | #define SIMPLE_MODE 0x02 | ||
218 | #define PERFORMANT_MODE 0x04 | ||
219 | #define MEMQ_MODE 0x08 | ||
212 | DWORD TransportSupport; | 220 | DWORD TransportSupport; |
213 | DWORD TransportActive; | 221 | DWORD TransportActive; |
214 | HostWrite_struct HostWrite; | 222 | HostWrite_struct HostWrite; |
215 | DWORD CmdsOutMax; | 223 | DWORD CmdsOutMax; |
216 | DWORD BusTypes; | 224 | DWORD BusTypes; |
217 | DWORD Reserved; | 225 | DWORD TransMethodOffset; |
218 | BYTE ServerName[16]; | 226 | BYTE ServerName[16]; |
219 | DWORD HeartBeat; | 227 | DWORD HeartBeat; |
220 | DWORD SCSI_Prefetch; | 228 | DWORD SCSI_Prefetch; |
@@ -222,6 +230,28 @@ typedef struct _CfgTable_struct { | |||
222 | DWORD MaxLogicalUnits; | 230 | DWORD MaxLogicalUnits; |
223 | DWORD MaxPhysicalDrives; | 231 | DWORD MaxPhysicalDrives; |
224 | DWORD MaxPhysicalDrivesPerLogicalUnit; | 232 | DWORD MaxPhysicalDrivesPerLogicalUnit; |
233 | DWORD MaxPerformantModeCommands; | ||
234 | u8 reserved[0x78 - 0x58]; | ||
235 | u32 misc_fw_support; /* offset 0x78 */ | ||
236 | #define MISC_FW_DOORBELL_RESET (0x02) | ||
225 | } CfgTable_struct; | 237 | } CfgTable_struct; |
238 | |||
239 | struct TransTable_struct { | ||
240 | u32 BlockFetch0; | ||
241 | u32 BlockFetch1; | ||
242 | u32 BlockFetch2; | ||
243 | u32 BlockFetch3; | ||
244 | u32 BlockFetch4; | ||
245 | u32 BlockFetch5; | ||
246 | u32 BlockFetch6; | ||
247 | u32 BlockFetch7; | ||
248 | u32 RepQSize; | ||
249 | u32 RepQCount; | ||
250 | u32 RepQCtrAddrLow32; | ||
251 | u32 RepQCtrAddrHigh32; | ||
252 | u32 RepQAddr0Low32; | ||
253 | u32 RepQAddr0High32; | ||
254 | }; | ||
255 | |||
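TransTable_struct is the block the host fills in (located via cfgtable->TransMethodOffset) to hand the controller a reply ring before requesting CFGTBL_Trans_Performant. Below is a hedged sketch of that plumbing, using only fields declared above; the specific values and write ordering are assumptions, not taken from this diff:

	/* sketch: advertise a single reply queue backed by reply_pool */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel((u32) h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(upper_32_bits(h->reply_pool_dhandle),
	       &h->transtable->RepQAddr0High32);
	/* then request the transport switch and ring the doorbell */
	writel(CFGTBL_Trans_Performant,
	       &h->cfgtable->HostWrite.TransportRequest);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);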
226 | #pragma pack() | 256 | #pragma pack() |
227 | #endif /* CCISS_CMD_H */ | 257 | #endif /* CCISS_CMD_H */ |
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index e1d0e2cfec72..575495f3c4b8 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
@@ -44,13 +44,15 @@ | |||
44 | #define CCISS_ABORT_MSG 0x00 | 44 | #define CCISS_ABORT_MSG 0x00 |
45 | #define CCISS_RESET_MSG 0x01 | 45 | #define CCISS_RESET_MSG 0x01 |
46 | 46 | ||
47 | static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, | 47 | static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, |
48 | size_t size, | 48 | size_t size, |
49 | __u8 page_code, unsigned char *scsi3addr, | 49 | __u8 page_code, unsigned char *scsi3addr, |
50 | int cmd_type); | 50 | int cmd_type); |
51 | 51 | ||
52 | static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool); | 52 | static CommandList_struct *cmd_alloc(ctlr_info_t *h); |
53 | static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool); | 53 | static CommandList_struct *cmd_special_alloc(ctlr_info_t *h); |
54 | static void cmd_free(ctlr_info_t *h, CommandList_struct *c); | ||
55 | static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c); | ||
54 | 56 | ||
55 | static int cciss_scsi_proc_info( | 57 | static int cciss_scsi_proc_info( |
56 | struct Scsi_Host *sh, | 58 | struct Scsi_Host *sh, |
@@ -93,8 +95,8 @@ static struct scsi_host_template cciss_driver_template = { | |||
93 | 95 | ||
94 | #pragma pack(1) | 96 | #pragma pack(1) |
95 | 97 | ||
96 | #define SCSI_PAD_32 0 | 98 | #define SCSI_PAD_32 8 |
97 | #define SCSI_PAD_64 0 | 99 | #define SCSI_PAD_64 8 |
98 | 100 | ||
99 | struct cciss_scsi_cmd_stack_elem_t { | 101 | struct cciss_scsi_cmd_stack_elem_t { |
100 | CommandList_struct cmd; | 102 | CommandList_struct cmd; |
@@ -127,16 +129,16 @@ struct cciss_scsi_adapter_data_t { | |||
127 | spinlock_t lock; // to protect ccissscsi[ctlr]; | 129 | spinlock_t lock; // to protect ccissscsi[ctlr]; |
128 | }; | 130 | }; |
129 | 131 | ||
130 | #define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \ | 132 | #define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \ |
131 | &hba[ctlr]->scsi_ctlr->lock, flags); | 133 | &h->scsi_ctlr->lock, flags); |
132 | #define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \ | 134 | #define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \ |
133 | &hba[ctlr]->scsi_ctlr->lock, flags); | 135 | &h->scsi_ctlr->lock, flags); |
134 | 136 | ||
135 | static CommandList_struct * | 137 | static CommandList_struct * |
136 | scsi_cmd_alloc(ctlr_info_t *h) | 138 | scsi_cmd_alloc(ctlr_info_t *h) |
137 | { | 139 | { |
138 | /* assume only one process in here at a time, locking done by caller. */ | 140 | /* assume only one process in here at a time, locking done by caller. */ |
139 | /* use CCISS_LOCK(ctlr) */ | 141 | /* use h->lock */ |
140 | /* might be better to rewrite how we allocate scsi commands in a way that */ | 142 | /* might be better to rewrite how we allocate scsi commands in a way that */ |
141 | /* needs no locking at all. */ | 143 | /* needs no locking at all. */ |
142 | 144 | ||
@@ -177,10 +179,10 @@ scsi_cmd_alloc(ctlr_info_t *h) | |||
177 | } | 179 | } |
178 | 180 | ||
179 | static void | 181 | static void |
180 | scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd) | 182 | scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c) |
181 | { | 183 | { |
182 | /* assume only one process in here at a time, locking done by caller. */ | 184 | /* assume only one process in here at a time, locking done by caller. */ |
183 | /* use CCISS_LOCK(ctlr) */ | 185 | /* use h->lock */ |
184 | /* drop the free memory chunk on top of the stack. */ | 186 | /* drop the free memory chunk on top of the stack. */ |
185 | 187 | ||
186 | struct cciss_scsi_adapter_data_t *sa; | 188 | struct cciss_scsi_adapter_data_t *sa; |
@@ -188,24 +190,25 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd) | |||
188 | 190 | ||
189 | sa = h->scsi_ctlr; | 191 | sa = h->scsi_ctlr; |
190 | stk = &sa->cmd_stack; | 192 | stk = &sa->cmd_stack; |
193 | stk->top++; | ||
191 | if (stk->top >= CMD_STACK_SIZE) { | 194 | if (stk->top >= CMD_STACK_SIZE) { |
192 | printk("cciss: scsi_cmd_free called too many times.\n"); | 195 | dev_err(&h->pdev->dev, |
196 | "scsi_cmd_free called too many times.\n"); | ||
193 | BUG(); | 197 | BUG(); |
194 | } | 198 | } |
195 | stk->top++; | 199 | stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c; |
196 | stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd; | ||
197 | } | 200 | } |
198 | 201 | ||
199 | static int | 202 | static int |
200 | scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa) | 203 | scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa) |
201 | { | 204 | { |
202 | int i; | 205 | int i; |
203 | struct cciss_scsi_cmd_stack_t *stk; | 206 | struct cciss_scsi_cmd_stack_t *stk; |
204 | size_t size; | 207 | size_t size; |
205 | 208 | ||
206 | sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[ctlr], | 209 | sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, |
207 | hba[ctlr]->chainsize, CMD_STACK_SIZE); | 210 | h->chainsize, CMD_STACK_SIZE); |
208 | if (!sa->cmd_sg_list && hba[ctlr]->chainsize > 0) | 211 | if (!sa->cmd_sg_list && h->chainsize > 0) |
209 | return -ENOMEM; | 212 | return -ENOMEM; |
210 | 213 | ||
211 | stk = &sa->cmd_stack; | 214 | stk = &sa->cmd_stack; |
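
The reordering in scsi_cmd_free() above is an off-by-one fix: the old code bounds-checked stk->top before incrementing it, so a full stack (top == CMD_STACK_SIZE - 1) passed the check and the store then landed one slot past the end of elem[]. Incrementing first lets BUG() fire before the out-of-bounds write. A minimal model of the corrected push, sketch only, with names mirroring the driver's command stack:

    /* Sketch: advance top first, bounds-check, then store. */
    static void cmd_stack_push(struct cciss_scsi_cmd_stack_t *stk,
                               struct cciss_scsi_cmd_stack_elem_t *c)
    {
            stk->top++;                     /* advance first ...          */
            if (stk->top >= CMD_STACK_SIZE) /* ... so a full stack trips  */
                    BUG();                  /* the check before the store */
            stk->elem[stk->top] = c;
    }
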
@@ -215,7 +218,7 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa) | |||
215 | BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); | 218 | BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); |
216 | /* pci_alloc_consistent guarantees 32-bit DMA address will be used */ | 219 | /* pci_alloc_consistent guarantees 32-bit DMA address will be used */ |
217 | stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) | 220 | stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) |
218 | pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle); | 221 | pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle); |
219 | 222 | ||
220 | if (stk->pool == NULL) { | 223 | if (stk->pool == NULL) { |
221 | cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); | 224 | cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); |
@@ -234,23 +237,22 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa) | |||
234 | } | 237 | } |
235 | 238 | ||
236 | static void | 239 | static void |
237 | scsi_cmd_stack_free(int ctlr) | 240 | scsi_cmd_stack_free(ctlr_info_t *h) |
238 | { | 241 | { |
239 | struct cciss_scsi_adapter_data_t *sa; | 242 | struct cciss_scsi_adapter_data_t *sa; |
240 | struct cciss_scsi_cmd_stack_t *stk; | 243 | struct cciss_scsi_cmd_stack_t *stk; |
241 | size_t size; | 244 | size_t size; |
242 | 245 | ||
243 | sa = hba[ctlr]->scsi_ctlr; | 246 | sa = h->scsi_ctlr; |
244 | stk = &sa->cmd_stack; | 247 | stk = &sa->cmd_stack; |
245 | if (stk->top != CMD_STACK_SIZE-1) { | 248 | if (stk->top != CMD_STACK_SIZE-1) { |
246 | printk( "cciss: %d scsi commands are still outstanding.\n", | 249 | dev_warn(&h->pdev->dev, |
250 | "bug: %d scsi commands are still outstanding.\n", | ||
247 | CMD_STACK_SIZE - stk->top); | 251 | CMD_STACK_SIZE - 1 - stk->top); |
248 | // BUG(); | ||
249 | printk("WE HAVE A BUG HERE!!! stk=0x%p\n", stk); | ||
250 | } | 252 | } |
251 | size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; | 253 | size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; |
252 | 254 | ||
253 | pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle); | 255 | pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); |
254 | stk->pool = NULL; | 256 | stk->pool = NULL; |
255 | cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); | 257 | cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); |
256 | } | 258 | } |
@@ -342,20 +344,20 @@ print_cmd(CommandList_struct *cp) | |||
342 | #endif | 344 | #endif |
343 | 345 | ||
344 | static int | 346 | static int |
345 | find_bus_target_lun(int ctlr, int *bus, int *target, int *lun) | 347 | find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun) |
346 | { | 348 | { |
347 | /* finds an unused bus, target, lun for a new device */ | 349 | /* finds an unused bus, target, lun for a new device */ |
348 | /* assumes hba[ctlr]->scsi_ctlr->lock is held */ | 350 | /* assumes h->scsi_ctlr->lock is held */ |
349 | int i, found=0; | 351 | int i, found=0; |
350 | unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA]; | 352 | unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA]; |
351 | 353 | ||
352 | memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA); | 354 | memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA); |
353 | 355 | ||
354 | target_taken[SELF_SCSI_ID] = 1; | 356 | target_taken[SELF_SCSI_ID] = 1; |
355 | for (i=0;i<ccissscsi[ctlr].ndevices;i++) | 357 | for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) |
356 | target_taken[ccissscsi[ctlr].dev[i].target] = 1; | 358 | target_taken[ccissscsi[h->ctlr].dev[i].target] = 1; |
357 | 359 | ||
358 | for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) { | 360 | for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) { |
359 | if (!target_taken[i]) { | 361 | if (!target_taken[i]) { |
360 | *bus = 0; *target=i; *lun = 0; found=1; | 362 | *bus = 0; *target=i; *lun = 0; found=1; |
361 | break; | 363 | break; |
@@ -369,19 +371,19 @@ struct scsi2map { | |||
369 | }; | 371 | }; |
370 | 372 | ||
371 | static int | 373 | static int |
372 | cciss_scsi_add_entry(int ctlr, int hostno, | 374 | cciss_scsi_add_entry(ctlr_info_t *h, int hostno, |
373 | struct cciss_scsi_dev_t *device, | 375 | struct cciss_scsi_dev_t *device, |
374 | struct scsi2map *added, int *nadded) | 376 | struct scsi2map *added, int *nadded) |
375 | { | 377 | { |
376 | /* assumes hba[ctlr]->scsi_ctlr->lock is held */ | 378 | /* assumes h->scsi_ctlr->lock is held */ |
377 | int n = ccissscsi[ctlr].ndevices; | 379 | int n = ccissscsi[h->ctlr].ndevices; |
378 | struct cciss_scsi_dev_t *sd; | 380 | struct cciss_scsi_dev_t *sd; |
379 | int i, bus, target, lun; | 381 | int i, bus, target, lun; |
380 | unsigned char addr1[8], addr2[8]; | 382 | unsigned char addr1[8], addr2[8]; |
381 | 383 | ||
382 | if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) { | 384 | if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) { |
383 | printk("cciss%d: Too many devices, " | 385 | dev_warn(&h->pdev->dev, "Too many devices, " |
384 | "some will be inaccessible.\n", ctlr); | 386 | "some will be inaccessible.\n"); |
385 | return -1; | 387 | return -1; |
386 | } | 388 | } |
387 | 389 | ||
@@ -397,7 +399,7 @@ cciss_scsi_add_entry(int ctlr, int hostno, | |||
397 | memcpy(addr1, device->scsi3addr, 8); | 399 | memcpy(addr1, device->scsi3addr, 8); |
398 | addr1[4] = 0; | 400 | addr1[4] = 0; |
399 | for (i = 0; i < n; i++) { | 401 | for (i = 0; i < n; i++) { |
400 | sd = &ccissscsi[ctlr].dev[i]; | 402 | sd = &ccissscsi[h->ctlr].dev[i]; |
401 | memcpy(addr2, sd->scsi3addr, 8); | 403 | memcpy(addr2, sd->scsi3addr, 8); |
402 | addr2[4] = 0; | 404 | addr2[4] = 0; |
403 | /* differ only in byte 4? */ | 405 | /* differ only in byte 4? */ |
@@ -410,9 +412,9 @@ cciss_scsi_add_entry(int ctlr, int hostno, | |||
410 | } | 412 | } |
411 | } | 413 | } |
412 | 414 | ||
413 | sd = &ccissscsi[ctlr].dev[n]; | 415 | sd = &ccissscsi[h->ctlr].dev[n]; |
414 | if (lun == 0) { | 416 | if (lun == 0) { |
415 | if (find_bus_target_lun(ctlr, | 417 | if (find_bus_target_lun(h, |
416 | &sd->bus, &sd->target, &sd->lun) != 0) | 418 | &sd->bus, &sd->target, &sd->lun) != 0) |
417 | return -1; | 419 | return -1; |
418 | } else { | 420 | } else { |
@@ -431,37 +433,37 @@ cciss_scsi_add_entry(int ctlr, int hostno, | |||
431 | memcpy(sd->device_id, device->device_id, sizeof(sd->device_id)); | 433 | memcpy(sd->device_id, device->device_id, sizeof(sd->device_id)); |
432 | sd->devtype = device->devtype; | 434 | sd->devtype = device->devtype; |
433 | 435 | ||
434 | ccissscsi[ctlr].ndevices++; | 436 | ccissscsi[h->ctlr].ndevices++; |
435 | 437 | ||
436 | /* initially, (before registering with scsi layer) we don't | 438 | /* initially, (before registering with scsi layer) we don't |
437 | know our hostno and we don't want to print anything first | 439 | know our hostno and we don't want to print anything first |
438 | time anyway (the scsi layer's inquiries will show that info) */ | 440 | time anyway (the scsi layer's inquiries will show that info) */ |
439 | if (hostno != -1) | 441 | if (hostno != -1) |
440 | printk("cciss%d: %s device c%db%dt%dl%d added.\n", | 442 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", |
441 | ctlr, scsi_device_type(sd->devtype), hostno, | 443 | scsi_device_type(sd->devtype), hostno, |
442 | sd->bus, sd->target, sd->lun); | 444 | sd->bus, sd->target, sd->lun); |
443 | return 0; | 445 | return 0; |
444 | } | 446 | } |
445 | 447 | ||
446 | static void | 448 | static void |
447 | cciss_scsi_remove_entry(int ctlr, int hostno, int entry, | 449 | cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry, |
448 | struct scsi2map *removed, int *nremoved) | 450 | struct scsi2map *removed, int *nremoved) |
449 | { | 451 | { |
450 | /* assumes hba[ctlr]->scsi_ctlr->lock is held */ | 452 | /* assumes h->scsi_ctlr->lock is held */ |
451 | int i; | 453 | int i; |
452 | struct cciss_scsi_dev_t sd; | 454 | struct cciss_scsi_dev_t sd; |
453 | 455 | ||
454 | if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return; | 456 | if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return; |
455 | sd = ccissscsi[ctlr].dev[entry]; | 457 | sd = ccissscsi[h->ctlr].dev[entry]; |
456 | removed[*nremoved].bus = sd.bus; | 458 | removed[*nremoved].bus = sd.bus; |
457 | removed[*nremoved].target = sd.target; | 459 | removed[*nremoved].target = sd.target; |
458 | removed[*nremoved].lun = sd.lun; | 460 | removed[*nremoved].lun = sd.lun; |
459 | (*nremoved)++; | 461 | (*nremoved)++; |
460 | for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++) | 462 | for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++) |
461 | ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1]; | 463 | ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1]; |
462 | ccissscsi[ctlr].ndevices--; | 464 | ccissscsi[h->ctlr].ndevices--; |
463 | printk("cciss%d: %s device c%db%dt%dl%d removed.\n", | 465 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", |
464 | ctlr, scsi_device_type(sd.devtype), hostno, | 466 | scsi_device_type(sd.devtype), hostno, |
465 | sd.bus, sd.target, sd.lun); | 467 | sd.bus, sd.target, sd.lun); |
466 | } | 468 | } |
467 | 469 | ||
@@ -476,24 +478,24 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry, | |||
476 | (a)[1] == (b)[1] && \ | 478 | (a)[1] == (b)[1] && \ |
477 | (a)[0] == (b)[0]) | 479 | (a)[0] == (b)[0]) |
478 | 480 | ||
479 | static void fixup_botched_add(int ctlr, char *scsi3addr) | 481 | static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr) |
480 | { | 482 | { |
481 | /* called when scsi_add_device fails in order to re-adjust */ | 483 | /* called when scsi_add_device fails in order to re-adjust */ |
482 | /* ccissscsi[] to match the mid layer's view. */ | 484 | /* ccissscsi[] to match the mid layer's view. */ |
483 | unsigned long flags; | 485 | unsigned long flags; |
484 | int i, j; | 486 | int i, j; |
485 | CPQ_TAPE_LOCK(ctlr, flags); | 487 | CPQ_TAPE_LOCK(h, flags); |
486 | for (i = 0; i < ccissscsi[ctlr].ndevices; i++) { | 488 | for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { |
487 | if (memcmp(scsi3addr, | 489 | if (memcmp(scsi3addr, |
488 | ccissscsi[ctlr].dev[i].scsi3addr, 8) == 0) { | 490 | ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) { |
489 | for (j = i; j < ccissscsi[ctlr].ndevices-1; j++) | 491 | for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++) |
490 | ccissscsi[ctlr].dev[j] = | 492 | ccissscsi[h->ctlr].dev[j] = |
491 | ccissscsi[ctlr].dev[j+1]; | 493 | ccissscsi[h->ctlr].dev[j+1]; |
492 | ccissscsi[ctlr].ndevices--; | 494 | ccissscsi[h->ctlr].ndevices--; |
493 | break; | 495 | break; |
494 | } | 496 | } |
495 | } | 497 | } |
496 | CPQ_TAPE_UNLOCK(ctlr, flags); | 498 | CPQ_TAPE_UNLOCK(h, flags); |
497 | } | 499 | } |
498 | 500 | ||
499 | static int device_is_the_same(struct cciss_scsi_dev_t *dev1, | 501 | static int device_is_the_same(struct cciss_scsi_dev_t *dev1, |
@@ -513,7 +515,7 @@ static int device_is_the_same(struct cciss_scsi_dev_t *dev1, | |||
513 | } | 515 | } |
514 | 516 | ||
515 | static int | 517 | static int |
516 | adjust_cciss_scsi_table(int ctlr, int hostno, | 518 | adjust_cciss_scsi_table(ctlr_info_t *h, int hostno, |
517 | struct cciss_scsi_dev_t sd[], int nsds) | 519 | struct cciss_scsi_dev_t sd[], int nsds) |
518 | { | 520 | { |
519 | /* sd contains scsi3 addresses and devtypes, but | 521 | /* sd contains scsi3 addresses and devtypes, but |
@@ -534,15 +536,15 @@ adjust_cciss_scsi_table(int ctlr, int hostno, | |||
534 | GFP_KERNEL); | 536 | GFP_KERNEL); |
535 | 537 | ||
536 | if (!added || !removed) { | 538 | if (!added || !removed) { |
537 | printk(KERN_WARNING "cciss%d: Out of memory in " | 539 | dev_warn(&h->pdev->dev, |
538 | "adjust_cciss_scsi_table\n", ctlr); | 540 | "Out of memory in adjust_cciss_scsi_table\n"); |
539 | goto free_and_out; | 541 | goto free_and_out; |
540 | } | 542 | } |
541 | 543 | ||
542 | CPQ_TAPE_LOCK(ctlr, flags); | 544 | CPQ_TAPE_LOCK(h, flags); |
543 | 545 | ||
544 | if (hostno != -1) /* if it's not the first time... */ | 546 | if (hostno != -1) /* if it's not the first time... */ |
545 | sh = hba[ctlr]->scsi_ctlr->scsi_host; | 547 | sh = h->scsi_ctlr->scsi_host; |
546 | 548 | ||
547 | /* find any devices in ccissscsi[] that are not in | 549 | /* find any devices in ccissscsi[] that are not in |
548 | sd[] and remove them from ccissscsi[] */ | 550 | sd[] and remove them from ccissscsi[] */ |
@@ -550,8 +552,8 @@ adjust_cciss_scsi_table(int ctlr, int hostno, | |||
550 | i = 0; | 552 | i = 0; |
551 | nremoved = 0; | 553 | nremoved = 0; |
552 | nadded = 0; | 554 | nadded = 0; |
553 | while(i<ccissscsi[ctlr].ndevices) { | 555 | while (i < ccissscsi[h->ctlr].ndevices) { |
554 | csd = &ccissscsi[ctlr].dev[i]; | 556 | csd = &ccissscsi[h->ctlr].dev[i]; |
555 | found=0; | 557 | found=0; |
556 | for (j=0;j<nsds;j++) { | 558 | for (j=0;j<nsds;j++) { |
557 | if (SCSI3ADDR_EQ(sd[j].scsi3addr, | 559 | if (SCSI3ADDR_EQ(sd[j].scsi3addr, |
@@ -566,20 +568,18 @@ adjust_cciss_scsi_table(int ctlr, int hostno, | |||
566 | 568 | ||
567 | if (found == 0) { /* device no longer present. */ | 569 | if (found == 0) { /* device no longer present. */ |
568 | changes++; | 570 | changes++; |
569 | /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n", | 571 | cciss_scsi_remove_entry(h, hostno, i, |
570 | ctlr, scsi_device_type(csd->devtype), hostno, | ||
571 | csd->bus, csd->target, csd->lun); */ | ||
572 | cciss_scsi_remove_entry(ctlr, hostno, i, | ||
573 | removed, &nremoved); | 572 | removed, &nremoved); |
574 | /* remove ^^^, hence i not incremented */ | 573 | /* remove ^^^, hence i not incremented */ |
575 | } else if (found == 1) { /* device is different in some way */ | 574 | } else if (found == 1) { /* device is different in some way */ |
576 | changes++; | 575 | changes++; |
577 | printk("cciss%d: device c%db%dt%dl%d has changed.\n", | 576 | dev_info(&h->pdev->dev, |
578 | ctlr, hostno, csd->bus, csd->target, csd->lun); | 577 | "device c%db%dt%dl%d has changed.\n", |
579 | cciss_scsi_remove_entry(ctlr, hostno, i, | 578 | hostno, csd->bus, csd->target, csd->lun); |
579 | cciss_scsi_remove_entry(h, hostno, i, | ||
580 | removed, &nremoved); | 580 | removed, &nremoved); |
581 | /* remove ^^^, hence i not incremented */ | 581 | /* remove ^^^, hence i not incremented */ |
582 | if (cciss_scsi_add_entry(ctlr, hostno, &sd[j], | 582 | if (cciss_scsi_add_entry(h, hostno, &sd[j], |
583 | added, &nadded) != 0) | 583 | added, &nadded) != 0) |
584 | /* we just removed one, so add can't fail. */ | 584 | /* we just removed one, so add can't fail. */ |
585 | BUG(); | 585 | BUG(); |
@@ -601,8 +601,8 @@ adjust_cciss_scsi_table(int ctlr, int hostno, | |||
601 | 601 | ||
602 | for (i=0;i<nsds;i++) { | 602 | for (i=0;i<nsds;i++) { |
603 | found=0; | 603 | found=0; |
604 | for (j=0;j<ccissscsi[ctlr].ndevices;j++) { | 604 | for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) { |
605 | csd = &ccissscsi[ctlr].dev[j]; | 605 | csd = &ccissscsi[h->ctlr].dev[j]; |
606 | if (SCSI3ADDR_EQ(sd[i].scsi3addr, | 606 | if (SCSI3ADDR_EQ(sd[i].scsi3addr, |
607 | csd->scsi3addr)) { | 607 | csd->scsi3addr)) { |
608 | if (device_is_the_same(&sd[i], csd)) | 608 | if (device_is_the_same(&sd[i], csd)) |
@@ -614,18 +614,18 @@ adjust_cciss_scsi_table(int ctlr, int hostno, | |||
614 | } | 614 | } |
615 | if (!found) { | 615 | if (!found) { |
616 | changes++; | 616 | changes++; |
617 | if (cciss_scsi_add_entry(ctlr, hostno, &sd[i], | 617 | if (cciss_scsi_add_entry(h, hostno, &sd[i], |
618 | added, &nadded) != 0) | 618 | added, &nadded) != 0) |
619 | break; | 619 | break; |
620 | } else if (found == 1) { | 620 | } else if (found == 1) { |
621 | /* should never happen... */ | 621 | /* should never happen... */ |
622 | changes++; | 622 | changes++; |
623 | printk(KERN_WARNING "cciss%d: device " | 623 | dev_warn(&h->pdev->dev, |
624 | "unexpectedly changed\n", ctlr); | 624 | "device unexpectedly changed\n"); |
625 | /* but if it does happen, we just ignore that device */ | 625 | /* but if it does happen, we just ignore that device */ |
626 | } | 626 | } |
627 | } | 627 | } |
628 | CPQ_TAPE_UNLOCK(ctlr, flags); | 628 | CPQ_TAPE_UNLOCK(h, flags); |
629 | 629 | ||
630 | /* Don't notify scsi mid layer of any changes the first time through */ | 630 | /* Don't notify scsi mid layer of any changes the first time through */ |
631 | /* (or if there are no changes) scsi_scan_host will do it later the */ | 631 | /* (or if there are no changes) scsi_scan_host will do it later the */ |
@@ -645,9 +645,9 @@ adjust_cciss_scsi_table(int ctlr, int hostno, | |||
645 | /* We don't expect to get here. */ | 645 | /* We don't expect to get here. */ |
646 | /* future cmds to this device will get selection */ | 646 | /* future cmds to this device will get selection */ |
647 | /* timeout as if the device was gone. */ | 647 | /* timeout as if the device was gone. */ |
648 | printk(KERN_WARNING "cciss%d: didn't find " | 648 | dev_warn(&h->pdev->dev, "didn't find " |
649 | "c%db%dt%dl%d\n for removal.", | 649 | "c%db%dt%dl%d\n for removal.", |
650 | ctlr, hostno, removed[i].bus, | 650 | hostno, removed[i].bus, |
651 | removed[i].target, removed[i].lun); | 651 | removed[i].target, removed[i].lun); |
652 | } | 652 | } |
653 | } | 653 | } |
@@ -659,13 +659,12 @@ adjust_cciss_scsi_table(int ctlr, int hostno, | |||
659 | added[i].target, added[i].lun); | 659 | added[i].target, added[i].lun); |
660 | if (rc == 0) | 660 | if (rc == 0) |
661 | continue; | 661 | continue; |
662 | printk(KERN_WARNING "cciss%d: scsi_add_device " | 662 | dev_warn(&h->pdev->dev, "scsi_add_device " |
663 | "c%db%dt%dl%d failed, device not added.\n", | 663 | "c%db%dt%dl%d failed, device not added.\n", |
664 | ctlr, hostno, | 664 | hostno, added[i].bus, added[i].target, added[i].lun); |
665 | added[i].bus, added[i].target, added[i].lun); | ||
666 | /* now we have to remove it from ccissscsi, */ | 665 | /* now we have to remove it from ccissscsi, */ |
667 | /* since it didn't get added to scsi mid layer */ | 666 | /* since it didn't get added to scsi mid layer */ |
668 | fixup_botched_add(ctlr, added[i].scsi3addr); | 667 | fixup_botched_add(h, added[i].scsi3addr); |
669 | } | 668 | } |
670 | 669 | ||
671 | free_and_out: | 670 | free_and_out: |
@@ -675,33 +674,33 @@ free_and_out: | |||
675 | } | 674 | } |
676 | 675 | ||
677 | static int | 676 | static int |
678 | lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr) | 677 | lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr) |
679 | { | 678 | { |
680 | int i; | 679 | int i; |
681 | struct cciss_scsi_dev_t *sd; | 680 | struct cciss_scsi_dev_t *sd; |
682 | unsigned long flags; | 681 | unsigned long flags; |
683 | 682 | ||
684 | CPQ_TAPE_LOCK(ctlr, flags); | 683 | CPQ_TAPE_LOCK(h, flags); |
685 | for (i=0;i<ccissscsi[ctlr].ndevices;i++) { | 684 | for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { |
686 | sd = &ccissscsi[ctlr].dev[i]; | 685 | sd = &ccissscsi[h->ctlr].dev[i]; |
687 | if (sd->bus == bus && | 686 | if (sd->bus == bus && |
688 | sd->target == target && | 687 | sd->target == target && |
689 | sd->lun == lun) { | 688 | sd->lun == lun) { |
690 | memcpy(scsi3addr, &sd->scsi3addr[0], 8); | 689 | memcpy(scsi3addr, &sd->scsi3addr[0], 8); |
691 | CPQ_TAPE_UNLOCK(ctlr, flags); | 690 | CPQ_TAPE_UNLOCK(h, flags); |
692 | return 0; | 691 | return 0; |
693 | } | 692 | } |
694 | } | 693 | } |
695 | CPQ_TAPE_UNLOCK(ctlr, flags); | 694 | CPQ_TAPE_UNLOCK(h, flags); |
696 | return -1; | 695 | return -1; |
697 | } | 696 | } |
698 | 697 | ||
699 | static void | 698 | static void |
700 | cciss_scsi_setup(int cntl_num) | 699 | cciss_scsi_setup(ctlr_info_t *h) |
701 | { | 700 | { |
702 | struct cciss_scsi_adapter_data_t * shba; | 701 | struct cciss_scsi_adapter_data_t * shba; |
703 | 702 | ||
704 | ccissscsi[cntl_num].ndevices = 0; | 703 | ccissscsi[h->ctlr].ndevices = 0; |
705 | shba = (struct cciss_scsi_adapter_data_t *) | 704 | shba = (struct cciss_scsi_adapter_data_t *) |
706 | kmalloc(sizeof(*shba), GFP_KERNEL); | 705 | kmalloc(sizeof(*shba), GFP_KERNEL); |
707 | if (shba == NULL) | 706 | if (shba == NULL) |
@@ -709,35 +708,35 @@ cciss_scsi_setup(int cntl_num) | |||
709 | shba->scsi_host = NULL; | 708 | shba->scsi_host = NULL; |
710 | spin_lock_init(&shba->lock); | 709 | spin_lock_init(&shba->lock); |
711 | shba->registered = 0; | 710 | shba->registered = 0; |
712 | if (scsi_cmd_stack_setup(cntl_num, shba) != 0) { | 711 | if (scsi_cmd_stack_setup(h, shba) != 0) { |
713 | kfree(shba); | 712 | kfree(shba); |
714 | shba = NULL; | 713 | shba = NULL; |
715 | } | 714 | } |
716 | hba[cntl_num]->scsi_ctlr = shba; | 715 | h->scsi_ctlr = shba; |
717 | return; | 716 | return; |
718 | } | 717 | } |
719 | 718 | ||
720 | static void | 719 | static void complete_scsi_command(CommandList_struct *c, int timeout, |
721 | complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag) | 720 | __u32 tag) |
722 | { | 721 | { |
723 | struct scsi_cmnd *cmd; | 722 | struct scsi_cmnd *cmd; |
724 | ctlr_info_t *ctlr; | 723 | ctlr_info_t *h; |
725 | ErrorInfo_struct *ei; | 724 | ErrorInfo_struct *ei; |
726 | 725 | ||
727 | ei = cp->err_info; | 726 | ei = c->err_info; |
728 | 727 | ||
729 | /* First, see if it was a message rather than a command */ | 728 | /* First, see if it was a message rather than a command */ |
730 | if (cp->Request.Type.Type == TYPE_MSG) { | 729 | if (c->Request.Type.Type == TYPE_MSG) { |
731 | cp->cmd_type = CMD_MSG_DONE; | 730 | c->cmd_type = CMD_MSG_DONE; |
732 | return; | 731 | return; |
733 | } | 732 | } |
734 | 733 | ||
735 | cmd = (struct scsi_cmnd *) cp->scsi_cmd; | 734 | cmd = (struct scsi_cmnd *) c->scsi_cmd; |
736 | ctlr = hba[cp->ctlr]; | 735 | h = hba[c->ctlr]; |
737 | 736 | ||
738 | scsi_dma_unmap(cmd); | 737 | scsi_dma_unmap(cmd); |
739 | if (cp->Header.SGTotal > ctlr->max_cmd_sgentries) | 738 | if (c->Header.SGTotal > h->max_cmd_sgentries) |
740 | cciss_unmap_sg_chain_block(ctlr, cp); | 739 | cciss_unmap_sg_chain_block(h, c); |
741 | 740 | ||
742 | cmd->result = (DID_OK << 16); /* host byte */ | 741 | cmd->result = (DID_OK << 16); /* host byte */ |
743 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ | 742 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ |
@@ -764,9 +763,8 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag) | |||
764 | { | 763 | { |
765 | #if 0 | 764 | #if 0 |
766 | printk(KERN_WARNING "cciss: cmd %p " | 765 | printk(KERN_WARNING "cciss: cmd %p " |
767 | "has SCSI Status = %x\n", | 766 | "has SCSI Status = %x\n", |
768 | cp, | 767 | c, ei->ScsiStatus); |
769 | ei->ScsiStatus); | ||
770 | #endif | 768 | #endif |
771 | cmd->result |= (ei->ScsiStatus << 1); | 769 | cmd->result |= (ei->ScsiStatus << 1); |
772 | } | 770 | } |
@@ -786,13 +784,13 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag) | |||
786 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ | 784 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
787 | break; | 785 | break; |
788 | case CMD_DATA_OVERRUN: | 786 | case CMD_DATA_OVERRUN: |
789 | printk(KERN_WARNING "cciss: cp %p has" | 787 | dev_warn(&h->pdev->dev, "%p has" |
790 | " completed with data overrun " | 788 | " completed with data overrun " |
791 | "reported\n", cp); | 789 | "reported\n", c); |
792 | break; | 790 | break; |
793 | case CMD_INVALID: { | 791 | case CMD_INVALID: { |
794 | /* print_bytes(cp, sizeof(*cp), 1, 0); | 792 | /* print_bytes(c, sizeof(*c), 1, 0); |
795 | print_cmd(cp); */ | 793 | print_cmd(c); */ |
796 | /* We get CMD_INVALID if you address a non-existent tape drive instead | 794 | /* We get CMD_INVALID if you address a non-existent tape drive instead |
797 | of a selection timeout (no response). You will see this if you yank | 795 | of a selection timeout (no response). You will see this if you yank |
798 | out a tape drive, then try to access it. This is kind of a shame | 796 | out a tape drive, then try to access it. This is kind of a shame |
@@ -802,54 +800,50 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag) | |||
802 | } | 800 | } |
803 | break; | 801 | break; |
804 | case CMD_PROTOCOL_ERR: | 802 | case CMD_PROTOCOL_ERR: |
805 | printk(KERN_WARNING "cciss: cp %p has " | 803 | dev_warn(&h->pdev->dev, |
806 | "protocol error \n", cp); | 804 | "%p has protocol error\n", c); |
807 | break; | 805 | break; |
808 | case CMD_HARDWARE_ERR: | 806 | case CMD_HARDWARE_ERR: |
809 | cmd->result = DID_ERROR << 16; | 807 | cmd->result = DID_ERROR << 16; |
810 | printk(KERN_WARNING "cciss: cp %p had " | 808 | dev_warn(&h->pdev->dev, |
811 | " hardware error\n", cp); | 809 | "%p had hardware error\n", c); |
812 | break; | 810 | break; |
813 | case CMD_CONNECTION_LOST: | 811 | case CMD_CONNECTION_LOST: |
814 | cmd->result = DID_ERROR << 16; | 812 | cmd->result = DID_ERROR << 16; |
815 | printk(KERN_WARNING "cciss: cp %p had " | 813 | dev_warn(&h->pdev->dev, |
816 | "connection lost\n", cp); | 814 | "%p had connection lost\n", c); |
817 | break; | 815 | break; |
818 | case CMD_ABORTED: | 816 | case CMD_ABORTED: |
819 | cmd->result = DID_ABORT << 16; | 817 | cmd->result = DID_ABORT << 16; |
820 | printk(KERN_WARNING "cciss: cp %p was " | 818 | dev_warn(&h->pdev->dev, "%p was aborted\n", c); |
821 | "aborted\n", cp); | ||
822 | break; | 819 | break; |
823 | case CMD_ABORT_FAILED: | 820 | case CMD_ABORT_FAILED: |
824 | cmd->result = DID_ERROR << 16; | 821 | cmd->result = DID_ERROR << 16; |
825 | printk(KERN_WARNING "cciss: cp %p reports " | 822 | dev_warn(&h->pdev->dev, |
826 | "abort failed\n", cp); | 823 | "%p reports abort failed\n", c); |
827 | break; | 824 | break; |
828 | case CMD_UNSOLICITED_ABORT: | 825 | case CMD_UNSOLICITED_ABORT: |
829 | cmd->result = DID_ABORT << 16; | 826 | cmd->result = DID_ABORT << 16; |
830 | printk(KERN_WARNING "cciss: cp %p aborted " | 827 | dev_warn(&h->pdev->dev, "%p aborted do to an " |
831 | "do to an unsolicited abort\n", cp); | 828 | "unsolicited abort\n", c); |
832 | break; | 829 | break; |
833 | case CMD_TIMEOUT: | 830 | case CMD_TIMEOUT: |
834 | cmd->result = DID_TIME_OUT << 16; | 831 | cmd->result = DID_TIME_OUT << 16; |
835 | printk(KERN_WARNING "cciss: cp %p timedout\n", | 832 | dev_warn(&h->pdev->dev, "%p timedout\n", c); |
836 | cp); | ||
837 | break; | 833 | break; |
838 | default: | 834 | default: |
839 | cmd->result = DID_ERROR << 16; | 835 | cmd->result = DID_ERROR << 16; |
840 | printk(KERN_WARNING "cciss: cp %p returned " | 836 | dev_warn(&h->pdev->dev, |
841 | "unknown status %x\n", cp, | 837 | "%p returned unknown status %x\n", c, |
842 | ei->CommandStatus); | 838 | ei->CommandStatus); |
843 | } | 839 | } |
844 | } | 840 | } |
845 | // printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel, | ||
846 | // cmd->target, cmd->lun); | ||
847 | cmd->scsi_done(cmd); | 841 | cmd->scsi_done(cmd); |
848 | scsi_cmd_free(ctlr, cp); | 842 | scsi_cmd_free(h, c); |
849 | } | 843 | } |
850 | 844 | ||
851 | static int | 845 | static int |
852 | cciss_scsi_detect(int ctlr) | 846 | cciss_scsi_detect(ctlr_info_t *h) |
853 | { | 847 | { |
854 | struct Scsi_Host *sh; | 848 | struct Scsi_Host *sh; |
855 | int error; | 849 | int error; |
@@ -860,14 +854,15 @@ cciss_scsi_detect(int ctlr) | |||
860 | sh->io_port = 0; // good enough? FIXME, | 854 | sh->io_port = 0; // good enough? FIXME, |
861 | sh->n_io_port = 0; // I don't think we use these two... | 855 | sh->n_io_port = 0; // I don't think we use these two... |
862 | sh->this_id = SELF_SCSI_ID; | 856 | sh->this_id = SELF_SCSI_ID; |
863 | sh->sg_tablesize = hba[ctlr]->maxsgentries; | 857 | sh->sg_tablesize = h->maxsgentries; |
858 | sh->max_cmd_len = MAX_COMMAND_SIZE; | ||
864 | 859 | ||
865 | ((struct cciss_scsi_adapter_data_t *) | 860 | ((struct cciss_scsi_adapter_data_t *) |
866 | hba[ctlr]->scsi_ctlr)->scsi_host = sh; | 861 | h->scsi_ctlr)->scsi_host = sh; |
867 | sh->hostdata[0] = (unsigned long) hba[ctlr]; | 862 | sh->hostdata[0] = (unsigned long) h; |
868 | sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT]; | 863 | sh->irq = h->intr[SIMPLE_MODE_INT]; |
869 | sh->unique_id = sh->irq; | 864 | sh->unique_id = sh->irq; |
870 | error = scsi_add_host(sh, &hba[ctlr]->pdev->dev); | 865 | error = scsi_add_host(sh, &h->pdev->dev); |
871 | if (error) | 866 | if (error) |
872 | goto fail_host_put; | 867 | goto fail_host_put; |
873 | scsi_scan_host(sh); | 868 | scsi_scan_host(sh); |
@@ -881,20 +876,20 @@ cciss_scsi_detect(int ctlr) | |||
881 | 876 | ||
882 | static void | 877 | static void |
883 | cciss_unmap_one(struct pci_dev *pdev, | 878 | cciss_unmap_one(struct pci_dev *pdev, |
884 | CommandList_struct *cp, | 879 | CommandList_struct *c, |
885 | size_t buflen, | 880 | size_t buflen, |
886 | int data_direction) | 881 | int data_direction) |
887 | { | 882 | { |
888 | u64bit addr64; | 883 | u64bit addr64; |
889 | 884 | ||
890 | addr64.val32.lower = cp->SG[0].Addr.lower; | 885 | addr64.val32.lower = c->SG[0].Addr.lower; |
891 | addr64.val32.upper = cp->SG[0].Addr.upper; | 886 | addr64.val32.upper = c->SG[0].Addr.upper; |
892 | pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction); | 887 | pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction); |
893 | } | 888 | } |
894 | 889 | ||
895 | static void | 890 | static void |
896 | cciss_map_one(struct pci_dev *pdev, | 891 | cciss_map_one(struct pci_dev *pdev, |
897 | CommandList_struct *cp, | 892 | CommandList_struct *c, |
898 | unsigned char *buf, | 893 | unsigned char *buf, |
899 | size_t buflen, | 894 | size_t buflen, |
900 | int data_direction) | 895 | int data_direction) |
@@ -902,164 +897,149 @@ cciss_map_one(struct pci_dev *pdev, | |||
902 | __u64 addr64; | 897 | __u64 addr64; |
903 | 898 | ||
904 | addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); | 899 | addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); |
905 | cp->SG[0].Addr.lower = | 900 | c->SG[0].Addr.lower = |
906 | (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); | 901 | (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); |
907 | cp->SG[0].Addr.upper = | 902 | c->SG[0].Addr.upper = |
908 | (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); | 903 | (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); |
909 | cp->SG[0].Len = buflen; | 904 | c->SG[0].Len = buflen; |
910 | cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ | 905 | c->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ |
911 | cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ | 906 | c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ |
912 | } | 907 | } |
913 | 908 | ||
914 | static int | 909 | static int |
915 | cciss_scsi_do_simple_cmd(ctlr_info_t *c, | 910 | cciss_scsi_do_simple_cmd(ctlr_info_t *h, |
916 | CommandList_struct *cp, | 911 | CommandList_struct *c, |
917 | unsigned char *scsi3addr, | 912 | unsigned char *scsi3addr, |
918 | unsigned char *cdb, | 913 | unsigned char *cdb, |
919 | unsigned char cdblen, | 914 | unsigned char cdblen, |
920 | unsigned char *buf, int bufsize, | 915 | unsigned char *buf, int bufsize, |
921 | int direction) | 916 | int direction) |
922 | { | 917 | { |
923 | unsigned long flags; | ||
924 | DECLARE_COMPLETION_ONSTACK(wait); | 918 | DECLARE_COMPLETION_ONSTACK(wait); |
925 | 919 | ||
926 | cp->cmd_type = CMD_IOCTL_PEND; // treat this like an ioctl | 920 | c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */ |
927 | cp->scsi_cmd = NULL; | 921 | c->scsi_cmd = NULL; |
928 | cp->Header.ReplyQueue = 0; // unused in simple mode | 922 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
929 | memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN)); | 923 | memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN)); |
930 | cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag | 924 | c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ |
931 | // Fill in the request block... | 925 | // Fill in the request block... |
932 | 926 | ||
933 | /* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", | 927 | /* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", |
934 | scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], | 928 | scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], |
935 | scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */ | 929 | scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */ |
936 | 930 | ||
937 | memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB)); | 931 | memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); |
938 | memcpy(cp->Request.CDB, cdb, cdblen); | 932 | memcpy(c->Request.CDB, cdb, cdblen); |
939 | cp->Request.Timeout = 0; | 933 | c->Request.Timeout = 0; |
940 | cp->Request.CDBLen = cdblen; | 934 | c->Request.CDBLen = cdblen; |
941 | cp->Request.Type.Type = TYPE_CMD; | 935 | c->Request.Type.Type = TYPE_CMD; |
942 | cp->Request.Type.Attribute = ATTR_SIMPLE; | 936 | c->Request.Type.Attribute = ATTR_SIMPLE; |
943 | cp->Request.Type.Direction = direction; | 937 | c->Request.Type.Direction = direction; |
944 | 938 | ||
945 | /* Fill in the SG list and do dma mapping */ | 939 | /* Fill in the SG list and do dma mapping */ |
946 | cciss_map_one(c->pdev, cp, (unsigned char *) buf, | 940 | cciss_map_one(h->pdev, c, (unsigned char *) buf, |
947 | bufsize, DMA_FROM_DEVICE); | 941 | bufsize, DMA_FROM_DEVICE); |
948 | 942 | ||
949 | cp->waiting = &wait; | 943 | c->waiting = &wait; |
950 | 944 | enqueue_cmd_and_start_io(h, c); | |
951 | /* Put the request on the tail of the request queue */ | ||
952 | spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); | ||
953 | addQ(&c->reqQ, cp); | ||
954 | c->Qdepth++; | ||
955 | start_io(c); | ||
956 | spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); | ||
957 | |||
958 | wait_for_completion(&wait); | 945 | wait_for_completion(&wait); |
959 | 946 | ||
960 | /* undo the dma mapping */ | 947 | /* undo the dma mapping */ |
961 | cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE); | 948 | cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE); |
962 | return(0); | 949 | return(0); |
963 | } | 950 | } |
964 | 951 | ||
965 | static void | 952 | static void |
966 | cciss_scsi_interpret_error(CommandList_struct *cp) | 953 | cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c) |
967 | { | 954 | { |
968 | ErrorInfo_struct *ei; | 955 | ErrorInfo_struct *ei; |
969 | 956 | ||
970 | ei = cp->err_info; | 957 | ei = c->err_info; |
971 | switch(ei->CommandStatus) | 958 | switch(ei->CommandStatus) |
972 | { | 959 | { |
973 | case CMD_TARGET_STATUS: | 960 | case CMD_TARGET_STATUS: |
974 | printk(KERN_WARNING "cciss: cmd %p has " | 961 | dev_warn(&h->pdev->dev, |
975 | "completed with errors\n", cp); | 962 | "cmd %p has completed with errors\n", c); |
976 | printk(KERN_WARNING "cciss: cmd %p " | 963 | dev_warn(&h->pdev->dev, |
977 | "has SCSI Status = %x\n", | 964 | "cmd %p has SCSI Status = %x\n", |
978 | cp, | 965 | c, ei->ScsiStatus); |
979 | ei->ScsiStatus); | ||
980 | if (ei->ScsiStatus == 0) | 966 | if (ei->ScsiStatus == 0) |
981 | printk(KERN_WARNING | 967 | dev_warn(&h->pdev->dev, |
982 | "cciss:SCSI status is abnormally zero. " | 968 | "SCSI status is abnormally zero. " |
983 | "(probably indicates selection timeout " | 969 | "(probably indicates selection timeout " |
984 | "reported incorrectly due to a known " | 970 | "reported incorrectly due to a known " |
985 | "firmware bug, circa July, 2001.)\n"); | 971 | "firmware bug, circa July, 2001.)\n"); |
986 | break; | 972 | break; |
987 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ | 973 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
988 | printk("UNDERRUN\n"); | 974 | dev_info(&h->pdev->dev, "UNDERRUN\n"); |
989 | break; | 975 | break; |
990 | case CMD_DATA_OVERRUN: | 976 | case CMD_DATA_OVERRUN: |
991 | printk(KERN_WARNING "cciss: cp %p has" | 977 | dev_warn(&h->pdev->dev, "%p has" |
992 | " completed with data overrun " | 978 | " completed with data overrun " |
993 | "reported\n", cp); | 979 | "reported\n", c); |
994 | break; | 980 | break; |
995 | case CMD_INVALID: { | 981 | case CMD_INVALID: { |
996 | /* controller unfortunately reports SCSI passthru's */ | 982 | /* controller unfortunately reports SCSI passthru's */ |
997 | /* to non-existent targets as invalid commands. */ | 983 | /* to non-existent targets as invalid commands. */ |
998 | printk(KERN_WARNING "cciss: cp %p is " | 984 | dev_warn(&h->pdev->dev, |
999 | "reported invalid (probably means " | 985 | "%p is reported invalid (probably means " |
1000 | "target device no longer present)\n", | 986 | "target device no longer present)\n", c); |
1001 | cp); | 987 | /* print_bytes((unsigned char *) c, sizeof(*c), 1, 0); |
1002 | /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); | 988 | print_cmd(c); */ |
1003 | print_cmd(cp); */ | ||
1004 | } | 989 | } |
1005 | break; | 990 | break; |
1006 | case CMD_PROTOCOL_ERR: | 991 | case CMD_PROTOCOL_ERR: |
1007 | printk(KERN_WARNING "cciss: cp %p has " | 992 | dev_warn(&h->pdev->dev, "%p has protocol error\n", c); |
1008 | "protocol error \n", cp); | ||
1009 | break; | 993 | break; |
1010 | case CMD_HARDWARE_ERR: | 994 | case CMD_HARDWARE_ERR: |
1011 | /* cmd->result = DID_ERROR << 16; */ | 995 | /* cmd->result = DID_ERROR << 16; */ |
1012 | printk(KERN_WARNING "cciss: cp %p had " | 996 | dev_warn(&h->pdev->dev, "%p had hardware error\n", c); |
1013 | " hardware error\n", cp); | ||
1014 | break; | 997 | break; |
1015 | case CMD_CONNECTION_LOST: | 998 | case CMD_CONNECTION_LOST: |
1016 | printk(KERN_WARNING "cciss: cp %p had " | 999 | dev_warn(&h->pdev->dev, "%p had connection lost\n", c); |
1017 | "connection lost\n", cp); | ||
1018 | break; | 1000 | break; |
1019 | case CMD_ABORTED: | 1001 | case CMD_ABORTED: |
1020 | printk(KERN_WARNING "cciss: cp %p was " | 1002 | dev_warn(&h->pdev->dev, "%p was aborted\n", c); |
1021 | "aborted\n", cp); | ||
1022 | break; | 1003 | break; |
1023 | case CMD_ABORT_FAILED: | 1004 | case CMD_ABORT_FAILED: |
1024 | printk(KERN_WARNING "cciss: cp %p reports " | 1005 | dev_warn(&h->pdev->dev, |
1025 | "abort failed\n", cp); | 1006 | "%p reports abort failed\n", c); |
1026 | break; | 1007 | break; |
1027 | case CMD_UNSOLICITED_ABORT: | 1008 | case CMD_UNSOLICITED_ABORT: |
1028 | printk(KERN_WARNING "cciss: cp %p aborted " | 1009 | dev_warn(&h->pdev->dev, |
1029 | "do to an unsolicited abort\n", cp); | 1010 | "%p aborted do to an unsolicited abort\n", c); |
1030 | break; | 1011 | break; |
1031 | case CMD_TIMEOUT: | 1012 | case CMD_TIMEOUT: |
1032 | printk(KERN_WARNING "cciss: cp %p timedout\n", | 1013 | dev_warn(&h->pdev->dev, "%p timedout\n", c); |
1033 | cp); | ||
1034 | break; | 1014 | break; |
1035 | default: | 1015 | default: |
1036 | printk(KERN_WARNING "cciss: cp %p returned " | 1016 | dev_warn(&h->pdev->dev, |
1037 | "unknown status %x\n", cp, | 1017 | "%p returned unknown status %x\n", |
1038 | ei->CommandStatus); | 1018 | c, ei->CommandStatus); |
1039 | } | 1019 | } |
1040 | } | 1020 | } |
1041 | 1021 | ||
1042 | static int | 1022 | static int |
1043 | cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, | 1023 | cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr, |
1044 | unsigned char page, unsigned char *buf, | 1024 | unsigned char page, unsigned char *buf, |
1045 | unsigned char bufsize) | 1025 | unsigned char bufsize) |
1046 | { | 1026 | { |
1047 | int rc; | 1027 | int rc; |
1048 | CommandList_struct *cp; | 1028 | CommandList_struct *c; |
1049 | char cdb[6]; | 1029 | char cdb[6]; |
1050 | ErrorInfo_struct *ei; | 1030 | ErrorInfo_struct *ei; |
1051 | unsigned long flags; | 1031 | unsigned long flags; |
1052 | 1032 | ||
1053 | spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); | 1033 | spin_lock_irqsave(&h->lock, flags); |
1054 | cp = scsi_cmd_alloc(c); | 1034 | c = scsi_cmd_alloc(h); |
1055 | spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); | 1035 | spin_unlock_irqrestore(&h->lock, flags); |
1056 | 1036 | ||
1057 | if (cp == NULL) { /* trouble... */ | 1037 | if (c == NULL) { /* trouble... */ |
1058 | printk("cmd_alloc returned NULL!\n"); | 1038 | printk("cmd_alloc returned NULL!\n"); |
1059 | return -1; | 1039 | return -1; |
1060 | } | 1040 | } |
1061 | 1041 | ||
1062 | ei = cp->err_info; | 1042 | ei = c->err_info; |
1063 | 1043 | ||
1064 | cdb[0] = CISS_INQUIRY; | 1044 | cdb[0] = CISS_INQUIRY; |
1065 | cdb[1] = (page != 0); | 1045 | cdb[1] = (page != 0); |
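
The conversion of cciss_scsi_do_simple_cmd() above folds the removed "lock, addQ, Qdepth++, start_io, unlock" sequence into a single enqueue_cmd_and_start_io() call. Its definition lives in cciss.c and is not shown in this diff; assuming it mirrors the removed lines under the new h->lock naming, it amounts to:

    /* Hedged sketch of enqueue_cmd_and_start_io(), reconstructed from the
     * removed open-coded sequence; the authoritative version is in cciss.c. */
    static void enqueue_cmd_and_start_io(ctlr_info_t *h, CommandList_struct *c)
    {
            unsigned long flags;

            spin_lock_irqsave(&h->lock, flags);
            addQ(&h->reqQ, c);
            h->Qdepth++;
            start_io(h);
            spin_unlock_irqrestore(&h->lock, flags);
    }
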
@@ -1067,24 +1047,24 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, | |||
1067 | cdb[3] = 0; | 1047 | cdb[3] = 0; |
1068 | cdb[4] = bufsize; | 1048 | cdb[4] = bufsize; |
1069 | cdb[5] = 0; | 1049 | cdb[5] = 0; |
1070 | rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb, | 1050 | rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb, |
1071 | 6, buf, bufsize, XFER_READ); | 1051 | 6, buf, bufsize, XFER_READ); |
1072 | 1052 | ||
1073 | if (rc != 0) return rc; /* something went wrong */ | 1053 | if (rc != 0) return rc; /* something went wrong */ |
1074 | 1054 | ||
1075 | if (ei->CommandStatus != 0 && | 1055 | if (ei->CommandStatus != 0 && |
1076 | ei->CommandStatus != CMD_DATA_UNDERRUN) { | 1056 | ei->CommandStatus != CMD_DATA_UNDERRUN) { |
1077 | cciss_scsi_interpret_error(cp); | 1057 | cciss_scsi_interpret_error(h, c); |
1078 | rc = -1; | 1058 | rc = -1; |
1079 | } | 1059 | } |
1080 | spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); | 1060 | spin_lock_irqsave(&h->lock, flags); |
1081 | scsi_cmd_free(c, cp); | 1061 | scsi_cmd_free(h, c); |
1082 | spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); | 1062 | spin_unlock_irqrestore(&h->lock, flags); |
1083 | return rc; | 1063 | return rc; |
1084 | } | 1064 | } |
1085 | 1065 | ||
1086 | /* Get the device id from inquiry page 0x83 */ | 1066 | /* Get the device id from inquiry page 0x83 */ |
1087 | static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr, | 1067 | static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr, |
1088 | unsigned char *device_id, int buflen) | 1068 | unsigned char *device_id, int buflen) |
1089 | { | 1069 | { |
1090 | int rc; | 1070 | int rc; |
@@ -1095,7 +1075,7 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr, | |||
1095 | buf = kzalloc(64, GFP_KERNEL); | 1075 | buf = kzalloc(64, GFP_KERNEL); |
1096 | if (!buf) | 1076 | if (!buf) |
1097 | return -1; | 1077 | return -1; |
1098 | rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64); | 1078 | rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); |
1099 | if (rc == 0) | 1079 | if (rc == 0) |
1100 | memcpy(device_id, &buf[8], buflen); | 1080 | memcpy(device_id, &buf[8], buflen); |
1101 | kfree(buf); | 1081 | kfree(buf); |
@@ -1103,20 +1083,20 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr, | |||
1103 | } | 1083 | } |
1104 | 1084 | ||
1105 | static int | 1085 | static int |
1106 | cciss_scsi_do_report_phys_luns(ctlr_info_t *c, | 1086 | cciss_scsi_do_report_phys_luns(ctlr_info_t *h, |
1107 | ReportLunData_struct *buf, int bufsize) | 1087 | ReportLunData_struct *buf, int bufsize) |
1108 | { | 1088 | { |
1109 | int rc; | 1089 | int rc; |
1110 | CommandList_struct *cp; | 1090 | CommandList_struct *c; |
1111 | unsigned char cdb[12]; | 1091 | unsigned char cdb[12]; |
1112 | unsigned char scsi3addr[8]; | 1092 | unsigned char scsi3addr[8]; |
1113 | ErrorInfo_struct *ei; | 1093 | ErrorInfo_struct *ei; |
1114 | unsigned long flags; | 1094 | unsigned long flags; |
1115 | 1095 | ||
1116 | spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); | 1096 | spin_lock_irqsave(&h->lock, flags); |
1117 | cp = scsi_cmd_alloc(c); | 1097 | c = scsi_cmd_alloc(h); |
1118 | spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); | 1098 | spin_unlock_irqrestore(&h->lock, flags); |
1119 | if (cp == NULL) { /* trouble... */ | 1099 | if (c == NULL) { /* trouble... */ |
1120 | printk("cmd_alloc returned NULL!\n"); | 1100 | printk("cmd_alloc returned NULL!\n"); |
1121 | return -1; | 1101 | return -1; |
1122 | } | 1102 | } |
@@ -1135,27 +1115,27 @@ cciss_scsi_do_report_phys_luns(ctlr_info_t *c, | |||
1135 | cdb[10] = 0; | 1115 | cdb[10] = 0; |
1136 | cdb[11] = 0; | 1116 | cdb[11] = 0; |
1137 | 1117 | ||
1138 | rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, | 1118 | rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, |
1139 | cdb, 12, | 1119 | cdb, 12, |
1140 | (unsigned char *) buf, | 1120 | (unsigned char *) buf, |
1141 | bufsize, XFER_READ); | 1121 | bufsize, XFER_READ); |
1142 | 1122 | ||
1143 | if (rc != 0) return rc; /* something went wrong */ | 1123 | if (rc != 0) return rc; /* something went wrong */ |
1144 | 1124 | ||
1145 | ei = cp->err_info; | 1125 | ei = c->err_info; |
1146 | if (ei->CommandStatus != 0 && | 1126 | if (ei->CommandStatus != 0 && |
1147 | ei->CommandStatus != CMD_DATA_UNDERRUN) { | 1127 | ei->CommandStatus != CMD_DATA_UNDERRUN) { |
1148 | cciss_scsi_interpret_error(cp); | 1128 | cciss_scsi_interpret_error(h, c); |
1149 | rc = -1; | 1129 | rc = -1; |
1150 | } | 1130 | } |
1151 | spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); | 1131 | spin_lock_irqsave(&h->lock, flags); |
1152 | scsi_cmd_free(c, cp); | 1132 | scsi_cmd_free(h, c); |
1153 | spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); | 1133 | spin_unlock_irqrestore(&h->lock, flags); |
1154 | return rc; | 1134 | return rc; |
1155 | } | 1135 | } |
1156 | 1136 | ||
1157 | static void | 1137 | static void |
1158 | cciss_update_non_disk_devices(int cntl_num, int hostno) | 1138 | cciss_update_non_disk_devices(ctlr_info_t *h, int hostno) |
1159 | { | 1139 | { |
1160 | /* the idea here is we could get notified from /proc | 1140 | /* the idea here is we could get notified from /proc |
1161 | that some devices have changed, so we do a report | 1141 | that some devices have changed, so we do a report |
@@ -1188,7 +1168,6 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) | |||
1188 | ReportLunData_struct *ld_buff; | 1168 | ReportLunData_struct *ld_buff; |
1189 | unsigned char *inq_buff; | 1169 | unsigned char *inq_buff; |
1190 | unsigned char scsi3addr[8]; | 1170 | unsigned char scsi3addr[8]; |
1191 | ctlr_info_t *c; | ||
1192 | __u32 num_luns=0; | 1171 | __u32 num_luns=0; |
1193 | unsigned char *ch; | 1172 | unsigned char *ch; |
1194 | struct cciss_scsi_dev_t *currentsd, *this_device; | 1173 | struct cciss_scsi_dev_t *currentsd, *this_device; |
@@ -1196,7 +1175,6 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) | |||
1196 | int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8; | 1175 | int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8; |
1197 | int i; | 1176 | int i; |
1198 | 1177 | ||
1199 | c = (ctlr_info_t *) hba[cntl_num]; | ||
1200 | ld_buff = kzalloc(reportlunsize, GFP_KERNEL); | 1178 | ld_buff = kzalloc(reportlunsize, GFP_KERNEL); |
1201 | inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); | 1179 | inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); |
1202 | currentsd = kzalloc(sizeof(*currentsd) * | 1180 | currentsd = kzalloc(sizeof(*currentsd) * |
@@ -1206,7 +1184,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) | |||
1206 | goto out; | 1184 | goto out; |
1207 | } | 1185 | } |
1208 | this_device = ¤tsd[CCISS_MAX_SCSI_DEVS_PER_HBA]; | 1186 | this_device = ¤tsd[CCISS_MAX_SCSI_DEVS_PER_HBA]; |
1209 | if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) { | 1187 | if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) { |
1210 | ch = &ld_buff->LUNListLength[0]; | 1188 | ch = &ld_buff->LUNListLength[0]; |
1211 | num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8; | 1189 | num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8; |
1212 | if (num_luns > CISS_MAX_PHYS_LUN) { | 1190 | if (num_luns > CISS_MAX_PHYS_LUN) { |
@@ -1230,7 +1208,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) | |||
1230 | memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); | 1208 | memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); |
1231 | memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); | 1209 | memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); |
1232 | 1210 | ||
1233 | if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff, | 1211 | if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, |
1234 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) | 1212 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) |
1235 | /* Inquiry failed (msg printed already) */ | 1213 | /* Inquiry failed (msg printed already) */ |
1236 | continue; /* so we will skip this device. */ | 1214 | continue; /* so we will skip this device. */ |
@@ -1248,7 +1226,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) | |||
1248 | sizeof(this_device->revision)); | 1226 | sizeof(this_device->revision)); |
1249 | memset(this_device->device_id, 0, | 1227 | memset(this_device->device_id, 0, |
1250 | sizeof(this_device->device_id)); | 1228 | sizeof(this_device->device_id)); |
1251 | cciss_scsi_get_device_id(hba[cntl_num], scsi3addr, | 1229 | cciss_scsi_get_device_id(h, scsi3addr, |
1252 | this_device->device_id, sizeof(this_device->device_id)); | 1230 | this_device->device_id, sizeof(this_device->device_id)); |
1253 | 1231 | ||
1254 | switch (this_device->devtype) | 1232 | switch (this_device->devtype) |
@@ -1275,7 +1253,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) | |||
1275 | case 0x08: /* medium changer */ | 1253 | case 0x08: /* medium changer */ |
1276 | if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { | 1254 | if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { |
1277 | printk(KERN_INFO "cciss%d: %s ignored, " | 1255 | printk(KERN_INFO "cciss%d: %s ignored, " |
1278 | "too many devices.\n", cntl_num, | 1256 | "too many devices.\n", h->ctlr, |
1279 | scsi_device_type(this_device->devtype)); | 1257 | scsi_device_type(this_device->devtype)); |
1280 | break; | 1258 | break; |
1281 | } | 1259 | } |
@@ -1287,7 +1265,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) | |||
1287 | } | 1265 | } |
1288 | } | 1266 | } |
1289 | 1267 | ||
1290 | adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent); | 1268 | adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent); |
1291 | out: | 1269 | out: |
1292 | kfree(inq_buff); | 1270 | kfree(inq_buff); |
1293 | kfree(ld_buff); | 1271 | kfree(ld_buff); |
@@ -1306,12 +1284,12 @@ is_keyword(char *ptr, int len, char *verb) // Thanks to ncr53c8xx.c | |||
1306 | } | 1284 | } |
1307 | 1285 | ||
1308 | static int | 1286 | static int |
1309 | cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length) | 1287 | cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length) |
1310 | { | 1288 | { |
1311 | int arg_len; | 1289 | int arg_len; |
1312 | 1290 | ||
1313 | if ((arg_len = is_keyword(buffer, length, "rescan")) != 0) | 1291 | if ((arg_len = is_keyword(buffer, length, "rescan")) != 0) |
1314 | cciss_update_non_disk_devices(ctlr, hostno); | 1292 | cciss_update_non_disk_devices(h, hostno); |
1315 | else | 1293 | else |
1316 | return -EINVAL; | 1294 | return -EINVAL; |
1317 | return length; | 1295 | return length; |
@@ -1328,20 +1306,16 @@ cciss_scsi_proc_info(struct Scsi_Host *sh, | |||
1328 | { | 1306 | { |
1329 | 1307 | ||
1330 | int buflen, datalen; | 1308 | int buflen, datalen; |
1331 | ctlr_info_t *ci; | 1309 | ctlr_info_t *h; |
1332 | int i; | 1310 | int i; |
1333 | int cntl_num; | ||
1334 | 1311 | ||
1335 | 1312 | h = (ctlr_info_t *) sh->hostdata[0]; | |
1336 | ci = (ctlr_info_t *) sh->hostdata[0]; | 1313 | if (h == NULL) /* This really shouldn't ever happen. */ |
1337 | if (ci == NULL) /* This really shouldn't ever happen. */ | ||
1338 | return -EINVAL; | 1314 | return -EINVAL; |
1339 | 1315 | ||
1340 | cntl_num = ci->ctlr; /* Get our index into the hba[] array */ | ||
1341 | |||
1342 | if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */ | 1316 | if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */ |
1343 | buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n", | 1317 | buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n", |
1344 | cntl_num, sh->host_no); | 1318 | h->ctlr, sh->host_no); |
1345 | 1319 | ||
1346 | /* this information is needed by apps to know which cciss | 1320 | /* this information is needed by apps to know which cciss |
1347 | device corresponds to which scsi host number without | 1321 | device corresponds to which scsi host number without |
@@ -1351,8 +1325,9 @@ cciss_scsi_proc_info(struct Scsi_Host *sh, | |||
1351 | this info is for an app to be able to use to know how to | 1325 | this info is for an app to be able to use to know how to |
1352 | get them back in sync. */ | 1326 | get them back in sync. */ |
1353 | 1327 | ||
1354 | for (i=0;i<ccissscsi[cntl_num].ndevices;i++) { | 1328 | for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { |
1355 | struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i]; | 1329 | struct cciss_scsi_dev_t *sd = |
1330 | &ccissscsi[h->ctlr].dev[i]; | ||
1356 | buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d " | 1331 | buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d " |
1357 | "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | 1332 | "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", |
1358 | sh->host_no, sd->bus, sd->target, sd->lun, | 1333 | sh->host_no, sd->bus, sd->target, sd->lun, |
@@ -1370,15 +1345,15 @@ cciss_scsi_proc_info(struct Scsi_Host *sh, | |||
1370 | *start = buffer + offset; | 1345 | *start = buffer + offset; |
1371 | return(datalen); | 1346 | return(datalen); |
1372 | } else /* User is writing to /proc/scsi/cciss*?/?* ... */ | 1347 | } else /* User is writing to /proc/scsi/cciss*?/?* ... */ |
1373 | return cciss_scsi_user_command(cntl_num, sh->host_no, | 1348 | return cciss_scsi_user_command(h, sh->host_no, |
1374 | buffer, length); | 1349 | buffer, length); |
1375 | } | 1350 | } |
1376 | 1351 | ||
1377 | /* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci | 1352 | /* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci |
1378 | dma mapping and fills in the scatter gather entries of the | 1353 | dma mapping and fills in the scatter gather entries of the |
1379 | cciss command, cp. */ | 1354 | cciss command, c. */ |
1380 | 1355 | ||
1381 | static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp, | 1356 | static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c, |
1382 | struct scsi_cmnd *cmd) | 1357 | struct scsi_cmnd *cmd) |
1383 | { | 1358 | { |
1384 | unsigned int len; | 1359 | unsigned int len; |
@@ -1392,7 +1367,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp, | |||
1392 | 1367 | ||
1393 | chained = 0; | 1368 | chained = 0; |
1394 | sg_index = 0; | 1369 | sg_index = 0; |
1395 | curr_sg = cp->SG; | 1370 | curr_sg = c->SG; |
1396 | request_nsgs = scsi_dma_map(cmd); | 1371 | request_nsgs = scsi_dma_map(cmd); |
1397 | if (request_nsgs) { | 1372 | if (request_nsgs) { |
1398 | scsi_for_each_sg(cmd, sg, request_nsgs, i) { | 1373 | scsi_for_each_sg(cmd, sg, request_nsgs, i) { |
@@ -1400,7 +1375,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp, | |||
1400 | !chained && request_nsgs - i > 1) { | 1375 | !chained && request_nsgs - i > 1) { |
1401 | chained = 1; | 1376 | chained = 1; |
1402 | sg_index = 0; | 1377 | sg_index = 0; |
1403 | curr_sg = sa->cmd_sg_list[cp->cmdindex]; | 1378 | curr_sg = sa->cmd_sg_list[c->cmdindex]; |
1404 | } | 1379 | } |
1405 | addr64 = (__u64) sg_dma_address(sg); | 1380 | addr64 = (__u64) sg_dma_address(sg); |
1406 | len = sg_dma_len(sg); | 1381 | len = sg_dma_len(sg); |
@@ -1413,19 +1388,19 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp, | |||
1413 | ++sg_index; | 1388 | ++sg_index; |
1414 | } | 1389 | } |
1415 | if (chained) | 1390 | if (chained) |
1416 | cciss_map_sg_chain_block(h, cp, | 1391 | cciss_map_sg_chain_block(h, c, |
1417 | sa->cmd_sg_list[cp->cmdindex], | 1392 | sa->cmd_sg_list[c->cmdindex], |
1418 | (request_nsgs - (h->max_cmd_sgentries - 1)) * | 1393 | (request_nsgs - (h->max_cmd_sgentries - 1)) * |
1419 | sizeof(SGDescriptor_struct)); | 1394 | sizeof(SGDescriptor_struct)); |
1420 | } | 1395 | } |
1421 | /* track how many SG entries we are using */ | 1396 | /* track how many SG entries we are using */ |
1422 | if (request_nsgs > h->maxSG) | 1397 | if (request_nsgs > h->maxSG) |
1423 | h->maxSG = request_nsgs; | 1398 | h->maxSG = request_nsgs; |
1424 | cp->Header.SGTotal = (__u8) request_nsgs + chained; | 1399 | c->Header.SGTotal = (__u8) request_nsgs + chained; |
1425 | if (request_nsgs > h->max_cmd_sgentries) | 1400 | if (request_nsgs > h->max_cmd_sgentries) |
1426 | cp->Header.SGList = h->max_cmd_sgentries; | 1401 | c->Header.SGList = h->max_cmd_sgentries; |
1427 | else | 1402 | else |
1428 | cp->Header.SGList = cp->Header.SGTotal; | 1403 | c->Header.SGList = c->Header.SGTotal; |
1429 | return; | 1404 | return; |
1430 | } | 1405 | } |
1431 | 1406 | ||
@@ -1433,18 +1408,17 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp, | |||
1433 | static int | 1408 | static int |
1434 | cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | 1409 | cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) |
1435 | { | 1410 | { |
1436 | ctlr_info_t *c; | 1411 | ctlr_info_t *h; |
1437 | int ctlr, rc; | 1412 | int rc; |
1438 | unsigned char scsi3addr[8]; | 1413 | unsigned char scsi3addr[8]; |
1439 | CommandList_struct *cp; | 1414 | CommandList_struct *c; |
1440 | unsigned long flags; | 1415 | unsigned long flags; |
1441 | 1416 | ||
1442 | // Get the ptr to our adapter structure (hba[i]) out of cmd->host. | 1417 | // Get the ptr to our adapter structure (hba[i]) out of cmd->host. |
1443 | // We violate cmd->host privacy here. (Is there another way?) | 1418 | // We violate cmd->host privacy here. (Is there another way?) |
1444 | c = (ctlr_info_t *) cmd->device->host->hostdata[0]; | 1419 | h = (ctlr_info_t *) cmd->device->host->hostdata[0]; |
1445 | ctlr = c->ctlr; | ||
1446 | 1420 | ||
1447 | rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, | 1421 | rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id, |
1448 | cmd->device->lun, scsi3addr); | 1422 | cmd->device->lun, scsi3addr); |
1449 | if (rc != 0) { | 1423 | if (rc != 0) { |
1450 | /* the scsi nexus does not match any that we presented... */ | 1424 | /* the scsi nexus does not match any that we presented... */ |
@@ -1456,19 +1430,14 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd | |||
1456 | return 0; | 1430 | return 0; |
1457 | } | 1431 | } |
1458 | 1432 | ||
1459 | /* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n", | ||
1460 | cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/ | ||
1461 | // printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel, | ||
1462 | // cmd->target, cmd->lun); | ||
1463 | |||
1464 | /* Ok, we have a reasonable scsi nexus, so send the cmd down, and | 1433 | /* Ok, we have a reasonable scsi nexus, so send the cmd down, and |
1465 | see what the device thinks of it. */ | 1434 | see what the device thinks of it. */ |
1466 | 1435 | ||
1467 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1436 | spin_lock_irqsave(&h->lock, flags); |
1468 | cp = scsi_cmd_alloc(c); | 1437 | c = scsi_cmd_alloc(h); |
1469 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1438 | spin_unlock_irqrestore(&h->lock, flags); |
1470 | if (cp == NULL) { /* trouble... */ | 1439 | if (c == NULL) { /* trouble... */ |
1471 | printk("scsi_cmd_alloc returned NULL!\n"); | 1440 | dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n"); |
1472 | /* FIXME: next 3 lines are -> BAD! <- */ | 1441 | /* FIXME: next 3 lines are -> BAD! <- */ |
1473 | cmd->result = DID_NO_CONNECT << 16; | 1442 | cmd->result = DID_NO_CONNECT << 16; |
1474 | done(cmd); | 1443 | done(cmd); |
@@ -1479,35 +1448,41 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd | |||
1479 | 1448 | ||
1480 | cmd->scsi_done = done; // save this for use by completion code | 1449 | cmd->scsi_done = done; // save this for use by completion code |
1481 | 1450 | ||
1482 | // save cp in case we have to abort it | 1451 | /* save c in case we have to abort it */ |
1483 | cmd->host_scribble = (unsigned char *) cp; | 1452 | cmd->host_scribble = (unsigned char *) c; |
1484 | 1453 | ||
1485 | cp->cmd_type = CMD_SCSI; | 1454 | c->cmd_type = CMD_SCSI; |
1486 | cp->scsi_cmd = cmd; | 1455 | c->scsi_cmd = cmd; |
1487 | cp->Header.ReplyQueue = 0; // unused in simple mode | 1456 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
1488 | memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); | 1457 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); |
1489 | cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag | 1458 | c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ |
1490 | 1459 | ||
1491 | // Fill in the request block... | 1460 | // Fill in the request block... |
1492 | 1461 | ||
1493 | cp->Request.Timeout = 0; | 1462 | c->Request.Timeout = 0; |
1494 | memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB)); | 1463 | memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); |
1495 | BUG_ON(cmd->cmd_len > sizeof(cp->Request.CDB)); | 1464 | BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); |
1496 | cp->Request.CDBLen = cmd->cmd_len; | 1465 | c->Request.CDBLen = cmd->cmd_len; |
1497 | memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len); | 1466 | memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); |
1498 | cp->Request.Type.Type = TYPE_CMD; | 1467 | c->Request.Type.Type = TYPE_CMD; |
1499 | cp->Request.Type.Attribute = ATTR_SIMPLE; | 1468 | c->Request.Type.Attribute = ATTR_SIMPLE; |
1500 | switch(cmd->sc_data_direction) | 1469 | switch(cmd->sc_data_direction) |
1501 | { | 1470 | { |
1502 | case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break; | 1471 | case DMA_TO_DEVICE: |
1503 | case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break; | 1472 | c->Request.Type.Direction = XFER_WRITE; |
1504 | case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break; | 1473 | break; |
1474 | case DMA_FROM_DEVICE: | ||
1475 | c->Request.Type.Direction = XFER_READ; | ||
1476 | break; | ||
1477 | case DMA_NONE: | ||
1478 | c->Request.Type.Direction = XFER_NONE; | ||
1479 | break; | ||
1505 | case DMA_BIDIRECTIONAL: | 1480 | case DMA_BIDIRECTIONAL: |
1506 | // This can happen if a buggy application does a scsi passthru | 1481 | // This can happen if a buggy application does a scsi passthru |
1507 | // and sets both inlen and outlen to non-zero. ( see | 1482 | // and sets both inlen and outlen to non-zero. ( see |
1508 | // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) | 1483 | // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) |
1509 | 1484 | ||
1510 | cp->Request.Type.Direction = XFER_RSVD; | 1485 | c->Request.Type.Direction = XFER_RSVD; |
1511 | // This is technically wrong, and cciss controllers should | 1486 | // This is technically wrong, and cciss controllers should |
1512 | // reject it with CMD_INVALID, which is the most correct | 1487 | // reject it with CMD_INVALID, which is the most correct |
1513 | // response, but non-fibre backends appear to let it | 1488 | // response, but non-fibre backends appear to let it |
@@ -1518,27 +1493,18 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd | |||
1518 | break; | 1493 | break; |
1519 | 1494 | ||
1520 | default: | 1495 | default: |
1521 | printk("cciss: unknown data direction: %d\n", | 1496 | dev_warn(&h->pdev->dev, "unknown data direction: %d\n", |
1522 | cmd->sc_data_direction); | 1497 | cmd->sc_data_direction); |
1523 | BUG(); | 1498 | BUG(); |
1524 | break; | 1499 | break; |
1525 | } | 1500 | } |
1526 | cciss_scatter_gather(c, cp, cmd); | 1501 | cciss_scatter_gather(h, c, cmd); |
1527 | 1502 | enqueue_cmd_and_start_io(h, c); | |
1528 | /* Put the request on the tail of the request queue */ | ||
1529 | |||
1530 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | ||
1531 | addQ(&c->reqQ, cp); | ||
1532 | c->Qdepth++; | ||
1533 | start_io(c); | ||
1534 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | ||
1535 | |||
1536 | /* the cmd'll come back via intr handler in complete_scsi_command() */ | 1503 | /* the cmd'll come back via intr handler in complete_scsi_command() */ |
1537 | return 0; | 1504 | return 0; |
1538 | } | 1505 | } |
1539 | 1506 | ||
1540 | static void | 1507 | static void cciss_unregister_scsi(ctlr_info_t *h) |
1541 | cciss_unregister_scsi(int ctlr) | ||
1542 | { | 1508 | { |
1543 | struct cciss_scsi_adapter_data_t *sa; | 1509 | struct cciss_scsi_adapter_data_t *sa; |
1544 | struct cciss_scsi_cmd_stack_t *stk; | 1510 | struct cciss_scsi_cmd_stack_t *stk; |
@@ -1546,59 +1512,58 @@ cciss_unregister_scsi(int ctlr) | |||
1546 | 1512 | ||
1547 | /* we are being forcibly unloaded, and may not refuse. */ | 1513 | /* we are being forcibly unloaded, and may not refuse. */ |
1548 | 1514 | ||
1549 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1515 | spin_lock_irqsave(&h->lock, flags); |
1550 | sa = hba[ctlr]->scsi_ctlr; | 1516 | sa = h->scsi_ctlr; |
1551 | stk = &sa->cmd_stack; | 1517 | stk = &sa->cmd_stack; |
1552 | 1518 | ||
1553 | /* if we weren't ever actually registered, don't unregister */ | 1519 | /* if we weren't ever actually registered, don't unregister */ |
1554 | if (sa->registered) { | 1520 | if (sa->registered) { |
1555 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1521 | spin_unlock_irqrestore(&h->lock, flags); |
1556 | scsi_remove_host(sa->scsi_host); | 1522 | scsi_remove_host(sa->scsi_host); |
1557 | scsi_host_put(sa->scsi_host); | 1523 | scsi_host_put(sa->scsi_host); |
1558 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1524 | spin_lock_irqsave(&h->lock, flags); |
1559 | } | 1525 | } |
1560 | 1526 | ||
1561 | /* set scsi_host to NULL so our detect routine will | 1527 | /* set scsi_host to NULL so our detect routine will |
1562 | find us on register */ | 1528 | find us on register */ |
1563 | sa->scsi_host = NULL; | 1529 | sa->scsi_host = NULL; |
1564 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1530 | spin_unlock_irqrestore(&h->lock, flags); |
1565 | scsi_cmd_stack_free(ctlr); | 1531 | scsi_cmd_stack_free(h); |
1566 | kfree(sa); | 1532 | kfree(sa); |
1567 | } | 1533 | } |
1568 | 1534 | ||
1569 | static int | 1535 | static int cciss_engage_scsi(ctlr_info_t *h) |
1570 | cciss_engage_scsi(int ctlr) | ||
1571 | { | 1536 | { |
1572 | struct cciss_scsi_adapter_data_t *sa; | 1537 | struct cciss_scsi_adapter_data_t *sa; |
1573 | struct cciss_scsi_cmd_stack_t *stk; | 1538 | struct cciss_scsi_cmd_stack_t *stk; |
1574 | unsigned long flags; | 1539 | unsigned long flags; |
1575 | 1540 | ||
1576 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1541 | spin_lock_irqsave(&h->lock, flags); |
1577 | sa = hba[ctlr]->scsi_ctlr; | 1542 | sa = h->scsi_ctlr; |
1578 | stk = &sa->cmd_stack; | 1543 | stk = &sa->cmd_stack; |
1579 | 1544 | ||
1580 | if (sa->registered) { | 1545 | if (sa->registered) { |
1581 | printk("cciss%d: SCSI subsystem already engaged.\n", ctlr); | 1546 | dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n"); |
1582 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1547 | spin_unlock_irqrestore(&h->lock, flags); |
1583 | return -ENXIO; | 1548 | return -ENXIO; |
1584 | } | 1549 | } |
1585 | sa->registered = 1; | 1550 | sa->registered = 1; |
1586 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1551 | spin_unlock_irqrestore(&h->lock, flags); |
1587 | cciss_update_non_disk_devices(ctlr, -1); | 1552 | cciss_update_non_disk_devices(h, -1); |
1588 | cciss_scsi_detect(ctlr); | 1553 | cciss_scsi_detect(h); |
1589 | return 0; | 1554 | return 0; |
1590 | } | 1555 | } |
1591 | 1556 | ||
1592 | static void | 1557 | static void |
1593 | cciss_seq_tape_report(struct seq_file *seq, int ctlr) | 1558 | cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h) |
1594 | { | 1559 | { |
1595 | unsigned long flags; | 1560 | unsigned long flags; |
1596 | 1561 | ||
1597 | CPQ_TAPE_LOCK(ctlr, flags); | 1562 | CPQ_TAPE_LOCK(h, flags); |
1598 | seq_printf(seq, | 1563 | seq_printf(seq, |
1599 | "Sequential access devices: %d\n\n", | 1564 | "Sequential access devices: %d\n\n", |
1600 | ccissscsi[ctlr].ndevices); | 1565 | ccissscsi[h->ctlr].ndevices); |
1601 | CPQ_TAPE_UNLOCK(ctlr, flags); | 1566 | CPQ_TAPE_UNLOCK(h, flags); |
1602 | } | 1567 | } |
1603 | 1568 | ||
1604 | static int wait_for_device_to_become_ready(ctlr_info_t *h, | 1569 | static int wait_for_device_to_become_ready(ctlr_info_t *h, |
@@ -1609,10 +1574,10 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h, | |||
1609 | int waittime = HZ; | 1574 | int waittime = HZ; |
1610 | CommandList_struct *c; | 1575 | CommandList_struct *c; |
1611 | 1576 | ||
1612 | c = cmd_alloc(h, 1); | 1577 | c = cmd_alloc(h); |
1613 | if (!c) { | 1578 | if (!c) { |
1614 | printk(KERN_WARNING "cciss%d: out of memory in " | 1579 | dev_warn(&h->pdev->dev, "out of memory in " |
1615 | "wait_for_device_to_become_ready.\n", h->ctlr); | 1580 | "wait_for_device_to_become_ready.\n"); |
1616 | return IO_ERROR; | 1581 | return IO_ERROR; |
1617 | } | 1582 | } |
1618 | 1583 | ||
@@ -1630,7 +1595,7 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h, | |||
1630 | waittime = waittime * 2; | 1595 | waittime = waittime * 2; |
1631 | 1596 | ||
1632 | /* Send the Test Unit Ready */ | 1597 | /* Send the Test Unit Ready */ |
1633 | rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0, | 1598 | rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0, |
1634 | lunaddr, TYPE_CMD); | 1599 | lunaddr, TYPE_CMD); |
1635 | if (rc == 0) | 1600 | if (rc == 0) |
1636 | rc = sendcmd_withirq_core(h, c, 0); | 1601 | rc = sendcmd_withirq_core(h, c, 0); |
@@ -1656,18 +1621,18 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h, | |||
1656 | } | 1621 | } |
1657 | } | 1622 | } |
1658 | retry_tur: | 1623 | retry_tur: |
1659 | printk(KERN_WARNING "cciss%d: Waiting %d secs " | 1624 | dev_warn(&h->pdev->dev, "Waiting %d secs " |
1660 | "for device to become ready.\n", | 1625 | "for device to become ready.\n", |
1661 | h->ctlr, waittime / HZ); | 1626 | waittime / HZ); |
1662 | rc = 1; /* device not ready. */ | 1627 | rc = 1; /* device not ready. */ |
1663 | } | 1628 | } |
1664 | 1629 | ||
1665 | if (rc) | 1630 | if (rc) |
1666 | printk("cciss%d: giving up on device.\n", h->ctlr); | 1631 | dev_warn(&h->pdev->dev, "giving up on device.\n"); |
1667 | else | 1632 | else |
1668 | printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr); | 1633 | dev_warn(&h->pdev->dev, "device is ready.\n"); |
1669 | 1634 | ||
1670 | cmd_free(h, c, 1); | 1635 | cmd_free(h, c); |
1671 | return rc; | 1636 | return rc; |
1672 | } | 1637 | } |
1673 | 1638 | ||
@@ -1687,26 +1652,24 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
1687 | int rc; | 1652 | int rc; |
1688 | CommandList_struct *cmd_in_trouble; | 1653 | CommandList_struct *cmd_in_trouble; |
1689 | unsigned char lunaddr[8]; | 1654 | unsigned char lunaddr[8]; |
1690 | ctlr_info_t *c; | 1655 | ctlr_info_t *h; |
1691 | int ctlr; | ||
1692 | 1656 | ||
1693 | /* find the controller to which the command to be aborted was sent */ | 1657 | /* find the controller to which the command to be aborted was sent */ |
1694 | c = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; | 1658 | h = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; |
1695 | if (c == NULL) /* paranoia */ | 1659 | if (h == NULL) /* paranoia */ |
1696 | return FAILED; | 1660 | return FAILED; |
1697 | ctlr = c->ctlr; | 1661 | dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n"); |
1698 | printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr); | ||
1699 | /* find the command that's giving us trouble */ | 1662 | /* find the command that's giving us trouble */ |
1700 | cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; | 1663 | cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; |
1701 | if (cmd_in_trouble == NULL) /* paranoia */ | 1664 | if (cmd_in_trouble == NULL) /* paranoia */ |
1702 | return FAILED; | 1665 | return FAILED; |
1703 | memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8); | 1666 | memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8); |
1704 | /* send a reset to the SCSI LUN which the command was sent to */ | 1667 | /* send a reset to the SCSI LUN which the command was sent to */ |
1705 | rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr, | 1668 | rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr, |
1706 | TYPE_MSG); | 1669 | TYPE_MSG); |
1707 | if (rc == 0 && wait_for_device_to_become_ready(c, lunaddr) == 0) | 1670 | if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0) |
1708 | return SUCCESS; | 1671 | return SUCCESS; |
1709 | printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); | 1672 | dev_warn(&h->pdev->dev, "resetting device failed.\n"); |
1710 | return FAILED; | 1673 | return FAILED; |
1711 | } | 1674 | } |
1712 | 1675 | ||
@@ -1715,22 +1678,20 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) | |||
1715 | int rc; | 1678 | int rc; |
1716 | CommandList_struct *cmd_to_abort; | 1679 | CommandList_struct *cmd_to_abort; |
1717 | unsigned char lunaddr[8]; | 1680 | unsigned char lunaddr[8]; |
1718 | ctlr_info_t *c; | 1681 | ctlr_info_t *h; |
1719 | int ctlr; | ||
1720 | 1682 | ||
1721 | /* find the controller to which the command to be aborted was sent */ | 1683 | /* find the controller to which the command to be aborted was sent */ |
1722 | c = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; | 1684 | h = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; |
1723 | if (c == NULL) /* paranoia */ | 1685 | if (h == NULL) /* paranoia */ |
1724 | return FAILED; | 1686 | return FAILED; |
1725 | ctlr = c->ctlr; | 1687 | dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n"); |
1726 | printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr); | ||
1727 | 1688 | ||
1728 | /* find the command to be aborted */ | 1689 | /* find the command to be aborted */ |
1729 | cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; | 1690 | cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; |
1730 | if (cmd_to_abort == NULL) /* paranoia */ | 1691 | if (cmd_to_abort == NULL) /* paranoia */ |
1731 | return FAILED; | 1692 | return FAILED; |
1732 | memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8); | 1693 | memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8); |
1733 | rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag, | 1694 | rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag, |
1734 | 0, 0, lunaddr, TYPE_MSG); | 1695 | 0, 0, lunaddr, TYPE_MSG); |
1735 | if (rc == 0) | 1696 | if (rc == 0) |
1736 | return SUCCESS; | 1697 | return SUCCESS; |
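
The cciss_scsi.c changes above all follow one pattern: helpers stop taking an integer controller index (and re-deriving the controller from the global hba[] table) and instead receive the ctlr_info_t pointer directly, which also lets logging move from printk("cciss%d: ...") to dev_warn(&h->pdev->dev, ...). A hedged before/after sketch of that calling convention, with hypothetical rescan bodies standing in for the driver's helpers:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pci.h>

/* Sketch of the index-to-pointer refactoring, not the driver itself. */
struct ctlr {
	struct pci_dev *pdev;
	int ctlr;			/* legacy index, kept for tables */
};

static struct ctlr *hba[8];		/* old style: global handle table */

static void do_rescan_old(int ctlr)
{
	/* every helper re-derives the controller from a global */
	printk(KERN_INFO "cciss%d: rescanning\n", hba[ctlr]->ctlr);
}

static void do_rescan_new(struct ctlr *h)
{
	/* the controller is passed in; the log line is prefixed
	 * with the PCI device name automatically */
	dev_info(&h->pdev->dev, "rescanning\n");
}
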
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 91d11631cec9..d53b0291c44b 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/seq_file.h> | 35 | #include <linux/seq_file.h> |
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/hdreg.h> | 37 | #include <linux/hdreg.h> |
38 | #include <linux/smp_lock.h> | ||
38 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
39 | #include <linux/blkdev.h> | 40 | #include <linux/blkdev.h> |
40 | #include <linux/genhd.h> | 41 | #include <linux/genhd.h> |
@@ -157,7 +158,7 @@ static int sendcmd( | |||
157 | unsigned int blkcnt, | 158 | unsigned int blkcnt, |
158 | unsigned int log_unit ); | 159 | unsigned int log_unit ); |
159 | 160 | ||
160 | static int ida_open(struct block_device *bdev, fmode_t mode); | 161 | static int ida_unlocked_open(struct block_device *bdev, fmode_t mode); |
161 | static int ida_release(struct gendisk *disk, fmode_t mode); | 162 | static int ida_release(struct gendisk *disk, fmode_t mode); |
162 | static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); | 163 | static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); |
163 | static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo); | 164 | static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo); |
@@ -195,9 +196,9 @@ static inline ctlr_info_t *get_host(struct gendisk *disk) | |||
195 | 196 | ||
196 | static const struct block_device_operations ida_fops = { | 197 | static const struct block_device_operations ida_fops = { |
197 | .owner = THIS_MODULE, | 198 | .owner = THIS_MODULE, |
198 | .open = ida_open, | 199 | .open = ida_unlocked_open, |
199 | .release = ida_release, | 200 | .release = ida_release, |
200 | .locked_ioctl = ida_ioctl, | 201 | .ioctl = ida_ioctl, |
201 | .getgeo = ida_getgeo, | 202 | .getgeo = ida_getgeo, |
202 | .revalidate_disk= ida_revalidate, | 203 | .revalidate_disk= ida_revalidate, |
203 | }; | 204 | }; |
@@ -386,7 +387,7 @@ static void __devexit cpqarray_remove_one_eisa (int i) | |||
386 | } | 387 | } |
387 | 388 | ||
388 | /* pdev is NULL for eisa */ | 389 | /* pdev is NULL for eisa */ |
389 | static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev) | 390 | static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev) |
390 | { | 391 | { |
391 | struct request_queue *q; | 392 | struct request_queue *q; |
392 | int j; | 393 | int j; |
@@ -503,7 +504,7 @@ Enomem4: | |||
503 | return -1; | 504 | return -1; |
504 | } | 505 | } |
505 | 506 | ||
506 | static int __init cpqarray_init_one( struct pci_dev *pdev, | 507 | static int __devinit cpqarray_init_one( struct pci_dev *pdev, |
507 | const struct pci_device_id *ent) | 508 | const struct pci_device_id *ent) |
508 | { | 509 | { |
509 | int i; | 510 | int i; |
@@ -740,7 +741,7 @@ __setup("smart2=", cpqarray_setup); | |||
740 | /* | 741 | /* |
741 | * Find an EISA controller's signature. Set up an hba if we find it. | 742 | * Find an EISA controller's signature. Set up an hba if we find it. |
742 | */ | 743 | */ |
743 | static int __init cpqarray_eisa_detect(void) | 744 | static int __devinit cpqarray_eisa_detect(void) |
744 | { | 745 | { |
745 | int i=0, j; | 746 | int i=0, j; |
746 | __u32 board_id; | 747 | __u32 board_id; |
@@ -840,13 +841,29 @@ static int ida_open(struct block_device *bdev, fmode_t mode) | |||
840 | return 0; | 841 | return 0; |
841 | } | 842 | } |
842 | 843 | ||
844 | static int ida_unlocked_open(struct block_device *bdev, fmode_t mode) | ||
845 | { | ||
846 | int ret; | ||
847 | |||
848 | lock_kernel(); | ||
849 | ret = ida_open(bdev, mode); | ||
850 | unlock_kernel(); | ||
851 | |||
852 | return ret; | ||
853 | } | ||
854 | |||
843 | /* | 855 | /* |
844 | * Close. Sync first. | 856 | * Close. Sync first. |
845 | */ | 857 | */ |
846 | static int ida_release(struct gendisk *disk, fmode_t mode) | 858 | static int ida_release(struct gendisk *disk, fmode_t mode) |
847 | { | 859 | { |
848 | ctlr_info_t *host = get_host(disk); | 860 | ctlr_info_t *host; |
861 | |||
862 | lock_kernel(); | ||
863 | host = get_host(disk); | ||
849 | host->usage_count--; | 864 | host->usage_count--; |
865 | unlock_kernel(); | ||
866 | |||
850 | return 0; | 867 | return 0; |
851 | } | 868 | } |
852 | 869 | ||
@@ -1128,7 +1145,7 @@ static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
1128 | * ida_ioctl does some miscellaneous stuff like reporting drive geometry, | 1145 | * ida_ioctl does some miscellaneous stuff like reporting drive geometry, |
1129 | * setting readahead and submitting commands from userspace to the controller. | 1146 | * setting readahead and submitting commands from userspace to the controller. |
1130 | */ | 1147 | */ |
1131 | static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) | 1148 | static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) |
1132 | { | 1149 | { |
1133 | drv_info_t *drv = get_drv(bdev->bd_disk); | 1150 | drv_info_t *drv = get_drv(bdev->bd_disk); |
1134 | ctlr_info_t *host = get_host(bdev->bd_disk); | 1151 | ctlr_info_t *host = get_host(bdev->bd_disk); |
@@ -1162,7 +1179,8 @@ out_passthru: | |||
1162 | return error; | 1179 | return error; |
1163 | case IDAGETCTLRSIG: | 1180 | case IDAGETCTLRSIG: |
1164 | if (!arg) return -EINVAL; | 1181 | if (!arg) return -EINVAL; |
1165 | put_user(host->ctlr_sig, (int __user *)arg); | 1182 | if (put_user(host->ctlr_sig, (int __user *)arg)) |
1183 | return -EFAULT; | ||
1166 | return 0; | 1184 | return 0; |
1167 | case IDAREVALIDATEVOLS: | 1185 | case IDAREVALIDATEVOLS: |
1168 | if (MINOR(bdev->bd_dev) != 0) | 1186 | if (MINOR(bdev->bd_dev) != 0) |
@@ -1170,7 +1188,8 @@ out_passthru: | |||
1170 | return revalidate_allvol(host); | 1188 | return revalidate_allvol(host); |
1171 | case IDADRIVERVERSION: | 1189 | case IDADRIVERVERSION: |
1172 | if (!arg) return -EINVAL; | 1190 | if (!arg) return -EINVAL; |
1173 | put_user(DRIVER_VERSION, (unsigned long __user *)arg); | 1191 | if (put_user(DRIVER_VERSION, (unsigned long __user *)arg)) |
1192 | return -EFAULT; | ||
1174 | return 0; | 1193 | return 0; |
1175 | case IDAGETPCIINFO: | 1194 | case IDAGETPCIINFO: |
1176 | { | 1195 | { |
@@ -1192,6 +1211,19 @@ out_passthru: | |||
1192 | } | 1211 | } |
1193 | 1212 | ||
1194 | } | 1213 | } |
1214 | |||
1215 | static int ida_ioctl(struct block_device *bdev, fmode_t mode, | ||
1216 | unsigned int cmd, unsigned long param) | ||
1217 | { | ||
1218 | int ret; | ||
1219 | |||
1220 | lock_kernel(); | ||
1221 | ret = ida_locked_ioctl(bdev, mode, cmd, param); | ||
1222 | unlock_kernel(); | ||
1223 | |||
1224 | return ret; | ||
1225 | } | ||
1226 | |||
1195 | /* | 1227 | /* |
1196 | * ida_ctlr_ioctl is for passing commands to the controller from userspace. | 1228 | * ida_ctlr_ioctl is for passing commands to the controller from userspace. |
1197 | * The command block (io) has already been copied to kernel space for us, | 1229 | * The command block (io) has already been copied to kernel space for us, |
@@ -1225,17 +1257,11 @@ static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io) | |||
1225 | /* Pre submit processing */ | 1257 | /* Pre submit processing */ |
1226 | switch(io->cmd) { | 1258 | switch(io->cmd) { |
1227 | case PASSTHRU_A: | 1259 | case PASSTHRU_A: |
1228 | p = kmalloc(io->sg[0].size, GFP_KERNEL); | 1260 | p = memdup_user(io->sg[0].addr, io->sg[0].size); |
1229 | if (!p) | 1261 | if (IS_ERR(p)) { |
1230 | { | 1262 | error = PTR_ERR(p); |
1231 | error = -ENOMEM; | 1263 | cmd_free(h, c, 0); |
1232 | cmd_free(h, c, 0); | 1264 | return error; |
1233 | return(error); | ||
1234 | } | ||
1235 | if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) { | ||
1236 | kfree(p); | ||
1237 | cmd_free(h, c, 0); | ||
1238 | return -EFAULT; | ||
1239 | } | 1265 | } |
1240 | c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), | 1266 | c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), |
1241 | sizeof(ida_ioctl_t), | 1267 | sizeof(ida_ioctl_t), |
@@ -1266,18 +1292,12 @@ static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io) | |||
1266 | case DIAG_PASS_THRU: | 1292 | case DIAG_PASS_THRU: |
1267 | case COLLECT_BUFFER: | 1293 | case COLLECT_BUFFER: |
1268 | case WRITE_FLASH_ROM: | 1294 | case WRITE_FLASH_ROM: |
1269 | p = kmalloc(io->sg[0].size, GFP_KERNEL); | 1295 | p = memdup_user(io->sg[0].addr, io->sg[0].size); |
1270 | if (!p) | 1296 | if (IS_ERR(p)) { |
1271 | { | 1297 | error = PTR_ERR(p); |
1272 | error = -ENOMEM; | 1298 | cmd_free(h, c, 0); |
1273 | cmd_free(h, c, 0); | 1299 | return error; |
1274 | return(error); | ||
1275 | } | 1300 | } |
1276 | if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) { | ||
1277 | kfree(p); | ||
1278 | cmd_free(h, c, 0); | ||
1279 | return -EFAULT; | ||
1280 | } | ||
1281 | c->req.sg[0].size = io->sg[0].size; | 1301 | c->req.sg[0].size = io->sg[0].size; |
1282 | c->req.sg[0].addr = pci_map_single(h->pci_dev, p, | 1302 | c->req.sg[0].addr = pci_map_single(h->pci_dev, p, |
1283 | c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); | 1303 | c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); |
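
Two hunks above replace the open-coded kmalloc()/copy_from_user()/kfree() dance with memdup_user(), which allocates, copies, and reports failure as an ERR_PTR in one call; the caller tests IS_ERR() and unwinds once. A minimal sketch of the conversion, where ubuf and len stand for io->sg[0].addr and io->sg[0].size:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Old style: allocate, copy, and clean up by hand on each failure. */
static void *copy_in_old(const void __user *ubuf, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(p, ubuf, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	return p;
}

/* New style: one call allocates, copies, and encodes failure. */
static void *copy_in_new(const void __user *ubuf, size_t len)
{
	return memdup_user(ubuf, len);
}
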
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index df018990c422..9400845d602e 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c | |||
@@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, | |||
79 | md_io.error = 0; | 79 | md_io.error = 0; |
80 | 80 | ||
81 | if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) | 81 | if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) |
82 | rw |= (1 << BIO_RW_BARRIER); | 82 | rw |= REQ_HARDBARRIER; |
83 | rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO)); | 83 | rw |= REQ_UNPLUG | REQ_SYNC; |
84 | 84 | ||
85 | retry: | 85 | retry: |
86 | bio = bio_alloc(GFP_NOIO, 1); | 86 | bio = bio_alloc(GFP_NOIO, 1); |
@@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, | |||
103 | /* check for unsupported barrier op. | 103 | /* check for unsupported barrier op. |
104 | * would rather check on EOPNOTSUPP, but that is not reliable. | 104 | * would rather check on EOPNOTSUPP, but that is not reliable. |
105 | * don't try again for ANY return value != 0 */ | 105 | * don't try again for ANY return value != 0 */ |
106 | if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) { | 106 | if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) { |
107 | /* Try again with no barrier */ | 107 | /* Try again with no barrier */ |
108 | dev_warn(DEV, "Barriers not supported on meta data device - disabling\n"); | 108 | dev_warn(DEV, "Barriers not supported on meta data device - disabling\n"); |
109 | set_bit(MD_NO_BARRIER, &mdev->flags); | 109 | set_bit(MD_NO_BARRIER, &mdev->flags); |
110 | rw &= ~(1 << BIO_RW_BARRIER); | 110 | rw &= ~REQ_HARDBARRIER; |
111 | bio_put(bio); | 111 | bio_put(bio); |
112 | goto retry; | 112 | goto retry; |
113 | } | 113 | } |
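
The drbd_actlog.c hunks track the block layer's flag unification: bio flags built from shifted BIO_RW_* bits become the shared REQ_* constants, and testing a bio is a plain mask of bio->bi_rw. A sketch of the metadata-write flag handling under those names, assuming the REQ_HARDBARRIER/REQ_UNPLUG/REQ_SYNC flags quoted above:

#include <linux/bio.h>
#include <linux/fs.h>

/* Sketch, assuming the unified REQ_* bio flags used in the hunks above. */
static int build_md_rw_flags(int rw, int barriers_supported)
{
	if ((rw & WRITE) && barriers_supported)
		rw |= REQ_HARDBARRIER;	/* ordered write for meta data */
	rw |= REQ_UNPLUG | REQ_SYNC;	/* issue immediately, synchronous */
	return rw;
}

/* On completion, a failed barrier write is resubmitted without it. */
static int should_retry_without_barrier(struct bio *bio, int ok, int *rw)
{
	if ((bio->bi_rw & REQ_HARDBARRIER) && !ok) {
		*rw &= ~REQ_HARDBARRIER;	/* drop the barrier flag */
		return 1;			/* caller resubmits */
	}
	return 0;
}
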
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 3390716898d5..e3f88d6e1412 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c | |||
@@ -84,6 +84,9 @@ struct drbd_bitmap { | |||
84 | #define BM_MD_IO_ERROR 1 | 84 | #define BM_MD_IO_ERROR 1 |
85 | #define BM_P_VMALLOCED 2 | 85 | #define BM_P_VMALLOCED 2 |
86 | 86 | ||
87 | static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | ||
88 | unsigned long e, int val, const enum km_type km); | ||
89 | |||
87 | static int bm_is_locked(struct drbd_bitmap *b) | 90 | static int bm_is_locked(struct drbd_bitmap *b) |
88 | { | 91 | { |
89 | return test_bit(BM_LOCKED, &b->bm_flags); | 92 | return test_bit(BM_LOCKED, &b->bm_flags); |
@@ -441,7 +444,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) | |||
441 | * In case this is actually a resize, we copy the old bitmap into the new one. | 444 | * In case this is actually a resize, we copy the old bitmap into the new one. |
442 | * Otherwise, the bitmap is initialized to all bits set. | 445 | * Otherwise, the bitmap is initialized to all bits set. |
443 | */ | 446 | */ |
444 | int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity) | 447 | int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) |
445 | { | 448 | { |
446 | struct drbd_bitmap *b = mdev->bitmap; | 449 | struct drbd_bitmap *b = mdev->bitmap; |
447 | unsigned long bits, words, owords, obits, *p_addr, *bm; | 450 | unsigned long bits, words, owords, obits, *p_addr, *bm; |
@@ -516,7 +519,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity) | |||
516 | obits = b->bm_bits; | 519 | obits = b->bm_bits; |
517 | 520 | ||
518 | growing = bits > obits; | 521 | growing = bits > obits; |
519 | if (opages) | 522 | if (opages && growing && set_new_bits) |
520 | bm_set_surplus(b); | 523 | bm_set_surplus(b); |
521 | 524 | ||
522 | b->bm_pages = npages; | 525 | b->bm_pages = npages; |
@@ -526,8 +529,12 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity) | |||
526 | b->bm_dev_capacity = capacity; | 529 | b->bm_dev_capacity = capacity; |
527 | 530 | ||
528 | if (growing) { | 531 | if (growing) { |
529 | bm_memset(b, owords, 0xff, words-owords); | 532 | if (set_new_bits) { |
530 | b->bm_set += bits - obits; | 533 | bm_memset(b, owords, 0xff, words-owords); |
534 | b->bm_set += bits - obits; | ||
535 | } else | ||
536 | bm_memset(b, owords, 0x00, words-owords); | ||
537 | |||
531 | } | 538 | } |
532 | 539 | ||
533 | if (want < have) { | 540 | if (want < have) { |
@@ -773,7 +780,7 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int | |||
773 | /* nothing to do, on disk == in memory */ | 780 | /* nothing to do, on disk == in memory */ |
774 | # define bm_cpu_to_lel(x) ((void)0) | 781 | # define bm_cpu_to_lel(x) ((void)0) |
775 | # else | 782 | # else |
776 | void bm_cpu_to_lel(struct drbd_bitmap *b) | 783 | static void bm_cpu_to_lel(struct drbd_bitmap *b) |
777 | { | 784 | { |
778 | /* need to cpu_to_lel all the pages ... | 785 | /* need to cpu_to_lel all the pages ... |
779 | * this may be optimized by using | 786 | * this may be optimized by using |
@@ -1015,7 +1022,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f | |||
1015 | * wants bitnr, not sector. | 1022 | * wants bitnr, not sector. |
1016 | * expected to be called for only a few bits (e - s about BITS_PER_LONG). | 1023 | * expected to be called for only a few bits (e - s about BITS_PER_LONG). |
1017 | * Must hold bitmap lock already. */ | 1024 | * Must hold bitmap lock already. */ |
1018 | int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | 1025 | static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, |
1019 | unsigned long e, int val, const enum km_type km) | 1026 | unsigned long e, int val, const enum km_type km) |
1020 | { | 1027 | { |
1021 | struct drbd_bitmap *b = mdev->bitmap; | 1028 | struct drbd_bitmap *b = mdev->bitmap; |
@@ -1053,7 +1060,7 @@ int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | |||
1053 | * for val != 0, we change 0 -> 1, return code positive | 1060 | * for val != 0, we change 0 -> 1, return code positive |
1054 | * for val == 0, we change 1 -> 0, return code negative | 1061 | * for val == 0, we change 1 -> 0, return code negative |
1055 | * wants bitnr, not sector */ | 1062 | * wants bitnr, not sector */ |
1056 | int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | 1063 | static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, |
1057 | const unsigned long e, int val) | 1064 | const unsigned long e, int val) |
1058 | { | 1065 | { |
1059 | unsigned long flags; | 1066 | unsigned long flags; |
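
drbd_bm_resize() above gains a set_new_bits argument: when the bitmap grows, the freshly added area is either filled with 0xff (all blocks marked out-of-sync, forcing a resync of the new space) or zeroed (the new space is taken as clean, matching the DDSF_NO_RESYNC flag introduced in drbd_int.h below). A self-contained sketch of just that branch, with stand-in types for the driver's struct drbd_bitmap and bm_memset():

#include <stddef.h>

/* Stand-ins for the driver's bitmap object and word-fill helper. */
struct bm_sketch {
	unsigned char *words;	/* backing store, one byte per word here */
	unsigned long bm_set;	/* number of bits currently set */
};

static void bm_memset(struct bm_sketch *b, size_t off, int c, size_t len)
{
	while (len--)
		b->words[off++] = (unsigned char)c;
}

/* The grow-path initialization selected by set_new_bits above. */
static void init_new_area(struct bm_sketch *b, size_t owords, size_t words,
			  unsigned long obits, unsigned long bits,
			  int set_new_bits)
{
	if (set_new_bits) {
		/* mark the new area out-of-sync: it must be resynced */
		bm_memset(b, owords, 0xff, words - owords);
		b->bm_set += bits - obits;
	} else {
		/* new area starts clean: no resync for the added space */
		bm_memset(b, owords, 0x00, words - owords);
	}
}
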
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index e5e86a781820..352441b0f92f 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -132,6 +132,7 @@ enum { | |||
132 | DRBD_FAULT_DT_RA = 6, /* data read ahead */ | 132 | DRBD_FAULT_DT_RA = 6, /* data read ahead */ |
133 | DRBD_FAULT_BM_ALLOC = 7, /* bitmap allocation */ | 133 | DRBD_FAULT_BM_ALLOC = 7, /* bitmap allocation */ |
134 | DRBD_FAULT_AL_EE = 8, /* alloc ee */ | 134 | DRBD_FAULT_AL_EE = 8, /* alloc ee */ |
135 | DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */ | ||
135 | 136 | ||
136 | DRBD_FAULT_MAX, | 137 | DRBD_FAULT_MAX, |
137 | }; | 138 | }; |
@@ -208,8 +209,11 @@ enum drbd_packets { | |||
208 | P_RS_IS_IN_SYNC = 0x22, /* meta socket */ | 209 | P_RS_IS_IN_SYNC = 0x22, /* meta socket */ |
209 | P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */ | 210 | P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */ |
210 | P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */ | 211 | P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */ |
212 | /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */ | ||
213 | /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ | ||
214 | P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ | ||
211 | 215 | ||
212 | P_MAX_CMD = 0x25, | 216 | P_MAX_CMD = 0x28, |
213 | P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ | 217 | P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ |
214 | P_MAX_OPT_CMD = 0x101, | 218 | P_MAX_OPT_CMD = 0x101, |
215 | 219 | ||
@@ -264,6 +268,7 @@ static inline const char *cmdname(enum drbd_packets cmd) | |||
264 | [P_CSUM_RS_REQUEST] = "CsumRSRequest", | 268 | [P_CSUM_RS_REQUEST] = "CsumRSRequest", |
265 | [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", | 269 | [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", |
266 | [P_COMPRESSED_BITMAP] = "CBitmap", | 270 | [P_COMPRESSED_BITMAP] = "CBitmap", |
271 | [P_DELAY_PROBE] = "DelayProbe", | ||
267 | [P_MAX_CMD] = NULL, | 272 | [P_MAX_CMD] = NULL, |
268 | }; | 273 | }; |
269 | 274 | ||
@@ -481,7 +486,8 @@ struct p_sizes { | |||
481 | u64 u_size; /* user requested size */ | 486 | u64 u_size; /* user requested size */ |
482 | u64 c_size; /* current exported size */ | 487 | u64 c_size; /* current exported size */ |
483 | u32 max_segment_size; /* Maximal size of a BIO */ | 488 | u32 max_segment_size; /* Maximal size of a BIO */ |
484 | u32 queue_order_type; | 489 | u16 queue_order_type; /* not yet implemented in DRBD */ |
490 | u16 dds_flags; /* use enum dds_flags here. */ | ||
485 | } __packed; | 491 | } __packed; |
486 | 492 | ||
487 | struct p_state { | 493 | struct p_state { |
@@ -538,6 +544,12 @@ struct p_compressed_bm { | |||
538 | u8 code[0]; | 544 | u8 code[0]; |
539 | } __packed; | 545 | } __packed; |
540 | 546 | ||
547 | struct p_delay_probe { | ||
548 | struct p_header head; | ||
549 | u32 seq_num; /* sequence number to match the two probe packets */ | ||
550 | u32 offset; /* usecs the probe got sent after the reference time point */ | ||
551 | } __packed; | ||
552 | |||
541 | /* DCBP: Drbd Compressed Bitmap Packet ... */ | 553 | /* DCBP: Drbd Compressed Bitmap Packet ... */ |
542 | static inline enum drbd_bitmap_code | 554 | static inline enum drbd_bitmap_code |
543 | DCBP_get_code(struct p_compressed_bm *p) | 555 | DCBP_get_code(struct p_compressed_bm *p) |
@@ -722,22 +734,6 @@ enum epoch_event { | |||
722 | EV_CLEANUP = 32, /* used as flag */ | 734 | EV_CLEANUP = 32, /* used as flag */ |
723 | }; | 735 | }; |
724 | 736 | ||
725 | struct drbd_epoch_entry { | ||
726 | struct drbd_work w; | ||
727 | struct drbd_conf *mdev; | ||
728 | struct bio *private_bio; | ||
729 | struct hlist_node colision; | ||
730 | sector_t sector; | ||
731 | unsigned int size; | ||
732 | struct drbd_epoch *epoch; | ||
733 | |||
734 | /* up to here, the struct layout is identical to drbd_request; | ||
735 | * we might be able to use that to our advantage... */ | ||
736 | |||
737 | unsigned int flags; | ||
738 | u64 block_id; | ||
739 | }; | ||
740 | |||
741 | struct drbd_wq_barrier { | 737 | struct drbd_wq_barrier { |
742 | struct drbd_work w; | 738 | struct drbd_work w; |
743 | struct completion done; | 739 | struct completion done; |
@@ -748,17 +744,49 @@ struct digest_info { | |||
748 | void *digest; | 744 | void *digest; |
749 | }; | 745 | }; |
750 | 746 | ||
751 | /* ee flag bits */ | 747 | struct drbd_epoch_entry { |
748 | struct drbd_work w; | ||
749 | struct hlist_node colision; | ||
750 | struct drbd_epoch *epoch; | ||
751 | struct drbd_conf *mdev; | ||
752 | struct page *pages; | ||
753 | atomic_t pending_bios; | ||
754 | unsigned int size; | ||
755 | /* see comments on ee flag bits below */ | ||
756 | unsigned long flags; | ||
757 | sector_t sector; | ||
758 | u64 block_id; | ||
759 | }; | ||
760 | |||
761 | /* ee flag bits. | ||
762 | * While corresponding bios are in flight, the only modification will be | ||
763 | * set_bit WAS_ERROR, which has to be atomic. | ||
764 | * If no bios are in flight yet, or all have been completed, | ||
765 | * non-atomic modification to ee->flags is ok. | ||
766 | */ | ||
752 | enum { | 767 | enum { |
753 | __EE_CALL_AL_COMPLETE_IO, | 768 | __EE_CALL_AL_COMPLETE_IO, |
754 | __EE_CONFLICT_PENDING, | ||
755 | __EE_MAY_SET_IN_SYNC, | 769 | __EE_MAY_SET_IN_SYNC, |
770 | |||
771 | /* This epoch entry closes an epoch using a barrier. | ||
772 | * On successful completion, the epoch is released, | ||
773 | * and the P_BARRIER_ACK is sent. */ | ||
756 | __EE_IS_BARRIER, | 774 | __EE_IS_BARRIER, |
775 | |||
776 | /* In case a barrier failed, | ||
777 | * we need to resubmit without the barrier flag. */ | ||
778 | __EE_RESUBMITTED, | ||
779 | |||
780 | /* we may have several bios per epoch entry. | ||
781 | * if any of those fail, we set this flag atomically | ||
782 | * from the endio callback */ | ||
783 | __EE_WAS_ERROR, | ||
757 | }; | 784 | }; |
758 | #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) | 785 | #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) |
759 | #define EE_CONFLICT_PENDING (1<<__EE_CONFLICT_PENDING) | ||
760 | #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) | 786 | #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) |
761 | #define EE_IS_BARRIER (1<<__EE_IS_BARRIER) | 787 | #define EE_IS_BARRIER (1<<__EE_IS_BARRIER) |
788 | #define EE_RESUBMITTED (1<<__EE_RESUBMITTED) | ||
789 | #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) | ||
762 | 790 | ||
763 | /* global flag bits */ | 791 | /* global flag bits */ |
764 | enum { | 792 | enum { |
@@ -1081,6 +1109,11 @@ enum chg_state_flags { | |||
1081 | CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE, | 1109 | CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE, |
1082 | }; | 1110 | }; |
1083 | 1111 | ||
1112 | enum dds_flags { | ||
1113 | DDSF_FORCED = 1, | ||
1114 | DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */ | ||
1115 | }; | ||
1116 | |||
1084 | extern void drbd_init_set_defaults(struct drbd_conf *mdev); | 1117 | extern void drbd_init_set_defaults(struct drbd_conf *mdev); |
1085 | extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, | 1118 | extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, |
1086 | union drbd_state mask, union drbd_state val); | 1119 | union drbd_state mask, union drbd_state val); |
@@ -1113,7 +1146,7 @@ extern int drbd_send_protocol(struct drbd_conf *mdev); | |||
1113 | extern int drbd_send_uuids(struct drbd_conf *mdev); | 1146 | extern int drbd_send_uuids(struct drbd_conf *mdev); |
1114 | extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); | 1147 | extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); |
1115 | extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val); | 1148 | extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val); |
1116 | extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply); | 1149 | extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); |
1117 | extern int _drbd_send_state(struct drbd_conf *mdev); | 1150 | extern int _drbd_send_state(struct drbd_conf *mdev); |
1118 | extern int drbd_send_state(struct drbd_conf *mdev); | 1151 | extern int drbd_send_state(struct drbd_conf *mdev); |
1119 | extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, | 1152 | extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, |
@@ -1311,7 +1344,7 @@ struct bm_extent { | |||
1311 | #define APP_R_HSIZE 15 | 1344 | #define APP_R_HSIZE 15 |
1312 | 1345 | ||
1313 | extern int drbd_bm_init(struct drbd_conf *mdev); | 1346 | extern int drbd_bm_init(struct drbd_conf *mdev); |
1314 | extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors); | 1347 | extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits); |
1315 | extern void drbd_bm_cleanup(struct drbd_conf *mdev); | 1348 | extern void drbd_bm_cleanup(struct drbd_conf *mdev); |
1316 | extern void drbd_bm_set_all(struct drbd_conf *mdev); | 1349 | extern void drbd_bm_set_all(struct drbd_conf *mdev); |
1317 | extern void drbd_bm_clear_all(struct drbd_conf *mdev); | 1350 | extern void drbd_bm_clear_all(struct drbd_conf *mdev); |
@@ -1383,7 +1416,7 @@ extern void drbd_resume_io(struct drbd_conf *mdev); | |||
1383 | extern char *ppsize(char *buf, unsigned long long size); | 1416 | extern char *ppsize(char *buf, unsigned long long size); |
1384 | extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int); | 1417 | extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int); |
1385 | enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; | 1418 | enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; |
1386 | extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local); | 1419 | extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); |
1387 | extern void resync_after_online_grow(struct drbd_conf *); | 1420 | extern void resync_after_online_grow(struct drbd_conf *); |
1388 | extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); | 1421 | extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); |
1389 | extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, | 1422 | extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, |
@@ -1414,7 +1447,8 @@ static inline void ov_oos_print(struct drbd_conf *mdev) | |||
1414 | } | 1447 | } |
1415 | 1448 | ||
1416 | 1449 | ||
1417 | extern void drbd_csum(struct drbd_conf *, struct crypto_hash *, struct bio *, void *); | 1450 | extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *); |
1451 | extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *); | ||
1418 | /* worker callbacks */ | 1452 | /* worker callbacks */ |
1419 | extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int); | 1453 | extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int); |
1420 | extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int); | 1454 | extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int); |
@@ -1426,7 +1460,6 @@ extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int); | |||
1426 | extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); | 1460 | extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); |
1427 | extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int); | 1461 | extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int); |
1428 | extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); | 1462 | extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); |
1429 | extern int w_io_error(struct drbd_conf *, struct drbd_work *, int); | ||
1430 | extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); | 1463 | extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); |
1431 | extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int); | 1464 | extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int); |
1432 | extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); | 1465 | extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); |
@@ -1438,6 +1471,8 @@ extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); | |||
1438 | extern void resync_timer_fn(unsigned long data); | 1471 | extern void resync_timer_fn(unsigned long data); |
1439 | 1472 | ||
1440 | /* drbd_receiver.c */ | 1473 | /* drbd_receiver.c */ |
1474 | extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | ||
1475 | const unsigned rw, const int fault_type); | ||
1441 | extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); | 1476 | extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); |
1442 | extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, | 1477 | extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, |
1443 | u64 id, | 1478 | u64 id, |
@@ -1490,7 +1525,7 @@ static inline void drbd_tcp_nodelay(struct socket *sock) | |||
1490 | 1525 | ||
1491 | static inline void drbd_tcp_quickack(struct socket *sock) | 1526 | static inline void drbd_tcp_quickack(struct socket *sock) |
1492 | { | 1527 | { |
1493 | int __user val = 1; | 1528 | int __user val = 2; |
1494 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK, | 1529 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK, |
1495 | (char __user *)&val, sizeof(val)); | 1530 | (char __user *)&val, sizeof(val)); |
1496 | } | 1531 | } |
@@ -1593,6 +1628,41 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
1593 | * inline helper functions | 1628 | * inline helper functions |
1594 | *************************/ | 1629 | *************************/ |
1595 | 1630 | ||
1631 | /* see also page_chain_add and friends in drbd_receiver.c */ | ||
1632 | static inline struct page *page_chain_next(struct page *page) | ||
1633 | { | ||
1634 | return (struct page *)page_private(page); | ||
1635 | } | ||
1636 | #define page_chain_for_each(page) \ | ||
1637 | for (; page && ({ prefetch(page_chain_next(page)); 1; }); \ | ||
1638 | page = page_chain_next(page)) | ||
1639 | #define page_chain_for_each_safe(page, n) \ | ||
1640 | for (; page && ({ n = page_chain_next(page); 1; }); page = n) | ||
1641 | |||
1642 | static inline int drbd_bio_has_active_page(struct bio *bio) | ||
1643 | { | ||
1644 | struct bio_vec *bvec; | ||
1645 | int i; | ||
1646 | |||
1647 | __bio_for_each_segment(bvec, bio, i, 0) { | ||
1648 | if (page_count(bvec->bv_page) > 1) | ||
1649 | return 1; | ||
1650 | } | ||
1651 | |||
1652 | return 0; | ||
1653 | } | ||
1654 | |||
1655 | static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e) | ||
1656 | { | ||
1657 | struct page *page = e->pages; | ||
1658 | page_chain_for_each(page) { | ||
1659 | if (page_count(page) > 1) | ||
1660 | return 1; | ||
1661 | } | ||
1662 | return 0; | ||
1663 | } | ||
1664 | |||
1665 | |||
1596 | static inline void drbd_state_lock(struct drbd_conf *mdev) | 1666 | static inline void drbd_state_lock(struct drbd_conf *mdev) |
1597 | { | 1667 | { |
1598 | wait_event(mdev->misc_wait, | 1668 | wait_event(mdev->misc_wait, |
@@ -1641,7 +1711,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, | |||
1641 | switch (mdev->ldev->dc.on_io_error) { | 1711 | switch (mdev->ldev->dc.on_io_error) { |
1642 | case EP_PASS_ON: | 1712 | case EP_PASS_ON: |
1643 | if (!forcedetach) { | 1713 | if (!forcedetach) { |
1644 | if (printk_ratelimit()) | 1714 | if (__ratelimit(&drbd_ratelimit_state)) |
1645 | dev_err(DEV, "Local IO failed in %s." | 1715 | dev_err(DEV, "Local IO failed in %s." |
1646 | "Passing error on...\n", where); | 1716 | "Passing error on...\n", where); |
1647 | break; | 1717 | break; |
@@ -2138,7 +2208,7 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) | |||
2138 | /* I'd like to use wait_event_lock_irq, | 2208 | /* I'd like to use wait_event_lock_irq, |
2139 | * but I'm not sure when it got introduced, | 2209 | * but I'm not sure when it got introduced, |
2140 | * and not sure when it has 3 or 4 arguments */ | 2210 | * and not sure when it has 3 or 4 arguments */ |
2141 | static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two) | 2211 | static inline void inc_ap_bio(struct drbd_conf *mdev, int count) |
2142 | { | 2212 | { |
2143 | /* compare with after_state_ch, | 2213 | /* compare with after_state_ch, |
2144 | * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */ | 2214 | * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */ |
@@ -2160,7 +2230,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two) | |||
2160 | finish_wait(&mdev->misc_wait, &wait); | 2230 | finish_wait(&mdev->misc_wait, &wait); |
2161 | spin_lock_irq(&mdev->req_lock); | 2231 | spin_lock_irq(&mdev->req_lock); |
2162 | } | 2232 | } |
2163 | atomic_add(one_or_two, &mdev->ap_bio_cnt); | 2233 | atomic_add(count, &mdev->ap_bio_cnt); |
2164 | spin_unlock_irq(&mdev->req_lock); | 2234 | spin_unlock_irq(&mdev->req_lock); |
2165 | } | 2235 | } |
2166 | 2236 | ||
@@ -2251,7 +2321,8 @@ static inline void drbd_md_flush(struct drbd_conf *mdev) | |||
2251 | if (test_bit(MD_NO_BARRIER, &mdev->flags)) | 2321 | if (test_bit(MD_NO_BARRIER, &mdev->flags)) |
2252 | return; | 2322 | return; |
2253 | 2323 | ||
2254 | r = blkdev_issue_flush(mdev->ldev->md_bdev, NULL); | 2324 | r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL, |
2325 | BLKDEV_IFL_WAIT); | ||
2255 | if (r) { | 2326 | if (r) { |
2256 | set_bit(MD_NO_BARRIER, &mdev->flags); | 2327 | set_bit(MD_NO_BARRIER, &mdev->flags); |
2257 | dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); | 2328 | dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); |
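Both flush call sites pick up the extended blkdev_issue_flush() signature: an allocation mask, an optional pointer that can report the failed sector, and a flag word where BLKDEV_IFL_WAIT requests synchronous completion. A sketch of the new calling convention, matching the hunk above:

    #include <linux/blkdev.h>

    /* flush the device's volatile write cache and wait for the result */
    int r = blkdev_issue_flush(bdev, GFP_KERNEL, NULL /* error_sector */,
                               BLKDEV_IFL_WAIT);
    if (r)
        pr_err("cache flush failed: %d\n", r);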
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 93d1f9b469d4..fa650dd85b90 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -684,6 +684,9 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) | |||
684 | else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT) | 684 | else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT) |
685 | rv = SS_NO_REMOTE_DISK; | 685 | rv = SS_NO_REMOTE_DISK; |
686 | 686 | ||
687 | else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) | ||
688 | rv = SS_NO_UP_TO_DATE_DISK; | ||
689 | |||
687 | else if ((ns.conn == C_CONNECTED || | 690 | else if ((ns.conn == C_CONNECTED || |
688 | ns.conn == C_WF_BITMAP_S || | 691 | ns.conn == C_WF_BITMAP_S || |
689 | ns.conn == C_SYNC_SOURCE || | 692 | ns.conn == C_SYNC_SOURCE || |
@@ -840,7 +843,12 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
840 | break; | 843 | break; |
841 | case C_WF_BITMAP_S: | 844 | case C_WF_BITMAP_S: |
842 | case C_PAUSED_SYNC_S: | 845 | case C_PAUSED_SYNC_S: |
843 | ns.pdsk = D_OUTDATED; | 846 | /* remap any consistent state to D_OUTDATED, |
847 | * but disallow "upgrade" of states that are not even consistent. | ||
848 | */ | ||
849 | ns.pdsk = | ||
850 | (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED) | ||
851 | ? os.pdsk : D_OUTDATED; | ||
844 | break; | 852 | break; |
845 | case C_SYNC_SOURCE: | 853 | case C_SYNC_SOURCE: |
846 | ns.pdsk = D_INCONSISTENT; | 854 | ns.pdsk = D_INCONSISTENT; |
@@ -1205,8 +1213,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1205 | && (ns.pdsk < D_INCONSISTENT || | 1213 | && (ns.pdsk < D_INCONSISTENT || |
1206 | ns.pdsk == D_UNKNOWN || | 1214 | ns.pdsk == D_UNKNOWN || |
1207 | ns.pdsk == D_OUTDATED)) { | 1215 | ns.pdsk == D_OUTDATED)) { |
1208 | kfree(mdev->p_uuid); | ||
1209 | mdev->p_uuid = NULL; | ||
1210 | if (get_ldev(mdev)) { | 1216 | if (get_ldev(mdev)) { |
1211 | if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) && | 1217 | if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) && |
1212 | mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { | 1218 | mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { |
@@ -1230,9 +1236,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1230 | /* Last part of the attaching process ... */ | 1236 | /* Last part of the attaching process ... */ |
1231 | if (ns.conn >= C_CONNECTED && | 1237 | if (ns.conn >= C_CONNECTED && |
1232 | os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { | 1238 | os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { |
1233 | kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */ | 1239 | drbd_send_sizes(mdev, 0, 0); /* to start sync... */ |
1234 | mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */ | ||
1235 | drbd_send_sizes(mdev, 0); /* to start sync... */ | ||
1236 | drbd_send_uuids(mdev); | 1240 | drbd_send_uuids(mdev); |
1237 | drbd_send_state(mdev); | 1241 | drbd_send_state(mdev); |
1238 | } | 1242 | } |
@@ -1755,7 +1759,7 @@ int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val) | |||
1755 | (struct p_header *)&p, sizeof(p)); | 1759 | (struct p_header *)&p, sizeof(p)); |
1756 | } | 1760 | } |
1757 | 1761 | ||
1758 | int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply) | 1762 | int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags) |
1759 | { | 1763 | { |
1760 | struct p_sizes p; | 1764 | struct p_sizes p; |
1761 | sector_t d_size, u_size; | 1765 | sector_t d_size, u_size; |
@@ -1767,7 +1771,6 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply) | |||
1767 | d_size = drbd_get_max_capacity(mdev->ldev); | 1771 | d_size = drbd_get_max_capacity(mdev->ldev); |
1768 | u_size = mdev->ldev->dc.disk_size; | 1772 | u_size = mdev->ldev->dc.disk_size; |
1769 | q_order_type = drbd_queue_order_type(mdev); | 1773 | q_order_type = drbd_queue_order_type(mdev); |
1770 | p.queue_order_type = cpu_to_be32(drbd_queue_order_type(mdev)); | ||
1771 | put_ldev(mdev); | 1774 | put_ldev(mdev); |
1772 | } else { | 1775 | } else { |
1773 | d_size = 0; | 1776 | d_size = 0; |
@@ -1779,7 +1782,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply) | |||
1779 | p.u_size = cpu_to_be64(u_size); | 1782 | p.u_size = cpu_to_be64(u_size); |
1780 | p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); | 1783 | p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); |
1781 | p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue)); | 1784 | p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue)); |
1782 | p.queue_order_type = cpu_to_be32(q_order_type); | 1785 | p.queue_order_type = cpu_to_be16(q_order_type); |
1786 | p.dds_flags = cpu_to_be16(flags); | ||
1783 | 1787 | ||
1784 | ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, | 1788 | ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, |
1785 | (struct p_header *)&p, sizeof(p)); | 1789 | (struct p_header *)&p, sizeof(p)); |
@@ -2229,9 +2233,9 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket * | |||
2229 | * with page_count == 0 or PageSlab. | 2233 | * with page_count == 0 or PageSlab. |
2230 | */ | 2234 | */ |
2231 | static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page, | 2235 | static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page, |
2232 | int offset, size_t size) | 2236 | int offset, size_t size, unsigned msg_flags) |
2233 | { | 2237 | { |
2234 | int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, 0); | 2238 | int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags); |
2235 | kunmap(page); | 2239 | kunmap(page); |
2236 | if (sent == size) | 2240 | if (sent == size) |
2237 | mdev->send_cnt += size>>9; | 2241 | mdev->send_cnt += size>>9; |
@@ -2239,7 +2243,7 @@ static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page, | |||
2239 | } | 2243 | } |
2240 | 2244 | ||
2241 | static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, | 2245 | static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, |
2242 | int offset, size_t size) | 2246 | int offset, size_t size, unsigned msg_flags) |
2243 | { | 2247 | { |
2244 | mm_segment_t oldfs = get_fs(); | 2248 | mm_segment_t oldfs = get_fs(); |
2245 | int sent, ok; | 2249 | int sent, ok; |
@@ -2252,14 +2256,15 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, | |||
2252 | * __page_cache_release a page that would actually still be referenced | 2256 | * __page_cache_release a page that would actually still be referenced |
2253 | * by someone, leading to some obscure delayed Oops somewhere else. */ | 2257 | * by someone, leading to some obscure delayed Oops somewhere else. */ |
2254 | if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) | 2258 | if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) |
2255 | return _drbd_no_send_page(mdev, page, offset, size); | 2259 | return _drbd_no_send_page(mdev, page, offset, size, msg_flags); |
2256 | 2260 | ||
2261 | msg_flags |= MSG_NOSIGNAL; | ||
2257 | drbd_update_congested(mdev); | 2262 | drbd_update_congested(mdev); |
2258 | set_fs(KERNEL_DS); | 2263 | set_fs(KERNEL_DS); |
2259 | do { | 2264 | do { |
2260 | sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page, | 2265 | sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page, |
2261 | offset, len, | 2266 | offset, len, |
2262 | MSG_NOSIGNAL); | 2267 | msg_flags); |
2263 | if (sent == -EAGAIN) { | 2268 | if (sent == -EAGAIN) { |
2264 | if (we_should_drop_the_connection(mdev, | 2269 | if (we_should_drop_the_connection(mdev, |
2265 | mdev->data.socket)) | 2270 | mdev->data.socket)) |
@@ -2288,9 +2293,11 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) | |||
2288 | { | 2293 | { |
2289 | struct bio_vec *bvec; | 2294 | struct bio_vec *bvec; |
2290 | int i; | 2295 | int i; |
2296 | /* hint all but last page with MSG_MORE */ | ||
2291 | __bio_for_each_segment(bvec, bio, i, 0) { | 2297 | __bio_for_each_segment(bvec, bio, i, 0) { |
2292 | if (!_drbd_no_send_page(mdev, bvec->bv_page, | 2298 | if (!_drbd_no_send_page(mdev, bvec->bv_page, |
2293 | bvec->bv_offset, bvec->bv_len)) | 2299 | bvec->bv_offset, bvec->bv_len, |
2300 | i == bio->bi_vcnt -1 ? 0 : MSG_MORE)) | ||
2294 | return 0; | 2301 | return 0; |
2295 | } | 2302 | } |
2296 | return 1; | 2303 | return 1; |
@@ -2300,12 +2307,28 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) | |||
2300 | { | 2307 | { |
2301 | struct bio_vec *bvec; | 2308 | struct bio_vec *bvec; |
2302 | int i; | 2309 | int i; |
2310 | /* hint all but last page with MSG_MORE */ | ||
2303 | __bio_for_each_segment(bvec, bio, i, 0) { | 2311 | __bio_for_each_segment(bvec, bio, i, 0) { |
2304 | if (!_drbd_send_page(mdev, bvec->bv_page, | 2312 | if (!_drbd_send_page(mdev, bvec->bv_page, |
2305 | bvec->bv_offset, bvec->bv_len)) | 2313 | bvec->bv_offset, bvec->bv_len, |
2314 | i == bio->bi_vcnt -1 ? 0 : MSG_MORE)) | ||
2306 | return 0; | 2315 | return 0; |
2307 | } | 2316 | } |
2317 | return 1; | ||
2318 | } | ||
2308 | 2319 | ||
2320 | static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e) | ||
2321 | { | ||
2322 | struct page *page = e->pages; | ||
2323 | unsigned len = e->size; | ||
2324 | /* hint all but last page with MSG_MORE */ | ||
2325 | page_chain_for_each(page) { | ||
2326 | unsigned l = min_t(unsigned, len, PAGE_SIZE); | ||
2327 | if (!_drbd_send_page(mdev, page, 0, l, | ||
2328 | page_chain_next(page) ? MSG_MORE : 0)) | ||
2329 | return 0; | ||
2330 | len -= l; | ||
2331 | } | ||
2309 | return 1; | 2332 | return 1; |
2310 | } | 2333 | } |
2311 | 2334 | ||
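All three send paths now pass MSG_MORE for every page but the last, telling the TCP stack that more payload follows immediately so it can coalesce the pieces into full segments instead of flushing after each page. A userspace sketch of the same hinting (names hypothetical):

    #include <sys/socket.h>
    #include <sys/uio.h>

    /* send n buffers; hint all but the last with MSG_MORE */
    static int send_chunks(int fd, const struct iovec *iov, int n)
    {
        for (int i = 0; i < n; i++) {
            int flags = (i == n - 1) ? 0 : MSG_MORE;

            if (send(fd, iov[i].iov_base, iov[i].iov_len, flags) < 0)
                return -1;      /* partial-send handling elided */
        }
        return 0;
    }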
@@ -2340,15 +2363,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) | |||
2340 | /* NOTE: no need to check if barriers supported here as we would | 2363 | /* NOTE: no need to check if barriers supported here as we would |
2341 | * not pass the test in make_request_common in that case | 2364 | * not pass the test in make_request_common in that case |
2342 | */ | 2365 | */ |
2343 | if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) { | 2366 | if (req->master_bio->bi_rw & REQ_HARDBARRIER) { |
2344 | dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n"); | 2367 | dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n"); |
2345 | /* dp_flags |= DP_HARDBARRIER; */ | 2368 | /* dp_flags |= DP_HARDBARRIER; */ |
2346 | } | 2369 | } |
2347 | if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO)) | 2370 | if (req->master_bio->bi_rw & REQ_SYNC) |
2348 | dp_flags |= DP_RW_SYNC; | 2371 | dp_flags |= DP_RW_SYNC; |
2349 | /* for now handle SYNCIO and UNPLUG | 2372 | /* for now handle SYNCIO and UNPLUG |
2350 | * as if they still were one and the same flag */ | 2373 | * as if they still were one and the same flag */ |
2351 | if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG)) | 2374 | if (req->master_bio->bi_rw & REQ_UNPLUG) |
2352 | dp_flags |= DP_RW_SYNC; | 2375 | dp_flags |= DP_RW_SYNC; |
2353 | if (mdev->state.conn >= C_SYNC_SOURCE && | 2376 | if (mdev->state.conn >= C_SYNC_SOURCE && |
2354 | mdev->state.conn <= C_PAUSED_SYNC_T) | 2377 | mdev->state.conn <= C_PAUSED_SYNC_T) |
@@ -2357,11 +2380,11 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) | |||
2357 | p.dp_flags = cpu_to_be32(dp_flags); | 2380 | p.dp_flags = cpu_to_be32(dp_flags); |
2358 | set_bit(UNPLUG_REMOTE, &mdev->flags); | 2381 | set_bit(UNPLUG_REMOTE, &mdev->flags); |
2359 | ok = (sizeof(p) == | 2382 | ok = (sizeof(p) == |
2360 | drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE)); | 2383 | drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0)); |
2361 | if (ok && dgs) { | 2384 | if (ok && dgs) { |
2362 | dgb = mdev->int_dig_out; | 2385 | dgb = mdev->int_dig_out; |
2363 | drbd_csum(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); | 2386 | drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); |
2364 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE); | 2387 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); |
2365 | } | 2388 | } |
2366 | if (ok) { | 2389 | if (ok) { |
2367 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A) | 2390 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A) |
@@ -2371,6 +2394,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) | |||
2371 | } | 2394 | } |
2372 | 2395 | ||
2373 | drbd_put_data_sock(mdev); | 2396 | drbd_put_data_sock(mdev); |
2397 | |||
2374 | return ok; | 2398 | return ok; |
2375 | } | 2399 | } |
2376 | 2400 | ||
@@ -2406,16 +2430,17 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
2406 | return 0; | 2430 | return 0; |
2407 | 2431 | ||
2408 | ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, | 2432 | ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, |
2409 | sizeof(p), MSG_MORE); | 2433 | sizeof(p), dgs ? MSG_MORE : 0); |
2410 | if (ok && dgs) { | 2434 | if (ok && dgs) { |
2411 | dgb = mdev->int_dig_out; | 2435 | dgb = mdev->int_dig_out; |
2412 | drbd_csum(mdev, mdev->integrity_w_tfm, e->private_bio, dgb); | 2436 | drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); |
2413 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE); | 2437 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); |
2414 | } | 2438 | } |
2415 | if (ok) | 2439 | if (ok) |
2416 | ok = _drbd_send_zc_bio(mdev, e->private_bio); | 2440 | ok = _drbd_send_zc_ee(mdev, e); |
2417 | 2441 | ||
2418 | drbd_put_data_sock(mdev); | 2442 | drbd_put_data_sock(mdev); |
2443 | |||
2419 | return ok; | 2444 | return ok; |
2420 | } | 2445 | } |
2421 | 2446 | ||
@@ -2511,6 +2536,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) | |||
2511 | unsigned long flags; | 2536 | unsigned long flags; |
2512 | int rv = 0; | 2537 | int rv = 0; |
2513 | 2538 | ||
2539 | lock_kernel(); | ||
2514 | spin_lock_irqsave(&mdev->req_lock, flags); | 2540 | spin_lock_irqsave(&mdev->req_lock, flags); |
2515 | /* to have a stable mdev->state.role | 2541 | /* to have a stable mdev->state.role |
2516 | * and no race with updating open_cnt */ | 2542 | * and no race with updating open_cnt */ |
@@ -2525,6 +2551,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) | |||
2525 | if (!rv) | 2551 | if (!rv) |
2526 | mdev->open_cnt++; | 2552 | mdev->open_cnt++; |
2527 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 2553 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
2554 | unlock_kernel(); | ||
2528 | 2555 | ||
2529 | return rv; | 2556 | return rv; |
2530 | } | 2557 | } |
@@ -2532,7 +2559,9 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) | |||
2532 | static int drbd_release(struct gendisk *gd, fmode_t mode) | 2559 | static int drbd_release(struct gendisk *gd, fmode_t mode) |
2533 | { | 2560 | { |
2534 | struct drbd_conf *mdev = gd->private_data; | 2561 | struct drbd_conf *mdev = gd->private_data; |
2562 | lock_kernel(); | ||
2535 | mdev->open_cnt--; | 2563 | mdev->open_cnt--; |
2564 | unlock_kernel(); | ||
2536 | return 0; | 2565 | return 0; |
2537 | } | 2566 | } |
2538 | 2567 | ||
@@ -2567,9 +2596,20 @@ static void drbd_unplug_fn(struct request_queue *q) | |||
2567 | 2596 | ||
2568 | static void drbd_set_defaults(struct drbd_conf *mdev) | 2597 | static void drbd_set_defaults(struct drbd_conf *mdev) |
2569 | { | 2598 | { |
2570 | mdev->sync_conf.after = DRBD_AFTER_DEF; | 2599 | /* This way we get a compile error when sync_conf grows, |
2571 | mdev->sync_conf.rate = DRBD_RATE_DEF; | 2600 | and we forgot to initialize it here */ |
2572 | mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF; | 2601 | mdev->sync_conf = (struct syncer_conf) { |
2602 | /* .rate = */ DRBD_RATE_DEF, | ||
2603 | /* .after = */ DRBD_AFTER_DEF, | ||
2604 | /* .al_extents = */ DRBD_AL_EXTENTS_DEF, | ||
2605 | /* .verify_alg = */ {}, 0, | ||
2606 | /* .cpu_mask = */ {}, 0, | ||
2607 | /* .csums_alg = */ {}, 0, | ||
2608 | /* .use_rle = */ 0 | ||
2609 | }; | ||
2610 | |||
2611 | /* Have to use that way, because the layout differs between | ||
2612 | big endian and little endian */ | ||
2573 | mdev->state = (union drbd_state) { | 2613 | mdev->state = (union drbd_state) { |
2574 | { .role = R_SECONDARY, | 2614 | { .role = R_SECONDARY, |
2575 | .peer = R_UNKNOWN, | 2615 | .peer = R_UNKNOWN, |
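The positional initializer replaces three individual assignments with one compound literal that lists every member of struct syncer_conf in declaration order; the comment's compile-error claim holds in builds where missing field initializers are flagged as errors (e.g. -Werror=missing-field-initializers), which is an assumption about the build, not a guarantee. An illustrative sketch with a made-up struct:

    /* stand-in for struct syncer_conf; layout illustrative only */
    struct conf_example {
        int rate;
        int after;
        int al_extents;
    };

    /* positional form: order must match the declaration; a member
     * added later and not listed here is caught by builds that flag
     * missing field initializers */
    struct conf_example c = {
        /* .rate       = */ 250,
        /* .after      = */ -1,
        /* .al_extents = */ 127,
    };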
@@ -2628,6 +2668,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) | |||
2628 | INIT_LIST_HEAD(&mdev->unplug_work.list); | 2668 | INIT_LIST_HEAD(&mdev->unplug_work.list); |
2629 | INIT_LIST_HEAD(&mdev->md_sync_work.list); | 2669 | INIT_LIST_HEAD(&mdev->md_sync_work.list); |
2630 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); | 2670 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); |
2671 | |||
2631 | mdev->resync_work.cb = w_resync_inactive; | 2672 | mdev->resync_work.cb = w_resync_inactive; |
2632 | mdev->unplug_work.cb = w_send_write_hint; | 2673 | mdev->unplug_work.cb = w_send_write_hint; |
2633 | mdev->md_sync_work.cb = w_md_sync; | 2674 | mdev->md_sync_work.cb = w_md_sync; |
@@ -2680,7 +2721,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) | |||
2680 | drbd_set_my_capacity(mdev, 0); | 2721 | drbd_set_my_capacity(mdev, 0); |
2681 | if (mdev->bitmap) { | 2722 | if (mdev->bitmap) { |
2682 | /* maybe never allocated. */ | 2723 | /* maybe never allocated. */ |
2683 | drbd_bm_resize(mdev, 0); | 2724 | drbd_bm_resize(mdev, 0, 1); |
2684 | drbd_bm_cleanup(mdev); | 2725 | drbd_bm_cleanup(mdev); |
2685 | } | 2726 | } |
2686 | 2727 | ||
@@ -3129,7 +3170,7 @@ int __init drbd_init(void) | |||
3129 | if (err) | 3170 | if (err) |
3130 | goto Enomem; | 3171 | goto Enomem; |
3131 | 3172 | ||
3132 | drbd_proc = proc_create("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops); | 3173 | drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); |
3133 | if (!drbd_proc) { | 3174 | if (!drbd_proc) { |
3134 | printk(KERN_ERR "drbd: unable to register proc file\n"); | 3175 | printk(KERN_ERR "drbd: unable to register proc file\n"); |
3135 | goto Enomem; | 3176 | goto Enomem; |
@@ -3660,7 +3701,8 @@ _drbd_fault_str(unsigned int type) { | |||
3660 | [DRBD_FAULT_DT_RD] = "Data read", | 3701 | [DRBD_FAULT_DT_RD] = "Data read", |
3661 | [DRBD_FAULT_DT_RA] = "Data read ahead", | 3702 | [DRBD_FAULT_DT_RA] = "Data read ahead", |
3662 | [DRBD_FAULT_BM_ALLOC] = "BM allocation", | 3703 | [DRBD_FAULT_BM_ALLOC] = "BM allocation", |
3663 | [DRBD_FAULT_AL_EE] = "EE allocation" | 3704 | [DRBD_FAULT_AL_EE] = "EE allocation", |
3705 | [DRBD_FAULT_RECEIVE] = "receive data corruption", | ||
3664 | }; | 3706 | }; |
3665 | 3707 | ||
3666 | return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**"; | 3708 | return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**"; |
@@ -3679,7 +3721,7 @@ _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) | |||
3679 | if (ret) { | 3721 | if (ret) { |
3680 | fault_count++; | 3722 | fault_count++; |
3681 | 3723 | ||
3682 | if (printk_ratelimit()) | 3724 | if (__ratelimit(&drbd_ratelimit_state)) |
3683 | dev_warn(DEV, "***Simulating %s failure\n", | 3725 | dev_warn(DEV, "***Simulating %s failure\n", |
3684 | _drbd_fault_str(type)); | 3726 | _drbd_fault_str(type)); |
3685 | } | 3727 | } |
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 6429d2b19e06..73131c5ae339 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev) | |||
510 | * Returns 0 on success, negative return values indicate errors. | 510 | * Returns 0 on success, negative return values indicate errors. |
511 | * You should call drbd_md_sync() after calling this function. | 511 | * You should call drbd_md_sync() after calling this function. |
512 | */ | 512 | */ |
513 | enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local) | 513 | enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) |
514 | { | 514 | { |
515 | sector_t prev_first_sect, prev_size; /* previous meta location */ | 515 | sector_t prev_first_sect, prev_size; /* previous meta location */ |
516 | sector_t la_size; | 516 | sector_t la_size; |
@@ -541,12 +541,12 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force | |||
541 | /* TODO: should only be some assert here, not (re)init... */ | 541 | /* TODO: should only be some assert here, not (re)init... */ |
542 | drbd_md_set_sector_offsets(mdev, mdev->ldev); | 542 | drbd_md_set_sector_offsets(mdev, mdev->ldev); |
543 | 543 | ||
544 | size = drbd_new_dev_size(mdev, mdev->ldev, force); | 544 | size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED); |
545 | 545 | ||
546 | if (drbd_get_capacity(mdev->this_bdev) != size || | 546 | if (drbd_get_capacity(mdev->this_bdev) != size || |
547 | drbd_bm_capacity(mdev) != size) { | 547 | drbd_bm_capacity(mdev) != size) { |
548 | int err; | 548 | int err; |
549 | err = drbd_bm_resize(mdev, size); | 549 | err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC)); |
550 | if (unlikely(err)) { | 550 | if (unlikely(err)) { |
551 | /* currently there is only one error: ENOMEM! */ | 551 | /* currently there is only one error: ENOMEM! */ |
552 | size = drbd_bm_capacity(mdev)>>1; | 552 | size = drbd_bm_capacity(mdev)>>1; |
@@ -704,9 +704,6 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu | |||
704 | struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; | 704 | struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; |
705 | int max_segments = mdev->ldev->dc.max_bio_bvecs; | 705 | int max_segments = mdev->ldev->dc.max_bio_bvecs; |
706 | 706 | ||
707 | if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv) | ||
708 | max_seg_s = PAGE_SIZE; | ||
709 | |||
710 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); | 707 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); |
711 | 708 | ||
712 | blk_queue_max_hw_sectors(q, max_seg_s >> 9); | 709 | blk_queue_max_hw_sectors(q, max_seg_s >> 9); |
@@ -1117,6 +1114,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1117 | mdev->new_state_tmp.i = ns.i; | 1114 | mdev->new_state_tmp.i = ns.i; |
1118 | ns.i = os.i; | 1115 | ns.i = os.i; |
1119 | ns.disk = D_NEGOTIATING; | 1116 | ns.disk = D_NEGOTIATING; |
1117 | |||
1118 | /* We expect to receive up-to-date UUIDs soon. | ||
1119 | To avoid a race in receive_state, free p_uuid while | ||
1120 | holding req_lock. I.e. atomic with the state change */ | ||
1121 | kfree(mdev->p_uuid); | ||
1122 | mdev->p_uuid = NULL; | ||
1120 | } | 1123 | } |
1121 | 1124 | ||
1122 | rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); | 1125 | rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); |
@@ -1199,13 +1202,12 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1199 | } | 1202 | } |
1200 | 1203 | ||
1201 | /* allocation not in the IO path, cqueue thread context */ | 1204 | /* allocation not in the IO path, cqueue thread context */ |
1202 | new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL); | 1205 | new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); |
1203 | if (!new_conf) { | 1206 | if (!new_conf) { |
1204 | retcode = ERR_NOMEM; | 1207 | retcode = ERR_NOMEM; |
1205 | goto fail; | 1208 | goto fail; |
1206 | } | 1209 | } |
1207 | 1210 | ||
1208 | memset(new_conf, 0, sizeof(struct net_conf)); | ||
1209 | new_conf->timeout = DRBD_TIMEOUT_DEF; | 1211 | new_conf->timeout = DRBD_TIMEOUT_DEF; |
1210 | new_conf->try_connect_int = DRBD_CONNECT_INT_DEF; | 1212 | new_conf->try_connect_int = DRBD_CONNECT_INT_DEF; |
1211 | new_conf->ping_int = DRBD_PING_INT_DEF; | 1213 | new_conf->ping_int = DRBD_PING_INT_DEF; |
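kzalloc() is kmalloc() followed by zeroing, so dropping the explicit memset() is purely a simplification; a sketch of the equivalence:

    /* before: allocate, then zero by hand */
    new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
    if (new_conf)
        memset(new_conf, 0, sizeof(struct net_conf));

    /* after: one call allocates and zeroes */
    new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);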
@@ -1477,8 +1479,8 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1477 | { | 1479 | { |
1478 | struct resize rs; | 1480 | struct resize rs; |
1479 | int retcode = NO_ERROR; | 1481 | int retcode = NO_ERROR; |
1480 | int ldsc = 0; /* local disk size changed */ | ||
1481 | enum determine_dev_size dd; | 1482 | enum determine_dev_size dd; |
1483 | enum dds_flags ddsf; | ||
1482 | 1484 | ||
1483 | memset(&rs, 0, sizeof(struct resize)); | 1485 | memset(&rs, 0, sizeof(struct resize)); |
1484 | if (!resize_from_tags(mdev, nlp->tag_list, &rs)) { | 1486 | if (!resize_from_tags(mdev, nlp->tag_list, &rs)) { |
@@ -1502,13 +1504,17 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1502 | goto fail; | 1504 | goto fail; |
1503 | } | 1505 | } |
1504 | 1506 | ||
1505 | if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { | 1507 | if (rs.no_resync && mdev->agreed_pro_version < 93) { |
1506 | mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); | 1508 | retcode = ERR_NEED_APV_93; |
1507 | ldsc = 1; | 1509 | goto fail; |
1508 | } | 1510 | } |
1509 | 1511 | ||
1512 | if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) | ||
1513 | mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); | ||
1514 | |||
1510 | mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; | 1515 | mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; |
1511 | dd = drbd_determin_dev_size(mdev, rs.resize_force); | 1516 | ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); |
1517 | dd = drbd_determin_dev_size(mdev, ddsf); | ||
1512 | drbd_md_sync(mdev); | 1518 | drbd_md_sync(mdev); |
1513 | put_ldev(mdev); | 1519 | put_ldev(mdev); |
1514 | if (dd == dev_size_error) { | 1520 | if (dd == dev_size_error) { |
@@ -1516,12 +1522,12 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1516 | goto fail; | 1522 | goto fail; |
1517 | } | 1523 | } |
1518 | 1524 | ||
1519 | if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) { | 1525 | if (mdev->state.conn == C_CONNECTED) { |
1520 | if (dd == grew) | 1526 | if (dd == grew) |
1521 | set_bit(RESIZE_PENDING, &mdev->flags); | 1527 | set_bit(RESIZE_PENDING, &mdev->flags); |
1522 | 1528 | ||
1523 | drbd_send_uuids(mdev); | 1529 | drbd_send_uuids(mdev); |
1524 | drbd_send_sizes(mdev, 1); | 1530 | drbd_send_sizes(mdev, 1, ddsf); |
1525 | } | 1531 | } |
1526 | 1532 | ||
1527 | fail: | 1533 | fail: |
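The new enum dds_flags folds the independent resize options into a single word that travels through drbd_determin_dev_size() and on the wire in p_sizes.dds_flags. The values below are assumed from the surrounding usage, not quoted from the header:

    /* assumed definition; each option is an independent bit */
    enum dds_flags {
        DDSF_FORCED    = 1,     /* user forced the resize */
        DDSF_NO_RESYNC = 2,     /* do not resync the newly added area */
    };

    /* combined from the netlink request, as in drbd_nl_resize() above */
    ddsf = (rs.resize_force ? DDSF_FORCED : 0) |
           (rs.no_resync ? DDSF_NO_RESYNC : 0);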
@@ -2207,9 +2213,9 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
2207 | { | 2213 | { |
2208 | struct cn_msg *cn_reply; | 2214 | struct cn_msg *cn_reply; |
2209 | struct drbd_nl_cfg_reply *reply; | 2215 | struct drbd_nl_cfg_reply *reply; |
2210 | struct bio_vec *bvec; | ||
2211 | unsigned short *tl; | 2216 | unsigned short *tl; |
2212 | int i; | 2217 | struct page *page; |
2218 | unsigned len; | ||
2213 | 2219 | ||
2214 | if (!e) | 2220 | if (!e) |
2215 | return; | 2221 | return; |
@@ -2247,11 +2253,15 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
2247 | put_unaligned(T_ee_data, tl++); | 2253 | put_unaligned(T_ee_data, tl++); |
2248 | put_unaligned(e->size, tl++); | 2254 | put_unaligned(e->size, tl++); |
2249 | 2255 | ||
2250 | __bio_for_each_segment(bvec, e->private_bio, i, 0) { | 2256 | len = e->size; |
2251 | void *d = kmap(bvec->bv_page); | 2257 | page = e->pages; |
2252 | memcpy(tl, d + bvec->bv_offset, bvec->bv_len); | 2258 | page_chain_for_each(page) { |
2253 | kunmap(bvec->bv_page); | 2259 | void *d = kmap_atomic(page, KM_USER0); |
2254 | tl=(unsigned short*)((char*)tl + bvec->bv_len); | 2260 | unsigned l = min_t(unsigned, len, PAGE_SIZE); |
2261 | memcpy(tl, d, l); | ||
2262 | kunmap_atomic(d, KM_USER0); | ||
2263 | tl = (unsigned short*)((char*)tl + l); | ||
2264 | len -= l; | ||
2255 | } | 2265 | } |
2256 | put_unaligned(TT_END, tl++); /* Close the tag list */ | 2266 | put_unaligned(TT_END, tl++); /* Close the tag list */ |
2257 | 2267 | ||
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 3f096e7959b4..081522d3c742 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/unistd.h> | 42 | #include <linux/unistd.h> |
43 | #include <linux/vmalloc.h> | 43 | #include <linux/vmalloc.h> |
44 | #include <linux/random.h> | 44 | #include <linux/random.h> |
45 | #include <linux/mm.h> | ||
46 | #include <linux/string.h> | 45 | #include <linux/string.h> |
47 | #include <linux/scatterlist.h> | 46 | #include <linux/scatterlist.h> |
48 | #include "drbd_int.h" | 47 | #include "drbd_int.h" |
@@ -80,30 +79,128 @@ static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epo | |||
80 | 79 | ||
81 | #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) | 80 | #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) |
82 | 81 | ||
83 | static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev) | 82 | /* |
83 | * some helper functions to deal with single linked page lists, | ||
84 | * page->private being our "next" pointer. | ||
85 | */ | ||
86 | |||
87 | /* If at least n pages are linked at head, get n pages off. | ||
88 | * Otherwise, don't modify head, and return NULL. | ||
89 | * Locking is the responsibility of the caller. | ||
90 | */ | ||
91 | static struct page *page_chain_del(struct page **head, int n) | ||
92 | { | ||
93 | struct page *page; | ||
94 | struct page *tmp; | ||
95 | |||
96 | BUG_ON(!n); | ||
97 | BUG_ON(!head); | ||
98 | |||
99 | page = *head; | ||
100 | |||
101 | if (!page) | ||
102 | return NULL; | ||
103 | |||
104 | while (page) { | ||
105 | tmp = page_chain_next(page); | ||
106 | if (--n == 0) | ||
107 | break; /* found sufficient pages */ | ||
108 | if (tmp == NULL) | ||
109 | /* insufficient pages, don't use any of them. */ | ||
110 | return NULL; | ||
111 | page = tmp; | ||
112 | } | ||
113 | |||
114 | /* add end of list marker for the returned list */ | ||
115 | set_page_private(page, 0); | ||
116 | /* actual return value, and adjustment of head */ | ||
117 | page = *head; | ||
118 | *head = tmp; | ||
119 | return page; | ||
120 | } | ||
121 | |||
122 | /* may be used outside of locks to find the tail of a (usually short) | ||
123 | * "private" page chain, before adding it back to a global chain head | ||
124 | * with page_chain_add() under a spinlock. */ | ||
125 | static struct page *page_chain_tail(struct page *page, int *len) | ||
126 | { | ||
127 | struct page *tmp; | ||
128 | int i = 1; | ||
129 | while ((tmp = page_chain_next(page))) | ||
130 | ++i, page = tmp; | ||
131 | if (len) | ||
132 | *len = i; | ||
133 | return page; | ||
134 | } | ||
135 | |||
136 | static int page_chain_free(struct page *page) | ||
137 | { | ||
138 | struct page *tmp; | ||
139 | int i = 0; | ||
140 | page_chain_for_each_safe(page, tmp) { | ||
141 | put_page(page); | ||
142 | ++i; | ||
143 | } | ||
144 | return i; | ||
145 | } | ||
146 | |||
147 | static void page_chain_add(struct page **head, | ||
148 | struct page *chain_first, struct page *chain_last) | ||
149 | { | ||
150 | #if 1 | ||
151 | struct page *tmp; | ||
152 | tmp = page_chain_tail(chain_first, NULL); | ||
153 | BUG_ON(tmp != chain_last); | ||
154 | #endif | ||
155 | |||
156 | /* add chain to head */ | ||
157 | set_page_private(chain_last, (unsigned long)*head); | ||
158 | *head = chain_first; | ||
159 | } | ||
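page_chain_del() is all-or-nothing: either @n pages come off the head, or the chain is left untouched, so a caller never has to give back a half-harvested list. A userspace sketch of that semantic with generic nodes:

    struct node {
        struct node *next;
    };

    /* detach the first n nodes, or none if fewer are available */
    static struct node *chain_del(struct node **head, int n)
    {
        struct node *first = *head, *p = *head;

        if (!p)
            return NULL;
        while (--n) {           /* walk n-1 links */
            p = p->next;
            if (!p)
                return NULL;    /* insufficient nodes: take nothing */
        }
        *head = p->next;        /* new head: whatever follows node n */
        p->next = NULL;         /* terminate the detached chain */
        return first;
    }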
160 | |||
161 | static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number) | ||
84 | { | 162 | { |
85 | struct page *page = NULL; | 163 | struct page *page = NULL; |
164 | struct page *tmp = NULL; | ||
165 | int i = 0; | ||
86 | 166 | ||
87 | /* Yes, testing drbd_pp_vacant outside the lock is racy. | 167 | /* Yes, testing drbd_pp_vacant outside the lock is racy. |
88 | * So what. It saves a spin_lock. */ | 168 | * So what. It saves a spin_lock. */ |
89 | if (drbd_pp_vacant > 0) { | 169 | if (drbd_pp_vacant >= number) { |
90 | spin_lock(&drbd_pp_lock); | 170 | spin_lock(&drbd_pp_lock); |
91 | page = drbd_pp_pool; | 171 | page = page_chain_del(&drbd_pp_pool, number); |
92 | if (page) { | 172 | if (page) |
93 | drbd_pp_pool = (struct page *)page_private(page); | 173 | drbd_pp_vacant -= number; |
94 | set_page_private(page, 0); /* just to be polite */ | ||
95 | drbd_pp_vacant--; | ||
96 | } | ||
97 | spin_unlock(&drbd_pp_lock); | 174 | spin_unlock(&drbd_pp_lock); |
175 | if (page) | ||
176 | return page; | ||
98 | } | 177 | } |
178 | |||
99 | /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD | 179 | /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD |
100 | * "criss-cross" setup, that might cause write-out on some other DRBD, | 180 | * "criss-cross" setup, that might cause write-out on some other DRBD, |
101 | * which in turn might block on the other node at this very place. */ | 181 | * which in turn might block on the other node at this very place. */ |
102 | if (!page) | 182 | for (i = 0; i < number; i++) { |
103 | page = alloc_page(GFP_TRY); | 183 | tmp = alloc_page(GFP_TRY); |
104 | if (page) | 184 | if (!tmp) |
105 | atomic_inc(&mdev->pp_in_use); | 185 | break; |
106 | return page; | 186 | set_page_private(tmp, (unsigned long)page); |
187 | page = tmp; | ||
188 | } | ||
189 | |||
190 | if (i == number) | ||
191 | return page; | ||
192 | |||
193 | /* Not enough pages immediately available this time. | ||
194 | * No need to jump around here, drbd_pp_alloc will retry this | ||
195 | * function "soon". */ | ||
196 | if (page) { | ||
197 | tmp = page_chain_tail(page, NULL); | ||
198 | spin_lock(&drbd_pp_lock); | ||
199 | page_chain_add(&drbd_pp_pool, page, tmp); | ||
200 | drbd_pp_vacant += i; | ||
201 | spin_unlock(&drbd_pp_lock); | ||
202 | } | ||
203 | return NULL; | ||
107 | } | 204 | } |
108 | 205 | ||
109 | /* kick lower level device, if we have more than (arbitrary number) | 206 | /* kick lower level device, if we have more than (arbitrary number) |
@@ -127,7 +224,7 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed | |||
127 | 224 | ||
128 | list_for_each_safe(le, tle, &mdev->net_ee) { | 225 | list_for_each_safe(le, tle, &mdev->net_ee) { |
129 | e = list_entry(le, struct drbd_epoch_entry, w.list); | 226 | e = list_entry(le, struct drbd_epoch_entry, w.list); |
130 | if (drbd_bio_has_active_page(e->private_bio)) | 227 | if (drbd_ee_has_active_page(e)) |
131 | break; | 228 | break; |
132 | list_move(le, to_be_freed); | 229 | list_move(le, to_be_freed); |
133 | } | 230 | } |
@@ -148,32 +245,34 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev) | |||
148 | } | 245 | } |
149 | 246 | ||
150 | /** | 247 | /** |
151 | * drbd_pp_alloc() - Returns a page, fails only if a signal comes in | 248 | * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled) |
152 | * @mdev: DRBD device. | 249 | * @mdev: DRBD device. |
153 | * @retry: whether or not to retry allocation forever (or until signalled) | 250 | * @number: number of pages requested |
251 | * @retry: whether to retry, if not enough pages are available right now | ||
154 | * | 252 | * |
155 | * Tries to allocate a page, first from our own page pool, then from the | 253 | * Tries to allocate @number pages, first from our own page pool, then from |
156 | * kernel, unless this allocation would exceed the max_buffers setting. | 254 | * the kernel, unless this allocation would exceed the max_buffers setting. |
157 | * If @retry is non-zero, retry until DRBD frees a page somewhere else. | 255 | * Possibly retry until DRBD frees sufficient pages somewhere else. |
256 | * | ||
257 | * Returns a page chain linked via page->private. | ||
158 | */ | 258 | */ |
159 | static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry) | 259 | static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry) |
160 | { | 260 | { |
161 | struct page *page = NULL; | 261 | struct page *page = NULL; |
162 | DEFINE_WAIT(wait); | 262 | DEFINE_WAIT(wait); |
163 | 263 | ||
164 | if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) { | 264 | /* Yes, we may run up to @number over max_buffers. If we |
165 | page = drbd_pp_first_page_or_try_alloc(mdev); | 265 | * follow it strictly, the admin will get it wrong anyway. */ |
166 | if (page) | 266 | if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) |
167 | return page; | 267 | page = drbd_pp_first_pages_or_try_alloc(mdev, number); |
168 | } | ||
169 | 268 | ||
170 | for (;;) { | 269 | while (page == NULL) { |
171 | prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE); | 270 | prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE); |
172 | 271 | ||
173 | drbd_kick_lo_and_reclaim_net(mdev); | 272 | drbd_kick_lo_and_reclaim_net(mdev); |
174 | 273 | ||
175 | if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) { | 274 | if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) { |
176 | page = drbd_pp_first_page_or_try_alloc(mdev); | 275 | page = drbd_pp_first_pages_or_try_alloc(mdev, number); |
177 | if (page) | 276 | if (page) |
178 | break; | 277 | break; |
179 | } | 278 | } |
@@ -190,62 +289,32 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry) | |||
190 | } | 289 | } |
191 | finish_wait(&drbd_pp_wait, &wait); | 290 | finish_wait(&drbd_pp_wait, &wait); |
192 | 291 | ||
292 | if (page) | ||
293 | atomic_add(number, &mdev->pp_in_use); | ||
193 | return page; | 294 | return page; |
194 | } | 295 | } |
195 | 296 | ||
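The retry loop in drbd_pp_alloc() is the canonical prepare_to_wait()/schedule()/finish_wait() sequence: register the waiter first, re-check the condition, and only then sleep, so a wake_up() arriving between check and sleep is never lost. A condensed sketch ('enough_pages' is a placeholder for the real check):

    DEFINE_WAIT(wait);

    for (;;) {
        prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
        if (enough_pages)               /* placeholder condition */
            break;
        if (signal_pending(current))    /* TASK_INTERRUPTIBLE honours signals */
            break;
        schedule();                     /* sleeps until wake_up(&drbd_pp_wait) */
    }
    finish_wait(&drbd_pp_wait, &wait);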
196 | /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc. | 297 | /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc. |
197 | * Is also used from inside another spin_lock_irq(&mdev->req_lock) */ | 298 | * Is also used from inside another spin_lock_irq(&mdev->req_lock); |
299 | * Either links the page chain back to the global pool, | ||
300 | * or returns all pages to the system. */ | ||
198 | static void drbd_pp_free(struct drbd_conf *mdev, struct page *page) | 301 | static void drbd_pp_free(struct drbd_conf *mdev, struct page *page) |
199 | { | 302 | { |
200 | int free_it; | ||
201 | |||
202 | spin_lock(&drbd_pp_lock); | ||
203 | if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) { | ||
204 | free_it = 1; | ||
205 | } else { | ||
206 | set_page_private(page, (unsigned long)drbd_pp_pool); | ||
207 | drbd_pp_pool = page; | ||
208 | drbd_pp_vacant++; | ||
209 | free_it = 0; | ||
210 | } | ||
211 | spin_unlock(&drbd_pp_lock); | ||
212 | |||
213 | atomic_dec(&mdev->pp_in_use); | ||
214 | |||
215 | if (free_it) | ||
216 | __free_page(page); | ||
217 | |||
218 | wake_up(&drbd_pp_wait); | ||
219 | } | ||
220 | |||
221 | static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio) | ||
222 | { | ||
223 | struct page *p_to_be_freed = NULL; | ||
224 | struct page *page; | ||
225 | struct bio_vec *bvec; | ||
226 | int i; | 303 | int i; |
227 | 304 | if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) | |
228 | spin_lock(&drbd_pp_lock); | 305 | i = page_chain_free(page); |
229 | __bio_for_each_segment(bvec, bio, i, 0) { | 306 | else { |
230 | if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) { | 307 | struct page *tmp; |
231 | set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed); | 308 | tmp = page_chain_tail(page, &i); |
232 | p_to_be_freed = bvec->bv_page; | 309 | spin_lock(&drbd_pp_lock); |
233 | } else { | 310 | page_chain_add(&drbd_pp_pool, page, tmp); |
234 | set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool); | 311 | drbd_pp_vacant += i; |
235 | drbd_pp_pool = bvec->bv_page; | 312 | spin_unlock(&drbd_pp_lock); |
236 | drbd_pp_vacant++; | ||
237 | } | ||
238 | } | ||
239 | spin_unlock(&drbd_pp_lock); | ||
240 | atomic_sub(bio->bi_vcnt, &mdev->pp_in_use); | ||
241 | |||
242 | while (p_to_be_freed) { | ||
243 | page = p_to_be_freed; | ||
244 | p_to_be_freed = (struct page *)page_private(page); | ||
245 | set_page_private(page, 0); /* just to be polite */ | ||
246 | put_page(page); | ||
247 | } | 313 | } |
248 | 314 | atomic_sub(i, &mdev->pp_in_use); | |
315 | i = atomic_read(&mdev->pp_in_use); | ||
316 | if (i < 0) | ||
317 | dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i); | ||
249 | wake_up(&drbd_pp_wait); | 318 | wake_up(&drbd_pp_wait); |
250 | } | 319 | } |
251 | 320 | ||
@@ -270,11 +339,9 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, | |||
270 | unsigned int data_size, | 339 | unsigned int data_size, |
271 | gfp_t gfp_mask) __must_hold(local) | 340 | gfp_t gfp_mask) __must_hold(local) |
272 | { | 341 | { |
273 | struct request_queue *q; | ||
274 | struct drbd_epoch_entry *e; | 342 | struct drbd_epoch_entry *e; |
275 | struct page *page; | 343 | struct page *page; |
276 | struct bio *bio; | 344 | unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; |
277 | unsigned int ds; | ||
278 | 345 | ||
279 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE)) | 346 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE)) |
280 | return NULL; | 347 | return NULL; |
@@ -286,84 +353,32 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, | |||
286 | return NULL; | 353 | return NULL; |
287 | } | 354 | } |
288 | 355 | ||
289 | bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE)); | 356 | page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); |
290 | if (!bio) { | 357 | if (!page) |
291 | if (!(gfp_mask & __GFP_NOWARN)) | 358 | goto fail; |
292 | dev_err(DEV, "alloc_ee: Allocation of a bio failed\n"); | ||
293 | goto fail1; | ||
294 | } | ||
295 | |||
296 | bio->bi_bdev = mdev->ldev->backing_bdev; | ||
297 | bio->bi_sector = sector; | ||
298 | |||
299 | ds = data_size; | ||
300 | while (ds) { | ||
301 | page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT)); | ||
302 | if (!page) { | ||
303 | if (!(gfp_mask & __GFP_NOWARN)) | ||
304 | dev_err(DEV, "alloc_ee: Allocation of a page failed\n"); | ||
305 | goto fail2; | ||
306 | } | ||
307 | if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) { | ||
308 | drbd_pp_free(mdev, page); | ||
309 | dev_err(DEV, "alloc_ee: bio_add_page(s=%llu," | ||
310 | "data_size=%u,ds=%u) failed\n", | ||
311 | (unsigned long long)sector, data_size, ds); | ||
312 | |||
313 | q = bdev_get_queue(bio->bi_bdev); | ||
314 | if (q->merge_bvec_fn) { | ||
315 | struct bvec_merge_data bvm = { | ||
316 | .bi_bdev = bio->bi_bdev, | ||
317 | .bi_sector = bio->bi_sector, | ||
318 | .bi_size = bio->bi_size, | ||
319 | .bi_rw = bio->bi_rw, | ||
320 | }; | ||
321 | int l = q->merge_bvec_fn(q, &bvm, | ||
322 | &bio->bi_io_vec[bio->bi_vcnt]); | ||
323 | dev_err(DEV, "merge_bvec_fn() = %d\n", l); | ||
324 | } | ||
325 | |||
326 | /* dump more of the bio. */ | ||
327 | dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs); | ||
328 | dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt); | ||
329 | dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size); | ||
330 | dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments); | ||
331 | |||
332 | goto fail2; | ||
333 | break; | ||
334 | } | ||
335 | ds -= min_t(int, ds, PAGE_SIZE); | ||
336 | } | ||
337 | |||
338 | D_ASSERT(data_size == bio->bi_size); | ||
339 | |||
340 | bio->bi_private = e; | ||
341 | e->mdev = mdev; | ||
342 | e->sector = sector; | ||
343 | e->size = bio->bi_size; | ||
344 | 359 | ||
345 | e->private_bio = bio; | ||
346 | e->block_id = id; | ||
347 | INIT_HLIST_NODE(&e->colision); | 360 | INIT_HLIST_NODE(&e->colision); |
348 | e->epoch = NULL; | 361 | e->epoch = NULL; |
362 | e->mdev = mdev; | ||
363 | e->pages = page; | ||
364 | atomic_set(&e->pending_bios, 0); | ||
365 | e->size = data_size; | ||
349 | e->flags = 0; | 366 | e->flags = 0; |
367 | e->sector = sector; | ||
369 | e->block_id = id; | ||
350 | 370 | ||
351 | return e; | 371 | return e; |
352 | 372 | ||
353 | fail2: | 373 | fail: |
354 | drbd_pp_free_bio_pages(mdev, bio); | ||
355 | bio_put(bio); | ||
356 | fail1: | ||
357 | mempool_free(e, drbd_ee_mempool); | 374 | mempool_free(e, drbd_ee_mempool); |
358 | |||
359 | return NULL; | 375 | return NULL; |
360 | } | 376 | } |
361 | 377 | ||
362 | void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e) | 378 | void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e) |
363 | { | 379 | { |
364 | struct bio *bio = e->private_bio; | 380 | drbd_pp_free(mdev, e->pages); |
365 | drbd_pp_free_bio_pages(mdev, bio); | 381 | D_ASSERT(atomic_read(&e->pending_bios) == 0); |
366 | bio_put(bio); | ||
367 | D_ASSERT(hlist_unhashed(&e->colision)); | 382 | D_ASSERT(hlist_unhashed(&e->colision)); |
368 | mempool_free(e, drbd_ee_mempool); | 383 | mempool_free(e, drbd_ee_mempool); |
369 | } | 384 | } |
@@ -555,6 +570,25 @@ static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size) | |||
555 | return rv; | 570 | return rv; |
556 | } | 571 | } |
557 | 572 | ||
573 | /* quoting tcp(7): | ||
574 | * On individual connections, the socket buffer size must be set prior to the | ||
575 | * listen(2) or connect(2) calls in order to have it take effect. | ||
576 | * This is our wrapper to do so. | ||
577 | */ | ||
578 | static void drbd_setbufsize(struct socket *sock, unsigned int snd, | ||
579 | unsigned int rcv) | ||
580 | { | ||
581 | /* open coded SO_SNDBUF, SO_RCVBUF */ | ||
582 | if (snd) { | ||
583 | sock->sk->sk_sndbuf = snd; | ||
584 | sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | ||
585 | } | ||
586 | if (rcv) { | ||
587 | sock->sk->sk_rcvbuf = rcv; | ||
588 | sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | ||
589 | } | ||
590 | } | ||
591 | |||
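drbd_setbufsize() open-codes SO_SNDBUF/SO_RCVBUF, including the SOCK_*BUF_LOCK bits that keep TCP autotuning from overriding the administrator's choice. The userspace equivalent, applied before connect() or listen() as tcp(7) demands (sketch):

    #include <sys/socket.h>

    /* set fixed socket buffer sizes; must run before connect()/listen() */
    static void set_bufsizes(int fd, int snd, int rcv)
    {
        if (snd)
            setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
        if (rcv)
            setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
    }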
558 | static struct socket *drbd_try_connect(struct drbd_conf *mdev) | 592 | static struct socket *drbd_try_connect(struct drbd_conf *mdev) |
559 | { | 593 | { |
560 | const char *what; | 594 | const char *what; |
@@ -576,6 +610,8 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev) | |||
576 | 610 | ||
577 | sock->sk->sk_rcvtimeo = | 611 | sock->sk->sk_rcvtimeo = |
578 | sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ; | 612 | sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ; |
613 | drbd_setbufsize(sock, mdev->net_conf->sndbuf_size, | ||
614 | mdev->net_conf->rcvbuf_size); | ||
579 | 615 | ||
580 | /* explicitly bind to the configured IP as source IP | 616 | /* explicitly bind to the configured IP as source IP |
581 | * for the outgoing connections. | 617 | * for the outgoing connections. |
@@ -654,6 +690,8 @@ static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev) | |||
654 | s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */ | 690 | s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */ |
655 | s_listen->sk->sk_rcvtimeo = timeo; | 691 | s_listen->sk->sk_rcvtimeo = timeo; |
656 | s_listen->sk->sk_sndtimeo = timeo; | 692 | s_listen->sk->sk_sndtimeo = timeo; |
693 | drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size, | ||
694 | mdev->net_conf->rcvbuf_size); | ||
657 | 695 | ||
658 | what = "bind before listen"; | 696 | what = "bind before listen"; |
659 | err = s_listen->ops->bind(s_listen, | 697 | err = s_listen->ops->bind(s_listen, |
@@ -840,16 +878,6 @@ retry: | |||
840 | sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; | 878 | sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; |
841 | msock->sk->sk_priority = TC_PRIO_INTERACTIVE; | 879 | msock->sk->sk_priority = TC_PRIO_INTERACTIVE; |
842 | 880 | ||
843 | if (mdev->net_conf->sndbuf_size) { | ||
844 | sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size; | ||
845 | sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | ||
846 | } | ||
847 | |||
848 | if (mdev->net_conf->rcvbuf_size) { | ||
849 | sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size; | ||
850 | sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | ||
851 | } | ||
852 | |||
853 | /* NOT YET ... | 881 | /* NOT YET ... |
854 | * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10; | 882 | * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10; |
855 | * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; | 883 | * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; |
@@ -902,7 +930,7 @@ retry: | |||
902 | if (!drbd_send_protocol(mdev)) | 930 | if (!drbd_send_protocol(mdev)) |
903 | return -1; | 931 | return -1; |
904 | drbd_send_sync_param(mdev, &mdev->sync_conf); | 932 | drbd_send_sync_param(mdev, &mdev->sync_conf); |
905 | drbd_send_sizes(mdev, 0); | 933 | drbd_send_sizes(mdev, 0, 0); |
906 | drbd_send_uuids(mdev); | 934 | drbd_send_uuids(mdev); |
907 | drbd_send_state(mdev); | 935 | drbd_send_state(mdev); |
908 | clear_bit(USE_DEGR_WFC_T, &mdev->flags); | 936 | clear_bit(USE_DEGR_WFC_T, &mdev->flags); |
@@ -946,7 +974,8 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d | |||
946 | int rv; | 974 | int rv; |
947 | 975 | ||
948 | if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) { | 976 | if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) { |
949 | rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL); | 977 | rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL, |
978 | NULL, BLKDEV_IFL_WAIT); | ||
950 | if (rv) { | 979 | if (rv) { |
951 | dev_err(DEV, "local disk flush failed with status %d\n", rv); | 980 | dev_err(DEV, "local disk flush failed with status %d\n", rv); |
952 | /* would rather check on EOPNOTSUPP, but that is not reliable. | 981 | /* would rather check on EOPNOTSUPP, but that is not reliable. |
@@ -1058,7 +1087,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, | |||
1058 | } else { | 1087 | } else { |
1059 | epoch->flags = 0; | 1088 | epoch->flags = 0; |
1060 | atomic_set(&epoch->epoch_size, 0); | 1089 | atomic_set(&epoch->epoch_size, 0); |
1061 | /* atomic_set(&epoch->active, 0); is alrady zero */ | 1090 | /* atomic_set(&epoch->active, 0); is already zero */ |
1062 | if (rv == FE_STILL_LIVE) | 1091 | if (rv == FE_STILL_LIVE) |
1063 | rv = FE_RECYCLED; | 1092 | rv = FE_RECYCLED; |
1064 | } | 1093 | } |
@@ -1120,7 +1149,91 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) | |||
1120 | } | 1149 | } |
1121 | 1150 | ||
1122 | /** | 1151 | /** |
1123 | * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set | 1152 | * drbd_submit_ee() - submit all bios of an epoch entry |
1153 | * @mdev: DRBD device. | ||
1154 | * @e: epoch entry | ||
1155 | * @rw: flag field, see bio->bi_rw | ||
1156 | */ | ||
1157 | /* TODO allocate from our own bio_set. */ | ||
1158 | int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | ||
1159 | const unsigned rw, const int fault_type) | ||
1160 | { | ||
1161 | struct bio *bios = NULL; | ||
1162 | struct bio *bio; | ||
1163 | struct page *page = e->pages; | ||
1164 | sector_t sector = e->sector; | ||
1165 | unsigned ds = e->size; | ||
1166 | unsigned n_bios = 0; | ||
1167 | unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; | ||
1168 | |||
1169 | /* In most cases, we will only need one bio. But in case the lower | ||
1170 | * level restrictions happen to be different at this offset on this | ||
1171 | * side than those of the sending peer, we may need to submit the | ||
1172 | * request in more than one bio. */ | ||
1173 | next_bio: | ||
1174 | bio = bio_alloc(GFP_NOIO, nr_pages); | ||
1175 | if (!bio) { | ||
1176 | dev_err(DEV, "submit_ee: Allocation of a bio failed\n"); | ||
1177 | goto fail; | ||
1178 | } | ||
1179 | /* > e->sector, unless this is the first bio */ | ||
1180 | bio->bi_sector = sector; | ||
1181 | bio->bi_bdev = mdev->ldev->backing_bdev; | ||
1182 | /* we special case some flags in the multi-bio case, see below | ||
1183 | * (REQ_UNPLUG, REQ_HARDBARRIER) */ | ||
1184 | bio->bi_rw = rw; | ||
1185 | bio->bi_private = e; | ||
1186 | bio->bi_end_io = drbd_endio_sec; | ||
1187 | |||
1188 | bio->bi_next = bios; | ||
1189 | bios = bio; | ||
1190 | ++n_bios; | ||
1191 | |||
1192 | page_chain_for_each(page) { | ||
1193 | unsigned len = min_t(unsigned, ds, PAGE_SIZE); | ||
1194 | if (!bio_add_page(bio, page, len, 0)) { | ||
1195 | /* a single page must always be possible! */ | ||
1196 | BUG_ON(bio->bi_vcnt == 0); | ||
1197 | goto next_bio; | ||
1198 | } | ||
1199 | ds -= len; | ||
1200 | sector += len >> 9; | ||
1201 | --nr_pages; | ||
1202 | } | ||
1203 | D_ASSERT(page == NULL); | ||
1204 | D_ASSERT(ds == 0); | ||
1205 | |||
1206 | atomic_set(&e->pending_bios, n_bios); | ||
1207 | do { | ||
1208 | bio = bios; | ||
1209 | bios = bios->bi_next; | ||
1210 | bio->bi_next = NULL; | ||
1211 | |||
1212 | /* strip off REQ_UNPLUG unless it is the last bio */ | ||
1213 | if (bios) | ||
1214 | bio->bi_rw &= ~REQ_UNPLUG; | ||
1215 | |||
1216 | drbd_generic_make_request(mdev, fault_type, bio); | ||
1217 | |||
1218 | /* strip off REQ_HARDBARRIER, | ||
1219 | * unless it is the first or last bio */ | ||
1220 | if (bios && bios->bi_next) | ||
1221 | bios->bi_rw &= ~REQ_HARDBARRIER; | ||
1222 | } while (bios); | ||
1223 | maybe_kick_lo(mdev); | ||
1224 | return 0; | ||
1225 | |||
1226 | fail: | ||
1227 | while (bios) { | ||
1228 | bio = bios; | ||
1229 | bios = bios->bi_next; | ||
1230 | bio_put(bio); | ||
1231 | } | ||
1232 | return -ENOMEM; | ||
1233 | } | ||
1234 | |||
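drbd_submit_ee() fills a bio until bio_add_page() refuses, then opens a fresh one for the remainder, so a single epoch entry fans out into several bios when the local queue limits are tighter than the sending peer's. A generic userspace sketch of that fill-until-refused loop (container names hypothetical):

    #include <assert.h>

    #define CAP 4                       /* per-container capacity, illustrative */

    struct container {
        int used;
    };

    /* returns 1 if the item was accepted, 0 if the container is full */
    static int try_add(struct container *c)
    {
        if (c->used == CAP)
            return 0;
        c->used++;
        return 1;
    }

    /* pack n items, opening a new container whenever the current one
     * refuses; mirrors the BUG_ON: a fresh container must accept one */
    static int pack(int n_items)
    {
        struct container c = { 0 };
        int n_containers = 1;

        while (n_items) {
            if (!try_add(&c)) {
                assert(c.used > 0);     /* a single item must always fit */
                c.used = 0;             /* "open" the next container */
                n_containers++;
                continue;
            }
            n_items--;
        }
        return n_containers;
    }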
1235 | /** | ||
1236 | * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set | ||
1124 | * @mdev: DRBD device. | 1237 | * @mdev: DRBD device. |
1125 | * @w: work object. | 1238 | * @w: work object. |
1126 | * @cancel: The connection will be closed anyways (unused in this callback) | 1239 | * @cancel: The connection will be closed anyways (unused in this callback) |
@@ -1128,13 +1241,11 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) | |||
1128 | int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local) | 1241 | int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local) |
1129 | { | 1242 | { |
1130 | struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; | 1243 | struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; |
1131 | struct bio *bio = e->private_bio; | ||
1132 | |||
1133 | /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place, | 1244 | /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place, |
1134 | (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch) | 1245 | (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch) |
1135 | so that we can finish that epoch in drbd_may_finish_epoch(). | 1246 | so that we can finish that epoch in drbd_may_finish_epoch(). |
1136 | That is necessary if we already have a long chain of Epochs, before | 1247 | That is necessary if we already have a long chain of Epochs, before |
1137 | we realize that BIO_RW_BARRIER is actually not supported */ | 1248 | we realize that REQ_HARDBARRIER is actually not supported */ |
1138 | 1249 | ||
1139 | /* As long as the -ENOTSUPP on the barrier is reported immediately | 1250 | /* As long as the -ENOTSUPP on the barrier is reported immediately |
1140 | that will never trigger. If it is reported late, we will just | 1251 | that will never trigger. If it is reported late, we will just |
@@ -1143,33 +1254,17 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea | |||
1143 | if (previous_epoch(mdev, e->epoch)) | 1254 | if (previous_epoch(mdev, e->epoch)) |
1144 | dev_warn(DEV, "Write ordering was not enforced (one time event)\n"); | 1255 | dev_warn(DEV, "Write ordering was not enforced (one time event)\n"); |
1145 | 1256 | ||
1146 | /* prepare bio for re-submit, | ||
1147 | * re-init volatile members */ | ||
1148 | /* we still have a local reference, | 1257 | /* we still have a local reference, |
1149 | * get_ldev was done in receive_Data. */ | 1258 | * get_ldev was done in receive_Data. */ |
1150 | bio->bi_bdev = mdev->ldev->backing_bdev; | ||
1151 | bio->bi_sector = e->sector; | ||
1152 | bio->bi_size = e->size; | ||
1153 | bio->bi_idx = 0; | ||
1154 | |||
1155 | bio->bi_flags &= ~(BIO_POOL_MASK - 1); | ||
1156 | bio->bi_flags |= 1 << BIO_UPTODATE; | ||
1157 | |||
1158 | /* don't know whether this is necessary: */ | ||
1159 | bio->bi_phys_segments = 0; | ||
1160 | bio->bi_next = NULL; | ||
1161 | |||
1162 | /* these should be unchanged: */ | ||
1163 | /* bio->bi_end_io = drbd_endio_write_sec; */ | ||
1164 | /* bio->bi_vcnt = whatever; */ | ||
1165 | 1259 | ||
1166 | e->w.cb = e_end_block; | 1260 | e->w.cb = e_end_block; |
1167 | 1261 | if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) { | |
1168 | /* This is no longer a barrier request. */ | 1262 | /* drbd_submit_ee fails for one reason only: |
1169 | bio->bi_rw &= ~(1UL << BIO_RW_BARRIER); | 1263 | * if it was not able to allocate sufficient bios. |
1170 | 1264 | * requeue, try again later. */ | |
1171 | drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio); | 1265 | e->w.cb = w_e_reissue; |
1172 | 1266 | drbd_queue_work(&mdev->data.work, &e->w); | |
1267 | } | ||
1173 | return 1; | 1268 | return 1; |
1174 | } | 1269 | } |
1175 | 1270 | ||
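
The failure branch of w_e_reissue() above is a self-requeue: when drbd_submit_ee() cannot get bios, the work item points its callback back at itself and goes back on the work queue for a later retry. A toy single-threaded sketch of that pattern (all names hypothetical):

#include <stdbool.h>

struct work;
typedef int (*work_cb)(struct work *);
struct work { work_cb cb; struct work *next; };

static struct work *queue_head;		/* stands in for mdev->data.work */

static void queue_work(struct work *w)
{
	w->next = queue_head;
	queue_head = w;
}

static bool try_submit(void) { return false; }	/* toy: always fails */

static int w_reissue(struct work *w)
{
	if (!try_submit()) {
		w->cb = w_reissue;	/* run this callback again ... */
		queue_work(w);		/* ... on the next worker pass */
	}
	return 1;			/* never report failure here */
}
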
@@ -1261,13 +1356,13 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h) | |||
1261 | static struct drbd_epoch_entry * | 1356 | static struct drbd_epoch_entry * |
1262 | read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local) | 1357 | read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local) |
1263 | { | 1358 | { |
1359 | const sector_t capacity = drbd_get_capacity(mdev->this_bdev); | ||
1264 | struct drbd_epoch_entry *e; | 1360 | struct drbd_epoch_entry *e; |
1265 | struct bio_vec *bvec; | ||
1266 | struct page *page; | 1361 | struct page *page; |
1267 | struct bio *bio; | 1362 | int dgs, ds, rr; |
1268 | int dgs, ds, i, rr; | ||
1269 | void *dig_in = mdev->int_dig_in; | 1363 | void *dig_in = mdev->int_dig_in; |
1270 | void *dig_vv = mdev->int_dig_vv; | 1364 | void *dig_vv = mdev->int_dig_vv; |
1365 | unsigned long *data; | ||
1271 | 1366 | ||
1272 | dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? | 1367 | dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? |
1273 | crypto_hash_digestsize(mdev->integrity_r_tfm) : 0; | 1368 | crypto_hash_digestsize(mdev->integrity_r_tfm) : 0; |
@@ -1286,29 +1381,44 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ | |||
1286 | ERR_IF(data_size & 0x1ff) return NULL; | 1381 | ERR_IF(data_size & 0x1ff) return NULL; |
1287 | ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL; | 1382 | ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL; |
1288 | 1383 | ||
1384 | /* even though we trust our peer, | ||
1385 | * we sometimes have to double check. */ | ||
1386 | if (sector + (data_size>>9) > capacity) { | ||
1387 | dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n", | ||
1388 | (unsigned long long)capacity, | ||
1389 | (unsigned long long)sector, data_size); | ||
1390 | return NULL; | ||
1391 | } | ||
1392 | |||
1289 | /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD | 1393 | /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD |
1290 | * "criss-cross" setup, that might cause write-out on some other DRBD, | 1394 | * "criss-cross" setup, that might cause write-out on some other DRBD, |
1291 | * which in turn might block on the other node at this very place. */ | 1395 | * which in turn might block on the other node at this very place. */ |
1292 | e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO); | 1396 | e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO); |
1293 | if (!e) | 1397 | if (!e) |
1294 | return NULL; | 1398 | return NULL; |
1295 | bio = e->private_bio; | 1399 | |
1296 | ds = data_size; | 1400 | ds = data_size; |
1297 | bio_for_each_segment(bvec, bio, i) { | 1401 | page = e->pages; |
1298 | page = bvec->bv_page; | 1402 | page_chain_for_each(page) { |
1299 | rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE)); | 1403 | unsigned len = min_t(int, ds, PAGE_SIZE); |
1404 | data = kmap(page); | ||
1405 | rr = drbd_recv(mdev, data, len); | ||
1406 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) { | ||
1407 | dev_err(DEV, "Fault injection: Corrupting data on receive\n"); | ||
1408 | data[0] = data[0] ^ (unsigned long)-1; | ||
1409 | } | ||
1300 | kunmap(page); | 1410 | kunmap(page); |
1301 | if (rr != min_t(int, ds, PAGE_SIZE)) { | 1411 | if (rr != len) { |
1302 | drbd_free_ee(mdev, e); | 1412 | drbd_free_ee(mdev, e); |
1303 | dev_warn(DEV, "short read receiving data: read %d expected %d\n", | 1413 | dev_warn(DEV, "short read receiving data: read %d expected %d\n", |
1304 | rr, min_t(int, ds, PAGE_SIZE)); | 1414 | rr, len); |
1305 | return NULL; | 1415 | return NULL; |
1306 | } | 1416 | } |
1307 | ds -= rr; | 1417 | ds -= rr; |
1308 | } | 1418 | } |
1309 | 1419 | ||
1310 | if (dgs) { | 1420 | if (dgs) { |
1311 | drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv); | 1421 | drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); |
1312 | if (memcmp(dig_in, dig_vv, dgs)) { | 1422 | if (memcmp(dig_in, dig_vv, dgs)) { |
1313 | dev_err(DEV, "Digest integrity check FAILED.\n"); | 1423 | dev_err(DEV, "Digest integrity check FAILED.\n"); |
1314 | drbd_bcast_ee(mdev, "digest failed", | 1424 | drbd_bcast_ee(mdev, "digest failed", |
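
The rewritten receive loop above fills a chain of pages, all but the last completely. A hedged user-space analogue of the chain walk, with read() in place of drbd_recv() and a plain linked list in place of page_chain_for_each():

#include <unistd.h>

#define PG_SZ 4096

struct pg { struct pg *next; char data[PG_SZ]; };

static int recv_into_chain(int fd, struct pg *page, int data_size)
{
	int ds = data_size;

	for (; page && ds > 0; page = page->next) {
		int len = ds < PG_SZ ? ds : PG_SZ;
		ssize_t rr = read(fd, page->data, len);

		if (rr != len)
			return -1;	/* short read: caller frees chain */
		ds -= len;
	}
	return ds ? -1 : 0;		/* chain too short, or all done */
}
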
@@ -1330,7 +1440,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) | |||
1330 | int rr, rv = 1; | 1440 | int rr, rv = 1; |
1331 | void *data; | 1441 | void *data; |
1332 | 1442 | ||
1333 | page = drbd_pp_alloc(mdev, 1); | 1443 | if (!data_size) |
1444 | return TRUE; | ||
1445 | |||
1446 | page = drbd_pp_alloc(mdev, 1, 1); | ||
1334 | 1447 | ||
1335 | data = kmap(page); | 1448 | data = kmap(page); |
1336 | while (data_size) { | 1449 | while (data_size) { |
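
drbd_drain_block() keeps the protocol framing intact when a request is refused: the payload must still be consumed from the socket. A sketch of that drain idiom, assuming reads may return short:

#include <unistd.h>

static int drain_bytes(int fd, int data_size)
{
	char scratch[4096];		/* one reused scratch page */

	while (data_size > 0) {
		int want = data_size < (int)sizeof(scratch)
			 ? data_size : (int)sizeof(scratch);
		ssize_t rr = read(fd, scratch, want);

		if (rr <= 0)
			return -1;	/* peer went away mid-packet */
		data_size -= (int)rr;
	}
	return 0;
}
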
@@ -1394,7 +1507,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, | |||
1394 | } | 1507 | } |
1395 | 1508 | ||
1396 | if (dgs) { | 1509 | if (dgs) { |
1397 | drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv); | 1510 | drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv); |
1398 | if (memcmp(dig_in, dig_vv, dgs)) { | 1511 | if (memcmp(dig_in, dig_vv, dgs)) { |
1399 | dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); | 1512 | dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); |
1400 | return 0; | 1513 | return 0; |
@@ -1415,7 +1528,7 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u | |||
1415 | 1528 | ||
1416 | D_ASSERT(hlist_unhashed(&e->colision)); | 1529 | D_ASSERT(hlist_unhashed(&e->colision)); |
1417 | 1530 | ||
1418 | if (likely(drbd_bio_uptodate(e->private_bio))) { | 1531 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { |
1419 | drbd_set_in_sync(mdev, sector, e->size); | 1532 | drbd_set_in_sync(mdev, sector, e->size); |
1420 | ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e); | 1533 | ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e); |
1421 | } else { | 1534 | } else { |
@@ -1434,30 +1547,28 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si | |||
1434 | struct drbd_epoch_entry *e; | 1547 | struct drbd_epoch_entry *e; |
1435 | 1548 | ||
1436 | e = read_in_block(mdev, ID_SYNCER, sector, data_size); | 1549 | e = read_in_block(mdev, ID_SYNCER, sector, data_size); |
1437 | if (!e) { | 1550 | if (!e) |
1438 | put_ldev(mdev); | 1551 | goto fail; |
1439 | return FALSE; | ||
1440 | } | ||
1441 | 1552 | ||
1442 | dec_rs_pending(mdev); | 1553 | dec_rs_pending(mdev); |
1443 | 1554 | ||
1444 | e->private_bio->bi_end_io = drbd_endio_write_sec; | ||
1445 | e->private_bio->bi_rw = WRITE; | ||
1446 | e->w.cb = e_end_resync_block; | ||
1447 | |||
1448 | inc_unacked(mdev); | 1555 | inc_unacked(mdev); |
1449 | /* corresponding dec_unacked() in e_end_resync_block() | 1556 | /* corresponding dec_unacked() in e_end_resync_block() |
1450 | * respectively _drbd_clear_done_ee */ | 1557 | ||
1451 | 1558 | ||
1559 | e->w.cb = e_end_resync_block; | ||
1560 | |||
1452 | spin_lock_irq(&mdev->req_lock); | 1561 | spin_lock_irq(&mdev->req_lock); |
1453 | list_add(&e->w.list, &mdev->sync_ee); | 1562 | list_add(&e->w.list, &mdev->sync_ee); |
1454 | spin_unlock_irq(&mdev->req_lock); | 1563 | spin_unlock_irq(&mdev->req_lock); |
1455 | 1564 | ||
1456 | drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio); | 1565 | if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) |
1457 | /* accounting done in endio */ | 1566 | return TRUE; |
1458 | 1567 | ||
1459 | maybe_kick_lo(mdev); | 1568 | drbd_free_ee(mdev, e); |
1460 | return TRUE; | 1569 | fail: |
1570 | put_ldev(mdev); | ||
1571 | return FALSE; | ||
1461 | } | 1572 | } |
1462 | 1573 | ||
1463 | static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h) | 1574 | static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h) |
@@ -1552,7 +1663,7 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1552 | } | 1663 | } |
1553 | 1664 | ||
1554 | if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { | 1665 | if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { |
1555 | if (likely(drbd_bio_uptodate(e->private_bio))) { | 1666 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { |
1556 | pcmd = (mdev->state.conn >= C_SYNC_SOURCE && | 1667 | pcmd = (mdev->state.conn >= C_SYNC_SOURCE && |
1557 | mdev->state.conn <= C_PAUSED_SYNC_T && | 1668 | mdev->state.conn <= C_PAUSED_SYNC_T && |
1558 | e->flags & EE_MAY_SET_IN_SYNC) ? | 1669 | e->flags & EE_MAY_SET_IN_SYNC) ? |
@@ -1698,7 +1809,6 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) | |||
1698 | return FALSE; | 1809 | return FALSE; |
1699 | } | 1810 | } |
1700 | 1811 | ||
1701 | e->private_bio->bi_end_io = drbd_endio_write_sec; | ||
1702 | e->w.cb = e_end_block; | 1812 | e->w.cb = e_end_block; |
1703 | 1813 | ||
1704 | spin_lock(&mdev->epoch_lock); | 1814 | spin_lock(&mdev->epoch_lock); |
@@ -1714,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) | |||
1714 | epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); | 1824 | epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); |
1715 | if (epoch == e->epoch) { | 1825 | if (epoch == e->epoch) { |
1716 | set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); | 1826 | set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); |
1717 | rw |= (1<<BIO_RW_BARRIER); | 1827 | rw |= REQ_HARDBARRIER; |
1718 | e->flags |= EE_IS_BARRIER; | 1828 | e->flags |= EE_IS_BARRIER; |
1719 | } else { | 1829 | } else { |
1720 | if (atomic_read(&epoch->epoch_size) > 1 || | 1830 | if (atomic_read(&epoch->epoch_size) > 1 || |
1721 | !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { | 1831 | !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { |
1722 | set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); | 1832 | set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); |
1723 | set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); | 1833 | set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); |
1724 | rw |= (1<<BIO_RW_BARRIER); | 1834 | rw |= REQ_HARDBARRIER; |
1725 | e->flags |= EE_IS_BARRIER; | 1835 | e->flags |= EE_IS_BARRIER; |
1726 | } | 1836 | } |
1727 | } | 1837 | } |
@@ -1731,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) | |||
1731 | dp_flags = be32_to_cpu(p->dp_flags); | 1841 | dp_flags = be32_to_cpu(p->dp_flags); |
1732 | if (dp_flags & DP_HARDBARRIER) { | 1842 | if (dp_flags & DP_HARDBARRIER) { |
1733 | dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n"); | 1843 | dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n"); |
1734 | /* rw |= (1<<BIO_RW_BARRIER); */ | 1844 | /* rw |= REQ_HARDBARRIER; */ |
1735 | } | 1845 | } |
1736 | if (dp_flags & DP_RW_SYNC) | 1846 | if (dp_flags & DP_RW_SYNC) |
1737 | rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG); | 1847 | rw |= REQ_SYNC | REQ_UNPLUG; |
1738 | if (dp_flags & DP_MAY_SET_IN_SYNC) | 1848 | if (dp_flags & DP_MAY_SET_IN_SYNC) |
1739 | e->flags |= EE_MAY_SET_IN_SYNC; | 1849 | e->flags |= EE_MAY_SET_IN_SYNC; |
1740 | 1850 | ||
@@ -1894,12 +2004,8 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) | |||
1894 | drbd_al_begin_io(mdev, e->sector); | 2004 | drbd_al_begin_io(mdev, e->sector); |
1895 | } | 2005 | } |
1896 | 2006 | ||
1897 | e->private_bio->bi_rw = rw; | 2007 | if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) |
1898 | drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio); | 2008 | return TRUE; |
1899 | /* accounting done in endio */ | ||
1900 | |||
1901 | maybe_kick_lo(mdev); | ||
1902 | return TRUE; | ||
1903 | 2009 | ||
1904 | out_interrupted: | 2010 | out_interrupted: |
1905 | /* yes, the epoch_size now is imbalanced. | 2011 | /* yes, the epoch_size now is imbalanced. |
@@ -1945,7 +2051,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) | |||
1945 | "no local data.\n"); | 2051 | "no local data.\n"); |
1946 | drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY : | 2052 | drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY : |
1947 | P_NEG_RS_DREPLY , p); | 2053 | P_NEG_RS_DREPLY , p); |
1948 | return TRUE; | 2054 | return drbd_drain_block(mdev, h->length - brps); |
1949 | } | 2055 | } |
1950 | 2056 | ||
1951 | /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD | 2057 | /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD |
@@ -1957,9 +2063,6 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) | |||
1957 | return FALSE; | 2063 | return FALSE; |
1958 | } | 2064 | } |
1959 | 2065 | ||
1960 | e->private_bio->bi_rw = READ; | ||
1961 | e->private_bio->bi_end_io = drbd_endio_read_sec; | ||
1962 | |||
1963 | switch (h->command) { | 2066 | switch (h->command) { |
1964 | case P_DATA_REQUEST: | 2067 | case P_DATA_REQUEST: |
1965 | e->w.cb = w_e_end_data_req; | 2068 | e->w.cb = w_e_end_data_req; |
@@ -2053,10 +2156,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) | |||
2053 | 2156 | ||
2054 | inc_unacked(mdev); | 2157 | inc_unacked(mdev); |
2055 | 2158 | ||
2056 | drbd_generic_make_request(mdev, fault_type, e->private_bio); | 2159 | if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) |
2057 | maybe_kick_lo(mdev); | 2160 | return TRUE; |
2058 | |||
2059 | return TRUE; | ||
2060 | 2161 | ||
2061 | out_free_e: | 2162 | out_free_e: |
2062 | kfree(di); | 2163 | kfree(di); |
@@ -2473,6 +2574,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol | |||
2473 | hg > 0 ? "source" : "target"); | 2574 | hg > 0 ? "source" : "target"); |
2474 | } | 2575 | } |
2475 | 2576 | ||
2577 | if (abs(hg) == 100) | ||
2578 | drbd_khelper(mdev, "initial-split-brain"); | ||
2579 | |||
2476 | if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) { | 2580 | if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) { |
2477 | int pcount = (mdev->state.role == R_PRIMARY) | 2581 | int pcount = (mdev->state.role == R_PRIMARY) |
2478 | + (peer_role == R_PRIMARY); | 2582 | + (peer_role == R_PRIMARY); |
@@ -2518,7 +2622,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol | |||
2518 | * after an attempted attach on a diskless node. | 2622 | * after an attempted attach on a diskless node. |
2519 | * We just refuse to attach -- well, we drop the "connection" | 2623 | * We just refuse to attach -- well, we drop the "connection" |
2520 | * to that disk, in a way... */ | 2624 | * to that disk, in a way... */ |
2521 | dev_alert(DEV, "Split-Brain detected, dropping connection!\n"); | 2625 | dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n"); |
2522 | drbd_khelper(mdev, "split-brain"); | 2626 | drbd_khelper(mdev, "split-brain"); |
2523 | return C_MASK; | 2627 | return C_MASK; |
2524 | } | 2628 | } |
@@ -2849,7 +2953,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) | |||
2849 | unsigned int max_seg_s; | 2953 | unsigned int max_seg_s; |
2850 | sector_t p_size, p_usize, my_usize; | 2954 | sector_t p_size, p_usize, my_usize; |
2851 | int ldsc = 0; /* local disk size changed */ | 2955 | int ldsc = 0; /* local disk size changed */ |
2852 | enum drbd_conns nconn; | 2956 | enum dds_flags ddsf; |
2853 | 2957 | ||
2854 | ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; | 2958 | ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; |
2855 | if (drbd_recv(mdev, h->payload, h->length) != h->length) | 2959 | if (drbd_recv(mdev, h->payload, h->length) != h->length) |
@@ -2905,8 +3009,9 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) | |||
2905 | } | 3009 | } |
2906 | #undef min_not_zero | 3010 | #undef min_not_zero |
2907 | 3011 | ||
3012 | ddsf = be16_to_cpu(p->dds_flags); | ||
2908 | if (get_ldev(mdev)) { | 3013 | if (get_ldev(mdev)) { |
2909 | dd = drbd_determin_dev_size(mdev, 0); | 3014 | dd = drbd_determin_dev_size(mdev, ddsf); |
2910 | put_ldev(mdev); | 3015 | put_ldev(mdev); |
2911 | if (dd == dev_size_error) | 3016 | if (dd == dev_size_error) |
2912 | return FALSE; | 3017 | return FALSE; |
@@ -2916,33 +3021,21 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) | |||
2916 | drbd_set_my_capacity(mdev, p_size); | 3021 | drbd_set_my_capacity(mdev, p_size); |
2917 | } | 3022 | } |
2918 | 3023 | ||
2919 | if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) { | ||
2920 | nconn = drbd_sync_handshake(mdev, | ||
2921 | mdev->state.peer, mdev->state.pdsk); | ||
2922 | put_ldev(mdev); | ||
2923 | |||
2924 | if (nconn == C_MASK) { | ||
2925 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | ||
2926 | return FALSE; | ||
2927 | } | ||
2928 | |||
2929 | if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) { | ||
2930 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | ||
2931 | return FALSE; | ||
2932 | } | ||
2933 | } | ||
2934 | |||
2935 | if (get_ldev(mdev)) { | 3024 | if (get_ldev(mdev)) { |
2936 | if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { | 3025 | if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { |
2937 | mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); | 3026 | mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); |
2938 | ldsc = 1; | 3027 | ldsc = 1; |
2939 | } | 3028 | } |
2940 | 3029 | ||
2941 | max_seg_s = be32_to_cpu(p->max_segment_size); | 3030 | if (mdev->agreed_pro_version < 94) |
3031 | max_seg_s = be32_to_cpu(p->max_segment_size); | ||
3032 | else /* drbd 8.3.8 onwards */ | ||
3033 | max_seg_s = DRBD_MAX_SEGMENT_SIZE; | ||
3034 | |||
2942 | if (max_seg_s != queue_max_segment_size(mdev->rq_queue)) | 3035 | if (max_seg_s != queue_max_segment_size(mdev->rq_queue)) |
2943 | drbd_setup_queue_param(mdev, max_seg_s); | 3036 | drbd_setup_queue_param(mdev, max_seg_s); |
2944 | 3037 | ||
2945 | drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type)); | 3038 | drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); |
2946 | put_ldev(mdev); | 3039 | put_ldev(mdev); |
2947 | } | 3040 | } |
2948 | 3041 | ||
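
The gate on agreed_pro_version above is a typical negotiation shape: pre-94 peers dictate the segment-size limit on the wire, while from protocol 94 (drbd 8.3.8) on, the receiver can split multi-bio EEs itself and uses the compiled-in maximum. Condensed sketch, constant value assumed:

#define DRBD_MAX_SEGMENT_SIZE	(1U << 15)	/* assumed value */

static unsigned int pick_max_seg(int agreed_pro_version,
				 unsigned int wire_value)
{
	if (agreed_pro_version < 94)
		return wire_value;		/* peer dictates the limit */
	return DRBD_MAX_SEGMENT_SIZE;		/* we can split locally */
}
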
@@ -2951,14 +3044,17 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) | |||
2951 | drbd_get_capacity(mdev->this_bdev) || ldsc) { | 3044 | drbd_get_capacity(mdev->this_bdev) || ldsc) { |
2952 | /* we have different sizes, probably peer | 3045 | /* we have different sizes, probably peer |
2953 | * needs to know my new size... */ | 3046 | * needs to know my new size... */ |
2954 | drbd_send_sizes(mdev, 0); | 3047 | drbd_send_sizes(mdev, 0, ddsf); |
2955 | } | 3048 | } |
2956 | if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || | 3049 | if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || |
2957 | (dd == grew && mdev->state.conn == C_CONNECTED)) { | 3050 | (dd == grew && mdev->state.conn == C_CONNECTED)) { |
2958 | if (mdev->state.pdsk >= D_INCONSISTENT && | 3051 | if (mdev->state.pdsk >= D_INCONSISTENT && |
2959 | mdev->state.disk >= D_INCONSISTENT) | 3052 | mdev->state.disk >= D_INCONSISTENT) { |
2960 | resync_after_online_grow(mdev); | 3053 | if (ddsf & DDSF_NO_RESYNC) |
2961 | else | 3054 | dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n"); |
3055 | else | ||
3056 | resync_after_online_grow(mdev); | ||
3057 | } else | ||
2962 | set_bit(RESYNC_AFTER_NEG, &mdev->flags); | 3058 | set_bit(RESYNC_AFTER_NEG, &mdev->flags); |
2963 | } | 3059 | } |
2964 | } | 3060 | } |
@@ -3459,14 +3555,15 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h) | |||
3459 | return ok; | 3555 | return ok; |
3460 | } | 3556 | } |
3461 | 3557 | ||
3462 | static int receive_skip(struct drbd_conf *mdev, struct p_header *h) | 3558 | static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent) |
3463 | { | 3559 | { |
3464 | /* TODO zero copy sink :) */ | 3560 | /* TODO zero copy sink :) */ |
3465 | static char sink[128]; | 3561 | static char sink[128]; |
3466 | int size, want, r; | 3562 | int size, want, r; |
3467 | 3563 | ||
3468 | dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", | 3564 | if (!silent) |
3469 | h->command, h->length); | 3565 | dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", |
3566 | h->command, h->length); | ||
3470 | 3567 | ||
3471 | size = h->length; | 3568 | size = h->length; |
3472 | while (size > 0) { | 3569 | while (size > 0) { |
@@ -3478,6 +3575,16 @@ static int receive_skip(struct drbd_conf *mdev, struct p_header *h) | |||
3478 | return size == 0; | 3575 | return size == 0; |
3479 | } | 3576 | } |
3480 | 3577 | ||
3578 | static int receive_skip(struct drbd_conf *mdev, struct p_header *h) | ||
3579 | { | ||
3580 | return receive_skip_(mdev, h, 0); | ||
3581 | } | ||
3582 | |||
3583 | static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h) | ||
3584 | { | ||
3585 | return receive_skip_(mdev, h, 1); | ||
3586 | } | ||
3587 | |||
3481 | static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h) | 3588 | static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h) |
3482 | { | 3589 | { |
3483 | if (mdev->state.disk >= D_INCONSISTENT) | 3590 | if (mdev->state.disk >= D_INCONSISTENT) |
@@ -3513,6 +3620,7 @@ static drbd_cmd_handler_f drbd_default_handler[] = { | |||
3513 | [P_OV_REQUEST] = receive_DataRequest, | 3620 | [P_OV_REQUEST] = receive_DataRequest, |
3514 | [P_OV_REPLY] = receive_DataRequest, | 3621 | [P_OV_REPLY] = receive_DataRequest, |
3515 | [P_CSUM_RS_REQUEST] = receive_DataRequest, | 3622 | [P_CSUM_RS_REQUEST] = receive_DataRequest, |
3623 | [P_DELAY_PROBE] = receive_skip_silent, | ||
3516 | /* anything missing from this table is in | 3624 | /* anything missing from this table is in |
3517 | * the asender_tbl, see get_asender_cmd */ | 3625 | * the asender_tbl, see get_asender_cmd */ |
3518 | [P_MAX_CMD] = NULL, | 3626 | [P_MAX_CMD] = NULL, |
@@ -3739,7 +3847,7 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3739 | dev_info(DEV, "net_ee not empty, killed %u entries\n", i); | 3847 | dev_info(DEV, "net_ee not empty, killed %u entries\n", i); |
3740 | i = atomic_read(&mdev->pp_in_use); | 3848 | i = atomic_read(&mdev->pp_in_use); |
3741 | if (i) | 3849 | if (i) |
3742 | dev_info(DEV, "pp_in_use = %u, expected 0\n", i); | 3850 | dev_info(DEV, "pp_in_use = %d, expected 0\n", i); |
3743 | 3851 | ||
3744 | D_ASSERT(list_empty(&mdev->read_ee)); | 3852 | D_ASSERT(list_empty(&mdev->read_ee)); |
3745 | D_ASSERT(list_empty(&mdev->active_ee)); | 3853 | D_ASSERT(list_empty(&mdev->active_ee)); |
@@ -4232,7 +4340,6 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h) | |||
4232 | 4340 | ||
4233 | sector = be64_to_cpu(p->sector); | 4341 | sector = be64_to_cpu(p->sector); |
4234 | size = be32_to_cpu(p->blksize); | 4342 | size = be32_to_cpu(p->blksize); |
4235 | D_ASSERT(p->block_id == ID_SYNCER); | ||
4236 | 4343 | ||
4237 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | 4344 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); |
4238 | 4345 | ||
@@ -4290,6 +4397,12 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h) | |||
4290 | return TRUE; | 4397 | return TRUE; |
4291 | } | 4398 | } |
4292 | 4399 | ||
4400 | static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h) | ||
4401 | { | ||
4402 | /* IGNORE */ | ||
4403 | return TRUE; | ||
4404 | } | ||
4405 | |||
4293 | struct asender_cmd { | 4406 | struct asender_cmd { |
4294 | size_t pkt_size; | 4407 | size_t pkt_size; |
4295 | int (*process)(struct drbd_conf *mdev, struct p_header *h); | 4408 | int (*process)(struct drbd_conf *mdev, struct p_header *h); |
@@ -4314,6 +4427,7 @@ static struct asender_cmd *get_asender_cmd(int cmd) | |||
4314 | [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, | 4427 | [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, |
4315 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, | 4428 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, |
4316 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, | 4429 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, |
4430 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_something_to_ignore_m }, | ||
4317 | [P_MAX_CMD] = { 0, NULL }, | 4431 | [P_MAX_CMD] = { 0, NULL }, |
4318 | }; | 4432 | }; |
4319 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) | 4433 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) |
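
get_asender_cmd() and the table above use the classic dispatch-table idiom: an array of { expected packet size, handler } entries indexed by command number, with a bounds-and-NULL check rejecting unknown packets. A simplified sketch with made-up commands:

#include <stddef.h>

struct header { int command; };

struct cmd {
	size_t pkt_size;
	int (*process)(struct header *h);
};

static int ignore(struct header *h) { (void)h; return 1; }

enum { CMD_PING, CMD_DELAY_PROBE, CMD_MAX };

static const struct cmd tbl[] = {
	[CMD_PING]        = { sizeof(struct header), ignore },
	[CMD_DELAY_PROBE] = { sizeof(struct header), ignore },
	[CMD_MAX]         = { 0, NULL },
};

static const struct cmd *lookup(int cmd)
{
	if (cmd < 0 || cmd > CMD_MAX || tbl[cmd].process == NULL)
		return NULL;	/* unknown: caller drops the connection */
	return &tbl[cmd];
}
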
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index de81ab7b4627..f761d98a4e90 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -102,32 +102,7 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const | |||
102 | } | 102 | } |
103 | } | 103 | } |
104 | 104 | ||
105 | /* if it was a local io error, we want to notify our | 105 | drbd_req_free(req); |
106 | * peer about that, and see if we need to | ||
107 | * detach the disk and stuff. | ||
108 | * to avoid allocating some special work | ||
109 | * struct, reuse the request. */ | ||
110 | |||
111 | /* THINK | ||
112 | * why do we do this not when we detect the error, | ||
113 | * but delay it until it is "done", i.e. possibly | ||
114 | * until the next barrier ack? */ | ||
115 | |||
116 | if (rw == WRITE && | ||
117 | ((s & RQ_LOCAL_MASK) && !(s & RQ_LOCAL_OK))) { | ||
118 | if (!(req->w.list.next == LIST_POISON1 || | ||
119 | list_empty(&req->w.list))) { | ||
120 | /* DEBUG ASSERT only; if this triggers, we | ||
121 | * probably corrupt the worker list here */ | ||
122 | dev_err(DEV, "req->w.list.next = %p\n", req->w.list.next); | ||
123 | dev_err(DEV, "req->w.list.prev = %p\n", req->w.list.prev); | ||
124 | } | ||
125 | req->w.cb = w_io_error; | ||
126 | drbd_queue_work(&mdev->data.work, &req->w); | ||
127 | /* drbd_req_free() is done in w_io_error */ | ||
128 | } else { | ||
129 | drbd_req_free(req); | ||
130 | } | ||
131 | } | 106 | } |
132 | 107 | ||
133 | static void queue_barrier(struct drbd_conf *mdev) | 108 | static void queue_barrier(struct drbd_conf *mdev) |
@@ -453,9 +428,6 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
453 | req->rq_state |= RQ_LOCAL_COMPLETED; | 428 | req->rq_state |= RQ_LOCAL_COMPLETED; |
454 | req->rq_state &= ~RQ_LOCAL_PENDING; | 429 | req->rq_state &= ~RQ_LOCAL_PENDING; |
455 | 430 | ||
456 | dev_alert(DEV, "Local WRITE failed sec=%llus size=%u\n", | ||
457 | (unsigned long long)req->sector, req->size); | ||
458 | /* and now: check how to handle local io error. */ | ||
459 | __drbd_chk_io_error(mdev, FALSE); | 431 | __drbd_chk_io_error(mdev, FALSE); |
460 | _req_may_be_done(req, m); | 432 | _req_may_be_done(req, m); |
461 | put_ldev(mdev); | 433 | put_ldev(mdev); |
@@ -475,22 +447,21 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
475 | req->rq_state |= RQ_LOCAL_COMPLETED; | 447 | req->rq_state |= RQ_LOCAL_COMPLETED; |
476 | req->rq_state &= ~RQ_LOCAL_PENDING; | 448 | req->rq_state &= ~RQ_LOCAL_PENDING; |
477 | 449 | ||
478 | dev_alert(DEV, "Local READ failed sec=%llus size=%u\n", | ||
479 | (unsigned long long)req->sector, req->size); | ||
480 | /* _req_mod(req,to_be_send); oops, recursion... */ | ||
481 | D_ASSERT(!(req->rq_state & RQ_NET_MASK)); | 450 | D_ASSERT(!(req->rq_state & RQ_NET_MASK)); |
482 | req->rq_state |= RQ_NET_PENDING; | ||
483 | inc_ap_pending(mdev); | ||
484 | 451 | ||
485 | __drbd_chk_io_error(mdev, FALSE); | 452 | __drbd_chk_io_error(mdev, FALSE); |
486 | put_ldev(mdev); | 453 | put_ldev(mdev); |
487 | /* NOTE: if we have no connection, | ||
488 | * or know the peer has no good data either, | ||
489 | * then we don't actually need to "queue_for_net_read", | ||
490 | * but we do so anyways, since the drbd_io_error() | ||
491 | * and the potential state change to "Diskless" | ||
492 | * needs to be done from process context */ | ||
493 | 454 | ||
455 | /* no point in retrying if there is no good remote data, | ||
456 | * or we have no connection. */ | ||
457 | if (mdev->state.pdsk != D_UP_TO_DATE) { | ||
458 | _req_may_be_done(req, m); | ||
459 | break; | ||
460 | } | ||
461 | |||
462 | /* _req_mod(req,to_be_send); oops, recursion... */ | ||
463 | req->rq_state |= RQ_NET_PENDING; | ||
464 | inc_ap_pending(mdev); | ||
494 | /* fall through: _req_mod(req,queue_for_net_read); */ | 465 | /* fall through: _req_mod(req,queue_for_net_read); */ |
495 | 466 | ||
496 | case queue_for_net_read: | 467 | case queue_for_net_read: |
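
The rewritten read-error branch above encodes a simple policy: a failed local read is retried over the network only when the peer disk is known up-to-date; otherwise the request completes with its error immediately. Condensed into a sketch with illustrative names:

enum pdsk { D_UNKNOWN, D_UP_TO_DATE };

static int on_local_read_error(enum pdsk peer_disk,
			       void (*complete_with_error)(void),
			       void (*queue_for_net_read)(void))
{
	if (peer_disk != D_UP_TO_DATE) {
		complete_with_error();	/* no good remote copy exists */
		return 0;
	}
	queue_for_net_read();		/* retry the read from the peer */
	return 1;
}
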
@@ -600,6 +571,9 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
600 | _req_may_be_done(req, m); | 571 | _req_may_be_done(req, m); |
601 | break; | 572 | break; |
602 | 573 | ||
574 | case read_retry_remote_canceled: | ||
575 | req->rq_state &= ~RQ_NET_QUEUED; | ||
576 | /* fall through, in case we raced with drbd_disconnect */ | ||
603 | case connection_lost_while_pending: | 577 | case connection_lost_while_pending: |
604 | /* transfer log cleanup after connection loss */ | 578 | /* transfer log cleanup after connection loss */ |
605 | /* assert something? */ | 579 | /* assert something? */ |
@@ -722,6 +696,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
722 | struct drbd_request *req; | 696 | struct drbd_request *req; |
723 | int local, remote; | 697 | int local, remote; |
724 | int err = -EIO; | 698 | int err = -EIO; |
699 | int ret = 0; | ||
725 | 700 | ||
726 | /* allocate outside of all locks; */ | 701 | /* allocate outside of all locks; */ |
727 | req = drbd_req_new(mdev, bio); | 702 | req = drbd_req_new(mdev, bio); |
@@ -784,7 +759,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
784 | (mdev->state.pdsk == D_INCONSISTENT && | 759 | (mdev->state.pdsk == D_INCONSISTENT && |
785 | mdev->state.conn >= C_CONNECTED)); | 760 | mdev->state.conn >= C_CONNECTED)); |
786 | 761 | ||
787 | if (!(local || remote)) { | 762 | if (!(local || remote) && !mdev->state.susp) { |
788 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); | 763 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); |
789 | goto fail_free_complete; | 764 | goto fail_free_complete; |
790 | } | 765 | } |
@@ -810,6 +785,16 @@ allocate_barrier: | |||
810 | /* GOOD, everything prepared, grab the spin_lock */ | 785 | /* GOOD, everything prepared, grab the spin_lock */ |
811 | spin_lock_irq(&mdev->req_lock); | 786 | spin_lock_irq(&mdev->req_lock); |
812 | 787 | ||
788 | if (mdev->state.susp) { | ||
789 | /* If we got suspended, use the retry mechanism of | ||
790 | generic_make_request() to restart processing of this | ||
791 | bio. In the next call to drbd_make_request_26 | ||
792 | we sleep in inc_ap_bio() */ | ||
793 | ret = 1; | ||
794 | spin_unlock_irq(&mdev->req_lock); | ||
795 | goto fail_free_complete; | ||
796 | } | ||
797 | |||
813 | if (remote) { | 798 | if (remote) { |
814 | remote = (mdev->state.pdsk == D_UP_TO_DATE || | 799 | remote = (mdev->state.pdsk == D_UP_TO_DATE || |
815 | (mdev->state.pdsk == D_INCONSISTENT && | 800 | (mdev->state.pdsk == D_INCONSISTENT && |
@@ -947,12 +932,14 @@ fail_and_free_req: | |||
947 | req->private_bio = NULL; | 932 | req->private_bio = NULL; |
948 | put_ldev(mdev); | 933 | put_ldev(mdev); |
949 | } | 934 | } |
950 | bio_endio(bio, err); | 935 | if (!ret) |
936 | bio_endio(bio, err); | ||
937 | |||
951 | drbd_req_free(req); | 938 | drbd_req_free(req); |
952 | dec_ap_bio(mdev); | 939 | dec_ap_bio(mdev); |
953 | kfree(b); | 940 | kfree(b); |
954 | 941 | ||
955 | return 0; | 942 | return ret; |
956 | } | 943 | } |
957 | 944 | ||
958 | /* helper function for drbd_make_request | 945 | /* helper function for drbd_make_request |
@@ -962,11 +949,6 @@ fail_and_free_req: | |||
962 | */ | 949 | */ |
963 | static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write) | 950 | static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write) |
964 | { | 951 | { |
965 | /* Unconfigured */ | ||
966 | if (mdev->state.conn == C_DISCONNECTING && | ||
967 | mdev->state.disk == D_DISKLESS) | ||
968 | return 1; | ||
969 | |||
970 | if (mdev->state.role != R_PRIMARY && | 952 | if (mdev->state.role != R_PRIMARY && |
971 | (!allow_oos || is_write)) { | 953 | (!allow_oos || is_write)) { |
972 | if (__ratelimit(&drbd_ratelimit_state)) { | 954 | if (__ratelimit(&drbd_ratelimit_state)) { |
@@ -1015,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1015 | * because of those XXX, this is not yet enabled, | 997 | * because of those XXX, this is not yet enabled, |
1016 | * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit. | 998 | * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit. |
1017 | */ | 999 | */ |
1018 | if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) { | 1000 | if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) { |
1019 | /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */ | 1001 | /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */ |
1020 | bio_endio(bio, -EOPNOTSUPP); | 1002 | bio_endio(bio, -EOPNOTSUPP); |
1021 | return 0; | 1003 | return 0; |
@@ -1070,15 +1052,21 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1070 | 1052 | ||
1071 | /* we need to get a "reference count" (ap_bio_cnt) | 1053 | /* we need to get a "reference count" (ap_bio_cnt) |
1072 | * to avoid races with the disconnect/reconnect/suspend code. | 1054 | * to avoid races with the disconnect/reconnect/suspend code. |
1073 | * In case we need to split the bio here, we need to get two references | 1055 | * In case we need to split the bio here, we need to get three references |
1074 | * atomically, otherwise we might deadlock when trying to submit the | 1056 | * atomically, otherwise we might deadlock when trying to submit the |
1075 | * second one! */ | 1057 | * second one! */ |
1076 | inc_ap_bio(mdev, 2); | 1058 | inc_ap_bio(mdev, 3); |
1077 | 1059 | ||
1078 | D_ASSERT(e_enr == s_enr + 1); | 1060 | D_ASSERT(e_enr == s_enr + 1); |
1079 | 1061 | ||
1080 | drbd_make_request_common(mdev, &bp->bio1); | 1062 | while (drbd_make_request_common(mdev, &bp->bio1)) |
1081 | drbd_make_request_common(mdev, &bp->bio2); | 1063 | inc_ap_bio(mdev, 1); |
1064 | |||
1065 | while (drbd_make_request_common(mdev, &bp->bio2)) | ||
1066 | inc_ap_bio(mdev, 1); | ||
1067 | |||
1068 | dec_ap_bio(mdev); | ||
1069 | |||
1082 | bio_pair_release(bp); | 1070 | bio_pair_release(bp); |
1083 | } | 1071 | } |
1084 | return 0; | 1072 | return 0; |
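
The split path above takes three ap_bio references up front (one per half plus a guard), re-takes one for every retry that drbd_make_request_common() requests, and finally drops the guard. A user-space sketch of that reference discipline using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ap_bio_cnt;

static void inc_ap_bio(int n) { atomic_fetch_add(&ap_bio_cnt, n); }
static void dec_ap_bio(void)  { atomic_fetch_sub(&ap_bio_cnt, 1); }

/* returns true when the half must be resubmitted (its ref is spent) */
static bool make_request(int half) { (void)half; return false; }

static void submit_split(void)
{
	inc_ap_bio(3);			/* two halves + one guard ref */

	while (make_request(1))
		inc_ap_bio(1);		/* each retry consumed one ref */
	while (make_request(2))
		inc_ap_bio(1);

	dec_ap_bio();			/* drop the guard reference */
}
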
@@ -1115,7 +1103,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct | |||
1115 | } else if (limit && get_ldev(mdev)) { | 1103 | } else if (limit && get_ldev(mdev)) { |
1116 | struct request_queue * const b = | 1104 | struct request_queue * const b = |
1117 | mdev->ldev->backing_bdev->bd_disk->queue; | 1105 | mdev->ldev->backing_bdev->bd_disk->queue; |
1118 | if (b->merge_bvec_fn && mdev->ldev->dc.use_bmbv) { | 1106 | if (b->merge_bvec_fn) { |
1119 | backing_limit = b->merge_bvec_fn(b, bvm, bvec); | 1107 | backing_limit = b->merge_bvec_fn(b, bvm, bvec); |
1120 | limit = min(limit, backing_limit); | 1108 | limit = min(limit, backing_limit); |
1121 | } | 1109 | } |
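
drbd_merge_bvec() now always delegates to the backing queue's merge_bvec_fn (the use_bmbv knob is gone) and takes the minimum of both limits, so the stacked limit can only shrink. Sketch, signatures simplified:

static unsigned int stacked_limit(unsigned int my_limit,
				  unsigned int (*backing_fn)(void))
{
	if (backing_fn) {
		unsigned int backing = backing_fn();

		if (backing < my_limit)
			my_limit = backing;	/* min(limit, backing) */
	}
	return my_limit;
}
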
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 16119d7056cc..02d575d24518 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h | |||
@@ -91,6 +91,7 @@ enum drbd_req_event { | |||
91 | send_failed, | 91 | send_failed, |
92 | handed_over_to_network, | 92 | handed_over_to_network, |
93 | connection_lost_while_pending, | 93 | connection_lost_while_pending, |
94 | read_retry_remote_canceled, | ||
94 | recv_acked_by_peer, | 95 | recv_acked_by_peer, |
95 | write_acked_by_peer, | 96 | write_acked_by_peer, |
96 | write_acked_by_peer_and_sis, /* and set_in_sync */ | 97 | write_acked_by_peer_and_sis, /* and set_in_sync */ |
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c index 76863e3f05be..85179e1fb50a 100644 --- a/drivers/block/drbd/drbd_strings.c +++ b/drivers/block/drbd/drbd_strings.c | |||
@@ -70,7 +70,7 @@ static const char *drbd_disk_s_names[] = { | |||
70 | 70 | ||
71 | static const char *drbd_state_sw_errors[] = { | 71 | static const char *drbd_state_sw_errors[] = { |
72 | [-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config", | 72 | [-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config", |
73 | [-SS_NO_UP_TO_DATE_DISK] = "Refusing to be Primary without at least one UpToDate disk", | 73 | [-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data", |
74 | [-SS_NO_LOCAL_DISK] = "Can not resync without local disk", | 74 | [-SS_NO_LOCAL_DISK] = "Can not resync without local disk", |
75 | [-SS_NO_REMOTE_DISK] = "Can not resync without remote disk", | 75 | [-SS_NO_REMOTE_DISK] = "Can not resync without remote disk", |
76 | [-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected", | 76 | [-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected", |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index d48a1dfd7b24..ca4a16cea2d8 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -47,8 +47,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca | |||
47 | 47 | ||
48 | /* defined here: | 48 | /* defined here: |
49 | drbd_md_io_complete | 49 | drbd_md_io_complete |
50 | drbd_endio_write_sec | 50 | drbd_endio_sec |
51 | drbd_endio_read_sec | ||
52 | drbd_endio_pri | 51 | drbd_endio_pri |
53 | 52 | ||
54 | * more endio handlers: | 53 | * more endio handlers: |
@@ -85,27 +84,10 @@ void drbd_md_io_complete(struct bio *bio, int error) | |||
85 | /* reads on behalf of the partner, | 84 | /* reads on behalf of the partner, |
86 | * "submitted" by the receiver | 85 | * "submitted" by the receiver |
87 | */ | 86 | */ |
88 | void drbd_endio_read_sec(struct bio *bio, int error) __releases(local) | 87 | void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) |
89 | { | 88 | { |
90 | unsigned long flags = 0; | 89 | unsigned long flags = 0; |
91 | struct drbd_epoch_entry *e = NULL; | 90 | struct drbd_conf *mdev = e->mdev; |
92 | struct drbd_conf *mdev; | ||
93 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | ||
94 | |||
95 | e = bio->bi_private; | ||
96 | mdev = e->mdev; | ||
97 | |||
98 | if (error) | ||
99 | dev_warn(DEV, "read: error=%d s=%llus\n", error, | ||
100 | (unsigned long long)e->sector); | ||
101 | if (!error && !uptodate) { | ||
102 | dev_warn(DEV, "read: setting error to -EIO s=%llus\n", | ||
103 | (unsigned long long)e->sector); | ||
104 | /* strange behavior of some lower level drivers... | ||
105 | * fail the request by clearing the uptodate flag, | ||
106 | * but do not return any error?! */ | ||
107 | error = -EIO; | ||
108 | } | ||
109 | 91 | ||
110 | D_ASSERT(e->block_id != ID_VACANT); | 92 | D_ASSERT(e->block_id != ID_VACANT); |
111 | 93 | ||
@@ -114,49 +96,38 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local) | |||
114 | list_del(&e->w.list); | 96 | list_del(&e->w.list); |
115 | if (list_empty(&mdev->read_ee)) | 97 | if (list_empty(&mdev->read_ee)) |
116 | wake_up(&mdev->ee_wait); | 98 | wake_up(&mdev->ee_wait); |
99 | if (test_bit(__EE_WAS_ERROR, &e->flags)) | ||
100 | __drbd_chk_io_error(mdev, FALSE); | ||
117 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 101 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
118 | 102 | ||
119 | drbd_chk_io_error(mdev, error, FALSE); | ||
120 | drbd_queue_work(&mdev->data.work, &e->w); | 103 | drbd_queue_work(&mdev->data.work, &e->w); |
121 | put_ldev(mdev); | 104 | put_ldev(mdev); |
122 | } | 105 | } |
123 | 106 | ||
107 | static int is_failed_barrier(int ee_flags) | ||
108 | { | ||
109 | return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED)) | ||
110 | == (EE_IS_BARRIER|EE_WAS_ERROR); | ||
111 | } | ||
112 | |||
124 | /* writes on behalf of the partner, or resync writes, | 113 | /* writes on behalf of the partner, or resync writes, |
125 | * "submitted" by the receiver. | 114 | * "submitted" by the receiver, final stage. */ |
126 | */ | 115 | static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local) |
127 | void drbd_endio_write_sec(struct bio *bio, int error) __releases(local) | ||
128 | { | 116 | { |
129 | unsigned long flags = 0; | 117 | unsigned long flags = 0; |
130 | struct drbd_epoch_entry *e = NULL; | 118 | struct drbd_conf *mdev = e->mdev; |
131 | struct drbd_conf *mdev; | ||
132 | sector_t e_sector; | 119 | sector_t e_sector; |
133 | int do_wake; | 120 | int do_wake; |
134 | int is_syncer_req; | 121 | int is_syncer_req; |
135 | int do_al_complete_io; | 122 | int do_al_complete_io; |
136 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | ||
137 | int is_barrier = bio_rw_flagged(bio, BIO_RW_BARRIER); | ||
138 | |||
139 | e = bio->bi_private; | ||
140 | mdev = e->mdev; | ||
141 | |||
142 | if (error) | ||
143 | dev_warn(DEV, "write: error=%d s=%llus\n", error, | ||
144 | (unsigned long long)e->sector); | ||
145 | if (!error && !uptodate) { | ||
146 | dev_warn(DEV, "write: setting error to -EIO s=%llus\n", | ||
147 | (unsigned long long)e->sector); | ||
148 | /* strange behavior of some lower level drivers... | ||
149 | * fail the request by clearing the uptodate flag, | ||
150 | * but do not return any error?! */ | ||
151 | error = -EIO; | ||
152 | } | ||
153 | 123 | ||
154 | /* error == -ENOTSUPP would be a better test, | 124 | /* if this is a failed barrier request, disable use of barriers, |
155 | * alas it is not reliable */ | 125 | * and schedule for resubmission */ |
156 | if (error && is_barrier && e->flags & EE_IS_BARRIER) { | 126 | if (is_failed_barrier(e->flags)) { |
157 | drbd_bump_write_ordering(mdev, WO_bdev_flush); | 127 | drbd_bump_write_ordering(mdev, WO_bdev_flush); |
158 | spin_lock_irqsave(&mdev->req_lock, flags); | 128 | spin_lock_irqsave(&mdev->req_lock, flags); |
159 | list_del(&e->w.list); | 129 | list_del(&e->w.list); |
130 | e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED; | ||
160 | e->w.cb = w_e_reissue; | 131 | e->w.cb = w_e_reissue; |
161 | /* put_ldev actually happens below, once we come here again. */ | 132 | /* put_ldev actually happens below, once we come here again. */ |
162 | __release(local); | 133 | __release(local); |
@@ -167,17 +138,16 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local) | |||
167 | 138 | ||
168 | D_ASSERT(e->block_id != ID_VACANT); | 139 | D_ASSERT(e->block_id != ID_VACANT); |
169 | 140 | ||
170 | spin_lock_irqsave(&mdev->req_lock, flags); | ||
171 | mdev->writ_cnt += e->size >> 9; | ||
172 | is_syncer_req = is_syncer_block_id(e->block_id); | ||
173 | |||
174 | /* after we moved e to done_ee, | 141 | /* after we moved e to done_ee, |
175 | * we may no longer access it, | 142 | * we may no longer access it, |
176 | * it may be freed/reused already! | 143 | * it may be freed/reused already! |
177 | * (as soon as we release the req_lock) */ | 144 | * (as soon as we release the req_lock) */ |
178 | e_sector = e->sector; | 145 | e_sector = e->sector; |
179 | do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO; | 146 | do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO; |
147 | is_syncer_req = is_syncer_block_id(e->block_id); | ||
180 | 148 | ||
149 | spin_lock_irqsave(&mdev->req_lock, flags); | ||
150 | mdev->writ_cnt += e->size >> 9; | ||
181 | list_del(&e->w.list); /* has been on active_ee or sync_ee */ | 151 | list_del(&e->w.list); /* has been on active_ee or sync_ee */ |
182 | list_add_tail(&e->w.list, &mdev->done_ee); | 152 | list_add_tail(&e->w.list, &mdev->done_ee); |
183 | 153 | ||
@@ -190,7 +160,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local) | |||
190 | ? list_empty(&mdev->sync_ee) | 160 | ? list_empty(&mdev->sync_ee) |
191 | : list_empty(&mdev->active_ee); | 161 | : list_empty(&mdev->active_ee); |
192 | 162 | ||
193 | if (error) | 163 | if (test_bit(__EE_WAS_ERROR, &e->flags)) |
194 | __drbd_chk_io_error(mdev, FALSE); | 164 | __drbd_chk_io_error(mdev, FALSE); |
195 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 165 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
196 | 166 | ||
@@ -205,7 +175,42 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local) | |||
205 | 175 | ||
206 | wake_asender(mdev); | 176 | wake_asender(mdev); |
207 | put_ldev(mdev); | 177 | put_ldev(mdev); |
178 | } | ||
208 | 179 | ||
180 | /* writes on behalf of the partner, or resync writes, | ||
181 | * "submitted" by the receiver. | ||
182 | */ | ||
183 | void drbd_endio_sec(struct bio *bio, int error) | ||
184 | { | ||
185 | struct drbd_epoch_entry *e = bio->bi_private; | ||
186 | struct drbd_conf *mdev = e->mdev; | ||
187 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | ||
188 | int is_write = bio_data_dir(bio) == WRITE; | ||
189 | |||
190 | if (error) | ||
191 | dev_warn(DEV, "%s: error=%d s=%llus\n", | ||
192 | is_write ? "write" : "read", error, | ||
193 | (unsigned long long)e->sector); | ||
194 | if (!error && !uptodate) { | ||
195 | dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", | ||
196 | is_write ? "write" : "read", | ||
197 | (unsigned long long)e->sector); | ||
198 | /* strange behavior of some lower level drivers... | ||
199 | * fail the request by clearing the uptodate flag, | ||
200 | * but do not return any error?! */ | ||
201 | error = -EIO; | ||
202 | } | ||
203 | |||
204 | if (error) | ||
205 | set_bit(__EE_WAS_ERROR, &e->flags); | ||
206 | |||
207 | bio_put(bio); /* no need for the bio anymore */ | ||
208 | if (atomic_dec_and_test(&e->pending_bios)) { | ||
209 | if (is_write) | ||
210 | drbd_endio_write_sec_final(e); | ||
211 | else | ||
212 | drbd_endio_read_sec_final(e); | ||
213 | } | ||
209 | } | 214 | } |
210 | 215 | ||
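
drbd_endio_sec() above replaces the separate read/write endio handlers with one completion counter: every sub-bio drops pending_bios, errors are recorded stickily, and only the last completion runs the direction-specific final stage. A user-space sketch with stdatomic modeling atomic_dec_and_test():

#include <stdatomic.h>
#include <stdbool.h>

struct ee {
	atomic_int pending_bios;	/* set to N before submitting */
	bool was_error;			/* sticky across sub-I/Os */
};

static void read_final(struct ee *e)  { (void)e; /* send ack/csum  */ }
static void write_final(struct ee *e) { (void)e; /* move to done_ee */ }

static void endio_sec(struct ee *e, bool is_write, int error)
{
	if (error)
		e->was_error = true;

	/* only the last completing sub-I/O finalizes the entry */
	if (atomic_fetch_sub(&e->pending_bios, 1) == 1) {
		if (is_write)
			write_final(e);
		else
			read_final(e);
	}
}
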
211 | /* read, readA or write requests on R_PRIMARY coming from drbd_make_request | 216 | /* read, readA or write requests on R_PRIMARY coming from drbd_make_request |
@@ -219,9 +224,6 @@ void drbd_endio_pri(struct bio *bio, int error) | |||
219 | enum drbd_req_event what; | 224 | enum drbd_req_event what; |
220 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | 225 | int uptodate = bio_flagged(bio, BIO_UPTODATE); |
221 | 226 | ||
222 | if (error) | ||
223 | dev_warn(DEV, "p %s: error=%d\n", | ||
224 | bio_data_dir(bio) == WRITE ? "write" : "read", error); | ||
225 | if (!error && !uptodate) { | 227 | if (!error && !uptodate) { |
226 | dev_warn(DEV, "p %s: setting error to -EIO\n", | 228 | dev_warn(DEV, "p %s: setting error to -EIO\n", |
227 | bio_data_dir(bio) == WRITE ? "write" : "read"); | 229 | bio_data_dir(bio) == WRITE ? "write" : "read"); |
@@ -252,20 +254,6 @@ void drbd_endio_pri(struct bio *bio, int error) | |||
252 | complete_master_bio(mdev, &m); | 254 | complete_master_bio(mdev, &m); |
253 | } | 255 | } |
254 | 256 | ||
255 | int w_io_error(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | ||
256 | { | ||
257 | struct drbd_request *req = container_of(w, struct drbd_request, w); | ||
258 | |||
259 | /* NOTE: mdev->ldev can be NULL by the time we get here! */ | ||
260 | /* D_ASSERT(mdev->ldev->dc.on_io_error != EP_PASS_ON); */ | ||
261 | |||
262 | /* the only way this callback is scheduled is from _req_may_be_done, | ||
263 | * when it is done and had a local write error, see comments there */ | ||
264 | drbd_req_free(req); | ||
265 | |||
266 | return TRUE; | ||
267 | } | ||
268 | |||
269 | int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | 257 | int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
270 | { | 258 | { |
271 | struct drbd_request *req = container_of(w, struct drbd_request, w); | 259 | struct drbd_request *req = container_of(w, struct drbd_request, w); |
@@ -275,12 +263,9 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
275 | * to give the disk the chance to relocate that block */ | 263 | * to give the disk the chance to relocate that block */ |
276 | 264 | ||
277 | spin_lock_irq(&mdev->req_lock); | 265 | spin_lock_irq(&mdev->req_lock); |
278 | if (cancel || | 266 | if (cancel || mdev->state.pdsk != D_UP_TO_DATE) { |
279 | mdev->state.conn < C_CONNECTED || | 267 | _req_mod(req, read_retry_remote_canceled); |
280 | mdev->state.pdsk <= D_INCONSISTENT) { | ||
281 | _req_mod(req, send_canceled); | ||
282 | spin_unlock_irq(&mdev->req_lock); | 268 | spin_unlock_irq(&mdev->req_lock); |
283 | dev_alert(DEV, "WE ARE LOST. Local IO failure, no peer.\n"); | ||
284 | return 1; | 269 | return 1; |
285 | } | 270 | } |
286 | spin_unlock_irq(&mdev->req_lock); | 271 | spin_unlock_irq(&mdev->req_lock); |
@@ -295,7 +280,34 @@ int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
295 | return 1; /* Simply ignore this! */ | 280 | return 1; /* Simply ignore this! */ |
296 | } | 281 | } |
297 | 282 | ||
298 | void drbd_csum(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest) | 283 | void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest) |
284 | { | ||
285 | struct hash_desc desc; | ||
286 | struct scatterlist sg; | ||
287 | struct page *page = e->pages; | ||
288 | struct page *tmp; | ||
289 | unsigned len; | ||
290 | |||
291 | desc.tfm = tfm; | ||
292 | desc.flags = 0; | ||
293 | |||
294 | sg_init_table(&sg, 1); | ||
295 | crypto_hash_init(&desc); | ||
296 | |||
297 | while ((tmp = page_chain_next(page))) { | ||
298 | /* all but the last page will be fully used */ | ||
299 | sg_set_page(&sg, page, PAGE_SIZE, 0); | ||
300 | crypto_hash_update(&desc, &sg, sg.length); | ||
301 | page = tmp; | ||
302 | } | ||
303 | /* and now the last, possibly only partially used page */ | ||
304 | len = e->size & (PAGE_SIZE - 1); | ||
305 | sg_set_page(&sg, page, len ?: PAGE_SIZE, 0); | ||
306 | crypto_hash_update(&desc, &sg, sg.length); | ||
307 | crypto_hash_final(&desc, digest); | ||
308 | } | ||
309 | |||
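
drbd_csum_ee() above hashes the page chain directly: every page that has a successor is fully used, and the tail contributes size modulo PAGE_SIZE bytes, or a whole page when the size is page-aligned (the `len ?: PAGE_SIZE` idiom). A sketch with a stubbed hash_update():

#define PG_SZ 4096u

struct pg { struct pg *next; unsigned char data[PG_SZ]; };

static void hash_update(const void *p, unsigned int len)
{
	(void)p; (void)len;	/* crypto_hash_update() stand-in */
}

static void csum_chain(struct pg *page, unsigned int size)
{
	unsigned int len;

	while (page->next) {		/* all but the last page */
		hash_update(page->data, PG_SZ);
		page = page->next;
	}
	/* tail: size % PG_SZ bytes, or a full page when aligned */
	len = size & (PG_SZ - 1);
	hash_update(page->data, len ? len : PG_SZ);
}
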
310 | void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest) | ||
299 | { | 311 | { |
300 | struct hash_desc desc; | 312 | struct hash_desc desc; |
301 | struct scatterlist sg; | 313 | struct scatterlist sg; |
@@ -329,11 +341,11 @@ static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel | |||
329 | return 1; | 341 | return 1; |
330 | } | 342 | } |
331 | 343 | ||
332 | if (likely(drbd_bio_uptodate(e->private_bio))) { | 344 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { |
333 | digest_size = crypto_hash_digestsize(mdev->csums_tfm); | 345 | digest_size = crypto_hash_digestsize(mdev->csums_tfm); |
334 | digest = kmalloc(digest_size, GFP_NOIO); | 346 | digest = kmalloc(digest_size, GFP_NOIO); |
335 | if (digest) { | 347 | if (digest) { |
336 | drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest); | 348 | drbd_csum_ee(mdev, mdev->csums_tfm, e, digest); |
337 | 349 | ||
338 | inc_rs_pending(mdev); | 350 | inc_rs_pending(mdev); |
339 | ok = drbd_send_drequest_csum(mdev, | 351 | ok = drbd_send_drequest_csum(mdev, |
@@ -369,23 +381,21 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) | |||
369 | /* GFP_TRY, because if there is no memory available right now, this may | 381 | /* GFP_TRY, because if there is no memory available right now, this may |
370 | * be rescheduled for later. It is "only" background resync, after all. */ | 382 | * be rescheduled for later. It is "only" background resync, after all. */ |
371 | e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY); | 383 | e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY); |
372 | if (!e) { | 384 | if (!e) |
373 | put_ldev(mdev); | 385 | goto fail; |
374 | return 2; | ||
375 | } | ||
376 | 386 | ||
377 | spin_lock_irq(&mdev->req_lock); | 387 | spin_lock_irq(&mdev->req_lock); |
378 | list_add(&e->w.list, &mdev->read_ee); | 388 | list_add(&e->w.list, &mdev->read_ee); |
379 | spin_unlock_irq(&mdev->req_lock); | 389 | spin_unlock_irq(&mdev->req_lock); |
380 | 390 | ||
381 | e->private_bio->bi_end_io = drbd_endio_read_sec; | ||
382 | e->private_bio->bi_rw = READ; | ||
383 | e->w.cb = w_e_send_csum; | 391 | e->w.cb = w_e_send_csum; |
392 | if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) | ||
393 | return 1; | ||
384 | 394 | ||
385 | mdev->read_cnt += size >> 9; | 395 | drbd_free_ee(mdev, e); |
386 | drbd_generic_make_request(mdev, DRBD_FAULT_RS_RD, e->private_bio); | 396 | fail: |
387 | 397 | put_ldev(mdev); | |
388 | return 1; | 398 | return 2; |
389 | } | 399 | } |
390 | 400 | ||
391 | void resync_timer_fn(unsigned long data) | 401 | void resync_timer_fn(unsigned long data) |
@@ -420,7 +430,7 @@ int w_make_resync_request(struct drbd_conf *mdev, | |||
420 | unsigned long bit; | 430 | unsigned long bit; |
421 | sector_t sector; | 431 | sector_t sector; |
422 | const sector_t capacity = drbd_get_capacity(mdev->this_bdev); | 432 | const sector_t capacity = drbd_get_capacity(mdev->this_bdev); |
423 | int max_segment_size = queue_max_segment_size(mdev->rq_queue); | 433 | int max_segment_size; |
424 | int number, i, size, pe, mx; | 434 | int number, i, size, pe, mx; |
425 | int align, queued, sndbuf; | 435 | int align, queued, sndbuf; |
426 | 436 | ||
@@ -446,7 +456,12 @@ int w_make_resync_request(struct drbd_conf *mdev, | |||
446 | return 1; | 456 | return 1; |
447 | } | 457 | } |
448 | 458 | ||
449 | number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ); | 459 | /* starting with drbd 8.3.8, we can handle multi-bio EEs, |
460 | * if it should be necessary */ | ||
461 | max_segment_size = mdev->agreed_pro_version < 94 ? | ||
462 | queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE; | ||
463 | |||
464 | number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE / 1024) * HZ); | ||
450 | pe = atomic_read(&mdev->rs_pending_cnt); | 465 | pe = atomic_read(&mdev->rs_pending_cnt); |
451 | 466 | ||
452 | mutex_lock(&mdev->data.mutex); | 467 | mutex_lock(&mdev->data.mutex); |
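
For a feel of the pacing formula above, assume (as in the drbd headers of this era, to the best of our knowledge) BM_BLOCK_SIZE is 4096 bytes and SLEEP_TIME is HZ/10, i.e. a 100 ms wakeup. With sync_conf.rate = 10240 KiB/s:

	number = SLEEP_TIME * rate / ((BM_BLOCK_SIZE / 1024) * HZ)
	       = (HZ/10) * 10240 / (4 * HZ)
	       = 256 resync requests per wakeup

which works out to 256 * 4 KiB * 10 wakeups/s = 10 MiB/s of resync reads, matching the configured rate.
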
@@ -509,12 +524,6 @@ next_sector: | |||
509 | * | 524 | * |
510 | * Additionally always align bigger requests, in order to | 525 | * Additionally always align bigger requests, in order to |
511 | * be prepared for all stripe sizes of software RAIDs. | 526 | * be prepared for all stripe sizes of software RAIDs. |
512 | * | ||
513 | * we _do_ care about the agreed-upon q->max_segment_size | ||
514 | * here, as splitting up the requests on the other side is more | ||
515 | * difficult. the consequence is, that on lvm and md and other | ||
516 | * "indirect" devices, this is dead code, since | ||
517 | * q->max_segment_size will be PAGE_SIZE. | ||
518 | */ | 527 | */ |
519 | align = 1; | 528 | align = 1; |
520 | for (;;) { | 529 | for (;;) { |
@@ -806,7 +815,7 @@ out: | |||
806 | /* helper */ | 815 | /* helper */ |
807 | static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e) | 816 | static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e) |
808 | { | 817 | { |
809 | if (drbd_bio_has_active_page(e->private_bio)) { | 818 | if (drbd_ee_has_active_page(e)) { |
810 | /* This might happen if sendpage() has not finished */ | 819 | /* This might happen if sendpage() has not finished */ |
811 | spin_lock_irq(&mdev->req_lock); | 820 | spin_lock_irq(&mdev->req_lock); |
812 | list_add_tail(&e->w.list, &mdev->net_ee); | 821 | list_add_tail(&e->w.list, &mdev->net_ee); |
@@ -832,7 +841,7 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
832 | return 1; | 841 | return 1; |
833 | } | 842 | } |
834 | 843 | ||
835 | if (likely(drbd_bio_uptodate(e->private_bio))) { | 844 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { |
836 | ok = drbd_send_block(mdev, P_DATA_REPLY, e); | 845 | ok = drbd_send_block(mdev, P_DATA_REPLY, e); |
837 | } else { | 846 | } else { |
838 | if (__ratelimit(&drbd_ratelimit_state)) | 847 | if (__ratelimit(&drbd_ratelimit_state)) |
@@ -873,7 +882,7 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
873 | put_ldev(mdev); | 882 | put_ldev(mdev); |
874 | } | 883 | } |
875 | 884 | ||
876 | if (likely(drbd_bio_uptodate(e->private_bio))) { | 885 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { |
877 | if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { | 886 | if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { |
878 | inc_rs_pending(mdev); | 887 | inc_rs_pending(mdev); |
879 | ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); | 888 | ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); |
@@ -921,7 +930,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
921 | 930 | ||
922 | di = (struct digest_info *)(unsigned long)e->block_id; | 931 | di = (struct digest_info *)(unsigned long)e->block_id; |
923 | 932 | ||
924 | if (likely(drbd_bio_uptodate(e->private_bio))) { | 933 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { |
925 | /* quick hack to try to avoid a race against reconfiguration. | 934 | /* quick hack to try to avoid a race against reconfiguration. |
926 | * a real fix would be much more involved, | 935 | * a real fix would be much more involved, |
927 | * introducing more locking mechanisms */ | 936 | * introducing more locking mechanisms */ |
@@ -931,7 +940,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
931 | digest = kmalloc(digest_size, GFP_NOIO); | 940 | digest = kmalloc(digest_size, GFP_NOIO); |
932 | } | 941 | } |
933 | if (digest) { | 942 | if (digest) { |
934 | drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest); | 943 | drbd_csum_ee(mdev, mdev->csums_tfm, e, digest); |
935 | eq = !memcmp(digest, di->digest, digest_size); | 944 | eq = !memcmp(digest, di->digest, digest_size); |
936 | kfree(digest); | 945 | kfree(digest); |
937 | } | 946 | } |
@@ -973,14 +982,14 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
973 | if (unlikely(cancel)) | 982 | if (unlikely(cancel)) |
974 | goto out; | 983 | goto out; |
975 | 984 | ||
976 | if (unlikely(!drbd_bio_uptodate(e->private_bio))) | 985 | if (unlikely((e->flags & EE_WAS_ERROR) != 0)) |
977 | goto out; | 986 | goto out; |
978 | 987 | ||
979 | digest_size = crypto_hash_digestsize(mdev->verify_tfm); | 988 | digest_size = crypto_hash_digestsize(mdev->verify_tfm); |
980 | /* FIXME if this allocation fails, online verify will not terminate! */ | 989 | /* FIXME if this allocation fails, online verify will not terminate! */ |
981 | digest = kmalloc(digest_size, GFP_NOIO); | 990 | digest = kmalloc(digest_size, GFP_NOIO); |
982 | if (digest) { | 991 | if (digest) { |
983 | drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest); | 992 | drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); |
984 | inc_rs_pending(mdev); | 993 | inc_rs_pending(mdev); |
985 | ok = drbd_send_drequest_csum(mdev, e->sector, e->size, | 994 | ok = drbd_send_drequest_csum(mdev, e->sector, e->size, |
986 | digest, digest_size, P_OV_REPLY); | 995 | digest, digest_size, P_OV_REPLY); |
@@ -1029,11 +1038,11 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1029 | 1038 | ||
1030 | di = (struct digest_info *)(unsigned long)e->block_id; | 1039 | di = (struct digest_info *)(unsigned long)e->block_id; |
1031 | 1040 | ||
1032 | if (likely(drbd_bio_uptodate(e->private_bio))) { | 1041 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { |
1033 | digest_size = crypto_hash_digestsize(mdev->verify_tfm); | 1042 | digest_size = crypto_hash_digestsize(mdev->verify_tfm); |
1034 | digest = kmalloc(digest_size, GFP_NOIO); | 1043 | digest = kmalloc(digest_size, GFP_NOIO); |
1035 | if (digest) { | 1044 | if (digest) { |
1036 | drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest); | 1045 | drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); |
1037 | 1046 | ||
1038 | D_ASSERT(digest_size == di->digest_size); | 1047 | D_ASSERT(digest_size == di->digest_size); |
1039 | eq = !memcmp(digest, di->digest, digest_size); | 1048 | eq = !memcmp(digest, di->digest, digest_size); |
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h index f93fa111ce50..defdb5013ea3 100644 --- a/drivers/block/drbd/drbd_wrappers.h +++ b/drivers/block/drbd/drbd_wrappers.h | |||
@@ -18,23 +18,9 @@ static inline void drbd_set_my_capacity(struct drbd_conf *mdev, | |||
18 | 18 | ||
19 | #define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE) | 19 | #define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE) |
20 | 20 | ||
21 | static inline int drbd_bio_has_active_page(struct bio *bio) | ||
22 | { | ||
23 | struct bio_vec *bvec; | ||
24 | int i; | ||
25 | |||
26 | __bio_for_each_segment(bvec, bio, i, 0) { | ||
27 | if (page_count(bvec->bv_page) > 1) | ||
28 | return 1; | ||
29 | } | ||
30 | |||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | /* bi_end_io handlers */ | 21 | /* bi_end_io handlers */ |
35 | extern void drbd_md_io_complete(struct bio *bio, int error); | 22 | extern void drbd_md_io_complete(struct bio *bio, int error); |
36 | extern void drbd_endio_read_sec(struct bio *bio, int error); | 23 | extern void drbd_endio_sec(struct bio *bio, int error); |
37 | extern void drbd_endio_write_sec(struct bio *bio, int error); | ||
38 | extern void drbd_endio_pri(struct bio *bio, int error); | 24 | extern void drbd_endio_pri(struct bio *bio, int error); |
39 | 25 | ||
40 | /* | 26 | /* |
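Note: the worker hunks above all follow from one change: an epoch entry no longer keeps its private bio alive until the worker runs, so completion status and checksumming move from the bio (drbd_bio_uptodate, drbd_csum) to the entry itself (EE_WAS_ERROR, drbd_csum_ee), and the read/write secondary endio handlers merge into drbd_endio_sec. A hedged sketch of the shape this implies — the assumption that bi_private points at the entry, and the drbd_queue_done() helper, are illustrative stand-ins, not drbd's actual code:

	void drbd_endio_sec(struct bio *bio, int error)
	{
		struct drbd_epoch_entry *e = bio->bi_private;	/* assumed wiring */

		/* latch any failure into the entry; workers later test
		 * (e->flags & EE_WAS_ERROR) instead of the freed bio */
		if (error || !bio_flagged(bio, BIO_UPTODATE))
			e->flags |= EE_WAS_ERROR;

		bio_put(bio);
		drbd_queue_done(e);	/* hypothetical: hand e->w to the worker */
	}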
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 90c4038702da..cf04c1b234ed 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -178,6 +178,7 @@ static int print_unex = 1; | |||
178 | #include <linux/slab.h> | 178 | #include <linux/slab.h> |
179 | #include <linux/mm.h> | 179 | #include <linux/mm.h> |
180 | #include <linux/bio.h> | 180 | #include <linux/bio.h> |
181 | #include <linux/smp_lock.h> | ||
181 | #include <linux/string.h> | 182 | #include <linux/string.h> |
182 | #include <linux/jiffies.h> | 183 | #include <linux/jiffies.h> |
183 | #include <linux/fcntl.h> | 184 | #include <linux/fcntl.h> |
@@ -514,8 +515,6 @@ static unsigned long fdc_busy; | |||
514 | static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); | 515 | static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); |
515 | static DECLARE_WAIT_QUEUE_HEAD(command_done); | 516 | static DECLARE_WAIT_QUEUE_HEAD(command_done); |
516 | 517 | ||
517 | #define NO_SIGNAL (!interruptible || !signal_pending(current)) | ||
518 | |||
519 | /* Errors during formatting are counted here. */ | 518 | /* Errors during formatting are counted here. */ |
520 | static int format_errors; | 519 | static int format_errors; |
521 | 520 | ||
@@ -539,7 +538,7 @@ static int max_buffer_sectors; | |||
539 | 538 | ||
540 | static int *errors; | 539 | static int *errors; |
541 | typedef void (*done_f)(int); | 540 | typedef void (*done_f)(int); |
542 | static struct cont_t { | 541 | static const struct cont_t { |
543 | void (*interrupt)(void); | 542 | void (*interrupt)(void); |
544 | /* this is called after the interrupt of the | 543 | /* this is called after the interrupt of the |
545 | * main command */ | 544 | * main command */ |
@@ -578,7 +577,7 @@ static void reset_fdc(void); | |||
578 | #define NEED_1_RECAL -2 | 577 | #define NEED_1_RECAL -2 |
579 | #define NEED_2_RECAL -3 | 578 | #define NEED_2_RECAL -3 |
580 | 579 | ||
581 | static int usage_count; | 580 | static atomic_t usage_count = ATOMIC_INIT(0); |
582 | 581 | ||
583 | /* buffer related variables */ | 582 | /* buffer related variables */ |
584 | static int buffer_track = -1; | 583 | static int buffer_track = -1; |
@@ -858,36 +857,15 @@ static void set_fdc(int drive) | |||
858 | } | 857 | } |
859 | 858 | ||
860 | /* locks the driver */ | 859 | /* locks the driver */ |
861 | static int _lock_fdc(int drive, bool interruptible, int line) | 860 | static int lock_fdc(int drive, bool interruptible) |
862 | { | 861 | { |
863 | if (!usage_count) { | 862 | if (WARN(atomic_read(&usage_count) == 0, |
864 | pr_err("Trying to lock fdc while usage count=0 at line %d\n", | 863 | "Trying to lock fdc while usage count=0\n")) |
865 | line); | ||
866 | return -1; | 864 | return -1; |
867 | } | ||
868 | |||
869 | if (test_and_set_bit(0, &fdc_busy)) { | ||
870 | DECLARE_WAITQUEUE(wait, current); | ||
871 | add_wait_queue(&fdc_wait, &wait); | ||
872 | |||
873 | for (;;) { | ||
874 | set_current_state(TASK_INTERRUPTIBLE); | ||
875 | |||
876 | if (!test_and_set_bit(0, &fdc_busy)) | ||
877 | break; | ||
878 | 865 | ||
879 | schedule(); | 866 | if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy))) |
880 | 867 | return -EINTR; | |
881 | if (!NO_SIGNAL) { | ||
882 | remove_wait_queue(&fdc_wait, &wait); | ||
883 | return -EINTR; | ||
884 | } | ||
885 | } | ||
886 | 868 | ||
887 | set_current_state(TASK_RUNNING); | ||
888 | remove_wait_queue(&fdc_wait, &wait); | ||
889 | flush_scheduled_work(); | ||
890 | } | ||
891 | command_status = FD_COMMAND_NONE; | 869 | command_status = FD_COMMAND_NONE; |
892 | 870 | ||
893 | __reschedule_timeout(drive, "lock fdc"); | 871 | __reschedule_timeout(drive, "lock fdc"); |
@@ -895,11 +873,8 @@ static int _lock_fdc(int drive, bool interruptible, int line) | |||
895 | return 0; | 873 | return 0; |
896 | } | 874 | } |
897 | 875 | ||
898 | #define lock_fdc(drive, interruptible) \ | ||
899 | _lock_fdc(drive, interruptible, __LINE__) | ||
900 | |||
901 | /* unlocks the driver */ | 876 | /* unlocks the driver */ |
902 | static inline void unlock_fdc(void) | 877 | static void unlock_fdc(void) |
903 | { | 878 | { |
904 | unsigned long flags; | 879 | unsigned long flags; |
905 | 880 | ||
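Note: the rewritten lock_fdc() is the recurring simplification in this file: an open-coded add_wait_queue/schedule/signal-check loop is exactly what wait_event_interruptible() already implements, including re-testing the condition after every wakeup. The idiom in isolation, as a sketch:

	/* sleep until the busy bit is won; -EINTR if a signal arrives first */
	static int acquire_busy_bit(void)
	{
		if (wait_event_interruptible(fdc_wait,
					     !test_and_set_bit(0, &fdc_busy)))
			return -EINTR;
		return 0;
	}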
@@ -1224,7 +1199,7 @@ static int need_more_output(void) | |||
1224 | /* Set perpendicular mode as required, based on data rate, if supported. | 1199 | /* Set perpendicular mode as required, based on data rate, if supported. |
1225 | * 82077 Now tested. 1Mbps data rate only possible with 82077-1. | 1200 | * 82077 Now tested. 1Mbps data rate only possible with 82077-1. |
1226 | */ | 1201 | */ |
1227 | static inline void perpendicular_mode(void) | 1202 | static void perpendicular_mode(void) |
1228 | { | 1203 | { |
1229 | unsigned char perp_mode; | 1204 | unsigned char perp_mode; |
1230 | 1205 | ||
@@ -1995,14 +1970,14 @@ static void do_wakeup(void) | |||
1995 | wake_up(&command_done); | 1970 | wake_up(&command_done); |
1996 | } | 1971 | } |
1997 | 1972 | ||
1998 | static struct cont_t wakeup_cont = { | 1973 | static const struct cont_t wakeup_cont = { |
1999 | .interrupt = empty, | 1974 | .interrupt = empty, |
2000 | .redo = do_wakeup, | 1975 | .redo = do_wakeup, |
2001 | .error = empty, | 1976 | .error = empty, |
2002 | .done = (done_f)empty | 1977 | .done = (done_f)empty |
2003 | }; | 1978 | }; |
2004 | 1979 | ||
2005 | static struct cont_t intr_cont = { | 1980 | static const struct cont_t intr_cont = { |
2006 | .interrupt = empty, | 1981 | .interrupt = empty, |
2007 | .redo = process_fd_request, | 1982 | .redo = process_fd_request, |
2008 | .error = empty, | 1983 | .error = empty, |
@@ -2015,25 +1990,10 @@ static int wait_til_done(void (*handler)(void), bool interruptible) | |||
2015 | 1990 | ||
2016 | schedule_bh(handler); | 1991 | schedule_bh(handler); |
2017 | 1992 | ||
2018 | if (command_status < 2 && NO_SIGNAL) { | 1993 | if (interruptible) |
2019 | DECLARE_WAITQUEUE(wait, current); | 1994 | wait_event_interruptible(command_done, command_status >= 2); |
2020 | 1995 | else | |
2021 | add_wait_queue(&command_done, &wait); | 1996 | wait_event(command_done, command_status >= 2); |
2022 | for (;;) { | ||
2023 | set_current_state(interruptible ? | ||
2024 | TASK_INTERRUPTIBLE : | ||
2025 | TASK_UNINTERRUPTIBLE); | ||
2026 | |||
2027 | if (command_status >= 2 || !NO_SIGNAL) | ||
2028 | break; | ||
2029 | |||
2030 | is_alive(__func__, ""); | ||
2031 | schedule(); | ||
2032 | } | ||
2033 | |||
2034 | set_current_state(TASK_RUNNING); | ||
2035 | remove_wait_queue(&command_done, &wait); | ||
2036 | } | ||
2037 | 1997 | ||
2038 | if (command_status < 2) { | 1998 | if (command_status < 2) { |
2039 | cancel_activity(); | 1999 | cancel_activity(); |
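Note: wait_til_done() gets the same treatment, picking the helper by caller intent; command_status < 2 after an interruptible wait means a signal got there first, which is why the cancel_activity() branch survives. A standalone model of the wait, not driver code:

	static int wait_done(bool interruptible)
	{
		if (interruptible)
			wait_event_interruptible(command_done, command_status >= 2);
		else
			wait_event(command_done, command_status >= 2);

		/* < 2 here means the interruptible wait was signalled */
		return command_status >= 2 ? 0 : -EINTR;
	}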
@@ -2223,7 +2183,7 @@ static void redo_format(void) | |||
2223 | debugt(__func__, "queue format request"); | 2183 | debugt(__func__, "queue format request"); |
2224 | } | 2184 | } |
2225 | 2185 | ||
2226 | static struct cont_t format_cont = { | 2186 | static const struct cont_t format_cont = { |
2227 | .interrupt = format_interrupt, | 2187 | .interrupt = format_interrupt, |
2228 | .redo = redo_format, | 2188 | .redo = redo_format, |
2229 | .error = bad_flp_intr, | 2189 | .error = bad_flp_intr, |
@@ -2583,10 +2543,8 @@ static int make_raw_rw_request(void) | |||
2583 | int tracksize; | 2543 | int tracksize; |
2584 | int ssize; | 2544 | int ssize; |
2585 | 2545 | ||
2586 | if (max_buffer_sectors == 0) { | 2546 | if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n")) |
2587 | pr_info("VFS: Block I/O scheduled on unopened device\n"); | ||
2588 | return 0; | 2547 | return 0; |
2589 | } | ||
2590 | 2548 | ||
2591 | set_fdc((long)current_req->rq_disk->private_data); | 2549 | set_fdc((long)current_req->rq_disk->private_data); |
2592 | 2550 | ||
@@ -2921,7 +2879,7 @@ do_request: | |||
2921 | return; | 2879 | return; |
2922 | } | 2880 | } |
2923 | 2881 | ||
2924 | static struct cont_t rw_cont = { | 2882 | static const struct cont_t rw_cont = { |
2925 | .interrupt = rw_interrupt, | 2883 | .interrupt = rw_interrupt, |
2926 | .redo = redo_fd_request, | 2884 | .redo = redo_fd_request, |
2927 | .error = bad_flp_intr, | 2885 | .error = bad_flp_intr, |
@@ -2936,19 +2894,16 @@ static void process_fd_request(void) | |||
2936 | 2894 | ||
2937 | static void do_fd_request(struct request_queue *q) | 2895 | static void do_fd_request(struct request_queue *q) |
2938 | { | 2896 | { |
2939 | if (max_buffer_sectors == 0) { | 2897 | if (WARN(max_buffer_sectors == 0, |
2940 | pr_info("VFS: %s called on non-open device\n", __func__); | 2898 | "VFS: %s called on non-open device\n", __func__)) |
2941 | return; | 2899 | return; |
2942 | } | ||
2943 | 2900 | ||
2944 | if (usage_count == 0) { | 2901 | if (WARN(atomic_read(&usage_count) == 0, |
2945 | pr_info("warning: usage count=0, current_req=%p exiting\n", | 2902 | "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n", |
2946 | current_req); | 2903 | current_req, (long)blk_rq_pos(current_req), current_req->cmd_type, |
2947 | pr_info("sect=%ld type=%x flags=%x\n", | 2904 | current_req->cmd_flags)) |
2948 | (long)blk_rq_pos(current_req), current_req->cmd_type, | ||
2949 | current_req->cmd_flags); | ||
2950 | return; | 2905 | return; |
2951 | } | 2906 | |
2952 | if (test_bit(0, &fdc_busy)) { | 2907 | if (test_bit(0, &fdc_busy)) { |
2953 | /* fdc busy, this new request will be treated when the | 2908 | /* fdc busy, this new request will be treated when the |
2954 | current one is done */ | 2909 | current one is done */ |
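Note: do_fd_request() and make_raw_rw_request() above exploit the fact that WARN(cond, fmt, ...) evaluates to cond, so a print-then-return pair folds into the guard itself and gains a backtrace. The shape, as a generic sketch:

	static void sample_request_fn(struct request_queue *q)
	{
		/* WARN() returns true when it fired: log and bail in one step */
		if (WARN(atomic_read(&usage_count) == 0,
			 "request on an unopened device\n"))
			return;

		/* ... normal dispatch ... */
	}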
@@ -2960,7 +2915,7 @@ static void do_fd_request(struct request_queue *q) | |||
2960 | is_alive(__func__, ""); | 2915 | is_alive(__func__, ""); |
2961 | } | 2916 | } |
2962 | 2917 | ||
2963 | static struct cont_t poll_cont = { | 2918 | static const struct cont_t poll_cont = { |
2964 | .interrupt = success_and_wakeup, | 2919 | .interrupt = success_and_wakeup, |
2965 | .redo = floppy_ready, | 2920 | .redo = floppy_ready, |
2966 | .error = generic_failure, | 2921 | .error = generic_failure, |
@@ -2991,7 +2946,7 @@ static void reset_intr(void) | |||
2991 | pr_info("weird, reset interrupt called\n"); | 2946 | pr_info("weird, reset interrupt called\n"); |
2992 | } | 2947 | } |
2993 | 2948 | ||
2994 | static struct cont_t reset_cont = { | 2949 | static const struct cont_t reset_cont = { |
2995 | .interrupt = reset_intr, | 2950 | .interrupt = reset_intr, |
2996 | .redo = success_and_wakeup, | 2951 | .redo = success_and_wakeup, |
2997 | .error = generic_failure, | 2952 | .error = generic_failure, |
@@ -3033,7 +2988,7 @@ static inline int fd_copyin(void __user *param, void *address, | |||
3033 | return copy_from_user(address, param, size) ? -EFAULT : 0; | 2988 | return copy_from_user(address, param, size) ? -EFAULT : 0; |
3034 | } | 2989 | } |
3035 | 2990 | ||
3036 | static inline const char *drive_name(int type, int drive) | 2991 | static const char *drive_name(int type, int drive) |
3037 | { | 2992 | { |
3038 | struct floppy_struct *floppy; | 2993 | struct floppy_struct *floppy; |
3039 | 2994 | ||
@@ -3096,14 +3051,14 @@ static void raw_cmd_done(int flag) | |||
3096 | generic_done(flag); | 3051 | generic_done(flag); |
3097 | } | 3052 | } |
3098 | 3053 | ||
3099 | static struct cont_t raw_cmd_cont = { | 3054 | static const struct cont_t raw_cmd_cont = { |
3100 | .interrupt = success_and_wakeup, | 3055 | .interrupt = success_and_wakeup, |
3101 | .redo = floppy_start, | 3056 | .redo = floppy_start, |
3102 | .error = generic_failure, | 3057 | .error = generic_failure, |
3103 | .done = raw_cmd_done | 3058 | .done = raw_cmd_done |
3104 | }; | 3059 | }; |
3105 | 3060 | ||
3106 | static inline int raw_cmd_copyout(int cmd, void __user *param, | 3061 | static int raw_cmd_copyout(int cmd, void __user *param, |
3107 | struct floppy_raw_cmd *ptr) | 3062 | struct floppy_raw_cmd *ptr) |
3108 | { | 3063 | { |
3109 | int ret; | 3064 | int ret; |
@@ -3148,7 +3103,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr) | |||
3148 | } | 3103 | } |
3149 | } | 3104 | } |
3150 | 3105 | ||
3151 | static inline int raw_cmd_copyin(int cmd, void __user *param, | 3106 | static int raw_cmd_copyin(int cmd, void __user *param, |
3152 | struct floppy_raw_cmd **rcmd) | 3107 | struct floppy_raw_cmd **rcmd) |
3153 | { | 3108 | { |
3154 | struct floppy_raw_cmd *ptr; | 3109 | struct floppy_raw_cmd *ptr; |
@@ -3266,7 +3221,7 @@ static int invalidate_drive(struct block_device *bdev) | |||
3266 | return 0; | 3221 | return 0; |
3267 | } | 3222 | } |
3268 | 3223 | ||
3269 | static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, | 3224 | static int set_geometry(unsigned int cmd, struct floppy_struct *g, |
3270 | int drive, int type, struct block_device *bdev) | 3225 | int drive, int type, struct block_device *bdev) |
3271 | { | 3226 | { |
3272 | int cnt; | 3227 | int cnt; |
@@ -3337,7 +3292,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, | |||
3337 | } | 3292 | } |
3338 | 3293 | ||
3339 | /* handle obsolete ioctl's */ | 3294 | /* handle obsolete ioctl's */ |
3340 | static int ioctl_table[] = { | 3295 | static unsigned int ioctl_table[] = { |
3341 | FDCLRPRM, | 3296 | FDCLRPRM, |
3342 | FDSETPRM, | 3297 | FDSETPRM, |
3343 | FDDEFPRM, | 3298 | FDDEFPRM, |
@@ -3365,7 +3320,7 @@ static int ioctl_table[] = { | |||
3365 | FDTWADDLE | 3320 | FDTWADDLE |
3366 | }; | 3321 | }; |
3367 | 3322 | ||
3368 | static inline int normalize_ioctl(int *cmd, int *size) | 3323 | static int normalize_ioctl(unsigned int *cmd, int *size) |
3369 | { | 3324 | { |
3370 | int i; | 3325 | int i; |
3371 | 3326 | ||
@@ -3417,7 +3372,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
3417 | return 0; | 3372 | return 0; |
3418 | } | 3373 | } |
3419 | 3374 | ||
3420 | static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, | 3375 | static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, |
3421 | unsigned long param) | 3376 | unsigned long param) |
3422 | { | 3377 | { |
3423 | int drive = (long)bdev->bd_disk->private_data; | 3378 | int drive = (long)bdev->bd_disk->private_data; |
@@ -3593,6 +3548,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, | |||
3593 | return 0; | 3548 | return 0; |
3594 | } | 3549 | } |
3595 | 3550 | ||
3551 | static int fd_ioctl(struct block_device *bdev, fmode_t mode, | ||
3552 | unsigned int cmd, unsigned long param) | ||
3553 | { | ||
3554 | int ret; | ||
3555 | |||
3556 | lock_kernel(); | ||
3557 | ret = fd_locked_ioctl(bdev, mode, cmd, param); | ||
3558 | unlock_kernel(); | ||
3559 | |||
3560 | return ret; | ||
3561 | } | ||
3562 | |||
3596 | static void __init config_types(void) | 3563 | static void __init config_types(void) |
3597 | { | 3564 | { |
3598 | bool has_drive = false; | 3565 | bool has_drive = false; |
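Note: this is the first instance of the pattern the series applies to every driver below (nbd, pcd, pd, pf, pktcdvd): .locked_ioctl disappears from block_device_operations, the old handler is renamed *_locked_ioctl(), and a thin .ioctl wrapper takes the big kernel lock explicitly so each driver can later drop it on its own schedule. Generic sketch, with foo_locked_ioctl() standing in for the renamed handler:

	#include <linux/smp_lock.h>

	static int foo_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
	{
		int ret;

		lock_kernel();
		ret = foo_locked_ioctl(bdev, mode, cmd, arg);
		unlock_kernel();

		return ret;
	}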
@@ -3649,6 +3616,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) | |||
3649 | { | 3616 | { |
3650 | int drive = (long)disk->private_data; | 3617 | int drive = (long)disk->private_data; |
3651 | 3618 | ||
3619 | lock_kernel(); | ||
3652 | mutex_lock(&open_lock); | 3620 | mutex_lock(&open_lock); |
3653 | if (UDRS->fd_ref < 0) | 3621 | if (UDRS->fd_ref < 0) |
3654 | UDRS->fd_ref = 0; | 3622 | UDRS->fd_ref = 0; |
@@ -3659,6 +3627,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) | |||
3659 | if (!UDRS->fd_ref) | 3627 | if (!UDRS->fd_ref) |
3660 | opened_bdev[drive] = NULL; | 3628 | opened_bdev[drive] = NULL; |
3661 | mutex_unlock(&open_lock); | 3629 | mutex_unlock(&open_lock); |
3630 | unlock_kernel(); | ||
3662 | 3631 | ||
3663 | return 0; | 3632 | return 0; |
3664 | } | 3633 | } |
@@ -3676,6 +3645,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) | |||
3676 | int res = -EBUSY; | 3645 | int res = -EBUSY; |
3677 | char *tmp; | 3646 | char *tmp; |
3678 | 3647 | ||
3648 | lock_kernel(); | ||
3679 | mutex_lock(&open_lock); | 3649 | mutex_lock(&open_lock); |
3680 | old_dev = UDRS->fd_device; | 3650 | old_dev = UDRS->fd_device; |
3681 | if (opened_bdev[drive] && opened_bdev[drive] != bdev) | 3651 | if (opened_bdev[drive] && opened_bdev[drive] != bdev) |
@@ -3752,6 +3722,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) | |||
3752 | goto out; | 3722 | goto out; |
3753 | } | 3723 | } |
3754 | mutex_unlock(&open_lock); | 3724 | mutex_unlock(&open_lock); |
3725 | unlock_kernel(); | ||
3755 | return 0; | 3726 | return 0; |
3756 | out: | 3727 | out: |
3757 | if (UDRS->fd_ref < 0) | 3728 | if (UDRS->fd_ref < 0) |
@@ -3762,6 +3733,7 @@ out: | |||
3762 | opened_bdev[drive] = NULL; | 3733 | opened_bdev[drive] = NULL; |
3763 | out2: | 3734 | out2: |
3764 | mutex_unlock(&open_lock); | 3735 | mutex_unlock(&open_lock); |
3736 | unlock_kernel(); | ||
3765 | return res; | 3737 | return res; |
3766 | } | 3738 | } |
3767 | 3739 | ||
@@ -3829,6 +3801,7 @@ static int __floppy_read_block_0(struct block_device *bdev) | |||
3829 | bio.bi_size = size; | 3801 | bio.bi_size = size; |
3830 | bio.bi_bdev = bdev; | 3802 | bio.bi_bdev = bdev; |
3831 | bio.bi_sector = 0; | 3803 | bio.bi_sector = 0; |
3804 | bio.bi_flags = (1 << BIO_QUIET); | ||
3832 | init_completion(&complete); | 3805 | init_completion(&complete); |
3833 | bio.bi_private = &complete; | 3806 | bio.bi_private = &complete; |
3834 | bio.bi_end_io = floppy_rb0_complete; | 3807 | bio.bi_end_io = floppy_rb0_complete; |
@@ -3857,10 +3830,10 @@ static int floppy_revalidate(struct gendisk *disk) | |||
3857 | if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || | 3830 | if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || |
3858 | test_bit(FD_VERIFY_BIT, &UDRS->flags) || | 3831 | test_bit(FD_VERIFY_BIT, &UDRS->flags) || |
3859 | test_bit(drive, &fake_change) || NO_GEOM) { | 3832 | test_bit(drive, &fake_change) || NO_GEOM) { |
3860 | if (usage_count == 0) { | 3833 | if (WARN(atomic_read(&usage_count) == 0, |
3861 | pr_info("VFS: revalidate called on non-open device.\n"); | 3834 | "VFS: revalidate called on non-open device.\n")) |
3862 | return -EFAULT; | 3835 | return -EFAULT; |
3863 | } | 3836 | |
3864 | lock_fdc(drive, false); | 3837 | lock_fdc(drive, false); |
3865 | cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || | 3838 | cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || |
3866 | test_bit(FD_VERIFY_BIT, &UDRS->flags)); | 3839 | test_bit(FD_VERIFY_BIT, &UDRS->flags)); |
@@ -3893,7 +3866,7 @@ static const struct block_device_operations floppy_fops = { | |||
3893 | .owner = THIS_MODULE, | 3866 | .owner = THIS_MODULE, |
3894 | .open = floppy_open, | 3867 | .open = floppy_open, |
3895 | .release = floppy_release, | 3868 | .release = floppy_release, |
3896 | .locked_ioctl = fd_ioctl, | 3869 | .ioctl = fd_ioctl, |
3897 | .getgeo = fd_getgeo, | 3870 | .getgeo = fd_getgeo, |
3898 | .media_changed = check_floppy_change, | 3871 | .media_changed = check_floppy_change, |
3899 | .revalidate_disk = floppy_revalidate, | 3872 | .revalidate_disk = floppy_revalidate, |
@@ -4126,7 +4099,7 @@ static ssize_t floppy_cmos_show(struct device *dev, | |||
4126 | return sprintf(buf, "%X\n", UDP->cmos); | 4099 | return sprintf(buf, "%X\n", UDP->cmos); |
4127 | } | 4100 | } |
4128 | 4101 | ||
4129 | DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL); | 4102 | static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL); |
4130 | 4103 | ||
4131 | static void floppy_device_release(struct device *dev) | 4104 | static void floppy_device_release(struct device *dev) |
4132 | { | 4105 | { |
@@ -4175,6 +4148,9 @@ static int __init floppy_init(void) | |||
4175 | int i, unit, drive; | 4148 | int i, unit, drive; |
4176 | int err, dr; | 4149 | int err, dr; |
4177 | 4150 | ||
4151 | set_debugt(); | ||
4152 | interruptjiffies = resultjiffies = jiffies; | ||
4153 | |||
4178 | #if defined(CONFIG_PPC) | 4154 | #if defined(CONFIG_PPC) |
4179 | if (check_legacy_ioport(FDC1)) | 4155 | if (check_legacy_ioport(FDC1)) |
4180 | return -ENODEV; | 4156 | return -ENODEV; |
@@ -4353,7 +4329,7 @@ out_unreg_platform_dev: | |||
4353 | platform_device_unregister(&floppy_device[drive]); | 4329 | platform_device_unregister(&floppy_device[drive]); |
4354 | out_flush_work: | 4330 | out_flush_work: |
4355 | flush_scheduled_work(); | 4331 | flush_scheduled_work(); |
4356 | if (usage_count) | 4332 | if (atomic_read(&usage_count)) |
4357 | floppy_release_irq_and_dma(); | 4333 | floppy_release_irq_and_dma(); |
4358 | out_unreg_region: | 4334 | out_unreg_region: |
4359 | blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); | 4335 | blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); |
@@ -4370,8 +4346,6 @@ out_put_disk: | |||
4370 | return err; | 4346 | return err; |
4371 | } | 4347 | } |
4372 | 4348 | ||
4373 | static DEFINE_SPINLOCK(floppy_usage_lock); | ||
4374 | |||
4375 | static const struct io_region { | 4349 | static const struct io_region { |
4376 | int offset; | 4350 | int offset; |
4377 | int size; | 4351 | int size; |
@@ -4417,14 +4391,8 @@ static void floppy_release_regions(int fdc) | |||
4417 | 4391 | ||
4418 | static int floppy_grab_irq_and_dma(void) | 4392 | static int floppy_grab_irq_and_dma(void) |
4419 | { | 4393 | { |
4420 | unsigned long flags; | 4394 | if (atomic_inc_return(&usage_count) > 1) |
4421 | |||
4422 | spin_lock_irqsave(&floppy_usage_lock, flags); | ||
4423 | if (usage_count++) { | ||
4424 | spin_unlock_irqrestore(&floppy_usage_lock, flags); | ||
4425 | return 0; | 4395 | return 0; |
4426 | } | ||
4427 | spin_unlock_irqrestore(&floppy_usage_lock, flags); | ||
4428 | 4396 | ||
4429 | /* | 4397 | /* |
4430 | * We might have scheduled a free_irq(), wait it to | 4398 | * We might have scheduled a free_irq(), wait it to |
@@ -4435,9 +4403,7 @@ static int floppy_grab_irq_and_dma(void) | |||
4435 | if (fd_request_irq()) { | 4403 | if (fd_request_irq()) { |
4436 | DPRINT("Unable to grab IRQ%d for the floppy driver\n", | 4404 | DPRINT("Unable to grab IRQ%d for the floppy driver\n", |
4437 | FLOPPY_IRQ); | 4405 | FLOPPY_IRQ); |
4438 | spin_lock_irqsave(&floppy_usage_lock, flags); | 4406 | atomic_dec(&usage_count); |
4439 | usage_count--; | ||
4440 | spin_unlock_irqrestore(&floppy_usage_lock, flags); | ||
4441 | return -1; | 4407 | return -1; |
4442 | } | 4408 | } |
4443 | if (fd_request_dma()) { | 4409 | if (fd_request_dma()) { |
@@ -4447,9 +4413,7 @@ static int floppy_grab_irq_and_dma(void) | |||
4447 | use_virtual_dma = can_use_virtual_dma = 1; | 4413 | use_virtual_dma = can_use_virtual_dma = 1; |
4448 | if (!(can_use_virtual_dma & 1)) { | 4414 | if (!(can_use_virtual_dma & 1)) { |
4449 | fd_free_irq(); | 4415 | fd_free_irq(); |
4450 | spin_lock_irqsave(&floppy_usage_lock, flags); | 4416 | atomic_dec(&usage_count); |
4451 | usage_count--; | ||
4452 | spin_unlock_irqrestore(&floppy_usage_lock, flags); | ||
4453 | return -1; | 4417 | return -1; |
4454 | } | 4418 | } |
4455 | } | 4419 | } |
@@ -4484,9 +4448,7 @@ cleanup: | |||
4484 | fd_free_dma(); | 4448 | fd_free_dma(); |
4485 | while (--fdc >= 0) | 4449 | while (--fdc >= 0) |
4486 | floppy_release_regions(fdc); | 4450 | floppy_release_regions(fdc); |
4487 | spin_lock_irqsave(&floppy_usage_lock, flags); | 4451 | atomic_dec(&usage_count); |
4488 | usage_count--; | ||
4489 | spin_unlock_irqrestore(&floppy_usage_lock, flags); | ||
4490 | return -1; | 4452 | return -1; |
4491 | } | 4453 | } |
4492 | 4454 | ||
@@ -4498,14 +4460,10 @@ static void floppy_release_irq_and_dma(void) | |||
4498 | #endif | 4460 | #endif |
4499 | long tmpsize; | 4461 | long tmpsize; |
4500 | unsigned long tmpaddr; | 4462 | unsigned long tmpaddr; |
4501 | unsigned long flags; | ||
4502 | 4463 | ||
4503 | spin_lock_irqsave(&floppy_usage_lock, flags); | 4464 | if (!atomic_dec_and_test(&usage_count)) |
4504 | if (--usage_count) { | ||
4505 | spin_unlock_irqrestore(&floppy_usage_lock, flags); | ||
4506 | return; | 4465 | return; |
4507 | } | 4466 | |
4508 | spin_unlock_irqrestore(&floppy_usage_lock, flags); | ||
4509 | if (irqdma_allocated) { | 4467 | if (irqdma_allocated) { |
4510 | fd_disable_dma(); | 4468 | fd_disable_dma(); |
4511 | fd_free_dma(); | 4469 | fd_free_dma(); |
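Note: with usage_count an atomic_t, the floppy_usage_lock spinlock becomes redundant: atomic_inc_return() and atomic_dec_and_test() give the "first user initializes, last user tears down" semantics directly. A sketch of the pairing, where setup_hw()/teardown_hw() are hypothetical stand-ins for the IRQ/DMA work:

	static atomic_t usage_count = ATOMIC_INIT(0);

	static int grab_resources(void)
	{
		if (atomic_inc_return(&usage_count) > 1)
			return 0;		/* someone else already set up */

		if (setup_hw() < 0) {		/* hypothetical */
			atomic_dec(&usage_count);
			return -1;
		}
		return 0;
	}

	static void release_resources(void)
	{
		if (!atomic_dec_and_test(&usage_count))
			return;			/* still in use elsewhere */
		teardown_hw();			/* hypothetical */
	}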
@@ -4598,7 +4556,7 @@ static void __exit floppy_module_exit(void) | |||
4598 | del_timer_sync(&fd_timer); | 4556 | del_timer_sync(&fd_timer); |
4599 | blk_cleanup_queue(floppy_queue); | 4557 | blk_cleanup_queue(floppy_queue); |
4600 | 4558 | ||
4601 | if (usage_count) | 4559 | if (atomic_read(&usage_count)) |
4602 | floppy_release_irq_and_dma(); | 4560 | floppy_release_irq_and_dma(); |
4603 | 4561 | ||
4604 | /* eject disk, if any */ | 4562 | /* eject disk, if any */ |
diff --git a/drivers/block/hd.c b/drivers/block/hd.c index 034e6dfc878c..30ec6b37424e 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c | |||
@@ -164,12 +164,12 @@ unsigned long read_timer(void) | |||
164 | unsigned long t, flags; | 164 | unsigned long t, flags; |
165 | int i; | 165 | int i; |
166 | 166 | ||
167 | spin_lock_irqsave(&i8253_lock, flags); | 167 | raw_spin_lock_irqsave(&i8253_lock, flags); |
168 | t = jiffies * 11932; | 168 | t = jiffies * 11932; |
169 | outb_p(0, 0x43); | 169 | outb_p(0, 0x43); |
170 | i = inb_p(0x40); | 170 | i = inb_p(0x40); |
171 | i |= inb(0x40) << 8; | 171 | i |= inb(0x40) << 8; |
172 | spin_unlock_irqrestore(&i8253_lock, flags); | 172 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
173 | return(t - i); | 173 | return(t - i); |
174 | } | 174 | } |
175 | #endif | 175 | #endif |
@@ -627,7 +627,7 @@ repeat: | |||
627 | req_data_dir(req) == READ ? "read" : "writ", | 627 | req_data_dir(req) == READ ? "read" : "writ", |
628 | cyl, head, sec, nsect, req->buffer); | 628 | cyl, head, sec, nsect, req->buffer); |
629 | #endif | 629 | #endif |
630 | if (blk_fs_request(req)) { | 630 | if (req->cmd_type == REQ_TYPE_FS) { |
631 | switch (rq_data_dir(req)) { | 631 | switch (rq_data_dir(req)) { |
632 | case READ: | 632 | case READ: |
633 | hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ, | 633 | hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ, |
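Note: blk_fs_request() and its siblings are removed in this series; callers now test rq->cmd_type directly, as hd.c does here and mg_disk/nbd do below. The whole replacement, should a driver want the old spelling back:

	static inline bool is_fs_request(struct request *rq)
	{
		return rq->cmd_type == REQ_TYPE_FS;
	}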
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 8546d123b9a7..91797bbbe702 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -67,6 +67,7 @@ | |||
67 | #include <linux/compat.h> | 67 | #include <linux/compat.h> |
68 | #include <linux/suspend.h> | 68 | #include <linux/suspend.h> |
69 | #include <linux/freezer.h> | 69 | #include <linux/freezer.h> |
70 | #include <linux/smp_lock.h> | ||
70 | #include <linux/writeback.h> | 71 | #include <linux/writeback.h> |
71 | #include <linux/buffer_head.h> /* for invalidate_bdev() */ | 72 | #include <linux/buffer_head.h> /* for invalidate_bdev() */ |
72 | #include <linux/completion.h> | 73 | #include <linux/completion.h> |
@@ -476,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
476 | pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; | 477 | pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; |
477 | 478 | ||
478 | if (bio_rw(bio) == WRITE) { | 479 | if (bio_rw(bio) == WRITE) { |
479 | bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER); | 480 | bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER); |
480 | struct file *file = lo->lo_backing_file; | 481 | struct file *file = lo->lo_backing_file; |
481 | 482 | ||
482 | if (barrier) { | 483 | if (barrier) { |
@@ -485,7 +486,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
485 | goto out; | 486 | goto out; |
486 | } | 487 | } |
487 | 488 | ||
488 | ret = vfs_fsync(file, file->f_path.dentry, 0); | 489 | ret = vfs_fsync(file, 0); |
489 | if (unlikely(ret)) { | 490 | if (unlikely(ret)) { |
490 | ret = -EIO; | 491 | ret = -EIO; |
491 | goto out; | 492 | goto out; |
@@ -495,7 +496,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
495 | ret = lo_send(lo, bio, pos); | 496 | ret = lo_send(lo, bio, pos); |
496 | 497 | ||
497 | if (barrier && !ret) { | 498 | if (barrier && !ret) { |
498 | ret = vfs_fsync(file, file->f_path.dentry, 0); | 499 | ret = vfs_fsync(file, 0); |
499 | if (unlikely(ret)) | 500 | if (unlikely(ret)) |
500 | ret = -EIO; | 501 | ret = -EIO; |
501 | } | 502 | } |
@@ -831,10 +832,12 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
831 | lo->lo_queue->unplug_fn = loop_unplug; | 832 | lo->lo_queue->unplug_fn = loop_unplug; |
832 | 833 | ||
833 | if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) | 834 | if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) |
834 | blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL); | 835 | blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN); |
835 | 836 | ||
836 | set_capacity(lo->lo_disk, size); | 837 | set_capacity(lo->lo_disk, size); |
837 | bd_set_size(bdev, size << 9); | 838 | bd_set_size(bdev, size << 9); |
839 | /* let user-space know about the new size */ | ||
840 | kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); | ||
838 | 841 | ||
839 | set_blocksize(bdev, lo_blocksize); | 842 | set_blocksize(bdev, lo_blocksize); |
840 | 843 | ||
@@ -858,6 +861,7 @@ out_clr: | |||
858 | set_capacity(lo->lo_disk, 0); | 861 | set_capacity(lo->lo_disk, 0); |
859 | invalidate_bdev(bdev); | 862 | invalidate_bdev(bdev); |
860 | bd_set_size(bdev, 0); | 863 | bd_set_size(bdev, 0); |
864 | kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); | ||
861 | mapping_set_gfp_mask(mapping, lo->old_gfp_mask); | 865 | mapping_set_gfp_mask(mapping, lo->old_gfp_mask); |
862 | lo->lo_state = Lo_unbound; | 866 | lo->lo_state = Lo_unbound; |
863 | out_putf: | 867 | out_putf: |
@@ -944,8 +948,11 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) | |||
944 | if (bdev) | 948 | if (bdev) |
945 | invalidate_bdev(bdev); | 949 | invalidate_bdev(bdev); |
946 | set_capacity(lo->lo_disk, 0); | 950 | set_capacity(lo->lo_disk, 0); |
947 | if (bdev) | 951 | if (bdev) { |
948 | bd_set_size(bdev, 0); | 952 | bd_set_size(bdev, 0); |
953 | /* let user-space know about this change */ | ||
954 | kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); | ||
955 | } | ||
949 | mapping_set_gfp_mask(filp->f_mapping, gfp); | 956 | mapping_set_gfp_mask(filp->f_mapping, gfp); |
950 | lo->lo_state = Lo_unbound; | 957 | lo->lo_state = Lo_unbound; |
951 | /* This is safe: open() is still holding a reference. */ | 958 | /* This is safe: open() is still holding a reference. */ |
@@ -1189,6 +1196,8 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev) | |||
1189 | sz <<= 9; | 1196 | sz <<= 9; |
1190 | mutex_lock(&bdev->bd_mutex); | 1197 | mutex_lock(&bdev->bd_mutex); |
1191 | bd_set_size(bdev, sz); | 1198 | bd_set_size(bdev, sz); |
1199 | /* let user-space know about the new size */ | ||
1200 | kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); | ||
1192 | mutex_unlock(&bdev->bd_mutex); | 1201 | mutex_unlock(&bdev->bd_mutex); |
1193 | 1202 | ||
1194 | out: | 1203 | out: |
@@ -1400,9 +1409,11 @@ static int lo_open(struct block_device *bdev, fmode_t mode) | |||
1400 | { | 1409 | { |
1401 | struct loop_device *lo = bdev->bd_disk->private_data; | 1410 | struct loop_device *lo = bdev->bd_disk->private_data; |
1402 | 1411 | ||
1412 | lock_kernel(); | ||
1403 | mutex_lock(&lo->lo_ctl_mutex); | 1413 | mutex_lock(&lo->lo_ctl_mutex); |
1404 | lo->lo_refcnt++; | 1414 | lo->lo_refcnt++; |
1405 | mutex_unlock(&lo->lo_ctl_mutex); | 1415 | mutex_unlock(&lo->lo_ctl_mutex); |
1416 | unlock_kernel(); | ||
1406 | 1417 | ||
1407 | return 0; | 1418 | return 0; |
1408 | } | 1419 | } |
@@ -1412,6 +1423,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode) | |||
1412 | struct loop_device *lo = disk->private_data; | 1423 | struct loop_device *lo = disk->private_data; |
1413 | int err; | 1424 | int err; |
1414 | 1425 | ||
1426 | lock_kernel(); | ||
1415 | mutex_lock(&lo->lo_ctl_mutex); | 1427 | mutex_lock(&lo->lo_ctl_mutex); |
1416 | 1428 | ||
1417 | if (--lo->lo_refcnt) | 1429 | if (--lo->lo_refcnt) |
@@ -1436,6 +1448,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode) | |||
1436 | out: | 1448 | out: |
1437 | mutex_unlock(&lo->lo_ctl_mutex); | 1449 | mutex_unlock(&lo->lo_ctl_mutex); |
1438 | out_unlocked: | 1450 | out_unlocked: |
1451 | lock_kernel(); | ||
1439 | return 0; | 1452 | return 0; |
1440 | } | 1453 | } |
1441 | 1454 | ||
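Note: the loop changes add one piece of behavior: every place the backing size changes (bind, unbind, explicit resize) now emits a KOBJ_CHANGE uevent so udev and partition scanners learn the new capacity. A hedged sketch of the helper these call sites effectively open-code (loop.c itself repeats the two lines inline):

	static void announce_new_size(struct block_device *bdev, loff_t bytes)
	{
		bd_set_size(bdev, bytes);
		/* let user-space know about the new size */
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	}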
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index 28db925dbdad..76fa3deaee84 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c | |||
@@ -670,7 +670,7 @@ static void mg_request_poll(struct request_queue *q) | |||
670 | break; | 670 | break; |
671 | } | 671 | } |
672 | 672 | ||
673 | if (unlikely(!blk_fs_request(host->req))) { | 673 | if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { |
674 | mg_end_request_cur(host, -EIO); | 674 | mg_end_request_cur(host, -EIO); |
675 | continue; | 675 | continue; |
676 | } | 676 | } |
@@ -756,7 +756,7 @@ static void mg_request(struct request_queue *q) | |||
756 | continue; | 756 | continue; |
757 | } | 757 | } |
758 | 758 | ||
759 | if (unlikely(!blk_fs_request(req))) { | 759 | if (unlikely(req->cmd_type != REQ_TYPE_FS)) { |
760 | mg_end_request_cur(host, -EIO); | 760 | mg_end_request_cur(host, -EIO); |
761 | continue; | 761 | continue; |
762 | } | 762 | } |
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev) | |||
974 | host->breq->queuedata = host; | 974 | host->breq->queuedata = host; |
975 | 975 | ||
976 | /* mflash is random device, thanx for the noop */ | 976 | /* mflash is random device, thanx for the noop */ |
977 | elevator_exit(host->breq->elevator); | 977 | err = elevator_change(host->breq, "noop"); |
978 | err = elevator_init(host->breq, "noop"); | ||
979 | if (err) { | 978 | if (err) { |
980 | printk(KERN_ERR "%s:%d (elevator_init) fail\n", | 979 | printk(KERN_ERR "%s:%d (elevator_init) fail\n", |
981 | __func__, __LINE__); | 980 | __func__, __LINE__); |
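Note: elevator_change() swaps the I/O scheduler on an already-initialized queue by name, replacing the fragile elevator_exit()+elevator_init() sequence that left the queue without a scheduler if init failed. Usage in the shape mg_disk now uses it, as a sketch:

	static int mflash_use_noop(struct request_queue *q)
	{
		/* noop suits seek-free flash; returns 0 or a -errno */
		return elevator_change(q, "noop");
	}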
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 218d091f3c52..0daa422aa281 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Note that you can not swap over this thing, yet. Seems to work but | 4 | * Note that you can not swap over this thing, yet. Seems to work but |
5 | * deadlocks sometimes - you can not swap over TCP in general. | 5 | * deadlocks sometimes - you can not swap over TCP in general. |
6 | * | 6 | * |
7 | * Copyright 1997-2000, 2008 Pavel Machek <pavel@suse.cz> | 7 | * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> |
8 | * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> | 8 | * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> |
9 | * | 9 | * |
10 | * This file is released under GPLv2 or later. | 10 | * This file is released under GPLv2 or later. |
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/file.h> | 25 | #include <linux/file.h> |
26 | #include <linux/ioctl.h> | 26 | #include <linux/ioctl.h> |
27 | #include <linux/smp_lock.h> | ||
27 | #include <linux/compiler.h> | 28 | #include <linux/compiler.h> |
28 | #include <linux/err.h> | 29 | #include <linux/err.h> |
29 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
@@ -448,7 +449,7 @@ static void nbd_clear_que(struct nbd_device *lo) | |||
448 | 449 | ||
449 | static void nbd_handle_req(struct nbd_device *lo, struct request *req) | 450 | static void nbd_handle_req(struct nbd_device *lo, struct request *req) |
450 | { | 451 | { |
451 | if (!blk_fs_request(req)) | 452 | if (req->cmd_type != REQ_TYPE_FS) |
452 | goto error_out; | 453 | goto error_out; |
453 | 454 | ||
454 | nbd_cmd(req) = NBD_CMD_READ; | 455 | nbd_cmd(req) = NBD_CMD_READ; |
@@ -716,9 +717,11 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, | |||
716 | dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", | 717 | dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", |
717 | lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); | 718 | lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); |
718 | 719 | ||
720 | lock_kernel(); | ||
719 | mutex_lock(&lo->tx_lock); | 721 | mutex_lock(&lo->tx_lock); |
720 | error = __nbd_ioctl(bdev, lo, cmd, arg); | 722 | error = __nbd_ioctl(bdev, lo, cmd, arg); |
721 | mutex_unlock(&lo->tx_lock); | 723 | mutex_unlock(&lo->tx_lock); |
724 | unlock_kernel(); | ||
722 | 725 | ||
723 | return error; | 726 | return error; |
724 | } | 727 | } |
@@ -726,7 +729,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, | |||
726 | static const struct block_device_operations nbd_fops = | 729 | static const struct block_device_operations nbd_fops = |
727 | { | 730 | { |
728 | .owner = THIS_MODULE, | 731 | .owner = THIS_MODULE, |
729 | .locked_ioctl = nbd_ioctl, | 732 | .ioctl = nbd_ioctl, |
730 | }; | 733 | }; |
731 | 734 | ||
732 | /* | 735 | /* |
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index 6cd8b705b11b..2284b4f05c62 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c | |||
@@ -310,7 +310,8 @@ static void osdblk_rq_fn(struct request_queue *q) | |||
310 | break; | 310 | break; |
311 | 311 | ||
312 | /* filter out block requests we don't understand */ | 312 | /* filter out block requests we don't understand */ |
313 | if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) { | 313 | if (rq->cmd_type != REQ_TYPE_FS && |
314 | !(rq->cmd_flags & REQ_HARDBARRIER)) { | ||
314 | blk_end_request_all(rq, 0); | 315 | blk_end_request_all(rq, 0); |
315 | continue; | 316 | continue; |
316 | } | 317 | } |
@@ -322,7 +323,7 @@ static void osdblk_rq_fn(struct request_queue *q) | |||
322 | * driver-specific, etc. | 323 | * driver-specific, etc. |
323 | */ | 324 | */ |
324 | 325 | ||
325 | do_flush = (rq->special == (void *) 0xdeadbeefUL); | 326 | do_flush = rq->cmd_flags & REQ_FLUSH; |
326 | do_write = (rq_data_dir(rq) == WRITE); | 327 | do_write = (rq_data_dir(rq) == WRITE); |
327 | 328 | ||
328 | if (!do_flush) { /* osd_flush does not use a bio */ | 329 | if (!do_flush) { /* osd_flush does not use a bio */ |
@@ -379,14 +380,6 @@ static void osdblk_rq_fn(struct request_queue *q) | |||
379 | } | 380 | } |
380 | } | 381 | } |
381 | 382 | ||
382 | static void osdblk_prepare_flush(struct request_queue *q, struct request *rq) | ||
383 | { | ||
384 | /* add driver-specific marker, to indicate that this request | ||
385 | * is a flush command | ||
386 | */ | ||
387 | rq->special = (void *) 0xdeadbeefUL; | ||
388 | } | ||
389 | |||
390 | static void osdblk_free_disk(struct osdblk_device *osdev) | 383 | static void osdblk_free_disk(struct osdblk_device *osdev) |
391 | { | 384 | { |
392 | struct gendisk *disk = osdev->disk; | 385 | struct gendisk *disk = osdev->disk; |
@@ -446,7 +439,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev) | |||
446 | blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); | 439 | blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); |
447 | 440 | ||
448 | blk_queue_prep_rq(q, blk_queue_start_tag); | 441 | blk_queue_prep_rq(q, blk_queue_start_tag); |
449 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, osdblk_prepare_flush); | 442 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); |
450 | 443 | ||
451 | disk->queue = q; | 444 | disk->queue = q; |
452 | 445 | ||
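Note: osdblk no longer needs a prepare_flush callback or the private 0xdeadbeef rq->special marker: with blk_queue_ordered() taking no callback, the block core tags flush requests itself via REQ_FLUSH in cmd_flags. The consumer side reduces to a flag test, sketched:

	static bool rq_is_flush(struct request *rq)
	{
		return rq->cmd_flags & REQ_FLUSH;
	}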
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 71acf4e53356..76f8565e1e8d 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
@@ -138,6 +138,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY}; | |||
138 | #include <linux/cdrom.h> | 138 | #include <linux/cdrom.h> |
139 | #include <linux/spinlock.h> | 139 | #include <linux/spinlock.h> |
140 | #include <linux/blkdev.h> | 140 | #include <linux/blkdev.h> |
141 | #include <linux/smp_lock.h> | ||
141 | #include <asm/uaccess.h> | 142 | #include <asm/uaccess.h> |
142 | 143 | ||
143 | static DEFINE_SPINLOCK(pcd_lock); | 144 | static DEFINE_SPINLOCK(pcd_lock); |
@@ -224,13 +225,21 @@ static char *pcd_buf; /* buffer for request in progress */ | |||
224 | static int pcd_block_open(struct block_device *bdev, fmode_t mode) | 225 | static int pcd_block_open(struct block_device *bdev, fmode_t mode) |
225 | { | 226 | { |
226 | struct pcd_unit *cd = bdev->bd_disk->private_data; | 227 | struct pcd_unit *cd = bdev->bd_disk->private_data; |
227 | return cdrom_open(&cd->info, bdev, mode); | 228 | int ret; |
229 | |||
230 | lock_kernel(); | ||
231 | ret = cdrom_open(&cd->info, bdev, mode); | ||
232 | unlock_kernel(); | ||
233 | |||
234 | return ret; | ||
228 | } | 235 | } |
229 | 236 | ||
230 | static int pcd_block_release(struct gendisk *disk, fmode_t mode) | 237 | static int pcd_block_release(struct gendisk *disk, fmode_t mode) |
231 | { | 238 | { |
232 | struct pcd_unit *cd = disk->private_data; | 239 | struct pcd_unit *cd = disk->private_data; |
240 | lock_kernel(); | ||
233 | cdrom_release(&cd->info, mode); | 241 | cdrom_release(&cd->info, mode); |
242 | unlock_kernel(); | ||
234 | return 0; | 243 | return 0; |
235 | } | 244 | } |
236 | 245 | ||
@@ -238,7 +247,13 @@ static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode, | |||
238 | unsigned cmd, unsigned long arg) | 247 | unsigned cmd, unsigned long arg) |
239 | { | 248 | { |
240 | struct pcd_unit *cd = bdev->bd_disk->private_data; | 249 | struct pcd_unit *cd = bdev->bd_disk->private_data; |
241 | return cdrom_ioctl(&cd->info, bdev, mode, cmd, arg); | 250 | int ret; |
251 | |||
252 | lock_kernel(); | ||
253 | ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg); | ||
254 | unlock_kernel(); | ||
255 | |||
256 | return ret; | ||
242 | } | 257 | } |
243 | 258 | ||
244 | static int pcd_block_media_changed(struct gendisk *disk) | 259 | static int pcd_block_media_changed(struct gendisk *disk) |
@@ -251,7 +266,7 @@ static const struct block_device_operations pcd_bdops = { | |||
251 | .owner = THIS_MODULE, | 266 | .owner = THIS_MODULE, |
252 | .open = pcd_block_open, | 267 | .open = pcd_block_open, |
253 | .release = pcd_block_release, | 268 | .release = pcd_block_release, |
254 | .locked_ioctl = pcd_block_ioctl, | 269 | .ioctl = pcd_block_ioctl, |
255 | .media_changed = pcd_block_media_changed, | 270 | .media_changed = pcd_block_media_changed, |
256 | }; | 271 | }; |
257 | 272 | ||
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index c1e5cd029b23..985f0d4f1d1e 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -153,6 +153,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV}; | |||
153 | #include <linux/blkdev.h> | 153 | #include <linux/blkdev.h> |
154 | #include <linux/blkpg.h> | 154 | #include <linux/blkpg.h> |
155 | #include <linux/kernel.h> | 155 | #include <linux/kernel.h> |
156 | #include <linux/smp_lock.h> | ||
156 | #include <asm/uaccess.h> | 157 | #include <asm/uaccess.h> |
157 | #include <linux/workqueue.h> | 158 | #include <linux/workqueue.h> |
158 | 159 | ||
@@ -439,7 +440,7 @@ static char *pd_buf; /* buffer for request in progress */ | |||
439 | 440 | ||
440 | static enum action do_pd_io_start(void) | 441 | static enum action do_pd_io_start(void) |
441 | { | 442 | { |
442 | if (blk_special_request(pd_req)) { | 443 | if (pd_req->cmd_type == REQ_TYPE_SPECIAL) { |
443 | phase = pd_special; | 444 | phase = pd_special; |
444 | return pd_special(); | 445 | return pd_special(); |
445 | } | 446 | } |
@@ -735,12 +736,14 @@ static int pd_open(struct block_device *bdev, fmode_t mode) | |||
735 | { | 736 | { |
736 | struct pd_unit *disk = bdev->bd_disk->private_data; | 737 | struct pd_unit *disk = bdev->bd_disk->private_data; |
737 | 738 | ||
739 | lock_kernel(); | ||
738 | disk->access++; | 740 | disk->access++; |
739 | 741 | ||
740 | if (disk->removable) { | 742 | if (disk->removable) { |
741 | pd_special_command(disk, pd_media_check); | 743 | pd_special_command(disk, pd_media_check); |
742 | pd_special_command(disk, pd_door_lock); | 744 | pd_special_command(disk, pd_door_lock); |
743 | } | 745 | } |
746 | unlock_kernel(); | ||
744 | return 0; | 747 | return 0; |
745 | } | 748 | } |
746 | 749 | ||
@@ -768,8 +771,10 @@ static int pd_ioctl(struct block_device *bdev, fmode_t mode, | |||
768 | 771 | ||
769 | switch (cmd) { | 772 | switch (cmd) { |
770 | case CDROMEJECT: | 773 | case CDROMEJECT: |
774 | lock_kernel(); | ||
771 | if (disk->access == 1) | 775 | if (disk->access == 1) |
772 | pd_special_command(disk, pd_eject); | 776 | pd_special_command(disk, pd_eject); |
777 | unlock_kernel(); | ||
773 | return 0; | 778 | return 0; |
774 | default: | 779 | default: |
775 | return -EINVAL; | 780 | return -EINVAL; |
@@ -780,8 +785,10 @@ static int pd_release(struct gendisk *p, fmode_t mode) | |||
780 | { | 785 | { |
781 | struct pd_unit *disk = p->private_data; | 786 | struct pd_unit *disk = p->private_data; |
782 | 787 | ||
788 | lock_kernel(); | ||
783 | if (!--disk->access && disk->removable) | 789 | if (!--disk->access && disk->removable) |
784 | pd_special_command(disk, pd_door_unlock); | 790 | pd_special_command(disk, pd_door_unlock); |
791 | unlock_kernel(); | ||
785 | 792 | ||
786 | return 0; | 793 | return 0; |
787 | } | 794 | } |
@@ -812,7 +819,7 @@ static const struct block_device_operations pd_fops = { | |||
812 | .owner = THIS_MODULE, | 819 | .owner = THIS_MODULE, |
813 | .open = pd_open, | 820 | .open = pd_open, |
814 | .release = pd_release, | 821 | .release = pd_release, |
815 | .locked_ioctl = pd_ioctl, | 822 | .ioctl = pd_ioctl, |
816 | .getgeo = pd_getgeo, | 823 | .getgeo = pd_getgeo, |
817 | .media_changed = pd_check_media, | 824 | .media_changed = pd_check_media, |
818 | .revalidate_disk= pd_revalidate | 825 | .revalidate_disk= pd_revalidate |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index c059aab3006b..4457b494882a 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
@@ -152,6 +152,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY}; | |||
152 | #include <linux/spinlock.h> | 152 | #include <linux/spinlock.h> |
153 | #include <linux/blkdev.h> | 153 | #include <linux/blkdev.h> |
154 | #include <linux/blkpg.h> | 154 | #include <linux/blkpg.h> |
155 | #include <linux/smp_lock.h> | ||
155 | #include <asm/uaccess.h> | 156 | #include <asm/uaccess.h> |
156 | 157 | ||
157 | static DEFINE_SPINLOCK(pf_spin_lock); | 158 | static DEFINE_SPINLOCK(pf_spin_lock); |
@@ -266,7 +267,7 @@ static const struct block_device_operations pf_fops = { | |||
266 | .owner = THIS_MODULE, | 267 | .owner = THIS_MODULE, |
267 | .open = pf_open, | 268 | .open = pf_open, |
268 | .release = pf_release, | 269 | .release = pf_release, |
269 | .locked_ioctl = pf_ioctl, | 270 | .ioctl = pf_ioctl, |
270 | .getgeo = pf_getgeo, | 271 | .getgeo = pf_getgeo, |
271 | .media_changed = pf_check_media, | 272 | .media_changed = pf_check_media, |
272 | }; | 273 | }; |
@@ -299,20 +300,26 @@ static void __init pf_init_units(void) | |||
299 | static int pf_open(struct block_device *bdev, fmode_t mode) | 300 | static int pf_open(struct block_device *bdev, fmode_t mode) |
300 | { | 301 | { |
301 | struct pf_unit *pf = bdev->bd_disk->private_data; | 302 | struct pf_unit *pf = bdev->bd_disk->private_data; |
303 | int ret; | ||
302 | 304 | ||
305 | lock_kernel(); | ||
303 | pf_identify(pf); | 306 | pf_identify(pf); |
304 | 307 | ||
308 | ret = -ENODEV; | ||
305 | if (pf->media_status == PF_NM) | 309 | if (pf->media_status == PF_NM) |
306 | return -ENODEV; | 310 | goto out; |
307 | 311 | ||
312 | ret = -EROFS; | ||
308 | if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE)) | 313 | if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE)) |
309 | return -EROFS; | 314 | goto out; |
310 | 315 | ||
316 | ret = 0; | ||
311 | pf->access++; | 317 | pf->access++; |
312 | if (pf->removable) | 318 | if (pf->removable) |
313 | pf_lock(pf, 1); | 319 | pf_lock(pf, 1); |
314 | 320 | out: | |
315 | return 0; | 321 | unlock_kernel(); |
322 | return ret; | ||
316 | } | 323 | } |
317 | 324 | ||
318 | static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo) | 325 | static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
@@ -342,7 +349,10 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, u | |||
342 | 349 | ||
343 | if (pf->access != 1) | 350 | if (pf->access != 1) |
344 | return -EBUSY; | 351 | return -EBUSY; |
352 | lock_kernel(); | ||
345 | pf_eject(pf); | 353 | pf_eject(pf); |
354 | unlock_kernel(); | ||
355 | |||
346 | return 0; | 356 | return 0; |
347 | } | 357 | } |
348 | 358 | ||
@@ -350,14 +360,18 @@ static int pf_release(struct gendisk *disk, fmode_t mode) | |||
350 | { | 360 | { |
351 | struct pf_unit *pf = disk->private_data; | 361 | struct pf_unit *pf = disk->private_data; |
352 | 362 | ||
353 | if (pf->access <= 0) | 363 | lock_kernel(); |
364 | if (pf->access <= 0) { | ||
365 | unlock_kernel(); | ||
354 | return -EINVAL; | 366 | return -EINVAL; |
367 | } | ||
355 | 368 | ||
356 | pf->access--; | 369 | pf->access--; |
357 | 370 | ||
358 | if (!pf->access && pf->removable) | 371 | if (!pf->access && pf->removable) |
359 | pf_lock(pf, 0); | 372 | pf_lock(pf, 0); |
360 | 373 | ||
374 | unlock_kernel(); | ||
361 | return 0; | 375 | return 0; |
362 | 376 | ||
363 | } | 377 | } |
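Note: because pf_open() now brackets its body with lock_kernel()/unlock_kernel(), its early returns are converted into a single-exit goto ladder so every path unlocks exactly once. The shape as a generic sketch — media_ok() is a hypothetical check standing in for the real media-status tests:

	static int sample_open(struct block_device *bdev, fmode_t mode)
	{
		struct pf_unit *pf = bdev->bd_disk->private_data;
		int ret;

		lock_kernel();

		ret = -ENODEV;
		if (!media_ok(pf))		/* hypothetical check */
			goto out;

		ret = 0;
		pf->access++;
	out:
		unlock_kernel();		/* sole unlock point */
		return ret;
	}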
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 8a549db2aa78..37a2bb595076 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/seq_file.h> | 57 | #include <linux/seq_file.h> |
58 | #include <linux/miscdevice.h> | 58 | #include <linux/miscdevice.h> |
59 | #include <linux/freezer.h> | 59 | #include <linux/freezer.h> |
60 | #include <linux/smp_lock.h> | ||
60 | #include <linux/mutex.h> | 61 | #include <linux/mutex.h> |
61 | #include <linux/slab.h> | 62 | #include <linux/slab.h> |
62 | #include <scsi/scsi_cmnd.h> | 63 | #include <scsi/scsi_cmnd.h> |
@@ -1221,7 +1222,7 @@ static int pkt_start_recovery(struct packet_data *pkt) | |||
1221 | pkt->bio->bi_flags = 1 << BIO_UPTODATE; | 1222 | pkt->bio->bi_flags = 1 << BIO_UPTODATE; |
1222 | pkt->bio->bi_idx = 0; | 1223 | pkt->bio->bi_idx = 0; |
1223 | 1224 | ||
1224 | BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW)); | 1225 | BUG_ON(pkt->bio->bi_rw != REQ_WRITE); |
1225 | BUG_ON(pkt->bio->bi_vcnt != pkt->frames); | 1226 | BUG_ON(pkt->bio->bi_vcnt != pkt->frames); |
1226 | BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE); | 1227 | BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE); |
1227 | BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write); | 1228 | BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write); |
@@ -2368,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush) | |||
2368 | pkt_shrink_pktlist(pd); | 2369 | pkt_shrink_pktlist(pd); |
2369 | } | 2370 | } |
2370 | 2371 | ||
2371 | static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) | 2372 | static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) |
2372 | { | 2373 | { |
2373 | if (dev_minor >= MAX_WRITERS) | 2374 | if (dev_minor >= MAX_WRITERS) |
2374 | return NULL; | 2375 | return NULL; |
@@ -2382,6 +2383,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode) | |||
2382 | 2383 | ||
2383 | VPRINTK(DRIVER_NAME": entering open\n"); | 2384 | VPRINTK(DRIVER_NAME": entering open\n"); |
2384 | 2385 | ||
2386 | lock_kernel(); | ||
2385 | mutex_lock(&ctl_mutex); | 2387 | mutex_lock(&ctl_mutex); |
2386 | pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); | 2388 | pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); |
2387 | if (!pd) { | 2389 | if (!pd) { |
@@ -2409,6 +2411,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode) | |||
2409 | } | 2411 | } |
2410 | 2412 | ||
2411 | mutex_unlock(&ctl_mutex); | 2413 | mutex_unlock(&ctl_mutex); |
2414 | unlock_kernel(); | ||
2412 | return 0; | 2415 | return 0; |
2413 | 2416 | ||
2414 | out_dec: | 2417 | out_dec: |
@@ -2416,6 +2419,7 @@ out_dec: | |||
2416 | out: | 2419 | out: |
2417 | VPRINTK(DRIVER_NAME": failed open (%d)\n", ret); | 2420 | VPRINTK(DRIVER_NAME": failed open (%d)\n", ret); |
2418 | mutex_unlock(&ctl_mutex); | 2421 | mutex_unlock(&ctl_mutex); |
2422 | unlock_kernel(); | ||
2419 | return ret; | 2423 | return ret; |
2420 | } | 2424 | } |
2421 | 2425 | ||
@@ -2424,6 +2428,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode) | |||
2424 | struct pktcdvd_device *pd = disk->private_data; | 2428 | struct pktcdvd_device *pd = disk->private_data; |
2425 | int ret = 0; | 2429 | int ret = 0; |
2426 | 2430 | ||
2431 | lock_kernel(); | ||
2427 | mutex_lock(&ctl_mutex); | 2432 | mutex_lock(&ctl_mutex); |
2428 | pd->refcnt--; | 2433 | pd->refcnt--; |
2429 | BUG_ON(pd->refcnt < 0); | 2434 | BUG_ON(pd->refcnt < 0); |
@@ -2432,6 +2437,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode) | |||
2432 | pkt_release_dev(pd, flush); | 2437 | pkt_release_dev(pd, flush); |
2433 | } | 2438 | } |
2434 | mutex_unlock(&ctl_mutex); | 2439 | mutex_unlock(&ctl_mutex); |
2440 | unlock_kernel(); | ||
2435 | return ret; | 2441 | return ret; |
2436 | } | 2442 | } |
2437 | 2443 | ||
@@ -2762,10 +2768,12 @@ out_mem: | |||
2762 | static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) | 2768 | static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) |
2763 | { | 2769 | { |
2764 | struct pktcdvd_device *pd = bdev->bd_disk->private_data; | 2770 | struct pktcdvd_device *pd = bdev->bd_disk->private_data; |
2771 | int ret; | ||
2765 | 2772 | ||
2766 | VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, | 2773 | VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, |
2767 | MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); | 2774 | MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); |
2768 | 2775 | ||
2776 | lock_kernel(); | ||
2769 | switch (cmd) { | 2777 | switch (cmd) { |
2770 | case CDROMEJECT: | 2778 | case CDROMEJECT: |
2771 | /* | 2779 | /* |
@@ -2783,14 +2791,16 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, | |||
2783 | case CDROM_LAST_WRITTEN: | 2791 | case CDROM_LAST_WRITTEN: |
2784 | case CDROM_SEND_PACKET: | 2792 | case CDROM_SEND_PACKET: |
2785 | case SCSI_IOCTL_SEND_COMMAND: | 2793 | case SCSI_IOCTL_SEND_COMMAND: |
2786 | return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); | 2794 | ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); |
2795 | break; | ||
2787 | 2796 | ||
2788 | default: | 2797 | default: |
2789 | VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); | 2798 | VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); |
2790 | return -ENOTTY; | 2799 | ret = -ENOTTY; |
2791 | } | 2800 | } |
2801 | unlock_kernel(); | ||
2792 | 2802 | ||
2793 | return 0; | 2803 | return ret; |
2794 | } | 2804 | } |
2795 | 2805 | ||
2796 | static int pkt_media_changed(struct gendisk *disk) | 2806 | static int pkt_media_changed(struct gendisk *disk) |
@@ -2812,7 +2822,7 @@ static const struct block_device_operations pktcdvd_ops = { | |||
2812 | .owner = THIS_MODULE, | 2822 | .owner = THIS_MODULE, |
2813 | .open = pkt_open, | 2823 | .open = pkt_open, |
2814 | .release = pkt_close, | 2824 | .release = pkt_close, |
2815 | .locked_ioctl = pkt_ioctl, | 2825 | .ioctl = pkt_ioctl, |
2816 | .media_changed = pkt_media_changed, | 2826 | .media_changed = pkt_media_changed, |
2817 | }; | 2827 | }; |
2818 | 2828 | ||
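
Note: converting .locked_ioctl to plain .ioctl, as pkt_ioctl does above, means the handler can no longer return straight out of the switch: each case assigns a result so that a single unlock_kernel() covers all paths. A hedged sketch of that single-exit shape; EXAMPLE_CMD and do_work() are placeholders, not part of the pktcdvd command set:

	static int example_ioctl(struct block_device *bdev, fmode_t mode,
				 unsigned int cmd, unsigned long arg)
	{
		int ret;

		lock_kernel();
		switch (cmd) {
		case EXAMPLE_CMD:	/* used to 'return do_work(bdev);' directly */
			ret = do_work(bdev);
			break;
		default:
			ret = -ENOTTY;
		}
		unlock_kernel();	/* one unlock covers every case */
		return ret;
	}
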
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 3b419e3fffa1..03688c2da319 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c | |||
@@ -113,7 +113,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev, | |||
113 | memcpy(buf, dev->bounce_buf+offset, size); | 113 | memcpy(buf, dev->bounce_buf+offset, size); |
114 | offset += size; | 114 | offset += size; |
115 | flush_kernel_dcache_page(bvec->bv_page); | 115 | flush_kernel_dcache_page(bvec->bv_page); |
116 | bvec_kunmap_irq(bvec, &flags); | 116 | bvec_kunmap_irq(buf, &flags); |
117 | i++; | 117 | i++; |
118 | } | 118 | } |
119 | } | 119 | } |
@@ -196,13 +196,12 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, | |||
196 | dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); | 196 | dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); |
197 | 197 | ||
198 | while ((req = blk_fetch_request(q))) { | 198 | while ((req = blk_fetch_request(q))) { |
199 | if (blk_fs_request(req)) { | 199 | if (req->cmd_flags & REQ_FLUSH) { |
200 | if (ps3disk_submit_request_sg(dev, req)) | ||
201 | break; | ||
202 | } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && | ||
203 | req->cmd[0] == REQ_LB_OP_FLUSH) { | ||
204 | if (ps3disk_submit_flush_request(dev, req)) | 200 | if (ps3disk_submit_flush_request(dev, req)) |
205 | break; | 201 | break; |
202 | } else if (req->cmd_type == REQ_TYPE_FS) { | ||
203 | if (ps3disk_submit_request_sg(dev, req)) | ||
204 | break; | ||
206 | } else { | 205 | } else { |
207 | blk_dump_rq_flags(req, DEVICE_NAME " bad request"); | 206 | blk_dump_rq_flags(req, DEVICE_NAME " bad request"); |
208 | __blk_end_request_all(req, -EIO); | 207 | __blk_end_request_all(req, -EIO); |
@@ -257,8 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data) | |||
257 | return IRQ_HANDLED; | 256 | return IRQ_HANDLED; |
258 | } | 257 | } |
259 | 258 | ||
260 | if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && | 259 | if (req->cmd_flags & REQ_FLUSH) { |
261 | req->cmd[0] == REQ_LB_OP_FLUSH) { | ||
262 | read = 0; | 260 | read = 0; |
263 | op = "flush"; | 261 | op = "flush"; |
264 | } else { | 262 | } else { |
@@ -398,16 +396,6 @@ static int ps3disk_identify(struct ps3_storage_device *dev) | |||
398 | return 0; | 396 | return 0; |
399 | } | 397 | } |
400 | 398 | ||
401 | static void ps3disk_prepare_flush(struct request_queue *q, struct request *req) | ||
402 | { | ||
403 | struct ps3_storage_device *dev = q->queuedata; | ||
404 | |||
405 | dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); | ||
406 | |||
407 | req->cmd_type = REQ_TYPE_LINUX_BLOCK; | ||
408 | req->cmd[0] = REQ_LB_OP_FLUSH; | ||
409 | } | ||
410 | |||
411 | static unsigned long ps3disk_mask; | 399 | static unsigned long ps3disk_mask; |
412 | 400 | ||
413 | static DEFINE_MUTEX(ps3disk_mask_mutex); | 401 | static DEFINE_MUTEX(ps3disk_mask_mutex); |
@@ -480,8 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) | |||
480 | blk_queue_dma_alignment(queue, dev->blk_size-1); | 468 | blk_queue_dma_alignment(queue, dev->blk_size-1); |
481 | blk_queue_logical_block_size(queue, dev->blk_size); | 469 | blk_queue_logical_block_size(queue, dev->blk_size); |
482 | 470 | ||
483 | blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, | 471 | blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH); |
484 | ps3disk_prepare_flush); | ||
485 | 472 | ||
486 | blk_queue_max_segments(queue, -1); | 473 | blk_queue_max_segments(queue, -1); |
487 | blk_queue_max_segment_size(queue, dev->bounce_size); | 474 | blk_queue_max_segment_size(queue, dev->bounce_size); |
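
Note: the ps3disk changes above reflect two related block-layer moves: flush requests now arrive tagged with REQ_FLUSH in cmd_flags rather than being built by a driver-supplied prepare_flush hook, and blk_queue_ordered() consequently lost its third argument. Roughly, with submit_flush() standing in for the driver's own submit path (ps3disk_submit_flush_request in the hunk above):

	/* Before: the driver supplied a hook that marked flush requests.
	 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, prepare_flush_fn);
	 * After: the hook is gone.
	 */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);

	/* ...and the request loop tests the flag, not a magic cmd byte: */
	if (req->cmd_flags & REQ_FLUSH)
		submit_flush(dev, req);
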
diff --git a/drivers/block/swim.c b/drivers/block/swim.c index e463657569ff..2e46815876df 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/fd.h> | 20 | #include <linux/fd.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/blkdev.h> | 22 | #include <linux/blkdev.h> |
23 | #include <linux/smp_lock.h> | ||
23 | #include <linux/hdreg.h> | 24 | #include <linux/hdreg.h> |
24 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
25 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
@@ -661,11 +662,23 @@ out: | |||
661 | return err; | 662 | return err; |
662 | } | 663 | } |
663 | 664 | ||
665 | static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) | ||
666 | { | ||
667 | int ret; | ||
668 | |||
669 | lock_kernel(); | ||
670 | ret = floppy_open(bdev, mode); | ||
671 | unlock_kernel(); | ||
672 | |||
673 | return ret; | ||
674 | } | ||
675 | |||
664 | static int floppy_release(struct gendisk *disk, fmode_t mode) | 676 | static int floppy_release(struct gendisk *disk, fmode_t mode) |
665 | { | 677 | { |
666 | struct floppy_state *fs = disk->private_data; | 678 | struct floppy_state *fs = disk->private_data; |
667 | struct swim __iomem *base = fs->swd->base; | 679 | struct swim __iomem *base = fs->swd->base; |
668 | 680 | ||
681 | lock_kernel(); | ||
669 | if (fs->ref_count < 0) | 682 | if (fs->ref_count < 0) |
670 | fs->ref_count = 0; | 683 | fs->ref_count = 0; |
671 | else if (fs->ref_count > 0) | 684 | else if (fs->ref_count > 0) |
@@ -673,6 +686,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) | |||
673 | 686 | ||
674 | if (fs->ref_count == 0) | 687 | if (fs->ref_count == 0) |
675 | swim_motor(base, OFF); | 688 | swim_motor(base, OFF); |
689 | unlock_kernel(); | ||
676 | 690 | ||
677 | return 0; | 691 | return 0; |
678 | } | 692 | } |
@@ -690,7 +704,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, | |||
690 | case FDEJECT: | 704 | case FDEJECT: |
691 | if (fs->ref_count != 1) | 705 | if (fs->ref_count != 1) |
692 | return -EBUSY; | 706 | return -EBUSY; |
707 | lock_kernel(); | ||
693 | err = floppy_eject(fs); | 708 | err = floppy_eject(fs); |
709 | unlock_kernel(); | ||
694 | return err; | 710 | return err; |
695 | 711 | ||
696 | case FDGETPRM: | 712 | case FDGETPRM: |
@@ -751,9 +767,9 @@ static int floppy_revalidate(struct gendisk *disk) | |||
751 | 767 | ||
752 | static const struct block_device_operations floppy_fops = { | 768 | static const struct block_device_operations floppy_fops = { |
753 | .owner = THIS_MODULE, | 769 | .owner = THIS_MODULE, |
754 | .open = floppy_open, | 770 | .open = floppy_unlocked_open, |
755 | .release = floppy_release, | 771 | .release = floppy_release, |
756 | .locked_ioctl = floppy_ioctl, | 772 | .ioctl = floppy_ioctl, |
757 | .getgeo = floppy_getgeo, | 773 | .getgeo = floppy_getgeo, |
758 | .media_changed = floppy_check_change, | 774 | .media_changed = floppy_check_change, |
759 | .revalidate_disk = floppy_revalidate, | 775 | .revalidate_disk = floppy_revalidate, |
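
Note: swim.c introduces the *_unlocked_open wrapper that also appears in swim3.c, ub.c and viodasd.c below: the legacy open() body is left untouched and the BKL is taken only in the new fops entry point. The generic shape, with example_open standing in for the driver's existing open:

	static int example_unlocked_open(struct block_device *bdev, fmode_t mode)
	{
		int ret;

		lock_kernel();
		ret = example_open(bdev, mode);	/* unchanged legacy open() */
		unlock_kernel();

		return ret;
	}
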
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 59ca2b77b574..cc6a3864822c 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/ioctl.h> | 25 | #include <linux/ioctl.h> |
26 | #include <linux/blkdev.h> | 26 | #include <linux/blkdev.h> |
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/smp_lock.h> | ||
28 | #include <linux/module.h> | 29 | #include <linux/module.h> |
29 | #include <linux/spinlock.h> | 30 | #include <linux/spinlock.h> |
30 | #include <asm/io.h> | 31 | #include <asm/io.h> |
@@ -839,7 +840,7 @@ static int fd_eject(struct floppy_state *fs) | |||
839 | static struct floppy_struct floppy_type = | 840 | static struct floppy_struct floppy_type = |
840 | { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ | 841 | { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ |
841 | 842 | ||
842 | static int floppy_ioctl(struct block_device *bdev, fmode_t mode, | 843 | static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode, |
843 | unsigned int cmd, unsigned long param) | 844 | unsigned int cmd, unsigned long param) |
844 | { | 845 | { |
845 | struct floppy_state *fs = bdev->bd_disk->private_data; | 846 | struct floppy_state *fs = bdev->bd_disk->private_data; |
@@ -867,6 +868,18 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, | |||
867 | return -ENOTTY; | 868 | return -ENOTTY; |
868 | } | 869 | } |
869 | 870 | ||
871 | static int floppy_ioctl(struct block_device *bdev, fmode_t mode, | ||
872 | unsigned int cmd, unsigned long param) | ||
873 | { | ||
874 | int ret; | ||
875 | |||
876 | lock_kernel(); | ||
877 | ret = floppy_locked_ioctl(bdev, mode, cmd, param); | ||
878 | unlock_kernel(); | ||
879 | |||
880 | return ret; | ||
881 | } | ||
882 | |||
870 | static int floppy_open(struct block_device *bdev, fmode_t mode) | 883 | static int floppy_open(struct block_device *bdev, fmode_t mode) |
871 | { | 884 | { |
872 | struct floppy_state *fs = bdev->bd_disk->private_data; | 885 | struct floppy_state *fs = bdev->bd_disk->private_data; |
@@ -936,15 +949,28 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) | |||
936 | return 0; | 949 | return 0; |
937 | } | 950 | } |
938 | 951 | ||
952 | static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) | ||
953 | { | ||
954 | int ret; | ||
955 | |||
956 | lock_kernel(); | ||
957 | ret = floppy_open(bdev, mode); | ||
958 | unlock_kernel(); | ||
959 | |||
960 | return ret; | ||
961 | } | ||
962 | |||
939 | static int floppy_release(struct gendisk *disk, fmode_t mode) | 963 | static int floppy_release(struct gendisk *disk, fmode_t mode) |
940 | { | 964 | { |
941 | struct floppy_state *fs = disk->private_data; | 965 | struct floppy_state *fs = disk->private_data; |
942 | struct swim3 __iomem *sw = fs->swim3; | 966 | struct swim3 __iomem *sw = fs->swim3; |
967 | lock_kernel(); | ||
943 | if (fs->ref_count > 0 && --fs->ref_count == 0) { | 968 | if (fs->ref_count > 0 && --fs->ref_count == 0) { |
944 | swim3_action(fs, MOTOR_OFF); | 969 | swim3_action(fs, MOTOR_OFF); |
945 | out_8(&sw->control_bic, 0xff); | 970 | out_8(&sw->control_bic, 0xff); |
946 | swim3_select(fs, RELAX); | 971 | swim3_select(fs, RELAX); |
947 | } | 972 | } |
973 | unlock_kernel(); | ||
948 | return 0; | 974 | return 0; |
949 | } | 975 | } |
950 | 976 | ||
@@ -995,16 +1021,16 @@ static int floppy_revalidate(struct gendisk *disk) | |||
995 | } | 1021 | } |
996 | 1022 | ||
997 | static const struct block_device_operations floppy_fops = { | 1023 | static const struct block_device_operations floppy_fops = { |
998 | .open = floppy_open, | 1024 | .open = floppy_unlocked_open, |
999 | .release = floppy_release, | 1025 | .release = floppy_release, |
1000 | .locked_ioctl = floppy_ioctl, | 1026 | .ioctl = floppy_ioctl, |
1001 | .media_changed = floppy_check_change, | 1027 | .media_changed = floppy_check_change, |
1002 | .revalidate_disk= floppy_revalidate, | 1028 | .revalidate_disk= floppy_revalidate, |
1003 | }; | 1029 | }; |
1004 | 1030 | ||
1005 | static int swim3_add_device(struct macio_dev *mdev, int index) | 1031 | static int swim3_add_device(struct macio_dev *mdev, int index) |
1006 | { | 1032 | { |
1007 | struct device_node *swim = mdev->ofdev.node; | 1033 | struct device_node *swim = mdev->ofdev.dev.of_node; |
1008 | struct floppy_state *fs = &floppy_states[index]; | 1034 | struct floppy_state *fs = &floppy_states[index]; |
1009 | int rc = -EBUSY; | 1035 | int rc = -EBUSY; |
1010 | 1036 | ||
@@ -1159,8 +1185,10 @@ static struct of_device_id swim3_match[] = | |||
1159 | 1185 | ||
1160 | static struct macio_driver swim3_driver = | 1186 | static struct macio_driver swim3_driver = |
1161 | { | 1187 | { |
1162 | .name = "swim3", | 1188 | .driver = { |
1163 | .match_table = swim3_match, | 1189 | .name = "swim3", |
1190 | .of_match_table = swim3_match, | ||
1191 | }, | ||
1164 | .probe = swim3_attach, | 1192 | .probe = swim3_attach, |
1165 | #if 0 | 1193 | #if 0 |
1166 | .suspend = swim3_suspend, | 1194 | .suspend = swim3_suspend, |
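
Note: the swim3 hunks also pick up the struct macio_driver layout change: the name and OF match table now live in the embedded struct device_driver. A sketch of the new registration layout, with example_* names as placeholders and only the fields the hunk above implies:

	static struct macio_driver example_driver = {
		.driver = {
			.name		= "example",
			.of_match_table	= example_match,	/* was .match_table */
		},
		.probe	= example_attach,
	};
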
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 0536b5b29adc..c48e14878582 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/timer.h> | 28 | #include <linux/timer.h> |
29 | #include <linux/scatterlist.h> | 29 | #include <linux/scatterlist.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/smp_lock.h> | ||
31 | #include <scsi/scsi.h> | 32 | #include <scsi/scsi.h> |
32 | 33 | ||
33 | #define DRV_NAME "ub" | 34 | #define DRV_NAME "ub" |
@@ -648,7 +649,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) | |||
648 | return 0; | 649 | return 0; |
649 | } | 650 | } |
650 | 651 | ||
651 | if (lun->changed && !blk_pc_request(rq)) { | 652 | if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) { |
652 | blk_start_request(rq); | 653 | blk_start_request(rq); |
653 | ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); | 654 | ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); |
654 | return 0; | 655 | return 0; |
@@ -684,7 +685,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) | |||
684 | } | 685 | } |
685 | urq->nsg = n_elem; | 686 | urq->nsg = n_elem; |
686 | 687 | ||
687 | if (blk_pc_request(rq)) { | 688 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
688 | ub_cmd_build_packet(sc, lun, cmd, urq); | 689 | ub_cmd_build_packet(sc, lun, cmd, urq); |
689 | } else { | 690 | } else { |
690 | ub_cmd_build_block(sc, lun, cmd, urq); | 691 | ub_cmd_build_block(sc, lun, cmd, urq); |
@@ -781,7 +782,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
781 | rq = urq->rq; | 782 | rq = urq->rq; |
782 | 783 | ||
783 | if (cmd->error == 0) { | 784 | if (cmd->error == 0) { |
784 | if (blk_pc_request(rq)) { | 785 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
785 | if (cmd->act_len >= rq->resid_len) | 786 | if (cmd->act_len >= rq->resid_len) |
786 | rq->resid_len = 0; | 787 | rq->resid_len = 0; |
787 | else | 788 | else |
@@ -795,7 +796,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
795 | } | 796 | } |
796 | } | 797 | } |
797 | } else { | 798 | } else { |
798 | if (blk_pc_request(rq)) { | 799 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
799 | /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ | 800 | /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ |
800 | memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); | 801 | memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); |
801 | rq->sense_len = UB_SENSE_SIZE; | 802 | rq->sense_len = UB_SENSE_SIZE; |
@@ -1710,6 +1711,18 @@ err_open: | |||
1710 | return rc; | 1711 | return rc; |
1711 | } | 1712 | } |
1712 | 1713 | ||
1714 | static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode) | ||
1715 | { | ||
1716 | int ret; | ||
1717 | |||
1718 | lock_kernel(); | ||
1719 | ret = ub_bd_open(bdev, mode); | ||
1720 | unlock_kernel(); | ||
1721 | |||
1722 | return ret; | ||
1723 | } | ||
1724 | |||
1725 | |||
1713 | /* | 1726 | /* |
1714 | */ | 1727 | */ |
1715 | static int ub_bd_release(struct gendisk *disk, fmode_t mode) | 1728 | static int ub_bd_release(struct gendisk *disk, fmode_t mode) |
@@ -1717,7 +1730,10 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode) | |||
1717 | struct ub_lun *lun = disk->private_data; | 1730 | struct ub_lun *lun = disk->private_data; |
1718 | struct ub_dev *sc = lun->udev; | 1731 | struct ub_dev *sc = lun->udev; |
1719 | 1732 | ||
1733 | lock_kernel(); | ||
1720 | ub_put(sc); | 1734 | ub_put(sc); |
1735 | unlock_kernel(); | ||
1736 | |||
1721 | return 0; | 1737 | return 0; |
1722 | } | 1738 | } |
1723 | 1739 | ||
@@ -1729,8 +1745,13 @@ static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode, | |||
1729 | { | 1745 | { |
1730 | struct gendisk *disk = bdev->bd_disk; | 1746 | struct gendisk *disk = bdev->bd_disk; |
1731 | void __user *usermem = (void __user *) arg; | 1747 | void __user *usermem = (void __user *) arg; |
1748 | int ret; | ||
1749 | |||
1750 | lock_kernel(); | ||
1751 | ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem); | ||
1752 | unlock_kernel(); | ||
1732 | 1753 | ||
1733 | return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem); | 1754 | return ret; |
1734 | } | 1755 | } |
1735 | 1756 | ||
1736 | /* | 1757 | /* |
@@ -1792,9 +1813,9 @@ static int ub_bd_media_changed(struct gendisk *disk) | |||
1792 | 1813 | ||
1793 | static const struct block_device_operations ub_bd_fops = { | 1814 | static const struct block_device_operations ub_bd_fops = { |
1794 | .owner = THIS_MODULE, | 1815 | .owner = THIS_MODULE, |
1795 | .open = ub_bd_open, | 1816 | .open = ub_bd_unlocked_open, |
1796 | .release = ub_bd_release, | 1817 | .release = ub_bd_release, |
1797 | .locked_ioctl = ub_bd_ioctl, | 1818 | .ioctl = ub_bd_ioctl, |
1798 | .media_changed = ub_bd_media_changed, | 1819 | .media_changed = ub_bd_media_changed, |
1799 | .revalidate_disk = ub_bd_revalidate, | 1820 | .revalidate_disk = ub_bd_revalidate, |
1800 | }; | 1821 | }; |
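
Note: the ub.c conversions above open-code tests that used to hide behind removed block-layer helpers. The equivalences used across this series, collected as a sketch (the inline helper name is hypothetical):

	/* blk_fs_request(rq)		->  rq->cmd_type == REQ_TYPE_FS
	 * blk_pc_request(rq)		->  rq->cmd_type == REQ_TYPE_BLOCK_PC
	 * blk_barrier_rq(rq)		->  rq->cmd_flags & REQ_HARDBARRIER
	 * test_bit(BIO_RW, &bio->bi_rw)->  bio->bi_rw & REQ_WRITE
	 */
	static inline bool example_is_passthrough(struct request *rq)
	{
		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
	}
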
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 2f9470ff8f7c..8be57151f5d6 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
@@ -478,7 +478,7 @@ static void process_page(unsigned long data) | |||
478 | le32_to_cpu(desc->local_addr)>>9, | 478 | le32_to_cpu(desc->local_addr)>>9, |
479 | le32_to_cpu(desc->transfer_size)); | 479 | le32_to_cpu(desc->transfer_size)); |
480 | dump_dmastat(card, control); | 480 | dump_dmastat(card, control); |
481 | } else if (test_bit(BIO_RW, &bio->bi_rw) && | 481 | } else if ((bio->bi_rw & REQ_WRITE) && |
482 | le32_to_cpu(desc->local_addr) >> 9 == | 482 | le32_to_cpu(desc->local_addr) >> 9 == |
483 | card->init_size) { | 483 | card->init_size) { |
484 | card->init_size += le32_to_cpu(desc->transfer_size) >> 9; | 484 | card->init_size += le32_to_cpu(desc->transfer_size) >> 9; |
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index 788d93882ab9..f651e51a3319 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/errno.h> | 41 | #include <linux/errno.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/string.h> | 43 | #include <linux/string.h> |
44 | #include <linux/smp_lock.h> | ||
44 | #include <linux/dma-mapping.h> | 45 | #include <linux/dma-mapping.h> |
45 | #include <linux/completion.h> | 46 | #include <linux/completion.h> |
46 | #include <linux/device.h> | 47 | #include <linux/device.h> |
@@ -175,6 +176,18 @@ static int viodasd_open(struct block_device *bdev, fmode_t mode) | |||
175 | return 0; | 176 | return 0; |
176 | } | 177 | } |
177 | 178 | ||
179 | static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode) | ||
180 | { | ||
181 | int ret; | ||
182 | |||
183 | lock_kernel(); | ||
184 | ret = viodasd_open(bdev, mode); | ||
185 | unlock_kernel(); | ||
186 | |||
187 | return ret; | ||
188 | } | ||
189 | |||
190 | |||
178 | /* | 191 | /* |
179 | * External release entry point. | 192 | * External release entry point. |
180 | */ | 193 | */ |
@@ -183,6 +196,7 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode) | |||
183 | struct viodasd_device *d = disk->private_data; | 196 | struct viodasd_device *d = disk->private_data; |
184 | HvLpEvent_Rc hvrc; | 197 | HvLpEvent_Rc hvrc; |
185 | 198 | ||
199 | lock_kernel(); | ||
186 | /* Send the event to OS/400. We DON'T expect a response */ | 200 | /* Send the event to OS/400. We DON'T expect a response */ |
187 | hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, | 201 | hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, |
188 | HvLpEvent_Type_VirtualIo, | 202 | HvLpEvent_Type_VirtualIo, |
@@ -195,6 +209,9 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode) | |||
195 | 0, 0, 0); | 209 | 0, 0, 0); |
196 | if (hvrc != 0) | 210 | if (hvrc != 0) |
197 | pr_warning("HV close call failed %d\n", (int)hvrc); | 211 | pr_warning("HV close call failed %d\n", (int)hvrc); |
212 | |||
213 | unlock_kernel(); | ||
214 | |||
198 | return 0; | 215 | return 0; |
199 | } | 216 | } |
200 | 217 | ||
@@ -219,7 +236,7 @@ static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
219 | */ | 236 | */ |
220 | static const struct block_device_operations viodasd_fops = { | 237 | static const struct block_device_operations viodasd_fops = { |
221 | .owner = THIS_MODULE, | 238 | .owner = THIS_MODULE, |
222 | .open = viodasd_open, | 239 | .open = viodasd_unlocked_open, |
223 | .release = viodasd_release, | 240 | .release = viodasd_release, |
224 | .getgeo = viodasd_getgeo, | 241 | .getgeo = viodasd_getgeo, |
225 | }; | 242 | }; |
@@ -361,7 +378,7 @@ static void do_viodasd_request(struct request_queue *q) | |||
361 | if (req == NULL) | 378 | if (req == NULL) |
362 | return; | 379 | return; |
363 | /* check that request contains a valid command */ | 380 | /* check that request contains a valid command */ |
364 | if (!blk_fs_request(req)) { | 381 | if (req->cmd_type != REQ_TYPE_FS) { |
365 | viodasd_end_request(req, -EIO, blk_rq_sectors(req)); | 382 | viodasd_end_request(req, -EIO, blk_rq_sectors(req)); |
366 | continue; | 383 | continue; |
367 | } | 384 | } |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2138a7ae050c..1101e251a629 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/spinlock.h> | 2 | #include <linux/spinlock.h> |
3 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
4 | #include <linux/blkdev.h> | 4 | #include <linux/blkdev.h> |
5 | #include <linux/smp_lock.h> | ||
5 | #include <linux/hdreg.h> | 6 | #include <linux/hdreg.h> |
6 | #include <linux/virtio.h> | 7 | #include <linux/virtio.h> |
7 | #include <linux/virtio_blk.h> | 8 | #include <linux/virtio_blk.h> |
@@ -50,7 +51,7 @@ static void blk_done(struct virtqueue *vq) | |||
50 | unsigned long flags; | 51 | unsigned long flags; |
51 | 52 | ||
52 | spin_lock_irqsave(&vblk->lock, flags); | 53 | spin_lock_irqsave(&vblk->lock, flags); |
53 | while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { | 54 | while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { |
54 | int error; | 55 | int error; |
55 | 56 | ||
56 | switch (vbr->status) { | 57 | switch (vbr->status) { |
@@ -65,10 +66,17 @@ static void blk_done(struct virtqueue *vq) | |||
65 | break; | 66 | break; |
66 | } | 67 | } |
67 | 68 | ||
68 | if (blk_pc_request(vbr->req)) { | 69 | switch (vbr->req->cmd_type) { |
70 | case REQ_TYPE_BLOCK_PC: | ||
69 | vbr->req->resid_len = vbr->in_hdr.residual; | 71 | vbr->req->resid_len = vbr->in_hdr.residual; |
70 | vbr->req->sense_len = vbr->in_hdr.sense_len; | 72 | vbr->req->sense_len = vbr->in_hdr.sense_len; |
71 | vbr->req->errors = vbr->in_hdr.errors; | 73 | vbr->req->errors = vbr->in_hdr.errors; |
74 | break; | ||
75 | case REQ_TYPE_SPECIAL: | ||
76 | vbr->req->errors = (error != 0); | ||
77 | break; | ||
78 | default: | ||
79 | break; | ||
72 | } | 80 | } |
73 | 81 | ||
74 | __blk_end_request_all(vbr->req, error); | 82 | __blk_end_request_all(vbr->req, error); |
@@ -92,31 +100,35 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, | |||
92 | return false; | 100 | return false; |
93 | 101 | ||
94 | vbr->req = req; | 102 | vbr->req = req; |
95 | switch (req->cmd_type) { | 103 | |
96 | case REQ_TYPE_FS: | 104 | if (req->cmd_flags & REQ_FLUSH) { |
97 | vbr->out_hdr.type = 0; | 105 | vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; |
98 | vbr->out_hdr.sector = blk_rq_pos(vbr->req); | ||
99 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | ||
100 | break; | ||
101 | case REQ_TYPE_BLOCK_PC: | ||
102 | vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; | ||
103 | vbr->out_hdr.sector = 0; | 106 | vbr->out_hdr.sector = 0; |
104 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | 107 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); |
105 | break; | 108 | } else { |
106 | case REQ_TYPE_LINUX_BLOCK: | 109 | switch (req->cmd_type) { |
107 | if (req->cmd[0] == REQ_LB_OP_FLUSH) { | 110 | case REQ_TYPE_FS: |
108 | vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; | 111 | vbr->out_hdr.type = 0; |
112 | vbr->out_hdr.sector = blk_rq_pos(vbr->req); | ||
113 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | ||
114 | break; | ||
115 | case REQ_TYPE_BLOCK_PC: | ||
116 | vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; | ||
117 | vbr->out_hdr.sector = 0; | ||
118 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | ||
119 | break; | ||
120 | case REQ_TYPE_SPECIAL: | ||
121 | vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; | ||
109 | vbr->out_hdr.sector = 0; | 122 | vbr->out_hdr.sector = 0; |
110 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | 123 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); |
111 | break; | 124 | break; |
125 | default: | ||
126 | /* We don't put anything else in the queue. */ | ||
127 | BUG(); | ||
112 | } | 128 | } |
113 | /*FALLTHRU*/ | ||
114 | default: | ||
115 | /* We don't put anything else in the queue. */ | ||
116 | BUG(); | ||
117 | } | 129 | } |
118 | 130 | ||
119 | if (blk_barrier_rq(vbr->req)) | 131 | if (vbr->req->cmd_flags & REQ_HARDBARRIER) |
120 | vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; | 132 | vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; |
121 | 133 | ||
122 | sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); | 134 | sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); |
@@ -127,12 +139,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, | |||
127 | * block, and before the normal inhdr we put the sense data and the | 139 | * block, and before the normal inhdr we put the sense data and the |
128 | * inhdr with additional status information before the normal inhdr. | 140 | * inhdr with additional status information before the normal inhdr. |
129 | */ | 141 | */ |
130 | if (blk_pc_request(vbr->req)) | 142 | if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) |
131 | sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); | 143 | sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); |
132 | 144 | ||
133 | num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); | 145 | num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); |
134 | 146 | ||
135 | if (blk_pc_request(vbr->req)) { | 147 | if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) { |
136 | sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); | 148 | sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); |
137 | sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, | 149 | sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, |
138 | sizeof(vbr->in_hdr)); | 150 | sizeof(vbr->in_hdr)); |
@@ -151,7 +163,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, | |||
151 | } | 163 | } |
152 | } | 164 | } |
153 | 165 | ||
154 | if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { | 166 | if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { |
155 | mempool_free(vbr, vblk->pool); | 167 | mempool_free(vbr, vblk->pool); |
156 | return false; | 168 | return false; |
157 | } | 169 | } |
@@ -180,16 +192,37 @@ static void do_virtblk_request(struct request_queue *q) | |||
180 | } | 192 | } |
181 | 193 | ||
182 | if (issued) | 194 | if (issued) |
183 | vblk->vq->vq_ops->kick(vblk->vq); | 195 | virtqueue_kick(vblk->vq); |
184 | } | 196 | } |
185 | 197 | ||
186 | static void virtblk_prepare_flush(struct request_queue *q, struct request *req) | 198 | /* return id (s/n) string for *disk to *id_str |
199 | */ | ||
200 | static int virtblk_get_id(struct gendisk *disk, char *id_str) | ||
187 | { | 201 | { |
188 | req->cmd_type = REQ_TYPE_LINUX_BLOCK; | 202 | struct virtio_blk *vblk = disk->private_data; |
189 | req->cmd[0] = REQ_LB_OP_FLUSH; | 203 | struct request *req; |
204 | struct bio *bio; | ||
205 | int err; | ||
206 | |||
207 | bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, | ||
208 | GFP_KERNEL); | ||
209 | if (IS_ERR(bio)) | ||
210 | return PTR_ERR(bio); | ||
211 | |||
212 | req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); | ||
213 | if (IS_ERR(req)) { | ||
214 | bio_put(bio); | ||
215 | return PTR_ERR(req); | ||
216 | } | ||
217 | |||
218 | req->cmd_type = REQ_TYPE_SPECIAL; | ||
219 | err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); | ||
220 | blk_put_request(req); | ||
221 | |||
222 | return err; | ||
190 | } | 223 | } |
191 | 224 | ||
192 | static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, | 225 | static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode, |
193 | unsigned cmd, unsigned long data) | 226 | unsigned cmd, unsigned long data) |
194 | { | 227 | { |
195 | struct gendisk *disk = bdev->bd_disk; | 228 | struct gendisk *disk = bdev->bd_disk; |
@@ -205,6 +238,18 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, | |||
205 | (void __user *)data); | 238 | (void __user *)data); |
206 | } | 239 | } |
207 | 240 | ||
241 | static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, | ||
242 | unsigned int cmd, unsigned long param) | ||
243 | { | ||
244 | int ret; | ||
245 | |||
246 | lock_kernel(); | ||
247 | ret = virtblk_locked_ioctl(bdev, mode, cmd, param); | ||
248 | unlock_kernel(); | ||
249 | |||
250 | return ret; | ||
251 | } | ||
252 | |||
208 | /* We provide getgeo only to please some old bootloader/partitioning tools */ | 253 | /* We provide getgeo only to please some old bootloader/partitioning tools */ |
209 | static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) | 254 | static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) |
210 | { | 255 | { |
@@ -231,7 +276,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) | |||
231 | } | 276 | } |
232 | 277 | ||
233 | static const struct block_device_operations virtblk_fops = { | 278 | static const struct block_device_operations virtblk_fops = { |
234 | .locked_ioctl = virtblk_ioctl, | 279 | .ioctl = virtblk_ioctl, |
235 | .owner = THIS_MODULE, | 280 | .owner = THIS_MODULE, |
236 | .getgeo = virtblk_getgeo, | 281 | .getgeo = virtblk_getgeo, |
237 | }; | 282 | }; |
@@ -241,6 +286,27 @@ static int index_to_minor(int index) | |||
241 | return index << PART_BITS; | 286 | return index << PART_BITS; |
242 | } | 287 | } |
243 | 288 | ||
289 | static ssize_t virtblk_serial_show(struct device *dev, | ||
290 | struct device_attribute *attr, char *buf) | ||
291 | { | ||
292 | struct gendisk *disk = dev_to_disk(dev); | ||
293 | int err; | ||
294 | |||
295 | /* sysfs gives us a PAGE_SIZE buffer */ | ||
296 | BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); | ||
297 | |||
298 | buf[VIRTIO_BLK_ID_BYTES] = '\0'; | ||
299 | err = virtblk_get_id(disk, buf); | ||
300 | if (!err) | ||
301 | return strlen(buf); | ||
302 | |||
303 | if (err == -EIO) /* Unsupported? Make it empty. */ | ||
304 | return 0; | ||
305 | |||
306 | return err; | ||
307 | } | ||
308 | DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); | ||
309 | |||
244 | static int __devinit virtblk_probe(struct virtio_device *vdev) | 310 | static int __devinit virtblk_probe(struct virtio_device *vdev) |
245 | { | 311 | { |
246 | struct virtio_blk *vblk; | 312 | struct virtio_blk *vblk; |
@@ -258,7 +324,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
258 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, | 324 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, |
259 | offsetof(struct virtio_blk_config, seg_max), | 325 | offsetof(struct virtio_blk_config, seg_max), |
260 | &sg_elems); | 326 | &sg_elems); |
261 | if (err) | 327 | |
328 | /* We need at least one SG element, whatever they say. */ | ||
329 | if (err || !sg_elems) | ||
262 | sg_elems = 1; | 330 | sg_elems = 1; |
263 | 331 | ||
264 | /* We need an extra sg elements at head and tail. */ | 332 | /* We need an extra sg elements at head and tail. */ |
@@ -324,12 +392,31 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
324 | vblk->disk->driverfs_dev = &vdev->dev; | 392 | vblk->disk->driverfs_dev = &vdev->dev; |
325 | index++; | 393 | index++; |
326 | 394 | ||
327 | /* If barriers are supported, tell block layer that queue is ordered */ | 395 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) { |
328 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) | 396 | /* |
329 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, | 397 | * If the FLUSH feature is supported we do have support for |
330 | virtblk_prepare_flush); | 398 | * flushing a volatile write cache on the host. Use that |
331 | else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) | 399 | * to implement write barrier support. |
332 | blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL); | 400 | */ |
401 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); | ||
402 | } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) { | ||
403 | /* | ||
404 | * If the BARRIER feature is supported the host expects us | ||
405 | * to order requests by tags. This implies there is no | ||
406 | * volatile write cache on the host, and that the host | ||
407 | * never re-orders outstanding I/O. This feature is not | ||
408 | * useful for real-life scenarios and is deprecated. | ||
409 | */ | ||
410 | blk_queue_ordered(q, QUEUE_ORDERED_TAG); | ||
411 | } else { | ||
412 | /* | ||
413 | * If the FLUSH feature is not supported we must assume that | ||
414 | * the host does not perform any kind of volatile write | ||
415 | * caching. We still need to drain the queue to provide | ||
416 | * proper barrier semantics. | ||
417 | */ | ||
418 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN); | ||
419 | } | ||
333 | 420 | ||
334 | /* If disk is read-only in the host, the guest should obey */ | 421 | /* If disk is read-only in the host, the guest should obey */ |
335 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) | 422 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) |
@@ -403,8 +490,15 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
403 | 490 | ||
404 | 491 | ||
405 | add_disk(vblk->disk); | 492 | add_disk(vblk->disk); |
493 | err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); | ||
494 | if (err) | ||
495 | goto out_del_disk; | ||
496 | |||
406 | return 0; | 497 | return 0; |
407 | 498 | ||
499 | out_del_disk: | ||
500 | del_gendisk(vblk->disk); | ||
501 | blk_cleanup_queue(vblk->disk->queue); | ||
408 | out_put_disk: | 502 | out_put_disk: |
409 | put_disk(vblk->disk); | 503 | put_disk(vblk->disk); |
410 | out_mempool: | 504 | out_mempool: |
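
Note: the new 'serial' attribute added in virtblk_probe() is read from userspace as a plain sysfs file; an unsupported backend yields an empty read. A hypothetical userspace check, where the device name and path are illustrative:

	#include <stdio.h>

	int main(void)
	{
		char serial[128];
		FILE *f = fopen("/sys/block/vda/serial", "r");

		if (!f)
			return 1;
		if (fgets(serial, sizeof(serial), f) && serial[0])
			printf("virtio-blk serial: %s\n", serial);
		else
			printf("no serial reported\n");
		fclose(f);
		return 0;
	}
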
diff --git a/drivers/block/xd.c b/drivers/block/xd.c index 18a80ff57ce8..d5a3cd750561 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/init.h> | 46 | #include <linux/init.h> |
47 | #include <linux/wait.h> | 47 | #include <linux/wait.h> |
48 | #include <linux/blkdev.h> | 48 | #include <linux/blkdev.h> |
49 | #include <linux/smp_lock.h> | ||
49 | #include <linux/blkpg.h> | 50 | #include <linux/blkpg.h> |
50 | #include <linux/delay.h> | 51 | #include <linux/delay.h> |
51 | #include <linux/io.h> | 52 | #include <linux/io.h> |
@@ -133,7 +134,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo); | |||
133 | 134 | ||
134 | static const struct block_device_operations xd_fops = { | 135 | static const struct block_device_operations xd_fops = { |
135 | .owner = THIS_MODULE, | 136 | .owner = THIS_MODULE, |
136 | .locked_ioctl = xd_ioctl, | 137 | .ioctl = xd_ioctl, |
137 | .getgeo = xd_getgeo, | 138 | .getgeo = xd_getgeo, |
138 | }; | 139 | }; |
139 | static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int); | 140 | static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int); |
@@ -322,7 +323,7 @@ static void do_xd_request (struct request_queue * q) | |||
322 | int res = -EIO; | 323 | int res = -EIO; |
323 | int retry; | 324 | int retry; |
324 | 325 | ||
325 | if (!blk_fs_request(req)) | 326 | if (req->cmd_type != REQ_TYPE_FS) |
326 | goto done; | 327 | goto done; |
327 | if (block + count > get_capacity(req->rq_disk)) | 328 | if (block + count > get_capacity(req->rq_disk)) |
328 | goto done; | 329 | goto done; |
@@ -347,7 +348,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
347 | } | 348 | } |
348 | 349 | ||
349 | /* xd_ioctl: handle device ioctl's */ | 350 | /* xd_ioctl: handle device ioctl's */ |
350 | static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg) | 351 | static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg) |
351 | { | 352 | { |
352 | switch (cmd) { | 353 | switch (cmd) { |
353 | case HDIO_SET_DMA: | 354 | case HDIO_SET_DMA: |
@@ -375,6 +376,18 @@ static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long a | |||
375 | } | 376 | } |
376 | } | 377 | } |
377 | 378 | ||
379 | static int xd_ioctl(struct block_device *bdev, fmode_t mode, | ||
380 | unsigned int cmd, unsigned long param) | ||
381 | { | ||
382 | int ret; | ||
383 | |||
384 | lock_kernel(); | ||
385 | ret = xd_locked_ioctl(bdev, mode, cmd, param); | ||
386 | unlock_kernel(); | ||
387 | |||
388 | return ret; | ||
389 | } | ||
390 | |||
378 | /* xd_readwrite: handle a read/write request */ | 391 | /* xd_readwrite: handle a read/write request */ |
379 | static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count) | 392 | static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count) |
380 | { | 393 | { |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 82ed403147c0..ab735a605cf3 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/cdrom.h> | 41 | #include <linux/cdrom.h> |
42 | #include <linux/module.h> | 42 | #include <linux/module.h> |
43 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
44 | #include <linux/smp_lock.h> | ||
44 | #include <linux/scatterlist.h> | 45 | #include <linux/scatterlist.h> |
45 | 46 | ||
46 | #include <xen/xen.h> | 47 | #include <xen/xen.h> |
@@ -48,6 +49,7 @@ | |||
48 | #include <xen/grant_table.h> | 49 | #include <xen/grant_table.h> |
49 | #include <xen/events.h> | 50 | #include <xen/events.h> |
50 | #include <xen/page.h> | 51 | #include <xen/page.h> |
52 | #include <xen/platform_pci.h> | ||
51 | 53 | ||
52 | #include <xen/interface/grant_table.h> | 54 | #include <xen/interface/grant_table.h> |
53 | #include <xen/interface/io/blkif.h> | 55 | #include <xen/interface/io/blkif.h> |
@@ -78,6 +80,7 @@ static const struct block_device_operations xlvbd_block_fops; | |||
78 | */ | 80 | */ |
79 | struct blkfront_info | 81 | struct blkfront_info |
80 | { | 82 | { |
83 | struct mutex mutex; | ||
81 | struct xenbus_device *xbdev; | 84 | struct xenbus_device *xbdev; |
82 | struct gendisk *gd; | 85 | struct gendisk *gd; |
83 | int vdevice; | 86 | int vdevice; |
@@ -94,16 +97,14 @@ struct blkfront_info | |||
94 | unsigned long shadow_free; | 97 | unsigned long shadow_free; |
95 | int feature_barrier; | 98 | int feature_barrier; |
96 | int is_ready; | 99 | int is_ready; |
97 | |||
98 | /** | ||
99 | * The number of people holding this device open. We won't allow a | ||
100 | * hot-unplug unless this is 0. | ||
101 | */ | ||
102 | int users; | ||
103 | }; | 100 | }; |
104 | 101 | ||
105 | static DEFINE_SPINLOCK(blkif_io_lock); | 102 | static DEFINE_SPINLOCK(blkif_io_lock); |
106 | 103 | ||
104 | static unsigned int nr_minors; | ||
105 | static unsigned long *minors; | ||
106 | static DEFINE_SPINLOCK(minor_lock); | ||
107 | |||
107 | #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ | 108 | #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ |
108 | (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) | 109 | (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) |
109 | #define GRANT_INVALID_REF 0 | 110 | #define GRANT_INVALID_REF 0 |
@@ -138,6 +139,55 @@ static void add_id_to_freelist(struct blkfront_info *info, | |||
138 | info->shadow_free = id; | 139 | info->shadow_free = id; |
139 | } | 140 | } |
140 | 141 | ||
142 | static int xlbd_reserve_minors(unsigned int minor, unsigned int nr) | ||
143 | { | ||
144 | unsigned int end = minor + nr; | ||
145 | int rc; | ||
146 | |||
147 | if (end > nr_minors) { | ||
148 | unsigned long *bitmap, *old; | ||
149 | |||
150 | bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap), | ||
151 | GFP_KERNEL); | ||
152 | if (bitmap == NULL) | ||
153 | return -ENOMEM; | ||
154 | |||
155 | spin_lock(&minor_lock); | ||
156 | if (end > nr_minors) { | ||
157 | old = minors; | ||
158 | memcpy(bitmap, minors, | ||
159 | BITS_TO_LONGS(nr_minors) * sizeof(*bitmap)); | ||
160 | minors = bitmap; | ||
161 | nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG; | ||
162 | } else | ||
163 | old = bitmap; | ||
164 | spin_unlock(&minor_lock); | ||
165 | kfree(old); | ||
166 | } | ||
167 | |||
168 | spin_lock(&minor_lock); | ||
169 | if (find_next_bit(minors, end, minor) >= end) { | ||
170 | for (; minor < end; ++minor) | ||
171 | __set_bit(minor, minors); | ||
172 | rc = 0; | ||
173 | } else | ||
174 | rc = -EBUSY; | ||
175 | spin_unlock(&minor_lock); | ||
176 | |||
177 | return rc; | ||
178 | } | ||
179 | |||
180 | static void xlbd_release_minors(unsigned int minor, unsigned int nr) | ||
181 | { | ||
182 | unsigned int end = minor + nr; | ||
183 | |||
184 | BUG_ON(end > nr_minors); | ||
185 | spin_lock(&minor_lock); | ||
186 | for (; minor < end; ++minor) | ||
187 | __clear_bit(minor, minors); | ||
188 | spin_unlock(&minor_lock); | ||
189 | } | ||
190 | |||
141 | static void blkif_restart_queue_callback(void *arg) | 191 | static void blkif_restart_queue_callback(void *arg) |
142 | { | 192 | { |
143 | struct blkfront_info *info = (struct blkfront_info *)arg; | 193 | struct blkfront_info *info = (struct blkfront_info *)arg; |
@@ -238,7 +288,7 @@ static int blkif_queue_request(struct request *req) | |||
238 | 288 | ||
239 | ring_req->operation = rq_data_dir(req) ? | 289 | ring_req->operation = rq_data_dir(req) ? |
240 | BLKIF_OP_WRITE : BLKIF_OP_READ; | 290 | BLKIF_OP_WRITE : BLKIF_OP_READ; |
241 | if (blk_barrier_rq(req)) | 291 | if (req->cmd_flags & REQ_HARDBARRIER) |
242 | ring_req->operation = BLKIF_OP_WRITE_BARRIER; | 292 | ring_req->operation = BLKIF_OP_WRITE_BARRIER; |
243 | 293 | ||
244 | ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); | 294 | ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); |
@@ -309,7 +359,7 @@ static void do_blkif_request(struct request_queue *rq) | |||
309 | 359 | ||
310 | blk_start_request(req); | 360 | blk_start_request(req); |
311 | 361 | ||
312 | if (!blk_fs_request(req)) { | 362 | if (req->cmd_type != REQ_TYPE_FS) { |
313 | __blk_end_request_all(req, -EIO); | 363 | __blk_end_request_all(req, -EIO); |
314 | continue; | 364 | continue; |
315 | } | 365 | } |
@@ -371,17 +421,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | |||
371 | static int xlvbd_barrier(struct blkfront_info *info) | 421 | static int xlvbd_barrier(struct blkfront_info *info) |
372 | { | 422 | { |
373 | int err; | 423 | int err; |
424 | const char *barrier; | ||
374 | 425 | ||
375 | err = blk_queue_ordered(info->rq, | 426 | switch (info->feature_barrier) { |
376 | info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, | 427 | case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; |
377 | NULL); | 428 | case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; |
429 | case QUEUE_ORDERED_NONE: barrier = "disabled"; break; | ||
430 | default: return -EINVAL; | ||
431 | } | ||
432 | |||
433 | err = blk_queue_ordered(info->rq, info->feature_barrier); | ||
378 | 434 | ||
379 | if (err) | 435 | if (err) |
380 | return err; | 436 | return err; |
381 | 437 | ||
382 | printk(KERN_INFO "blkfront: %s: barriers %s\n", | 438 | printk(KERN_INFO "blkfront: %s: barriers %s\n", |
383 | info->gd->disk_name, | 439 | info->gd->disk_name, barrier); |
384 | info->feature_barrier ? "enabled" : "disabled"); | ||
385 | return 0; | 440 | return 0; |
386 | } | 441 | } |
387 | 442 | ||
@@ -417,9 +472,14 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | |||
417 | if ((minor % nr_parts) == 0) | 472 | if ((minor % nr_parts) == 0) |
418 | nr_minors = nr_parts; | 473 | nr_minors = nr_parts; |
419 | 474 | ||
475 | err = xlbd_reserve_minors(minor, nr_minors); | ||
476 | if (err) | ||
477 | goto out; | ||
478 | err = -ENODEV; | ||
479 | |||
420 | gd = alloc_disk(nr_minors); | 480 | gd = alloc_disk(nr_minors); |
421 | if (gd == NULL) | 481 | if (gd == NULL) |
422 | goto out; | 482 | goto release; |
423 | 483 | ||
424 | offset = minor / nr_parts; | 484 | offset = minor / nr_parts; |
425 | 485 | ||
@@ -450,14 +510,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | |||
450 | 510 | ||
451 | if (xlvbd_init_blk_queue(gd, sector_size)) { | 511 | if (xlvbd_init_blk_queue(gd, sector_size)) { |
452 | del_gendisk(gd); | 512 | del_gendisk(gd); |
453 | goto out; | 513 | goto release; |
454 | } | 514 | } |
455 | 515 | ||
456 | info->rq = gd->queue; | 516 | info->rq = gd->queue; |
457 | info->gd = gd; | 517 | info->gd = gd; |
458 | 518 | ||
459 | if (info->feature_barrier) | 519 | xlvbd_barrier(info); |
460 | xlvbd_barrier(info); | ||
461 | 520 | ||
462 | if (vdisk_info & VDISK_READONLY) | 521 | if (vdisk_info & VDISK_READONLY) |
463 | set_disk_ro(gd, 1); | 522 | set_disk_ro(gd, 1); |
@@ -470,10 +529,45 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | |||
470 | 529 | ||
471 | return 0; | 530 | return 0; |
472 | 531 | ||
532 | release: | ||
533 | xlbd_release_minors(minor, nr_minors); | ||
473 | out: | 534 | out: |
474 | return err; | 535 | return err; |
475 | } | 536 | } |
476 | 537 | ||
538 | static void xlvbd_release_gendisk(struct blkfront_info *info) | ||
539 | { | ||
540 | unsigned int minor, nr_minors; | ||
541 | unsigned long flags; | ||
542 | |||
543 | if (info->rq == NULL) | ||
544 | return; | ||
545 | |||
546 | spin_lock_irqsave(&blkif_io_lock, flags); | ||
547 | |||
548 | /* No more blkif_request(). */ | ||
549 | blk_stop_queue(info->rq); | ||
550 | |||
551 | /* No more gnttab callback work. */ | ||
552 | gnttab_cancel_free_callback(&info->callback); | ||
553 | spin_unlock_irqrestore(&blkif_io_lock, flags); | ||
554 | |||
555 | /* Flush gnttab callback work. Must be done with no locks held. */ | ||
556 | flush_scheduled_work(); | ||
557 | |||
558 | del_gendisk(info->gd); | ||
559 | |||
560 | minor = info->gd->first_minor; | ||
561 | nr_minors = info->gd->minors; | ||
562 | xlbd_release_minors(minor, nr_minors); | ||
563 | |||
564 | blk_cleanup_queue(info->rq); | ||
565 | info->rq = NULL; | ||
566 | |||
567 | put_disk(info->gd); | ||
568 | info->gd = NULL; | ||
569 | } | ||
570 | |||
477 | static void kick_pending_request_queues(struct blkfront_info *info) | 571 | static void kick_pending_request_queues(struct blkfront_info *info) |
478 | { | 572 | { |
479 | if (!RING_FULL(&info->ring)) { | 573 | if (!RING_FULL(&info->ring)) { |
@@ -568,7 +662,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
568 | printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", | 662 | printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", |
569 | info->gd->disk_name); | 663 | info->gd->disk_name); |
570 | error = -EOPNOTSUPP; | 664 | error = -EOPNOTSUPP; |
571 | info->feature_barrier = 0; | 665 | info->feature_barrier = QUEUE_ORDERED_NONE; |
572 | xlvbd_barrier(info); | 666 | xlvbd_barrier(info); |
573 | } | 667 | } |
574 | /* fall through */ | 668 | /* fall through */ |
@@ -651,7 +745,7 @@ fail: | |||
651 | 745 | ||
652 | 746 | ||
653 | /* Common code used when first setting up, and when resuming. */ | 747 | /* Common code used when first setting up, and when resuming. */ |
654 | static int talk_to_backend(struct xenbus_device *dev, | 748 | static int talk_to_blkback(struct xenbus_device *dev, |
655 | struct blkfront_info *info) | 749 | struct blkfront_info *info) |
656 | { | 750 | { |
657 | const char *message = NULL; | 751 | const char *message = NULL; |
@@ -711,7 +805,6 @@ again: | |||
711 | return err; | 805 | return err; |
712 | } | 806 | } |
713 | 807 | ||
714 | |||
715 | /** | 808 | /** |
716 | * Entry point to this code when a new device is created. Allocate the basic | 809 | * Entry point to this code when a new device is created. Allocate the basic |
717 | * structures and the ring buffer for communication with the backend, and | 810 | * structures and the ring buffer for communication with the backend, and |
@@ -737,12 +830,42 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
737 | } | 830 | } |
738 | } | 831 | } |
739 | 832 | ||
833 | if (xen_hvm_domain()) { | ||
834 | char *type; | ||
835 | int len; | ||
836 | /* no unplug has been done: do not hook devices != xen vbds */ | ||
837 | if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) { | ||
838 | int major; | ||
839 | |||
840 | if (!VDEV_IS_EXTENDED(vdevice)) | ||
841 | major = BLKIF_MAJOR(vdevice); | ||
842 | else | ||
843 | major = XENVBD_MAJOR; | ||
844 | |||
845 | if (major != XENVBD_MAJOR) { | ||
846 | printk(KERN_INFO | ||
847 | "%s: HVM does not support vbd %d as xen block device\n", | ||
848 | __FUNCTION__, vdevice); | ||
849 | return -ENODEV; | ||
850 | } | ||
851 | } | ||
852 | /* do not create a PV cdrom device if we are an HVM guest */ | ||
853 | type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len); | ||
854 | if (IS_ERR(type)) | ||
855 | return -ENODEV; | ||
856 | if (strncmp(type, "cdrom", 5) == 0) { | ||
857 | kfree(type); | ||
858 | return -ENODEV; | ||
859 | } | ||
860 | kfree(type); | ||
861 | } | ||
740 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 862 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
741 | if (!info) { | 863 | if (!info) { |
742 | xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); | 864 | xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); |
743 | return -ENOMEM; | 865 | return -ENOMEM; |
744 | } | 866 | } |
745 | 867 | ||
868 | mutex_init(&info->mutex); | ||
746 | info->xbdev = dev; | 869 | info->xbdev = dev; |
747 | info->vdevice = vdevice; | 870 | info->vdevice = vdevice; |
748 | info->connected = BLKIF_STATE_DISCONNECTED; | 871 | info->connected = BLKIF_STATE_DISCONNECTED; |
@@ -756,7 +879,7 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
756 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); | 879 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); |
757 | dev_set_drvdata(&dev->dev, info); | 880 | dev_set_drvdata(&dev->dev, info); |
758 | 881 | ||
759 | err = talk_to_backend(dev, info); | 882 | err = talk_to_blkback(dev, info); |
760 | if (err) { | 883 | if (err) { |
761 | kfree(info); | 884 | kfree(info); |
762 | dev_set_drvdata(&dev->dev, NULL); | 885 | dev_set_drvdata(&dev->dev, NULL); |
@@ -851,13 +974,50 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
851 | 974 | ||
852 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | 975 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); |
853 | 976 | ||
854 | err = talk_to_backend(dev, info); | 977 | err = talk_to_blkback(dev, info); |
855 | if (info->connected == BLKIF_STATE_SUSPENDED && !err) | 978 | if (info->connected == BLKIF_STATE_SUSPENDED && !err) |
856 | err = blkif_recover(info); | 979 | err = blkif_recover(info); |
857 | 980 | ||
858 | return err; | 981 | return err; |
859 | } | 982 | } |
860 | 983 | ||
984 | static void | ||
985 | blkfront_closing(struct blkfront_info *info) | ||
986 | { | ||
987 | struct xenbus_device *xbdev = info->xbdev; | ||
988 | struct block_device *bdev = NULL; | ||
989 | |||
990 | mutex_lock(&info->mutex); | ||
991 | |||
992 | if (xbdev->state == XenbusStateClosing) { | ||
993 | mutex_unlock(&info->mutex); | ||
994 | return; | ||
995 | } | ||
996 | |||
997 | if (info->gd) | ||
998 | bdev = bdget_disk(info->gd, 0); | ||
999 | |||
1000 | mutex_unlock(&info->mutex); | ||
1001 | |||
1002 | if (!bdev) { | ||
1003 | xenbus_frontend_closed(xbdev); | ||
1004 | return; | ||
1005 | } | ||
1006 | |||
1007 | mutex_lock(&bdev->bd_mutex); | ||
1008 | |||
1009 | if (bdev->bd_openers) { | ||
1010 | xenbus_dev_error(xbdev, -EBUSY, | ||
1011 | "Device in use; refusing to close"); | ||
1012 | xenbus_switch_state(xbdev, XenbusStateClosing); | ||
1013 | } else { | ||
1014 | xlvbd_release_gendisk(info); | ||
1015 | xenbus_frontend_closed(xbdev); | ||
1016 | } | ||
1017 | |||
1018 | mutex_unlock(&bdev->bd_mutex); | ||
1019 | bdput(bdev); | ||
1020 | } | ||
861 | 1021 | ||
862 | /* | 1022 | /* |
863 | * Invoked when the backend is finally 'ready' (and has produced | 1023 | * Invoked when the backend is finally 'ready' (and has produced |
@@ -869,11 +1029,31 @@ static void blkfront_connect(struct blkfront_info *info) | |||
869 | unsigned long sector_size; | 1029 | unsigned long sector_size; |
870 | unsigned int binfo; | 1030 | unsigned int binfo; |
871 | int err; | 1031 | int err; |
872 | 1032 | int barrier; | |
873 | if ((info->connected == BLKIF_STATE_CONNECTED) || | 1033 | |
874 | (info->connected == BLKIF_STATE_SUSPENDED) ) | 1034 | switch (info->connected) { |
1035 | case BLKIF_STATE_CONNECTED: | ||
1036 | /* | ||
1037 | * Potentially, the back-end may be signalling | ||
1038 | * a capacity change; update the capacity. | ||
1039 | */ | ||
1040 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, | ||
1041 | "sectors", "%Lu", &sectors); | ||
1042 | if (XENBUS_EXIST_ERR(err)) | ||
1043 | return; | ||
1044 | printk(KERN_INFO "Setting capacity to %Lu\n", | ||
1045 | sectors); | ||
1046 | set_capacity(info->gd, sectors); | ||
1047 | revalidate_disk(info->gd); | ||
1048 | |||
1049 | /* fall through */ | ||
1050 | case BLKIF_STATE_SUSPENDED: | ||
875 | return; | 1051 | return; |
876 | 1052 | ||
1053 | default: | ||
1054 | break; | ||
1055 | } | ||
1056 | |||
877 | dev_dbg(&info->xbdev->dev, "%s:%s.\n", | 1057 | dev_dbg(&info->xbdev->dev, "%s:%s.\n", |
878 | __func__, info->xbdev->otherend); | 1058 | __func__, info->xbdev->otherend); |
879 | 1059 | ||
@@ -890,10 +1070,26 @@ static void blkfront_connect(struct blkfront_info *info) | |||
890 | } | 1070 | } |
891 | 1071 | ||
892 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | 1072 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
893 | "feature-barrier", "%lu", &info->feature_barrier, | 1073 | "feature-barrier", "%lu", &barrier, |
894 | NULL); | 1074 | NULL); |
1075 | |||
1076 | /* | ||
1077 | * If there's no "feature-barrier" defined, then it means | ||
1078 | * we're dealing with a very old backend which writes | ||
1079 | * synchronously; draining will do what needs to get done. | ||
1080 | * | ||
1081 | * If there are barriers, then we can do full queued writes | ||
1082 | * with tagged barriers. | ||
1083 | * | ||
1084 | * If barriers are not supported, then there's not much we can | ||
1085 | * do, so just set ordering to NONE. | ||
1086 | */ | ||
895 | if (err) | 1087 | if (err) |
896 | info->feature_barrier = 0; | 1088 | info->feature_barrier = QUEUE_ORDERED_DRAIN; |
1089 | else if (barrier) | ||
1090 | info->feature_barrier = QUEUE_ORDERED_TAG; | ||
1091 | else | ||
1092 | info->feature_barrier = QUEUE_ORDERED_NONE; | ||
897 | 1093 | ||
898 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); | 1094 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); |
899 | if (err) { | 1095 | if (err) { |
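
The three-way feature_barrier choice above is what later gets programmed into the request queue. A hedged sketch of that step, assuming the two-argument blk_queue_ordered() of this kernel series (the helper name is illustrative, not taken from the patch):

static int mydev_barrier_init(struct blkfront_info *info)
{
	int err;

	/* apply the negotiated ordering mode to the request queue */
	err = blk_queue_ordered(info->rq, info->feature_barrier);
	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_barrier == QUEUE_ORDERED_NONE ?
	       "disabled" : "enabled");
	return 0;
}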
@@ -916,52 +1112,14 @@ static void blkfront_connect(struct blkfront_info *info) | |||
916 | } | 1112 | } |
917 | 1113 | ||
918 | /** | 1114 | /** |
919 | * Handle the change of state of the backend to Closing. We must delete our | ||
920 | * device-layer structures now, to ensure that writes are flushed through to | ||
921 | * the backend. Once is this done, we can switch to Closed in | ||
922 | * acknowledgement. | ||
923 | */ | ||
924 | static void blkfront_closing(struct xenbus_device *dev) | ||
925 | { | ||
926 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); | ||
927 | unsigned long flags; | ||
928 | |||
929 | dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename); | ||
930 | |||
931 | if (info->rq == NULL) | ||
932 | goto out; | ||
933 | |||
934 | spin_lock_irqsave(&blkif_io_lock, flags); | ||
935 | |||
936 | /* No more blkif_request(). */ | ||
937 | blk_stop_queue(info->rq); | ||
938 | |||
939 | /* No more gnttab callback work. */ | ||
940 | gnttab_cancel_free_callback(&info->callback); | ||
941 | spin_unlock_irqrestore(&blkif_io_lock, flags); | ||
942 | |||
943 | /* Flush gnttab callback work. Must be done with no locks held. */ | ||
944 | flush_scheduled_work(); | ||
945 | |||
946 | blk_cleanup_queue(info->rq); | ||
947 | info->rq = NULL; | ||
948 | |||
949 | del_gendisk(info->gd); | ||
950 | |||
951 | out: | ||
952 | xenbus_frontend_closed(dev); | ||
953 | } | ||
954 | |||
955 | /** | ||
956 | * Callback received when the backend's state changes. | 1115 | * Callback received when the backend's state changes. |
957 | */ | 1116 | */ |
958 | static void backend_changed(struct xenbus_device *dev, | 1117 | static void blkback_changed(struct xenbus_device *dev, |
959 | enum xenbus_state backend_state) | 1118 | enum xenbus_state backend_state) |
960 | { | 1119 | { |
961 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); | 1120 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
962 | struct block_device *bd; | ||
963 | 1121 | ||
964 | dev_dbg(&dev->dev, "blkfront:backend_changed.\n"); | 1122 | dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state); |
965 | 1123 | ||
966 | switch (backend_state) { | 1124 | switch (backend_state) { |
967 | case XenbusStateInitialising: | 1125 | case XenbusStateInitialising: |
@@ -976,35 +1134,56 @@ static void backend_changed(struct xenbus_device *dev, | |||
976 | break; | 1134 | break; |
977 | 1135 | ||
978 | case XenbusStateClosing: | 1136 | case XenbusStateClosing: |
979 | if (info->gd == NULL) { | 1137 | blkfront_closing(info); |
980 | xenbus_frontend_closed(dev); | ||
981 | break; | ||
982 | } | ||
983 | bd = bdget_disk(info->gd, 0); | ||
984 | if (bd == NULL) | ||
985 | xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); | ||
986 | |||
987 | mutex_lock(&bd->bd_mutex); | ||
988 | if (info->users > 0) | ||
989 | xenbus_dev_error(dev, -EBUSY, | ||
990 | "Device in use; refusing to close"); | ||
991 | else | ||
992 | blkfront_closing(dev); | ||
993 | mutex_unlock(&bd->bd_mutex); | ||
994 | bdput(bd); | ||
995 | break; | 1138 | break; |
996 | } | 1139 | } |
997 | } | 1140 | } |
998 | 1141 | ||
999 | static int blkfront_remove(struct xenbus_device *dev) | 1142 | static int blkfront_remove(struct xenbus_device *xbdev) |
1000 | { | 1143 | { |
1001 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); | 1144 | struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); |
1145 | struct block_device *bdev = NULL; | ||
1146 | struct gendisk *disk; | ||
1002 | 1147 | ||
1003 | dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename); | 1148 | dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); |
1004 | 1149 | ||
1005 | blkif_free(info, 0); | 1150 | blkif_free(info, 0); |
1006 | 1151 | ||
1007 | kfree(info); | 1152 | mutex_lock(&info->mutex); |
1153 | |||
1154 | disk = info->gd; | ||
1155 | if (disk) | ||
1156 | bdev = bdget_disk(disk, 0); | ||
1157 | |||
1158 | info->xbdev = NULL; | ||
1159 | mutex_unlock(&info->mutex); | ||
1160 | |||
1161 | if (!bdev) { | ||
1162 | kfree(info); | ||
1163 | return 0; | ||
1164 | } | ||
1165 | |||
1166 | /* | ||
1167 | * The xbdev was removed before we reached the Closed | ||
1168 | * state. See if it's safe to remove the disk. If the bdev | ||
1169 | * isn't closed yet, we let release take care of it. | ||
1170 | */ | ||
1171 | |||
1172 | mutex_lock(&bdev->bd_mutex); | ||
1173 | info = disk->private_data; | ||
1174 | |||
1175 | dev_warn(disk_to_dev(disk), | ||
1176 | "%s was hot-unplugged, %d stale handles\n", | ||
1177 | xbdev->nodename, bdev->bd_openers); | ||
1178 | |||
1179 | if (info && !bdev->bd_openers) { | ||
1180 | xlvbd_release_gendisk(info); | ||
1181 | disk->private_data = NULL; | ||
1182 | kfree(info); | ||
1183 | } | ||
1184 | |||
1185 | mutex_unlock(&bdev->bd_mutex); | ||
1186 | bdput(bdev); | ||
1008 | 1187 | ||
1009 | return 0; | 1188 | return 0; |
1010 | } | 1189 | } |
@@ -1013,30 +1192,78 @@ static int blkfront_is_ready(struct xenbus_device *dev) | |||
1013 | { | 1192 | { |
1014 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); | 1193 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
1015 | 1194 | ||
1016 | return info->is_ready; | 1195 | return info->is_ready && info->xbdev; |
1017 | } | 1196 | } |
1018 | 1197 | ||
1019 | static int blkif_open(struct block_device *bdev, fmode_t mode) | 1198 | static int blkif_open(struct block_device *bdev, fmode_t mode) |
1020 | { | 1199 | { |
1021 | struct blkfront_info *info = bdev->bd_disk->private_data; | 1200 | struct gendisk *disk = bdev->bd_disk; |
1022 | info->users++; | 1201 | struct blkfront_info *info; |
1023 | return 0; | 1202 | int err = 0; |
1203 | |||
1204 | lock_kernel(); | ||
1205 | |||
1206 | info = disk->private_data; | ||
1207 | if (!info) { | ||
1208 | /* xbdev gone */ | ||
1209 | err = -ERESTARTSYS; | ||
1210 | goto out; | ||
1211 | } | ||
1212 | |||
1213 | mutex_lock(&info->mutex); | ||
1214 | |||
1215 | if (!info->gd) | ||
1216 | /* xbdev is closed */ | ||
1217 | err = -ERESTARTSYS; | ||
1218 | |||
1219 | mutex_unlock(&info->mutex); | ||
1220 | |||
1221 | out: | ||
1222 | unlock_kernel(); | ||
1223 | return err; | ||
1024 | } | 1224 | } |
1025 | 1225 | ||
1026 | static int blkif_release(struct gendisk *disk, fmode_t mode) | 1226 | static int blkif_release(struct gendisk *disk, fmode_t mode) |
1027 | { | 1227 | { |
1028 | struct blkfront_info *info = disk->private_data; | 1228 | struct blkfront_info *info = disk->private_data; |
1029 | info->users--; | 1229 | struct block_device *bdev; |
1030 | if (info->users == 0) { | 1230 | struct xenbus_device *xbdev; |
1031 | /* Check whether we have been instructed to close. We will | 1231 | |
1032 | have ignored this request initially, as the device was | 1232 | lock_kernel(); |
1033 | still mounted. */ | 1233 | |
1034 | struct xenbus_device *dev = info->xbdev; | 1234 | bdev = bdget_disk(disk, 0); |
1035 | enum xenbus_state state = xenbus_read_driver_state(dev->otherend); | 1235 | |
1036 | 1236 | if (bdev->bd_openers) { |
1037 | if (state == XenbusStateClosing && info->is_ready) | 1237 | bdput(bdev); |
1038 | blkfront_closing(dev); | 1238 | goto out; |
1239 | } | ||
1240 | /* | ||
1241 | * Check if we have been instructed to close. We will have | ||
1242 | * deferred this request, because the bdev was still open. | ||
1243 | */ | ||
1244 | |||
1245 | mutex_lock(&info->mutex); | ||
1246 | xbdev = info->xbdev; | ||
1247 | |||
1248 | if (xbdev && xbdev->state == XenbusStateClosing) { | ||
1249 | /* pending switch to state closed */ | ||
1250 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); | ||
1251 | xlvbd_release_gendisk(info); | ||
1252 | xenbus_frontend_closed(info->xbdev); | ||
1253 | } | ||
1254 | |||
1255 | mutex_unlock(&info->mutex); | ||
1256 | |||
1257 | if (!xbdev) { | ||
1258 | /* sudden device removal */ | ||
1259 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); | ||
1260 | xlvbd_release_gendisk(info); | ||
1261 | disk->private_data = NULL; | ||
1262 | kfree(info); | ||
1039 | } | 1263 | } |
1264 | |||
1265 | out: | ||
1266 | unlock_kernel(); | ||
1040 | return 0; | 1267 | return 0; |
1041 | } | 1268 | } |
1042 | 1269 | ||
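
blkif_release() above defers a backend-requested close until the last opener goes away, instead of counting users by hand. The same pattern in miniature, with the BKL and info->mutex handling elided for brevity (mydev_release is a hypothetical name):

static int mydev_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev = bdget_disk(disk, 0);

	if (bdev->bd_openers) {
		/* not the last close: keep deferring */
		bdput(bdev);
		return 0;
	}

	/* honour a close the backend requested while we were busy */
	if (info->xbdev && info->xbdev->state == XenbusStateClosing) {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	bdput(bdev);
	return 0;
}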
@@ -1046,7 +1273,7 @@ static const struct block_device_operations xlvbd_block_fops = | |||
1046 | .open = blkif_open, | 1273 | .open = blkif_open, |
1047 | .release = blkif_release, | 1274 | .release = blkif_release, |
1048 | .getgeo = blkif_getgeo, | 1275 | .getgeo = blkif_getgeo, |
1049 | .locked_ioctl = blkif_ioctl, | 1276 | .ioctl = blkif_ioctl, |
1050 | }; | 1277 | }; |
1051 | 1278 | ||
1052 | 1279 | ||
@@ -1062,7 +1289,7 @@ static struct xenbus_driver blkfront = { | |||
1062 | .probe = blkfront_probe, | 1289 | .probe = blkfront_probe, |
1063 | .remove = blkfront_remove, | 1290 | .remove = blkfront_remove, |
1064 | .resume = blkfront_resume, | 1291 | .resume = blkfront_resume, |
1065 | .otherend_changed = backend_changed, | 1292 | .otherend_changed = blkback_changed, |
1066 | .is_ready = blkfront_is_ready, | 1293 | .is_ready = blkfront_is_ready, |
1067 | }; | 1294 | }; |
1068 | 1295 | ||
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index e1c95e208a66..057413bb16e2 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
@@ -89,10 +89,12 @@ | |||
89 | #include <linux/delay.h> | 89 | #include <linux/delay.h> |
90 | #include <linux/slab.h> | 90 | #include <linux/slab.h> |
91 | #include <linux/blkdev.h> | 91 | #include <linux/blkdev.h> |
92 | #include <linux/smp_lock.h> | ||
92 | #include <linux/ata.h> | 93 | #include <linux/ata.h> |
93 | #include <linux/hdreg.h> | 94 | #include <linux/hdreg.h> |
94 | #include <linux/platform_device.h> | 95 | #include <linux/platform_device.h> |
95 | #if defined(CONFIG_OF) | 96 | #if defined(CONFIG_OF) |
97 | #include <linux/of_address.h> | ||
96 | #include <linux/of_device.h> | 98 | #include <linux/of_device.h> |
97 | #include <linux/of_platform.h> | 99 | #include <linux/of_platform.h> |
98 | #endif | 100 | #endif |
@@ -465,7 +467,7 @@ struct request *ace_get_next_request(struct request_queue * q) | |||
465 | struct request *req; | 467 | struct request *req; |
466 | 468 | ||
467 | while ((req = blk_peek_request(q)) != NULL) { | 469 | while ((req = blk_peek_request(q)) != NULL) { |
468 | if (blk_fs_request(req)) | 470 | if (req->cmd_type == REQ_TYPE_FS) |
469 | break; | 471 | break; |
470 | blk_start_request(req); | 472 | blk_start_request(req); |
471 | __blk_end_request_all(req, -EIO); | 473 | __blk_end_request_all(req, -EIO); |
@@ -901,11 +903,14 @@ static int ace_open(struct block_device *bdev, fmode_t mode) | |||
901 | 903 | ||
902 | dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); | 904 | dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); |
903 | 905 | ||
906 | lock_kernel(); | ||
904 | spin_lock_irqsave(&ace->lock, flags); | 907 | spin_lock_irqsave(&ace->lock, flags); |
905 | ace->users++; | 908 | ace->users++; |
906 | spin_unlock_irqrestore(&ace->lock, flags); | 909 | spin_unlock_irqrestore(&ace->lock, flags); |
907 | 910 | ||
908 | check_disk_change(bdev); | 911 | check_disk_change(bdev); |
912 | unlock_kernel(); | ||
913 | |||
909 | return 0; | 914 | return 0; |
910 | } | 915 | } |
911 | 916 | ||
@@ -917,6 +922,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode) | |||
917 | 922 | ||
918 | dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1); | 923 | dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1); |
919 | 924 | ||
925 | lock_kernel(); | ||
920 | spin_lock_irqsave(&ace->lock, flags); | 926 | spin_lock_irqsave(&ace->lock, flags); |
921 | ace->users--; | 927 | ace->users--; |
922 | if (ace->users == 0) { | 928 | if (ace->users == 0) { |
@@ -924,6 +930,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode) | |||
924 | ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ); | 930 | ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ); |
925 | } | 931 | } |
926 | spin_unlock_irqrestore(&ace->lock, flags); | 932 | spin_unlock_irqrestore(&ace->lock, flags); |
933 | unlock_kernel(); | ||
927 | return 0; | 934 | return 0; |
928 | } | 935 | } |
929 | 936 | ||
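
The lock_kernel()/unlock_kernel() pairs added to ace_open() and ace_release(), like those in the other drivers in this diff, follow the tree-wide BKL pushdown: the block layer no longer takes the big kernel lock around ->open and ->release, so drivers that still depend on it must take it themselves. The generic shape, with hypothetical mydev_* names:

#include <linux/smp_lock.h>

/* driver-specific open work; stands in for the real body */
static int mydev_do_open(struct block_device *bdev, fmode_t mode)
{
	check_disk_change(bdev);
	return 0;
}

static int mydev_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	lock_kernel();		/* BKL is now the driver's responsibility */
	ret = mydev_do_open(bdev, mode);
	unlock_kernel();

	return ret;
}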
@@ -1188,7 +1195,7 @@ static struct platform_driver ace_platform_driver = { | |||
1188 | 1195 | ||
1189 | #if defined(CONFIG_OF) | 1196 | #if defined(CONFIG_OF) |
1190 | static int __devinit | 1197 | static int __devinit |
1191 | ace_of_probe(struct of_device *op, const struct of_device_id *match) | 1198 | ace_of_probe(struct platform_device *op, const struct of_device_id *match) |
1192 | { | 1199 | { |
1193 | struct resource res; | 1200 | struct resource res; |
1194 | resource_size_t physaddr; | 1201 | resource_size_t physaddr; |
@@ -1198,10 +1205,10 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match) | |||
1198 | dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match); | 1205 | dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match); |
1199 | 1206 | ||
1200 | /* device id */ | 1207 | /* device id */ |
1201 | id = of_get_property(op->node, "port-number", NULL); | 1208 | id = of_get_property(op->dev.of_node, "port-number", NULL); |
1202 | 1209 | ||
1203 | /* physaddr */ | 1210 | /* physaddr */ |
1204 | rc = of_address_to_resource(op->node, 0, &res); | 1211 | rc = of_address_to_resource(op->dev.of_node, 0, &res); |
1205 | if (rc) { | 1212 | if (rc) { |
1206 | dev_err(&op->dev, "invalid address\n"); | 1213 | dev_err(&op->dev, "invalid address\n"); |
1207 | return rc; | 1214 | return rc; |
@@ -1209,18 +1216,18 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match) | |||
1209 | physaddr = res.start; | 1216 | physaddr = res.start; |
1210 | 1217 | ||
1211 | /* irq */ | 1218 | /* irq */ |
1212 | irq = irq_of_parse_and_map(op->node, 0); | 1219 | irq = irq_of_parse_and_map(op->dev.of_node, 0); |
1213 | 1220 | ||
1214 | /* bus width */ | 1221 | /* bus width */ |
1215 | bus_width = ACE_BUS_WIDTH_16; | 1222 | bus_width = ACE_BUS_WIDTH_16; |
1216 | if (of_find_property(op->node, "8-bit", NULL)) | 1223 | if (of_find_property(op->dev.of_node, "8-bit", NULL)) |
1217 | bus_width = ACE_BUS_WIDTH_8; | 1224 | bus_width = ACE_BUS_WIDTH_8; |
1218 | 1225 | ||
1219 | /* Call the bus-independent setup code */ | 1226 |
1220 | return ace_alloc(&op->dev, id ? *id : 0, physaddr, irq, bus_width); | 1227 | return ace_alloc(&op->dev, id ? *id : 0, physaddr, irq, bus_width); |
1221 | } | 1228 | } |
1222 | 1229 | ||
1223 | static int __devexit ace_of_remove(struct of_device *op) | 1230 | static int __devexit ace_of_remove(struct platform_device *op) |
1224 | { | 1231 | { |
1225 | ace_free(&op->dev); | 1232 | ace_free(&op->dev); |
1226 | return 0; | 1233 | return 0; |
@@ -1237,13 +1244,12 @@ static const struct of_device_id ace_of_match[] __devinitconst = { | |||
1237 | MODULE_DEVICE_TABLE(of, ace_of_match); | 1244 | MODULE_DEVICE_TABLE(of, ace_of_match); |
1238 | 1245 | ||
1239 | static struct of_platform_driver ace_of_driver = { | 1246 | static struct of_platform_driver ace_of_driver = { |
1240 | .owner = THIS_MODULE, | ||
1241 | .name = "xsysace", | ||
1242 | .match_table = ace_of_match, | ||
1243 | .probe = ace_of_probe, | 1247 | .probe = ace_of_probe, |
1244 | .remove = __devexit_p(ace_of_remove), | 1248 | .remove = __devexit_p(ace_of_remove), |
1245 | .driver = { | 1249 | .driver = { |
1246 | .name = "xsysace", | 1250 | .name = "xsysace", |
1251 | .owner = THIS_MODULE, | ||
1252 | .of_match_table = ace_of_match, | ||
1247 | }, | 1253 | }, |
1248 | }; | 1254 | }; |
1249 | 1255 | ||
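
The xsysace hunks above reflect two related OF API changes in this series: struct of_device gives way to struct platform_device, with the device node reached through op->dev.of_node instead of the removed op->node, and the match table moves from the top level of of_platform_driver into the embedded struct device_driver. The resulting registration shape, sketched with a hypothetical driver:

static int __devinit mydev_of_probe(struct platform_device *op,
				    const struct of_device_id *match)
{
	/* the device node now lives at op->dev.of_node */
	return 0;
}

static int __devexit mydev_of_remove(struct platform_device *op)
{
	return 0;
}

static const struct of_device_id mydev_of_match[] = {
	{ .compatible = "acme,mydev", },	/* hypothetical binding */
	{},
};
MODULE_DEVICE_TABLE(of, mydev_of_match);

static struct of_platform_driver mydev_of_driver = {
	.probe = mydev_of_probe,
	.remove = __devexit_p(mydev_of_remove),
	.driver = {
		.name = "mydev",
		.owner = THIS_MODULE,
		.of_match_table = mydev_of_match,	/* moved under .driver */
	},
};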
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 9114654b54d9..d75b2bb601ad 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/blkdev.h> | 34 | #include <linux/blkdev.h> |
35 | #include <linux/bitops.h> | 35 | #include <linux/bitops.h> |
36 | #include <linux/smp_lock.h> | ||
36 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
37 | 38 | ||
38 | #include <asm/setup.h> | 39 | #include <asm/setup.h> |
@@ -153,6 +154,7 @@ static int z2_open(struct block_device *bdev, fmode_t mode) | |||
153 | 154 | ||
154 | device = MINOR(bdev->bd_dev); | 155 | device = MINOR(bdev->bd_dev); |
155 | 156 | ||
157 | lock_kernel(); | ||
156 | if ( current_device != -1 && current_device != device ) | 158 | if ( current_device != -1 && current_device != device ) |
157 | { | 159 | { |
158 | rc = -EBUSY; | 160 | rc = -EBUSY; |
@@ -294,20 +296,25 @@ static int z2_open(struct block_device *bdev, fmode_t mode) | |||
294 | set_capacity(z2ram_gendisk, z2ram_size >> 9); | 296 | set_capacity(z2ram_gendisk, z2ram_size >> 9); |
295 | } | 297 | } |
296 | 298 | ||
299 | unlock_kernel(); | ||
297 | return 0; | 300 | return 0; |
298 | 301 | ||
299 | err_out_kfree: | 302 | err_out_kfree: |
300 | kfree(z2ram_map); | 303 | kfree(z2ram_map); |
301 | err_out: | 304 | err_out: |
305 | unlock_kernel(); | ||
302 | return rc; | 306 | return rc; |
303 | } | 307 | } |
304 | 308 | ||
305 | static int | 309 | static int |
306 | z2_release(struct gendisk *disk, fmode_t mode) | 310 | z2_release(struct gendisk *disk, fmode_t mode) |
307 | { | 311 | { |
308 | if ( current_device == -1 ) | 312 | lock_kernel(); |
309 | return 0; | 313 | if ( current_device == -1 ) { |
310 | 314 | unlock_kernel(); | |
315 | return 0; | ||
316 | } | ||
317 | unlock_kernel(); | ||
311 | /* | 318 | /* |
312 | * FIXME: unmap memory | 319 | * FIXME: unmap memory |
313 | */ | 320 | */ |