author     Russell King <rmk+kernel@arm.linux.org.uk>  2012-01-05 07:56:44 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2012-01-05 07:56:44 -0500
commit     4045407fd7740642b18bc802e8bab064e79a61e5 (patch)
tree       9412a38886bfad562bc5897c6e4c23bb88703204 /drivers/block
parent     59136ef3c596606d3eef920dc3e0fdfa2ce52c6f (diff)
parent     4c5f830c4c9d4f19c1eef356c0cd322b46d695c9 (diff)
Merge branch 'restart-cleanup' into restart
Conflicts:
arch/arm/kernel/setup.c
Diffstat (limited to 'drivers/block')
 -rw-r--r--  drivers/block/cciss.c       |  12
 -rw-r--r--  drivers/block/cciss_scsi.c  |   1
 -rw-r--r--  drivers/block/loop.c        |  51
 -rw-r--r--  drivers/block/paride/pg.c   |   1
 -rw-r--r--  drivers/block/rbd.c         | 101
 -rw-r--r--  drivers/block/swim3.c       | 362
 6 files changed, 270 insertions(+), 258 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 486f94ef24d4..587cce57adae 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -2600,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
 		c->Request.Timeout = 0;
 		c->Request.CDB[0] = BMIC_WRITE;
 		c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+		c->Request.CDB[7] = (size >> 8) & 0xFF;
+		c->Request.CDB[8] = size & 0xFF;
 		break;
 	case TEST_UNIT_READY:
 		c->Request.CDBLen = 6;
@@ -4319,6 +4322,10 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
 		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
 		return -ENODEV;
 	}
+
+	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+				PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+
 	err = pci_enable_device(h->pdev);
 	if (err) {
 		dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
@@ -4875,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 {
 	if (h->msix_vector || h->msi_vector) {
 		if (!request_irq(h->intr[h->intr_mode], msixhandler,
-				IRQF_DISABLED, h->devname, h))
+				0, h->devname, h))
 			return 0;
 		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
 			" for %s\n", h->intr[h->intr_mode],
@@ -4884,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 	}
 
 	if (!request_irq(h->intr[h->intr_mode], intxhandler,
-			IRQF_DISABLED, h->devname, h))
+			IRQF_SHARED, h->devname, h))
 		return 0;
 	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
 		h->intr[h->intr_mode], h->devname);
@@ -5158,6 +5165,7 @@ reinit_after_soft_reset:
 	h->cciss_max_sectors = 8192;
 
 	rebuild_lun_table(h, 1, 0);
+	cciss_engage_scsi(h);
 	h->busy_initializing = 0;
 	return 1;
 
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 951a4e33b92b..e820b68d2f6c 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1720,5 +1720,6 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
 /* If no tape support, then these become defined out of existence */
 
 #define cciss_scsi_setup(cntl_num)
+#define cciss_engage_scsi(h)
 
 #endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3d806820280e..1e888c9e85b3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -161,17 +161,19 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
 	&xor_funcs
 };
 
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 {
-	loff_t size, offset, loopsize;
+	loff_t size, loopsize;
 
 	/* Compute loopsize in bytes */
 	size = i_size_read(file->f_mapping->host);
-	offset = lo->lo_offset;
 	loopsize = size - offset;
-	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
-		loopsize = lo->lo_sizelimit;
+	/* offset is beyond i_size, wierd but possible */
+	if (loopsize < 0)
+		return 0;
 
+	if (sizelimit > 0 && sizelimit < loopsize)
+		loopsize = sizelimit;
 	/*
 	 * Unfortunately, if we want to do I/O on the device,
 	 * the number of 512-byte sectors has to fit into a sector_t.
@@ -179,17 +181,25 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
 	return loopsize >> 9;
 }
 
+static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+{
+	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
+}
+
 static int
-figure_loop_size(struct loop_device *lo)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
-	loff_t size = get_loop_size(lo, lo->lo_backing_file);
+	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
 	sector_t x = (sector_t)size;
 
 	if (unlikely((loff_t)x != size))
 		return -EFBIG;
-
+	if (lo->lo_offset != offset)
+		lo->lo_offset = offset;
+	if (lo->lo_sizelimit != sizelimit)
+		lo->lo_sizelimit = sizelimit;
 	set_capacity(lo->lo_disk, x);
 	return 0;
 }
 
 static inline int
@@ -372,7 +382,8 @@ do_lo_receive(struct loop_device *lo,
 
 	if (retval < 0)
 		return retval;
-
+	if (retval != bvec->bv_len)
+		return -EIO;
 	return 0;
 }
 
@@ -411,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
-	 * image a.k.a. discard. However we do support discard if
+	 * image a.k.a. discard. However we do not support discard if
 	 * encryption is enabled, because it may give an attacker
 	 * useful information.
 	 */
@@ -786,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
 	}
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
-	q->limits.discard_alignment = inode->i_sb->s_blocksize;
+	q->limits.discard_alignment = 0;
 	q->limits.max_discard_sectors = UINT_MAX >> 9;
 	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
@@ -1058,9 +1069,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 
 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit) {
-		lo->lo_offset = info->lo_offset;
-		lo->lo_sizelimit = info->lo_sizelimit;
-		if (figure_loop_size(lo))
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
 			return -EFBIG;
 	}
 	loop_config_discard(lo);
@@ -1246,7 +1255,7 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
 	err = -ENXIO;
 	if (unlikely(lo->lo_state != Lo_bound))
 		goto out;
-	err = figure_loop_size(lo);
+	err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 	if (unlikely(err))
 		goto out;
 	sec = get_capacity(lo->lo_disk);
@@ -1284,13 +1293,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 			goto out_unlocked;
 		break;
 	case LOOP_SET_STATUS:
-		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
+		err = -EPERM;
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+			err = loop_set_status_old(lo,
+					(struct loop_info __user *)arg);
 		break;
 	case LOOP_GET_STATUS:
 		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
 		break;
	case LOOP_SET_STATUS64:
-		err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
+		err = -EPERM;
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+			err = loop_set_status64(lo,
+					(struct loop_info64 __user *) arg);
 		break;
 	case LOOP_GET_STATUS64:
 		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 6b9a2000d56a..a79fb4f7ff62 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -630,6 +630,7 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t
 	if (dev->status & 0x10)
 		return -ETIME;
 
+	memset(&hdr, 0, sizeof(hdr));
 	hdr.magic = PG_MAGIC;
 	hdr.dlen = dev->dlen;
 	copy = 0;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424359b0..148ab944378d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list);	/* clients */
 
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
 	u32 snap_count = le32_to_cpu(ondisk->snap_count);
 	int ret = -ENOMEM;
 
+	if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+		return -ENXIO;
+	}
+
 	init_rwsem(&header->snap_rwsem);
 	header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 	header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@ fail:
 }
 
 /*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-				     u64 snapid,
-				     const char *obj)
-{
-	struct ceph_osd_req_op *ops;
-	int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-	if (ret < 0)
-		return ret;
-
-	ops[0].snap.snapid = snapid;
-
-	ret = rbd_req_sync_op(dev, NULL,
-			      CEPH_NOSNAP,
-			      0,
-			      CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-			      ops,
-			      1, obj, 0, 0, NULL, NULL, NULL);
-
-	rbd_destroy_ops(ops);
-
-	return ret;
-}
-
-/*
  * Request sync osd read
  */
 static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
 		goto out_dh;
 
 	rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-	if (rc < 0)
+	if (rc < 0) {
+		if (rc == -ENXIO) {
+			pr_warning("unrecognized header format"
+				   " for image %s", rbd_dev->obj);
+		}
 		goto out_dh;
+	}
 
 	if (snap_count != header->total_snaps) {
 		snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 static struct attribute *rbd_attrs[] = {
 	&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
 	&dev_attr_current_snap.attr,
 	&dev_attr_refresh.attr,
 	&dev_attr_create_snap.attr,
-	&dev_attr_rollback_snap.attr,
 	NULL
 };
 
@@ -2424,64 +2401,6 @@ err_unlock:
 	return ret;
 }
 
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t count)
-{
-	struct rbd_device *rbd_dev = dev_to_rbd(dev);
-	int ret;
-	u64 snapid;
-	u64 cur_ofs;
-	char *seg_name = NULL;
-	char *snap_name = kmalloc(count + 1, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (!snap_name)
-		return ret;
-
-	/* parse snaps add command */
-	snprintf(snap_name, count, "%s", buf);
-	seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
-	if (!seg_name)
-		goto done;
-
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-	ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
-	if (ret < 0)
-		goto done_unlock;
-
-	dout("snapid=%lld\n", snapid);
-
-	cur_ofs = 0;
-	while (cur_ofs < rbd_dev->header.image_size) {
-		cur_ofs += rbd_get_segment(&rbd_dev->header,
-					   rbd_dev->obj,
-					   cur_ofs, (u64)-1,
-					   seg_name, NULL);
-		dout("seg_name=%s\n", seg_name);
-
-		ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
-		if (ret < 0)
-			pr_warning("could not roll back obj %s err=%d\n",
-				   seg_name, ret);
-	}
-
-	ret = __rbd_update_snaps(rbd_dev);
-	if (ret < 0)
-		goto done_unlock;
-
-	ret = count;
-
-done_unlock:
-	mutex_unlock(&ctl_mutex);
-done:
-	kfree(seg_name);
-	kfree(snap_name);
-
-	return ret;
-}
-
 static struct bus_attribute rbd_bus_attrs[] = {
 	__ATTR(add, S_IWUSR, NULL, rbd_add),
 	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17ad..89ddab127e33 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
  * handle GCR disks
  */
 
+#undef DEBUG
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -36,13 +38,11 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
 #define MAX_FLOPPIES	2
 
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
 enum swim_state {
 	idle,
 	locating,
@@ -177,7 +177,6 @@ struct swim3 {
 
 struct floppy_state {
 	enum swim_state state;
-	spinlock_t lock;
 	struct swim3 __iomem *swim3;	/* hardware registers */
 	struct dbdma_regs __iomem *dma;	/* DMA controller registers */
 	int	swim3_intr;	/* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
 	int	wanted;
 	struct macio_dev *mdev;
 	char	dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+	int	index;
+	struct request *cur_req;
 };
 
+#define swim3_err(fmt, arg...)	dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...)	dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...)	dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...)	dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...)	do { } while(0)
+#endif
+
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
 static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
 	0, 0, 0, 0, 0, 0
 };
 
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
-			void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
 static void seek_track(struct floppy_state *fs, int n);
 static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
 static void act(struct floppy_state *fs);
 static void scan_timeout(unsigned long data);
 static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
 					unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
 {
-	if (__blk_end_request(fd_req, err, nr_bytes))
-		return true;
+	struct request *req = fs->cur_req;
+	int rc;
 
-	fd_req = NULL;
-	return false;
-}
+	swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
+		  err, nr_bytes, req);
 
-static bool swim3_end_request_cur(int err)
-{
-	return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+	if (err)
+		nr_bytes = blk_rq_cur_bytes(req);
+	rc = __blk_end_request(req, err, nr_bytes);
+	if (rc)
+		return true;
+	fs->cur_req = NULL;
+	return false;
 }
 
 static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
 	return (stat & DATA) == 0;
 }
 
-static void do_fd_request(struct request_queue * q)
-{
-	int i;
-
-	for(i=0; i<floppy_count; i++) {
-		struct floppy_state *fs = &floppy_states[i];
-		if (fs->mdev->media_bay &&
-		    check_media_bay(fs->mdev->media_bay) != MB_FD)
-			continue;
-		start_request(fs);
-	}
-}
-
 static void start_request(struct floppy_state *fs)
 {
 	struct request *req;
 	unsigned long x;
 
+	swim3_dbg("start request, initial state=%d\n", fs->state);
+
 	if (fs->state == idle && fs->wanted) {
 		fs->state = available;
 		wake_up(&fs->wait);
 		return;
 	}
 	while (fs->state == idle) {
-		if (!fd_req) {
-			fd_req = blk_fetch_request(swim3_queue);
-			if (!fd_req)
+		swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+		if (!fs->cur_req) {
+			fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+			swim3_dbg(" fetched request %p\n", fs->cur_req);
+			if (!fs->cur_req)
 				break;
 		}
-		req = fd_req;
-#if 0
-		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
-		       req->rq_disk->disk_name, req->cmd,
-		       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
-		printk(" errors=%d current_nr_sectors=%u\n",
-		       req->errors, blk_rq_cur_sectors(req));
+		req = fs->cur_req;
+
+		if (fs->mdev->media_bay &&
+		    check_media_bay(fs->mdev->media_bay) != MB_FD) {
+			swim3_dbg("%s", " media bay absent, dropping req\n");
+			swim3_end_request(fs, -ENODEV, 0);
+			continue;
+		}
+
+#if 0 /* This is really too verbose */
+		swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+			  req->rq_disk->disk_name, req->cmd,
+			  (long)blk_rq_pos(req), blk_rq_sectors(req),
+			  req->buffer);
+		swim3_dbg(" errors=%d current_nr_sectors=%u\n",
+			  req->errors, blk_rq_cur_sectors(req));
 #endif
 
 		if (blk_rq_pos(req) >= fs->total_secs) {
-			swim3_end_request_cur(-EIO);
+			swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
+				  (long)blk_rq_pos(req), (long)fs->total_secs);
+			swim3_end_request(fs, -EIO, 0);
 			continue;
 		}
 		if (fs->ejected) {
-			swim3_end_request_cur(-EIO);
+			swim3_dbg("%s", " disk ejected\n");
+			swim3_end_request(fs, -EIO, 0);
 			continue;
 		}
 
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
 			if (fs->write_prot < 0)
 				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
 			if (fs->write_prot) {
-				swim3_end_request_cur(-EIO);
+				swim3_dbg("%s", " try to write, disk write protected\n");
+				swim3_end_request(fs, -EIO, 0);
 				continue;
 			}
 		}
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
 		x = ((long)blk_rq_pos(req)) % fs->secpercyl;
 		fs->head = x / fs->secpertrack;
 		fs->req_sector = x % fs->secpertrack + 1;
-		fd_req = req;
 		fs->state = do_transfer;
 		fs->retries = 0;
 
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
 	}
 }
 
+static void do_fd_request(struct request_queue * q)
+{
+	start_request(q->queuedata);
+}
+
 static void set_timeout(struct floppy_state *fs, int nticks,
 			void (*proc)(unsigned long))
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&fs->lock, flags);
 	if (fs->timeout_pending)
 		del_timer(&fs->timeout);
 	fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
 	fs->timeout.data = (unsigned long) fs;
 	add_timer(&fs->timeout);
 	fs->timeout_pending = 1;
-	spin_unlock_irqrestore(&fs->lock, flags);
 }
 
 static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
 	struct swim3 __iomem *sw = fs->swim3;
 	struct dbdma_cmd *cp = fs->dma_cmd;
 	struct dbdma_regs __iomem *dr = fs->dma;
+	struct request *req = fs->cur_req;
 
-	if (blk_rq_cur_sectors(fd_req) <= 0) {
-		printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+	if (blk_rq_cur_sectors(req) <= 0) {
+		swim3_warn("%s", "Transfer 0 sectors ?\n");
 		return;
 	}
-	if (rq_data_dir(fd_req) == WRITE)
+	if (rq_data_dir(req) == WRITE)
 		n = 1;
 	else {
 		n = fs->secpertrack - fs->req_sector + 1;
-		if (n > blk_rq_cur_sectors(fd_req))
-			n = blk_rq_cur_sectors(fd_req);
+		if (n > blk_rq_cur_sectors(req))
+			n = blk_rq_cur_sectors(req);
 	}
+
+	swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
+		  fs->req_sector, fs->secpertrack, fs->head, n);
+
 	fs->scount = n;
 	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
 	out_8(&sw->sector, fs->req_sector);
 	out_8(&sw->nsect, n);
 	out_8(&sw->gap3, 0);
 	out_le32(&dr->cmdptr, virt_to_bus(cp));
-	if (rq_data_dir(fd_req) == WRITE) {
+	if (rq_data_dir(req) == WRITE) {
 		/* Set up 3 dma commands: write preamble, data, postamble */
 		init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
 		++cp;
-		init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+		init_dma(cp, OUTPUT_MORE, req->buffer, 512);
 		++cp;
 		init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
 	} else {
-		init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+		init_dma(cp, INPUT_LAST, req->buffer, n * 512);
 	}
 	++cp;
 	out_le16(&cp->command, DBDMA_STOP);
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
 	in_8(&sw->error);
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
-	if (rq_data_dir(fd_req) == WRITE)
+	if (rq_data_dir(req) == WRITE)
 		out_8(&sw->control_bis, WRITE_SECTORS);
 	in_8(&sw->intr);
 	out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
 static void act(struct floppy_state *fs)
 {
 	for (;;) {
+		swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+			  fs->state, fs->req_cyl, fs->cur_cyl);
+
 		switch (fs->state) {
 		case idle:
 			return;		/* XXX shouldn't get here */
 
 		case locating:
 			if (swim3_readbit(fs, TRACK_ZERO)) {
+				swim3_dbg("%s", " locate track 0\n");
 				fs->cur_cyl = 0;
 				if (fs->req_cyl == 0)
 					fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
 				break;
 			}
 			if (fs->req_cyl == fs->cur_cyl) {
-				printk("whoops, seeking 0\n");
+				swim3_warn("%s", "Whoops, seeking 0\n");
 				fs->state = do_transfer;
 				break;
 			}
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
 		case do_transfer:
 			if (fs->cur_cyl != fs->req_cyl) {
 				if (fs->retries > 5) {
-					swim3_end_request_cur(-EIO);
+					swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+						  fs->req_cyl, fs->cur_cyl);
+					swim3_end_request(fs, -EIO, 0);
 					fs->state = idle;
 					return;
 				}
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
 			return;
 
 		default:
-			printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+			swim3_err("Unknown state %d\n", fs->state);
 			return;
 		}
 	}
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
+
+	swim3_dbg("* scan timeout, state=%d\n", fs->state);
 
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
 	out_8(&sw->select, RELAX);
 	out_8(&sw->intr_enable, 0);
 	fs->cur_cyl = -1;
 	if (fs->retries > 5) {
-		swim3_end_request_cur(-EIO);
+		swim3_end_request(fs, -EIO, 0);
 		fs->state = idle;
 		start_request(fs);
 	} else {
 		fs->state = jogging;
 		act(fs);
 	}
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void seek_timeout(unsigned long data)
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
+
+	swim3_dbg("* seek timeout, state=%d\n", fs->state);
 
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_8(&sw->control_bic, DO_SEEK);
 	out_8(&sw->select, RELAX);
 	out_8(&sw->intr_enable, 0);
-	printk(KERN_ERR "swim3: seek timeout\n");
-	swim3_end_request_cur(-EIO);
+	swim3_err("%s", "Seek timeout\n");
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void settle_timeout(unsigned long data)
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
+
+	swim3_dbg("* settle timeout, state=%d\n", fs->state);
 
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	if (swim3_readbit(fs, SEEK_COMPLETE)) {
 		out_8(&sw->select, RELAX);
 		fs->state = locating;
 		act(fs);
-		return;
+		goto unlock;
 	}
 	out_8(&sw->select, RELAX);
 	if (fs->settle_time < 2*HZ) {
 		++fs->settle_time;
 		set_timeout(fs, 1, settle_timeout);
-		return;
+		goto unlock;
 	}
-	printk(KERN_ERR "swim3: seek settle timeout\n");
-	swim3_end_request_cur(-EIO);
+	swim3_err("%s", "Seek settle timeout\n");
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+unlock:
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
 	struct dbdma_regs __iomem *dr = fs->dma;
+	unsigned long flags;
 	int n;
 
+	swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_le32(&dr->control, RUN << 16);
 	/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
 	out_8(&sw->intr_enable, 0);
 	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
 	out_8(&sw->select, RELAX);
-	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-	       (rq_data_dir(fd_req)==WRITE? "writ": "read"),
-	       (long)blk_rq_pos(fd_req));
-	swim3_end_request_cur(-EIO);
+	swim3_err("Timeout %sing sector %ld\n",
+		  (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+		  (long)blk_rq_pos(fs->cur_req));
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 	int stat, resid;
 	struct dbdma_regs __iomem *dr;
 	struct dbdma_cmd *cp;
+	unsigned long flags;
+	struct request *req = fs->cur_req;
+
+	swim3_dbg("* interrupt, state=%d\n", fs->state);
 
+	spin_lock_irqsave(&swim3_lock, flags);
 	intr = in_8(&sw->intr);
 	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
 	if ((intr & ERROR_INTR) && fs->state != do_transfer)
-		printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
-		       fs->state, rq_data_dir(fd_req), intr, err);
+		swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+			  fs->state, rq_data_dir(req), intr, err);
 	switch (fs->state) {
 	case locating:
 		if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 			del_timer(&fs->timeout);
 			fs->timeout_pending = 0;
 			if (sw->ctrack == 0xff) {
-				printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+				swim3_err("%s", "Seen sector but cyl=ff?\n");
 				fs->cur_cyl = -1;
 				if (fs->retries > 5) {
-					swim3_end_request_cur(-EIO);
+					swim3_end_request(fs, -EIO, 0);
 					fs->state = idle;
 					start_request(fs);
 				} else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 			fs->cur_cyl = sw->ctrack;
 			fs->cur_sector = sw->csect;
 			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
-				printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
-				       fs->expect_cyl, fs->cur_cyl);
+				swim3_err("Expected cyl %d, got %d\n",
+					  fs->expect_cyl, fs->cur_cyl);
 			fs->state = do_transfer;
 			act(fs);
 		}
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 		fs->timeout_pending = 0;
 		dr = fs->dma;
 		cp = fs->dma_cmd;
-		if (rq_data_dir(fd_req) == WRITE)
+		if (rq_data_dir(req) == WRITE)
 			++cp;
 		/*
 		 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 		if (intr & ERROR_INTR) {
 			n = fs->scount - 1 - resid / 512;
 			if (n > 0) {
-				blk_update_request(fd_req, 0, n << 9);
+				blk_update_request(req, 0, n << 9);
 				fs->req_sector += n;
 			}
 			if (fs->retries < 5) {
 				++fs->retries;
 				act(fs);
 			} else {
-				printk("swim3: error %sing block %ld (err=%x)\n",
-				       rq_data_dir(fd_req) == WRITE? "writ": "read",
-				       (long)blk_rq_pos(fd_req), err);
-				swim3_end_request_cur(-EIO);
+				swim3_err("Error %sing block %ld (err=%x)\n",
+					  rq_data_dir(req) == WRITE? "writ": "read",
+					  (long)blk_rq_pos(req), err);
+				swim3_end_request(fs, -EIO, 0);
 				fs->state = idle;
 			}
 		} else {
 			if ((stat & ACTIVE) == 0 || resid != 0) {
 				/* musta been an error */
-				printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
-				printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
-				       fs->state, rq_data_dir(fd_req), intr, err);
-				swim3_end_request_cur(-EIO);
+				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+				swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
+					  fs->state, rq_data_dir(req), intr, err);
+				swim3_end_request(fs, -EIO, 0);
 				fs->state = idle;
 				start_request(fs);
 				break;
 			}
-			if (swim3_end_request(0, fs->scount << 9)) {
+			fs->retries = 0;
+			if (swim3_end_request(fs, 0, fs->scount << 9)) {
 				fs->req_sector += fs->scount;
 				if (fs->req_sector > fs->secpertrack) {
 					fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 		start_request(fs);
 		break;
 	default:
-		printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+		swim3_err("Don't know what to do in state %d\n", fs->state);
 	}
+	spin_unlock_irqrestore(&swim3_lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
 }
 */
 
+/* Called under the mutex to grab exclusive access to a drive */
 static int grab_drive(struct floppy_state *fs, enum swim_state state,
 		      int interruptible)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&fs->lock, flags);
-	if (fs->state != idle) {
+	swim3_dbg("%s", "-> grab drive\n");
+
+	spin_lock_irqsave(&swim3_lock, flags);
+	if (fs->state != idle && fs->state != available) {
 		++fs->wanted;
 		while (fs->state != available) {
+			spin_unlock_irqrestore(&swim3_lock, flags);
 			if (interruptible && signal_pending(current)) {
 				--fs->wanted;
-				spin_unlock_irqrestore(&fs->lock, flags);
 				return -EINTR;
 			}
 			interruptible_sleep_on(&fs->wait);
+			spin_lock_irqsave(&swim3_lock, flags);
 		}
 		--fs->wanted;
 	}
 	fs->state = state;
-	spin_unlock_irqrestore(&fs->lock, flags);
+	spin_unlock_irqrestore(&swim3_lock, flags);
+
 	return 0;
 }
 
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&fs->lock, flags);
+	swim3_dbg("%s", "-> release drive\n");
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->state = idle;
 	start_request(fs);
-	spin_unlock_irqrestore(&fs->lock, flags);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	struct floppy_state *fs = disk->private_data;
 	struct swim3 __iomem *sw = fs->swim3;
+
 	mutex_lock(&swim3_mutex);
 	if (fs->ref_count > 0 && --fs->ref_count == 0) {
 		swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
 	.revalidate_disk= floppy_revalidate,
 };
 
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+	struct floppy_state *fs = macio_get_drvdata(mdev);
+	struct swim3 __iomem *sw = fs->swim3;
+
+	if (!fs)
+		return;
+	if (mb_state != MB_FD)
+		return;
+
+	/* Clear state */
+	out_8(&sw->intr_enable, 0);
+	in_8(&sw->intr);
+	in_8(&sw->error);
+}
+
 static int swim3_add_device(struct macio_dev *mdev, int index)
 {
 	struct device_node *swim = mdev->ofdev.dev.of_node;
 	struct floppy_state *fs = &floppy_states[index];
 	int rc = -EBUSY;
 
+	/* Do this first for message macros */
+	memset(fs, 0, sizeof(*fs));
+	fs->mdev = mdev;
+	fs->index = index;
+
 	/* Check & Request resources */
 	if (macio_resource_count(mdev) < 2) {
-		printk(KERN_WARNING "ifd%d: no address for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "No address in device-tree\n");
 		return -ENXIO;
 	}
-	if (macio_irq_count(mdev) < 2) {
-		printk(KERN_WARNING "fd%d: no intrs for device %s\n",
-		       index, swim->full_name);
+	if (macio_irq_count(mdev) < 1) {
+		swim3_err("%s", "No interrupt in device-tree\n");
+		return -ENXIO;
 	}
 	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
-		printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Can't request mmio resource\n");
 		return -EBUSY;
 	}
 	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
-		printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Can't request dma resource\n");
 		macio_release_resource(mdev, 0);
 		return -EBUSY;
 	}
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 	if (mdev->media_bay == NULL)
 		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
 
-	memset(fs, 0, sizeof(*fs));
-	spin_lock_init(&fs->lock);
 	fs->state = idle;
 	fs->swim3 = (struct swim3 __iomem *)
 		ioremap(macio_resource_start(mdev, 0), 0x200);
 	if (fs->swim3 == NULL) {
-		printk("fd%d: couldn't map registers for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Couldn't map mmio registers\n");
 		rc = -ENOMEM;
 		goto out_release;
 	}
 	fs->dma = (struct dbdma_regs __iomem *)
 		ioremap(macio_resource_start(mdev, 1), 0x200);
 	if (fs->dma == NULL) {
-		printk("fd%d: couldn't map DMA for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Couldn't map dma registers\n");
 		iounmap(fs->swim3);
 		rc = -ENOMEM;
 		goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 	fs->secpercyl = 36;
 	fs->secpertrack = 18;
 	fs->total_secs = 2880;
-	fs->mdev = mdev;
 	init_waitqueue_head(&fs->wait);
 
 	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
 	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
 	st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
 
+	if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+		swim3_mb_event(mdev, MB_FD);
+
 	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
-		printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
-		       index, fs->swim3_intr, swim->full_name);
+		swim3_err("%s", "Couldn't request interrupt\n");
 		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
 		goto out_unmap;
 		return -EBUSY;
 	}
-/*
-	if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
-		printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
-		       fs->dma_intr);
-		return -EBUSY;
-	}
-*/
 
 	init_timer(&fs->timeout);
 
-	printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+	swim3_info("SWIM3 floppy controller %s\n",
 		mdev->media_bay ? "in media bay" : "");
 
 	return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 
 static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
-	int i, rc;
 	struct gendisk *disk;
+	int index, rc;
+
+	index = floppy_count++;
+	if (index >= MAX_FLOPPIES)
+		return -ENXIO;
 
 	/* Add the drive */
-	rc = swim3_add_device(mdev, floppy_count);
+	rc = swim3_add_device(mdev, index);
 	if (rc)
 		return rc;
+	/* Now register that disk. Same comment about failure handling */
+	disk = disks[index] = alloc_disk(1);
+	if (disk == NULL)
+		return -ENOMEM;
+	disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+	if (disk->queue == NULL) {
+		put_disk(disk);
+		return -ENOMEM;
+	}
+	disk->queue->queuedata = &floppy_states[index];
 
-	/* Now create the queue if not there yet */
-	if (swim3_queue == NULL) {
+	if (index == 0) {
 		/* If we failed, there isn't much we can do as the driver is still
 		 * too dumb to remove the device, just bail out
 		 */
 		if (register_blkdev(FLOPPY_MAJOR, "fd"))
 			return 0;
-		swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
-		if (swim3_queue == NULL) {
-			unregister_blkdev(FLOPPY_MAJOR, "fd");
-			return 0;
-		}
 	}
 
-	/* Now register that disk. Same comment about failure handling */
-	i = floppy_count++;
-	disk = disks[i] = alloc_disk(1);
-	if (disk == NULL)
-		return 0;
-
 	disk->major = FLOPPY_MAJOR;
-	disk->first_minor = i;
+	disk->first_minor = index;
 	disk->fops = &floppy_fops;
-	disk->private_data = &floppy_states[i];
-	disk->queue = swim3_queue;
+	disk->private_data = &floppy_states[index];
 	disk->flags |= GENHD_FL_REMOVABLE;
-	sprintf(disk->disk_name, "fd%d", i);
+	sprintf(disk->disk_name, "fd%d", index);
 	set_capacity(disk, 2880);
 	add_disk(disk);
 
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
 		.of_match_table	= swim3_match,
 	},
 	.probe		= swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+	.mediabay_event	= swim3_mb_event,
+#endif
 #if 0
 	.suspend	= swim3_suspend,
 	.resume		= swim3_resume,