Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/viodasd.c | 86
-rw-r--r--  drivers/char/hvc_console.c | 6
-rw-r--r--  drivers/char/hvc_console.h | 4
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 2
-rw-r--r--  drivers/hwmon/ams/ams-core.c | 11
-rw-r--r--  drivers/hwmon/ams/ams-i2c.c | 2
-rw-r--r--  drivers/hwmon/ams/ams-pmu.c | 2
-rw-r--r--  drivers/hwmon/ams/ams.h | 1
-rw-r--r--  drivers/i2c/busses/Kconfig | 10
-rw-r--r--  drivers/i2c/busses/Makefile | 1
-rw-r--r--  drivers/i2c/busses/i2c-octeon.c | 651
-rw-r--r--  drivers/ide/au1xxx-ide.c | 21
-rw-r--r--  drivers/macintosh/therm_adt746x.c | 36
-rw-r--r--  drivers/mmc/host/au1xmmc.c | 12
-rw-r--r--  drivers/mtd/maps/Kconfig | 6
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/alchemy-flash.c | 166
-rw-r--r--  drivers/mtd/nand/au1550nd.c | 4
-rw-r--r--  drivers/net/au1000_eth.c | 441
-rw-r--r--  drivers/net/au1000_eth.h | 9
-rw-r--r--  drivers/net/cpmac.c | 10
-rw-r--r--  drivers/net/irda/au1k_ir.c | 14
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 6
-rw-r--r--  drivers/pcmcia/Kconfig | 30
-rw-r--r--  drivers/pcmcia/Makefile | 16
-rw-r--r--  drivers/pcmcia/at91_cf.c | 2
-rw-r--r--  drivers/pcmcia/au1000_db1x00.c | 305
-rw-r--r--  drivers/pcmcia/au1000_generic.c | 10
-rw-r--r--  drivers/pcmcia/au1000_generic.h | 18
-rw-r--r--  drivers/pcmcia/au1000_pb1x00.c | 119
-rw-r--r--  drivers/pcmcia/au1000_xxs1500.c | 188
-rw-r--r--  drivers/pcmcia/bfin_cf_pcmcia.c | 2
-rw-r--r--  drivers/pcmcia/cardbus.c | 175
-rw-r--r--  drivers/pcmcia/cistpl.c | 606
-rw-r--r--  drivers/pcmcia/cs.c | 312
-rw-r--r--  drivers/pcmcia/cs_internal.h | 89
-rw-r--r--  drivers/pcmcia/db1xxx_ss.c | 623
-rw-r--r--  drivers/pcmcia/ds.c | 333
-rw-r--r--  drivers/pcmcia/electra_cf.c | 2
-rw-r--r--  drivers/pcmcia/i82365.h | 4
-rw-r--r--  drivers/pcmcia/m32r_cfc.c | 2
-rw-r--r--  drivers/pcmcia/m8xx_pcmcia.c | 4
-rw-r--r--  drivers/pcmcia/o2micro.h | 45
-rw-r--r--  drivers/pcmcia/omap_cf.c | 2
-rw-r--r--  drivers/pcmcia/pcmcia_ioctl.c | 42
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 169
-rw-r--r--  drivers/pcmcia/rsrc_mgr.c | 48
-rw-r--r--  drivers/pcmcia/rsrc_nonstatic.c | 285
-rw-r--r--  drivers/pcmcia/socket_sysfs.c | 196
-rw-r--r--  drivers/pcmcia/xxs1500_ss.c | 350
-rw-r--r--  drivers/pcmcia/yenta_socket.c | 5
-rw-r--r--  drivers/ps3/ps3av.c | 2
-rw-r--r--  drivers/rtc/Kconfig | 10
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-mpc5121.c | 387
-rw-r--r--  drivers/s390/block/dasd.c | 48
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 13
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 1
-rw-r--r--  drivers/s390/block/dasd_int.h | 1
-rw-r--r--  drivers/s390/block/dasd_proc.c | 109
-rw-r--r--  drivers/s390/char/zcore.c | 163
-rw-r--r--  drivers/s390/cio/ccwreq.c | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 2
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 4
-rw-r--r--  drivers/s390/cio/cio.c | 14
-rw-r--r--  drivers/s390/cio/crw.c | 29
-rw-r--r--  drivers/s390/cio/css.c | 79
-rw-r--r--  drivers/s390/cio/css.h | 5
-rw-r--r--  drivers/s390/cio/device.c | 160
-rw-r--r--  drivers/s390/cio/device.h | 3
-rw-r--r--  drivers/s390/cio/device_fsm.c | 43
-rw-r--r--  drivers/s390/cio/qdio.h | 92
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 23
-rw-r--r--  drivers/s390/cio/qdio_main.c | 28
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 20
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 158
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 4
-rw-r--r--  drivers/serial/8250.c | 15
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 251
-rw-r--r--  drivers/serial/serial_cs.c | 7
-rw-r--r--  drivers/spi/au1550_spi.c | 6
-rw-r--r--  drivers/staging/octeon/Makefile | 1
-rw-r--r--  drivers/staging/octeon/ethernet-defines.h | 34
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c | 6
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.h | 1
-rw-r--r--  drivers/staging/octeon/ethernet-mem.c | 124
-rw-r--r--  drivers/staging/octeon/ethernet-proc.c | 144
-rw-r--r--  drivers/staging/octeon/ethernet-proc.h | 29
-rw-r--r--  drivers/staging/octeon/ethernet-rgmii.c | 56
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c | 384
-rw-r--r--  drivers/staging/octeon/ethernet-rx.h | 25
-rw-r--r--  drivers/staging/octeon/ethernet-sgmii.c | 1
-rw-r--r--  drivers/staging/octeon/ethernet-spi.c | 1
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c | 441
-rw-r--r--  drivers/staging/octeon/ethernet-tx.h | 29
-rw-r--r--  drivers/staging/octeon/ethernet-util.h | 13
-rw-r--r--  drivers/staging/octeon/ethernet-xaui.c | 1
-rw-r--r--  drivers/staging/octeon/ethernet.c | 254
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h | 58
-rw-r--r--  drivers/staging/sm7xx/smtc2d.c | 2
-rw-r--r--  drivers/staging/sm7xx/smtc2d.h | 2
-rw-r--r--  drivers/staging/sm7xx/smtcfb.c | 2
-rw-r--r--  drivers/staging/sm7xx/smtcfb.h | 2
-rw-r--r--  drivers/video/fsl-diu-fb.c | 5
-rw-r--r--  drivers/watchdog/ar7_wdt.c | 18
106 files changed, 5077 insertions, 3724 deletions
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index a8c8b56b275e..1b3def1e8591 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -28,6 +28,9 @@
28 * All disk operations are performed by sending messages back and forth to 28 * All disk operations are performed by sending messages back and forth to
29 * the OS/400 partition. 29 * the OS/400 partition.
30 */ 30 */
31
32#define pr_fmt(fmt) "viod: " fmt
33
31#include <linux/major.h> 34#include <linux/major.h>
32#include <linux/fs.h> 35#include <linux/fs.h>
33#include <linux/module.h> 36#include <linux/module.h>
@@ -63,9 +66,6 @@ MODULE_LICENSE("GPL");
63 66
64#define VIOD_VERS "1.64" 67#define VIOD_VERS "1.64"
65 68
66#define VIOD_KERN_WARNING KERN_WARNING "viod: "
67#define VIOD_KERN_INFO KERN_INFO "viod: "
68
69enum { 69enum {
70 PARTITION_SHIFT = 3, 70 PARTITION_SHIFT = 3,
71 MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS, 71 MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
@@ -156,7 +156,7 @@ static int viodasd_open(struct block_device *bdev, fmode_t mode)
156 ((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32), 156 ((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
157 0, 0, 0); 157 0, 0, 0);
158 if (hvrc != 0) { 158 if (hvrc != 0) {
159 printk(VIOD_KERN_WARNING "HV open failed %d\n", (int)hvrc); 159 pr_warning("HV open failed %d\n", (int)hvrc);
160 return -EIO; 160 return -EIO;
161 } 161 }
162 162
@@ -167,9 +167,8 @@ static int viodasd_open(struct block_device *bdev, fmode_t mode)
167 const struct vio_error_entry *err = 167 const struct vio_error_entry *err =
168 vio_lookup_rc(viodasd_err_table, we.sub_result); 168 vio_lookup_rc(viodasd_err_table, we.sub_result);
169 169
170 printk(VIOD_KERN_WARNING 170 pr_warning("bad rc opening disk: %d:0x%04x (%s)\n",
171 "bad rc opening disk: %d:0x%04x (%s)\n", 171 (int)we.rc, we.sub_result, err->msg);
172 (int)we.rc, we.sub_result, err->msg);
173 return -EIO; 172 return -EIO;
174 } 173 }
175 174
@@ -195,8 +194,7 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode)
195 ((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */, 194 ((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
196 0, 0, 0); 195 0, 0, 0);
197 if (hvrc != 0) 196 if (hvrc != 0)
198 printk(VIOD_KERN_WARNING "HV close call failed %d\n", 197 pr_warning("HV close call failed %d\n", (int)hvrc);
199 (int)hvrc);
200 return 0; 198 return 0;
201} 199}
202 200
@@ -288,8 +286,7 @@ static int send_request(struct request *req)
288 bevent = (struct vioblocklpevent *) 286 bevent = (struct vioblocklpevent *)
289 vio_get_event_buffer(viomajorsubtype_blockio); 287 vio_get_event_buffer(viomajorsubtype_blockio);
290 if (bevent == NULL) { 288 if (bevent == NULL) {
291 printk(VIOD_KERN_WARNING 289 pr_warning("error allocating disk event buffer\n");
292 "error allocating disk event buffer\n");
293 goto error_ret; 290 goto error_ret;
294 } 291 }
295 292
@@ -333,9 +330,8 @@ static int send_request(struct request *req)
333 } 330 }
334 331
335 if (hvrc != HvLpEvent_Rc_Good) { 332 if (hvrc != HvLpEvent_Rc_Good) {
336 printk(VIOD_KERN_WARNING 333 pr_warning("error sending disk event to OS/400 (rc %d)\n",
337 "error sending disk event to OS/400 (rc %d)\n", 334 (int)hvrc);
338 (int)hvrc);
339 goto error_ret; 335 goto error_ret;
340 } 336 }
341 spin_unlock_irqrestore(&viodasd_spinlock, flags); 337 spin_unlock_irqrestore(&viodasd_spinlock, flags);
@@ -402,7 +398,7 @@ retry:
402 ((u64)dev_no << 48) | ((u64)flags<< 32), 398 ((u64)dev_no << 48) | ((u64)flags<< 32),
403 0, 0, 0); 399 0, 0, 0);
404 if (hvrc != 0) { 400 if (hvrc != 0) {
405 printk(VIOD_KERN_WARNING "bad rc on HV open %d\n", (int)hvrc); 401 pr_warning("bad rc on HV open %d\n", (int)hvrc);
406 return 0; 402 return 0;
407 } 403 }
408 404
@@ -416,9 +412,8 @@ retry:
416 goto retry; 412 goto retry;
417 } 413 }
418 if (we.max_disk > (MAX_DISKNO - 1)) { 414 if (we.max_disk > (MAX_DISKNO - 1)) {
419 printk_once(VIOD_KERN_INFO 415 printk_once(KERN_INFO pr_fmt("Only examining the first %d of %d disks connected\n"),
420 "Only examining the first %d of %d disks connected\n", 416 MAX_DISKNO, we.max_disk + 1);
421 MAX_DISKNO, we.max_disk + 1);
422 } 417 }
423 418
424 /* Send the close event to OS/400. We DON'T expect a response */ 419 /* Send the close event to OS/400. We DON'T expect a response */
@@ -432,17 +427,15 @@ retry:
432 ((u64)dev_no << 48) | ((u64)flags << 32), 427 ((u64)dev_no << 48) | ((u64)flags << 32),
433 0, 0, 0); 428 0, 0, 0);
434 if (hvrc != 0) { 429 if (hvrc != 0) {
435 printk(VIOD_KERN_WARNING 430 pr_warning("bad rc sending event to OS/400 %d\n", (int)hvrc);
436 "bad rc sending event to OS/400 %d\n", (int)hvrc);
437 return 0; 431 return 0;
438 } 432 }
439 433
440 if (d->dev == NULL) { 434 if (d->dev == NULL) {
441 /* this is when we reprobe for new disks */ 435 /* this is when we reprobe for new disks */
442 if (vio_create_viodasd(dev_no) == NULL) { 436 if (vio_create_viodasd(dev_no) == NULL) {
443 printk(VIOD_KERN_WARNING 437 pr_warning("cannot allocate virtual device for disk %d\n",
444 "cannot allocate virtual device for disk %d\n", 438 dev_no);
445 dev_no);
446 return 0; 439 return 0;
447 } 440 }
448 /* 441 /*
@@ -457,15 +450,13 @@ retry:
457 spin_lock_init(&d->q_lock); 450 spin_lock_init(&d->q_lock);
458 q = blk_init_queue(do_viodasd_request, &d->q_lock); 451 q = blk_init_queue(do_viodasd_request, &d->q_lock);
459 if (q == NULL) { 452 if (q == NULL) {
460 printk(VIOD_KERN_WARNING "cannot allocate queue for disk %d\n", 453 pr_warning("cannot allocate queue for disk %d\n", dev_no);
461 dev_no);
462 return 0; 454 return 0;
463 } 455 }
464 g = alloc_disk(1 << PARTITION_SHIFT); 456 g = alloc_disk(1 << PARTITION_SHIFT);
465 if (g == NULL) { 457 if (g == NULL) {
466 printk(VIOD_KERN_WARNING 458 pr_warning("cannot allocate disk structure for disk %d\n",
467 "cannot allocate disk structure for disk %d\n", 459 dev_no);
468 dev_no);
469 blk_cleanup_queue(q); 460 blk_cleanup_queue(q);
470 return 0; 461 return 0;
471 } 462 }
@@ -489,13 +480,12 @@ retry:
489 g->driverfs_dev = d->dev; 480 g->driverfs_dev = d->dev;
490 set_capacity(g, d->size >> 9); 481 set_capacity(g, d->size >> 9);
491 482
492 printk(VIOD_KERN_INFO "disk %d: %lu sectors (%lu MB) " 483 pr_info("disk %d: %lu sectors (%lu MB) CHS=%d/%d/%d sector size %d%s\n",
493 "CHS=%d/%d/%d sector size %d%s\n", 484 dev_no, (unsigned long)(d->size >> 9),
494 dev_no, (unsigned long)(d->size >> 9), 485 (unsigned long)(d->size >> 20),
495 (unsigned long)(d->size >> 20), 486 (int)d->cylinders, (int)d->tracks,
496 (int)d->cylinders, (int)d->tracks, 487 (int)d->sectors, (int)d->bytes_per_sector,
497 (int)d->sectors, (int)d->bytes_per_sector, 488 d->read_only ? " (RO)" : "");
498 d->read_only ? " (RO)" : "");
499 489
500 /* register us in the global list */ 490 /* register us in the global list */
501 add_disk(g); 491 add_disk(g);
@@ -580,8 +570,8 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
580 if (error) { 570 if (error) {
581 const struct vio_error_entry *err; 571 const struct vio_error_entry *err;
582 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result); 572 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
583 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n", 573 pr_warning("read/write error %d:0x%04x (%s)\n",
584 event->xRc, bevent->sub_result, err->msg); 574 event->xRc, bevent->sub_result, err->msg);
585 num_sect = blk_rq_sectors(req); 575 num_sect = blk_rq_sectors(req);
586 } 576 }
587 qlock = req->q->queue_lock; 577 qlock = req->q->queue_lock;
@@ -606,8 +596,7 @@ static void handle_block_event(struct HvLpEvent *event)
606 return; 596 return;
607 /* First, we should NEVER get an int here...only acks */ 597 /* First, we should NEVER get an int here...only acks */
608 if (hvlpevent_is_int(event)) { 598 if (hvlpevent_is_int(event)) {
609 printk(VIOD_KERN_WARNING 599 pr_warning("Yikes! got an int in viodasd event handler!\n");
610 "Yikes! got an int in viodasd event handler!\n");
611 if (hvlpevent_need_ack(event)) { 600 if (hvlpevent_need_ack(event)) {
612 event->xRc = HvLpEvent_Rc_InvalidSubtype; 601 event->xRc = HvLpEvent_Rc_InvalidSubtype;
613 HvCallEvent_ackLpEvent(event); 602 HvCallEvent_ackLpEvent(event);
@@ -650,7 +639,7 @@ static void handle_block_event(struct HvLpEvent *event)
650 break; 639 break;
651 640
652 default: 641 default:
653 printk(VIOD_KERN_WARNING "invalid subtype!"); 642 pr_warning("invalid subtype!");
654 if (hvlpevent_need_ack(event)) { 643 if (hvlpevent_need_ack(event)) {
655 event->xRc = HvLpEvent_Rc_InvalidSubtype; 644 event->xRc = HvLpEvent_Rc_InvalidSubtype;
656 HvCallEvent_ackLpEvent(event); 645 HvCallEvent_ackLpEvent(event);
@@ -739,29 +728,26 @@ static int __init viodasd_init(void)
739 vio_set_hostlp(); 728 vio_set_hostlp();
740 729
741 if (viopath_hostLp == HvLpIndexInvalid) { 730 if (viopath_hostLp == HvLpIndexInvalid) {
742 printk(VIOD_KERN_WARNING "invalid hosting partition\n"); 731 pr_warning("invalid hosting partition\n");
743 rc = -EIO; 732 rc = -EIO;
744 goto early_fail; 733 goto early_fail;
745 } 734 }
746 735
747 printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n", 736 pr_info("vers " VIOD_VERS ", hosting partition %d\n", viopath_hostLp);
748 viopath_hostLp);
749 737
750 /* register the block device */ 738 /* register the block device */
751 rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME); 739 rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
752 if (rc) { 740 if (rc) {
753 printk(VIOD_KERN_WARNING 741 pr_warning("Unable to get major number %d for %s\n",
754 "Unable to get major number %d for %s\n", 742 VIODASD_MAJOR, VIOD_GENHD_NAME);
755 VIODASD_MAJOR, VIOD_GENHD_NAME);
756 goto early_fail; 743 goto early_fail;
757 } 744 }
758 /* Actually open the path to the hosting partition */ 745 /* Actually open the path to the hosting partition */
759 rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 746 rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
760 VIOMAXREQ + 2); 747 VIOMAXREQ + 2);
761 if (rc) { 748 if (rc) {
762 printk(VIOD_KERN_WARNING 749 pr_warning("error opening path to host partition %d\n",
763 "error opening path to host partition %d\n", 750 viopath_hostLp);
764 viopath_hostLp);
765 goto unregister_blk; 751 goto unregister_blk;
766 } 752 }
767 753
@@ -770,7 +756,7 @@ static int __init viodasd_init(void)
770 756
771 rc = vio_register_driver(&viodasd_driver); 757 rc = vio_register_driver(&viodasd_driver);
772 if (rc) { 758 if (rc) {
773 printk(VIOD_KERN_WARNING "vio_register_driver failed\n"); 759 pr_warning("vio_register_driver failed\n");
774 goto unset_handler; 760 goto unset_handler;
775 } 761 }
776 762
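
The viodasd conversion above relies on the kernel's pr_fmt() convention: pr_warning(), pr_info() and the other pr_*() helpers wrap their format string in pr_fmt(fmt), so defining the macro before the includes gives every message the "viod: " prefix without open-coding it in each call. A minimal sketch of the same pattern in an unrelated, hypothetical module (the "demo" name is a placeholder, not anything from this patch):

    /* demo.c - hypothetical module illustrating the pr_fmt() prefix convention */
    #define pr_fmt(fmt) "demo: " fmt    /* must precede the printk-using includes */

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init demo_init(void)
    {
        pr_info("loaded\n");              /* logs "demo: loaded" */
        pr_warning("example warning\n");  /* logs "demo: example warning" */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        pr_info("unloaded\n");
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
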
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index d8dac5820f0e..4c3b59be286a 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -748,9 +748,9 @@ static const struct tty_operations hvc_ops = {
748 .chars_in_buffer = hvc_chars_in_buffer, 748 .chars_in_buffer = hvc_chars_in_buffer,
749}; 749};
750 750
751struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data, 751struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
752 const struct hv_ops *ops, 752 const struct hv_ops *ops,
753 int outbuf_size) 753 int outbuf_size)
754{ 754{
755 struct hvc_struct *hp; 755 struct hvc_struct *hp;
756 int i; 756 int i;
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 52ddf4d3716c..54381eba4e4a 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -80,8 +80,8 @@ extern int hvc_instantiate(uint32_t vtermno, int index,
80 const struct hv_ops *ops); 80 const struct hv_ops *ops);
81 81
82/* register a vterm for hvc tty operation (module_init or hotplug add) */ 82/* register a vterm for hvc tty operation (module_init or hotplug add) */
83extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, 83extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data,
84 const struct hv_ops *ops, int outbuf_size); 84 const struct hv_ops *ops, int outbuf_size);
85/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ 85/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
86extern int hvc_remove(struct hvc_struct *hp); 86extern int hvc_remove(struct hvc_struct *hp);
87 87
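
Dropping __devinit from hvc_alloc() keeps the function in regular text, which is what console backends need when they register vterms after the init sections have been discarded (hotplug add). For context, a hedged sketch of a backend calling it, based only on the prototypes visible above; the demo_* names, vterm number and buffer size are illustrative:

    /* hypothetical hvc backend glue; everything named demo_* is made up */
    static int demo_get_chars(uint32_t vtermno, char *buf, int count)
    {
        return 0;               /* no input pending in this stub */
    }

    static int demo_put_chars(uint32_t vtermno, const char *buf, int count)
    {
        return count;           /* pretend all output was consumed */
    }

    static const struct hv_ops demo_hv_ops = {
        .get_chars = demo_get_chars,
        .put_chars = demo_put_chars,
    };

    static struct hvc_struct *demo_hvc;

    static int demo_add_vterm(void)
    {
        /* vterm 0, backend data 0, 256-byte output buffer */
        demo_hvc = hvc_alloc(0, 0, &demo_hv_ops, 256);
        if (IS_ERR(demo_hvc))
            return PTR_ERR(demo_hvc);
        return 0;
    }
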
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 2db4c0a29b05..c9bc896d68af 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1047,7 +1047,7 @@ release_io:
1047static ssize_t cmm_write(struct file *filp, const char __user *buf, 1047static ssize_t cmm_write(struct file *filp, const char __user *buf,
1048 size_t count, loff_t *ppos) 1048 size_t count, loff_t *ppos)
1049{ 1049{
1050 struct cm4000_dev *dev = (struct cm4000_dev *) filp->private_data; 1050 struct cm4000_dev *dev = filp->private_data;
1051 unsigned int iobase = dev->p_dev->io.BasePort1; 1051 unsigned int iobase = dev->p_dev->io.BasePort1;
1052 unsigned short s; 1052 unsigned short s;
1053 unsigned char tmp; 1053 unsigned char tmp;
diff --git a/drivers/hwmon/ams/ams-core.c b/drivers/hwmon/ams/ams-core.c
index 6c9ace1b76f6..2ad62c339cd2 100644
--- a/drivers/hwmon/ams/ams-core.c
+++ b/drivers/hwmon/ams/ams-core.c
@@ -213,7 +213,7 @@ int __init ams_init(void)
213 return -ENODEV; 213 return -ENODEV;
214} 214}
215 215
216void ams_exit(void) 216void ams_sensor_detach(void)
217{ 217{
218 /* Remove input device */ 218 /* Remove input device */
219 ams_input_exit(); 219 ams_input_exit();
@@ -221,9 +221,6 @@ void ams_exit(void)
221 /* Remove attributes */ 221 /* Remove attributes */
222 device_remove_file(&ams_info.of_dev->dev, &dev_attr_current); 222 device_remove_file(&ams_info.of_dev->dev, &dev_attr_current);
223 223
224 /* Shut down implementation */
225 ams_info.exit();
226
227 /* Flush interrupt worker 224 /* Flush interrupt worker
228 * 225 *
229 * We do this after ams_info.exit(), because an interrupt might 226 * We do this after ams_info.exit(), because an interrupt might
@@ -239,6 +236,12 @@ void ams_exit(void)
239 pmf_unregister_irq_client(&ams_freefall_client); 236 pmf_unregister_irq_client(&ams_freefall_client);
240} 237}
241 238
239static void __exit ams_exit(void)
240{
241 /* Shut down implementation */
242 ams_info.exit();
243}
244
242MODULE_AUTHOR("Stelian Pop, Michael Hanselmann"); 245MODULE_AUTHOR("Stelian Pop, Michael Hanselmann");
243MODULE_DESCRIPTION("Apple Motion Sensor driver"); 246MODULE_DESCRIPTION("Apple Motion Sensor driver");
244MODULE_LICENSE("GPL"); 247MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ams/ams-i2c.c b/drivers/hwmon/ams/ams-i2c.c
index 2cbf8a6506c7..abeecd27b484 100644
--- a/drivers/hwmon/ams/ams-i2c.c
+++ b/drivers/hwmon/ams/ams-i2c.c
@@ -238,6 +238,8 @@ static int ams_i2c_probe(struct i2c_client *client,
238static int ams_i2c_remove(struct i2c_client *client) 238static int ams_i2c_remove(struct i2c_client *client)
239{ 239{
240 if (ams_info.has_device) { 240 if (ams_info.has_device) {
241 ams_sensor_detach();
242
241 /* Disable interrupts */ 243 /* Disable interrupts */
242 ams_i2c_set_irq(AMS_IRQ_ALL, 0); 244 ams_i2c_set_irq(AMS_IRQ_ALL, 0);
243 245
diff --git a/drivers/hwmon/ams/ams-pmu.c b/drivers/hwmon/ams/ams-pmu.c
index fb18b3d3162b..4f61b3ee1b08 100644
--- a/drivers/hwmon/ams/ams-pmu.c
+++ b/drivers/hwmon/ams/ams-pmu.c
@@ -133,6 +133,8 @@ static void ams_pmu_get_xyz(s8 *x, s8 *y, s8 *z)
133 133
134static void ams_pmu_exit(void) 134static void ams_pmu_exit(void)
135{ 135{
136 ams_sensor_detach();
137
136 /* Disable interrupts */ 138 /* Disable interrupts */
137 ams_pmu_set_irq(AMS_IRQ_ALL, 0); 139 ams_pmu_set_irq(AMS_IRQ_ALL, 0);
138 140
diff --git a/drivers/hwmon/ams/ams.h b/drivers/hwmon/ams/ams.h
index 5ed387b0bd9a..b28d7e27a031 100644
--- a/drivers/hwmon/ams/ams.h
+++ b/drivers/hwmon/ams/ams.h
@@ -61,6 +61,7 @@ extern struct ams ams_info;
61 61
62extern void ams_sensors(s8 *x, s8 *y, s8 *z); 62extern void ams_sensors(s8 *x, s8 *y, s8 *z);
63extern int ams_sensor_attach(void); 63extern int ams_sensor_attach(void);
64extern void ams_sensor_detach(void);
64 65
65extern int ams_pmu_init(struct device_node *np); 66extern int ams_pmu_init(struct device_node *np);
66extern int ams_i2c_init(struct device_node *np); 67extern int ams_i2c_init(struct device_node *np);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 5f318ce29770..737f05200b1d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -564,6 +564,16 @@ config I2C_VERSATILE
564 This driver can also be built as a module. If so, the module 564 This driver can also be built as a module. If so, the module
565 will be called i2c-versatile. 565 will be called i2c-versatile.
566 566
567config I2C_OCTEON
568 tristate "Cavium OCTEON I2C bus support"
569 depends on CPU_CAVIUM_OCTEON
570 help
571 Say yes if you want to support the I2C serial bus on Cavium
572 OCTEON SOC.
573
574 This driver can also be built as a module. If so, the module
575 will be called i2c-octeon.
576
567comment "External I2C/SMBus adapter drivers" 577comment "External I2C/SMBus adapter drivers"
568 578
569config I2C_PARPORT 579config I2C_PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 302c551977bb..c2c4ea1908d8 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
54obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o 54obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
55obj-$(CONFIG_I2C_STU300) += i2c-stu300.o 55obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
56obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o 56obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
57obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
57 58
58# External I2C/SMBus adapter drivers 59# External I2C/SMBus adapter drivers
59obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o 60obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
new file mode 100644
index 000000000000..60375504fa49
--- /dev/null
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -0,0 +1,651 @@
1/*
2 * (C) Copyright 2009-2010
3 * Nokia Siemens Networks, michael.lawnick.ext@nsn.com
4 *
5 * Portions Copyright (C) 2010 Cavium Networks, Inc.
6 *
7 * This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/init.h>
18
19#include <linux/io.h>
20#include <linux/i2c.h>
21#include <linux/interrupt.h>
22#include <linux/delay.h>
23#include <linux/platform_device.h>
24
25#include <asm/octeon/octeon.h>
26
27#define DRV_NAME "i2c-octeon"
28
29/* The previous out-of-tree version was implicitly version 1.0. */
30#define DRV_VERSION "2.0"
31
32/* register offsets */
33#define SW_TWSI 0x00
34#define TWSI_INT 0x10
35
36/* Controller command patterns */
37#define SW_TWSI_V 0x8000000000000000ull
38#define SW_TWSI_EOP_TWSI_DATA 0x0C00000100000000ull
39#define SW_TWSI_EOP_TWSI_CTL 0x0C00000200000000ull
40#define SW_TWSI_EOP_TWSI_CLKCTL 0x0C00000300000000ull
41#define SW_TWSI_EOP_TWSI_STAT 0x0C00000300000000ull
42#define SW_TWSI_EOP_TWSI_RST 0x0C00000700000000ull
43#define SW_TWSI_OP_TWSI_CLK 0x0800000000000000ull
44#define SW_TWSI_R 0x0100000000000000ull
45
46/* Controller command and status bits */
47#define TWSI_CTL_CE 0x80
48#define TWSI_CTL_ENAB 0x40
49#define TWSI_CTL_STA 0x20
50#define TWSI_CTL_STP 0x10
51#define TWSI_CTL_IFLG 0x08
52#define TWSI_CTL_AAK 0x04
53
54/* Some status values */
55#define STAT_START 0x08
56#define STAT_RSTART 0x10
57#define STAT_TXADDR_ACK 0x18
58#define STAT_TXDATA_ACK 0x28
59#define STAT_RXADDR_ACK 0x40
60#define STAT_RXDATA_ACK 0x50
61#define STAT_IDLE 0xF8
62
63struct octeon_i2c {
64 wait_queue_head_t queue;
65 struct i2c_adapter adap;
66 int irq;
67 int twsi_freq;
68 int sys_freq;
69 resource_size_t twsi_phys;
70 void __iomem *twsi_base;
71 resource_size_t regsize;
72 struct device *dev;
73};
74
75/**
76 * octeon_i2c_write_sw - write an I2C core register.
77 * @i2c: The struct octeon_i2c.
78 * @eop_reg: Register selector.
79 * @data: Value to be written.
80 *
81 * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
82 */
83static void octeon_i2c_write_sw(struct octeon_i2c *i2c,
84 u64 eop_reg,
85 u8 data)
86{
87 u64 tmp;
88
89 __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI);
90 do {
91 tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
92 } while ((tmp & SW_TWSI_V) != 0);
93}
94
95/**
96 * octeon_i2c_read_sw - write an I2C core register.
97 * @i2c: The struct octeon_i2c.
98 * @eop_reg: Register selector.
99 *
100 * Returns the data.
101 *
102 * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
103 */
104static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
105{
106 u64 tmp;
107
108 __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI);
109 do {
110 tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
111 } while ((tmp & SW_TWSI_V) != 0);
112
113 return tmp & 0xFF;
114}
115
116/**
117 * octeon_i2c_write_int - write the TWSI_INT register
118 * @i2c: The struct octeon_i2c.
119 * @data: Value to be written.
120 */
121static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
122{
123 u64 tmp;
124
125 __raw_writeq(data, i2c->twsi_base + TWSI_INT);
126 tmp = __raw_readq(i2c->twsi_base + TWSI_INT);
127}
128
129/**
130 * octeon_i2c_int_enable - enable the TS interrupt.
131 * @i2c: The struct octeon_i2c.
132 *
133 * The interrupt will be asserted when there is non-STAT_IDLE state in
134 * the SW_TWSI_EOP_TWSI_STAT register.
135 */
136static void octeon_i2c_int_enable(struct octeon_i2c *i2c)
137{
138 octeon_i2c_write_int(i2c, 0x40);
139}
140
141/**
142 * octeon_i2c_int_disable - disable the TS interrupt.
143 * @i2c: The struct octeon_i2c.
144 */
145static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
146{
147 octeon_i2c_write_int(i2c, 0);
148}
149
150/**
151 * octeon_i2c_unblock - unblock the bus.
152 * @i2c: The struct octeon_i2c.
153 *
154 * If there was a reset while a device was driving 0 to bus,
155 * bus is blocked. We toggle it free manually by some clock
156 * cycles and send a stop.
157 */
158static void octeon_i2c_unblock(struct octeon_i2c *i2c)
159{
160 int i;
161
162 dev_dbg(i2c->dev, "%s\n", __func__);
163 for (i = 0; i < 9; i++) {
164 octeon_i2c_write_int(i2c, 0x0);
165 udelay(5);
166 octeon_i2c_write_int(i2c, 0x200);
167 udelay(5);
168 }
169 octeon_i2c_write_int(i2c, 0x300);
170 udelay(5);
171 octeon_i2c_write_int(i2c, 0x100);
172 udelay(5);
173 octeon_i2c_write_int(i2c, 0x0);
174}
175
176/**
177 * octeon_i2c_isr - the interrupt service routine.
178 * @int: The irq, unused.
179 * @dev_id: Our struct octeon_i2c.
180 */
181static irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
182{
183 struct octeon_i2c *i2c = dev_id;
184
185 octeon_i2c_int_disable(i2c);
186 wake_up_interruptible(&i2c->queue);
187
188 return IRQ_HANDLED;
189}
190
191
192static int octeon_i2c_test_iflg(struct octeon_i2c *i2c)
193{
194 return (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_CTL) & TWSI_CTL_IFLG) != 0;
195}
196
197/**
198 * octeon_i2c_wait - wait for the IFLG to be set.
199 * @i2c: The struct octeon_i2c.
200 *
201 * Returns 0 on success, otherwise a negative errno.
202 */
203static int octeon_i2c_wait(struct octeon_i2c *i2c)
204{
205 int result;
206
207 octeon_i2c_int_enable(i2c);
208
209 result = wait_event_interruptible_timeout(i2c->queue,
210 octeon_i2c_test_iflg(i2c),
211 i2c->adap.timeout);
212
213 octeon_i2c_int_disable(i2c);
214
215 if (result < 0) {
216 dev_dbg(i2c->dev, "%s: wait interrupted\n", __func__);
217 return result;
218 } else if (result == 0) {
219 dev_dbg(i2c->dev, "%s: timeout\n", __func__);
220 result = -ETIMEDOUT;
221 }
222
223 return 0;
224}
225
226/**
227 * octeon_i2c_start - send START to the bus.
228 * @i2c: The struct octeon_i2c.
229 *
230 * Returns 0 on success, otherwise a negative errno.
231 */
232static int octeon_i2c_start(struct octeon_i2c *i2c)
233{
234 u8 data;
235 int result;
236
237 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
238 TWSI_CTL_ENAB | TWSI_CTL_STA);
239
240 result = octeon_i2c_wait(i2c);
241 if (result) {
242 if (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT) == STAT_IDLE) {
243 /*
244 * Controller refused to send start flag May
245 * be a client is holding SDA low - let's try
246 * to free it.
247 */
248 octeon_i2c_unblock(i2c);
249 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
250 TWSI_CTL_ENAB | TWSI_CTL_STA);
251
252 result = octeon_i2c_wait(i2c);
253 }
254 if (result)
255 return result;
256 }
257
258 data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
259 if ((data != STAT_START) && (data != STAT_RSTART)) {
260 dev_err(i2c->dev, "%s: bad status (0x%x)\n", __func__, data);
261 return -EIO;
262 }
263
264 return 0;
265}
266
267/**
268 * octeon_i2c_stop - send STOP to the bus.
269 * @i2c: The struct octeon_i2c.
270 *
271 * Returns 0 on success, otherwise a negative errno.
272 */
273static int octeon_i2c_stop(struct octeon_i2c *i2c)
274{
275 u8 data;
276
277 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
278 TWSI_CTL_ENAB | TWSI_CTL_STP);
279
280 data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
281
282 if (data != STAT_IDLE) {
283 dev_err(i2c->dev, "%s: bad status(0x%x)\n", __func__, data);
284 return -EIO;
285 }
286 return 0;
287}
288
289/**
290 * octeon_i2c_write - send data to the bus.
291 * @i2c: The struct octeon_i2c.
292 * @target: Target address.
293 * @data: Pointer to the data to be sent.
294 * @length: Length of the data.
295 *
296 * The address is sent over the bus, then the data.
297 *
298 * Returns 0 on success, otherwise a negative errno.
299 */
300static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
301 const u8 *data, int length)
302{
303 int i, result;
304 u8 tmp;
305
306 result = octeon_i2c_start(i2c);
307 if (result)
308 return result;
309
310 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, target << 1);
311 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
312
313 result = octeon_i2c_wait(i2c);
314 if (result)
315 return result;
316
317 for (i = 0; i < length; i++) {
318 tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
319 if ((tmp != STAT_TXADDR_ACK) && (tmp != STAT_TXDATA_ACK)) {
320 dev_err(i2c->dev,
321 "%s: bad status before write (0x%x)\n",
322 __func__, tmp);
323 return -EIO;
324 }
325
326 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, data[i]);
327 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
328
329 result = octeon_i2c_wait(i2c);
330 if (result)
331 return result;
332 }
333
334 return 0;
335}
336
337/**
338 * octeon_i2c_read - receive data from the bus.
339 * @i2c: The struct octeon_i2c.
340 * @target: Target address.
341 * @data: Pointer to the location to store the datae .
342 * @length: Length of the data.
343 *
344 * The address is sent over the bus, then the data is read.
345 *
346 * Returns 0 on success, otherwise a negative errno.
347 */
348static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
349 u8 *data, int length)
350{
351 int i, result;
352 u8 tmp;
353
354 if (length < 1)
355 return -EINVAL;
356
357 result = octeon_i2c_start(i2c);
358 if (result)
359 return result;
360
361 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target<<1) | 1);
362 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
363
364 result = octeon_i2c_wait(i2c);
365 if (result)
366 return result;
367
368 for (i = 0; i < length; i++) {
369 tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
370 if ((tmp != STAT_RXDATA_ACK) && (tmp != STAT_RXADDR_ACK)) {
371 dev_err(i2c->dev,
372 "%s: bad status before read (0x%x)\n",
373 __func__, tmp);
374 return -EIO;
375 }
376
377 if (i+1 < length)
378 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
379 TWSI_CTL_ENAB | TWSI_CTL_AAK);
380 else
381 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
382 TWSI_CTL_ENAB);
383
384 result = octeon_i2c_wait(i2c);
385 if (result)
386 return result;
387
388 data[i] = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_DATA);
389 }
390 return 0;
391}
392
393/**
394 * octeon_i2c_xfer - The driver's master_xfer function.
395 * @adap: Pointer to the i2c_adapter structure.
396 * @msgs: Pointer to the messages to be processed.
397 * @num: Length of the MSGS array.
398 *
399 * Returns the number of messages processed, or a negative errno on
400 * failure.
401 */
402static int octeon_i2c_xfer(struct i2c_adapter *adap,
403 struct i2c_msg *msgs,
404 int num)
405{
406 struct i2c_msg *pmsg;
407 int i;
408 int ret = 0;
409 struct octeon_i2c *i2c = i2c_get_adapdata(adap);
410
411 for (i = 0; ret == 0 && i < num; i++) {
412 pmsg = &msgs[i];
413 dev_dbg(i2c->dev,
414 "Doing %s %d byte(s) to/from 0x%02x - %d of %d messages\n",
415 pmsg->flags & I2C_M_RD ? "read" : "write",
416 pmsg->len, pmsg->addr, i + 1, num);
417 if (pmsg->flags & I2C_M_RD)
418 ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
419 pmsg->len);
420 else
421 ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
422 pmsg->len);
423 }
424 octeon_i2c_stop(i2c);
425
426 return (ret != 0) ? ret : num;
427}
428
429static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
430{
431 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
432}
433
434static const struct i2c_algorithm octeon_i2c_algo = {
435 .master_xfer = octeon_i2c_xfer,
436 .functionality = octeon_i2c_functionality,
437};
438
439static struct i2c_adapter octeon_i2c_ops = {
440 .owner = THIS_MODULE,
441 .name = "OCTEON adapter",
442 .algo = &octeon_i2c_algo,
443 .timeout = 2,
444};
445
446/**
447 * octeon_i2c_setclock - Calculate and set clock divisors.
448 */
449static int __init octeon_i2c_setclock(struct octeon_i2c *i2c)
450{
451 int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
452 int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
453
454 for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
455 /*
456 * An mdiv value of less than 2 seems to not work well
457 * with ds1337 RTCs, so we constrain it to larger
458 * values.
459 */
460 for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) {
461 /*
462 * For given ndiv and mdiv values check the
463 * two closest thp values.
464 */
465 tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
466 tclk *= (1 << ndiv_idx);
467 thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
468 for (inc = 0; inc <= 1; inc++) {
469 thp_idx = thp_base + inc;
470 if (thp_idx < 5 || thp_idx > 0xff)
471 continue;
472
473 foscl = i2c->sys_freq / (2 * (thp_idx + 1));
474 foscl = foscl / (1 << ndiv_idx);
475 foscl = foscl / (mdiv_idx + 1) / 10;
476 diff = abs(foscl - i2c->twsi_freq);
477 if (diff < delta_hz) {
478 delta_hz = diff;
479 thp = thp_idx;
480 mdiv = mdiv_idx;
481 ndiv = ndiv_idx;
482 }
483 }
484 }
485 }
486 octeon_i2c_write_sw(i2c, SW_TWSI_OP_TWSI_CLK, thp);
487 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
488
489 return 0;
490}
491
492static int __init octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
493{
494 u8 status;
495 int tries;
496
497 /* disable high level controller, enable bus access */
498 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
499
500 /* reset controller */
501 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_RST, 0);
502
503 for (tries = 10; tries; tries--) {
504 udelay(1);
505 status = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
506 if (status == STAT_IDLE)
507 return 0;
508 }
509 dev_err(i2c->dev, "%s: TWSI_RST failed! (0x%x)\n", __func__, status);
510 return -EIO;
511}
512
513static int __devinit octeon_i2c_probe(struct platform_device *pdev)
514{
515 int irq, result = 0;
516 struct octeon_i2c *i2c;
517 struct octeon_i2c_data *i2c_data;
518 struct resource *res_mem;
519
520 /* All adaptors have an irq. */
521 irq = platform_get_irq(pdev, 0);
522 if (irq < 0)
523 return irq;
524
525 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
526 if (!i2c) {
527 dev_err(&pdev->dev, "kzalloc failed\n");
528 result = -ENOMEM;
529 goto out;
530 }
531 i2c->dev = &pdev->dev;
532 i2c_data = pdev->dev.platform_data;
533
534 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
535
536 if (res_mem == NULL) {
537 dev_err(i2c->dev, "found no memory resource\n");
538 result = -ENXIO;
539 goto fail_region;
540 }
541
542 if (i2c_data == NULL) {
543 dev_err(i2c->dev, "no I2C frequency data\n");
544 result = -ENXIO;
545 goto fail_region;
546 }
547
548 i2c->twsi_phys = res_mem->start;
549 i2c->regsize = resource_size(res_mem);
550 i2c->twsi_freq = i2c_data->i2c_freq;
551 i2c->sys_freq = i2c_data->sys_freq;
552
553 if (!request_mem_region(i2c->twsi_phys, i2c->regsize, res_mem->name)) {
554 dev_err(i2c->dev, "request_mem_region failed\n");
555 goto fail_region;
556 }
557 i2c->twsi_base = ioremap(i2c->twsi_phys, i2c->regsize);
558
559 init_waitqueue_head(&i2c->queue);
560
561 i2c->irq = irq;
562
563 result = request_irq(i2c->irq, octeon_i2c_isr, 0, DRV_NAME, i2c);
564 if (result < 0) {
565 dev_err(i2c->dev, "failed to attach interrupt\n");
566 goto fail_irq;
567 }
568
569 result = octeon_i2c_initlowlevel(i2c);
570 if (result) {
571 dev_err(i2c->dev, "init low level failed\n");
572 goto fail_add;
573 }
574
575 result = octeon_i2c_setclock(i2c);
576 if (result) {
577 dev_err(i2c->dev, "clock init failed\n");
578 goto fail_add;
579 }
580
581 i2c->adap = octeon_i2c_ops;
582 i2c->adap.dev.parent = &pdev->dev;
583 i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
584 i2c_set_adapdata(&i2c->adap, i2c);
585 platform_set_drvdata(pdev, i2c);
586
587 result = i2c_add_numbered_adapter(&i2c->adap);
588 if (result < 0) {
589 dev_err(i2c->dev, "failed to add adapter\n");
590 goto fail_add;
591 }
592
593 dev_info(i2c->dev, "version %s\n", DRV_VERSION);
594
595 return result;
596
597fail_add:
598 platform_set_drvdata(pdev, NULL);
599 free_irq(i2c->irq, i2c);
600fail_irq:
601 iounmap(i2c->twsi_base);
602 release_mem_region(i2c->twsi_phys, i2c->regsize);
603fail_region:
604 kfree(i2c);
605out:
606 return result;
607};
608
609static int __devexit octeon_i2c_remove(struct platform_device *pdev)
610{
611 struct octeon_i2c *i2c = platform_get_drvdata(pdev);
612
613 i2c_del_adapter(&i2c->adap);
614 platform_set_drvdata(pdev, NULL);
615 free_irq(i2c->irq, i2c);
616 iounmap(i2c->twsi_base);
617 release_mem_region(i2c->twsi_phys, i2c->regsize);
618 kfree(i2c);
619 return 0;
620};
621
622static struct platform_driver octeon_i2c_driver = {
623 .probe = octeon_i2c_probe,
624 .remove = __devexit_p(octeon_i2c_remove),
625 .driver = {
626 .owner = THIS_MODULE,
627 .name = DRV_NAME,
628 },
629};
630
631static int __init octeon_i2c_init(void)
632{
633 int rv;
634
635 rv = platform_driver_register(&octeon_i2c_driver);
636 return rv;
637}
638
639static void __exit octeon_i2c_exit(void)
640{
641 platform_driver_unregister(&octeon_i2c_driver);
642}
643
644MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
645MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
646MODULE_LICENSE("GPL");
647MODULE_VERSION(DRV_VERSION);
648MODULE_ALIAS("platform:" DRV_NAME);
649
650module_init(octeon_i2c_init);
651module_exit(octeon_i2c_exit);
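
The divisor search in octeon_i2c_setclock() above minimises |foscl - twsi_freq|, where foscl = sys_freq / (10 * 2 * (thp + 1) * (mdiv + 1) * 2^ndiv). The same loop can be compiled stand-alone to see which (thp, mdiv, ndiv) triple a given clock pair produces; the sample frequencies below are illustrative, not taken from the patch:

    /* clkcalc.c - userspace re-run of the divisor search, sample inputs only */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const int sys_freq = 800000000;   /* 800 MHz system clock, assumed */
        const int twsi_freq = 100000;     /* 100 kHz target bus speed, assumed */
        int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
        int ndiv_idx, mdiv_idx, inc, thp_idx, thp_base, tclk, foscl, diff;

        for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
            for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) {
                /* for this ndiv/mdiv pair, try the two closest thp values */
                tclk = twsi_freq * (mdiv_idx + 1) * 10 * (1 << ndiv_idx);
                thp_base = (sys_freq / (tclk * 2)) - 1;
                for (inc = 0; inc <= 1; inc++) {
                    thp_idx = thp_base + inc;
                    if (thp_idx < 5 || thp_idx > 0xff)
                        continue;
                    foscl = sys_freq / (2 * (thp_idx + 1));
                    foscl = foscl / (1 << ndiv_idx);
                    foscl = foscl / (mdiv_idx + 1) / 10;
                    diff = abs(foscl - twsi_freq);
                    if (diff < delta_hz) {
                        delta_hz = diff;
                        thp = thp_idx;
                        mdiv = mdiv_idx;
                        ndiv = ndiv_idx;
                    }
                }
            }
        }
        printf("thp=0x%x mdiv=%d ndiv=%d (error %d Hz)\n", thp, mdiv, ndiv, delta_hz);
        return 0;
    }

With those sample numbers the search settles on thp=0x18, mdiv=15, ndiv=0, i.e. an exact 100 kHz bus clock.
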
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 87cef0c440ad..349a67bf1a36 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -56,8 +56,8 @@ static inline void auide_insw(unsigned long port, void *addr, u32 count)
56 chan_tab_t *ctp; 56 chan_tab_t *ctp;
57 au1x_ddma_desc_t *dp; 57 au1x_ddma_desc_t *dp;
58 58
59 if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, 59 if (!au1xxx_dbdma_put_dest(ahwif->rx_chan, virt_to_phys(addr),
60 DDMA_FLAGS_NOIE)) { 60 count << 1, DDMA_FLAGS_NOIE)) {
61 printk(KERN_ERR "%s failed %d\n", __func__, __LINE__); 61 printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
62 return; 62 return;
63 } 63 }
@@ -74,8 +74,8 @@ static inline void auide_outsw(unsigned long port, void *addr, u32 count)
74 chan_tab_t *ctp; 74 chan_tab_t *ctp;
75 au1x_ddma_desc_t *dp; 75 au1x_ddma_desc_t *dp;
76 76
77 if(!put_source_flags(ahwif->tx_chan, (void*)addr, 77 if (!au1xxx_dbdma_put_source(ahwif->tx_chan, virt_to_phys(addr),
78 count << 1, DDMA_FLAGS_NOIE)) { 78 count << 1, DDMA_FLAGS_NOIE)) {
79 printk(KERN_ERR "%s failed %d\n", __func__, __LINE__); 79 printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
80 return; 80 return;
81 } 81 }
@@ -246,17 +246,14 @@ static int auide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
246 flags = DDMA_FLAGS_NOIE; 246 flags = DDMA_FLAGS_NOIE;
247 247
248 if (iswrite) { 248 if (iswrite) {
249 if(!put_source_flags(ahwif->tx_chan, 249 if (!au1xxx_dbdma_put_source(ahwif->tx_chan,
250 (void*) sg_virt(sg), 250 sg_phys(sg), tc, flags)) {
251 tc, flags)) {
252 printk(KERN_ERR "%s failed %d\n", 251 printk(KERN_ERR "%s failed %d\n",
253 __func__, __LINE__); 252 __func__, __LINE__);
254 } 253 }
255 } else 254 } else {
256 { 255 if (!au1xxx_dbdma_put_dest(ahwif->rx_chan,
257 if(!put_dest_flags(ahwif->rx_chan, 256 sg_phys(sg), tc, flags)) {
258 (void*) sg_virt(sg),
259 tc, flags)) {
260 printk(KERN_ERR "%s failed %d\n", 257 printk(KERN_ERR "%s failed %d\n",
261 __func__, __LINE__); 258 __func__, __LINE__);
262 } 259 }
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 5ff47ba7f2d0..c42eeb43042d 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -90,6 +90,8 @@ static struct task_struct *thread_therm = NULL;
90 90
91static void write_both_fan_speed(struct thermostat *th, int speed); 91static void write_both_fan_speed(struct thermostat *th, int speed);
92static void write_fan_speed(struct thermostat *th, int speed, int fan); 92static void write_fan_speed(struct thermostat *th, int speed, int fan);
93static void thermostat_create_files(void);
94static void thermostat_remove_files(void);
93 95
94static int 96static int
95write_reg(struct thermostat* th, int reg, u8 data) 97write_reg(struct thermostat* th, int reg, u8 data)
@@ -161,6 +163,8 @@ remove_thermostat(struct i2c_client *client)
161 struct thermostat *th = i2c_get_clientdata(client); 163 struct thermostat *th = i2c_get_clientdata(client);
162 int i; 164 int i;
163 165
166 thermostat_remove_files();
167
164 if (thread_therm != NULL) { 168 if (thread_therm != NULL) {
165 kthread_stop(thread_therm); 169 kthread_stop(thread_therm);
166 } 170 }
@@ -312,7 +316,7 @@ static void update_fans_speed (struct thermostat *th)
312 316
313 if (verbose) 317 if (verbose)
314 printk(KERN_DEBUG "adt746x: Setting fans speed to %d " 318 printk(KERN_DEBUG "adt746x: Setting fans speed to %d "
315 "(limit exceeded by %d on %s) \n", 319 "(limit exceeded by %d on %s)\n",
316 new_speed, var, 320 new_speed, var,
317 sensor_location[fan_number+1]); 321 sensor_location[fan_number+1]);
318 write_both_fan_speed(th, new_speed); 322 write_both_fan_speed(th, new_speed);
@@ -449,6 +453,8 @@ static int probe_thermostat(struct i2c_client *client,
449 return -ENOMEM; 453 return -ENOMEM;
450 } 454 }
451 455
456 thermostat_create_files();
457
452 return 0; 458 return 0;
453} 459}
454 460
@@ -566,7 +572,6 @@ thermostat_init(void)
566 struct device_node* np; 572 struct device_node* np;
567 const u32 *prop; 573 const u32 *prop;
568 int i = 0, offset = 0; 574 int i = 0, offset = 0;
569 int err;
570 575
571 np = of_find_node_by_name(NULL, "fan"); 576 np = of_find_node_by_name(NULL, "fan");
572 if (!np) 577 if (!np)
@@ -633,6 +638,17 @@ thermostat_init(void)
633 return -ENODEV; 638 return -ENODEV;
634 } 639 }
635 640
641#ifndef CONFIG_I2C_POWERMAC
642 request_module("i2c-powermac");
643#endif
644
645 return i2c_add_driver(&thermostat_driver);
646}
647
648static void thermostat_create_files(void)
649{
650 int err;
651
636 err = device_create_file(&of_dev->dev, &dev_attr_sensor1_temperature); 652 err = device_create_file(&of_dev->dev, &dev_attr_sensor1_temperature);
637 err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_temperature); 653 err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_temperature);
638 err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_limit); 654 err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_limit);
@@ -647,16 +663,9 @@ thermostat_init(void)
647 if (err) 663 if (err)
648 printk(KERN_WARNING 664 printk(KERN_WARNING
649 "Failed to create tempertaure attribute file(s).\n"); 665 "Failed to create tempertaure attribute file(s).\n");
650
651#ifndef CONFIG_I2C_POWERMAC
652 request_module("i2c-powermac");
653#endif
654
655 return i2c_add_driver(&thermostat_driver);
656} 666}
657 667
658static void __exit 668static void thermostat_remove_files(void)
659thermostat_exit(void)
660{ 669{
661 if (of_dev) { 670 if (of_dev) {
662 device_remove_file(&of_dev->dev, &dev_attr_sensor1_temperature); 671 device_remove_file(&of_dev->dev, &dev_attr_sensor1_temperature);
@@ -673,9 +682,14 @@ thermostat_exit(void)
673 device_remove_file(&of_dev->dev, 682 device_remove_file(&of_dev->dev,
674 &dev_attr_sensor2_fan_speed); 683 &dev_attr_sensor2_fan_speed);
675 684
676 of_device_unregister(of_dev);
677 } 685 }
686}
687
688static void __exit
689thermostat_exit(void)
690{
678 i2c_del_driver(&thermostat_driver); 691 i2c_del_driver(&thermostat_driver);
692 of_device_unregister(of_dev);
679} 693}
680 694
681module_init(thermostat_init); 695module_init(thermostat_init);
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index d3f55615c099..57b21198828f 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -650,11 +650,11 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host,
650 flags = DDMA_FLAGS_IE; 650 flags = DDMA_FLAGS_IE;
651 651
652 if (host->flags & HOST_F_XMIT) { 652 if (host->flags & HOST_F_XMIT) {
653 ret = au1xxx_dbdma_put_source_flags(channel, 653 ret = au1xxx_dbdma_put_source(channel,
654 (void *)sg_virt(sg), len, flags); 654 sg_phys(sg), len, flags);
655 } else { 655 } else {
656 ret = au1xxx_dbdma_put_dest_flags(channel, 656 ret = au1xxx_dbdma_put_dest(channel,
657 (void *)sg_virt(sg), len, flags); 657 sg_phys(sg), len, flags);
658 } 658 }
659 659
660 if (!ret) 660 if (!ret)
@@ -1017,6 +1017,10 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
1017 } else 1017 } else
1018 mmc->caps |= MMC_CAP_NEEDS_POLL; 1018 mmc->caps |= MMC_CAP_NEEDS_POLL;
1019 1019
1020 /* platform may not be able to use all advertised caps */
1021 if (host->platdata)
1022 mmc->caps &= ~(host->platdata->mask_host_caps);
1023
1020 tasklet_init(&host->data_task, au1xmmc_tasklet_data, 1024 tasklet_init(&host->data_task, au1xmmc_tasklet_data,
1021 (unsigned long)host); 1025 (unsigned long)host);
1022 1026
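
The au1xxx-ide and au1xmmc hunks above track an Au1x00 dbdma API change: au1xxx_dbdma_put_source()/au1xxx_dbdma_put_dest() now take a physical address, so call sites switch from kernel-virtual pointers to virt_to_phys(addr) or sg_phys(sg). As a reminder of what sg_phys() resolves to, a paraphrase of the generic scatterlist helper (shown for reference only, not part of this patch):

    /* roughly what include/linux/scatterlist.h provides */
    static inline dma_addr_t sg_phys(struct scatterlist *sg)
    {
        return page_to_phys(sg_page(sg)) + sg->offset;
    }

    /*
     * Old and new call sites therefore describe the same buffer:
     *   put_dest_flags(chan, (void *)sg_virt(sg), len, flags);    old, virtual
     *   au1xxx_dbdma_put_dest(chan, sg_phys(sg), len, flags);     new, physical
     */
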
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 2de0cc823d60..2bb03a8b9ef1 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -251,12 +251,6 @@ config MTD_NETtel
251 help 251 help
252 Support for flash chips on NETtel/SecureEdge/SnapGear boards. 252 Support for flash chips on NETtel/SecureEdge/SnapGear boards.
253 253
254config MTD_ALCHEMY
255 tristate "AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support"
256 depends on SOC_AU1X00 && MTD_PARTITIONS && MTD_CFI
257 help
258 Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards
259
260config MTD_DILNETPC 254config MTD_DILNETPC
261 tristate "CFI Flash device mapped on DIL/Net PC" 255 tristate "CFI Flash device mapped on DIL/Net PC"
262 depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN 256 depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index ce315214ff2b..a44919f3f3d2 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
40obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o 40obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
41obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o 41obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
42obj-$(CONFIG_MTD_PCI) += pci.o 42obj-$(CONFIG_MTD_PCI) += pci.o
43obj-$(CONFIG_MTD_ALCHEMY) += alchemy-flash.o
44obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o 43obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
45obj-$(CONFIG_MTD_EDB7312) += edb7312.o 44obj-$(CONFIG_MTD_EDB7312) += edb7312.o
46obj-$(CONFIG_MTD_IMPA7) += impa7.o 45obj-$(CONFIG_MTD_IMPA7) += impa7.o
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
deleted file mode 100644
index 845ad4f2a542..000000000000
--- a/drivers/mtd/maps/alchemy-flash.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * Flash memory access on AMD Alchemy evaluation boards
3 *
4 * (C) 2003, 2004 Pete Popov <ppopov@embeddedalley.com>
5 */
6
7#include <linux/init.h>
8#include <linux/module.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11
12#include <linux/mtd/mtd.h>
13#include <linux/mtd/map.h>
14#include <linux/mtd/partitions.h>
15
16#include <asm/io.h>
17
18#ifdef CONFIG_MIPS_PB1000
19#define BOARD_MAP_NAME "Pb1000 Flash"
20#define BOARD_FLASH_SIZE 0x00800000 /* 8MB */
21#define BOARD_FLASH_WIDTH 4 /* 32-bits */
22#endif
23
24#ifdef CONFIG_MIPS_PB1500
25#define BOARD_MAP_NAME "Pb1500 Flash"
26#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
27#define BOARD_FLASH_WIDTH 4 /* 32-bits */
28#endif
29
30#ifdef CONFIG_MIPS_PB1100
31#define BOARD_MAP_NAME "Pb1100 Flash"
32#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
33#define BOARD_FLASH_WIDTH 4 /* 32-bits */
34#endif
35
36#ifdef CONFIG_MIPS_PB1550
37#define BOARD_MAP_NAME "Pb1550 Flash"
38#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
39#define BOARD_FLASH_WIDTH 4 /* 32-bits */
40#endif
41
42#ifdef CONFIG_MIPS_PB1200
43#define BOARD_MAP_NAME "Pb1200 Flash"
44#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
45#define BOARD_FLASH_WIDTH 2 /* 16-bits */
46#endif
47
48#ifdef CONFIG_MIPS_DB1000
49#define BOARD_MAP_NAME "Db1000 Flash"
50#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
51#define BOARD_FLASH_WIDTH 4 /* 32-bits */
52#endif
53
54#ifdef CONFIG_MIPS_DB1500
55#define BOARD_MAP_NAME "Db1500 Flash"
56#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
57#define BOARD_FLASH_WIDTH 4 /* 32-bits */
58#endif
59
60#ifdef CONFIG_MIPS_DB1100
61#define BOARD_MAP_NAME "Db1100 Flash"
62#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
63#define BOARD_FLASH_WIDTH 4 /* 32-bits */
64#endif
65
66#ifdef CONFIG_MIPS_DB1550
67#define BOARD_MAP_NAME "Db1550 Flash"
68#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
69#define BOARD_FLASH_WIDTH 4 /* 32-bits */
70#endif
71
72#ifdef CONFIG_MIPS_DB1200
73#define BOARD_MAP_NAME "Db1200 Flash"
74#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
75#define BOARD_FLASH_WIDTH 2 /* 16-bits */
76#endif
77
78#ifdef CONFIG_MIPS_BOSPORUS
79#define BOARD_MAP_NAME "Bosporus Flash"
80#define BOARD_FLASH_SIZE 0x01000000 /* 16MB */
81#define BOARD_FLASH_WIDTH 2 /* 16-bits */
82#endif
83
84#ifdef CONFIG_MIPS_MIRAGE
85#define BOARD_MAP_NAME "Mirage Flash"
86#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
87#define BOARD_FLASH_WIDTH 4 /* 32-bits */
88#define USE_LOCAL_ACCESSORS /* why? */
89#endif
90
91static struct map_info alchemy_map = {
92 .name = BOARD_MAP_NAME,
93};
94
95static struct mtd_partition alchemy_partitions[] = {
96 {
97 .name = "User FS",
98 .size = BOARD_FLASH_SIZE - 0x00400000,
99 .offset = 0x0000000
100 },{
101 .name = "YAMON",
102 .size = 0x0100000,
103 .offset = MTDPART_OFS_APPEND,
104 .mask_flags = MTD_WRITEABLE
105 },{
106 .name = "raw kernel",
107 .size = (0x300000 - 0x40000), /* last 256KB is yamon env */
108 .offset = MTDPART_OFS_APPEND,
109 }
110};
111
112static struct mtd_info *mymtd;
113
114static int __init alchemy_mtd_init(void)
115{
116 struct mtd_partition *parts;
117 int nb_parts = 0;
118 unsigned long window_addr;
119 unsigned long window_size;
120
121 /* Default flash buswidth */
122 alchemy_map.bankwidth = BOARD_FLASH_WIDTH;
123
124 window_addr = 0x20000000 - BOARD_FLASH_SIZE;
125 window_size = BOARD_FLASH_SIZE;
126
127 /*
128 * Static partition definition selection
129 */
130 parts = alchemy_partitions;
131 nb_parts = ARRAY_SIZE(alchemy_partitions);
132 alchemy_map.size = window_size;
133
134 /*
135 * Now let's probe for the actual flash. Do it here since
136 * specific machine settings might have been set above.
137 */
138 printk(KERN_NOTICE BOARD_MAP_NAME ": probing %d-bit flash bus\n",
139 alchemy_map.bankwidth*8);
140 alchemy_map.virt = ioremap(window_addr, window_size);
141 mymtd = do_map_probe("cfi_probe", &alchemy_map);
142 if (!mymtd) {
143 iounmap(alchemy_map.virt);
144 return -ENXIO;
145 }
146 mymtd->owner = THIS_MODULE;
147
148 add_mtd_partitions(mymtd, parts, nb_parts);
149 return 0;
150}
151
152static void __exit alchemy_mtd_cleanup(void)
153{
154 if (mymtd) {
155 del_mtd_partitions(mymtd);
156 map_destroy(mymtd);
157 iounmap(alchemy_map.virt);
158 }
159}
160
161module_init(alchemy_mtd_init);
162module_exit(alchemy_mtd_cleanup);
163
164MODULE_AUTHOR("Embedded Alley Solutions, Inc");
165MODULE_DESCRIPTION(BOARD_MAP_NAME " MTD driver");
166MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 92c334ff4508..43d46e424040 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -19,6 +19,7 @@
19#include <asm/io.h> 19#include <asm/io.h>
20 20
21#include <asm/mach-au1x00/au1xxx.h> 21#include <asm/mach-au1x00/au1xxx.h>
22#include <asm/mach-db1x00/bcsr.h>
22 23
23/* 24/*
24 * MTD structure for NAND controller 25 * MTD structure for NAND controller
@@ -475,7 +476,8 @@ static int __init au1xxx_nand_init(void)
475 /* set gpio206 high */ 476 /* set gpio206 high */
476 au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR); 477 au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR);
477 478
478 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr->status >> 6) & 0x1); 479 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
480
479 switch (boot_swapboot) { 481 switch (boot_swapboot) {
480 case 0: 482 case 0:
481 case 2: 483 case 2:
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 6bac04603a88..6e5a68ecde09 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -55,6 +55,7 @@
55#include <linux/delay.h> 55#include <linux/delay.h>
56#include <linux/crc32.h> 56#include <linux/crc32.h>
57#include <linux/phy.h> 57#include <linux/phy.h>
58#include <linux/platform_device.h>
58 59
59#include <asm/cpu.h> 60#include <asm/cpu.h>
60#include <asm/mipsregs.h> 61#include <asm/mipsregs.h>
@@ -63,6 +64,7 @@
63#include <asm/processor.h> 64#include <asm/processor.h>
64 65
65#include <au1000.h> 66#include <au1000.h>
67#include <au1xxx_eth.h>
66#include <prom.h> 68#include <prom.h>
67 69
68#include "au1000_eth.h" 70#include "au1000_eth.h"
@@ -112,15 +114,15 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
112 * 114 *
113 * PHY detection algorithm 115 * PHY detection algorithm
114 * 116 *
115 * If AU1XXX_PHY_STATIC_CONFIG is undefined, the PHY setup is 117 * If phy_static_config is undefined, the PHY setup is
116 * autodetected: 118 * autodetected:
117 * 119 *
118 * mii_probe() first searches the current MAC's MII bus for a PHY, 120 * mii_probe() first searches the current MAC's MII bus for a PHY,
119 * selecting the first (or last, if AU1XXX_PHY_SEARCH_HIGHEST_ADDR is 121 * selecting the first (or last, if phy_search_highest_addr is
120 * defined) PHY address not already claimed by another netdev. 122 * defined) PHY address not already claimed by another netdev.
121 * 123 *
122 * If nothing was found that way when searching for the 2nd ethernet 124 * If nothing was found that way when searching for the 2nd ethernet
123 * controller's PHY and AU1XXX_PHY1_SEARCH_ON_MAC0 is defined, then 125 * controller's PHY and phy1_search_mac0 is defined, then
124 * the first MII bus is searched as well for an unclaimed PHY; this is 126 * the first MII bus is searched as well for an unclaimed PHY; this is
125 * needed in case of a dual-PHY accessible only through the MAC0's MII 127 * needed in case of a dual-PHY accessible only through the MAC0's MII
126 * bus. 128 * bus.
@@ -129,9 +131,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
129 * controller is not registered to the network subsystem. 131 * controller is not registered to the network subsystem.
130 */ 132 */
131 133
132/* autodetection defaults */ 134/* autodetection defaults: phy1_search_mac0 */
133#undef AU1XXX_PHY_SEARCH_HIGHEST_ADDR
134#define AU1XXX_PHY1_SEARCH_ON_MAC0
135 135
136/* static PHY setup 136/* static PHY setup
137 * 137 *
@@ -148,29 +148,6 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
148 * specific irq-map 148 * specific irq-map
149 */ 149 */
150 150
151#if defined(CONFIG_MIPS_BOSPORUS)
152/*
153 * Micrel/Kendin 5 port switch attached to MAC0,
154 * MAC0 is associated with PHY address 5 (== WAN port)
155 * MAC1 is not associated with any PHY, since it's connected directly
156 * to the switch.
157 * no interrupts are used
158 */
159# define AU1XXX_PHY_STATIC_CONFIG
160
161# define AU1XXX_PHY0_ADDR 5
162# define AU1XXX_PHY0_BUSID 0
163# undef AU1XXX_PHY0_IRQ
164
165# undef AU1XXX_PHY1_ADDR
166# undef AU1XXX_PHY1_BUSID
167# undef AU1XXX_PHY1_IRQ
168#endif
169
170#if defined(AU1XXX_PHY0_BUSID) && (AU1XXX_PHY0_BUSID > 0)
171# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
172#endif
173
174static void enable_mac(struct net_device *dev, int force_reset) 151static void enable_mac(struct net_device *dev, int force_reset)
175{ 152{
176 unsigned long flags; 153 unsigned long flags;
@@ -390,67 +367,55 @@ static int mii_probe (struct net_device *dev)
390 struct au1000_private *const aup = netdev_priv(dev); 367 struct au1000_private *const aup = netdev_priv(dev);
391 struct phy_device *phydev = NULL; 368 struct phy_device *phydev = NULL;
392 369
393#if defined(AU1XXX_PHY_STATIC_CONFIG) 370 if (aup->phy_static_config) {
394 BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); 371 BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
395 372
396 if(aup->mac_id == 0) { /* get PHY0 */ 373 if (aup->phy_addr)
397# if defined(AU1XXX_PHY0_ADDR) 374 phydev = aup->mii_bus->phy_map[aup->phy_addr];
398 phydev = au_macs[AU1XXX_PHY0_BUSID]->mii_bus->phy_map[AU1XXX_PHY0_ADDR]; 375 else
399# else 376 printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
400 printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n", 377 dev->name);
401 dev->name);
402 return 0;
403# endif /* defined(AU1XXX_PHY0_ADDR) */
404 } else if (aup->mac_id == 1) { /* get PHY1 */
405# if defined(AU1XXX_PHY1_ADDR)
406 phydev = au_macs[AU1XXX_PHY1_BUSID]->mii_bus->phy_map[AU1XXX_PHY1_ADDR];
407# else
408 printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
409 dev->name);
410 return 0; 378 return 0;
411# endif /* defined(AU1XXX_PHY1_ADDR) */ 379 } else {
412 } 380 int phy_addr;
413 381
414#else /* defined(AU1XXX_PHY_STATIC_CONFIG) */ 382 /* find the first (lowest address) PHY on the current MAC's MII bus */
415 int phy_addr; 383 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
416 384 if (aup->mii_bus->phy_map[phy_addr]) {
417 /* find the first (lowest address) PHY on the current MAC's MII bus */ 385 phydev = aup->mii_bus->phy_map[phy_addr];
418 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) 386 if (!aup->phy_search_highest_addr)
419 if (aup->mii_bus->phy_map[phy_addr]) { 387 break; /* break out with first one found */
420 phydev = aup->mii_bus->phy_map[phy_addr]; 388 }
421# if !defined(AU1XXX_PHY_SEARCH_HIGHEST_ADDR)
422 break; /* break out with first one found */
423# endif
424 }
425 389
426# if defined(AU1XXX_PHY1_SEARCH_ON_MAC0) 390 if (aup->phy1_search_mac0) {
427 /* try harder to find a PHY */ 391 /* try harder to find a PHY */
428 if (!phydev && (aup->mac_id == 1)) { 392 if (!phydev && (aup->mac_id == 1)) {
429 /* no PHY found, maybe we have a dual PHY? */ 393 /* no PHY found, maybe we have a dual PHY? */
430 printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, " 394 printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
431 "let's see if it's attached to MAC0...\n"); 395 "let's see if it's attached to MAC0...\n");
432 396
433 BUG_ON(!au_macs[0]); 397 /* find the first (lowest address) non-attached PHY on
398 * the MAC0 MII bus */
399 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
400 struct phy_device *const tmp_phydev =
401 aup->mii_bus->phy_map[phy_addr];
434 402
435 /* find the first (lowest address) non-attached PHY on 403 if (aup->mac_id == 1)
436 * the MAC0 MII bus */ 404 break;
437 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
438 struct phy_device *const tmp_phydev =
439 au_macs[0]->mii_bus->phy_map[phy_addr];
440 405
441 if (!tmp_phydev) 406 if (!tmp_phydev)
442 continue; /* no PHY here... */ 407 continue; /* no PHY here... */
443 408
444 if (tmp_phydev->attached_dev) 409 if (tmp_phydev->attached_dev)
445 continue; /* already claimed by MAC0 */ 410 continue; /* already claimed by MAC0 */
446 411
447 phydev = tmp_phydev; 412 phydev = tmp_phydev;
448 break; /* found it */ 413 break; /* found it */
414 }
415 }
449 } 416 }
450 } 417 }
451# endif /* defined(AU1XXX_PHY1_SEARCH_OTHER_BUS) */
452 418
453#endif /* defined(AU1XXX_PHY_STATIC_CONFIG) */
454 if (!phydev) { 419 if (!phydev) {
455 printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name); 420 printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
456 return -1; 421 return -1;
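
Note: the hunk above replaces the old compile-time PHY selection with per-device flags (phy_static_config, phy_search_highest_addr, phy1_search_mac0). For reference, a minimal sketch of the bus scan it implements, assuming a mii_bus whose phy_map[] has already been filled by mdiobus_register(); the helper name find_unclaimed_phy() is illustrative only and not part of this patch.

#include <linux/phy.h>

/* Illustrative only: return the first (or, if search_highest is set, the
 * last) PHY on @bus that no net_device has claimed yet, or NULL. */
static struct phy_device *find_unclaimed_phy(struct mii_bus *bus,
					     int search_highest)
{
	struct phy_device *found = NULL;
	int addr;

	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		struct phy_device *p = bus->phy_map[addr];

		if (!p || p->attached_dev)
			continue;	/* empty slot or already claimed */
		found = p;
		if (!search_highest)
			break;		/* first hit wins */
	}
	return found;
}
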
@@ -578,31 +543,6 @@ setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
578 } 543 }
579} 544}
580 545
581static struct {
582 u32 base_addr;
583 u32 macen_addr;
584 int irq;
585 struct net_device *dev;
586} iflist[2] = {
587#ifdef CONFIG_SOC_AU1000
588 {AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
589 {AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
590#endif
591#ifdef CONFIG_SOC_AU1100
592 {AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
593#endif
594#ifdef CONFIG_SOC_AU1500
595 {AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
596 {AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
597#endif
598#ifdef CONFIG_SOC_AU1550
599 {AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
600 {AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
601#endif
602};
603
604static int num_ifs;
605
606/* 546/*
607 * ethtool operations 547 * ethtool operations
608 */ 548 */
@@ -711,7 +651,6 @@ static int au1000_init(struct net_device *dev)
711 651
712static inline void update_rx_stats(struct net_device *dev, u32 status) 652static inline void update_rx_stats(struct net_device *dev, u32 status)
713{ 653{
714 struct au1000_private *aup = netdev_priv(dev);
715 struct net_device_stats *ps = &dev->stats; 654 struct net_device_stats *ps = &dev->stats;
716 655
717 ps->rx_packets++; 656 ps->rx_packets++;
@@ -969,7 +908,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
969 } 908 }
970 909
971 pDB = aup->tx_db_inuse[aup->tx_head]; 910 pDB = aup->tx_db_inuse[aup->tx_head];
972 skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); 911 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
973 if (skb->len < ETH_ZLEN) { 912 if (skb->len < ETH_ZLEN) {
974 for (i=skb->len; i<ETH_ZLEN; i++) { 913 for (i=skb->len; i<ETH_ZLEN; i++) {
975 ((char *)pDB->vaddr)[i] = 0; 914 ((char *)pDB->vaddr)[i] = 0;
@@ -1058,53 +997,59 @@ static const struct net_device_ops au1000_netdev_ops = {
1058 .ndo_change_mtu = eth_change_mtu, 997 .ndo_change_mtu = eth_change_mtu,
1059}; 998};
1060 999
1061static struct net_device * au1000_probe(int port_num) 1000static int __devinit au1000_probe(struct platform_device *pdev)
1062{ 1001{
1063 static unsigned version_printed = 0; 1002 static unsigned version_printed = 0;
1064 struct au1000_private *aup = NULL; 1003 struct au1000_private *aup = NULL;
1004 struct au1000_eth_platform_data *pd;
1065 struct net_device *dev = NULL; 1005 struct net_device *dev = NULL;
1066 db_dest_t *pDB, *pDBfree; 1006 db_dest_t *pDB, *pDBfree;
1007 int irq, i, err = 0;
1008 struct resource *base, *macen;
1067 char ethaddr[6]; 1009 char ethaddr[6];
1068 int irq, i, err;
1069 u32 base, macen;
1070 1010
1071 if (port_num >= NUM_ETH_INTERFACES) 1011 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1072 return NULL; 1012 if (!base) {
1013 printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
1014 err = -ENODEV;
1015 goto out;
1016 }
1073 1017
1074 base = CPHYSADDR(iflist[port_num].base_addr ); 1018 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1075 macen = CPHYSADDR(iflist[port_num].macen_addr); 1019 if (!macen) {
1076 irq = iflist[port_num].irq; 1020 printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
1021 err = -ENODEV;
1022 goto out;
1023 }
1077 1024
1078 if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") || 1025 irq = platform_get_irq(pdev, 0);
1079 !request_mem_region(macen, 4, "Au1x00 ENET")) 1026 if (irq < 0) {
1080 return NULL; 1027 printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
1028 err = -ENODEV;
1029 goto out;
1030 }
1081 1031
1082 if (version_printed++ == 0) 1032 if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
1083 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); 1033 printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
1034 err = -ENXIO;
1035 goto out;
1036 }
1037
1038 if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
1039 printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
1040 err = -ENXIO;
1041 goto err_request;
1042 }
1084 1043
1085 dev = alloc_etherdev(sizeof(struct au1000_private)); 1044 dev = alloc_etherdev(sizeof(struct au1000_private));
1086 if (!dev) { 1045 if (!dev) {
1087 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME); 1046 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
1088 return NULL; 1047 err = -ENOMEM;
1048 goto err_alloc;
1089 } 1049 }
1090 1050
1091 dev->base_addr = base; 1051 SET_NETDEV_DEV(dev, &pdev->dev);
1092 dev->irq = irq; 1052 platform_set_drvdata(pdev, dev);
1093 dev->netdev_ops = &au1000_netdev_ops;
1094 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1095 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1096
1097 err = register_netdev(dev);
1098 if (err != 0) {
1099 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
1100 DRV_NAME, err);
1101 free_netdev(dev);
1102 return NULL;
1103 }
1104
1105 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
1106 dev->name, base, irq);
1107
1108 aup = netdev_priv(dev); 1053 aup = netdev_priv(dev);
1109 1054
1110 spin_lock_init(&aup->lock); 1055 spin_lock_init(&aup->lock);
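
Note: the probe() conversion above follows the usual platform-device pattern: look up both MEM resources and the IRQ before touching hardware, and bail out with -ENODEV/-ENXIO otherwise. A condensed sketch of that sequence under assumed names (my_probe, the "continue as in au1000_probe" comment) that are not part of this patch:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static int __devinit my_probe(struct platform_device *pdev)
{
	struct resource *base, *macen;
	void __iomem *regs;
	int irq;

	base  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq   = platform_get_irq(pdev, 0);
	if (!base || !macen || irq < 0)
		return -ENODEV;		/* board code did not describe us */

	if (!request_mem_region(base->start, resource_size(base), pdev->name))
		return -ENXIO;

	regs = ioremap_nocache(base->start, resource_size(base));
	if (!regs) {
		release_mem_region(base->start, resource_size(base));
		return -ENXIO;
	}

	/* ... continue as in au1000_probe(): request the MAC-enable region,
	 * alloc_etherdev(), DMA buffers, MDIO bus, register_netdev() ... */
	return 0;
}
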
@@ -1115,21 +1060,29 @@ static struct net_device * au1000_probe(int port_num)
1115 (NUM_TX_BUFFS + NUM_RX_BUFFS), 1060 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1116 &aup->dma_addr, 0); 1061 &aup->dma_addr, 0);
1117 if (!aup->vaddr) { 1062 if (!aup->vaddr) {
1118 free_netdev(dev); 1063 printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
1119 release_mem_region( base, MAC_IOSIZE); 1064 err = -ENOMEM;
1120 release_mem_region(macen, 4); 1065 goto err_vaddr;
1121 return NULL;
1122 } 1066 }
1123 1067
1124 /* aup->mac is the base address of the MAC's registers */ 1068 /* aup->mac is the base address of the MAC's registers */
1125 aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr; 1069 aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
1070 if (!aup->mac) {
1071 printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
1072 err = -ENXIO;
1073 goto err_remap1;
1074 }
1126 1075
1127 /* Setup some variables for quick register address access */ 1076 /* Setup some variables for quick register address access */
1128 aup->enable = (volatile u32 *)iflist[port_num].macen_addr; 1077 aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
1129 aup->mac_id = port_num; 1078 if (!aup->enable) {
1130 au_macs[port_num] = aup; 1079 printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
1080 err = -ENXIO;
1081 goto err_remap2;
1082 }
1083 aup->mac_id = pdev->id;
1131 1084
1132 if (port_num == 0) { 1085 if (pdev->id == 0) {
1133 if (prom_get_ethernet_addr(ethaddr) == 0) 1086 if (prom_get_ethernet_addr(ethaddr) == 0)
1134 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); 1087 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1135 else { 1088 else {
@@ -1139,7 +1092,7 @@ static struct net_device * au1000_probe(int port_num)
1139 } 1092 }
1140 1093
1141 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); 1094 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1142 } else if (port_num == 1) 1095 } else if (pdev->id == 1)
1143 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); 1096 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1144 1097
1145 /* 1098 /*
@@ -1147,14 +1100,37 @@ static struct net_device * au1000_probe(int port_num)
1147 * to match those that are printed on their stickers 1100 * to match those that are printed on their stickers
1148 */ 1101 */
1149 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr)); 1102 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1150 dev->dev_addr[5] += port_num; 1103 dev->dev_addr[5] += pdev->id;
1151 1104
1152 *aup->enable = 0; 1105 *aup->enable = 0;
1153 aup->mac_enabled = 0; 1106 aup->mac_enabled = 0;
1154 1107
1108 pd = pdev->dev.platform_data;
1109 if (!pd) {
1110 printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
1111 aup->phy1_search_mac0 = 1;
1112 } else {
1113 aup->phy_static_config = pd->phy_static_config;
1114 aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1115 aup->phy1_search_mac0 = pd->phy1_search_mac0;
1116 aup->phy_addr = pd->phy_addr;
1117 aup->phy_busid = pd->phy_busid;
1118 aup->phy_irq = pd->phy_irq;
1119 }
1120
1121 if (aup->phy_busid && aup->phy_busid > 0) {
1122 printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
 1122 printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII "
1123 "bus not supported yet\n");
1124 err = -ENODEV;
1125 goto err_mdiobus_alloc;
1126 }
1127
1155 aup->mii_bus = mdiobus_alloc(); 1128 aup->mii_bus = mdiobus_alloc();
1156 if (aup->mii_bus == NULL) 1129 if (aup->mii_bus == NULL) {
1157 goto err_out; 1130 printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
1131 err = -ENOMEM;
1132 goto err_mdiobus_alloc;
1133 }
1158 1134
1159 aup->mii_bus->priv = dev; 1135 aup->mii_bus->priv = dev;
1160 aup->mii_bus->read = au1000_mdiobus_read; 1136 aup->mii_bus->read = au1000_mdiobus_read;
@@ -1168,23 +1144,19 @@ static struct net_device * au1000_probe(int port_num)
1168 1144
1169 for(i = 0; i < PHY_MAX_ADDR; ++i) 1145 for(i = 0; i < PHY_MAX_ADDR; ++i)
1170 aup->mii_bus->irq[i] = PHY_POLL; 1146 aup->mii_bus->irq[i] = PHY_POLL;
1171
1172 /* if known, set corresponding PHY IRQs */ 1147 /* if known, set corresponding PHY IRQs */
1173#if defined(AU1XXX_PHY_STATIC_CONFIG) 1148 if (aup->phy_static_config)
1174# if defined(AU1XXX_PHY0_IRQ) 1149 if (aup->phy_irq && aup->phy_busid == aup->mac_id)
1175 if (AU1XXX_PHY0_BUSID == aup->mac_id) 1150 aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
1176 aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ; 1151
1177# endif 1152 err = mdiobus_register(aup->mii_bus);
1178# if defined(AU1XXX_PHY1_IRQ) 1153 if (err) {
1179 if (AU1XXX_PHY1_BUSID == aup->mac_id) 1154 printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
1180 aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ; 1155 goto err_mdiobus_reg;
1181# endif 1156 }
1182#endif
1183 mdiobus_register(aup->mii_bus);
1184 1157
1185 if (mii_probe(dev) != 0) { 1158 if (mii_probe(dev) != 0)
1186 goto err_out; 1159 goto err_out;
1187 }
1188 1160
1189 pDBfree = NULL; 1161 pDBfree = NULL;
1190 /* setup the data buffer descriptors and attach a buffer to each one */ 1162 /* setup the data buffer descriptors and attach a buffer to each one */
@@ -1216,19 +1188,35 @@ static struct net_device * au1000_probe(int port_num)
1216 aup->tx_db_inuse[i] = pDB; 1188 aup->tx_db_inuse[i] = pDB;
1217 } 1189 }
1218 1190
1191 dev->base_addr = base->start;
1192 dev->irq = irq;
1193 dev->netdev_ops = &au1000_netdev_ops;
1194 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1195 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1196
1219 /* 1197 /*
1220 * The boot code uses the ethernet controller, so reset it to start 1198 * The boot code uses the ethernet controller, so reset it to start
1221 * fresh. au1000_init() expects that the device is in reset state. 1199 * fresh. au1000_init() expects that the device is in reset state.
1222 */ 1200 */
1223 reset_mac(dev); 1201 reset_mac(dev);
1224 1202
1225 return dev; 1203 err = register_netdev(dev);
1204 if (err) {
1205 printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
1206 dev->name);
1207 goto err_out;
1208 }
1209
1210 printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1211 dev->name, (unsigned long)base->start, irq);
1212 if (version_printed++ == 0)
1213 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1214
1215 return 0;
1226 1216
1227err_out: 1217err_out:
1228 if (aup->mii_bus != NULL) { 1218 if (aup->mii_bus != NULL)
1229 mdiobus_unregister(aup->mii_bus); 1219 mdiobus_unregister(aup->mii_bus);
1230 mdiobus_free(aup->mii_bus);
1231 }
1232 1220
1233 /* here we should have a valid dev plus aup-> register addresses 1221 /* here we should have a valid dev plus aup-> register addresses
1234 * so we can reset the mac properly.*/ 1222 * so we can reset the mac properly.*/
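
Note: the hunk above defers register_netdev() until every netdev field and every driver resource is in place; once registration succeeds the interface is visible to userspace and can be opened immediately, so nothing may be left half-initialized. A minimal sketch of that ordering; the function name and the placeholder MAC are illustrative only.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/jiffies.h>

/* Illustrative ordering only: everything the stack may use once the
 * interface appears must be set before register_netdev(). */
static struct net_device *create_and_register(const struct net_device_ops *ops)
{
	struct net_device *dev = alloc_etherdev(0);

	if (!dev)
		return NULL;

	dev->netdev_ops = ops;
	dev->watchdog_timeo = 5 * HZ;
	random_ether_addr(dev->dev_addr);	/* placeholder MAC address */

	if (register_netdev(dev)) {	/* last step: device goes live here */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}
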
@@ -1242,67 +1230,84 @@ err_out:
1242 if (aup->tx_db_inuse[i]) 1230 if (aup->tx_db_inuse[i])
1243 ReleaseDB(aup, aup->tx_db_inuse[i]); 1231 ReleaseDB(aup, aup->tx_db_inuse[i]);
1244 } 1232 }
1233err_mdiobus_reg:
1234 mdiobus_free(aup->mii_bus);
1235err_mdiobus_alloc:
1236 iounmap(aup->enable);
1237err_remap2:
1238 iounmap(aup->mac);
1239err_remap1:
1245 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), 1240 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1246 (void *)aup->vaddr, aup->dma_addr); 1241 (void *)aup->vaddr, aup->dma_addr);
1247 unregister_netdev(dev); 1242err_vaddr:
1248 free_netdev(dev); 1243 free_netdev(dev);
1249 release_mem_region( base, MAC_IOSIZE); 1244err_alloc:
1250 release_mem_region(macen, 4); 1245 release_mem_region(macen->start, resource_size(macen));
1251 return NULL; 1246err_request:
1247 release_mem_region(base->start, resource_size(base));
1248out:
1249 return err;
1252} 1250}
1253 1251
1254/* 1252static int __devexit au1000_remove(struct platform_device *pdev)
1255 * Setup the base address and interrupt of the Au1xxx ethernet macs
1256 * based on cpu type and whether the interface is enabled in sys_pinfunc
1257 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1258 */
1259static int __init au1000_init_module(void)
1260{ 1253{
1261 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4); 1254 struct net_device *dev = platform_get_drvdata(pdev);
1262 struct net_device *dev; 1255 struct au1000_private *aup = netdev_priv(dev);
1263 int i, found_one = 0; 1256 int i;
1257 struct resource *base, *macen;
1264 1258
1265 num_ifs = NUM_ETH_INTERFACES - ni; 1259 platform_set_drvdata(pdev, NULL);
1260
1261 unregister_netdev(dev);
1262 mdiobus_unregister(aup->mii_bus);
1263 mdiobus_free(aup->mii_bus);
1264
1265 for (i = 0; i < NUM_RX_DMA; i++)
1266 if (aup->rx_db_inuse[i])
1267 ReleaseDB(aup, aup->rx_db_inuse[i]);
1268
1269 for (i = 0; i < NUM_TX_DMA; i++)
1270 if (aup->tx_db_inuse[i])
1271 ReleaseDB(aup, aup->tx_db_inuse[i]);
1272
1273 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1274 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1275 (void *)aup->vaddr, aup->dma_addr);
1276
1277 iounmap(aup->mac);
1278 iounmap(aup->enable);
1279
1280 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1281 release_mem_region(base->start, resource_size(base));
1282
1283 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1284 release_mem_region(macen->start, resource_size(macen));
1285
1286 free_netdev(dev);
1266 1287
1267 for(i = 0; i < num_ifs; i++) {
1268 dev = au1000_probe(i);
1269 iflist[i].dev = dev;
1270 if (dev)
1271 found_one++;
1272 }
1273 if (!found_one)
1274 return -ENODEV;
1275 return 0; 1288 return 0;
1276} 1289}
1277 1290
1278static void __exit au1000_cleanup_module(void) 1291static struct platform_driver au1000_eth_driver = {
1292 .probe = au1000_probe,
1293 .remove = __devexit_p(au1000_remove),
1294 .driver = {
1295 .name = "au1000-eth",
1296 .owner = THIS_MODULE,
1297 },
1298};
1299MODULE_ALIAS("platform:au1000-eth");
1300
1301
1302static int __init au1000_init_module(void)
1303{
1304 return platform_driver_register(&au1000_eth_driver);
1305}
1306
1307static void __exit au1000_exit_module(void)
1279{ 1308{
1280 int i, j; 1309 platform_driver_unregister(&au1000_eth_driver);
1281 struct net_device *dev;
1282 struct au1000_private *aup;
1283
1284 for (i = 0; i < num_ifs; i++) {
1285 dev = iflist[i].dev;
1286 if (dev) {
1287 aup = netdev_priv(dev);
1288 unregister_netdev(dev);
1289 mdiobus_unregister(aup->mii_bus);
1290 mdiobus_free(aup->mii_bus);
1291 for (j = 0; j < NUM_RX_DMA; j++)
1292 if (aup->rx_db_inuse[j])
1293 ReleaseDB(aup, aup->rx_db_inuse[j]);
1294 for (j = 0; j < NUM_TX_DMA; j++)
1295 if (aup->tx_db_inuse[j])
1296 ReleaseDB(aup, aup->tx_db_inuse[j]);
1297 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1298 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1299 (void *)aup->vaddr, aup->dma_addr);
1300 release_mem_region(dev->base_addr, MAC_IOSIZE);
1301 release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
1302 free_netdev(dev);
1303 }
1304 }
1305} 1310}
1306 1311
1307module_init(au1000_init_module); 1312module_init(au1000_init_module);
1308module_exit(au1000_cleanup_module); 1313module_exit(au1000_exit_module);
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index 824ecd5ff3a8..f9d29a29b8fd 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -108,6 +108,15 @@ struct au1000_private {
108 struct phy_device *phy_dev; 108 struct phy_device *phy_dev;
109 struct mii_bus *mii_bus; 109 struct mii_bus *mii_bus;
110 110
111 /* PHY configuration */
112 int phy_static_config;
113 int phy_search_highest_addr;
114 int phy1_search_mac0;
115
116 int phy_addr;
117 int phy_busid;
118 int phy_irq;
119
111 /* These variables are just for quick access to certain regs addresses. */ 120 /* These variables are just for quick access to certain regs addresses. */
112 volatile mac_reg_t *mac; /* mac registers */ 121 volatile mac_reg_t *mac; /* mac registers */
113 volatile u32 *enable; /* address of MAC Enable Register */ 122 volatile u32 *enable; /* address of MAC Enable Register */
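
Note: with the static iflist[] table gone, the MAC register base, the MAC-enable register and the DMA interrupt must now come from board/SoC code as platform resources, and the per-board PHY options travel in struct au1000_eth_platform_data (the field names below match those copied from pd-> in the probe hunk). A hypothetical board-file fragment wiring up one MAC; the addresses, IRQ number, include path and the static_phy_pd name are placeholders, not values from this patch.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <au1xxx_eth.h>	/* as included by the driver; board code may need
			 * the full asm/mach-au1x00/ path */

static struct resource mac0_res[] = {
	{
		.start	= 0x10500000,		/* placeholder: MAC0 registers */
		.end	= 0x1050ffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x10520000,		/* placeholder: MAC0 enable reg */
		.end	= 0x10520003,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 28,			/* placeholder: MAC0 DMA irq */
		.end	= 28,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct au1000_eth_platform_data static_phy_pd = {
	.phy_static_config	= 1,
	.phy_addr		= 5,	/* e.g. a switch WAN port */
	.phy_busid		= 0,
	.phy_irq		= 0,	/* 0 == keep PHY_POLL */
};

static struct platform_device mac0_dev = {
	.name		= "au1000-eth",
	.id		= 0,
	.resource	= mac0_res,
	.num_resources	= ARRAY_SIZE(mac0_res),
	.dev		= {
		.platform_data = &static_phy_pd,
	},
};

static int __init board_register_mac(void)
{
	return platform_device_register(&mac0_dev);
}
arch_initcall(board_register_mac);
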
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 8d0be26f94e3..bf2072e54200 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -36,6 +36,7 @@
36#include <linux/phy_fixed.h> 36#include <linux/phy_fixed.h>
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/clk.h>
39#include <asm/gpio.h> 40#include <asm/gpio.h>
40#include <asm/atomic.h> 41#include <asm/atomic.h>
41 42
@@ -294,9 +295,16 @@ static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
294 295
295static int cpmac_mdio_reset(struct mii_bus *bus) 296static int cpmac_mdio_reset(struct mii_bus *bus)
296{ 297{
298 struct clk *cpmac_clk;
299
300 cpmac_clk = clk_get(&bus->dev, "cpmac");
301 if (IS_ERR(cpmac_clk)) {
302 printk(KERN_ERR "unable to get cpmac clock\n");
303 return -1;
304 }
297 ar7_device_reset(AR7_RESET_BIT_MDIO); 305 ar7_device_reset(AR7_RESET_BIT_MDIO);
298 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | 306 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
299 MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1)); 307 MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
300 return 0; 308 return 0;
301} 309}
302 310
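
Note: instead of the hard-coded ar7_cpmac_freq(), the MDIO clock divider is now derived from the clk framework. A minimal sketch of that lookup, assuming a clock named "cpmac" is registered for the device; the helper name and target_hz parameter are illustrative, and this sketch drops the clock reference with clk_put() once the rate has been sampled, which the hunk itself does not do.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

/* Illustrative: compute an MDIO clock divider from a named clock. */
static int mdio_clkdiv(struct device *dev, unsigned long target_hz)
{
	struct clk *clk;
	unsigned long rate;

	clk = clk_get(dev, "cpmac");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rate = clk_get_rate(clk);
	clk_put(clk);			/* rate sampled once, drop the ref */

	if (!rate)
		return -ENODEV;

	return rate / target_hz - 1;	/* same formula as the hunk above */
}
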
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 9b2eebdbb25b..b5cbd39d0685 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -36,6 +36,7 @@
36#include <asm/pb1000.h> 36#include <asm/pb1000.h>
37#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 37#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
38#include <asm/db1x00.h> 38#include <asm/db1x00.h>
39#include <asm/mach-db1x00/bcsr.h>
39#else 40#else
40#error au1k_ir: unsupported board 41#error au1k_ir: unsupported board
41#endif 42#endif
@@ -66,10 +67,6 @@ static char version[] __devinitdata =
66 67
67#define RUN_AT(x) (jiffies + (x)) 68#define RUN_AT(x) (jiffies + (x))
68 69
69#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
70static BCSR * const bcsr = (BCSR *)0xAE000000;
71#endif
72
73static DEFINE_SPINLOCK(ir_lock); 70static DEFINE_SPINLOCK(ir_lock);
74 71
75/* 72/*
@@ -282,9 +279,8 @@ static int au1k_irda_net_init(struct net_device *dev)
282 279
283#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 280#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
284 /* power on */ 281 /* power on */
285 bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK; 282 bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
286 bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL; 283 BCSR_RESETS_IRDA_MODE_FULL);
287 au_sync();
288#endif 284#endif
289 285
290 return 0; 286 return 0;
@@ -720,14 +716,14 @@ au1k_irda_set_speed(struct net_device *dev, int speed)
720 716
721 if (speed == 4000000) { 717 if (speed == 4000000) {
722#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 718#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
723 bcsr->resets |= BCSR_RESETS_FIR_SEL; 719 bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
724#else /* Pb1000 and Pb1100 */ 720#else /* Pb1000 and Pb1100 */
725 writel(1<<13, CPLD_AUX1); 721 writel(1<<13, CPLD_AUX1);
726#endif 722#endif
727 } 723 }
728 else { 724 else {
729#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 725#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
730 bcsr->resets &= ~BCSR_RESETS_FIR_SEL; 726 bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
731#else /* Pb1000 and Pb1100 */ 727#else /* Pb1000 and Pb1100 */
732 writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1); 728 writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
733#endif 729#endif
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 6dd486d2977b..aa57cfd1e3fb 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -453,8 +453,7 @@ static int mhz_mfc_config(struct pcmcia_device *link)
453 453
454 link->conf.Attributes |= CONF_ENABLE_SPKR; 454 link->conf.Attributes |= CONF_ENABLE_SPKR;
455 link->conf.Status = CCSR_AUDIO_ENA; 455 link->conf.Status = CCSR_AUDIO_ENA;
456 link->irq.Attributes = 456 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
457 IRQ_TYPE_DYNAMIC_SHARING;
458 link->io.IOAddrLines = 16; 457 link->io.IOAddrLines = 16;
459 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 458 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
460 link->io.NumPorts2 = 8; 459 link->io.NumPorts2 = 8;
@@ -652,8 +651,7 @@ static int osi_config(struct pcmcia_device *link)
652 651
653 link->conf.Attributes |= CONF_ENABLE_SPKR; 652 link->conf.Attributes |= CONF_ENABLE_SPKR;
654 link->conf.Status = CCSR_AUDIO_ENA; 653 link->conf.Status = CCSR_AUDIO_ENA;
655 link->irq.Attributes = 654 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
656 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
657 link->io.NumPorts1 = 64; 655 link->io.NumPorts1 = 64;
658 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 656 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
659 link->io.NumPorts2 = 8; 657 link->io.NumPorts2 = 8;
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 9f3adbd9f700..0a6601c76809 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -84,7 +84,7 @@ config YENTA
84 tristate "CardBus yenta-compatible bridge support" 84 tristate "CardBus yenta-compatible bridge support"
85 depends on PCI 85 depends on PCI
86 select CARDBUS if !EMBEDDED 86 select CARDBUS if !EMBEDDED
87 select PCCARD_NONSTATIC 87 select PCCARD_NONSTATIC if PCMCIA != n
88 ---help--- 88 ---help---
89 This option enables support for CardBus host bridges. Virtually 89 This option enables support for CardBus host bridges. Virtually
90 all modern PCMCIA bridges are CardBus compatible. A "bridge" is 90 all modern PCMCIA bridges are CardBus compatible. A "bridge" is
@@ -161,9 +161,8 @@ config TCIC
161 161
162config PCMCIA_M8XX 162config PCMCIA_M8XX
163 tristate "MPC8xx PCMCIA support" 163 tristate "MPC8xx PCMCIA support"
164 depends on PCMCIA && PPC && 8xx 164 depends on PCCARD && PPC && 8xx
165 select PCCARD_IODYN 165 select PCCARD_IODYN if PCMCIA != n
166 select PCCARD_NONSTATIC
167 help 166 help
168 Say Y here to include support for PowerPC 8xx series PCMCIA 167 Say Y here to include support for PowerPC 8xx series PCMCIA
169 controller. 168 controller.
@@ -174,6 +173,27 @@ config PCMCIA_AU1X00
174 tristate "Au1x00 pcmcia support" 173 tristate "Au1x00 pcmcia support"
175 depends on SOC_AU1X00 && PCMCIA 174 depends on SOC_AU1X00 && PCMCIA
176 175
176config PCMCIA_ALCHEMY_DEVBOARD
177 tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
178 depends on SOC_AU1X00 && PCMCIA
179 select 64BIT_PHYS_ADDR
180 help
 181 Enable this driver if you want PCMCIA support on your Alchemy
 182 Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200 boards.
183 NOT suitable for the PB1000!
184
185 This driver is also available as a module called db1xxx_ss.ko
186
187config PCMCIA_XXS1500
188 tristate "MyCable XXS1500 PCMCIA socket support"
189 depends on PCMCIA && MIPS_XXS1500
190 select 64BIT_PHYS_ADDR
191 help
192 Support for the PCMCIA/CF socket interface on MyCable XXS1500
193 systems.
194
195 This driver is also available as a module called xxs1500_ss.ko
196
177config PCMCIA_BCM63XX 197config PCMCIA_BCM63XX
178 tristate "bcm63xx pcmcia support" 198 tristate "bcm63xx pcmcia support"
179 depends on BCM63XX && PCMCIA 199 depends on BCM63XX && PCMCIA
@@ -238,14 +258,12 @@ config PCMCIA_PROBE
238config M32R_PCC 258config M32R_PCC
239 bool "M32R PCMCIA I/F" 259 bool "M32R PCMCIA I/F"
240 depends on M32R && CHIP_M32700 && PCMCIA 260 depends on M32R && CHIP_M32700 && PCMCIA
241 select PCCARD_NONSTATIC
242 help 261 help
243 Say Y here to use the M32R PCMCIA controller. 262 Say Y here to use the M32R PCMCIA controller.
244 263
245config M32R_CFC 264config M32R_CFC
246 bool "M32R CF I/F Controller" 265 bool "M32R CF I/F Controller"
247 depends on M32R && (PLAT_USRV || PLAT_M32700UT || PLAT_MAPPI2 || PLAT_MAPPI3 || PLAT_OPSPUT) 266 depends on M32R && (PLAT_USRV || PLAT_M32700UT || PLAT_MAPPI2 || PLAT_MAPPI3 || PLAT_OPSPUT)
248 select PCCARD_NONSTATIC
249 help 267 help
250 Say Y here to use the M32R CompactFlash controller. 268 Say Y here to use the M32R CompactFlash controller.
251 269
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 83ff802de544..381b031d9d75 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -2,11 +2,11 @@
2# Makefile for the kernel pcmcia subsystem (c/o David Hinds) 2# Makefile for the kernel pcmcia subsystem (c/o David Hinds)
3# 3#
4 4
5pcmcia_core-y += cs.o cistpl.o rsrc_mgr.o socket_sysfs.o 5pcmcia_core-y += cs.o rsrc_mgr.o socket_sysfs.o
6pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o 6pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o
7obj-$(CONFIG_PCCARD) += pcmcia_core.o 7obj-$(CONFIG_PCCARD) += pcmcia_core.o
8 8
9pcmcia-y += ds.o pcmcia_resource.o 9pcmcia-y += ds.o pcmcia_resource.o cistpl.o
10pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o 10pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o
11obj-$(CONFIG_PCMCIA) += pcmcia.o 11obj-$(CONFIG_PCMCIA) += pcmcia.o
12 12
@@ -35,18 +35,10 @@ obj-$(CONFIG_OMAP_CF) += omap_cf.o
35obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o 35obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o
36obj-$(CONFIG_AT91_CF) += at91_cf.o 36obj-$(CONFIG_AT91_CF) += at91_cf.o
37obj-$(CONFIG_ELECTRA_CF) += electra_cf.o 37obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
38obj-$(CONFIG_PCMCIA_ALCHEMY_DEVBOARD) += db1xxx_ss.o
38 39
39au1x00_ss-y += au1000_generic.o 40au1x00_ss-y += au1000_generic.o
40au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o 41au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o
41au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o
42au1x00_ss-$(CONFIG_MIPS_PB1200) += au1000_db1x00.o
43au1x00_ss-$(CONFIG_MIPS_PB1500) += au1000_pb1x00.o
44au1x00_ss-$(CONFIG_MIPS_DB1000) += au1000_db1x00.o
45au1x00_ss-$(CONFIG_MIPS_DB1100) += au1000_db1x00.o
46au1x00_ss-$(CONFIG_MIPS_DB1200) += au1000_db1x00.o
47au1x00_ss-$(CONFIG_MIPS_DB1500) += au1000_db1x00.o
48au1x00_ss-$(CONFIG_MIPS_DB1550) += au1000_db1x00.o
49au1x00_ss-$(CONFIG_MIPS_XXS1500) += au1000_xxs1500.o
50 42
51sa1111_cs-y += sa1111_generic.o 43sa1111_cs-y += sa1111_generic.o
52sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1100_neponset.o 44sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1100_neponset.o
@@ -76,3 +68,5 @@ pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
76pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o 68pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
77 69
78obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y) 70obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
71
72obj-$(CONFIG_PCMCIA_XXS1500) += xxs1500_ss.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index e1dccedc5960..5d228071ec69 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -52,8 +52,6 @@ struct at91_cf_socket {
52 unsigned long phys_baseaddr; 52 unsigned long phys_baseaddr;
53}; 53};
54 54
55#define SZ_2K (2 * SZ_1K)
56
57static inline int at91_cf_present(struct at91_cf_socket *cf) 55static inline int at91_cf_present(struct at91_cf_socket *cf)
58{ 56{
59 return !gpio_get_value(cf->board->det_pin); 57 return !gpio_get_value(cf->board->det_pin);
diff --git a/drivers/pcmcia/au1000_db1x00.c b/drivers/pcmcia/au1000_db1x00.c
deleted file mode 100644
index c78d77fd7e3b..000000000000
--- a/drivers/pcmcia/au1000_db1x00.c
+++ /dev/null
@@ -1,305 +0,0 @@
1/*
2 *
3 * Alchemy Semi Db1x00 boards specific pcmcia routines.
4 *
5 * Copyright 2002 MontaVista Software Inc.
6 * Author: MontaVista Software, Inc.
7 * ppopov@mvista.com or source@mvista.com
8 *
9 * Copyright 2004 Pete Popov, updated the driver to 2.6.
10 * Followed the sa11xx API and largely copied many of the hardware
11 * independent functions.
12 *
13 * ########################################################################
14 *
15 * This program is free software; you can distribute it and/or modify it
16 * under the terms of the GNU General Public License (Version 2) as
17 * published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope it will be useful, but WITHOUT
20 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 * for more details.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
27 *
28 * ########################################################################
29 *
30 *
31 */
32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/errno.h>
36#include <linux/interrupt.h>
37#include <linux/device.h>
38#include <linux/init.h>
39
40#include <asm/irq.h>
41#include <asm/signal.h>
42#include <asm/mach-au1x00/au1000.h>
43
44#if defined(CONFIG_MIPS_DB1200)
45 #include <db1200.h>
46#elif defined(CONFIG_MIPS_PB1200)
47 #include <pb1200.h>
48#else
49 #include <asm/mach-db1x00/db1x00.h>
50 static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
51#endif
52
53#include "au1000_generic.h"
54
55#if 0
56#define debug(x,args...) printk(KERN_DEBUG "%s: " x, __func__ , ##args)
57#else
58#define debug(x,args...)
59#endif
60
61
62struct au1000_pcmcia_socket au1000_pcmcia_socket[PCMCIA_NUM_SOCKS];
63extern int au1x00_pcmcia_socket_probe(struct device *, struct pcmcia_low_level *, int, int);
64
65static int db1x00_pcmcia_hw_init(struct au1000_pcmcia_socket *skt)
66{
67#ifdef CONFIG_MIPS_DB1550
68 skt->irq = skt->nr ? AU1000_GPIO_5 : AU1000_GPIO_3;
69#elif defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
70 skt->irq = skt->nr ? BOARD_PC1_INT : BOARD_PC0_INT;
71#else
72 skt->irq = skt->nr ? AU1000_GPIO_5 : AU1000_GPIO_2;
73#endif
74 return 0;
75}
76
77static void db1x00_pcmcia_shutdown(struct au1000_pcmcia_socket *skt)
78{
79 bcsr->pcmcia = 0; /* turn off power */
80 au_sync_delay(2);
81}
82
83static void
84db1x00_pcmcia_socket_state(struct au1000_pcmcia_socket *skt, struct pcmcia_state *state)
85{
86 u32 inserted;
87 unsigned char vs;
88
89 state->ready = 0;
90 state->vs_Xv = 0;
91 state->vs_3v = 0;
92 state->detect = 0;
93
94 switch (skt->nr) {
95 case 0:
96 vs = bcsr->status & 0x3;
97#if defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
98 inserted = BOARD_CARD_INSERTED(0);
99#else
100 inserted = !(bcsr->status & (1<<4));
101#endif
102 break;
103 case 1:
104 vs = (bcsr->status & 0xC)>>2;
105#if defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
106 inserted = BOARD_CARD_INSERTED(1);
107#else
108 inserted = !(bcsr->status & (1<<5));
109#endif
110 break;
111 default:/* should never happen */
112 return;
113 }
114
115 if (inserted)
116 debug("db1x00 socket %d: inserted %d, vs %d pcmcia %x\n",
117 skt->nr, inserted, vs, bcsr->pcmcia);
118
119 if (inserted) {
120 switch (vs) {
121 case 0:
122 case 2:
123 state->vs_3v=1;
124 break;
125 case 3: /* 5V */
126 break;
127 default:
128 /* return without setting 'detect' */
129 printk(KERN_ERR "db1x00 bad VS (%d)\n",
130 vs);
131 }
132 state->detect = 1;
133 state->ready = 1;
134 }
135 else {
136 /* if the card was previously inserted and then ejected,
137 * we should turn off power to it
138 */
139 if ((skt->nr == 0) && (bcsr->pcmcia & BCSR_PCMCIA_PC0RST)) {
140 bcsr->pcmcia &= ~(BCSR_PCMCIA_PC0RST |
141 BCSR_PCMCIA_PC0DRVEN |
142 BCSR_PCMCIA_PC0VPP |
143 BCSR_PCMCIA_PC0VCC);
144 au_sync_delay(10);
145 }
146 else if ((skt->nr == 1) && bcsr->pcmcia & BCSR_PCMCIA_PC1RST) {
147 bcsr->pcmcia &= ~(BCSR_PCMCIA_PC1RST |
148 BCSR_PCMCIA_PC1DRVEN |
149 BCSR_PCMCIA_PC1VPP |
150 BCSR_PCMCIA_PC1VCC);
151 au_sync_delay(10);
152 }
153 }
154
155 state->bvd1=1;
156 state->bvd2=1;
157 state->wrprot=0;
158}
159
160static int
161db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_state_t *state)
162{
163 u16 pwr;
164 int sock = skt->nr;
165
166 debug("config_skt %d Vcc %dV Vpp %dV, reset %d\n",
167 sock, state->Vcc, state->Vpp,
168 state->flags & SS_RESET);
169
170 /* pcmcia reg was set to zero at init time. Be careful when
171 * initializing a socket not to wipe out the settings of the
172 * other socket.
173 */
174 pwr = bcsr->pcmcia;
175 pwr &= ~(0xf << sock*8); /* clear voltage settings */
176
177 state->Vpp = 0;
178 switch(state->Vcc){
179 case 0: /* Vcc 0 */
180 pwr |= SET_VCC_VPP(0,0,sock);
181 break;
182 case 50: /* Vcc 5V */
183 switch(state->Vpp) {
184 case 0:
185 pwr |= SET_VCC_VPP(2,0,sock);
186 break;
187 case 50:
188 pwr |= SET_VCC_VPP(2,1,sock);
189 break;
190 case 12:
191 pwr |= SET_VCC_VPP(2,2,sock);
192 break;
193 case 33:
194 default:
195 pwr |= SET_VCC_VPP(0,0,sock);
196 printk("%s: bad Vcc/Vpp (%d:%d)\n",
197 __func__,
198 state->Vcc,
199 state->Vpp);
200 break;
201 }
202 break;
203 case 33: /* Vcc 3.3V */
204 switch(state->Vpp) {
205 case 0:
206 pwr |= SET_VCC_VPP(1,0,sock);
207 break;
208 case 12:
209 pwr |= SET_VCC_VPP(1,2,sock);
210 break;
211 case 33:
212 pwr |= SET_VCC_VPP(1,1,sock);
213 break;
214 case 50:
215 default:
216 pwr |= SET_VCC_VPP(0,0,sock);
217 printk("%s: bad Vcc/Vpp (%d:%d)\n",
218 __func__,
219 state->Vcc,
220 state->Vpp);
221 break;
222 }
223 break;
224 default: /* what's this ? */
225 pwr |= SET_VCC_VPP(0,0,sock);
226 printk(KERN_ERR "%s: bad Vcc %d\n",
227 __func__, state->Vcc);
228 break;
229 }
230
231 bcsr->pcmcia = pwr;
232 au_sync_delay(300);
233
234 if (sock == 0) {
235 if (!(state->flags & SS_RESET)) {
236 pwr |= BCSR_PCMCIA_PC0DRVEN;
237 bcsr->pcmcia = pwr;
238 au_sync_delay(300);
239 pwr |= BCSR_PCMCIA_PC0RST;
240 bcsr->pcmcia = pwr;
241 au_sync_delay(100);
242 }
243 else {
244 pwr &= ~(BCSR_PCMCIA_PC0RST | BCSR_PCMCIA_PC0DRVEN);
245 bcsr->pcmcia = pwr;
246 au_sync_delay(100);
247 }
248 }
249 else {
250 if (!(state->flags & SS_RESET)) {
251 pwr |= BCSR_PCMCIA_PC1DRVEN;
252 bcsr->pcmcia = pwr;
253 au_sync_delay(300);
254 pwr |= BCSR_PCMCIA_PC1RST;
255 bcsr->pcmcia = pwr;
256 au_sync_delay(100);
257 }
258 else {
259 pwr &= ~(BCSR_PCMCIA_PC1RST | BCSR_PCMCIA_PC1DRVEN);
260 bcsr->pcmcia = pwr;
261 au_sync_delay(100);
262 }
263 }
264 return 0;
265}
266
267/*
268 * Enable card status IRQs on (re-)initialisation. This can
269 * be called at initialisation, power management event, or
270 * pcmcia event.
271 */
272void db1x00_socket_init(struct au1000_pcmcia_socket *skt)
273{
274 /* nothing to do for now */
275}
276
277/*
278 * Disable card status IRQs and PCMCIA bus on suspend.
279 */
280void db1x00_socket_suspend(struct au1000_pcmcia_socket *skt)
281{
282 /* nothing to do for now */
283}
284
285struct pcmcia_low_level db1x00_pcmcia_ops = {
286 .owner = THIS_MODULE,
287
288 .hw_init = db1x00_pcmcia_hw_init,
289 .hw_shutdown = db1x00_pcmcia_shutdown,
290
291 .socket_state = db1x00_pcmcia_socket_state,
292 .configure_socket = db1x00_pcmcia_configure_socket,
293
294 .socket_init = db1x00_socket_init,
295 .socket_suspend = db1x00_socket_suspend
296};
297
298int au1x_board_init(struct device *dev)
299{
300 int ret = -ENODEV;
301 bcsr->pcmcia = 0; /* turn off power, if it's not already off */
302 au_sync_delay(2);
303 ret = au1x00_pcmcia_socket_probe(dev, &db1x00_pcmcia_ops, 0, 2);
304 return ret;
305}
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
index 02088704ac2c..171c8a654887 100644
--- a/drivers/pcmcia/au1000_generic.c
+++ b/drivers/pcmcia/au1000_generic.c
@@ -405,18 +405,16 @@ int au1x00_pcmcia_socket_probe(struct device *dev, struct pcmcia_low_level *ops,
405 skt->virt_io = (void *) 405 skt->virt_io = (void *)
406 (ioremap((phys_t)AU1X_SOCK0_IO, 0x1000) - 406 (ioremap((phys_t)AU1X_SOCK0_IO, 0x1000) -
407 (u32)mips_io_port_base); 407 (u32)mips_io_port_base);
408 skt->phys_attr = AU1X_SOCK0_PSEUDO_PHYS_ATTR; 408 skt->phys_attr = AU1X_SOCK0_PHYS_ATTR;
409 skt->phys_mem = AU1X_SOCK0_PSEUDO_PHYS_MEM; 409 skt->phys_mem = AU1X_SOCK0_PHYS_MEM;
410 } 410 }
411#ifndef CONFIG_MIPS_XXS1500
412 else { 411 else {
413 skt->virt_io = (void *) 412 skt->virt_io = (void *)
414 (ioremap((phys_t)AU1X_SOCK1_IO, 0x1000) - 413 (ioremap((phys_t)AU1X_SOCK1_IO, 0x1000) -
415 (u32)mips_io_port_base); 414 (u32)mips_io_port_base);
416 skt->phys_attr = AU1X_SOCK1_PSEUDO_PHYS_ATTR; 415 skt->phys_attr = AU1X_SOCK1_PHYS_ATTR;
417 skt->phys_mem = AU1X_SOCK1_PSEUDO_PHYS_MEM; 416 skt->phys_mem = AU1X_SOCK1_PHYS_MEM;
418 } 417 }
419#endif
420 pcmcia_base_vaddrs[i] = (u32 *)skt->virt_io; 418 pcmcia_base_vaddrs[i] = (u32 *)skt->virt_io;
421 ret = ops->hw_init(skt); 419 ret = ops->hw_init(skt);
422 420
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
index 13a4fbc58711..a324d329dea6 100644
--- a/drivers/pcmcia/au1000_generic.h
+++ b/drivers/pcmcia/au1000_generic.h
@@ -36,30 +36,14 @@
36#define AU1X_SOCK0_IO 0xF00000000ULL 36#define AU1X_SOCK0_IO 0xF00000000ULL
37#define AU1X_SOCK0_PHYS_ATTR 0xF40000000ULL 37#define AU1X_SOCK0_PHYS_ATTR 0xF40000000ULL
38#define AU1X_SOCK0_PHYS_MEM 0xF80000000ULL 38#define AU1X_SOCK0_PHYS_MEM 0xF80000000ULL
39/* pseudo 32 bit phys addresses, which get fixed up to the
40 * real 36 bit address in fixup_bigphys_addr() */
41#define AU1X_SOCK0_PSEUDO_PHYS_ATTR 0xF4000000
42#define AU1X_SOCK0_PSEUDO_PHYS_MEM 0xF8000000
43 39
44/* pcmcia socket 1 needs external glue logic so the memory map 40/* pcmcia socket 1 needs external glue logic so the memory map
45 * differs from board to board. 41 * differs from board to board.
46 */ 42 */
47#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100) || \ 43#if defined(CONFIG_MIPS_PB1000)
48 defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1550) || \
49 defined(CONFIG_MIPS_PB1200)
50#define AU1X_SOCK1_IO 0xF08000000ULL 44#define AU1X_SOCK1_IO 0xF08000000ULL
51#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL 45#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL
52#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL 46#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL
53#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4800000
54#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8800000
55#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) || \
56 defined(CONFIG_MIPS_DB1500) || defined(CONFIG_MIPS_DB1550) || \
57 defined(CONFIG_MIPS_DB1200)
58#define AU1X_SOCK1_IO 0xF04000000ULL
59#define AU1X_SOCK1_PHYS_ATTR 0xF44000000ULL
60#define AU1X_SOCK1_PHYS_MEM 0xF84000000ULL
61#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4400000
62#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8400000
63#endif 47#endif
64 48
65struct pcmcia_state { 49struct pcmcia_state {
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index b1984ed72d1d..5a979cb8f3e6 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * Alchemy Semi Pb1x00 boards specific pcmcia routines. 3 * Alchemy Semi Pb1000 boards specific pcmcia routines.
4 * 4 *
5 * Copyright 2002 MontaVista Software Inc. 5 * Copyright 2002 MontaVista Software Inc.
6 * Author: MontaVista Software, Inc. 6 * Author: MontaVista Software, Inc.
@@ -46,20 +46,11 @@
46 46
47#define debug(fmt, arg...) do { } while (0) 47#define debug(fmt, arg...) do { } while (0)
48 48
49#ifdef CONFIG_MIPS_PB1000
50#include <asm/pb1000.h> 49#include <asm/pb1000.h>
51#define PCMCIA_IRQ AU1000_GPIO_15 50#define PCMCIA_IRQ AU1000_GPIO_15
52#elif defined (CONFIG_MIPS_PB1500)
53#include <asm/pb1500.h>
54#define PCMCIA_IRQ AU1500_GPIO_203
55#elif defined (CONFIG_MIPS_PB1100)
56#include <asm/pb1100.h>
57#define PCMCIA_IRQ AU1000_GPIO_11
58#endif
59 51
60static int pb1x00_pcmcia_init(struct pcmcia_init *init) 52static int pb1x00_pcmcia_init(struct pcmcia_init *init)
61{ 53{
62#ifdef CONFIG_MIPS_PB1000
63 u16 pcr; 54 u16 pcr;
64 pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST; 55 pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
65 56
@@ -74,21 +65,10 @@ static int pb1x00_pcmcia_init(struct pcmcia_init *init)
74 au_sync_delay(20); 65 au_sync_delay(20);
75 66
76 return PCMCIA_NUM_SOCKS; 67 return PCMCIA_NUM_SOCKS;
77
78#else /* fixme -- take care of the Pb1500 at some point */
79
80 u16 pcr;
81 pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf; /* turn off power */
82 pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
83 au_writew(pcr, PCMCIA_BOARD_REG);
84 au_sync_delay(500);
85 return PCMCIA_NUM_SOCKS;
86#endif
87} 68}
88 69
89static int pb1x00_pcmcia_shutdown(void) 70static int pb1x00_pcmcia_shutdown(void)
90{ 71{
91#ifdef CONFIG_MIPS_PB1000
92 u16 pcr; 72 u16 pcr;
93 pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST; 73 pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
94 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0); 74 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0);
@@ -96,14 +76,6 @@ static int pb1x00_pcmcia_shutdown(void)
96 au_writel(pcr, PB1000_PCR); 76 au_writel(pcr, PB1000_PCR);
97 au_sync_delay(20); 77 au_sync_delay(20);
98 return 0; 78 return 0;
99#else
100 u16 pcr;
101 pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf; /* turn off power */
102 pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
103 au_writew(pcr, PCMCIA_BOARD_REG);
104 au_sync_delay(2);
105 return 0;
106#endif
107} 79}
108 80
109static int 81static int
@@ -112,21 +84,11 @@ pb1x00_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
112 u32 inserted0, inserted1; 84 u32 inserted0, inserted1;
113 u16 vs0, vs1; 85 u16 vs0, vs1;
114 86
115#ifdef CONFIG_MIPS_PB1000
116 vs0 = vs1 = (u16)au_readl(PB1000_ACR1); 87 vs0 = vs1 = (u16)au_readl(PB1000_ACR1);
117 inserted0 = !(vs0 & (ACR1_SLOT_0_CD1 | ACR1_SLOT_0_CD2)); 88 inserted0 = !(vs0 & (ACR1_SLOT_0_CD1 | ACR1_SLOT_0_CD2));
118 inserted1 = !(vs1 & (ACR1_SLOT_1_CD1 | ACR1_SLOT_1_CD2)); 89 inserted1 = !(vs1 & (ACR1_SLOT_1_CD1 | ACR1_SLOT_1_CD2));
119 vs0 = (vs0 >> 4) & 0x3; 90 vs0 = (vs0 >> 4) & 0x3;
120 vs1 = (vs1 >> 12) & 0x3; 91 vs1 = (vs1 >> 12) & 0x3;
121#else
122 vs0 = (au_readw(BOARD_STATUS_REG) >> 4) & 0x3;
123#ifdef CONFIG_MIPS_PB1500
124 inserted0 = !((au_readl(GPIO2_PINSTATE) >> 1) & 0x1); /* gpio 201 */
125#else /* Pb1100 */
126 inserted0 = !((au_readl(SYS_PINSTATERD) >> 9) & 0x1); /* gpio 9 */
127#endif
128 inserted1 = 0;
129#endif
130 92
131 state->ready = 0; 93 state->ready = 0;
132 state->vs_Xv = 0; 94 state->vs_Xv = 0;
@@ -203,7 +165,6 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
203 165
204 if(configure->sock > PCMCIA_MAX_SOCK) return -1; 166 if(configure->sock > PCMCIA_MAX_SOCK) return -1;
205 167
206#ifdef CONFIG_MIPS_PB1000
207 pcr = au_readl(PB1000_PCR); 168 pcr = au_readl(PB1000_PCR);
208 169
209 if (configure->sock == 0) { 170 if (configure->sock == 0) {
@@ -323,84 +284,6 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
323 au_writel(pcr, PB1000_PCR); 284 au_writel(pcr, PB1000_PCR);
324 au_sync_delay(300); 285 au_sync_delay(300);
325 286
326#else
327
328 pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf;
329
330 debug("Vcc %dV Vpp %dV, pcr %x, reset %d\n",
331 configure->vcc, configure->vpp, pcr, configure->reset);
332
333
334 switch(configure->vcc){
335 case 0: /* Vcc 0 */
336 pcr |= SET_VCC_VPP(0,0);
337 break;
338 case 50: /* Vcc 5V */
339 switch(configure->vpp) {
340 case 0:
341 pcr |= SET_VCC_VPP(2,0);
342 break;
343 case 50:
344 pcr |= SET_VCC_VPP(2,1);
345 break;
346 case 12:
347 pcr |= SET_VCC_VPP(2,2);
348 break;
349 case 33:
350 default:
351 pcr |= SET_VCC_VPP(0,0);
352 printk("%s: bad Vcc/Vpp (%d:%d)\n",
353 __func__,
354 configure->vcc,
355 configure->vpp);
356 break;
357 }
358 break;
359 case 33: /* Vcc 3.3V */
360 switch(configure->vpp) {
361 case 0:
362 pcr |= SET_VCC_VPP(1,0);
363 break;
364 case 12:
365 pcr |= SET_VCC_VPP(1,2);
366 break;
367 case 33:
368 pcr |= SET_VCC_VPP(1,1);
369 break;
370 case 50:
371 default:
372 pcr |= SET_VCC_VPP(0,0);
373 printk("%s: bad Vcc/Vpp (%d:%d)\n",
374 __func__,
375 configure->vcc,
376 configure->vpp);
377 break;
378 }
379 break;
380 default: /* what's this ? */
381 pcr |= SET_VCC_VPP(0,0);
382 printk(KERN_ERR "%s: bad Vcc %d\n",
383 __func__, configure->vcc);
384 break;
385 }
386
387 au_writew(pcr, PCMCIA_BOARD_REG);
388 au_sync_delay(300);
389
390 if (!configure->reset) {
391 pcr |= PC_DRV_EN;
392 au_writew(pcr, PCMCIA_BOARD_REG);
393 au_sync_delay(100);
394 pcr |= PC_DEASSERT_RST;
395 au_writew(pcr, PCMCIA_BOARD_REG);
396 au_sync_delay(100);
397 }
398 else {
399 pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
400 au_writew(pcr, PCMCIA_BOARD_REG);
401 au_sync_delay(100);
402 }
403#endif
404 return 0; 287 return 0;
405} 288}
406 289
diff --git a/drivers/pcmcia/au1000_xxs1500.c b/drivers/pcmcia/au1000_xxs1500.c
deleted file mode 100644
index b43d47b50819..000000000000
--- a/drivers/pcmcia/au1000_xxs1500.c
+++ /dev/null
@@ -1,188 +0,0 @@
1/*
2 *
3 * MyCable board specific pcmcia routines.
4 *
5 * Copyright 2003 MontaVista Software Inc.
6 * Author: Pete Popov, MontaVista Software, Inc.
7 * ppopov@mvista.com or source@mvista.com
8 *
9 * ########################################################################
10 *
11 * This program is free software; you can distribute it and/or modify it
12 * under the terms of the GNU General Public License (Version 2) as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
23 *
24 * ########################################################################
25 *
26 *
27 */
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/ioport.h>
32#include <linux/kernel.h>
33#include <linux/timer.h>
34#include <linux/mm.h>
35#include <linux/proc_fs.h>
36#include <linux/types.h>
37
38#include <pcmcia/cs_types.h>
39#include <pcmcia/cs.h>
40#include <pcmcia/ss.h>
41#include <pcmcia/cistpl.h>
42#include <pcmcia/bus_ops.h>
43
44#include <asm/io.h>
45#include <asm/irq.h>
46#include <asm/system.h>
47
48#include <asm/au1000.h>
49#include <asm/au1000_pcmcia.h>
50
51#define PCMCIA_MAX_SOCK 0
52#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
53#define PCMCIA_IRQ AU1000_GPIO_4
54
55#if 0
56#define DEBUG(x, args...) printk(__func__ ": " x, ##args)
57#else
58#define DEBUG(x,args...)
59#endif
60
61static int xxs1500_pcmcia_init(struct pcmcia_init *init)
62{
63 return PCMCIA_NUM_SOCKS;
64}
65
66static int xxs1500_pcmcia_shutdown(void)
67{
68 /* turn off power */
69 au_writel(au_readl(GPIO2_PINSTATE) | (1<<14)|(1<<30),
70 GPIO2_OUTPUT);
71 au_sync_delay(100);
72
73 /* assert reset */
74 au_writel(au_readl(GPIO2_PINSTATE) | (1<<4)|(1<<20),
75 GPIO2_OUTPUT);
76 au_sync_delay(100);
77 return 0;
78}
79
80
81static int
82xxs1500_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
83{
84 u32 inserted; u32 vs;
85 unsigned long gpio, gpio2;
86
87 if(sock > PCMCIA_MAX_SOCK) return -1;
88
89 gpio = au_readl(SYS_PINSTATERD);
90 gpio2 = au_readl(GPIO2_PINSTATE);
91
92 vs = gpio2 & ((1<<8) | (1<<9));
93 inserted = (!(gpio & 0x1) && !(gpio & 0x2));
94
95 state->ready = 0;
96 state->vs_Xv = 0;
97 state->vs_3v = 0;
98 state->detect = 0;
99
100 if (inserted) {
101 switch (vs) {
102 case 0:
103 case 1:
104 case 2:
105 state->vs_3v=1;
106 break;
107 case 3: /* 5V */
108 default:
109 /* return without setting 'detect' */
110 printk(KERN_ERR "au1x00_cs: unsupported VS\n",
111 vs);
112 return;
113 }
114 state->detect = 1;
115 }
116
117 if (state->detect) {
118 state->ready = 1;
119 }
120
121 state->bvd1= gpio2 & (1<<10);
122 state->bvd2 = gpio2 & (1<<11);
123 state->wrprot=0;
124 return 1;
125}
126
127
128static int xxs1500_pcmcia_get_irq_info(struct pcmcia_irq_info *info)
129{
130
131 if(info->sock > PCMCIA_MAX_SOCK) return -1;
132 info->irq = PCMCIA_IRQ;
133 return 0;
134}
135
136
137static int
138xxs1500_pcmcia_configure_socket(const struct pcmcia_configure *configure)
139{
140
141 if(configure->sock > PCMCIA_MAX_SOCK) return -1;
142
143 DEBUG("Vcc %dV Vpp %dV, reset %d\n",
144 configure->vcc, configure->vpp, configure->reset);
145
146 switch(configure->vcc){
147 case 33: /* Vcc 3.3V */
148 /* turn on power */
149 DEBUG("turn on power\n");
150 au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<14))|(1<<30),
151 GPIO2_OUTPUT);
152 au_sync_delay(100);
153 break;
154 case 50: /* Vcc 5V */
155 default: /* what's this ? */
156 printk(KERN_ERR "au1x00_cs: unsupported VCC\n");
157 case 0: /* Vcc 0 */
158 /* turn off power */
159 au_sync_delay(100);
160 au_writel(au_readl(GPIO2_PINSTATE) | (1<<14)|(1<<30),
161 GPIO2_OUTPUT);
162 break;
163 }
164
165 if (!configure->reset) {
166 DEBUG("deassert reset\n");
167 au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<4))|(1<<20),
168 GPIO2_OUTPUT);
169 au_sync_delay(100);
170 au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<5))|(1<<21),
171 GPIO2_OUTPUT);
172 }
173 else {
174 DEBUG("assert reset\n");
175 au_writel(au_readl(GPIO2_PINSTATE) | (1<<4)|(1<<20),
176 GPIO2_OUTPUT);
177 }
178 au_sync_delay(100);
179 return 0;
180}
181
182struct pcmcia_low_level xxs1500_pcmcia_ops = {
183 xxs1500_pcmcia_init,
184 xxs1500_pcmcia_shutdown,
185 xxs1500_pcmcia_socket_state,
186 xxs1500_pcmcia_get_irq_info,
187 xxs1500_pcmcia_configure_socket
188};
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 300b368605c9..2482ce7ac6dc 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -205,7 +205,7 @@ static int __devinit bfin_cf_probe(struct platform_device *pdev)
205 dev_info(&pdev->dev, "Blackfin CompactFlash/PCMCIA Socket Driver\n"); 205 dev_info(&pdev->dev, "Blackfin CompactFlash/PCMCIA Socket Driver\n");
206 206
207 irq = platform_get_irq(pdev, 0); 207 irq = platform_get_irq(pdev, 0);
208 if (!irq) 208 if (irq <= 0)
209 return -EINVAL; 209 return -EINVAL;
210 210
211 cd_pfx = platform_get_irq(pdev, 1); /*Card Detect GPIO PIN */ 211 cd_pfx = platform_get_irq(pdev, 1); /*Card Detect GPIO PIN */
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index d99f846451a3..ac0686efbf75 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -20,170 +20,12 @@
20 */ 20 */
21 21
22 22
23#include <linux/module.h>
24#include <linux/kernel.h> 23#include <linux/kernel.h>
25#include <linux/string.h> 24#include <linux/module.h>
26#include <linux/slab.h>
27#include <linux/mm.h>
28#include <linux/pci.h> 25#include <linux/pci.h>
29#include <linux/ioport.h>
30#include <linux/io.h>
31#include <asm/irq.h>
32 26
33#include <pcmcia/cs_types.h>
34#include <pcmcia/ss.h> 27#include <pcmcia/ss.h>
35#include <pcmcia/cs.h>
36#include <pcmcia/cistpl.h>
37#include "cs_internal.h"
38
39/*====================================================================*/
40
41/* Offsets in the Expansion ROM Image Header */
42#define ROM_SIGNATURE 0x0000 /* 2 bytes */
43#define ROM_DATA_PTR 0x0018 /* 2 bytes */
44
45/* Offsets in the CardBus PC Card Data Structure */
46#define PCDATA_SIGNATURE 0x0000 /* 4 bytes */
47#define PCDATA_VPD_PTR 0x0008 /* 2 bytes */
48#define PCDATA_LENGTH 0x000a /* 2 bytes */
49#define PCDATA_REVISION 0x000c
50#define PCDATA_IMAGE_SZ 0x0010 /* 2 bytes */
51#define PCDATA_ROM_LEVEL 0x0012 /* 2 bytes */
52#define PCDATA_CODE_TYPE 0x0014
53#define PCDATA_INDICATOR 0x0015
54
55/*=====================================================================
56
57 Expansion ROM's have a special layout, and pointers specify an
58 image number and an offset within that image. xlate_rom_addr()
59 converts an image/offset address to an absolute offset from the
60 ROM's base address.
61
62=====================================================================*/
63
64static u_int xlate_rom_addr(void __iomem *b, u_int addr)
65{
66 u_int img = 0, ofs = 0, sz;
67 u_short data;
68 while ((readb(b) == 0x55) && (readb(b + 1) == 0xaa)) {
69 if (img == (addr >> 28))
70 return (addr & 0x0fffffff) + ofs;
71 data = readb(b + ROM_DATA_PTR) + (readb(b + ROM_DATA_PTR + 1) << 8);
72 sz = 512 * (readb(b + data + PCDATA_IMAGE_SZ) +
73 (readb(b + data + PCDATA_IMAGE_SZ + 1) << 8));
74 if ((sz == 0) || (readb(b + data + PCDATA_INDICATOR) & 0x80))
75 break;
76 b += sz;
77 ofs += sz;
78 img++;
79 }
80 return 0;
81}
82
83/*=====================================================================
84
85 These are similar to setup_cis_mem and release_cis_mem for 16-bit
86 cards. The "result" that is used externally is the cb_cis_virt
87 pointer in the struct pcmcia_socket structure.
88
89=====================================================================*/
90
91static void cb_release_cis_mem(struct pcmcia_socket *s)
92{
93 if (s->cb_cis_virt) {
94 dev_dbg(&s->dev, "cb_release_cis_mem()\n");
95 iounmap(s->cb_cis_virt);
96 s->cb_cis_virt = NULL;
97 s->cb_cis_res = NULL;
98 }
99}
100
101static int cb_setup_cis_mem(struct pcmcia_socket *s, struct resource *res)
102{
103 unsigned int start, size;
104
105 if (res == s->cb_cis_res)
106 return 0;
107
108 if (s->cb_cis_res)
109 cb_release_cis_mem(s);
110
111 start = res->start;
112 size = res->end - start + 1;
113 s->cb_cis_virt = ioremap(start, size);
114
115 if (!s->cb_cis_virt)
116 return -1;
117
118 s->cb_cis_res = res;
119
120 return 0;
121}
122
123/*=====================================================================
124
125 This is used by the CIS processing code to read CIS information
126 from a CardBus device.
127
128=====================================================================*/
129
130int read_cb_mem(struct pcmcia_socket *s, int space, u_int addr, u_int len,
131 void *ptr)
132{
133 struct pci_dev *dev;
134 struct resource *res;
135
136 dev_dbg(&s->dev, "read_cb_mem(%d, %#x, %u)\n", space, addr, len);
137 28
138 dev = pci_get_slot(s->cb_dev->subordinate, 0);
139 if (!dev)
140 goto fail;
141
142 /* Config space? */
143 if (space == 0) {
144 if (addr + len > 0x100)
145 goto failput;
146 for (; len; addr++, ptr++, len--)
147 pci_read_config_byte(dev, addr, ptr);
148 return 0;
149 }
150
151 res = dev->resource + space - 1;
152
153 pci_dev_put(dev);
154
155 if (!res->flags)
156 goto fail;
157
158 if (cb_setup_cis_mem(s, res) != 0)
159 goto fail;
160
161 if (space == 7) {
162 addr = xlate_rom_addr(s->cb_cis_virt, addr);
163 if (addr == 0)
164 goto fail;
165 }
166
167 if (addr + len > res->end - res->start)
168 goto fail;
169
170 memcpy_fromio(ptr, s->cb_cis_virt + addr, len);
171 return 0;
172
173failput:
174 pci_dev_put(dev);
175fail:
176 memset(ptr, 0xff, len);
177 return -1;
178}
179
180/*=====================================================================
181
182 cb_alloc() and cb_free() allocate and free the kernel data
183 structures for a Cardbus device, and handle the lowest level PCI
184 device setup issues.
185
186=====================================================================*/
187 29
188static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq) 30static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
189{ 31{
@@ -215,6 +57,13 @@ static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
215 } 57 }
216} 58}
217 59
60/**
61 * cb_alloc() - add CardBus device
62 * @s: the pcmcia_socket where the CardBus device is located
63 *
64 * cb_alloc() allocates the kernel data structures for a Cardbus device
65 * and handles the lowest level PCI device setup issues.
66 */
218int __ref cb_alloc(struct pcmcia_socket *s) 67int __ref cb_alloc(struct pcmcia_socket *s)
219{ 68{
220 struct pci_bus *bus = s->cb_dev->subordinate; 69 struct pci_bus *bus = s->cb_dev->subordinate;
@@ -249,12 +98,16 @@ int __ref cb_alloc(struct pcmcia_socket *s)
249 return 0; 98 return 0;
250} 99}
251 100
101/**
102 * cb_free() - remove CardBus device
103 * @s: the pcmcia_socket where the CardBus device was located
104 *
105 * cb_free() handles the lowest level PCI device cleanup.
106 */
252void cb_free(struct pcmcia_socket *s) 107void cb_free(struct pcmcia_socket *s)
253{ 108{
254 struct pci_dev *bridge = s->cb_dev; 109 struct pci_dev *bridge = s->cb_dev;
255 110
256 cb_release_cis_mem(s);
257
258 if (bridge) 111 if (bridge)
259 pci_remove_behind_bridge(bridge); 112 pci_remove_behind_bridge(bridge);
260} 113}
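With the Expansion ROM and CIS helpers removed, cb_alloc() and cb_free() shrink to adding and removing the PCI devices behind the CardBus bridge, and the socket core simply pairs the two whenever it cannot trust that the same card is still present (see the socket_late_resume() change in cs.c further down). A rough sketch of that pairing, not the literal cs.c code:

static void cardbus_readd(struct pcmcia_socket *s)
{
	/* we cannot be sure the same CardBus card came back, so re-enumerate */
	cb_free(s);
	cb_alloc(s);
}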
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 25b1cd219e37..2f3622dd4b69 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -64,6 +64,7 @@ module_param(cis_width, int, 0444);
64 64
65void release_cis_mem(struct pcmcia_socket *s) 65void release_cis_mem(struct pcmcia_socket *s)
66{ 66{
67 mutex_lock(&s->ops_mutex);
67 if (s->cis_mem.flags & MAP_ACTIVE) { 68 if (s->cis_mem.flags & MAP_ACTIVE) {
68 s->cis_mem.flags &= ~MAP_ACTIVE; 69 s->cis_mem.flags &= ~MAP_ACTIVE;
69 s->ops->set_mem_map(s, &s->cis_mem); 70 s->ops->set_mem_map(s, &s->cis_mem);
@@ -75,13 +76,15 @@ void release_cis_mem(struct pcmcia_socket *s)
75 iounmap(s->cis_virt); 76 iounmap(s->cis_virt);
76 s->cis_virt = NULL; 77 s->cis_virt = NULL;
77 } 78 }
79 mutex_unlock(&s->ops_mutex);
78} 80}
79EXPORT_SYMBOL(release_cis_mem);
80 81
81/* 82/*
82 * Map the card memory at "card_offset" into virtual space. 83 * Map the card memory at "card_offset" into virtual space.
83 * If flags & MAP_ATTRIB, map the attribute space, otherwise 84 * If flags & MAP_ATTRIB, map the attribute space, otherwise
84 * map the memory space. 85 * map the memory space.
86 *
87 * Must be called with ops_mutex held.
85 */ 88 */
86static void __iomem * 89static void __iomem *
87set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flags) 90set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flags)
@@ -140,6 +143,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
140 143
141 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); 144 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
142 145
146 mutex_lock(&s->ops_mutex);
143 if (attr & IS_INDIRECT) { 147 if (attr & IS_INDIRECT) {
144 /* Indirect accesses use a bunch of special registers at fixed 148 /* Indirect accesses use a bunch of special registers at fixed
145 locations in common memory */ 149 locations in common memory */
@@ -151,7 +155,9 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
151 155
152 sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0)); 156 sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0));
153 if (!sys) { 157 if (!sys) {
158 dev_dbg(&s->dev, "could not map memory\n");
154 memset(ptr, 0xff, len); 159 memset(ptr, 0xff, len);
160 mutex_unlock(&s->ops_mutex);
155 return -1; 161 return -1;
156 } 162 }
157 163
@@ -165,6 +171,9 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
165 } else { 171 } else {
166 u_int inc = 1, card_offset, flags; 172 u_int inc = 1, card_offset, flags;
167 173
174 if (addr > CISTPL_MAX_CIS_SIZE)
 175			dev_dbg(&s->dev, "attempt to read CIS mem at addr %#x\n", addr);
176
168 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0); 177 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
169 if (attr) { 178 if (attr) {
170 flags |= MAP_ATTRIB; 179 flags |= MAP_ATTRIB;
@@ -176,7 +185,9 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
176 while (len) { 185 while (len) {
177 sys = set_cis_map(s, card_offset, flags); 186 sys = set_cis_map(s, card_offset, flags);
178 if (!sys) { 187 if (!sys) {
188 dev_dbg(&s->dev, "could not map memory\n");
179 memset(ptr, 0xff, len); 189 memset(ptr, 0xff, len);
190 mutex_unlock(&s->ops_mutex);
180 return -1; 191 return -1;
181 } 192 }
182 end = sys + s->map_size; 193 end = sys + s->map_size;
@@ -190,12 +201,12 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
190 addr = 0; 201 addr = 0;
191 } 202 }
192 } 203 }
204 mutex_unlock(&s->ops_mutex);
193 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", 205 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
194 *(u_char *)(ptr+0), *(u_char *)(ptr+1), 206 *(u_char *)(ptr+0), *(u_char *)(ptr+1),
195 *(u_char *)(ptr+2), *(u_char *)(ptr+3)); 207 *(u_char *)(ptr+2), *(u_char *)(ptr+3));
196 return 0; 208 return 0;
197} 209}
198EXPORT_SYMBOL(pcmcia_read_cis_mem);
199 210
200 211
201void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, 212void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
@@ -206,6 +217,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
206 217
207 dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); 218 dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
208 219
220 mutex_lock(&s->ops_mutex);
209 if (attr & IS_INDIRECT) { 221 if (attr & IS_INDIRECT) {
210 /* Indirect accesses use a bunch of special registers at fixed 222 /* Indirect accesses use a bunch of special registers at fixed
211 locations in common memory */ 223 locations in common memory */
@@ -216,8 +228,11 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
216 } 228 }
217 229
218 sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0)); 230 sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0));
219 if (!sys) 231 if (!sys) {
232 dev_dbg(&s->dev, "could not map memory\n");
233 mutex_unlock(&s->ops_mutex);
220 return; /* FIXME: Error */ 234 return; /* FIXME: Error */
235 }
221 236
222 writeb(flags, sys+CISREG_ICTRL0); 237 writeb(flags, sys+CISREG_ICTRL0);
223 writeb(addr & 0xff, sys+CISREG_IADDR0); 238 writeb(addr & 0xff, sys+CISREG_IADDR0);
@@ -239,8 +254,11 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
239 card_offset = addr & ~(s->map_size-1); 254 card_offset = addr & ~(s->map_size-1);
240 while (len) { 255 while (len) {
241 sys = set_cis_map(s, card_offset, flags); 256 sys = set_cis_map(s, card_offset, flags);
242 if (!sys) 257 if (!sys) {
258 dev_dbg(&s->dev, "could not map memory\n");
259 mutex_unlock(&s->ops_mutex);
243 return; /* FIXME: error */ 260 return; /* FIXME: error */
261 }
244 262
245 end = sys + s->map_size; 263 end = sys + s->map_size;
246 sys = sys + (addr & (s->map_size-1)); 264 sys = sys + (addr & (s->map_size-1));
@@ -253,8 +271,8 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
253 addr = 0; 271 addr = 0;
254 } 272 }
255 } 273 }
274 mutex_unlock(&s->ops_mutex);
256} 275}
257EXPORT_SYMBOL(pcmcia_write_cis_mem);
258 276
259 277
260/*====================================================================== 278/*======================================================================
@@ -265,32 +283,36 @@ EXPORT_SYMBOL(pcmcia_write_cis_mem);
265 283
266======================================================================*/ 284======================================================================*/
267 285
268static void read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, 286static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
269 size_t len, void *ptr) 287 size_t len, void *ptr)
270{ 288{
271 struct cis_cache_entry *cis; 289 struct cis_cache_entry *cis;
272 int ret; 290 int ret = 0;
273 291
274 if (s->fake_cis) { 292 if (s->state & SOCKET_CARDBUS)
275 if (s->fake_cis_len >= addr+len) 293 return -EINVAL;
276 memcpy(ptr, s->fake_cis+addr, len);
277 else
278 memset(ptr, 0xff, len);
279 return;
280 }
281 294
282 list_for_each_entry(cis, &s->cis_cache, node) { 295 mutex_lock(&s->ops_mutex);
283 if (cis->addr == addr && cis->len == len && cis->attr == attr) { 296 if (s->fake_cis) {
284 memcpy(ptr, cis->cache, len); 297 if (s->fake_cis_len >= addr+len)
285 return; 298 memcpy(ptr, s->fake_cis+addr, len);
299 else {
300 memset(ptr, 0xff, len);
301 ret = -EINVAL;
302 }
303 mutex_unlock(&s->ops_mutex);
304 return ret;
286 } 305 }
287 }
288 306
289#ifdef CONFIG_CARDBUS 307 list_for_each_entry(cis, &s->cis_cache, node) {
290 if (s->state & SOCKET_CARDBUS) 308 if (cis->addr == addr && cis->len == len && cis->attr == attr) {
291 ret = read_cb_mem(s, attr, addr, len, ptr); 309 memcpy(ptr, cis->cache, len);
292 else 310 mutex_unlock(&s->ops_mutex);
293#endif 311 return 0;
312 }
313 }
314 mutex_unlock(&s->ops_mutex);
315
294 ret = pcmcia_read_cis_mem(s, attr, addr, len, ptr); 316 ret = pcmcia_read_cis_mem(s, attr, addr, len, ptr);
295 317
296 if (ret == 0) { 318 if (ret == 0) {
@@ -301,9 +323,12 @@ static void read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
301 cis->len = len; 323 cis->len = len;
302 cis->attr = attr; 324 cis->attr = attr;
303 memcpy(cis->cache, ptr, len); 325 memcpy(cis->cache, ptr, len);
326 mutex_lock(&s->ops_mutex);
304 list_add(&cis->node, &s->cis_cache); 327 list_add(&cis->node, &s->cis_cache);
328 mutex_unlock(&s->ops_mutex);
305 } 329 }
306 } 330 }
331 return ret;
307} 332}
308 333
309static void 334static void
@@ -311,32 +336,35 @@ remove_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, u_int len)
311{ 336{
312 struct cis_cache_entry *cis; 337 struct cis_cache_entry *cis;
313 338
339 mutex_lock(&s->ops_mutex);
314 list_for_each_entry(cis, &s->cis_cache, node) 340 list_for_each_entry(cis, &s->cis_cache, node)
315 if (cis->addr == addr && cis->len == len && cis->attr == attr) { 341 if (cis->addr == addr && cis->len == len && cis->attr == attr) {
316 list_del(&cis->node); 342 list_del(&cis->node);
317 kfree(cis); 343 kfree(cis);
318 break; 344 break;
319 } 345 }
346 mutex_unlock(&s->ops_mutex);
320} 347}
321 348
349/**
350 * destroy_cis_cache() - destroy the CIS cache
351 * @s: pcmcia_socket for which CIS cache shall be destroyed
352 *
353 * This destroys the CIS cache but keeps any fake CIS alive. Must be
354 * called with ops_mutex held.
355 */
356
322void destroy_cis_cache(struct pcmcia_socket *s) 357void destroy_cis_cache(struct pcmcia_socket *s)
323{ 358{
324 struct list_head *l, *n; 359 struct list_head *l, *n;
360 struct cis_cache_entry *cis;
325 361
326 list_for_each_safe(l, n, &s->cis_cache) { 362 list_for_each_safe(l, n, &s->cis_cache) {
327 struct cis_cache_entry *cis = list_entry(l, struct cis_cache_entry, node); 363 cis = list_entry(l, struct cis_cache_entry, node);
328
329 list_del(&cis->node); 364 list_del(&cis->node);
330 kfree(cis); 365 kfree(cis);
331 } 366 }
332
333 /*
334 * If there was a fake CIS, destroy that as well.
335 */
336 kfree(s->fake_cis);
337 s->fake_cis = NULL;
338} 367}
339EXPORT_SYMBOL(destroy_cis_cache);
340 368
341/*====================================================================== 369/*======================================================================
342 370
@@ -349,6 +377,10 @@ int verify_cis_cache(struct pcmcia_socket *s)
349{ 377{
350 struct cis_cache_entry *cis; 378 struct cis_cache_entry *cis;
351 char *buf; 379 char *buf;
380 int ret;
381
382 if (s->state & SOCKET_CARDBUS)
383 return -EINVAL;
352 384
353 buf = kmalloc(256, GFP_KERNEL); 385 buf = kmalloc(256, GFP_KERNEL);
354 if (buf == NULL) { 386 if (buf == NULL) {
@@ -361,14 +393,9 @@ int verify_cis_cache(struct pcmcia_socket *s)
361 393
362 if (len > 256) 394 if (len > 256)
363 len = 256; 395 len = 256;
364#ifdef CONFIG_CARDBUS
365 if (s->state & SOCKET_CARDBUS)
366 read_cb_mem(s, cis->attr, cis->addr, len, buf);
367 else
368#endif
369 pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf);
370 396
371 if (memcmp(buf, cis->cache, len) != 0) { 397 ret = pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf);
398 if (ret || memcmp(buf, cis->cache, len) != 0) {
372 kfree(buf); 399 kfree(buf);
373 return -1; 400 return -1;
374 } 401 }
@@ -391,17 +418,20 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
391 dev_printk(KERN_WARNING, &s->dev, "replacement CIS too big\n"); 418 dev_printk(KERN_WARNING, &s->dev, "replacement CIS too big\n");
392 return -EINVAL; 419 return -EINVAL;
393 } 420 }
421 mutex_lock(&s->ops_mutex);
394 kfree(s->fake_cis); 422 kfree(s->fake_cis);
395 s->fake_cis = kmalloc(len, GFP_KERNEL); 423 s->fake_cis = kmalloc(len, GFP_KERNEL);
396 if (s->fake_cis == NULL) { 424 if (s->fake_cis == NULL) {
397 dev_printk(KERN_WARNING, &s->dev, "no memory to replace CIS\n"); 425 dev_printk(KERN_WARNING, &s->dev, "no memory to replace CIS\n");
426 mutex_unlock(&s->ops_mutex);
398 return -ENOMEM; 427 return -ENOMEM;
399 } 428 }
400 s->fake_cis_len = len; 429 s->fake_cis_len = len;
401 memcpy(s->fake_cis, data, len); 430 memcpy(s->fake_cis, data, len);
431 dev_info(&s->dev, "Using replacement CIS\n");
432 mutex_unlock(&s->ops_mutex);
402 return 0; 433 return 0;
403} 434}
404EXPORT_SYMBOL(pcmcia_replace_cis);
405 435
406/*====================================================================== 436/*======================================================================
407 437
@@ -425,25 +455,16 @@ int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple
425{ 455{
426 if (!s) 456 if (!s)
427 return -EINVAL; 457 return -EINVAL;
428 if (!(s->state & SOCKET_PRESENT)) 458
459 if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
429 return -ENODEV; 460 return -ENODEV;
430 tuple->TupleLink = tuple->Flags = 0; 461 tuple->TupleLink = tuple->Flags = 0;
431#ifdef CONFIG_CARDBUS 462
432 if (s->state & SOCKET_CARDBUS) { 463 /* Assume presence of a LONGLINK_C to address 0 */
433 struct pci_dev *dev = s->cb_dev; 464 tuple->CISOffset = tuple->LinkOffset = 0;
434 u_int ptr; 465 SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
435 pci_bus_read_config_dword(dev->subordinate, 0, PCI_CARDBUS_CIS, &ptr); 466
436 tuple->CISOffset = ptr & ~7; 467 if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
437 SPACE(tuple->Flags) = (ptr & 7);
438 } else
439#endif
440 {
441 /* Assume presence of a LONGLINK_C to address 0 */
442 tuple->CISOffset = tuple->LinkOffset = 0;
443 SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
444 }
445 if (!(s->state & SOCKET_CARDBUS) && (s->functions > 1) &&
446 !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
447 cisdata_t req = tuple->DesiredTuple; 468 cisdata_t req = tuple->DesiredTuple;
448 tuple->DesiredTuple = CISTPL_LONGLINK_MFC; 469 tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
449 if (pccard_get_next_tuple(s, function, tuple) == 0) { 470 if (pccard_get_next_tuple(s, function, tuple) == 0) {
@@ -456,17 +477,19 @@ int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple
456 } 477 }
457 return pccard_get_next_tuple(s, function, tuple); 478 return pccard_get_next_tuple(s, function, tuple);
458} 479}
459EXPORT_SYMBOL(pccard_get_first_tuple);
460 480
461static int follow_link(struct pcmcia_socket *s, tuple_t *tuple) 481static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
462{ 482{
463 u_char link[5]; 483 u_char link[5];
464 u_int ofs; 484 u_int ofs;
485 int ret;
465 486
466 if (MFC_FN(tuple->Flags)) { 487 if (MFC_FN(tuple->Flags)) {
467 /* Get indirect link from the MFC tuple */ 488 /* Get indirect link from the MFC tuple */
468 read_cis_cache(s, LINK_SPACE(tuple->Flags), 489 ret = read_cis_cache(s, LINK_SPACE(tuple->Flags),
469 tuple->LinkOffset, 5, link); 490 tuple->LinkOffset, 5, link);
491 if (ret)
492 return -1;
470 ofs = get_unaligned_le32(link + 1); 493 ofs = get_unaligned_le32(link + 1);
471 SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); 494 SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
472 /* Move to the next indirect link */ 495 /* Move to the next indirect link */
@@ -479,10 +502,12 @@ static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
479 } else { 502 } else {
480 return -1; 503 return -1;
481 } 504 }
482 if (!(s->state & SOCKET_CARDBUS) && SPACE(tuple->Flags)) { 505 if (SPACE(tuple->Flags)) {
483 /* This is ugly, but a common CIS error is to code the long 506 /* This is ugly, but a common CIS error is to code the long
484 link offset incorrectly, so we check the right spot... */ 507 link offset incorrectly, so we check the right spot... */
485 read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link); 508 ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
509 if (ret)
510 return -1;
486 if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) && 511 if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
487 (strncmp(link+2, "CIS", 3) == 0)) 512 (strncmp(link+2, "CIS", 3) == 0))
488 return ofs; 513 return ofs;
@@ -490,7 +515,9 @@ static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
490 /* Then, we try the wrong spot... */ 515 /* Then, we try the wrong spot... */
491 ofs = ofs >> 1; 516 ofs = ofs >> 1;
492 } 517 }
493 read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link); 518 ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
519 if (ret)
520 return -1;
494 if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) && 521 if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
495 (strncmp(link+2, "CIS", 3) == 0)) 522 (strncmp(link+2, "CIS", 3) == 0))
496 return ofs; 523 return ofs;
@@ -502,10 +529,11 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
502{ 529{
503 u_char link[2], tmp; 530 u_char link[2], tmp;
504 int ofs, i, attr; 531 int ofs, i, attr;
532 int ret;
505 533
506 if (!s) 534 if (!s)
507 return -EINVAL; 535 return -EINVAL;
508 if (!(s->state & SOCKET_PRESENT)) 536 if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
509 return -ENODEV; 537 return -ENODEV;
510 538
511 link[1] = tuple->TupleLink; 539 link[1] = tuple->TupleLink;
@@ -516,7 +544,9 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
516 if (link[1] == 0xff) { 544 if (link[1] == 0xff) {
517 link[0] = CISTPL_END; 545 link[0] = CISTPL_END;
518 } else { 546 } else {
519 read_cis_cache(s, attr, ofs, 2, link); 547 ret = read_cis_cache(s, attr, ofs, 2, link);
548 if (ret)
549 return -1;
520 if (link[0] == CISTPL_NULL) { 550 if (link[0] == CISTPL_NULL) {
521 ofs++; continue; 551 ofs++; continue;
522 } 552 }
@@ -528,7 +558,9 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
528 if (ofs < 0) 558 if (ofs < 0)
529 return -ENOSPC; 559 return -ENOSPC;
530 attr = SPACE(tuple->Flags); 560 attr = SPACE(tuple->Flags);
531 read_cis_cache(s, attr, ofs, 2, link); 561 ret = read_cis_cache(s, attr, ofs, 2, link);
562 if (ret)
563 return -1;
532 } 564 }
533 565
534 /* Is this a link tuple? Make a note of it */ 566 /* Is this a link tuple? Make a note of it */
@@ -542,12 +574,16 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
542 case CISTPL_LONGLINK_A: 574 case CISTPL_LONGLINK_A:
543 HAS_LINK(tuple->Flags) = 1; 575 HAS_LINK(tuple->Flags) = 1;
544 LINK_SPACE(tuple->Flags) = attr | IS_ATTR; 576 LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
545 read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset); 577 ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
578 if (ret)
579 return -1;
546 break; 580 break;
547 case CISTPL_LONGLINK_C: 581 case CISTPL_LONGLINK_C:
548 HAS_LINK(tuple->Flags) = 1; 582 HAS_LINK(tuple->Flags) = 1;
549 LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR; 583 LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
550 read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset); 584 ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
585 if (ret)
586 return -1;
551 break; 587 break;
552 case CISTPL_INDIRECT: 588 case CISTPL_INDIRECT:
553 HAS_LINK(tuple->Flags) = 1; 589 HAS_LINK(tuple->Flags) = 1;
@@ -559,7 +595,9 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
559 LINK_SPACE(tuple->Flags) = attr; 595 LINK_SPACE(tuple->Flags) = attr;
560 if (function == BIND_FN_ALL) { 596 if (function == BIND_FN_ALL) {
561 /* Follow all the MFC links */ 597 /* Follow all the MFC links */
562 read_cis_cache(s, attr, ofs+2, 1, &tmp); 598 ret = read_cis_cache(s, attr, ofs+2, 1, &tmp);
599 if (ret)
600 return -1;
563 MFC_FN(tuple->Flags) = tmp; 601 MFC_FN(tuple->Flags) = tmp;
564 } else { 602 } else {
565 /* Follow exactly one of the links */ 603 /* Follow exactly one of the links */
@@ -592,7 +630,6 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
592 tuple->CISOffset = ofs + 2; 630 tuple->CISOffset = ofs + 2;
593 return 0; 631 return 0;
594} 632}
595EXPORT_SYMBOL(pccard_get_next_tuple);
596 633
597/*====================================================================*/ 634/*====================================================================*/
598 635
@@ -601,6 +638,7 @@ EXPORT_SYMBOL(pccard_get_next_tuple);
601int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple) 638int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple)
602{ 639{
603 u_int len; 640 u_int len;
641 int ret;
604 642
605 if (!s) 643 if (!s)
606 return -EINVAL; 644 return -EINVAL;
@@ -611,12 +649,13 @@ int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple)
611 tuple->TupleDataLen = tuple->TupleLink; 649 tuple->TupleDataLen = tuple->TupleLink;
612 if (len == 0) 650 if (len == 0)
613 return 0; 651 return 0;
614 read_cis_cache(s, SPACE(tuple->Flags), 652 ret = read_cis_cache(s, SPACE(tuple->Flags),
615 tuple->CISOffset + tuple->TupleOffset, 653 tuple->CISOffset + tuple->TupleOffset,
616 _MIN(len, tuple->TupleDataMax), tuple->TupleData); 654 _MIN(len, tuple->TupleDataMax), tuple->TupleData);
655 if (ret)
656 return -1;
617 return 0; 657 return 0;
618} 658}
619EXPORT_SYMBOL(pccard_get_tuple_data);
620 659
621 660
622/*====================================================================== 661/*======================================================================
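Because read_cis_cache() failures now propagate as error codes, each of pccard_get_first_tuple(), pccard_get_next_tuple() and pccard_get_tuple_data() can fail independently, and callers are expected to stop walking the chain rather than parse stale data. A condensed sketch of the expected caller pattern (compare the pccard_extract_cis() helper added further down in this file):

static void walk_cis_example(struct pcmcia_socket *s)
{
	tuple_t tuple;
	u_char data[255];
	int ret;

	memset(&tuple, 0, sizeof(tuple));
	tuple.DesiredTuple = RETURN_FIRST_TUPLE;
	tuple.Attributes = TUPLE_RETURN_COMMON;
	tuple.TupleData = data;
	tuple.TupleDataMax = sizeof(data);
	tuple.TupleOffset = 0;

	ret = pccard_get_first_tuple(s, BIND_FN_ALL, &tuple);
	while (!ret) {
		if (pccard_get_tuple_data(s, &tuple))
			break;
		/* ... consume tuple.TupleCode / tuple.TupleData here ... */
		if (tuple.TupleCode == CISTPL_END)
			break;
		ret = pccard_get_next_tuple(s, BIND_FN_ALL, &tuple);
	}
}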
@@ -1190,119 +1229,6 @@ static int parse_cftable_entry(tuple_t *tuple,
1190 1229
1191/*====================================================================*/ 1230/*====================================================================*/
1192 1231
1193#ifdef CONFIG_CARDBUS
1194
1195static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar)
1196{
1197 u_char *p;
1198 if (tuple->TupleDataLen < 6)
1199 return -EINVAL;
1200 p = (u_char *)tuple->TupleData;
1201 bar->attr = *p;
1202 p += 2;
1203 bar->size = get_unaligned_le32(p);
1204 return 0;
1205}
1206
1207static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config)
1208{
1209 u_char *p;
1210
1211 p = (u_char *)tuple->TupleData;
1212 if ((*p != 3) || (tuple->TupleDataLen < 6))
1213 return -EINVAL;
1214 config->last_idx = *(++p);
1215 p++;
1216 config->base = get_unaligned_le32(p);
1217 config->subtuples = tuple->TupleDataLen - 6;
1218 return 0;
1219}
1220
1221static int parse_cftable_entry_cb(tuple_t *tuple,
1222 cistpl_cftable_entry_cb_t *entry)
1223{
1224 u_char *p, *q, features;
1225
1226 p = tuple->TupleData;
1227 q = p + tuple->TupleDataLen;
1228 entry->index = *p & 0x3f;
1229 entry->flags = 0;
1230 if (*p & 0x40)
1231 entry->flags |= CISTPL_CFTABLE_DEFAULT;
1232
1233 /* Process optional features */
1234 if (++p == q)
1235 return -EINVAL;
1236 features = *p; p++;
1237
1238 /* Power options */
1239 if ((features & 3) > 0) {
1240 p = parse_power(p, q, &entry->vcc);
1241 if (p == NULL)
1242 return -EINVAL;
1243 } else
1244 entry->vcc.present = 0;
1245 if ((features & 3) > 1) {
1246 p = parse_power(p, q, &entry->vpp1);
1247 if (p == NULL)
1248 return -EINVAL;
1249 } else
1250 entry->vpp1.present = 0;
1251 if ((features & 3) > 2) {
1252 p = parse_power(p, q, &entry->vpp2);
1253 if (p == NULL)
1254 return -EINVAL;
1255 } else
1256 entry->vpp2.present = 0;
1257
1258 /* I/O window options */
1259 if (features & 0x08) {
1260 if (p == q)
1261 return -EINVAL;
1262 entry->io = *p; p++;
1263 } else
1264 entry->io = 0;
1265
1266 /* Interrupt options */
1267 if (features & 0x10) {
1268 p = parse_irq(p, q, &entry->irq);
1269 if (p == NULL)
1270 return -EINVAL;
1271 } else
1272 entry->irq.IRQInfo1 = 0;
1273
1274 if (features & 0x20) {
1275 if (p == q)
1276 return -EINVAL;
1277 entry->mem = *p; p++;
1278 } else
1279 entry->mem = 0;
1280
1281 /* Misc features */
1282 if (features & 0x80) {
1283 if (p == q)
1284 return -EINVAL;
1285 entry->flags |= (*p << 8);
1286 if (*p & 0x80) {
1287 if (++p == q)
1288 return -EINVAL;
1289 entry->flags |= (*p << 16);
1290 }
1291 while (*p & 0x80)
1292 if (++p == q)
1293 return -EINVAL;
1294 p++;
1295 }
1296
1297 entry->subtuples = q-p;
1298
1299 return 0;
1300}
1301
1302#endif
1303
1304/*====================================================================*/
1305
1306static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo) 1232static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
1307{ 1233{
1308 u_char *p, *q; 1234 u_char *p, *q;
@@ -1404,17 +1330,6 @@ int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
1404 case CISTPL_DEVICE_A: 1330 case CISTPL_DEVICE_A:
1405 ret = parse_device(tuple, &parse->device); 1331 ret = parse_device(tuple, &parse->device);
1406 break; 1332 break;
1407#ifdef CONFIG_CARDBUS
1408 case CISTPL_BAR:
1409 ret = parse_bar(tuple, &parse->bar);
1410 break;
1411 case CISTPL_CONFIG_CB:
1412 ret = parse_config_cb(tuple, &parse->config);
1413 break;
1414 case CISTPL_CFTABLE_ENTRY_CB:
1415 ret = parse_cftable_entry_cb(tuple, &parse->cftable_entry_cb);
1416 break;
1417#endif
1418 case CISTPL_CHECKSUM: 1333 case CISTPL_CHECKSUM:
1419 ret = parse_checksum(tuple, &parse->checksum); 1334 ret = parse_checksum(tuple, &parse->checksum);
1420 break; 1335 break;
@@ -1513,7 +1428,6 @@ done:
1513 kfree(buf); 1428 kfree(buf);
1514 return ret; 1429 return ret;
1515} 1430}
1516EXPORT_SYMBOL(pccard_read_tuple);
1517 1431
1518 1432
1519/** 1433/**
@@ -1573,84 +1487,238 @@ next_entry:
1573 kfree(buf); 1487 kfree(buf);
1574 return ret; 1488 return ret;
1575} 1489}
1576EXPORT_SYMBOL(pccard_loop_tuple);
1577 1490
1578 1491
1579/*====================================================================== 1492/**
1580 1493 * pccard_validate_cis() - check whether card has a sensible CIS
1581 This tries to determine if a card has a sensible CIS. It returns 1494 * @s: the struct pcmcia_socket we are to check
1582 the number of tuples in the CIS, or 0 if the CIS looks bad. The 1495 * @info: returns the number of tuples in the (valid) CIS, or 0
1583 checks include making sure several critical tuples are present and 1496 *
1584 valid; seeing if the total number of tuples is reasonable; and 1497 * This tries to determine if a card has a sensible CIS. In @info, it
1585 looking for tuples that use reserved codes. 1498 * returns the number of tuples in the CIS, or 0 if the CIS looks bad. The
1586 1499 * checks include making sure several critical tuples are present and
1587======================================================================*/ 1500 * valid; seeing if the total number of tuples is reasonable; and
1588 1501 * looking for tuples that use reserved codes.
1502 *
1503 * The function returns 0 on success.
1504 */
1589int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info) 1505int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
1590{ 1506{
1591 tuple_t *tuple; 1507 tuple_t *tuple;
1592 cisparse_t *p; 1508 cisparse_t *p;
1593 unsigned int count = 0; 1509 unsigned int count = 0;
1594 int ret, reserved, dev_ok = 0, ident_ok = 0; 1510 int ret, reserved, dev_ok = 0, ident_ok = 0;
1595 1511
1596 if (!s) 1512 if (!s)
1597 return -EINVAL; 1513 return -EINVAL;
1598 1514
1599 tuple = kmalloc(sizeof(*tuple), GFP_KERNEL); 1515 /* We do not want to validate the CIS cache... */
1600 if (tuple == NULL) { 1516 mutex_lock(&s->ops_mutex);
1601 dev_printk(KERN_WARNING, &s->dev, "no memory to validate CIS\n"); 1517 destroy_cis_cache(s);
1602 return -ENOMEM; 1518 mutex_unlock(&s->ops_mutex);
1603 }
1604 p = kmalloc(sizeof(*p), GFP_KERNEL);
1605 if (p == NULL) {
1606 kfree(tuple);
1607 dev_printk(KERN_WARNING, &s->dev, "no memory to validate CIS\n");
1608 return -ENOMEM;
1609 }
1610 1519
1611 count = reserved = 0; 1520 tuple = kmalloc(sizeof(*tuple), GFP_KERNEL);
1612 tuple->DesiredTuple = RETURN_FIRST_TUPLE; 1521 if (tuple == NULL) {
1613 tuple->Attributes = TUPLE_RETURN_COMMON; 1522 dev_warn(&s->dev, "no memory to validate CIS\n");
1614 ret = pccard_get_first_tuple(s, BIND_FN_ALL, tuple); 1523 return -ENOMEM;
1615 if (ret != 0) 1524 }
1616 goto done; 1525 p = kmalloc(sizeof(*p), GFP_KERNEL);
1617 1526 if (p == NULL) {
1618 /* First tuple should be DEVICE; we should really have either that 1527 kfree(tuple);
1619 or a CFTABLE_ENTRY of some sort */ 1528 dev_warn(&s->dev, "no memory to validate CIS\n");
1620 if ((tuple->TupleCode == CISTPL_DEVICE) || 1529 return -ENOMEM;
1621 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY, p) == 0) || 1530 }
1622 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY_CB, p) == 0)) 1531
1623 dev_ok++; 1532 count = reserved = 0;
1624 1533 tuple->DesiredTuple = RETURN_FIRST_TUPLE;
1625 /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2 1534 tuple->Attributes = TUPLE_RETURN_COMMON;
1626 tuple, for card identification. Certain old D-Link and Linksys 1535 ret = pccard_get_first_tuple(s, BIND_FN_ALL, tuple);
1627 cards have only a broken VERS_2 tuple; hence the bogus test. */
1628 if ((pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, p) == 0) ||
1629 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_1, p) == 0) ||
1630 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_2, p) != -ENOSPC))
1631 ident_ok++;
1632
1633 if (!dev_ok && !ident_ok)
1634 goto done;
1635
1636 for (count = 1; count < MAX_TUPLES; count++) {
1637 ret = pccard_get_next_tuple(s, BIND_FN_ALL, tuple);
1638 if (ret != 0) 1536 if (ret != 0)
1639 break; 1537 goto done;
1640 if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) || 1538
1641 ((tuple->TupleCode > 0x47) && (tuple->TupleCode < 0x80)) || 1539 /* First tuple should be DEVICE; we should really have either that
1642 ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff))) 1540 or a CFTABLE_ENTRY of some sort */
1643 reserved++; 1541 if ((tuple->TupleCode == CISTPL_DEVICE) ||
1644 } 1542 (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY, p)) ||
1645 if ((count == MAX_TUPLES) || (reserved > 5) || 1543 (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY_CB, p)))
1646 ((!dev_ok || !ident_ok) && (count > 10))) 1544 dev_ok++;
1647 count = 0; 1545
1546 /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2
1547 tuple, for card identification. Certain old D-Link and Linksys
1548 cards have only a broken VERS_2 tuple; hence the bogus test. */
1549 if ((pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, p) == 0) ||
1550 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_1, p) == 0) ||
1551 (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_2, p) != -ENOSPC))
1552 ident_ok++;
1553
1554 if (!dev_ok && !ident_ok)
1555 goto done;
1556
1557 for (count = 1; count < MAX_TUPLES; count++) {
1558 ret = pccard_get_next_tuple(s, BIND_FN_ALL, tuple);
1559 if (ret != 0)
1560 break;
1561 if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) ||
1562 ((tuple->TupleCode > 0x47) && (tuple->TupleCode < 0x80)) ||
1563 ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff)))
1564 reserved++;
1565 }
1566 if ((count == MAX_TUPLES) || (reserved > 5) ||
1567 ((!dev_ok || !ident_ok) && (count > 10)))
1568 count = 0;
1569
1570 ret = 0;
1648 1571
1649done: 1572done:
1650 if (info) 1573 /* invalidate CIS cache on failure */
1651 *info = count; 1574 if (!dev_ok || !ident_ok || !count) {
1652 kfree(tuple); 1575 mutex_lock(&s->ops_mutex);
1653 kfree(p); 1576 destroy_cis_cache(s);
1654 return 0; 1577 mutex_unlock(&s->ops_mutex);
1578 ret = -EIO;
1579 }
1580
1581 if (info)
1582 *info = count;
1583 kfree(tuple);
1584 kfree(p);
1585 return ret;
1655} 1586}
1656EXPORT_SYMBOL(pccard_validate_cis); 1587
1588
1589#define to_socket(_dev) container_of(_dev, struct pcmcia_socket, dev)
1590
1591static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf,
1592 loff_t off, size_t count)
1593{
1594 tuple_t tuple;
1595 int status, i;
1596 loff_t pointer = 0;
1597 ssize_t ret = 0;
1598 u_char *tuplebuffer;
1599 u_char *tempbuffer;
1600
1601 tuplebuffer = kmalloc(sizeof(u_char) * 256, GFP_KERNEL);
1602 if (!tuplebuffer)
1603 return -ENOMEM;
1604
1605 tempbuffer = kmalloc(sizeof(u_char) * 258, GFP_KERNEL);
1606 if (!tempbuffer) {
1607 ret = -ENOMEM;
1608 goto free_tuple;
1609 }
1610
1611 memset(&tuple, 0, sizeof(tuple_t));
1612
1613 tuple.Attributes = TUPLE_RETURN_LINK | TUPLE_RETURN_COMMON;
1614 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
1615 tuple.TupleOffset = 0;
1616
1617 status = pccard_get_first_tuple(s, BIND_FN_ALL, &tuple);
1618 while (!status) {
1619 tuple.TupleData = tuplebuffer;
1620 tuple.TupleDataMax = 255;
1621 memset(tuplebuffer, 0, sizeof(u_char) * 255);
1622
1623 status = pccard_get_tuple_data(s, &tuple);
1624 if (status)
1625 break;
1626
1627 if (off < (pointer + 2 + tuple.TupleDataLen)) {
1628 tempbuffer[0] = tuple.TupleCode & 0xff;
1629 tempbuffer[1] = tuple.TupleLink & 0xff;
1630 for (i = 0; i < tuple.TupleDataLen; i++)
1631 tempbuffer[i + 2] = tuplebuffer[i] & 0xff;
1632
1633 for (i = 0; i < (2 + tuple.TupleDataLen); i++) {
1634 if (((i + pointer) >= off) &&
1635 (i + pointer) < (off + count)) {
1636 buf[ret] = tempbuffer[i];
1637 ret++;
1638 }
1639 }
1640 }
1641
1642 pointer += 2 + tuple.TupleDataLen;
1643
1644 if (pointer >= (off + count))
1645 break;
1646
1647 if (tuple.TupleCode == CISTPL_END)
1648 break;
1649 status = pccard_get_next_tuple(s, BIND_FN_ALL, &tuple);
1650 }
1651
1652 kfree(tempbuffer);
1653 free_tuple:
1654 kfree(tuplebuffer);
1655
1656 return ret;
1657}
1658
1659
1660static ssize_t pccard_show_cis(struct kobject *kobj,
1661 struct bin_attribute *bin_attr,
1662 char *buf, loff_t off, size_t count)
1663{
1664 unsigned int size = 0x200;
1665
1666 if (off >= size)
1667 count = 0;
1668 else {
1669 struct pcmcia_socket *s;
1670 unsigned int chains;
1671
1672 if (off + count > size)
1673 count = size - off;
1674
1675 s = to_socket(container_of(kobj, struct device, kobj));
1676
1677 if (!(s->state & SOCKET_PRESENT))
1678 return -ENODEV;
1679 if (pccard_validate_cis(s, &chains))
1680 return -EIO;
1681 if (!chains)
1682 return -ENODATA;
1683
1684 count = pccard_extract_cis(s, buf, off, count);
1685 }
1686
1687 return count;
1688}
1689
1690
1691static ssize_t pccard_store_cis(struct kobject *kobj,
1692 struct bin_attribute *bin_attr,
1693 char *buf, loff_t off, size_t count)
1694{
1695 struct pcmcia_socket *s;
1696 int error;
1697
1698 s = to_socket(container_of(kobj, struct device, kobj));
1699
1700 if (off)
1701 return -EINVAL;
1702
1703 if (count >= CISTPL_MAX_CIS_SIZE)
1704 return -EINVAL;
1705
1706 if (!(s->state & SOCKET_PRESENT))
1707 return -ENODEV;
1708
1709 error = pcmcia_replace_cis(s, buf, count);
1710 if (error)
1711 return -EIO;
1712
1713 pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY);
1714
1715 return count;
1716}
1717
1718
1719struct bin_attribute pccard_cis_attr = {
1720 .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
1721 .size = 0x200,
1722 .read = pccard_show_cis,
1723 .write = pccard_store_cis,
1724};
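The "cis" binary attribute defined above takes over from the old socket_sysfs.c implementation: reading it validates the CIS via pccard_validate_cis() and streams the raw tuples, while writing it installs a replacement CIS through pcmcia_replace_cis() and queues a PCMCIA_UEVENT_REQUERY for pccardd. Assuming the attribute is still exposed under the pcmcia_socket class (the exact sysfs path is an assumption, not spelled out in this hunk), a userspace read looks roughly like:

/* dump_cis.c - hypothetical example; adjust the socket number/path as needed */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[0x200];	/* matches pccard_cis_attr.size */
	ssize_t n;
	int fd;

	fd = open("/sys/class/pcmcia_socket/pcmcia_socket0/cis", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes of CIS tuples\n", n);
	close(fd);
	return 0;
}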
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 6d6f82b38a68..e679e708db63 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -140,19 +140,13 @@ struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt)
140 struct device *dev = get_device(&skt->dev); 140 struct device *dev = get_device(&skt->dev);
141 if (!dev) 141 if (!dev)
142 return NULL; 142 return NULL;
143 skt = dev_get_drvdata(dev); 143 return dev_get_drvdata(dev);
144 if (!try_module_get(skt->owner)) {
145 put_device(&skt->dev);
146 return NULL;
147 }
148 return skt;
149} 144}
150EXPORT_SYMBOL(pcmcia_get_socket); 145EXPORT_SYMBOL(pcmcia_get_socket);
151 146
152 147
153void pcmcia_put_socket(struct pcmcia_socket *skt) 148void pcmcia_put_socket(struct pcmcia_socket *skt)
154{ 149{
155 module_put(skt->owner);
156 put_device(&skt->dev); 150 put_device(&skt->dev);
157} 151}
158EXPORT_SYMBOL(pcmcia_put_socket); 152EXPORT_SYMBOL(pcmcia_put_socket);
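pcmcia_get_socket() and pcmcia_put_socket() are reduced to get_device()/put_device() wrappers and no longer touch the skt->owner module reference. The usage pattern for callers stays the same; a minimal sketch:

static void use_socket(struct pcmcia_socket *skt)
{
	struct pcmcia_socket *s = pcmcia_get_socket(skt); /* get_device() */

	if (!s)
		return;
	/* the socket's struct device cannot go away while we hold 's' */
	pcmcia_put_socket(s); /* put_device() */
}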
@@ -181,8 +175,6 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
181 175
182 dev_dbg(&socket->dev, "pcmcia_register_socket(0x%p)\n", socket->ops); 176 dev_dbg(&socket->dev, "pcmcia_register_socket(0x%p)\n", socket->ops);
183 177
184 spin_lock_init(&socket->lock);
185
186 /* try to obtain a socket number [yes, it gets ugly if we 178 /* try to obtain a socket number [yes, it gets ugly if we
187 * register more than 2^sizeof(unsigned int) pcmcia 179 * register more than 2^sizeof(unsigned int) pcmcia
188 * sockets... but the socket number is deprecated 180 * sockets... but the socket number is deprecated
@@ -228,10 +220,13 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
228 init_completion(&socket->socket_released); 220 init_completion(&socket->socket_released);
229 init_completion(&socket->thread_done); 221 init_completion(&socket->thread_done);
230 mutex_init(&socket->skt_mutex); 222 mutex_init(&socket->skt_mutex);
223 mutex_init(&socket->ops_mutex);
231 spin_lock_init(&socket->thread_lock); 224 spin_lock_init(&socket->thread_lock);
232 225
233 if (socket->resource_ops->init) { 226 if (socket->resource_ops->init) {
227 mutex_lock(&socket->ops_mutex);
234 ret = socket->resource_ops->init(socket); 228 ret = socket->resource_ops->init(socket);
229 mutex_unlock(&socket->ops_mutex);
235 if (ret) 230 if (ret)
236 goto err; 231 goto err;
237 } 232 }
@@ -283,15 +278,17 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
283 if (socket->thread) 278 if (socket->thread)
284 kthread_stop(socket->thread); 279 kthread_stop(socket->thread);
285 280
286 release_cis_mem(socket);
287
288 /* remove from our own list */ 281 /* remove from our own list */
289 down_write(&pcmcia_socket_list_rwsem); 282 down_write(&pcmcia_socket_list_rwsem);
290 list_del(&socket->socket_list); 283 list_del(&socket->socket_list);
291 up_write(&pcmcia_socket_list_rwsem); 284 up_write(&pcmcia_socket_list_rwsem);
292 285
293 /* wait for sysfs to drop all references */ 286 /* wait for sysfs to drop all references */
294 release_resource_db(socket); 287 if (socket->resource_ops->exit) {
288 mutex_lock(&socket->ops_mutex);
289 socket->resource_ops->exit(socket);
290 mutex_unlock(&socket->ops_mutex);
291 }
295 wait_for_completion(&socket->socket_released); 292 wait_for_completion(&socket->socket_released);
296} /* pcmcia_unregister_socket */ 293} /* pcmcia_unregister_socket */
297EXPORT_SYMBOL(pcmcia_unregister_socket); 294EXPORT_SYMBOL(pcmcia_unregister_socket);
@@ -328,7 +325,7 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority)
328{ 325{
329 int ret; 326 int ret;
330 327
331 if (s->state & SOCKET_CARDBUS) 328 if ((s->state & SOCKET_CARDBUS) && (event != CS_EVENT_CARD_REMOVAL))
332 return 0; 329 return 0;
333 330
334 dev_dbg(&s->dev, "send_event(event %d, pri %d, callback 0x%p)\n", 331 dev_dbg(&s->dev, "send_event(event %d, pri %d, callback 0x%p)\n",
@@ -346,13 +343,6 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority)
346 return ret; 343 return ret;
347} 344}
348 345
349static void socket_remove_drivers(struct pcmcia_socket *skt)
350{
351 dev_dbg(&skt->dev, "remove_drivers\n");
352
353 send_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
354}
355
356static int socket_reset(struct pcmcia_socket *skt) 346static int socket_reset(struct pcmcia_socket *skt)
357{ 347{
358 int status, i; 348 int status, i;
@@ -395,7 +385,9 @@ static void socket_shutdown(struct pcmcia_socket *s)
395 385
396 dev_dbg(&s->dev, "shutdown\n"); 386 dev_dbg(&s->dev, "shutdown\n");
397 387
398 socket_remove_drivers(s); 388 send_event(s, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
389
390 mutex_lock(&s->ops_mutex);
399 s->state &= SOCKET_INUSE | SOCKET_PRESENT; 391 s->state &= SOCKET_INUSE | SOCKET_PRESENT;
400 msleep(shutdown_delay * 10); 392 msleep(shutdown_delay * 10);
401 s->state &= SOCKET_INUSE; 393 s->state &= SOCKET_INUSE;
@@ -406,11 +398,21 @@ static void socket_shutdown(struct pcmcia_socket *s)
406 s->ops->set_socket(s, &s->socket); 398 s->ops->set_socket(s, &s->socket);
407 s->irq.AssignedIRQ = s->irq.Config = 0; 399 s->irq.AssignedIRQ = s->irq.Config = 0;
408 s->lock_count = 0; 400 s->lock_count = 0;
409 destroy_cis_cache(s); 401 kfree(s->fake_cis);
402 s->fake_cis = NULL;
403 s->functions = 0;
404
405 /* From here on we can be sure that only we (that is, the
406 * pccardd thread) accesses this socket, and all (16-bit)
407 * PCMCIA interactions are gone. Therefore, release
408 * ops_mutex so that we don't get a sysfs-related lockdep
409 * warning.
410 */
411 mutex_unlock(&s->ops_mutex);
412
410#ifdef CONFIG_CARDBUS 413#ifdef CONFIG_CARDBUS
411 cb_free(s); 414 cb_free(s);
412#endif 415#endif
413 s->functions = 0;
414 416
415 /* give socket some time to power down */ 417 /* give socket some time to power down */
416 msleep(100); 418 msleep(100);
@@ -421,7 +423,7 @@ static void socket_shutdown(struct pcmcia_socket *s)
421 "*** DANGER *** unable to remove socket power\n"); 423 "*** DANGER *** unable to remove socket power\n");
422 } 424 }
423 425
424 cs_socket_put(s); 426 s->state &= ~SOCKET_INUSE;
425} 427}
426 428
427static int socket_setup(struct pcmcia_socket *skt, int initial_delay) 429static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
@@ -460,7 +462,8 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
460 return -EINVAL; 462 return -EINVAL;
461 } 463 }
462 skt->state |= SOCKET_CARDBUS; 464 skt->state |= SOCKET_CARDBUS;
463 } 465 } else
466 skt->state &= ~SOCKET_CARDBUS;
464 467
465 /* 468 /*
466 * Decode the card voltage requirements, and apply power to the card. 469 * Decode the card voltage requirements, and apply power to the card.
@@ -509,8 +512,12 @@ static int socket_insert(struct pcmcia_socket *skt)
509 512
510 dev_dbg(&skt->dev, "insert\n"); 513 dev_dbg(&skt->dev, "insert\n");
511 514
512 if (!cs_socket_get(skt)) 515 mutex_lock(&skt->ops_mutex);
513 return -ENODEV; 516 if (skt->state & SOCKET_INUSE) {
517 mutex_unlock(&skt->ops_mutex);
518 return -EINVAL;
519 }
520 skt->state |= SOCKET_INUSE;
514 521
515 ret = socket_setup(skt, setup_delay); 522 ret = socket_setup(skt, setup_delay);
516 if (ret == 0) { 523 if (ret == 0) {
@@ -528,9 +535,11 @@ static int socket_insert(struct pcmcia_socket *skt)
528 } 535 }
529#endif 536#endif
530 dev_dbg(&skt->dev, "insert done\n"); 537 dev_dbg(&skt->dev, "insert done\n");
538 mutex_unlock(&skt->ops_mutex);
531 539
532 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); 540 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
533 } else { 541 } else {
542 mutex_unlock(&skt->ops_mutex);
534 socket_shutdown(skt); 543 socket_shutdown(skt);
535 } 544 }
536 545
@@ -542,58 +551,66 @@ static int socket_suspend(struct pcmcia_socket *skt)
542 if (skt->state & SOCKET_SUSPEND) 551 if (skt->state & SOCKET_SUSPEND)
543 return -EBUSY; 552 return -EBUSY;
544 553
554 mutex_lock(&skt->ops_mutex);
555 skt->suspended_state = skt->state;
556
545 send_event(skt, CS_EVENT_PM_SUSPEND, CS_EVENT_PRI_LOW); 557 send_event(skt, CS_EVENT_PM_SUSPEND, CS_EVENT_PRI_LOW);
546 skt->socket = dead_socket; 558 skt->socket = dead_socket;
547 skt->ops->set_socket(skt, &skt->socket); 559 skt->ops->set_socket(skt, &skt->socket);
548 if (skt->ops->suspend) 560 if (skt->ops->suspend)
549 skt->ops->suspend(skt); 561 skt->ops->suspend(skt);
550 skt->state |= SOCKET_SUSPEND; 562 skt->state |= SOCKET_SUSPEND;
551 563 mutex_unlock(&skt->ops_mutex);
552 return 0; 564 return 0;
553} 565}
554 566
555static int socket_early_resume(struct pcmcia_socket *skt) 567static int socket_early_resume(struct pcmcia_socket *skt)
556{ 568{
569 mutex_lock(&skt->ops_mutex);
557 skt->socket = dead_socket; 570 skt->socket = dead_socket;
558 skt->ops->init(skt); 571 skt->ops->init(skt);
559 skt->ops->set_socket(skt, &skt->socket); 572 skt->ops->set_socket(skt, &skt->socket);
560 if (skt->state & SOCKET_PRESENT) 573 if (skt->state & SOCKET_PRESENT)
561 skt->resume_status = socket_setup(skt, resume_delay); 574 skt->resume_status = socket_setup(skt, resume_delay);
575 mutex_unlock(&skt->ops_mutex);
562 return 0; 576 return 0;
563} 577}
564 578
565static int socket_late_resume(struct pcmcia_socket *skt) 579static int socket_late_resume(struct pcmcia_socket *skt)
566{ 580{
567 if (!(skt->state & SOCKET_PRESENT)) { 581 mutex_lock(&skt->ops_mutex);
568 skt->state &= ~SOCKET_SUSPEND; 582 skt->state &= ~SOCKET_SUSPEND;
583 mutex_unlock(&skt->ops_mutex);
584
585 if (!(skt->state & SOCKET_PRESENT))
569 return socket_insert(skt); 586 return socket_insert(skt);
587
588 if (skt->resume_status) {
589 socket_shutdown(skt);
590 return 0;
570 } 591 }
571 592
572 if (skt->resume_status == 0) { 593 if (skt->suspended_state != skt->state) {
573 /* 594 dev_dbg(&skt->dev,
574 * FIXME: need a better check here for cardbus cards. 595 "suspend state 0x%x != resume state 0x%x\n",
575 */ 596 skt->suspended_state, skt->state);
576 if (verify_cis_cache(skt) != 0) { 597
577 dev_dbg(&skt->dev, "cis mismatch - different card\n");
578 socket_remove_drivers(skt);
579 destroy_cis_cache(skt);
580 /*
581 * Workaround: give DS time to schedule removal.
582 * Remove me once the 100ms delay is eliminated
583 * in ds.c
584 */
585 msleep(200);
586 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
587 } else {
588 dev_dbg(&skt->dev, "cis matches cache\n");
589 send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
590 }
591 } else {
592 socket_shutdown(skt); 598 socket_shutdown(skt);
599 return socket_insert(skt);
593 } 600 }
594 601
595 skt->state &= ~SOCKET_SUSPEND; 602#ifdef CONFIG_CARDBUS
603 if (skt->state & SOCKET_CARDBUS) {
604 /* We can't be sure the CardBus card is the same
605 * as the one previously inserted. Therefore, remove
606 * and re-add... */
607 cb_free(skt);
608 cb_alloc(skt);
609 return 0;
610 }
611#endif
596 612
613 send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
597 return 0; 614 return 0;
598} 615}
599 616
@@ -672,20 +689,26 @@ static int pccardd(void *__skt)
672 689
673 complete(&skt->thread_done); 690 complete(&skt->thread_done);
674 691
692 /* wait for userspace to catch up */
693 msleep(250);
694
675 set_freezable(); 695 set_freezable();
676 for (;;) { 696 for (;;) {
677 unsigned long flags; 697 unsigned long flags;
678 unsigned int events; 698 unsigned int events;
699 unsigned int sysfs_events;
679 700
680 set_current_state(TASK_INTERRUPTIBLE); 701 set_current_state(TASK_INTERRUPTIBLE);
681 702
682 spin_lock_irqsave(&skt->thread_lock, flags); 703 spin_lock_irqsave(&skt->thread_lock, flags);
683 events = skt->thread_events; 704 events = skt->thread_events;
684 skt->thread_events = 0; 705 skt->thread_events = 0;
706 sysfs_events = skt->sysfs_events;
707 skt->sysfs_events = 0;
685 spin_unlock_irqrestore(&skt->thread_lock, flags); 708 spin_unlock_irqrestore(&skt->thread_lock, flags);
686 709
710 mutex_lock(&skt->skt_mutex);
687 if (events) { 711 if (events) {
688 mutex_lock(&skt->skt_mutex);
689 if (events & SS_DETECT) 712 if (events & SS_DETECT)
690 socket_detect_change(skt); 713 socket_detect_change(skt);
691 if (events & SS_BATDEAD) 714 if (events & SS_BATDEAD)
@@ -694,10 +717,39 @@ static int pccardd(void *__skt)
694 send_event(skt, CS_EVENT_BATTERY_LOW, CS_EVENT_PRI_LOW); 717 send_event(skt, CS_EVENT_BATTERY_LOW, CS_EVENT_PRI_LOW);
695 if (events & SS_READY) 718 if (events & SS_READY)
696 send_event(skt, CS_EVENT_READY_CHANGE, CS_EVENT_PRI_LOW); 719 send_event(skt, CS_EVENT_READY_CHANGE, CS_EVENT_PRI_LOW);
697 mutex_unlock(&skt->skt_mutex);
698 continue;
699 } 720 }
700 721
722 if (sysfs_events) {
723 if (sysfs_events & PCMCIA_UEVENT_EJECT)
724 socket_remove(skt);
725 if (sysfs_events & PCMCIA_UEVENT_INSERT)
726 socket_insert(skt);
727 if ((sysfs_events & PCMCIA_UEVENT_RESUME) &&
728 !(skt->state & SOCKET_CARDBUS)) {
729 ret = socket_resume(skt);
730 if (!ret && skt->callback)
731 skt->callback->resume(skt);
732 }
733 if ((sysfs_events & PCMCIA_UEVENT_SUSPEND) &&
734 !(skt->state & SOCKET_CARDBUS)) {
735 if (skt->callback)
736 ret = skt->callback->suspend(skt);
737 else
738 ret = 0;
739 if (!ret)
740 socket_suspend(skt);
741 }
742 if ((sysfs_events & PCMCIA_UEVENT_REQUERY) &&
743 !(skt->state & SOCKET_CARDBUS)) {
744 if (!ret && skt->callback)
745 skt->callback->requery(skt);
746 }
747 }
748 mutex_unlock(&skt->skt_mutex);
749
750 if (events || sysfs_events)
751 continue;
752
701 if (kthread_should_stop()) 753 if (kthread_should_stop())
702 break; 754 break;
703 755
@@ -707,6 +759,13 @@ static int pccardd(void *__skt)
707 /* make sure we are running before we exit */ 759 /* make sure we are running before we exit */
708 set_current_state(TASK_RUNNING); 760 set_current_state(TASK_RUNNING);
709 761
762 /* shut down socket, if a device is still present */
763 if (skt->state & SOCKET_PRESENT) {
764 mutex_lock(&skt->skt_mutex);
765 socket_remove(skt);
766 mutex_unlock(&skt->skt_mutex);
767 }
768
710 /* remove from the device core */ 769 /* remove from the device core */
711 pccard_sysfs_remove_socket(&skt->dev); 770 pccard_sysfs_remove_socket(&skt->dev);
712 device_unregister(&skt->dev); 771 device_unregister(&skt->dev);
@@ -732,6 +791,31 @@ void pcmcia_parse_events(struct pcmcia_socket *s, u_int events)
732} /* pcmcia_parse_events */ 791} /* pcmcia_parse_events */
733EXPORT_SYMBOL(pcmcia_parse_events); 792EXPORT_SYMBOL(pcmcia_parse_events);
734 793
794/**
795 * pcmcia_parse_uevents() - tell pccardd to issue manual commands
 796 * @s: the PCMCIA socket we want to command
 797 * @events: events to pass to pccardd
 798 *
 799 * Userspace-issued insert, eject, suspend and resume commands must be
 800 * handled by pccardd to avoid any sysfs-related deadlocks. Valid events
 801 * are PCMCIA_UEVENT_EJECT (for eject), PCMCIA_UEVENT_INSERT (for insert),
802 * PCMCIA_UEVENT_RESUME (for resume), PCMCIA_UEVENT_SUSPEND (for suspend)
803 * and PCMCIA_UEVENT_REQUERY (for re-querying the PCMCIA card).
804 */
805void pcmcia_parse_uevents(struct pcmcia_socket *s, u_int events)
806{
807 unsigned long flags;
808 dev_dbg(&s->dev, "parse_uevents: events %08x\n", events);
809 if (s->thread) {
810 spin_lock_irqsave(&s->thread_lock, flags);
811 s->sysfs_events |= events;
812 spin_unlock_irqrestore(&s->thread_lock, flags);
813
814 wake_up_process(s->thread);
815 }
816}
817EXPORT_SYMBOL(pcmcia_parse_uevents);
818
735 819
736/* register pcmcia_callback */ 820/* register pcmcia_callback */
737int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c) 821int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c)
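pcmcia_parse_uevents() is the replacement for the pcmcia_eject_card()/pcmcia_insert_card()/pcmcia_suspend_card()/pcmcia_resume_card() entry points removed below: a sysfs store handler only sets the matching PCMCIA_UEVENT_* bit and wakes pccardd, which then performs the actual socket operation outside the sysfs call path. A sketch of such a handler (the function name is illustrative; the real handlers live in socket_sysfs.c and ds.c, which are not part of this hunk):

static ssize_t pccard_store_eject_example(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pcmcia_socket *s = dev_get_drvdata(dev);

	/* defer the actual socket_remove() to the pccardd thread */
	pcmcia_parse_uevents(s, PCMCIA_UEVENT_EJECT);
	return count;
}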
@@ -796,7 +880,10 @@ int pcmcia_reset_card(struct pcmcia_socket *skt)
796 send_event(skt, CS_EVENT_RESET_PHYSICAL, CS_EVENT_PRI_LOW); 880 send_event(skt, CS_EVENT_RESET_PHYSICAL, CS_EVENT_PRI_LOW);
797 if (skt->callback) 881 if (skt->callback)
798 skt->callback->suspend(skt); 882 skt->callback->suspend(skt);
799 if (socket_reset(skt) == 0) { 883 mutex_lock(&skt->ops_mutex);
884 ret = socket_reset(skt);
885 mutex_unlock(&skt->ops_mutex);
886 if (ret == 0) {
800 send_event(skt, CS_EVENT_CARD_RESET, CS_EVENT_PRI_LOW); 887 send_event(skt, CS_EVENT_CARD_RESET, CS_EVENT_PRI_LOW);
801 if (skt->callback) 888 if (skt->callback)
802 skt->callback->resume(skt); 889 skt->callback->resume(skt);
@@ -812,121 +899,6 @@ int pcmcia_reset_card(struct pcmcia_socket *skt)
812EXPORT_SYMBOL(pcmcia_reset_card); 899EXPORT_SYMBOL(pcmcia_reset_card);
813 900
814 901
815/* These shut down or wake up a socket. They are sort of user
816 * initiated versions of the APM suspend and resume actions.
817 */
818int pcmcia_suspend_card(struct pcmcia_socket *skt)
819{
820 int ret;
821
822 dev_dbg(&skt->dev, "suspending socket\n");
823
824 mutex_lock(&skt->skt_mutex);
825 do {
826 if (!(skt->state & SOCKET_PRESENT)) {
827 ret = -ENODEV;
828 break;
829 }
830 if (skt->state & SOCKET_CARDBUS) {
831 ret = -EPERM;
832 break;
833 }
834 if (skt->callback) {
835 ret = skt->callback->suspend(skt);
836 if (ret)
837 break;
838 }
839 ret = socket_suspend(skt);
840 } while (0);
841 mutex_unlock(&skt->skt_mutex);
842
843 return ret;
844} /* suspend_card */
845EXPORT_SYMBOL(pcmcia_suspend_card);
846
847
848int pcmcia_resume_card(struct pcmcia_socket *skt)
849{
850 int ret;
851
852 dev_dbg(&skt->dev, "waking up socket\n");
853
854 mutex_lock(&skt->skt_mutex);
855 do {
856 if (!(skt->state & SOCKET_PRESENT)) {
857 ret = -ENODEV;
858 break;
859 }
860 if (skt->state & SOCKET_CARDBUS) {
861 ret = -EPERM;
862 break;
863 }
864 ret = socket_resume(skt);
865 if (!ret && skt->callback)
866 skt->callback->resume(skt);
867 } while (0);
868 mutex_unlock(&skt->skt_mutex);
869
870 return ret;
871} /* resume_card */
872EXPORT_SYMBOL(pcmcia_resume_card);
873
874
875/* These handle user requests to eject or insert a card. */
876int pcmcia_eject_card(struct pcmcia_socket *skt)
877{
878 int ret;
879
880 dev_dbg(&skt->dev, "user eject request\n");
881
882 mutex_lock(&skt->skt_mutex);
883 do {
884 if (!(skt->state & SOCKET_PRESENT)) {
885 ret = -ENODEV;
886 break;
887 }
888
889 ret = send_event(skt, CS_EVENT_EJECTION_REQUEST, CS_EVENT_PRI_LOW);
890 if (ret != 0) {
891 ret = -EINVAL;
892 break;
893 }
894
895 socket_remove(skt);
896 ret = 0;
897 } while (0);
898 mutex_unlock(&skt->skt_mutex);
899
900 return ret;
901} /* eject_card */
902EXPORT_SYMBOL(pcmcia_eject_card);
903
904
905int pcmcia_insert_card(struct pcmcia_socket *skt)
906{
907 int ret;
908
909 dev_dbg(&skt->dev, "user insert request\n");
910
911 mutex_lock(&skt->skt_mutex);
912 do {
913 if (skt->state & SOCKET_PRESENT) {
914 ret = -EBUSY;
915 break;
916 }
917 if (socket_insert(skt) == -ENODEV) {
918 ret = -ENODEV;
919 break;
920 }
921 ret = 0;
922 } while (0);
923 mutex_unlock(&skt->skt_mutex);
924
925 return ret;
926} /* insert_card */
927EXPORT_SYMBOL(pcmcia_insert_card);
928
929
930static int pcmcia_socket_uevent(struct device *dev, 902static int pcmcia_socket_uevent(struct device *dev,
931 struct kobj_uevent_env *env) 903 struct kobj_uevent_env *env)
932{ 904{
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 3bc02d53a3a3..f95864c2191e 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -87,37 +87,11 @@ struct pccard_resource_ops {
87#define SOCKET_CARDBUS 0x8000 87#define SOCKET_CARDBUS 0x8000
88#define SOCKET_CARDBUS_CONFIG 0x10000 88#define SOCKET_CARDBUS_CONFIG 0x10000
89 89
90static inline int cs_socket_get(struct pcmcia_socket *skt)
91{
92 int ret;
93
94 WARN_ON(skt->state & SOCKET_INUSE);
95
96 ret = try_module_get(skt->owner);
97 if (ret)
98 skt->state |= SOCKET_INUSE;
99 return ret;
100}
101
102static inline void cs_socket_put(struct pcmcia_socket *skt)
103{
104 if (skt->state & SOCKET_INUSE) {
105 skt->state &= ~SOCKET_INUSE;
106 module_put(skt->owner);
107 }
108}
109
110 90
111/* 91/*
112 * Stuff internal to module "pcmcia_core": 92 * Stuff internal to module "pcmcia_core":
113 */ 93 */
114 94
115/* cistpl.c */
116int verify_cis_cache(struct pcmcia_socket *s);
117
118/* rsrc_mgr.c */
119void release_resource_db(struct pcmcia_socket *s);
120
121/* socket_sysfs.c */ 95/* socket_sysfs.c */
122extern int pccard_sysfs_add_socket(struct device *dev); 96extern int pccard_sysfs_add_socket(struct device *dev);
123extern void pccard_sysfs_remove_socket(struct device *dev); 97extern void pccard_sysfs_remove_socket(struct device *dev);
@@ -125,8 +99,6 @@ extern void pccard_sysfs_remove_socket(struct device *dev);
125/* cardbus.c */ 99/* cardbus.c */
126int cb_alloc(struct pcmcia_socket *s); 100int cb_alloc(struct pcmcia_socket *s);
127void cb_free(struct pcmcia_socket *s); 101void cb_free(struct pcmcia_socket *s);
128int read_cb_mem(struct pcmcia_socket *s, int space, u_int addr, u_int len,
129 void *ptr);
130 102
131 103
132 104
@@ -138,7 +110,8 @@ struct pcmcia_callback{
138 struct module *owner; 110 struct module *owner;
139 int (*event) (struct pcmcia_socket *s, 111 int (*event) (struct pcmcia_socket *s,
140 event_t event, int priority); 112 event_t event, int priority);
141 void (*requery) (struct pcmcia_socket *s, int new_cis); 113 void (*requery) (struct pcmcia_socket *s);
114 int (*validate) (struct pcmcia_socket *s, unsigned int *i);
142 int (*suspend) (struct pcmcia_socket *s); 115 int (*suspend) (struct pcmcia_socket *s);
143 int (*resume) (struct pcmcia_socket *s); 116 int (*resume) (struct pcmcia_socket *s);
144}; 117};
@@ -151,16 +124,35 @@ extern struct class pcmcia_socket_class;
151int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c); 124int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c);
152struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr); 125struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr);
153 126
154int pcmcia_suspend_card(struct pcmcia_socket *skt); 127void pcmcia_parse_uevents(struct pcmcia_socket *socket, unsigned int events);
155int pcmcia_resume_card(struct pcmcia_socket *skt); 128#define PCMCIA_UEVENT_EJECT 0x0001
156 129#define PCMCIA_UEVENT_INSERT 0x0002
157int pcmcia_eject_card(struct pcmcia_socket *skt); 130#define PCMCIA_UEVENT_SUSPEND 0x0004
158int pcmcia_insert_card(struct pcmcia_socket *skt); 131#define PCMCIA_UEVENT_RESUME 0x0008
132#define PCMCIA_UEVENT_REQUERY 0x0010
159 133
160struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt); 134struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt);
161void pcmcia_put_socket(struct pcmcia_socket *skt); 135void pcmcia_put_socket(struct pcmcia_socket *skt);
162 136
137/*
138 * Stuff internal to module "pcmcia".
139 */
140/* ds.c */
141extern struct bus_type pcmcia_bus_type;
142
143/* pcmcia_resource.c */
144extern int pcmcia_release_configuration(struct pcmcia_device *p_dev);
145extern int pcmcia_validate_mem(struct pcmcia_socket *s);
146extern struct resource *pcmcia_find_mem_region(u_long base,
147 u_long num,
148 u_long align,
149 int low,
150 struct pcmcia_socket *s);
151
152
163/* cistpl.c */ 153/* cistpl.c */
154extern struct bin_attribute pccard_cis_attr;
155
164int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, 156int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr,
165 u_int addr, u_int len, void *ptr); 157 u_int addr, u_int len, void *ptr);
166void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, 158void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr,
@@ -172,8 +164,8 @@ int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
172int pcmcia_replace_cis(struct pcmcia_socket *s, 164int pcmcia_replace_cis(struct pcmcia_socket *s,
173 const u8 *data, const size_t len); 165 const u8 *data, const size_t len);
174int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count); 166int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count);
167int verify_cis_cache(struct pcmcia_socket *s);
175 168
176/* loop over CIS entries */
177int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function, 169int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
178 cisdata_t code, cisparse_t *parse, void *priv_data, 170 cisdata_t code, cisparse_t *parse, void *priv_data,
179 int (*loop_tuple) (tuple_t *tuple, 171 int (*loop_tuple) (tuple_t *tuple,
@@ -189,35 +181,8 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
189int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple); 181int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple);
190 182
191 183
192/* rsrc_mgr.c */
193int pcmcia_validate_mem(struct pcmcia_socket *s);
194struct resource *pcmcia_find_io_region(unsigned long base,
195 int num,
196 unsigned long align,
197 struct pcmcia_socket *s);
198int pcmcia_adjust_io_region(struct resource *res,
199 unsigned long r_start,
200 unsigned long r_end,
201 struct pcmcia_socket *s);
202struct resource *pcmcia_find_mem_region(u_long base,
203 u_long num,
204 u_long align,
205 int low,
206 struct pcmcia_socket *s);
207
208/*
209 * Stuff internal to module "pcmcia".
210 */
211/* ds.c */
212extern struct bus_type pcmcia_bus_type;
213
214/* pcmcia_resource.c */
215extern int pcmcia_release_configuration(struct pcmcia_device *p_dev);
216
217#ifdef CONFIG_PCMCIA_IOCTL 184#ifdef CONFIG_PCMCIA_IOCTL
218/* ds.c */ 185/* ds.c */
219extern spinlock_t pcmcia_dev_list_lock;
220
221extern struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev); 186extern struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev);
222extern void pcmcia_put_dev(struct pcmcia_device *p_dev); 187extern void pcmcia_put_dev(struct pcmcia_device *p_dev);
223 188
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
new file mode 100644
index 000000000000..3889cf07d6ce
--- /dev/null
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -0,0 +1,623 @@
1/*
2 * PCMCIA socket code for the Alchemy Db1xxx/Pb1xxx boards.
3 *
4 * Copyright (c) 2009 Manuel Lauss <manuel.lauss@gmail.com>
5 *
6 */
7
8/* This is a fairly generic PCMCIA socket driver suitable for the
9 * following Alchemy Development boards:
10 * Db1000, Db/Pb1500, Db/Pb1100, Db/Pb1550, Db/Pb1200.
11 *
 12 * The Db1000 is used as a reference: per-socket card, card-detect and
 13 * status-change IRQs connected to SoC GPIOs, control and status register
14 * bits arranged in per-socket groups in an external PLD. All boards
15 * listed here use this layout, including bit positions and meanings.
16 * Of course there are exceptions in later boards:
17 *
18 * - Pb1100/Pb1500: single socket only; voltage key bits VS are
19 * at STATUS[5:4] (instead of STATUS[1:0]).
20 * - Au1200-based: additional card-eject irqs, irqs not gpios!
21 */
22
23#include <linux/delay.h>
24#include <linux/gpio.h>
25#include <linux/interrupt.h>
26#include <linux/pm.h>
27#include <linux/platform_device.h>
28#include <linux/resource.h>
29#include <linux/spinlock.h>
30
31#include <pcmcia/cs_types.h>
32#include <pcmcia/ss.h>
33
34#include <asm/mach-au1x00/au1000.h>
35#include <asm/mach-db1x00/bcsr.h>
36
37#define MEM_MAP_SIZE 0x400000
38#define IO_MAP_SIZE 0x1000
39
40struct db1x_pcmcia_sock {
41 struct pcmcia_socket socket;
42 int nr; /* socket number */
43 void *virt_io;
44
45 /* the "pseudo" addresses of the PCMCIA space. */
46 phys_addr_t phys_io;
47 phys_addr_t phys_attr;
48 phys_addr_t phys_mem;
49
50 /* previous flags for set_socket() */
51 unsigned int old_flags;
52
53 /* interrupt sources: linux irq numbers! */
54 int insert_irq; /* default carddetect irq */
55 int stschg_irq; /* card-status-change irq */
56 int card_irq; /* card irq */
57 int eject_irq; /* db1200/pb1200 have these */
58
59#define BOARD_TYPE_DEFAULT 0 /* most boards */
60#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
61#define BOARD_TYPE_PB1100 2 /* VS bits slightly different */
62 int board_type;
63};
64
65#define to_db1x_socket(x) container_of(x, struct db1x_pcmcia_sock, socket)
66
67/* DB/PB1200: check CPLD SIGSTATUS register bit 10/12 */
68static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
69{
70 unsigned short sigstat;
71
72 sigstat = bcsr_read(BCSR_SIGSTAT);
 73	return sigstat & (1 << (8 + 2 * sock->nr));
74}
75
76/* carddetect gpio: low-active */
77static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
78{
79 return !gpio_get_value(irq_to_gpio(sock->insert_irq));
80}
81
82static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
83{
84 switch (sock->board_type) {
85 case BOARD_TYPE_DB1200:
86 return db1200_card_inserted(sock);
87 default:
88 return db1000_card_inserted(sock);
89 }
90}
91
92/* STSCHG tends to bounce heavily when cards are inserted/ejected.
 93 * To avoid this, the interrupt is normally disabled and only enabled
 94 * after the reset signal to the card has been de-asserted.
95 */
96static inline void set_stschg(struct db1x_pcmcia_sock *sock, int en)
97{
98 if (sock->stschg_irq != -1) {
99 if (en)
100 enable_irq(sock->stschg_irq);
101 else
102 disable_irq(sock->stschg_irq);
103 }
104}
105
106static irqreturn_t db1000_pcmcia_cdirq(int irq, void *data)
107{
108 struct db1x_pcmcia_sock *sock = data;
109
110 pcmcia_parse_events(&sock->socket, SS_DETECT);
111
112 return IRQ_HANDLED;
113}
114
115static irqreturn_t db1000_pcmcia_stschgirq(int irq, void *data)
116{
117 struct db1x_pcmcia_sock *sock = data;
118
119 pcmcia_parse_events(&sock->socket, SS_STSCHG);
120
121 return IRQ_HANDLED;
122}
123
124static irqreturn_t db1200_pcmcia_cdirq(int irq, void *data)
125{
126 struct db1x_pcmcia_sock *sock = data;
127
128 /* Db/Pb1200 have separate per-socket insertion and ejection
129 * interrupts which stay asserted as long as the card is
130 * inserted/missing. The one which caused us to be called
131 * needs to be disabled and the other one enabled.
132 */
133 if (irq == sock->insert_irq) {
134 disable_irq_nosync(sock->insert_irq);
135 enable_irq(sock->eject_irq);
136 } else {
137 disable_irq_nosync(sock->eject_irq);
138 enable_irq(sock->insert_irq);
139 }
140
141 pcmcia_parse_events(&sock->socket, SS_DETECT);
142
143 return IRQ_HANDLED;
144}
145
146static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
147{
148 int ret;
149 unsigned long flags;
150
151 if (sock->stschg_irq != -1) {
152 ret = request_irq(sock->stschg_irq, db1000_pcmcia_stschgirq,
153 0, "pcmcia_stschg", sock);
154 if (ret)
155 return ret;
156 }
157
158 /* Db/Pb1200 have separate per-socket insertion and ejection
159 * interrupts, which should show edge behaviour but don't.
160 * So interrupts are disabled until both insertion and
161	 * ejection handlers have been registered and the currently
162	 * active one has been disabled.
163 */
164 if (sock->board_type == BOARD_TYPE_DB1200) {
165 local_irq_save(flags);
166
167 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
168 IRQF_DISABLED, "pcmcia_insert", sock);
169 if (ret)
170 goto out1;
171
172 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
173 IRQF_DISABLED, "pcmcia_eject", sock);
174 if (ret) {
175 free_irq(sock->insert_irq, sock);
176 local_irq_restore(flags);
177 goto out1;
178 }
179
180 /* disable the currently active one */
181 if (db1200_card_inserted(sock))
182 disable_irq_nosync(sock->insert_irq);
183 else
184 disable_irq_nosync(sock->eject_irq);
185
186 local_irq_restore(flags);
187 } else {
188 /* all other (older) Db1x00 boards use a GPIO to show
189 * card detection status: use both-edge triggers.
190 */
191 set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH);
192 ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq,
193 0, "pcmcia_carddetect", sock);
194
195 if (ret)
196 goto out1;
197 }
198
199 return 0; /* all done */
200
201out1:
202 if (sock->stschg_irq != -1)
203 free_irq(sock->stschg_irq, sock);
204
205 return ret;
206}
207
208static void db1x_pcmcia_free_irqs(struct db1x_pcmcia_sock *sock)
209{
210 if (sock->stschg_irq != -1)
211 free_irq(sock->stschg_irq, sock);
212
213 free_irq(sock->insert_irq, sock);
214 if (sock->eject_irq != -1)
215 free_irq(sock->eject_irq, sock);
216}
217
218/*
219 * configure a PCMCIA socket on the Db1x00 series of boards (and
220 * compatibles).
221 *
222 * 2 external registers are involved:
223 * pcmcia_status (offset 0x04): bits [0:1/2:3]: read card voltage id
224 * pcmcia_control(offset 0x10):
225 * bits[0:1] set vcc for card
226 * bits[2:3] set vpp for card
227 * bit 4: enable data buffers
228 * bit 7: reset# for card
229 * add 8 for second socket.
230 */
231static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
232 struct socket_state_t *state)
233{
234 struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
235 unsigned short cr_clr, cr_set;
236 unsigned int changed;
237 int v, p, ret;
238
239 /* card voltage setup */
240 cr_clr = (0xf << (sock->nr * 8)); /* clear voltage settings */
241 cr_set = 0;
242 v = p = ret = 0;
243
244 switch (state->Vcc) {
245 case 50:
246		++v;	/* fall through */
247	case 33:
248		++v;	/* fall through */
249 case 0:
250 break;
251 default:
252 printk(KERN_INFO "pcmcia%d unsupported Vcc %d\n",
253 sock->nr, state->Vcc);
254 }
255
256 switch (state->Vpp) {
257 case 12:
258		++p;	/* fall through */
259	case 33:
260	case 50:
261		++p;	/* fall through */
262 case 0:
263 break;
264 default:
265 printk(KERN_INFO "pcmcia%d unsupported Vpp %d\n",
266 sock->nr, state->Vpp);
267 }
268
269 /* sanity check: Vpp must be 0, 12, or Vcc */
270 if (((state->Vcc == 33) && (state->Vpp == 50)) ||
271 ((state->Vcc == 50) && (state->Vpp == 33))) {
272 printk(KERN_INFO "pcmcia%d bad Vcc/Vpp combo (%d %d)\n",
273 sock->nr, state->Vcc, state->Vpp);
274 v = p = 0;
275 ret = -EINVAL;
276 }
277
278 /* create new voltage code */
279 cr_set |= ((v << 2) | p) << (sock->nr * 8);
280
281 changed = state->flags ^ sock->old_flags;
282
283 if (changed & SS_RESET) {
284 if (state->flags & SS_RESET) {
285 set_stschg(sock, 0);
286 /* assert reset, disable io buffers */
287 cr_clr |= (1 << (7 + (sock->nr * 8)));
288 cr_clr |= (1 << (4 + (sock->nr * 8)));
289 } else {
290 /* de-assert reset, enable io buffers */
291 cr_set |= 1 << (7 + (sock->nr * 8));
292 cr_set |= 1 << (4 + (sock->nr * 8));
293 }
294 }
295
296 /* update PCMCIA configuration */
297 bcsr_mod(BCSR_PCMCIA, cr_clr, cr_set);
298
299 sock->old_flags = state->flags;
300
301 /* reset was taken away: give card time to initialize properly */
302 if ((changed & SS_RESET) && !(state->flags & SS_RESET)) {
303 msleep(500);
304 set_stschg(sock, 1);
305 }
306
307 return ret;
308}
309
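To make the control-register encoding described above concrete, here is a small worked example (an illustration only, not part of the driver): socket 1 being powered at Vcc=3.3V, Vpp=0V while reset is de-asserted.

	/*
	 * nr = 1, state->Vcc = 33 (=> v = 1), state->Vpp = 0 (=> p = 0):
	 *
	 *   cr_clr  = 0xf << (1 * 8)            = 0x0f00  (old voltage bits)
	 *   cr_set  = ((1 << 2) | 0) << (1 * 8) = 0x0400  (Vcc code 01, Vpp code 00)
	 *   cr_set |= 1 << (7 + 8)                        (de-assert reset#    -> 0x8000)
	 *   cr_set |= 1 << (4 + 8)                        (enable data buffers -> 0x1000)
	 *
	 * bcsr_mod(BCSR_PCMCIA, 0x0f00, 0x9400) then updates the PLD, and
	 * GET_VCC(0x9400, 1) (defined just below) evaluates to 1, so
	 * SS_POWERON is reported for this socket.
	 */
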
310/* VCC bits at [3:2]/[11:10] */
311#define GET_VCC(cr, socknr) \
312 ((((cr) >> 2) >> ((socknr) * 8)) & 3)
313
314/* VS bits at [0:1]/[3:2] */
315#define GET_VS(sr, socknr) \
316 (((sr) >> (2 * (socknr))) & 3)
317
318/* reset bits at [7]/[15] */
319#define GET_RESET(cr, socknr) \
320 ((cr) & (1 << (7 + (8 * (socknr)))))
321
322static int db1x_pcmcia_get_status(struct pcmcia_socket *skt,
323 unsigned int *value)
324{
325 struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
326 unsigned short cr, sr;
327 unsigned int status;
328
329 status = db1x_card_inserted(sock) ? SS_DETECT : 0;
330
331 cr = bcsr_read(BCSR_PCMCIA);
332 sr = bcsr_read(BCSR_STATUS);
333
334 /* PB1100/PB1500: voltage key bits are at [5:4] */
335 if (sock->board_type == BOARD_TYPE_PB1100)
336 sr >>= 4;
337
338 /* determine card type */
339 switch (GET_VS(sr, sock->nr)) {
340 case 0:
341 case 2:
342 status |= SS_3VCARD; /* 3V card */
343 case 3:
344 break; /* 5V card: set nothing */
345 default:
346 status |= SS_XVCARD; /* treated as unsupported in core */
347 }
348
349 /* if Vcc is not zero, we have applied power to a card */
350 status |= GET_VCC(cr, sock->nr) ? SS_POWERON : 0;
351
352 /* reset de-asserted? then we're ready */
353 status |= (GET_RESET(cr, sock->nr)) ? SS_READY : SS_RESET;
354
355 *value = status;
356
357 return 0;
358}
359
360static int db1x_pcmcia_sock_init(struct pcmcia_socket *skt)
361{
362 return 0;
363}
364
365static int db1x_pcmcia_sock_suspend(struct pcmcia_socket *skt)
366{
367 return 0;
368}
369
370static int au1x00_pcmcia_set_io_map(struct pcmcia_socket *skt,
371 struct pccard_io_map *map)
372{
373 struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
374
375 map->start = (u32)sock->virt_io;
376 map->stop = map->start + IO_MAP_SIZE;
377
378 return 0;
379}
380
381static int au1x00_pcmcia_set_mem_map(struct pcmcia_socket *skt,
382 struct pccard_mem_map *map)
383{
384 struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
385
386 if (map->flags & MAP_ATTRIB)
387 map->static_start = sock->phys_attr + map->card_start;
388 else
389 map->static_start = sock->phys_mem + map->card_start;
390
391 return 0;
392}
393
394static struct pccard_operations db1x_pcmcia_operations = {
395 .init = db1x_pcmcia_sock_init,
396 .suspend = db1x_pcmcia_sock_suspend,
397 .get_status = db1x_pcmcia_get_status,
398 .set_socket = db1x_pcmcia_configure,
399 .set_io_map = au1x00_pcmcia_set_io_map,
400 .set_mem_map = au1x00_pcmcia_set_mem_map,
401};
402
403static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
404{
405 struct db1x_pcmcia_sock *sock;
406 struct resource *r;
407 int ret, bid;
408
409 sock = kzalloc(sizeof(struct db1x_pcmcia_sock), GFP_KERNEL);
410 if (!sock)
411 return -ENOMEM;
412
413 sock->nr = pdev->id;
414
415 bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
416 switch (bid) {
417 case BCSR_WHOAMI_PB1500:
418 case BCSR_WHOAMI_PB1500R2:
419 case BCSR_WHOAMI_PB1100:
420 sock->board_type = BOARD_TYPE_PB1100;
421 break;
422 case BCSR_WHOAMI_DB1000 ... BCSR_WHOAMI_PB1550_SDR:
423 sock->board_type = BOARD_TYPE_DEFAULT;
424 break;
425 case BCSR_WHOAMI_PB1200 ... BCSR_WHOAMI_DB1200:
426 sock->board_type = BOARD_TYPE_DB1200;
427 break;
428 default:
429 printk(KERN_INFO "db1xxx-ss: unknown board %d!\n", bid);
430 ret = -ENODEV;
431 goto out0;
432	}
433
434 /*
435	 * Gather the resources necessary (and some optional nice-to-haves)
436	 * to operate a socket:
437	 * IRQs for card detection/ejection, the card itself and optional
438	 * status-change detection, plus the memory areas covered by a
439	 * socket. For the memory areas we require the 32bit "pseudo"
440	 * addresses (see the au1000.h header for more
441	 * information).
442 */
443
444 /* card: irq assigned to the card itself. */
445 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
446 sock->card_irq = r ? r->start : 0;
447
448 /* insert: irq which triggers on card insertion/ejection */
449 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
450 sock->insert_irq = r ? r->start : -1;
451
452	/* stschg: irq which triggers on card status change (optional) */
453 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
454 sock->stschg_irq = r ? r->start : -1;
455
456 /* eject: irq which triggers on ejection (DB1200/PB1200 only) */
457 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "eject");
458 sock->eject_irq = r ? r->start : -1;
459
460 ret = -ENODEV;
461
462 /*
463 * pseudo-attr: The 32bit address of the PCMCIA attribute space
464 * for this socket (usually the 36bit address shifted 4 to the
465 * right).
466 */
467 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
468 if (!r) {
469 printk(KERN_ERR "pcmcia%d has no 'pseudo-attr' resource!\n",
470 sock->nr);
471 goto out0;
472 }
473 sock->phys_attr = r->start;
474
475 /*
476 * pseudo-mem: The 32bit address of the PCMCIA memory space for
477 * this socket (usually the 36bit address shifted 4 to the right)
478 */
479 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
480 if (!r) {
481 printk(KERN_ERR "pcmcia%d has no 'pseudo-mem' resource!\n",
482 sock->nr);
483 goto out0;
484 }
485 sock->phys_mem = r->start;
486
487 /*
488 * pseudo-io: The 32bit address of the PCMCIA IO space for this
489 * socket (usually the 36bit address shifted 4 to the right).
490 */
491 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
492 if (!r) {
493 printk(KERN_ERR "pcmcia%d has no 'pseudo-io' resource!\n",
494 sock->nr);
495 goto out0;
496 }
497 sock->phys_io = r->start;
498
499 /*
500 * PCMCIA client drivers use the inb/outb macros to access
501	 * the IO registers. Since the MIPS implementation of
502	 * inb/outb adds mips_io_port_base to the access address,
503	 * we need to subtract it here because we want to access
504	 * the I/O or MEM address directly, without going through
505	 * this "mips_io_port_base" mechanism.
506 */
507 sock->virt_io = (void *)(ioremap(sock->phys_io, IO_MAP_SIZE) -
508 mips_io_port_base);
509
510 if (!sock->virt_io) {
511 printk(KERN_ERR "pcmcia%d: cannot remap IO area\n",
512 sock->nr);
513 ret = -ENOMEM;
514 goto out0;
515 }
516
517 sock->socket.ops = &db1x_pcmcia_operations;
518 sock->socket.owner = THIS_MODULE;
519 sock->socket.pci_irq = sock->card_irq;
520 sock->socket.features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
521 sock->socket.map_size = MEM_MAP_SIZE;
522 sock->socket.io_offset = (unsigned long)sock->virt_io;
523 sock->socket.dev.parent = &pdev->dev;
524 sock->socket.resource_ops = &pccard_static_ops;
525
526 platform_set_drvdata(pdev, sock);
527
528 ret = db1x_pcmcia_setup_irqs(sock);
529 if (ret) {
530 printk(KERN_ERR "pcmcia%d cannot setup interrupts\n",
531 sock->nr);
532 goto out1;
533 }
534
535 set_stschg(sock, 0);
536
537 ret = pcmcia_register_socket(&sock->socket);
538 if (ret) {
539 printk(KERN_ERR "pcmcia%d failed to register\n", sock->nr);
540 goto out2;
541 }
542
543 printk(KERN_INFO "Alchemy Db/Pb1xxx pcmcia%d @ io/attr/mem %09llx"
544 "(%p) %09llx %09llx card/insert/stschg/eject irqs @ %d "
545 "%d %d %d\n", sock->nr, sock->phys_io, sock->virt_io,
546 sock->phys_attr, sock->phys_mem, sock->card_irq,
547 sock->insert_irq, sock->stschg_irq, sock->eject_irq);
548
549 return 0;
550
551out2:
552 db1x_pcmcia_free_irqs(sock);
553out1:
554 iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
555out0:
556 kfree(sock);
557 return ret;
558}
559
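The probe routine above relies on named platform resources supplied by board code. As a rough, hypothetical sketch (addresses and IRQ numbers are placeholders, not taken from a real Alchemy board file), such a declaration could look like this:

	static struct resource db1x_pcmcia_res[] = {
		{
			.name	= "pcmcia-attr",	/* 32bit pseudo address */
			.start	= 0x04000000,
			.end	= 0x043fffff,
			.flags	= IORESOURCE_MEM,
		}, {
			.name	= "pcmcia-mem",
			.start	= 0x04400000,
			.end	= 0x047fffff,
			.flags	= IORESOURCE_MEM,
		}, {
			.name	= "pcmcia-io",
			.start	= 0x04800000,
			.end	= 0x04800fff,
			.flags	= IORESOURCE_MEM,
		}, {
			.name	= "card",		/* linux irq number */
			.start	= 32,
			.end	= 32,
			.flags	= IORESOURCE_IRQ,
		}, {
			.name	= "insert",
			.start	= 33,
			.end	= 33,
			.flags	= IORESOURCE_IRQ,
		},
		/* "stschg" and "eject" IRQs are optional and omitted here */
	};

	static struct platform_device db1x_pcmcia_dev = {
		.name		= "db1xxx_pcmcia",
		.id		= 0,	/* socket number */
		.resource	= db1x_pcmcia_res,
		.num_resources	= ARRAY_SIZE(db1x_pcmcia_res),
	};

Registering this with platform_device_register(&db1x_pcmcia_dev) would bind it to the "db1xxx_pcmcia" platform driver declared further below.
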
560static int __devexit db1x_pcmcia_socket_remove(struct platform_device *pdev)
561{
562 struct db1x_pcmcia_sock *sock = platform_get_drvdata(pdev);
563
564 db1x_pcmcia_free_irqs(sock);
565 pcmcia_unregister_socket(&sock->socket);
566 iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
567 kfree(sock);
568
569 return 0;
570}
571
572#ifdef CONFIG_PM
573static int db1x_pcmcia_suspend(struct device *dev)
574{
575 return pcmcia_socket_dev_suspend(dev);
576}
577
578static int db1x_pcmcia_resume(struct device *dev)
579{
580 return pcmcia_socket_dev_resume(dev);
581}
582
583static struct dev_pm_ops db1x_pcmcia_pmops = {
584 .resume = db1x_pcmcia_resume,
585 .suspend = db1x_pcmcia_suspend,
586 .thaw = db1x_pcmcia_resume,
587 .freeze = db1x_pcmcia_suspend,
588};
589
590#define DB1XXX_SS_PMOPS &db1x_pcmcia_pmops
591
592#else
593
594#define DB1XXX_SS_PMOPS NULL
595
596#endif
597
598static struct platform_driver db1x_pcmcia_socket_driver = {
599 .driver = {
600 .name = "db1xxx_pcmcia",
601 .owner = THIS_MODULE,
602 .pm = DB1XXX_SS_PMOPS
603 },
604 .probe = db1x_pcmcia_socket_probe,
605 .remove = __devexit_p(db1x_pcmcia_socket_remove),
606};
607
608int __init db1x_pcmcia_socket_load(void)
609{
610 return platform_driver_register(&db1x_pcmcia_socket_driver);
611}
612
613void __exit db1x_pcmcia_socket_unload(void)
614{
615 platform_driver_unregister(&db1x_pcmcia_socket_driver);
616}
617
618module_init(db1x_pcmcia_socket_load);
619module_exit(db1x_pcmcia_socket_unload);
620
621MODULE_LICENSE("GPL");
622MODULE_DESCRIPTION("PCMCIA Socket Services for Alchemy Db/Pb1x00 boards");
623MODULE_AUTHOR("Manuel Lauss");
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 1a4a3c49cc15..0f98be4450b7 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -42,8 +42,6 @@ MODULE_DESCRIPTION("PCMCIA Driver Services");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43 43
44 44
45spinlock_t pcmcia_dev_list_lock;
46
47/*====================================================================*/ 45/*====================================================================*/
48 46
49static void pcmcia_check_driver(struct pcmcia_driver *p_drv) 47static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
@@ -126,9 +124,9 @@ pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count)
126 dynid->id.device_no = device_no; 124 dynid->id.device_no = device_no;
127 memcpy(dynid->id.prod_id_hash, prod_id_hash, sizeof(__u32) * 4); 125 memcpy(dynid->id.prod_id_hash, prod_id_hash, sizeof(__u32) * 4);
128 126
129 spin_lock(&pdrv->dynids.lock); 127 mutex_lock(&pdrv->dynids.lock);
130 list_add_tail(&dynid->node, &pdrv->dynids.list); 128 list_add_tail(&dynid->node, &pdrv->dynids.list);
131 spin_unlock(&pdrv->dynids.lock); 129 mutex_unlock(&pdrv->dynids.lock);
132 130
133 if (get_driver(&pdrv->drv)) { 131 if (get_driver(&pdrv->drv)) {
134 retval = driver_attach(&pdrv->drv); 132 retval = driver_attach(&pdrv->drv);
@@ -146,12 +144,12 @@ pcmcia_free_dynids(struct pcmcia_driver *drv)
146{ 144{
147 struct pcmcia_dynid *dynid, *n; 145 struct pcmcia_dynid *dynid, *n;
148 146
149 spin_lock(&drv->dynids.lock); 147 mutex_lock(&drv->dynids.lock);
150 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { 148 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
151 list_del(&dynid->node); 149 list_del(&dynid->node);
152 kfree(dynid); 150 kfree(dynid);
153 } 151 }
154 spin_unlock(&drv->dynids.lock); 152 mutex_unlock(&drv->dynids.lock);
155} 153}
156 154
157static int 155static int
@@ -182,7 +180,7 @@ int pcmcia_register_driver(struct pcmcia_driver *driver)
182 /* initialize common fields */ 180 /* initialize common fields */
183 driver->drv.bus = &pcmcia_bus_type; 181 driver->drv.bus = &pcmcia_bus_type;
184 driver->drv.owner = driver->owner; 182 driver->drv.owner = driver->owner;
185 spin_lock_init(&driver->dynids.lock); 183 mutex_init(&driver->dynids.lock);
186 INIT_LIST_HEAD(&driver->dynids.list); 184 INIT_LIST_HEAD(&driver->dynids.list);
187 185
188 pr_debug("registering driver %s\n", driver->drv.name); 186 pr_debug("registering driver %s\n", driver->drv.name);
@@ -239,30 +237,21 @@ static void pcmcia_release_function(struct kref *ref)
239static void pcmcia_release_dev(struct device *dev) 237static void pcmcia_release_dev(struct device *dev)
240{ 238{
241 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 239 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
240 int i;
242 dev_dbg(dev, "releasing device\n"); 241 dev_dbg(dev, "releasing device\n");
243 pcmcia_put_socket(p_dev->socket); 242 pcmcia_put_socket(p_dev->socket);
243 for (i = 0; i < 4; i++)
244 kfree(p_dev->prod_id[i]);
244 kfree(p_dev->devname); 245 kfree(p_dev->devname);
245 kref_put(&p_dev->function_config->ref, pcmcia_release_function); 246 kref_put(&p_dev->function_config->ref, pcmcia_release_function);
246 kfree(p_dev); 247 kfree(p_dev);
247} 248}
248 249
249static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc)
250{
251 if (!s->pcmcia_state.device_add_pending) {
252 dev_dbg(&s->dev, "scheduling to add %s secondary"
253 " device to %d\n", mfc ? "mfc" : "pfc", s->sock);
254 s->pcmcia_state.device_add_pending = 1;
255 s->pcmcia_state.mfc_pfc = mfc;
256 schedule_work(&s->device_add);
257 }
258 return;
259}
260 250
261static int pcmcia_device_probe(struct device *dev) 251static int pcmcia_device_probe(struct device *dev)
262{ 252{
263 struct pcmcia_device *p_dev; 253 struct pcmcia_device *p_dev;
264 struct pcmcia_driver *p_drv; 254 struct pcmcia_driver *p_drv;
265 struct pcmcia_device_id *did;
266 struct pcmcia_socket *s; 255 struct pcmcia_socket *s;
267 cistpl_config_t cis_config; 256 cistpl_config_t cis_config;
268 int ret = 0; 257 int ret = 0;
@@ -275,18 +264,6 @@ static int pcmcia_device_probe(struct device *dev)
275 p_drv = to_pcmcia_drv(dev->driver); 264 p_drv = to_pcmcia_drv(dev->driver);
276 s = p_dev->socket; 265 s = p_dev->socket;
277 266
278 /* The PCMCIA code passes the match data in via dev_set_drvdata(dev)
279 * which is an ugly hack. Once the driver probe is called it may
280 * and often will overwrite the match data so we must save it first
281 *
282 * handle pseudo multifunction devices:
283 * there are at most two pseudo multifunction devices.
284 * if we're matching against the first, schedule a
285 * call which will then check whether there are two
286 * pseudo devices, and if not, add the second one.
287 */
288 did = dev_get_drvdata(&p_dev->dev);
289
290 dev_dbg(dev, "trying to bind to %s\n", p_drv->drv.name); 267 dev_dbg(dev, "trying to bind to %s\n", p_drv->drv.name);
291 268
292 if ((!p_drv->probe) || (!p_dev->function_config) || 269 if ((!p_drv->probe) || (!p_dev->function_config) ||
@@ -315,9 +292,11 @@ static int pcmcia_device_probe(struct device *dev)
315 goto put_module; 292 goto put_module;
316 } 293 }
317 294
318 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) && 295 mutex_lock(&s->ops_mutex);
296 if ((s->pcmcia_state.has_pfc) &&
319 (p_dev->socket->device_count == 1) && (p_dev->device_no == 0)) 297 (p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
320 pcmcia_add_device_later(p_dev->socket, 0); 298 pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY);
299 mutex_unlock(&s->ops_mutex);
321 300
322put_module: 301put_module:
323 if (ret) 302 if (ret)
@@ -336,26 +315,27 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
336{ 315{
337 struct pcmcia_device *p_dev; 316 struct pcmcia_device *p_dev;
338 struct pcmcia_device *tmp; 317 struct pcmcia_device *tmp;
339 unsigned long flags;
340 318
341 dev_dbg(leftover ? &leftover->dev : &s->dev, 319 dev_dbg(leftover ? &leftover->dev : &s->dev,
342 "pcmcia_card_remove(%d) %s\n", s->sock, 320 "pcmcia_card_remove(%d) %s\n", s->sock,
343 leftover ? leftover->devname : ""); 321 leftover ? leftover->devname : "");
344 322
323 mutex_lock(&s->ops_mutex);
345 if (!leftover) 324 if (!leftover)
346 s->device_count = 0; 325 s->device_count = 0;
347 else 326 else
348 s->device_count = 1; 327 s->device_count = 1;
328 mutex_unlock(&s->ops_mutex);
349 329
350 /* unregister all pcmcia_devices registered with this socket, except leftover */ 330 /* unregister all pcmcia_devices registered with this socket, except leftover */
351 list_for_each_entry_safe(p_dev, tmp, &s->devices_list, socket_device_list) { 331 list_for_each_entry_safe(p_dev, tmp, &s->devices_list, socket_device_list) {
352 if (p_dev == leftover) 332 if (p_dev == leftover)
353 continue; 333 continue;
354 334
355 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 335 mutex_lock(&s->ops_mutex);
356 list_del(&p_dev->socket_device_list); 336 list_del(&p_dev->socket_device_list);
357 p_dev->_removed = 1; 337 p_dev->_removed = 1;
358 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 338 mutex_unlock(&s->ops_mutex);
359 339
360 dev_dbg(&p_dev->dev, "unregistering device\n"); 340 dev_dbg(&p_dev->dev, "unregistering device\n");
361 device_unregister(&p_dev->dev); 341 device_unregister(&p_dev->dev);
@@ -368,7 +348,6 @@ static int pcmcia_device_remove(struct device *dev)
368{ 348{
369 struct pcmcia_device *p_dev; 349 struct pcmcia_device *p_dev;
370 struct pcmcia_driver *p_drv; 350 struct pcmcia_driver *p_drv;
371 struct pcmcia_device_id *did;
372 int i; 351 int i;
373 352
374 p_dev = to_pcmcia_dev(dev); 353 p_dev = to_pcmcia_dev(dev);
@@ -380,9 +359,8 @@ static int pcmcia_device_remove(struct device *dev)
380 * pseudo multi-function card, we need to unbind 359 * pseudo multi-function card, we need to unbind
381 * all devices 360 * all devices
382 */ 361 */
383 did = dev_get_drvdata(&p_dev->dev); 362 if ((p_dev->socket->pcmcia_state.has_pfc) &&
384 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) && 363 (p_dev->socket->device_count > 0) &&
385 (p_dev->socket->device_count != 0) &&
386 (p_dev->device_no == 0)) 364 (p_dev->device_no == 0))
387 pcmcia_card_remove(p_dev->socket, p_dev); 365 pcmcia_card_remove(p_dev->socket, p_dev);
388 366
@@ -431,16 +409,20 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
431 409
432 if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, 410 if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL,
433 CISTPL_MANFID, &manf_id)) { 411 CISTPL_MANFID, &manf_id)) {
412 mutex_lock(&p_dev->socket->ops_mutex);
434 p_dev->manf_id = manf_id.manf; 413 p_dev->manf_id = manf_id.manf;
435 p_dev->card_id = manf_id.card; 414 p_dev->card_id = manf_id.card;
436 p_dev->has_manf_id = 1; 415 p_dev->has_manf_id = 1;
437 p_dev->has_card_id = 1; 416 p_dev->has_card_id = 1;
417 mutex_unlock(&p_dev->socket->ops_mutex);
438 } 418 }
439 419
440 if (!pccard_read_tuple(p_dev->socket, p_dev->func, 420 if (!pccard_read_tuple(p_dev->socket, p_dev->func,
441 CISTPL_FUNCID, &func_id)) { 421 CISTPL_FUNCID, &func_id)) {
422 mutex_lock(&p_dev->socket->ops_mutex);
442 p_dev->func_id = func_id.func; 423 p_dev->func_id = func_id.func;
443 p_dev->has_func_id = 1; 424 p_dev->has_func_id = 1;
425 mutex_unlock(&p_dev->socket->ops_mutex);
444 } else { 426 } else {
445 /* rule of thumb: cards with no FUNCID, but with 427 /* rule of thumb: cards with no FUNCID, but with
446 * common memory device geometry information, are 428 * common memory device geometry information, are
@@ -457,17 +439,21 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
457 dev_dbg(&p_dev->dev, 439 dev_dbg(&p_dev->dev,
458 "mem device geometry probably means " 440 "mem device geometry probably means "
459 "FUNCID_MEMORY\n"); 441 "FUNCID_MEMORY\n");
442 mutex_lock(&p_dev->socket->ops_mutex);
460 p_dev->func_id = CISTPL_FUNCID_MEMORY; 443 p_dev->func_id = CISTPL_FUNCID_MEMORY;
461 p_dev->has_func_id = 1; 444 p_dev->has_func_id = 1;
445 mutex_unlock(&p_dev->socket->ops_mutex);
462 } 446 }
463 kfree(devgeo); 447 kfree(devgeo);
464 } 448 }
465 449
466 if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_VERS_1, 450 if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_VERS_1,
467 vers1)) { 451 vers1)) {
452 mutex_lock(&p_dev->socket->ops_mutex);
468 for (i = 0; i < min_t(unsigned int, 4, vers1->ns); i++) { 453 for (i = 0; i < min_t(unsigned int, 4, vers1->ns); i++) {
469 char *tmp; 454 char *tmp;
470 unsigned int length; 455 unsigned int length;
456 char *new;
471 457
472 tmp = vers1->str + vers1->ofs[i]; 458 tmp = vers1->str + vers1->ofs[i];
473 459
@@ -475,14 +461,17 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
475 if ((length < 2) || (length > 255)) 461 if ((length < 2) || (length > 255))
476 continue; 462 continue;
477 463
478 p_dev->prod_id[i] = kmalloc(sizeof(char) * length, 464 new = kmalloc(sizeof(char) * length, GFP_KERNEL);
479 GFP_KERNEL); 465 if (!new)
480 if (!p_dev->prod_id[i])
481 continue; 466 continue;
482 467
483 p_dev->prod_id[i] = strncpy(p_dev->prod_id[i], 468 new = strncpy(new, tmp, length);
484 tmp, length); 469
470 tmp = p_dev->prod_id[i];
471 p_dev->prod_id[i] = new;
472 kfree(tmp);
485 } 473 }
474 mutex_unlock(&p_dev->socket->ops_mutex);
486 } 475 }
487 476
488 kfree(vers1); 477 kfree(vers1);
@@ -502,7 +491,7 @@ static DEFINE_MUTEX(device_add_lock);
502struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function) 491struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
503{ 492{
504 struct pcmcia_device *p_dev, *tmp_dev; 493 struct pcmcia_device *p_dev, *tmp_dev;
505 unsigned long flags; 494 int i;
506 495
507 s = pcmcia_get_socket(s); 496 s = pcmcia_get_socket(s);
508 if (!s) 497 if (!s)
@@ -512,16 +501,19 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
512 501
513 pr_debug("adding device to %d, function %d\n", s->sock, function); 502 pr_debug("adding device to %d, function %d\n", s->sock, function);
514 503
515 /* max of 4 devices per card */
516 if (s->device_count == 4)
517 goto err_put;
518
519 p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL); 504 p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL);
520 if (!p_dev) 505 if (!p_dev)
521 goto err_put; 506 goto err_put;
522 507
523 p_dev->socket = s; 508 mutex_lock(&s->ops_mutex);
524 p_dev->device_no = (s->device_count++); 509 p_dev->device_no = (s->device_count++);
510 mutex_unlock(&s->ops_mutex);
511
512 /* max of 2 devices per card */
513 if (p_dev->device_no >= 2)
514 goto err_free;
515
516 p_dev->socket = s;
525 p_dev->func = function; 517 p_dev->func = function;
526 518
527 p_dev->dev.bus = &pcmcia_bus_type; 519 p_dev->dev.bus = &pcmcia_bus_type;
@@ -538,7 +530,7 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
538 goto err_free; 530 goto err_free;
539 dev_dbg(&p_dev->dev, "devname is %s\n", p_dev->devname); 531 dev_dbg(&p_dev->dev, "devname is %s\n", p_dev->devname);
540 532
541 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 533 mutex_lock(&s->ops_mutex);
542 534
543 /* 535 /*
544 * p_dev->function_config must be the same for all card functions. 536 * p_dev->function_config must be the same for all card functions.
@@ -556,7 +548,7 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
556 /* Add to the list in pcmcia_bus_socket */ 548 /* Add to the list in pcmcia_bus_socket */
557 list_add(&p_dev->socket_device_list, &s->devices_list); 549 list_add(&p_dev->socket_device_list, &s->devices_list);
558 550
559 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 551 mutex_unlock(&s->ops_mutex);
560 552
561 if (!p_dev->function_config) { 553 if (!p_dev->function_config) {
562 dev_dbg(&p_dev->dev, "creating config_t\n"); 554 dev_dbg(&p_dev->dev, "creating config_t\n");
@@ -581,14 +573,19 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
581 return p_dev; 573 return p_dev;
582 574
583 err_unreg: 575 err_unreg:
584 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 576 mutex_lock(&s->ops_mutex);
585 list_del(&p_dev->socket_device_list); 577 list_del(&p_dev->socket_device_list);
586 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 578 mutex_unlock(&s->ops_mutex);
587 579
588 err_free: 580 err_free:
581 mutex_lock(&s->ops_mutex);
582 s->device_count--;
583 mutex_unlock(&s->ops_mutex);
584
585 for (i = 0; i < 4; i++)
586 kfree(p_dev->prod_id[i]);
589 kfree(p_dev->devname); 587 kfree(p_dev->devname);
590 kfree(p_dev); 588 kfree(p_dev);
591 s->device_count--;
592 err_put: 589 err_put:
593 mutex_unlock(&device_add_lock); 590 mutex_unlock(&device_add_lock);
594 pcmcia_put_socket(s); 591 pcmcia_put_socket(s);
@@ -601,19 +598,23 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
601{ 598{
602 cistpl_longlink_mfc_t mfc; 599 cistpl_longlink_mfc_t mfc;
603 unsigned int no_funcs, i, no_chains; 600 unsigned int no_funcs, i, no_chains;
604 int ret = 0; 601 int ret = -EAGAIN;
605 602
603 mutex_lock(&s->ops_mutex);
606 if (!(s->resource_setup_done)) { 604 if (!(s->resource_setup_done)) {
607 dev_dbg(&s->dev, 605 dev_dbg(&s->dev,
608 "no resources available, delaying card_add\n"); 606 "no resources available, delaying card_add\n");
607 mutex_unlock(&s->ops_mutex);
609 return -EAGAIN; /* try again, but later... */ 608 return -EAGAIN; /* try again, but later... */
610 } 609 }
611 610
612 if (pcmcia_validate_mem(s)) { 611 if (pcmcia_validate_mem(s)) {
613 dev_dbg(&s->dev, "validating mem resources failed, " 612 dev_dbg(&s->dev, "validating mem resources failed, "
614 "delaying card_add\n"); 613 "delaying card_add\n");
614 mutex_unlock(&s->ops_mutex);
615 return -EAGAIN; /* try again, but later... */ 615 return -EAGAIN; /* try again, but later... */
616 } 616 }
617 mutex_unlock(&s->ops_mutex);
617 618
618 ret = pccard_validate_cis(s, &no_chains); 619 ret = pccard_validate_cis(s, &no_chains);
619 if (ret || !no_chains) { 620 if (ret || !no_chains) {
@@ -634,17 +635,7 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
634} 635}
635 636
636 637
637static void pcmcia_delayed_add_device(struct work_struct *work) 638static int pcmcia_requery_callback(struct device *dev, void * _data)
638{
639 struct pcmcia_socket *s =
640 container_of(work, struct pcmcia_socket, device_add);
641 dev_dbg(&s->dev, "adding additional device to %d\n", s->sock);
642 pcmcia_device_add(s, s->pcmcia_state.mfc_pfc);
643 s->pcmcia_state.device_add_pending = 0;
644 s->pcmcia_state.mfc_pfc = 0;
645}
646
647static int pcmcia_requery(struct device *dev, void * _data)
648{ 639{
649 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 640 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
650 if (!p_dev->dev.driver) { 641 if (!p_dev->dev.driver) {
@@ -655,45 +646,67 @@ static int pcmcia_requery(struct device *dev, void * _data)
655 return 0; 646 return 0;
656} 647}
657 648
658static void pcmcia_bus_rescan(struct pcmcia_socket *skt, int new_cis)
659{
660 int no_devices = 0;
661 int ret = 0;
662 unsigned long flags;
663 649
664 /* must be called with skt_mutex held */ 650static void pcmcia_requery(struct pcmcia_socket *s)
665 dev_dbg(&skt->dev, "re-scanning socket %d\n", skt->sock); 651{
652 int present, has_pfc;
666 653
667 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 654 mutex_lock(&s->ops_mutex);
668 if (list_empty(&skt->devices_list)) 655 present = s->pcmcia_state.present;
669 no_devices = 1; 656 mutex_unlock(&s->ops_mutex);
670 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
671 657
672 /* If this is because of a CIS override, start over */ 658 if (!present)
673 if (new_cis && !no_devices) 659 return;
674 pcmcia_card_remove(skt, NULL);
675 660
676 /* if no devices were added for this socket yet because of 661 if (s->functions == 0) {
677 * missing resource information or other trouble, we need to 662 pcmcia_card_add(s);
678 * do this now. */ 663 return;
679 if (no_devices || new_cis) {
680 ret = pcmcia_card_add(skt);
681 if (ret)
682 return;
683 } 664 }
684 665
685 /* some device information might have changed because of a CIS 666 /* some device information might have changed because of a CIS
686 * update or because we can finally read it correctly... so 667 * update or because we can finally read it correctly... so
687 * determine it again, overwriting old values if necessary. */ 668 * determine it again, overwriting old values if necessary. */
688 bus_for_each_dev(&pcmcia_bus_type, NULL, NULL, pcmcia_requery); 669 bus_for_each_dev(&pcmcia_bus_type, NULL, NULL, pcmcia_requery_callback);
670
671 /* if the CIS changed, we need to check whether the number of
672 * functions changed. */
673 if (s->fake_cis) {
674 int old_funcs, new_funcs;
675 cistpl_longlink_mfc_t mfc;
676
677 /* does this cis override add or remove functions? */
678 old_funcs = s->functions;
679
680 if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC,
681 &mfc))
682 new_funcs = mfc.nfn;
683 else
684 new_funcs = 1;
685 if (old_funcs > new_funcs) {
686 pcmcia_card_remove(s, NULL);
687 pcmcia_card_add(s);
688 } else if (new_funcs > old_funcs) {
689 s->functions = new_funcs;
690 pcmcia_device_add(s, 1);
691 }
692 }
693
694 /* If the PCMCIA device consists of two pseudo devices,
695 * call pcmcia_device_add() -- which will fail if both
696 * devices are already registered. */
697 mutex_lock(&s->ops_mutex);
698 has_pfc = s->pcmcia_state.has_pfc;
699 mutex_unlock(&s->ops_mutex);
700 if (has_pfc)
701 pcmcia_device_add(s, 0);
689 702
690 /* we re-scan all devices, not just the ones connected to this 703 /* we re-scan all devices, not just the ones connected to this
691 * socket. This does not matter, though. */ 704 * socket. This does not matter, though. */
692 ret = bus_rescan_devices(&pcmcia_bus_type); 705 if (bus_rescan_devices(&pcmcia_bus_type))
693 if (ret) 706 dev_warn(&s->dev, "rescanning the bus failed\n");
694 printk(KERN_INFO "pcmcia: bus_rescan_devices failed\n");
695} 707}
696 708
709
697#ifdef CONFIG_PCMCIA_LOAD_CIS 710#ifdef CONFIG_PCMCIA_LOAD_CIS
698 711
699/** 712/**
@@ -710,9 +723,6 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
710 struct pcmcia_socket *s = dev->socket; 723 struct pcmcia_socket *s = dev->socket;
711 const struct firmware *fw; 724 const struct firmware *fw;
712 int ret = -ENOMEM; 725 int ret = -ENOMEM;
713 int no_funcs;
714 int old_funcs;
715 cistpl_longlink_mfc_t mfc;
716 726
717 if (!filename) 727 if (!filename)
718 return -EINVAL; 728 return -EINVAL;
@@ -739,19 +749,8 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
739 /* update information */ 749 /* update information */
740 pcmcia_device_query(dev); 750 pcmcia_device_query(dev);
741 751
742 /* does this cis override add or remove functions? */ 752 /* requery (as number of functions might have changed) */
743 old_funcs = s->functions; 753 pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY);
744
745 if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc))
746 no_funcs = mfc.nfn;
747 else
748 no_funcs = 1;
749 s->functions = no_funcs;
750
751 if (old_funcs > no_funcs)
752 pcmcia_card_remove(s, dev);
753 else if (no_funcs > old_funcs)
754 pcmcia_add_device_later(s, 1);
755 } 754 }
756 release: 755 release:
757 release_firmware(fw); 756 release_firmware(fw);
@@ -818,9 +817,14 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
818 if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) { 817 if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) {
819 if (dev->device_no != did->device_no) 818 if (dev->device_no != did->device_no)
820 return 0; 819 return 0;
820 mutex_lock(&dev->socket->ops_mutex);
821 dev->socket->pcmcia_state.has_pfc = 1;
822 mutex_unlock(&dev->socket->ops_mutex);
821 } 823 }
822 824
823 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) { 825 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) {
826 int ret;
827
824 if ((!dev->has_func_id) || (dev->func_id != did->func_id)) 828 if ((!dev->has_func_id) || (dev->func_id != did->func_id))
825 return 0; 829 return 0;
826 830
@@ -835,10 +839,15 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
835 * after it has re-checked that there is no possible module 839 * after it has re-checked that there is no possible module
836 * with a prod_id/manf_id/card_id match. 840 * with a prod_id/manf_id/card_id match.
837 */ 841 */
838 dev_dbg(&dev->dev, 842 mutex_lock(&dev->socket->ops_mutex);
839 "skipping FUNC_ID match until userspace interaction\n"); 843 ret = dev->allow_func_id_match;
840 if (!dev->allow_func_id_match) 844 mutex_unlock(&dev->socket->ops_mutex);
845
846 if (!ret) {
847 dev_dbg(&dev->dev,
848 "skipping FUNC_ID match until userspace ACK\n");
841 return 0; 849 return 0;
850 }
842 } 851 }
843 852
844 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { 853 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
@@ -859,8 +868,6 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
859 return 0; 868 return 0;
860 } 869 }
861 870
862 dev_set_drvdata(&dev->dev, did);
863
864 return 1; 871 return 1;
865} 872}
866 873
@@ -873,16 +880,16 @@ static int pcmcia_bus_match(struct device *dev, struct device_driver *drv)
873 struct pcmcia_dynid *dynid; 880 struct pcmcia_dynid *dynid;
874 881
875 /* match dynamic devices first */ 882 /* match dynamic devices first */
876 spin_lock(&p_drv->dynids.lock); 883 mutex_lock(&p_drv->dynids.lock);
877 list_for_each_entry(dynid, &p_drv->dynids.list, node) { 884 list_for_each_entry(dynid, &p_drv->dynids.list, node) {
878 dev_dbg(dev, "trying to match to %s\n", drv->name); 885 dev_dbg(dev, "trying to match to %s\n", drv->name);
879 if (pcmcia_devmatch(p_dev, &dynid->id)) { 886 if (pcmcia_devmatch(p_dev, &dynid->id)) {
880 dev_dbg(dev, "matched to %s\n", drv->name); 887 dev_dbg(dev, "matched to %s\n", drv->name);
881 spin_unlock(&p_drv->dynids.lock); 888 mutex_unlock(&p_drv->dynids.lock);
882 return 1; 889 return 1;
883 } 890 }
884 } 891 }
885 spin_unlock(&p_drv->dynids.lock); 892 mutex_unlock(&p_drv->dynids.lock);
886 893
887#ifdef CONFIG_PCMCIA_IOCTL 894#ifdef CONFIG_PCMCIA_IOCTL
888 /* matching by cardmgr */ 895 /* matching by cardmgr */
@@ -970,13 +977,14 @@ static int runtime_suspend(struct device *dev)
970 return rc; 977 return rc;
971} 978}
972 979
973static void runtime_resume(struct device *dev) 980static int runtime_resume(struct device *dev)
974{ 981{
975 int rc; 982 int rc;
976 983
977 down(&dev->sem); 984 down(&dev->sem);
978 rc = pcmcia_dev_resume(dev); 985 rc = pcmcia_dev_resume(dev);
979 up(&dev->sem); 986 up(&dev->sem);
987 return rc;
980} 988}
981 989
982/************************ per-device sysfs output ***************************/ 990/************************ per-device sysfs output ***************************/
@@ -1027,7 +1035,7 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
1027 if ((!p_dev->suspended) && !strncmp(buf, "off", 3)) 1035 if ((!p_dev->suspended) && !strncmp(buf, "off", 3))
1028 ret = runtime_suspend(dev); 1036 ret = runtime_suspend(dev);
1029 else if (p_dev->suspended && !strncmp(buf, "on", 2)) 1037 else if (p_dev->suspended && !strncmp(buf, "on", 2))
1030 runtime_resume(dev); 1038 ret = runtime_resume(dev);
1031 1039
1032 return ret ? ret : count; 1040 return ret ? ret : count;
1033} 1041}
@@ -1059,19 +1067,14 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
1059 struct device_attribute *attr, const char *buf, size_t count) 1067 struct device_attribute *attr, const char *buf, size_t count)
1060{ 1068{
1061 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 1069 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
1062 int ret;
1063 1070
1064 if (!count) 1071 if (!count)
1065 return -EINVAL; 1072 return -EINVAL;
1066 1073
1067 mutex_lock(&p_dev->socket->skt_mutex); 1074 mutex_lock(&p_dev->socket->ops_mutex);
1068 p_dev->allow_func_id_match = 1; 1075 p_dev->allow_func_id_match = 1;
1069 mutex_unlock(&p_dev->socket->skt_mutex); 1076 mutex_unlock(&p_dev->socket->ops_mutex);
1070 1077 pcmcia_parse_uevents(p_dev->socket, PCMCIA_UEVENT_REQUERY);
1071 ret = bus_rescan_devices(&pcmcia_bus_type);
1072 if (ret)
1073 printk(KERN_INFO "pcmcia: bus_rescan_devices failed after "
1074 "allowing func_id matches\n");
1075 1078
1076 return count; 1079 return count;
1077} 1080}
@@ -1099,8 +1102,13 @@ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
1099 struct pcmcia_driver *p_drv = NULL; 1102 struct pcmcia_driver *p_drv = NULL;
1100 int ret = 0; 1103 int ret = 0;
1101 1104
1102 if (p_dev->suspended) 1105 mutex_lock(&p_dev->socket->ops_mutex);
1106 if (p_dev->suspended) {
1107 mutex_unlock(&p_dev->socket->ops_mutex);
1103 return 0; 1108 return 0;
1109 }
1110 p_dev->suspended = 1;
1111 mutex_unlock(&p_dev->socket->ops_mutex);
1104 1112
1105 dev_dbg(dev, "suspending\n"); 1113 dev_dbg(dev, "suspending\n");
1106 1114
@@ -1117,6 +1125,9 @@ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
1117 "pcmcia: device %s (driver %s) did " 1125 "pcmcia: device %s (driver %s) did "
1118 "not want to go to sleep (%d)\n", 1126 "not want to go to sleep (%d)\n",
1119 p_dev->devname, p_drv->drv.name, ret); 1127 p_dev->devname, p_drv->drv.name, ret);
1128 mutex_lock(&p_dev->socket->ops_mutex);
1129 p_dev->suspended = 0;
1130 mutex_unlock(&p_dev->socket->ops_mutex);
1120 goto out; 1131 goto out;
1121 } 1132 }
1122 } 1133 }
@@ -1127,8 +1138,6 @@ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
1127 } 1138 }
1128 1139
1129 out: 1140 out:
1130 if (!ret)
1131 p_dev->suspended = 1;
1132 return ret; 1141 return ret;
1133} 1142}
1134 1143
@@ -1139,8 +1148,13 @@ static int pcmcia_dev_resume(struct device *dev)
1139 struct pcmcia_driver *p_drv = NULL; 1148 struct pcmcia_driver *p_drv = NULL;
1140 int ret = 0; 1149 int ret = 0;
1141 1150
1142 if (!p_dev->suspended) 1151 mutex_lock(&p_dev->socket->ops_mutex);
1152 if (!p_dev->suspended) {
1153 mutex_unlock(&p_dev->socket->ops_mutex);
1143 return 0; 1154 return 0;
1155 }
1156 p_dev->suspended = 0;
1157 mutex_unlock(&p_dev->socket->ops_mutex);
1144 1158
1145 dev_dbg(dev, "resuming\n"); 1159 dev_dbg(dev, "resuming\n");
1146 1160
@@ -1161,8 +1175,6 @@ static int pcmcia_dev_resume(struct device *dev)
1161 ret = p_drv->resume(p_dev); 1175 ret = p_drv->resume(p_dev);
1162 1176
1163 out: 1177 out:
1164 if (!ret)
1165 p_dev->suspended = 0;
1166 return ret; 1178 return ret;
1167} 1179}
1168 1180
@@ -1237,13 +1249,22 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1237 1249
1238 switch (event) { 1250 switch (event) {
1239 case CS_EVENT_CARD_REMOVAL: 1251 case CS_EVENT_CARD_REMOVAL:
1252 mutex_lock(&s->ops_mutex);
1240 s->pcmcia_state.present = 0; 1253 s->pcmcia_state.present = 0;
1254 mutex_unlock(&s->ops_mutex);
1241 pcmcia_card_remove(skt, NULL); 1255 pcmcia_card_remove(skt, NULL);
1242 handle_event(skt, event); 1256 handle_event(skt, event);
1257 mutex_lock(&s->ops_mutex);
1258 destroy_cis_cache(s);
1259 mutex_unlock(&s->ops_mutex);
1243 break; 1260 break;
1244 1261
1245 case CS_EVENT_CARD_INSERTION: 1262 case CS_EVENT_CARD_INSERTION:
1263 mutex_lock(&s->ops_mutex);
1264 s->pcmcia_state.has_pfc = 0;
1246 s->pcmcia_state.present = 1; 1265 s->pcmcia_state.present = 1;
1266 destroy_cis_cache(s); /* to be on the safe side... */
1267 mutex_unlock(&s->ops_mutex);
1247 pcmcia_card_add(skt); 1268 pcmcia_card_add(skt);
1248 handle_event(skt, event); 1269 handle_event(skt, event);
1249 break; 1270 break;
@@ -1251,8 +1272,24 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1251 case CS_EVENT_EJECTION_REQUEST: 1272 case CS_EVENT_EJECTION_REQUEST:
1252 break; 1273 break;
1253 1274
1254 case CS_EVENT_PM_SUSPEND:
1255 case CS_EVENT_PM_RESUME: 1275 case CS_EVENT_PM_RESUME:
1276 if (verify_cis_cache(skt) != 0) {
1277 dev_dbg(&skt->dev, "cis mismatch - different card\n");
1278 /* first, remove the card */
1279 ds_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
1280 mutex_lock(&s->ops_mutex);
1281 destroy_cis_cache(skt);
1282 kfree(skt->fake_cis);
1283 skt->fake_cis = NULL;
1284 mutex_unlock(&s->ops_mutex);
1285 /* now, add the new card */
1286 ds_event(skt, CS_EVENT_CARD_INSERTION,
1287 CS_EVENT_PRI_LOW);
1288 }
1289 handle_event(skt, event);
1290 break;
1291
1292 case CS_EVENT_PM_SUSPEND:
1256 case CS_EVENT_RESET_PHYSICAL: 1293 case CS_EVENT_RESET_PHYSICAL:
1257 case CS_EVENT_CARD_RESET: 1294 case CS_EVENT_CARD_RESET:
1258 default: 1295 default:
@@ -1275,9 +1312,13 @@ struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
1275 if (!p_dev) 1312 if (!p_dev)
1276 return NULL; 1313 return NULL;
1277 1314
1315 mutex_lock(&p_dev->socket->ops_mutex);
1278 if (!p_dev->socket->pcmcia_state.present) 1316 if (!p_dev->socket->pcmcia_state.present)
1279 goto out; 1317 goto out;
1280 1318
1319 if (p_dev->socket->pcmcia_state.dead)
1320 goto out;
1321
1281 if (p_dev->_removed) 1322 if (p_dev->_removed)
1282 goto out; 1323 goto out;
1283 1324
@@ -1286,6 +1327,7 @@ struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
1286 1327
1287 ret = p_dev; 1328 ret = p_dev;
1288 out: 1329 out:
1330 mutex_unlock(&p_dev->socket->ops_mutex);
1289 pcmcia_put_dev(p_dev); 1331 pcmcia_put_dev(p_dev);
1290 return ret; 1332 return ret;
1291} 1333}
@@ -1295,7 +1337,8 @@ EXPORT_SYMBOL(pcmcia_dev_present);
1295static struct pcmcia_callback pcmcia_bus_callback = { 1337static struct pcmcia_callback pcmcia_bus_callback = {
1296 .owner = THIS_MODULE, 1338 .owner = THIS_MODULE,
1297 .event = ds_event, 1339 .event = ds_event,
1298 .requery = pcmcia_bus_rescan, 1340 .requery = pcmcia_requery,
1341 .validate = pccard_validate_cis,
1299 .suspend = pcmcia_bus_suspend, 1342 .suspend = pcmcia_bus_suspend,
1300 .resume = pcmcia_bus_resume, 1343 .resume = pcmcia_bus_resume,
1301}; 1344};
@@ -1313,17 +1356,17 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
1313 return -ENODEV; 1356 return -ENODEV;
1314 } 1357 }
1315 1358
1316 /* 1359 ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
1317 * Ugly. But we want to wait for the socket threads to have started up. 1360 if (ret) {
1318 * We really should let the drivers themselves drive some of this.. 1361 dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n");
1319 */ 1362 pcmcia_put_socket(socket);
1320 msleep(250); 1363 return ret;
1364 }
1321 1365
1322#ifdef CONFIG_PCMCIA_IOCTL 1366#ifdef CONFIG_PCMCIA_IOCTL
1323 init_waitqueue_head(&socket->queue); 1367 init_waitqueue_head(&socket->queue);
1324#endif 1368#endif
1325 INIT_LIST_HEAD(&socket->devices_list); 1369 INIT_LIST_HEAD(&socket->devices_list);
1326 INIT_WORK(&socket->device_add, pcmcia_delayed_add_device);
1327 memset(&socket->pcmcia_state, 0, sizeof(u8)); 1370 memset(&socket->pcmcia_state, 0, sizeof(u8));
1328 socket->device_count = 0; 1371 socket->device_count = 0;
1329 1372
@@ -1345,14 +1388,20 @@ static void pcmcia_bus_remove_socket(struct device *dev,
1345 if (!socket) 1388 if (!socket)
1346 return; 1389 return;
1347 1390
1391 mutex_lock(&socket->ops_mutex);
1348 socket->pcmcia_state.dead = 1; 1392 socket->pcmcia_state.dead = 1;
1393 mutex_unlock(&socket->ops_mutex);
1394
1349 pccard_register_pcmcia(socket, NULL); 1395 pccard_register_pcmcia(socket, NULL);
1350 1396
1351 /* unregister any unbound devices */ 1397 /* unregister any unbound devices */
1352 mutex_lock(&socket->skt_mutex); 1398 mutex_lock(&socket->skt_mutex);
1353 pcmcia_card_remove(socket, NULL); 1399 pcmcia_card_remove(socket, NULL);
1400 release_cis_mem(socket);
1354 mutex_unlock(&socket->skt_mutex); 1401 mutex_unlock(&socket->skt_mutex);
1355 1402
1403 sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr);
1404
1356 pcmcia_put_socket(socket); 1405 pcmcia_put_socket(socket);
1357 1406
1358 return; 1407 return;
@@ -1383,8 +1432,6 @@ static int __init init_pcmcia_bus(void)
1383{ 1432{
1384 int ret; 1433 int ret;
1385 1434
1386 spin_lock_init(&pcmcia_dev_list_lock);
1387
1388 ret = bus_register(&pcmcia_bus_type); 1435 ret = bus_register(&pcmcia_bus_type);
1389 if (ret < 0) { 1436 if (ret < 0) {
1390 printk(KERN_WARNING "pcmcia: bus_register error: %d\n", ret); 1437 printk(KERN_WARNING "pcmcia: bus_register error: %d\n", ret);
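The ds.c hunks above move the p_dev->suspended bookkeeping under the socket's ops_mutex and flip the flag before the driver callback runs, instead of at the out: label afterwards. A rough userspace sketch of that flip-under-lock-then-call pattern follows; the pthread mutex and the struct device/resume_cb names are stand-ins for the kernel objects, not the real API.

#include <pthread.h>
#include <stdio.h>

struct device {
	pthread_mutex_t ops_mutex;		/* stands in for socket->ops_mutex */
	int suspended;				/* stands in for p_dev->suspended */
	int (*resume_cb)(struct device *dev);	/* stands in for p_drv->resume */
};

/* Flip the state flag under the lock, drop the lock, then call the
 * possibly-sleeping driver callback -- the shape of pcmcia_dev_resume(). */
static int device_resume(struct device *dev)
{
	pthread_mutex_lock(&dev->ops_mutex);
	if (!dev->suspended) {
		pthread_mutex_unlock(&dev->ops_mutex);
		return 0;			/* nothing to do */
	}
	dev->suspended = 0;			/* cleared before the callback runs */
	pthread_mutex_unlock(&dev->ops_mutex);

	return dev->resume_cb ? dev->resume_cb(dev) : 0;
}

static int demo_resume(struct device *dev)
{
	printf("resuming %p\n", (void *)dev);
	return 0;
}

int main(void)
{
	struct device dev = {
		.ops_mutex = PTHREAD_MUTEX_INITIALIZER,
		.suspended = 1,
		.resume_cb = demo_resume,
	};

	return device_resume(&dev);
}

Dropping the lock before the callback matters because the driver's own suspend/resume routine may sleep or take further locks of its own.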
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index d187ba4c5e0e..89cfddca089a 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -347,7 +347,7 @@ static int __devexit electra_cf_remove(struct of_device *ofdev)
347 return 0; 347 return 0;
348} 348}
349 349
350static struct of_device_id electra_cf_match[] = { 350static const struct of_device_id electra_cf_match[] = {
351 { 351 {
352 .compatible = "electra-cf", 352 .compatible = "electra-cf",
353 }, 353 },
diff --git a/drivers/pcmcia/i82365.h b/drivers/pcmcia/i82365.h
index 622860c689d9..849ef1b5d687 100644
--- a/drivers/pcmcia/i82365.h
+++ b/drivers/pcmcia/i82365.h
@@ -77,8 +77,8 @@
77#define I365_VPP2_5V 0x04 /* Vpp2 = 5.0v */ 77#define I365_VPP2_5V 0x04 /* Vpp2 = 5.0v */
78#define I365_VPP2_12V 0x08 /* Vpp2 = 12.0v */ 78#define I365_VPP2_12V 0x08 /* Vpp2 = 12.0v */
79#define I365_VPP1_MASK 0x03 /* Mask for turning off Vpp1 */ 79#define I365_VPP1_MASK 0x03 /* Mask for turning off Vpp1 */
80#define I365_VPP1_5V 0x01 /* Vpp2 = 5.0v */ 80#define I365_VPP1_5V 0x01 /* Vpp1 = 5.0v */
81#define I365_VPP1_12V 0x02 /* Vpp2 = 12.0v */ 81#define I365_VPP1_12V 0x02 /* Vpp1 = 12.0v */
82 82
83/* Flags for I365_INTCTL */ 83/* Flags for I365_INTCTL */
84#define I365_RING_ENA 0x80 84#define I365_RING_ENA 0x80
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 26a621c9e2fc..0ece2cd4a85e 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -764,7 +764,7 @@ static int __init init_m32r_pcc(void)
764 for (i = 0 ; i < pcc_sockets ; i++) { 764 for (i = 0 ; i < pcc_sockets ; i++) {
765 socket[i].socket.dev.parent = &pcc_device.dev; 765 socket[i].socket.dev.parent = &pcc_device.dev;
766 socket[i].socket.ops = &pcc_operations; 766 socket[i].socket.ops = &pcc_operations;
767 socket[i].socket.resource_ops = &pccard_nonstatic_ops; 767 socket[i].socket.resource_ops = &pccard_static_ops;
768 socket[i].socket.owner = THIS_MODULE; 768 socket[i].socket.owner = THIS_MODULE;
769 socket[i].number = i; 769 socket[i].number = i;
770 ret = pcmcia_register_socket(&socket[i].socket); 770 ret = pcmcia_register_socket(&socket[i].socket);
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 7f79c4e169ae..61c215918128 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1233,7 +1233,7 @@ static int __init m8xx_probe(struct of_device *ofdev,
1233 socket[i].socket.io_offset = 0; 1233 socket[i].socket.io_offset = 0;
1234 socket[i].socket.pci_irq = pcmcia_schlvl; 1234 socket[i].socket.pci_irq = pcmcia_schlvl;
1235 socket[i].socket.ops = &m8xx_services; 1235 socket[i].socket.ops = &m8xx_services;
1236 socket[i].socket.resource_ops = &pccard_nonstatic_ops; 1236 socket[i].socket.resource_ops = &pccard_iodyn_ops;
1237 socket[i].socket.cb_dev = NULL; 1237 socket[i].socket.cb_dev = NULL;
1238 socket[i].socket.dev.parent = &ofdev->dev; 1238 socket[i].socket.dev.parent = &ofdev->dev;
1239 socket[i].pcmcia = pcmcia; 1239 socket[i].pcmcia = pcmcia;
@@ -1303,7 +1303,7 @@ static int m8xx_resume(struct platform_device *pdev)
1303#define m8xx_resume NULL 1303#define m8xx_resume NULL
1304#endif 1304#endif
1305 1305
1306static struct of_device_id m8xx_pcmcia_match[] = { 1306static const struct of_device_id m8xx_pcmcia_match[] = {
1307 { 1307 {
1308 .type = "pcmcia", 1308 .type = "pcmcia",
1309 .compatible = "fsl,pq-pcmcia", 1309 .compatible = "fsl,pq-pcmcia",
diff --git a/drivers/pcmcia/o2micro.h b/drivers/pcmcia/o2micro.h
index 624442fc0d35..e74bebac2695 100644
--- a/drivers/pcmcia/o2micro.h
+++ b/drivers/pcmcia/o2micro.h
@@ -116,13 +116,12 @@ static int o2micro_override(struct yenta_socket *socket)
116 * from Eric Still, 02Micro. 116 * from Eric Still, 02Micro.
117 */ 117 */
118 u8 a, b; 118 u8 a, b;
119 bool use_speedup;
119 120
120 if (PCI_FUNC(socket->dev->devfn) == 0) { 121 if (PCI_FUNC(socket->dev->devfn) == 0) {
121 a = config_readb(socket, O2_RESERVED1); 122 a = config_readb(socket, O2_RESERVED1);
122 b = config_readb(socket, O2_RESERVED2); 123 b = config_readb(socket, O2_RESERVED2);
123 124 dev_dbg(&socket->dev->dev, "O2: 0x94/0xD4: %02x/%02x\n", a, b);
124 dev_printk(KERN_INFO, &socket->dev->dev,
125 "O2: res at 0x94/0xD4: %02x/%02x\n", a, b);
126 125
127 switch (socket->dev->device) { 126 switch (socket->dev->device) {
128 /* 127 /*
@@ -135,23 +134,37 @@ static int o2micro_override(struct yenta_socket *socket)
135 case PCI_DEVICE_ID_O2_6812: 134 case PCI_DEVICE_ID_O2_6812:
136 case PCI_DEVICE_ID_O2_6832: 135 case PCI_DEVICE_ID_O2_6832:
137 case PCI_DEVICE_ID_O2_6836: 136 case PCI_DEVICE_ID_O2_6836:
138 case PCI_DEVICE_ID_O2_6933: 137 case PCI_DEVICE_ID_O2_6933:
139 dev_printk(KERN_INFO, &socket->dev->dev, 138 use_speedup = false;
140 "Yenta O2: old bridge, disabling read "
141 "prefetch/write burst\n");
142 config_writeb(socket, O2_RESERVED1,
143 a & ~(O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST));
144 config_writeb(socket, O2_RESERVED2,
145 b & ~(O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST));
146 break; 139 break;
147
148 default: 140 default:
149 dev_printk(KERN_INFO , &socket->dev->dev, 141 use_speedup = true;
150 "O2: enabling read prefetch/write burst\n"); 142 break;
143 }
144
145 /* the user may override our decision */
146 if (strcasecmp(o2_speedup, "on") == 0)
147 use_speedup = true;
148 else if (strcasecmp(o2_speedup, "off") == 0)
149 use_speedup = false;
150 else if (strcasecmp(o2_speedup, "default") != 0)
151 dev_warn(&socket->dev->dev,
152 "O2: Unknown parameter, using 'default'");
153
154 if (use_speedup) {
155 dev_info(&socket->dev->dev,
156 "O2: enabling read prefetch/write burst\n");
157 config_writeb(socket, O2_RESERVED1,
158 a | O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST);
159 config_writeb(socket, O2_RESERVED2,
160 b | O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST);
161 } else {
162 dev_info(&socket->dev->dev,
163 "O2: disabling read prefetch/write burst\n");
151 config_writeb(socket, O2_RESERVED1, 164 config_writeb(socket, O2_RESERVED1,
152 a | O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST); 165 a & ~(O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST));
153 config_writeb(socket, O2_RESERVED2, 166 config_writeb(socket, O2_RESERVED2,
154 b | O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST); 167 b & ~(O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST));
155 } 168 }
156 } 169 }
157 170
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 663781d20129..3ef991552398 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -71,8 +71,6 @@ struct omap_cf_socket {
71 71
72#define POLL_INTERVAL (2 * HZ) 72#define POLL_INTERVAL (2 * HZ)
73 73
74#define SZ_2K (2 * SZ_1K)
75
76/*--------------------------------------------------------------------------*/ 74/*--------------------------------------------------------------------------*/
77 75
78static int omap_cf_ss_init(struct pcmcia_socket *s) 76static int omap_cf_ss_init(struct pcmcia_socket *s)
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index f73fd5beaa37..13a7132cf688 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -62,16 +62,15 @@ static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s,
62 unsigned int function) 62 unsigned int function)
63{ 63{
64 struct pcmcia_device *p_dev = NULL; 64 struct pcmcia_device *p_dev = NULL;
65 unsigned long flags;
66 65
67 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 66 mutex_lock(&s->ops_mutex);
68 list_for_each_entry(p_dev, &s->devices_list, socket_device_list) { 67 list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
69 if (p_dev->func == function) { 68 if (p_dev->func == function) {
70 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 69 mutex_unlock(&s->ops_mutex);
71 return pcmcia_get_dev(p_dev); 70 return pcmcia_get_dev(p_dev);
72 } 71 }
73 } 72 }
74 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 73 mutex_unlock(&s->ops_mutex);
75 return NULL; 74 return NULL;
76} 75}
77 76
@@ -169,7 +168,6 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
169{ 168{
170 struct pcmcia_socket *s; 169 struct pcmcia_socket *s;
171 int ret = -ENOSYS; 170 int ret = -ENOSYS;
172 unsigned long flags;
173 171
174 down_read(&pcmcia_socket_list_rwsem); 172 down_read(&pcmcia_socket_list_rwsem);
175 list_for_each_entry(s, &pcmcia_socket_list, socket_list) { 173 list_for_each_entry(s, &pcmcia_socket_list, socket_list) {
@@ -182,14 +180,13 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
182 180
183 /* you can't use the old interface if the new 181 /* you can't use the old interface if the new
184 * one was used before */ 182 * one was used before */
185 spin_lock_irqsave(&s->lock, flags); 183 mutex_lock(&s->ops_mutex);
186 if ((s->resource_setup_new) && 184 if ((s->resource_setup_new) &&
187 !(s->resource_setup_old)) { 185 !(s->resource_setup_old)) {
188 spin_unlock_irqrestore(&s->lock, flags); 186 mutex_unlock(&s->ops_mutex);
189 continue; 187 continue;
190 } else if (!(s->resource_setup_old)) 188 } else if (!(s->resource_setup_old))
191 s->resource_setup_old = 1; 189 s->resource_setup_old = 1;
192 spin_unlock_irqrestore(&s->lock, flags);
193 190
194 switch (adj->Resource) { 191 switch (adj->Resource) {
195 case RES_MEMORY_RANGE: 192 case RES_MEMORY_RANGE:
@@ -208,10 +205,9 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
208 * last call to adjust_resource_info, we 205 * last call to adjust_resource_info, we
209 * always need to assume this is the latest 206 * always need to assume this is the latest
210 * one... */ 207 * one... */
211 spin_lock_irqsave(&s->lock, flags);
212 s->resource_setup_done = 1; 208 s->resource_setup_done = 1;
213 spin_unlock_irqrestore(&s->lock, flags);
214 } 209 }
210 mutex_unlock(&s->ops_mutex);
215 } 211 }
216 } 212 }
217 up_read(&pcmcia_socket_list_rwsem); 213 up_read(&pcmcia_socket_list_rwsem);
@@ -470,7 +466,6 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
470 struct pcmcia_driver *p_drv; 466 struct pcmcia_driver *p_drv;
471 struct pcmcia_device *p_dev; 467 struct pcmcia_device *p_dev;
472 int ret = 0; 468 int ret = 0;
473 unsigned long flags;
474 469
475 s = pcmcia_get_socket(s); 470 s = pcmcia_get_socket(s);
476 if (!s) 471 if (!s)
@@ -490,7 +485,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
490 goto err_put_driver; 485 goto err_put_driver;
491 } 486 }
492 487
493 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 488 mutex_lock(&s->ops_mutex);
494 list_for_each_entry(p_dev, &s->devices_list, socket_device_list) { 489 list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
495 if (p_dev->func == bind_info->function) { 490 if (p_dev->func == bind_info->function) {
496 if ((p_dev->dev.driver == &p_drv->drv)) { 491 if ((p_dev->dev.driver == &p_drv->drv)) {
@@ -499,7 +494,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
499 * registered, and it was registered 494 * registered, and it was registered
500 * by userspace before, we need to 495 * by userspace before, we need to
501 * return the "instance". */ 496 * return the "instance". */
502 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 497 mutex_unlock(&s->ops_mutex);
503 bind_info->instance = p_dev; 498 bind_info->instance = p_dev;
504 ret = -EBUSY; 499 ret = -EBUSY;
505 goto err_put_module; 500 goto err_put_module;
@@ -507,7 +502,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
507 /* the correct driver managed to bind 502 /* the correct driver managed to bind
508 * itself magically to the correct 503 * itself magically to the correct
509 * device. */ 504 * device. */
510 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 505 mutex_unlock(&s->ops_mutex);
511 p_dev->cardmgr = p_drv; 506 p_dev->cardmgr = p_drv;
512 ret = 0; 507 ret = 0;
513 goto err_put_module; 508 goto err_put_module;
@@ -516,12 +511,12 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
516 /* there's already a device available where 511 /* there's already a device available where
517 * no device has been bound to yet. So we don't 512 * no device has been bound to yet. So we don't
518 * need to register a device! */ 513 * need to register a device! */
519 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 514 mutex_unlock(&s->ops_mutex);
520 goto rescan; 515 goto rescan;
521 } 516 }
522 } 517 }
523 } 518 }
524 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 519 mutex_unlock(&s->ops_mutex);
525 520
526 p_dev = pcmcia_device_add(s, bind_info->function); 521 p_dev = pcmcia_device_add(s, bind_info->function);
527 if (!p_dev) { 522 if (!p_dev) {
@@ -578,7 +573,6 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
578 dev_node_t *node; 573 dev_node_t *node;
579 struct pcmcia_device *p_dev; 574 struct pcmcia_device *p_dev;
580 struct pcmcia_driver *p_drv; 575 struct pcmcia_driver *p_drv;
581 unsigned long flags;
582 int ret = 0; 576 int ret = 0;
583 577
584#ifdef CONFIG_CARDBUS 578#ifdef CONFIG_CARDBUS
@@ -617,7 +611,7 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
617 } 611 }
618#endif 612#endif
619 613
620 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 614 mutex_lock(&s->ops_mutex);
621 list_for_each_entry(p_dev, &s->devices_list, socket_device_list) { 615 list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
622 if (p_dev->func == bind_info->function) { 616 if (p_dev->func == bind_info->function) {
623 p_dev = pcmcia_get_dev(p_dev); 617 p_dev = pcmcia_get_dev(p_dev);
@@ -626,11 +620,11 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
626 goto found; 620 goto found;
627 } 621 }
628 } 622 }
629 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 623 mutex_unlock(&s->ops_mutex);
630 return -ENODEV; 624 return -ENODEV;
631 625
632 found: 626 found:
633 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 627 mutex_unlock(&s->ops_mutex);
634 628
635 p_drv = to_pcmcia_drv(p_dev->dev.driver); 629 p_drv = to_pcmcia_drv(p_dev->dev.driver);
636 if (p_drv && !p_dev->_locked) { 630 if (p_drv && !p_dev->_locked) {
@@ -931,16 +925,16 @@ static int ds_ioctl(struct inode *inode, struct file *file,
931 ret = pccard_validate_cis(s, &buf->cisinfo.Chains); 925 ret = pccard_validate_cis(s, &buf->cisinfo.Chains);
932 break; 926 break;
933 case DS_SUSPEND_CARD: 927 case DS_SUSPEND_CARD:
934 ret = pcmcia_suspend_card(s); 928 pcmcia_parse_uevents(s, PCMCIA_UEVENT_SUSPEND);
935 break; 929 break;
936 case DS_RESUME_CARD: 930 case DS_RESUME_CARD:
937 ret = pcmcia_resume_card(s); 931 pcmcia_parse_uevents(s, PCMCIA_UEVENT_RESUME);
938 break; 932 break;
939 case DS_EJECT_CARD: 933 case DS_EJECT_CARD:
940 err = pcmcia_eject_card(s); 934 pcmcia_parse_uevents(s, PCMCIA_UEVENT_EJECT);
941 break; 935 break;
942 case DS_INSERT_CARD: 936 case DS_INSERT_CARD:
943 err = pcmcia_insert_card(s); 937 pcmcia_parse_uevents(s, PCMCIA_UEVENT_INSERT);
944 break; 938 break;
945 case DS_ACCESS_CONFIGURATION_REGISTER: 939 case DS_ACCESS_CONFIGURATION_REGISTER:
946 if ((buf->conf_reg.Action == CS_WRITE) && !capable(CAP_SYS_ADMIN)) { 940 if ((buf->conf_reg.Action == CS_WRITE) && !capable(CAP_SYS_ADMIN)) {
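The pcmcia_ioctl.c changes above swap the global pcmcia_dev_list_lock spinlock for the socket's ops_mutex around every walk of devices_list, which is fine because these ioctl paths are allowed to sleep. A simplified userspace analogue of the guarded lookup, with pthread primitives and invented pdev/skt types in place of the kernel structures:

#include <pthread.h>
#include <stddef.h>

struct pdev {
	unsigned int func;
	struct pdev *next;
};

struct skt {
	pthread_mutex_t ops_mutex;	/* takes the place of pcmcia_dev_list_lock */
	struct pdev *devices;		/* stands in for devices_list */
};

/* Return the device bound to the given function number, or NULL.  The
 * list is only ever walked with ops_mutex held, and a mutex works here
 * because the caller may sleep. */
static struct pdev *get_device(struct skt *s, unsigned int function)
{
	struct pdev *p, *found = NULL;

	pthread_mutex_lock(&s->ops_mutex);
	for (p = s->devices; p; p = p->next) {
		if (p->func == function) {
			found = p;	/* the driver would take a reference here */
			break;
		}
	}
	pthread_mutex_unlock(&s->ops_mutex);
	return found;
}

int main(void)
{
	struct pdev d1 = { 1, NULL };
	struct pdev d0 = { 0, &d1 };
	struct skt s = { .ops_mutex = PTHREAD_MUTEX_INITIALIZER, .devices = &d0 };

	return get_device(&s, 1) == &d1 ? 0 : 1;
}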
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index d5db95644b64..b2df04199a21 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -43,6 +43,39 @@ module_param(io_speed, int, 0444);
43static u8 pcmcia_used_irq[NR_IRQS]; 43static u8 pcmcia_used_irq[NR_IRQS];
44#endif 44#endif
45 45
46static int pcmcia_adjust_io_region(struct resource *res, unsigned long start,
47 unsigned long end, struct pcmcia_socket *s)
48{
49 if (s->resource_ops->adjust_io_region)
50 return s->resource_ops->adjust_io_region(res, start, end, s);
51 return -ENOMEM;
52}
53
54static struct resource *pcmcia_find_io_region(unsigned long base, int num,
55 unsigned long align,
56 struct pcmcia_socket *s)
57{
58 if (s->resource_ops->find_io)
59 return s->resource_ops->find_io(base, num, align, s);
60 return NULL;
61}
62
63int pcmcia_validate_mem(struct pcmcia_socket *s)
64{
65 if (s->resource_ops->validate_mem)
66 return s->resource_ops->validate_mem(s);
67 /* if there is no callback, we can assume that everything is OK */
68 return 0;
69}
70
71struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
72 int low, struct pcmcia_socket *s)
73{
74 if (s->resource_ops->find_mem)
75 return s->resource_ops->find_mem(base, num, align, low, s);
76 return NULL;
77}
78
46 79
47/** alloc_io_space 80/** alloc_io_space
48 * 81 *
@@ -158,14 +191,18 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
158 return -EINVAL; 191 return -EINVAL;
159 192
160 s = p_dev->socket; 193 s = p_dev->socket;
194
195 mutex_lock(&s->ops_mutex);
161 c = p_dev->function_config; 196 c = p_dev->function_config;
162 197
163 if (!(c->state & CONFIG_LOCKED)) { 198 if (!(c->state & CONFIG_LOCKED)) {
164 dev_dbg(&s->dev, "Configuration isn't locked\n"); 199 dev_dbg(&s->dev, "Configuration isn't locked\n");
200 mutex_unlock(&s->ops_mutex);
165 return -EACCES; 201 return -EACCES;
166 } 202 }
167 203
168 addr = (c->ConfigBase + reg->Offset) >> 1; 204 addr = (c->ConfigBase + reg->Offset) >> 1;
205 mutex_unlock(&s->ops_mutex);
169 206
170 switch (reg->Action) { 207 switch (reg->Action) {
171 case CS_READ: 208 case CS_READ:
@@ -190,6 +227,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
190 memreq_t *req) 227 memreq_t *req)
191{ 228{
192 struct pcmcia_socket *s = p_dev->socket; 229 struct pcmcia_socket *s = p_dev->socket;
230 int ret;
193 231
194 wh--; 232 wh--;
195 if (wh >= MAX_WIN) 233 if (wh >= MAX_WIN)
@@ -198,12 +236,13 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
198 dev_dbg(&s->dev, "failure: requested page is zero\n"); 236 dev_dbg(&s->dev, "failure: requested page is zero\n");
199 return -EINVAL; 237 return -EINVAL;
200 } 238 }
239 mutex_lock(&s->ops_mutex);
201 s->win[wh].card_start = req->CardOffset; 240 s->win[wh].card_start = req->CardOffset;
202 if (s->ops->set_mem_map(s, &s->win[wh]) != 0) { 241 ret = s->ops->set_mem_map(s, &s->win[wh]);
203 dev_dbg(&s->dev, "failed to set_mem_map\n"); 242 if (ret)
204 return -EIO; 243 dev_warn(&s->dev, "failed to set_mem_map\n");
205 } 244 mutex_unlock(&s->ops_mutex);
206 return 0; 245 return ret;
207} /* pcmcia_map_mem_page */ 246} /* pcmcia_map_mem_page */
208EXPORT_SYMBOL(pcmcia_map_mem_page); 247EXPORT_SYMBOL(pcmcia_map_mem_page);
209 248
@@ -219,14 +258,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
219 config_t *c; 258 config_t *c;
220 259
221 s = p_dev->socket; 260 s = p_dev->socket;
261
262 mutex_lock(&s->ops_mutex);
222 c = p_dev->function_config; 263 c = p_dev->function_config;
223 264
224 if (!(s->state & SOCKET_PRESENT)) { 265 if (!(s->state & SOCKET_PRESENT)) {
225 dev_dbg(&s->dev, "No card present\n"); 266 dev_dbg(&s->dev, "No card present\n");
267 mutex_unlock(&s->ops_mutex);
226 return -ENODEV; 268 return -ENODEV;
227 } 269 }
228 if (!(c->state & CONFIG_LOCKED)) { 270 if (!(c->state & CONFIG_LOCKED)) {
229 dev_dbg(&s->dev, "Configuration isn't locked\n"); 271 dev_dbg(&s->dev, "Configuration isn't locked\n");
272 mutex_unlock(&s->ops_mutex);
230 return -EACCES; 273 return -EACCES;
231 } 274 }
232 275
@@ -251,10 +294,12 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
251 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 294 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
252 if (mod->Vpp1 != mod->Vpp2) { 295 if (mod->Vpp1 != mod->Vpp2) {
253 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); 296 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
297 mutex_unlock(&s->ops_mutex);
254 return -EINVAL; 298 return -EINVAL;
255 } 299 }
256 s->socket.Vpp = mod->Vpp1; 300 s->socket.Vpp = mod->Vpp1;
257 if (s->ops->set_socket(s, &s->socket)) { 301 if (s->ops->set_socket(s, &s->socket)) {
302 mutex_unlock(&s->ops_mutex);
258 dev_printk(KERN_WARNING, &s->dev, 303 dev_printk(KERN_WARNING, &s->dev,
259 "Unable to set VPP\n"); 304 "Unable to set VPP\n");
260 return -EIO; 305 return -EIO;
@@ -262,6 +307,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
262 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || 307 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
263 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 308 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
264 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); 309 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
310 mutex_unlock(&s->ops_mutex);
265 return -EINVAL; 311 return -EINVAL;
266 } 312 }
267 313
@@ -286,6 +332,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
286 s->ops->set_io_map(s, &io_on); 332 s->ops->set_io_map(s, &io_on);
287 } 333 }
288 } 334 }
335 mutex_unlock(&s->ops_mutex);
289 336
290 return 0; 337 return 0;
291} /* modify_configuration */ 338} /* modify_configuration */
@@ -296,9 +343,11 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
296{ 343{
297 pccard_io_map io = { 0, 0, 0, 0, 1 }; 344 pccard_io_map io = { 0, 0, 0, 0, 1 };
298 struct pcmcia_socket *s = p_dev->socket; 345 struct pcmcia_socket *s = p_dev->socket;
299 config_t *c = p_dev->function_config; 346 config_t *c;
300 int i; 347 int i;
301 348
349 mutex_lock(&s->ops_mutex);
350 c = p_dev->function_config;
302 if (p_dev->_locked) { 351 if (p_dev->_locked) {
303 p_dev->_locked = 0; 352 p_dev->_locked = 0;
304 if (--(s->lock_count) == 0) { 353 if (--(s->lock_count) == 0) {
@@ -321,6 +370,7 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
321 s->ops->set_io_map(s, &io); 370 s->ops->set_io_map(s, &io);
322 } 371 }
323 } 372 }
373 mutex_unlock(&s->ops_mutex);
324 374
325 return 0; 375 return 0;
326} /* pcmcia_release_configuration */ 376} /* pcmcia_release_configuration */
@@ -337,10 +387,14 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
337static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req) 387static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
338{ 388{
339 struct pcmcia_socket *s = p_dev->socket; 389 struct pcmcia_socket *s = p_dev->socket;
340 config_t *c = p_dev->function_config; 390 int ret = -EINVAL;
391 config_t *c;
392
393 mutex_lock(&s->ops_mutex);
394 c = p_dev->function_config;
341 395
342 if (!p_dev->_io) 396 if (!p_dev->_io)
343 return -EINVAL; 397 goto out;
344 398
345 p_dev->_io = 0; 399 p_dev->_io = 0;
346 400
@@ -348,7 +402,7 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
348 (c->io.NumPorts1 != req->NumPorts1) || 402 (c->io.NumPorts1 != req->NumPorts1) ||
349 (c->io.BasePort2 != req->BasePort2) || 403 (c->io.BasePort2 != req->BasePort2) ||
350 (c->io.NumPorts2 != req->NumPorts2)) 404 (c->io.NumPorts2 != req->NumPorts2))
351 return -EINVAL; 405 goto out;
352 406
353 c->state &= ~CONFIG_IO_REQ; 407 c->state &= ~CONFIG_IO_REQ;
354 408
@@ -356,28 +410,38 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
356 if (req->NumPorts2) 410 if (req->NumPorts2)
357 release_io_space(s, req->BasePort2, req->NumPorts2); 411 release_io_space(s, req->BasePort2, req->NumPorts2);
358 412
359 return 0; 413out:
414 mutex_unlock(&s->ops_mutex);
415
416 return ret;
360} /* pcmcia_release_io */ 417} /* pcmcia_release_io */
361 418
362 419
363static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req) 420static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
364{ 421{
365 struct pcmcia_socket *s = p_dev->socket; 422 struct pcmcia_socket *s = p_dev->socket;
366 config_t *c = p_dev->function_config; 423 config_t *c;
424 int ret = -EINVAL;
425
426 mutex_lock(&s->ops_mutex);
427
428 c = p_dev->function_config;
367 429
368 if (!p_dev->_irq) 430 if (!p_dev->_irq)
369 return -EINVAL; 431 goto out;
432
370 p_dev->_irq = 0; 433 p_dev->_irq = 0;
371 434
372 if (c->state & CONFIG_LOCKED) 435 if (c->state & CONFIG_LOCKED)
373 return -EACCES; 436 goto out;
437
374 if (c->irq.Attributes != req->Attributes) { 438 if (c->irq.Attributes != req->Attributes) {
375 dev_dbg(&s->dev, "IRQ attributes must match assigned ones\n"); 439 dev_dbg(&s->dev, "IRQ attributes must match assigned ones\n");
376 return -EINVAL; 440 goto out;
377 } 441 }
378 if (s->irq.AssignedIRQ != req->AssignedIRQ) { 442 if (s->irq.AssignedIRQ != req->AssignedIRQ) {
379 dev_dbg(&s->dev, "IRQ must match assigned one\n"); 443 dev_dbg(&s->dev, "IRQ must match assigned one\n");
380 return -EINVAL; 444 goto out;
381 } 445 }
382 if (--s->irq.Config == 0) { 446 if (--s->irq.Config == 0) {
383 c->state &= ~CONFIG_IRQ_REQ; 447 c->state &= ~CONFIG_IRQ_REQ;
@@ -390,8 +454,12 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
390#ifdef CONFIG_PCMCIA_PROBE 454#ifdef CONFIG_PCMCIA_PROBE
391 pcmcia_used_irq[req->AssignedIRQ]--; 455 pcmcia_used_irq[req->AssignedIRQ]--;
392#endif 456#endif
457 ret = 0;
393 458
394 return 0; 459out:
460 mutex_unlock(&s->ops_mutex);
461
462 return ret;
395} /* pcmcia_release_irq */ 463} /* pcmcia_release_irq */
396 464
397 465
@@ -404,10 +472,12 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
404 if (wh >= MAX_WIN) 472 if (wh >= MAX_WIN)
405 return -EINVAL; 473 return -EINVAL;
406 474
475 mutex_lock(&s->ops_mutex);
407 win = &s->win[wh]; 476 win = &s->win[wh];
408 477
409 if (!(p_dev->_win & CLIENT_WIN_REQ(wh))) { 478 if (!(p_dev->_win & CLIENT_WIN_REQ(wh))) {
410 dev_dbg(&s->dev, "not releasing unknown window\n"); 479 dev_dbg(&s->dev, "not releasing unknown window\n");
480 mutex_unlock(&s->ops_mutex);
411 return -EINVAL; 481 return -EINVAL;
412 } 482 }
413 483
@@ -423,6 +493,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
423 win->res = NULL; 493 win->res = NULL;
424 } 494 }
425 p_dev->_win &= ~CLIENT_WIN_REQ(wh); 495 p_dev->_win &= ~CLIENT_WIN_REQ(wh);
496 mutex_unlock(&s->ops_mutex);
426 497
427 return 0; 498 return 0;
428} /* pcmcia_release_window */ 499} /* pcmcia_release_window */
@@ -445,8 +516,11 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
445 dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n"); 516 dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n");
446 return -EINVAL; 517 return -EINVAL;
447 } 518 }
519
520 mutex_lock(&s->ops_mutex);
448 c = p_dev->function_config; 521 c = p_dev->function_config;
449 if (c->state & CONFIG_LOCKED) { 522 if (c->state & CONFIG_LOCKED) {
523 mutex_unlock(&s->ops_mutex);
450 dev_dbg(&s->dev, "Configuration is locked\n"); 524 dev_dbg(&s->dev, "Configuration is locked\n");
451 return -EACCES; 525 return -EACCES;
452 } 526 }
@@ -454,6 +528,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
454 /* Do power control. We don't allow changes in Vcc. */ 528 /* Do power control. We don't allow changes in Vcc. */
455 s->socket.Vpp = req->Vpp; 529 s->socket.Vpp = req->Vpp;
456 if (s->ops->set_socket(s, &s->socket)) { 530 if (s->ops->set_socket(s, &s->socket)) {
531 mutex_unlock(&s->ops_mutex);
457 dev_printk(KERN_WARNING, &s->dev, 532 dev_printk(KERN_WARNING, &s->dev,
458 "Unable to set socket state\n"); 533 "Unable to set socket state\n");
459 return -EINVAL; 534 return -EINVAL;
@@ -476,6 +551,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
476 s->socket.io_irq = 0; 551 s->socket.io_irq = 0;
477 s->ops->set_socket(s, &s->socket); 552 s->ops->set_socket(s, &s->socket);
478 s->lock_count++; 553 s->lock_count++;
554 mutex_unlock(&s->ops_mutex);
479 555
480 /* Set up CIS configuration registers */ 556 /* Set up CIS configuration registers */
481 base = c->ConfigBase = req->ConfigBase; 557 base = c->ConfigBase = req->ConfigBase;
@@ -524,6 +600,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
524 600
525 /* Configure I/O windows */ 601 /* Configure I/O windows */
526 if (c->state & CONFIG_IO_REQ) { 602 if (c->state & CONFIG_IO_REQ) {
603 mutex_lock(&s->ops_mutex);
527 iomap.speed = io_speed; 604 iomap.speed = io_speed;
528 for (i = 0; i < MAX_IO_WIN; i++) 605 for (i = 0; i < MAX_IO_WIN; i++)
529 if (s->io[i].res) { 606 if (s->io[i].res) {
@@ -542,6 +619,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
542 s->ops->set_io_map(s, &iomap); 619 s->ops->set_io_map(s, &iomap);
543 s->io[i].Config++; 620 s->io[i].Config++;
544 } 621 }
622 mutex_unlock(&s->ops_mutex);
545 } 623 }
546 624
547 c->state |= CONFIG_LOCKED; 625 c->state |= CONFIG_LOCKED;
@@ -560,54 +638,65 @@ int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req)
560{ 638{
561 struct pcmcia_socket *s = p_dev->socket; 639 struct pcmcia_socket *s = p_dev->socket;
562 config_t *c; 640 config_t *c;
641 int ret = -EINVAL;
642
643 mutex_lock(&s->ops_mutex);
563 644
564 if (!(s->state & SOCKET_PRESENT)) { 645 if (!(s->state & SOCKET_PRESENT)) {
565 dev_dbg(&s->dev, "No card present\n"); 646 dev_dbg(&s->dev, "No card present\n");
566 return -ENODEV; 647 goto out;
567 } 648 }
568 649
569 if (!req) 650 if (!req)
570 return -EINVAL; 651 goto out;
652
571 c = p_dev->function_config; 653 c = p_dev->function_config;
572 if (c->state & CONFIG_LOCKED) { 654 if (c->state & CONFIG_LOCKED) {
573 dev_dbg(&s->dev, "Configuration is locked\n"); 655 dev_dbg(&s->dev, "Configuration is locked\n");
574 return -EACCES; 656 goto out;
575 } 657 }
576 if (c->state & CONFIG_IO_REQ) { 658 if (c->state & CONFIG_IO_REQ) {
577 dev_dbg(&s->dev, "IO already configured\n"); 659 dev_dbg(&s->dev, "IO already configured\n");
578 return -EBUSY; 660 goto out;
579 } 661 }
580 if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) { 662 if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) {
581 dev_dbg(&s->dev, "bad attribute setting for IO region 1\n"); 663 dev_dbg(&s->dev, "bad attribute setting for IO region 1\n");
582 return -EINVAL; 664 goto out;
583 } 665 }
584 if ((req->NumPorts2 > 0) && 666 if ((req->NumPorts2 > 0) &&
585 (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) { 667 (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) {
586 dev_dbg(&s->dev, "bad attribute setting for IO region 2\n"); 668 dev_dbg(&s->dev, "bad attribute setting for IO region 2\n");
587 return -EINVAL; 669 goto out;
588 } 670 }
589 671
590 dev_dbg(&s->dev, "trying to allocate resource 1\n"); 672 dev_dbg(&s->dev, "trying to allocate resource 1\n");
591 if (alloc_io_space(s, req->Attributes1, &req->BasePort1, 673 ret = alloc_io_space(s, req->Attributes1, &req->BasePort1,
592 req->NumPorts1, req->IOAddrLines)) { 674 req->NumPorts1, req->IOAddrLines);
675 if (ret) {
593 dev_dbg(&s->dev, "allocation of resource 1 failed\n"); 676 dev_dbg(&s->dev, "allocation of resource 1 failed\n");
594 return -EBUSY; 677 goto out;
595 } 678 }
596 679
597 if (req->NumPorts2) { 680 if (req->NumPorts2) {
598 dev_dbg(&s->dev, "trying to allocate resource 2\n"); 681 dev_dbg(&s->dev, "trying to allocate resource 2\n");
599 if (alloc_io_space(s, req->Attributes2, &req->BasePort2, 682 ret = alloc_io_space(s, req->Attributes2, &req->BasePort2,
600 req->NumPorts2, req->IOAddrLines)) { 683 req->NumPorts2, req->IOAddrLines);
684 if (ret) {
601 dev_dbg(&s->dev, "allocation of resource 2 failed\n"); 685 dev_dbg(&s->dev, "allocation of resource 2 failed\n");
602 release_io_space(s, req->BasePort1, req->NumPorts1); 686 release_io_space(s, req->BasePort1, req->NumPorts1);
603 return -EBUSY; 687 goto out;
604 } 688 }
605 } 689 }
606 690
607 c->io = *req; 691 c->io = *req;
608 c->state |= CONFIG_IO_REQ; 692 c->state |= CONFIG_IO_REQ;
609 p_dev->_io = 1; 693 p_dev->_io = 1;
610 return 0; 694 dev_dbg(&s->dev, "allocating resources succeeded: %d\n", ret);
695
696out:
697 mutex_unlock(&s->ops_mutex);
698
699 return ret;
611} /* pcmcia_request_io */ 700} /* pcmcia_request_io */
612EXPORT_SYMBOL(pcmcia_request_io); 701EXPORT_SYMBOL(pcmcia_request_io);
613 702
@@ -636,18 +725,20 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
636 int ret = -EINVAL, irq = 0; 725 int ret = -EINVAL, irq = 0;
637 int type; 726 int type;
638 727
728 mutex_lock(&s->ops_mutex);
729
639 if (!(s->state & SOCKET_PRESENT)) { 730 if (!(s->state & SOCKET_PRESENT)) {
640 dev_dbg(&s->dev, "No card present\n"); 731 dev_dbg(&s->dev, "No card present\n");
641 return -ENODEV; 732 goto out;
642 } 733 }
643 c = p_dev->function_config; 734 c = p_dev->function_config;
644 if (c->state & CONFIG_LOCKED) { 735 if (c->state & CONFIG_LOCKED) {
645 dev_dbg(&s->dev, "Configuration is locked\n"); 736 dev_dbg(&s->dev, "Configuration is locked\n");
646 return -EACCES; 737 goto out;
647 } 738 }
648 if (c->state & CONFIG_IRQ_REQ) { 739 if (c->state & CONFIG_IRQ_REQ) {
649 dev_dbg(&s->dev, "IRQ already configured\n"); 740 dev_dbg(&s->dev, "IRQ already configured\n");
650 return -EBUSY; 741 goto out;
651 } 742 }
652 743
653 /* Decide what type of interrupt we are registering */ 744 /* Decide what type of interrupt we are registering */
@@ -708,7 +799,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
708 if (ret && !s->irq.AssignedIRQ) { 799 if (ret && !s->irq.AssignedIRQ) {
709 if (!s->pci_irq) { 800 if (!s->pci_irq) {
710 dev_printk(KERN_INFO, &s->dev, "no IRQ found\n"); 801 dev_printk(KERN_INFO, &s->dev, "no IRQ found\n");
711 return ret; 802 goto out;
712 } 803 }
713 type = IRQF_SHARED; 804 type = IRQF_SHARED;
714 irq = s->pci_irq; 805 irq = s->pci_irq;
@@ -720,7 +811,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
720 if (ret) { 811 if (ret) {
721 dev_printk(KERN_INFO, &s->dev, 812 dev_printk(KERN_INFO, &s->dev,
722 "request_irq() failed\n"); 813 "request_irq() failed\n");
723 return ret; 814 goto out;
724 } 815 }
725 } 816 }
726 817
@@ -743,7 +834,10 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
743 pcmcia_used_irq[irq]++; 834 pcmcia_used_irq[irq]++;
744#endif 835#endif
745 836
746 return 0; 837 ret = 0;
838out:
839 mutex_unlock(&s->ops_mutex);
840 return ret;
747} /* pcmcia_request_irq */ 841} /* pcmcia_request_irq */
748EXPORT_SYMBOL(pcmcia_request_irq); 842EXPORT_SYMBOL(pcmcia_request_irq);
749 843
@@ -796,6 +890,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
796 return -EINVAL; 890 return -EINVAL;
797 } 891 }
798 892
893 mutex_lock(&s->ops_mutex);
799 win = &s->win[w]; 894 win = &s->win[w];
800 895
801 if (!(s->features & SS_CAP_STATIC_MAP)) { 896 if (!(s->features & SS_CAP_STATIC_MAP)) {
@@ -803,6 +898,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
803 (req->Attributes & WIN_MAP_BELOW_1MB), s); 898 (req->Attributes & WIN_MAP_BELOW_1MB), s);
804 if (!win->res) { 899 if (!win->res) {
805 dev_dbg(&s->dev, "allocating mem region failed\n"); 900 dev_dbg(&s->dev, "allocating mem region failed\n");
901 mutex_unlock(&s->ops_mutex);
806 return -EINVAL; 902 return -EINVAL;
807 } 903 }
808 } 904 }
@@ -821,8 +917,10 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
821 if (req->Attributes & WIN_USE_WAIT) 917 if (req->Attributes & WIN_USE_WAIT)
822 win->flags |= MAP_USE_WAIT; 918 win->flags |= MAP_USE_WAIT;
823 win->card_start = 0; 919 win->card_start = 0;
920
824 if (s->ops->set_mem_map(s, win) != 0) { 921 if (s->ops->set_mem_map(s, win) != 0) {
825 dev_dbg(&s->dev, "failed to set memory mapping\n"); 922 dev_dbg(&s->dev, "failed to set memory mapping\n");
923 mutex_unlock(&s->ops_mutex);
826 return -EIO; 924 return -EIO;
827 } 925 }
828 s->state |= SOCKET_WIN_REQ(w); 926 s->state |= SOCKET_WIN_REQ(w);
@@ -833,6 +931,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
833 else 931 else
834 req->Base = win->res->start; 932 req->Base = win->res->start;
835 933
934 mutex_unlock(&s->ops_mutex);
836 *wh = w + 1; 935 *wh = w + 1;
837 936
838 return 0; 937 return 0;
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index f8401a0ef89b..e6f7d410aed6 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -21,60 +21,12 @@
21#include <pcmcia/cistpl.h> 21#include <pcmcia/cistpl.h>
22#include "cs_internal.h" 22#include "cs_internal.h"
23 23
24
25int pcmcia_validate_mem(struct pcmcia_socket *s)
26{
27 if (s->resource_ops->validate_mem)
28 return s->resource_ops->validate_mem(s);
29 /* if there is no callback, we can assume that everything is OK */
30 return 0;
31}
32EXPORT_SYMBOL(pcmcia_validate_mem);
33
34int pcmcia_adjust_io_region(struct resource *res, unsigned long r_start,
35 unsigned long r_end, struct pcmcia_socket *s)
36{
37 if (s->resource_ops->adjust_io_region)
38 return s->resource_ops->adjust_io_region(res, r_start, r_end, s);
39 return -ENOMEM;
40}
41EXPORT_SYMBOL(pcmcia_adjust_io_region);
42
43struct resource *pcmcia_find_io_region(unsigned long base, int num,
44 unsigned long align, struct pcmcia_socket *s)
45{
46 if (s->resource_ops->find_io)
47 return s->resource_ops->find_io(base, num, align, s);
48 return NULL;
49}
50EXPORT_SYMBOL(pcmcia_find_io_region);
51
52struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
53 int low, struct pcmcia_socket *s)
54{
55 if (s->resource_ops->find_mem)
56 return s->resource_ops->find_mem(base, num, align, low, s);
57 return NULL;
58}
59EXPORT_SYMBOL(pcmcia_find_mem_region);
60
61void release_resource_db(struct pcmcia_socket *s)
62{
63 if (s->resource_ops->exit)
64 s->resource_ops->exit(s);
65}
66
67
68static int static_init(struct pcmcia_socket *s) 24static int static_init(struct pcmcia_socket *s)
69{ 25{
70 unsigned long flags;
71
72 /* the good thing about SS_CAP_STATIC_MAP sockets is 26 /* the good thing about SS_CAP_STATIC_MAP sockets is
73 * that they don't need a resource database */ 27 * that they don't need a resource database */
74 28
75 spin_lock_irqsave(&s->lock, flags);
76 s->resource_setup_done = 1; 29 s->resource_setup_done = 1;
77 spin_unlock_irqrestore(&s->lock, flags);
78 30
79 return 0; 31 return 0;
80} 32}
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index c67638fe6914..4663b3fa9f96 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -55,11 +55,10 @@ struct resource_map {
55 55
56struct socket_data { 56struct socket_data {
57 struct resource_map mem_db; 57 struct resource_map mem_db;
58 struct resource_map mem_db_valid;
58 struct resource_map io_db; 59 struct resource_map io_db;
59 unsigned int rsrc_mem_probe;
60}; 60};
61 61
62static DEFINE_MUTEX(rsrc_mutex);
63#define MEM_PROBE_LOW (1 << 0) 62#define MEM_PROBE_LOW (1 << 0)
64#define MEM_PROBE_HIGH (1 << 1) 63#define MEM_PROBE_HIGH (1 << 1)
65 64
@@ -125,8 +124,10 @@ static int add_interval(struct resource_map *map, u_long base, u_long num)
125 struct resource_map *p, *q; 124 struct resource_map *p, *q;
126 125
127 for (p = map; ; p = p->next) { 126 for (p = map; ; p = p->next) {
128 if ((p != map) && (p->base+p->num-1 >= base)) 127 if ((p != map) && (p->base+p->num >= base)) {
129 return -1; 128 p->num = max(num + base - p->base, p->num);
129 return 0;
130 }
130 if ((p->next == map) || (p->next->base > base+num-1)) 131 if ((p->next == map) || (p->next->base > base+num-1))
131 break; 132 break;
132 } 133 }
@@ -264,36 +265,44 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
264} 265}
265#endif 266#endif
266 267
267/*====================================================================== 268/*======================================================================*/
268
269 This is tricky... when we set up CIS memory, we try to validate
270 the memory window space allocations.
271
272======================================================================*/
273 269
274/* Validation function for cards with a valid CIS */ 270/**
271 * readable() - iomem validation function for cards with a valid CIS
272 */
275static int readable(struct pcmcia_socket *s, struct resource *res, 273static int readable(struct pcmcia_socket *s, struct resource *res,
276 unsigned int *count) 274 unsigned int *count)
277{ 275{
278 int ret = -1; 276 int ret = -EINVAL;
277
278 if (s->fake_cis) {
279 dev_dbg(&s->dev, "fake CIS is being used: can't validate mem\n");
280 return 0;
281 }
279 282
280 s->cis_mem.res = res; 283 s->cis_mem.res = res;
281 s->cis_virt = ioremap(res->start, s->map_size); 284 s->cis_virt = ioremap(res->start, s->map_size);
282 if (s->cis_virt) { 285 if (s->cis_virt) {
283 ret = pccard_validate_cis(s, count); 286 mutex_unlock(&s->ops_mutex);
284 /* invalidate mapping and CIS cache */ 287 /* as we're only called from pcmcia.c, we're safe */
288 if (s->callback->validate)
289 ret = s->callback->validate(s, count);
290 /* invalidate mapping */
291 mutex_lock(&s->ops_mutex);
285 iounmap(s->cis_virt); 292 iounmap(s->cis_virt);
286 s->cis_virt = NULL; 293 s->cis_virt = NULL;
287 destroy_cis_cache(s);
288 } 294 }
289 s->cis_mem.res = NULL; 295 s->cis_mem.res = NULL;
290 if ((ret != 0) || (*count == 0)) 296 if ((ret) || (*count == 0))
291 return 0; 297 return -EINVAL;
292 return 1; 298 return 0;
293} 299}
294 300
295/* Validation function for simple memory cards */ 301/**
296static int checksum(struct pcmcia_socket *s, struct resource *res) 302 * checksum() - iomem validation function for simple memory cards
303 */
304static int checksum(struct pcmcia_socket *s, struct resource *res,
305 unsigned int *value)
297{ 306{
298 pccard_mem_map map; 307 pccard_mem_map map;
299 int i, a = 0, b = -1, d; 308 int i, a = 0, b = -1, d;
@@ -321,61 +330,90 @@ static int checksum(struct pcmcia_socket *s, struct resource *res)
321 iounmap(virt); 330 iounmap(virt);
322 } 331 }
323 332
324 return (b == -1) ? -1 : (a>>1); 333 if (b == -1)
334 return -EINVAL;
335
336 *value = a;
337
338 return 0;
325} 339}
326 340
327static int 341/**
328cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size) 342 * do_validate_mem() - low level validate a memory region for PCMCIA use
343 * @s: PCMCIA socket to validate
344 * @base: start address of resource to check
345 * @size: size of resource to check
346 * @validate: validation function to use
347 *
348 * do_validate_mem() splits up the memory region which is to be checked
349 * into two parts. Both are passed to the @validate() function. If
350 * @validate() returns non-zero, or the value parameter to @validate()
351 * is zero, or the value parameter is different between both calls,
352 * the check fails, and -EINVAL is returned. Else, 0 is returned.
353 */
354static int do_validate_mem(struct pcmcia_socket *s,
355 unsigned long base, unsigned long size,
356 int validate (struct pcmcia_socket *s,
357 struct resource *res,
358 unsigned int *value))
329{ 359{
360 struct socket_data *s_data = s->resource_data;
330 struct resource *res1, *res2; 361 struct resource *res1, *res2;
331 unsigned int info1, info2; 362 unsigned int info1 = 1, info2 = 1;
332 int ret = 0; 363 int ret = -EINVAL;
333 364
334 res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); 365 res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe");
335 res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, 366 res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM,
336 "PCMCIA memprobe"); 367 "PCMCIA memprobe");
337 368
338 if (res1 && res2) { 369 if (res1 && res2) {
339 ret = readable(s, res1, &info1); 370 ret = 0;
340 ret += readable(s, res2, &info2); 371 if (validate) {
372 ret = validate(s, res1, &info1);
373 ret += validate(s, res2, &info2);
374 }
341 } 375 }
342 376
343 free_region(res2); 377 free_region(res2);
344 free_region(res1); 378 free_region(res1);
345 379
346 return (ret == 2) && (info1 == info2); 380 dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u",
347} 381 base, base+size-1, res1, res2, ret, info1, info2);
348 382
349static int 383 if ((ret) || (info1 != info2) || (info1 == 0))
350checksum_match(struct pcmcia_socket *s, unsigned long base, unsigned long size) 384 return -EINVAL;
351{
352 struct resource *res1, *res2;
353 int a = -1, b = -1;
354
355 res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe");
356 res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM,
357 "PCMCIA memprobe");
358 385
359 if (res1 && res2) { 386 if (validate && !s->fake_cis) {
360 a = checksum(s, res1); 387 /* move it to the validated data set */
361 b = checksum(s, res2); 388 add_interval(&s_data->mem_db_valid, base, size);
389 sub_interval(&s_data->mem_db, base, size);
362 } 390 }
363 391
364 free_region(res2); 392 return 0;
365 free_region(res1);
366
367 return (a == b) && (a >= 0);
368} 393}
369 394
370/*======================================================================
371
372 The memory probe. If the memory list includes a 64K-aligned block
373 below 1MB, we probe in 64K chunks, and as soon as we accumulate at
374 least mem_limit free space, we quit.
375
376======================================================================*/
377 395
378static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s) 396/**
397 * do_mem_probe() - validate a memory region for PCMCIA use
398 * @s: PCMCIA socket to validate
399 * @base: start address of resource to check
400 * @num: size of resource to check
401 * @validate: validation function to use
402 * @fallback: validation function to use if validate fails
403 *
404 * do_mem_probe() checks a memory region for use by the PCMCIA subsystem.
405 * To do so, the area is split up into sensible parts, and then passed
406 * into the @validate() function. Only if @validate() and @fallback() fail,
 407 * the area is marked as unavailable for use by the PCMCIA subsystem. The
408 * function returns the size of the usable memory area.
409 */
410static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num,
411 int validate (struct pcmcia_socket *s,
412 struct resource *res,
413 unsigned int *value),
414 int fallback (struct pcmcia_socket *s,
415 struct resource *res,
416 unsigned int *value))
379{ 417{
380 struct socket_data *s_data = s->resource_data; 418 struct socket_data *s_data = s->resource_data;
381 u_long i, j, bad, fail, step; 419 u_long i, j, bad, fail, step;
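do_validate_mem() above accepts a candidate memory window only if the validator succeeds on both halves of the region and reports the same non-zero value for each; that is what the ret/info1/info2 checks implement. A toy userspace version of that split-and-compare test over an in-memory buffer, with a byte-sum standing in for the driver's readable()/checksum() validators:

#include <stdio.h>
#include <stddef.h>

/* Stand-in validator: "read" one half of the region and report a value. */
static int probe_half(const unsigned char *base, size_t len, unsigned int *value)
{
	unsigned int sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += base[i];
	*value = sum;
	return 0;			/* 0 means the half was readable */
}

/* Accept the region only if both halves validate and agree on a non-zero
 * value -- the same ret/info1/info2 logic as do_validate_mem() above. */
static int validate_region(const unsigned char *base, size_t size)
{
	unsigned int info1 = 1, info2 = 1;
	int ret;

	ret = probe_half(base, size / 2, &info1);
	ret += probe_half(base + size / 2, size / 2, &info2);

	if (ret || info1 != info2 || info1 == 0)
		return -1;		/* looks bogus, leave the range alone */
	return 0;			/* usable */
}

int main(void)
{
	unsigned char mirrored[64], empty[64] = { 0 };
	size_t i;

	for (i = 0; i < sizeof(mirrored); i++)
		mirrored[i] = i % 32;	/* make both halves identical */

	printf("mirrored: %d, empty: %d\n",
	       validate_region(mirrored, sizeof(mirrored)),
	       validate_region(empty, sizeof(empty)));
	return 0;
}

A mirrored buffer passes because both halves produce the same non-zero sum; an all-zero buffer fails, much as an empty window would.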
@@ -393,15 +431,14 @@ static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s)
393 for (i = j = base; i < base+num; i = j + step) { 431 for (i = j = base; i < base+num; i = j + step) {
394 if (!fail) { 432 if (!fail) {
395 for (j = i; j < base+num; j += step) { 433 for (j = i; j < base+num; j += step) {
396 if (cis_readable(s, j, step)) 434 if (!do_validate_mem(s, j, step, validate))
397 break; 435 break;
398 } 436 }
399 fail = ((i == base) && (j == base+num)); 437 fail = ((i == base) && (j == base+num));
400 } 438 }
401 if (fail) { 439 if ((fail) && (fallback)) {
402 for (j = i; j < base+num; j += 2*step) 440 for (j = i; j < base+num; j += step)
403 if (checksum_match(s, j, step) && 441 if (!do_validate_mem(s, j, step, fallback))
404 checksum_match(s, j + step, step))
405 break; 442 break;
406 } 443 }
407 if (i != j) { 444 if (i != j) {
@@ -416,8 +453,14 @@ static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s)
416 return num - bad; 453 return num - bad;
417} 454}
418 455
456
419#ifdef CONFIG_PCMCIA_PROBE 457#ifdef CONFIG_PCMCIA_PROBE
420 458
459/**
460 * inv_probe() - top-to-bottom search for one usuable high memory area
461 * @s: PCMCIA socket to validate
462 * @m: resource_map to check
463 */
421static u_long inv_probe(struct resource_map *m, struct pcmcia_socket *s) 464static u_long inv_probe(struct resource_map *m, struct pcmcia_socket *s)
422{ 465{
423 struct socket_data *s_data = s->resource_data; 466 struct socket_data *s_data = s->resource_data;
@@ -432,9 +475,18 @@ static u_long inv_probe(struct resource_map *m, struct pcmcia_socket *s)
432 } 475 }
433 if (m->base < 0x100000) 476 if (m->base < 0x100000)
434 return 0; 477 return 0;
435 return do_mem_probe(m->base, m->num, s); 478 return do_mem_probe(s, m->base, m->num, readable, checksum);
436} 479}
437 480
481/**
482 * validate_mem() - memory probe function
483 * @s: PCMCIA socket to validate
484 * @probe_mask: MEM_PROBE_LOW | MEM_PROBE_HIGH
485 *
486 * The memory probe. If the memory list includes a 64K-aligned block
487 * below 1MB, we probe in 64K chunks, and as soon as we accumulate at
 488 * least mem_limit free space, we quit. Returns 0 on usable ports.
489 */
438static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) 490static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
439{ 491{
440 struct resource_map *m, mm; 492 struct resource_map *m, mm;
@@ -446,6 +498,8 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
446 if (probe_mask & MEM_PROBE_HIGH) { 498 if (probe_mask & MEM_PROBE_HIGH) {
447 if (inv_probe(s_data->mem_db.next, s) > 0) 499 if (inv_probe(s_data->mem_db.next, s) > 0)
448 return 0; 500 return 0;
501 if (s_data->mem_db_valid.next != &s_data->mem_db_valid)
502 return 0;
449 dev_printk(KERN_NOTICE, &s->dev, 503 dev_printk(KERN_NOTICE, &s->dev,
450 "cs: warning: no high memory space available!\n"); 504 "cs: warning: no high memory space available!\n");
451 return -ENODEV; 505 return -ENODEV;
@@ -457,7 +511,8 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
457 if (mm.base >= 0x100000) 511 if (mm.base >= 0x100000)
458 continue; 512 continue;
459 if ((mm.base | mm.num) & 0xffff) { 513 if ((mm.base | mm.num) & 0xffff) {
460 ok += do_mem_probe(mm.base, mm.num, s); 514 ok += do_mem_probe(s, mm.base, mm.num, readable,
515 checksum);
461 continue; 516 continue;
462 } 517 }
463 /* Special probe for 64K-aligned block */ 518 /* Special probe for 64K-aligned block */
@@ -467,7 +522,8 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
467 if (ok >= mem_limit) 522 if (ok >= mem_limit)
468 sub_interval(&s_data->mem_db, b, 0x10000); 523 sub_interval(&s_data->mem_db, b, 0x10000);
469 else 524 else
470 ok += do_mem_probe(b, 0x10000, s); 525 ok += do_mem_probe(s, b, 0x10000,
526 readable, checksum);
471 } 527 }
472 } 528 }
473 } 529 }
@@ -480,6 +536,13 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
480 536
481#else /* CONFIG_PCMCIA_PROBE */ 537#else /* CONFIG_PCMCIA_PROBE */
482 538
539/**
540 * validate_mem() - memory probe function
541 * @s: PCMCIA socket to validate
542 * @probe_mask: ignored
543 *
 544 * Returns 0 on usable ports.
545 */
483static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) 546static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
484{ 547{
485 struct resource_map *m, mm; 548 struct resource_map *m, mm;
@@ -488,7 +551,7 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
488 551
489 for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { 552 for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) {
490 mm = *m; 553 mm = *m;
491 ok += do_mem_probe(mm.base, mm.num, s); 554 ok += do_mem_probe(s, mm.base, mm.num, readable, checksum);
492 } 555 }
493 if (ok > 0) 556 if (ok > 0)
494 return 0; 557 return 0;
@@ -498,31 +561,31 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
498#endif /* CONFIG_PCMCIA_PROBE */ 561#endif /* CONFIG_PCMCIA_PROBE */
499 562
500 563
501/* 564/**
565 * pcmcia_nonstatic_validate_mem() - try to validate iomem for PCMCIA use
566 * @s: PCMCIA socket to validate
567 *
568 * This is tricky... when we set up CIS memory, we try to validate
569 * the memory window space allocations.
570 *
502 * Locking note: Must be called with skt_mutex held! 571 * Locking note: Must be called with skt_mutex held!
503 */ 572 */
504static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s) 573static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
505{ 574{
506 struct socket_data *s_data = s->resource_data; 575 struct socket_data *s_data = s->resource_data;
507 unsigned int probe_mask = MEM_PROBE_LOW; 576 unsigned int probe_mask = MEM_PROBE_LOW;
508 int ret = 0; 577 int ret;
509 578
510 if (!probe_mem) 579 if (!probe_mem || !(s->state & SOCKET_PRESENT))
511 return 0; 580 return 0;
512 581
513 mutex_lock(&rsrc_mutex);
514
515 if (s->features & SS_CAP_PAGE_REGS) 582 if (s->features & SS_CAP_PAGE_REGS)
516 probe_mask = MEM_PROBE_HIGH; 583 probe_mask = MEM_PROBE_HIGH;
517 584
518 if (probe_mask & ~s_data->rsrc_mem_probe) { 585 ret = validate_mem(s, probe_mask);
519 if (s->state & SOCKET_PRESENT)
520 ret = validate_mem(s, probe_mask);
521 if (!ret)
522 s_data->rsrc_mem_probe |= probe_mask;
523 }
524 586
525 mutex_unlock(&rsrc_mutex); 587 if (s_data->mem_db_valid.next != &s_data->mem_db_valid)
588 return 0;
526 589
527 return ret; 590 return ret;
528} 591}
@@ -602,7 +665,6 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
602 struct socket_data *s_data = s->resource_data; 665 struct socket_data *s_data = s->resource_data;
603 int ret = -ENOMEM; 666 int ret = -ENOMEM;
604 667
605 mutex_lock(&rsrc_mutex);
606 for (m = s_data->io_db.next; m != &s_data->io_db; m = m->next) { 668 for (m = s_data->io_db.next; m != &s_data->io_db; m = m->next) {
607 unsigned long start = m->base; 669 unsigned long start = m->base;
608 unsigned long end = m->base + m->num - 1; 670 unsigned long end = m->base + m->num - 1;
@@ -613,7 +675,6 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
613 ret = adjust_resource(res, r_start, r_end - r_start + 1); 675 ret = adjust_resource(res, r_start, r_end - r_start + 1);
614 break; 676 break;
615 } 677 }
616 mutex_unlock(&rsrc_mutex);
617 678
618 return ret; 679 return ret;
619} 680}
@@ -647,7 +708,6 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
647 data.offset = base & data.mask; 708 data.offset = base & data.mask;
648 data.map = &s_data->io_db; 709 data.map = &s_data->io_db;
649 710
650 mutex_lock(&rsrc_mutex);
651#ifdef CONFIG_PCI 711#ifdef CONFIG_PCI
652 if (s->cb_dev) { 712 if (s->cb_dev) {
653 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, 713 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
@@ -656,7 +716,6 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
656#endif 716#endif
657 ret = allocate_resource(&ioport_resource, res, num, min, ~0UL, 717 ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
658 1, pcmcia_align, &data); 718 1, pcmcia_align, &data);
659 mutex_unlock(&rsrc_mutex);
660 719
661 if (ret != 0) { 720 if (ret != 0) {
662 kfree(res); 721 kfree(res);
@@ -672,15 +731,15 @@ static struct resource *nonstatic_find_mem_region(u_long base, u_long num,
672 struct socket_data *s_data = s->resource_data; 731 struct socket_data *s_data = s->resource_data;
673 struct pcmcia_align_data data; 732 struct pcmcia_align_data data;
674 unsigned long min, max; 733 unsigned long min, max;
675 int ret, i; 734 int ret, i, j;
676 735
677 low = low || !(s->features & SS_CAP_PAGE_REGS); 736 low = low || !(s->features & SS_CAP_PAGE_REGS);
678 737
679 data.mask = align - 1; 738 data.mask = align - 1;
680 data.offset = base & data.mask; 739 data.offset = base & data.mask;
681 data.map = &s_data->mem_db;
682 740
683 for (i = 0; i < 2; i++) { 741 for (i = 0; i < 2; i++) {
742 data.map = &s_data->mem_db_valid;
684 if (low) { 743 if (low) {
685 max = 0x100000UL; 744 max = 0x100000UL;
686 min = base < max ? base : 0; 745 min = base < max ? base : 0;
@@ -689,17 +748,23 @@ static struct resource *nonstatic_find_mem_region(u_long base, u_long num,
689 min = 0x100000UL + base; 748 min = 0x100000UL + base;
690 } 749 }
691 750
692 mutex_lock(&rsrc_mutex); 751 for (j = 0; j < 2; j++) {
693#ifdef CONFIG_PCI 752#ifdef CONFIG_PCI
694 if (s->cb_dev) { 753 if (s->cb_dev) {
695 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 754 ret = pci_bus_alloc_resource(s->cb_dev->bus,
696 1, min, 0, 755 res, num, 1, min, 0,
697 pcmcia_align, &data); 756 pcmcia_align, &data);
698 } else 757 } else
699#endif 758#endif
700 ret = allocate_resource(&iomem_resource, res, num, min, 759 {
701 max, 1, pcmcia_align, &data); 760 ret = allocate_resource(&iomem_resource,
702 mutex_unlock(&rsrc_mutex); 761 res, num, min, max, 1,
762 pcmcia_align, &data);
763 }
764 if (ret == 0)
765 break;
766 data.map = &s_data->mem_db;
767 }
703 if (ret == 0 || low) 768 if (ret == 0 || low)
704 break; 769 break;
705 low = 1; 770 low = 1;
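
A minimal user-space sketch of the two-pass lookup introduced above: allocation
is first attempted from the ranges that earlier passed validation (mem_db_valid)
and only then from all configured ranges (mem_db). The range values and helper
names below are purely illustrative, not taken from the driver.

#include <stdio.h>
#include <stddef.h>

struct range { unsigned long base, num; };

/* Return the first range large enough for the request, or NULL. */
static const struct range *pick(const struct range *tbl, size_t n,
				unsigned long need)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].num >= need)
			return &tbl[i];
	return NULL;
}

int main(void)
{
	const struct range mem_db_valid[] = { { 0xd0000000UL, 0x1000UL } };
	const struct range mem_db[]       = { { 0xc0000000UL, 0x4000UL },
					      { 0xd0000000UL, 0x1000UL } };
	unsigned long need = 0x2000;
	const struct range *r;

	/* first pass: only ranges that were validated by the memory probe */
	r = pick(mem_db_valid, sizeof(mem_db_valid) / sizeof(mem_db_valid[0]), need);
	/* second pass: fall back to everything the user/platform configured */
	if (!r)
		r = pick(mem_db, sizeof(mem_db) / sizeof(mem_db[0]), need);

	if (r)
		printf("allocate 0x%lx bytes from 0x%lx\n", need, r->base);
	return 0;
}
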
@@ -722,25 +787,18 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
722 if (end < start) 787 if (end < start)
723 return -EINVAL; 788 return -EINVAL;
724 789
725 mutex_lock(&rsrc_mutex);
726 switch (action) { 790 switch (action) {
727 case ADD_MANAGED_RESOURCE: 791 case ADD_MANAGED_RESOURCE:
728 ret = add_interval(&data->mem_db, start, size); 792 ret = add_interval(&data->mem_db, start, size);
793 if (!ret)
794 do_mem_probe(s, start, size, NULL, NULL);
729 break; 795 break;
730 case REMOVE_MANAGED_RESOURCE: 796 case REMOVE_MANAGED_RESOURCE:
731 ret = sub_interval(&data->mem_db, start, size); 797 ret = sub_interval(&data->mem_db, start, size);
732 if (!ret) {
733 struct pcmcia_socket *socket;
734 down_read(&pcmcia_socket_list_rwsem);
735 list_for_each_entry(socket, &pcmcia_socket_list, socket_list)
736 release_cis_mem(socket);
737 up_read(&pcmcia_socket_list_rwsem);
738 }
739 break; 798 break;
740 default: 799 default:
741 ret = -EINVAL; 800 ret = -EINVAL;
742 } 801 }
743 mutex_unlock(&rsrc_mutex);
744 802
745 return ret; 803 return ret;
746} 804}
@@ -758,7 +816,6 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
758 if (end > IO_SPACE_LIMIT) 816 if (end > IO_SPACE_LIMIT)
759 return -EINVAL; 817 return -EINVAL;
760 818
761 mutex_lock(&rsrc_mutex);
762 switch (action) { 819 switch (action) {
763 case ADD_MANAGED_RESOURCE: 820 case ADD_MANAGED_RESOURCE:
764 if (add_interval(&data->io_db, start, size) != 0) { 821 if (add_interval(&data->io_db, start, size) != 0) {
@@ -777,7 +834,6 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
777 ret = -EINVAL; 834 ret = -EINVAL;
778 break; 835 break;
779 } 836 }
780 mutex_unlock(&rsrc_mutex);
781 837
782 return ret; 838 return ret;
783} 839}
@@ -860,6 +916,7 @@ static int nonstatic_init(struct pcmcia_socket *s)
860 return -ENOMEM; 916 return -ENOMEM;
861 917
862 data->mem_db.next = &data->mem_db; 918 data->mem_db.next = &data->mem_db;
919 data->mem_db_valid.next = &data->mem_db_valid;
863 data->io_db.next = &data->io_db; 920 data->io_db.next = &data->io_db;
864 921
865 s->resource_data = (void *) data; 922 s->resource_data = (void *) data;
@@ -874,7 +931,10 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
874 struct socket_data *data = s->resource_data; 931 struct socket_data *data = s->resource_data;
875 struct resource_map *p, *q; 932 struct resource_map *p, *q;
876 933
877 mutex_lock(&rsrc_mutex); 934 for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = q) {
935 q = p->next;
936 kfree(p);
937 }
878 for (p = data->mem_db.next; p != &data->mem_db; p = q) { 938 for (p = data->mem_db.next; p != &data->mem_db; p = q) {
879 q = p->next; 939 q = p->next;
880 kfree(p); 940 kfree(p);
@@ -883,7 +943,6 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
883 q = p->next; 943 q = p->next;
884 kfree(p); 944 kfree(p);
885 } 945 }
886 mutex_unlock(&rsrc_mutex);
887} 946}
888 947
889 948
@@ -910,7 +969,7 @@ static ssize_t show_io_db(struct device *dev,
910 struct resource_map *p; 969 struct resource_map *p;
911 ssize_t ret = 0; 970 ssize_t ret = 0;
912 971
913 mutex_lock(&rsrc_mutex); 972 mutex_lock(&s->ops_mutex);
914 data = s->resource_data; 973 data = s->resource_data;
915 974
916 for (p = data->io_db.next; p != &data->io_db; p = p->next) { 975 for (p = data->io_db.next; p != &data->io_db; p = p->next) {
@@ -922,7 +981,7 @@ static ssize_t show_io_db(struct device *dev,
922 ((unsigned long) p->base + p->num - 1)); 981 ((unsigned long) p->base + p->num - 1));
923 } 982 }
924 983
925 mutex_unlock(&rsrc_mutex); 984 mutex_unlock(&s->ops_mutex);
926 return ret; 985 return ret;
927} 986}
928 987
@@ -950,9 +1009,11 @@ static ssize_t store_io_db(struct device *dev,
950 if (end_addr < start_addr) 1009 if (end_addr < start_addr)
951 return -EINVAL; 1010 return -EINVAL;
952 1011
1012 mutex_lock(&s->ops_mutex);
953 ret = adjust_io(s, add, start_addr, end_addr); 1013 ret = adjust_io(s, add, start_addr, end_addr);
954 if (!ret) 1014 if (!ret)
955 s->resource_setup_new = 1; 1015 s->resource_setup_new = 1;
1016 mutex_unlock(&s->ops_mutex);
956 1017
957 return ret ? ret : count; 1018 return ret ? ret : count;
958} 1019}
@@ -966,9 +1027,19 @@ static ssize_t show_mem_db(struct device *dev,
966 struct resource_map *p; 1027 struct resource_map *p;
967 ssize_t ret = 0; 1028 ssize_t ret = 0;
968 1029
969 mutex_lock(&rsrc_mutex); 1030 mutex_lock(&s->ops_mutex);
970 data = s->resource_data; 1031 data = s->resource_data;
971 1032
1033 for (p = data->mem_db_valid.next; p != &data->mem_db_valid;
1034 p = p->next) {
1035 if (ret > (PAGE_SIZE - 10))
1036 continue;
1037 ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
1038 "0x%08lx - 0x%08lx\n",
1039 ((unsigned long) p->base),
1040 ((unsigned long) p->base + p->num - 1));
1041 }
1042
972 for (p = data->mem_db.next; p != &data->mem_db; p = p->next) { 1043 for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
973 if (ret > (PAGE_SIZE - 10)) 1044 if (ret > (PAGE_SIZE - 10))
974 continue; 1045 continue;
@@ -978,7 +1049,7 @@ static ssize_t show_mem_db(struct device *dev,
978 ((unsigned long) p->base + p->num - 1)); 1049 ((unsigned long) p->base + p->num - 1));
979 } 1050 }
980 1051
981 mutex_unlock(&rsrc_mutex); 1052 mutex_unlock(&s->ops_mutex);
982 return ret; 1053 return ret;
983} 1054}
984 1055
@@ -1006,9 +1077,11 @@ static ssize_t store_mem_db(struct device *dev,
1006 if (end_addr < start_addr) 1077 if (end_addr < start_addr)
1007 return -EINVAL; 1078 return -EINVAL;
1008 1079
1080 mutex_lock(&s->ops_mutex);
1009 ret = adjust_memory(s, add, start_addr, end_addr); 1081 ret = adjust_memory(s, add, start_addr, end_addr);
1010 if (!ret) 1082 if (!ret)
1011 s->resource_setup_new = 1; 1083 s->resource_setup_new = 1;
1084 mutex_unlock(&s->ops_mutex);
1012 1085
1013 return ret ? ret : count; 1086 return ret ? ret : count;
1014} 1087}
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 7a456000332a..08278016e58d 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -88,15 +88,14 @@ static DEVICE_ATTR(card_vcc, 0444, pccard_show_vcc, NULL);
88static ssize_t pccard_store_insert(struct device *dev, struct device_attribute *attr, 88static ssize_t pccard_store_insert(struct device *dev, struct device_attribute *attr,
89 const char *buf, size_t count) 89 const char *buf, size_t count)
90{ 90{
91 ssize_t ret;
92 struct pcmcia_socket *s = to_socket(dev); 91 struct pcmcia_socket *s = to_socket(dev);
93 92
94 if (!count) 93 if (!count)
95 return -EINVAL; 94 return -EINVAL;
96 95
97 ret = pcmcia_insert_card(s); 96 pcmcia_parse_uevents(s, PCMCIA_UEVENT_INSERT);
98 97
99 return ret ? ret : count; 98 return count;
100} 99}
101static DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert); 100static DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert);
102 101
@@ -113,18 +112,22 @@ static ssize_t pccard_store_card_pm_state(struct device *dev,
113 struct device_attribute *attr, 112 struct device_attribute *attr,
114 const char *buf, size_t count) 113 const char *buf, size_t count)
115{ 114{
116 ssize_t ret = -EINVAL;
117 struct pcmcia_socket *s = to_socket(dev); 115 struct pcmcia_socket *s = to_socket(dev);
116 ssize_t ret = count;
118 117
119 if (!count) 118 if (!count)
120 return -EINVAL; 119 return -EINVAL;
121 120
122 if (!(s->state & SOCKET_SUSPEND) && !strncmp(buf, "off", 3)) 121 if (!strncmp(buf, "off", 3))
123 ret = pcmcia_suspend_card(s); 122 pcmcia_parse_uevents(s, PCMCIA_UEVENT_SUSPEND);
124 else if ((s->state & SOCKET_SUSPEND) && !strncmp(buf, "on", 2)) 123 else {
125 ret = pcmcia_resume_card(s); 124 if (!strncmp(buf, "on", 2))
125 pcmcia_parse_uevents(s, PCMCIA_UEVENT_RESUME);
126 else
127 ret = -EINVAL;
128 }
126 129
127 return ret ? -ENODEV : count; 130 return ret;
128} 131}
129static DEVICE_ATTR(card_pm_state, 0644, pccard_show_card_pm_state, pccard_store_card_pm_state); 132static DEVICE_ATTR(card_pm_state, 0644, pccard_show_card_pm_state, pccard_store_card_pm_state);
130 133
@@ -132,15 +135,14 @@ static ssize_t pccard_store_eject(struct device *dev,
132 struct device_attribute *attr, 135 struct device_attribute *attr,
133 const char *buf, size_t count) 136 const char *buf, size_t count)
134{ 137{
135 ssize_t ret;
136 struct pcmcia_socket *s = to_socket(dev); 138 struct pcmcia_socket *s = to_socket(dev);
137 139
138 if (!count) 140 if (!count)
139 return -EINVAL; 141 return -EINVAL;
140 142
141 ret = pcmcia_eject_card(s); 143 pcmcia_parse_uevents(s, PCMCIA_UEVENT_EJECT);
142 144
143 return ret ? ret : count; 145 return count;
144} 146}
145static DEVICE_ATTR(card_eject, 0200, NULL, pccard_store_eject); 147static DEVICE_ATTR(card_eject, 0200, NULL, pccard_store_eject);
146 148
@@ -167,7 +169,9 @@ static ssize_t pccard_store_irq_mask(struct device *dev,
167 ret = sscanf(buf, "0x%x\n", &mask); 169 ret = sscanf(buf, "0x%x\n", &mask);
168 170
169 if (ret == 1) { 171 if (ret == 1) {
172 mutex_lock(&s->ops_mutex);
170 s->irq_mask &= mask; 173 s->irq_mask &= mask;
174 mutex_unlock(&s->ops_mutex);
171 ret = 0; 175 ret = 0;
172 } 176 }
173 177
@@ -187,163 +191,21 @@ static ssize_t pccard_store_resource(struct device *dev,
187 struct device_attribute *attr, 191 struct device_attribute *attr,
188 const char *buf, size_t count) 192 const char *buf, size_t count)
189{ 193{
190 unsigned long flags;
191 struct pcmcia_socket *s = to_socket(dev); 194 struct pcmcia_socket *s = to_socket(dev);
192 195
193 if (!count) 196 if (!count)
194 return -EINVAL; 197 return -EINVAL;
195 198
196 spin_lock_irqsave(&s->lock, flags); 199 mutex_lock(&s->ops_mutex);
197 if (!s->resource_setup_done) 200 if (!s->resource_setup_done)
198 s->resource_setup_done = 1; 201 s->resource_setup_done = 1;
199 spin_unlock_irqrestore(&s->lock, flags); 202 mutex_unlock(&s->ops_mutex);
200
201 mutex_lock(&s->skt_mutex);
202 if ((s->callback) &&
203 (s->state & SOCKET_PRESENT) &&
204 !(s->state & SOCKET_CARDBUS)) {
205 if (try_module_get(s->callback->owner)) {
206 s->callback->requery(s, 0);
207 module_put(s->callback->owner);
208 }
209 }
210 mutex_unlock(&s->skt_mutex);
211
212 return count;
213}
214static DEVICE_ATTR(available_resources_setup_done, 0600, pccard_show_resource, pccard_store_resource);
215 203
216 204 pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY);
217static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off, size_t count)
218{
219 tuple_t tuple;
220 int status, i;
221 loff_t pointer = 0;
222 ssize_t ret = 0;
223 u_char *tuplebuffer;
224 u_char *tempbuffer;
225
226 tuplebuffer = kmalloc(sizeof(u_char) * 256, GFP_KERNEL);
227 if (!tuplebuffer)
228 return -ENOMEM;
229
230 tempbuffer = kmalloc(sizeof(u_char) * 258, GFP_KERNEL);
231 if (!tempbuffer) {
232 ret = -ENOMEM;
233 goto free_tuple;
234 }
235
236 memset(&tuple, 0, sizeof(tuple_t));
237
238 tuple.Attributes = TUPLE_RETURN_LINK | TUPLE_RETURN_COMMON;
239 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
240 tuple.TupleOffset = 0;
241
242 status = pccard_get_first_tuple(s, BIND_FN_ALL, &tuple);
243 while (!status) {
244 tuple.TupleData = tuplebuffer;
245 tuple.TupleDataMax = 255;
246 memset(tuplebuffer, 0, sizeof(u_char) * 255);
247
248 status = pccard_get_tuple_data(s, &tuple);
249 if (status)
250 break;
251
252 if (off < (pointer + 2 + tuple.TupleDataLen)) {
253 tempbuffer[0] = tuple.TupleCode & 0xff;
254 tempbuffer[1] = tuple.TupleLink & 0xff;
255 for (i = 0; i < tuple.TupleDataLen; i++)
256 tempbuffer[i + 2] = tuplebuffer[i] & 0xff;
257
258 for (i = 0; i < (2 + tuple.TupleDataLen); i++) {
259 if (((i + pointer) >= off) &&
260 (i + pointer) < (off + count)) {
261 buf[ret] = tempbuffer[i];
262 ret++;
263 }
264 }
265 }
266
267 pointer += 2 + tuple.TupleDataLen;
268
269 if (pointer >= (off + count))
270 break;
271
272 if (tuple.TupleCode == CISTPL_END)
273 break;
274 status = pccard_get_next_tuple(s, BIND_FN_ALL, &tuple);
275 }
276
277 kfree(tempbuffer);
278 free_tuple:
279 kfree(tuplebuffer);
280
281 return ret;
282}
283
284static ssize_t pccard_show_cis(struct kobject *kobj,
285 struct bin_attribute *bin_attr,
286 char *buf, loff_t off, size_t count)
287{
288 unsigned int size = 0x200;
289
290 if (off >= size)
291 count = 0;
292 else {
293 struct pcmcia_socket *s;
294 unsigned int chains;
295
296 if (off + count > size)
297 count = size - off;
298
299 s = to_socket(container_of(kobj, struct device, kobj));
300
301 if (!(s->state & SOCKET_PRESENT))
302 return -ENODEV;
303 if (pccard_validate_cis(s, &chains))
304 return -EIO;
305 if (!chains)
306 return -ENODATA;
307
308 count = pccard_extract_cis(s, buf, off, count);
309 }
310
311 return count;
312}
313
314static ssize_t pccard_store_cis(struct kobject *kobj,
315 struct bin_attribute *bin_attr,
316 char *buf, loff_t off, size_t count)
317{
318 struct pcmcia_socket *s = to_socket(container_of(kobj, struct device, kobj));
319 int error;
320
321 if (off)
322 return -EINVAL;
323
324 if (count >= CISTPL_MAX_CIS_SIZE)
325 return -EINVAL;
326
327 if (!(s->state & SOCKET_PRESENT))
328 return -ENODEV;
329
330 error = pcmcia_replace_cis(s, buf, count);
331 if (error)
332 return -EIO;
333
334 mutex_lock(&s->skt_mutex);
335 if ((s->callback) && (s->state & SOCKET_PRESENT) &&
336 !(s->state & SOCKET_CARDBUS)) {
337 if (try_module_get(s->callback->owner)) {
338 s->callback->requery(s, 1);
339 module_put(s->callback->owner);
340 }
341 }
342 mutex_unlock(&s->skt_mutex);
343 205
344 return count; 206 return count;
345} 207}
346 208static DEVICE_ATTR(available_resources_setup_done, 0600, pccard_show_resource, pccard_store_resource);
347 209
348static struct attribute *pccard_socket_attributes[] = { 210static struct attribute *pccard_socket_attributes[] = {
349 &dev_attr_card_type.attr, 211 &dev_attr_card_type.attr,
@@ -362,28 +224,12 @@ static const struct attribute_group socket_attrs = {
362 .attrs = pccard_socket_attributes, 224 .attrs = pccard_socket_attributes,
363}; 225};
364 226
365static struct bin_attribute pccard_cis_attr = {
366 .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
367 .size = 0x200,
368 .read = pccard_show_cis,
369 .write = pccard_store_cis,
370};
371
372int pccard_sysfs_add_socket(struct device *dev) 227int pccard_sysfs_add_socket(struct device *dev)
373{ 228{
374 int ret = 0; 229 return sysfs_create_group(&dev->kobj, &socket_attrs);
375
376 ret = sysfs_create_group(&dev->kobj, &socket_attrs);
377 if (!ret) {
378 ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
379 if (ret)
380 sysfs_remove_group(&dev->kobj, &socket_attrs);
381 }
382 return ret;
383} 230}
384 231
385void pccard_sysfs_remove_socket(struct device *dev) 232void pccard_sysfs_remove_socket(struct device *dev)
386{ 233{
387 sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr);
388 sysfs_remove_group(&dev->kobj, &socket_attrs); 234 sysfs_remove_group(&dev->kobj, &socket_attrs);
389} 235}
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
new file mode 100644
index 000000000000..61560cd6e287
--- /dev/null
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -0,0 +1,350 @@
1/*
2 * PCMCIA socket code for the MyCable XXS1500 system.
3 *
4 * Copyright (c) 2009 Manuel Lauss <manuel.lauss@gmail.com>
5 *
6 */
7
8#include <linux/delay.h>
9#include <linux/gpio.h>
10#include <linux/interrupt.h>
11#include <linux/io.h>
12#include <linux/ioport.h>
13#include <linux/mm.h>
14#include <linux/platform_device.h>
15#include <linux/pm.h>
16#include <linux/resource.h>
17#include <linux/spinlock.h>
18
19#include <pcmcia/cs_types.h>
20#include <pcmcia/cs.h>
21#include <pcmcia/ss.h>
22#include <pcmcia/cistpl.h>
23
24#include <asm/irq.h>
25#include <asm/system.h>
26#include <asm/mach-au1x00/au1000.h>
27
28#define MEM_MAP_SIZE 0x400000
29#define IO_MAP_SIZE 0x1000
30
31
32/*
33 * 3.3V cards only; all interfacing is done via gpios:
34 *
35 * 0/1: carddetect (00 = card present, anything else = no card)
36 * 4: card irq
37 * 204: reset (high-act)
38 * 205: buffer enable (low-act)
39 * 208/209: card voltage key (00,01,10,11)
40 * 210: battwarn
41 * 211: batdead
42 * 214: power (low-act)
43 */
44#define GPIO_CDA 0
45#define GPIO_CDB 1
46#define GPIO_CARDIRQ 4
47#define GPIO_RESET 204
48#define GPIO_OUTEN 205
49#define GPIO_VSL 208
50#define GPIO_VSH 209
51#define GPIO_BATTDEAD 210
52#define GPIO_BATTWARN 211
53#define GPIO_POWER 214
54
55struct xxs1500_pcmcia_sock {
56 struct pcmcia_socket socket;
57 void *virt_io;
58
59 phys_addr_t phys_io;
60 phys_addr_t phys_attr;
61 phys_addr_t phys_mem;
62
63 /* previous flags for set_socket() */
64 unsigned int old_flags;
65};
66
67#define to_xxs_socket(x) container_of(x, struct xxs1500_pcmcia_sock, socket)
68
69static irqreturn_t cdirq(int irq, void *data)
70{
71 struct xxs1500_pcmcia_sock *sock = data;
72
73 pcmcia_parse_events(&sock->socket, SS_DETECT);
74
75 return IRQ_HANDLED;
76}
77
78static int xxs1500_pcmcia_configure(struct pcmcia_socket *skt,
79 struct socket_state_t *state)
80{
81 struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
82 unsigned int changed;
83
84 /* power control */
85 switch (state->Vcc) {
86 case 0:
87 gpio_set_value(GPIO_POWER, 1); /* power off */
88 break;
89 case 33:
90 gpio_set_value(GPIO_POWER, 0); /* power on */
91 break;
92 case 50:
93 default:
94 return -EINVAL;
95 }
96
97 changed = state->flags ^ sock->old_flags;
98
99 if (changed & SS_RESET) {
100 if (state->flags & SS_RESET) {
101 gpio_set_value(GPIO_RESET, 1); /* assert reset */
102 gpio_set_value(GPIO_OUTEN, 1); /* buffers off */
103 } else {
104 gpio_set_value(GPIO_RESET, 0); /* deassert reset */
105 gpio_set_value(GPIO_OUTEN, 0); /* buffers on */
106 msleep(500);
107 }
108 }
109
110 sock->old_flags = state->flags;
111
112 return 0;
113}
114
115static int xxs1500_pcmcia_get_status(struct pcmcia_socket *skt,
116 unsigned int *value)
117{
118 unsigned int status;
119 int i;
120
121 status = 0;
122
123 /* check carddetects: GPIO[0:1] must both be low */
124 if (!gpio_get_value(GPIO_CDA) && !gpio_get_value(GPIO_CDB))
125 status |= SS_DETECT;
126
127 /* determine card voltage: GPIO[208:209] binary value */
128 i = (!!gpio_get_value(GPIO_VSL)) | ((!!gpio_get_value(GPIO_VSH)) << 1);
129
130 switch (i) {
131 case 0:
132 case 1:
133 case 2:
134 status |= SS_3VCARD; /* 3V card */
135 break;
136 case 3: /* 5V card, unsupported */
137 default:
138 status |= SS_XVCARD; /* treated as unsupported in core */
139 }
140
141 /* GPIO214: low active power switch */
142 status |= gpio_get_value(GPIO_POWER) ? 0 : SS_POWERON;
143
144 /* GPIO204: high-active reset line */
145 status |= gpio_get_value(GPIO_RESET) ? SS_RESET : SS_READY;
146
147 /* other stuff */
148 status |= gpio_get_value(GPIO_BATTDEAD) ? 0 : SS_BATDEAD;
149 status |= gpio_get_value(GPIO_BATTWARN) ? 0 : SS_BATWARN;
150
151 *value = status;
152
153 return 0;
154}
155
156static int xxs1500_pcmcia_sock_init(struct pcmcia_socket *skt)
157{
158 gpio_direction_input(GPIO_CDA);
159 gpio_direction_input(GPIO_CDB);
160 gpio_direction_input(GPIO_VSL);
161 gpio_direction_input(GPIO_VSH);
162 gpio_direction_input(GPIO_BATTDEAD);
163 gpio_direction_input(GPIO_BATTWARN);
164 gpio_direction_output(GPIO_RESET, 1); /* assert reset */
165 gpio_direction_output(GPIO_OUTEN, 1); /* disable buffers */
166 gpio_direction_output(GPIO_POWER, 1); /* power off */
167
168 return 0;
169}
170
171static int xxs1500_pcmcia_sock_suspend(struct pcmcia_socket *skt)
172{
173 return 0;
174}
175
176static int au1x00_pcmcia_set_io_map(struct pcmcia_socket *skt,
177 struct pccard_io_map *map)
178{
179 struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
180
181 map->start = (u32)sock->virt_io;
182 map->stop = map->start + IO_MAP_SIZE;
183
184 return 0;
185}
186
187static int au1x00_pcmcia_set_mem_map(struct pcmcia_socket *skt,
188 struct pccard_mem_map *map)
189{
190 struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
191
192 if (map->flags & MAP_ATTRIB)
193 map->static_start = sock->phys_attr + map->card_start;
194 else
195 map->static_start = sock->phys_mem + map->card_start;
196
197 return 0;
198}
199
200static struct pccard_operations xxs1500_pcmcia_operations = {
201 .init = xxs1500_pcmcia_sock_init,
202 .suspend = xxs1500_pcmcia_sock_suspend,
203 .get_status = xxs1500_pcmcia_get_status,
204 .set_socket = xxs1500_pcmcia_configure,
205 .set_io_map = au1x00_pcmcia_set_io_map,
206 .set_mem_map = au1x00_pcmcia_set_mem_map,
207};
208
209static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
210{
211 struct xxs1500_pcmcia_sock *sock;
212 struct resource *r;
213 int ret, irq;
214
215 sock = kzalloc(sizeof(struct xxs1500_pcmcia_sock), GFP_KERNEL);
216 if (!sock)
217 return -ENOMEM;
218
219 ret = -ENODEV;
220
221 /*
222 * pseudo-attr: The 32bit address of the PCMCIA attribute space
223 * for this socket (usually the 36bit address shifted 4 to the
224 * right).
225 */
226 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
227 if (!r) {
228 dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n");
229 goto out0;
230 }
231 sock->phys_attr = r->start;
232
233 /*
234 * pseudo-mem: The 32bit address of the PCMCIA memory space for
235 * this socket (usually the 36bit address shifted 4 to the right)
236 */
237 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
238 if (!r) {
239 dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n");
240 goto out0;
241 }
242 sock->phys_mem = r->start;
243
244 /*
245 * pseudo-io: The 32bit address of the PCMCIA IO space for this
246 * socket (usually the 36bit address shifted 4 to the right).
247 */
248 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
249 if (!r) {
250 dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n");
251 goto out0;
252 }
253 sock->phys_io = r->start;
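
The "36bit address shifted 4 to the right" convention in the comments above
simply packs a 36-bit Alchemy bus address into a 32-bit resource value. A
stand-alone illustration (the address below is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical 36-bit PCMCIA attribute-space address on the bus. */
	uint64_t phys36 = 0x400001000ULL;

	/* What a board file would put into the 32-bit platform resource. */
	uint32_t res32 = (uint32_t)(phys36 >> 4);

	printf("36-bit 0x%09llx -> 32-bit resource 0x%08x\n",
	       (unsigned long long)phys36, res32);
	return 0;
}
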
254
255
256 /*
257 * PCMCIA client drivers use the inb/outb macros to access
258 * the IO registers. Since mips_io_port_base is added
259 * to the access address of the mips implementation of
260 * inb/outb, we need to subtract it here because we want
261 * to access the I/O or MEM address directly, without
262 * going through this "mips_io_port_base" mechanism.
263 */
264 sock->virt_io = (void *)(ioremap(sock->phys_io, IO_MAP_SIZE) -
265 mips_io_port_base);
266
267 if (!sock->virt_io) {
268 dev_err(&pdev->dev, "cannot remap IO area\n");
269 ret = -ENOMEM;
270 goto out0;
271 }
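
The comment above boils down to plain address arithmetic: on MIPS, inb(port)
reads from mips_io_port_base + port, so storing the remapped window minus
mips_io_port_base lets the two additions cancel out. A stand-alone sketch
with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* All values are illustrative only. */
	uintptr_t mips_io_port_base = 0xb0000000;  /* base added by inb()/outb() */
	uintptr_t remapped          = 0xc0100000;  /* result of remapping phys_io */
	uintptr_t port              = 0x3f8;       /* example card I/O port       */

	uintptr_t virt_io   = remapped - mips_io_port_base;       /* sock->virt_io   */
	uintptr_t io_access = mips_io_port_base + virt_io + port; /* what inb() hits */

	printf("inb(io_offset + 0x%lx) touches 0x%lx (remapped window + port: %s)\n",
	       (unsigned long)port, (unsigned long)io_access,
	       io_access == remapped + port ? "yes" : "no");
	return 0;
}
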
272
273 sock->socket.ops = &xxs1500_pcmcia_operations;
274 sock->socket.owner = THIS_MODULE;
275 sock->socket.pci_irq = gpio_to_irq(GPIO_CARDIRQ);
276 sock->socket.features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
277 sock->socket.map_size = MEM_MAP_SIZE;
278 sock->socket.io_offset = (unsigned long)sock->virt_io;
279 sock->socket.dev.parent = &pdev->dev;
280 sock->socket.resource_ops = &pccard_static_ops;
281
282 platform_set_drvdata(pdev, sock);
283
284 /* setup carddetect irq: use one of the 2 GPIOs as an
285 * edge detector.
286 */
287 irq = gpio_to_irq(GPIO_CDA);
288 set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
289 ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock);
290 if (ret) {
291 dev_err(&pdev->dev, "cannot setup cd irq\n");
292 goto out1;
293 }
294
295 ret = pcmcia_register_socket(&sock->socket);
296 if (ret) {
297 dev_err(&pdev->dev, "failed to register\n");
298 goto out2;
299 }
300
301 printk(KERN_INFO "MyCable XXS1500 PCMCIA socket services\n");
302
303 return 0;
304
305out2:
306 free_irq(gpio_to_irq(GPIO_CDA), sock);
307out1:
308 iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
309out0:
310 kfree(sock);
311 return ret;
312}
313
314static int __devexit xxs1500_pcmcia_remove(struct platform_device *pdev)
315{
316 struct xxs1500_pcmcia_sock *sock = platform_get_drvdata(pdev);
317
318 pcmcia_unregister_socket(&sock->socket);
319 free_irq(gpio_to_irq(GPIO_CDA), sock);
320 iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
321 kfree(sock);
322
323 return 0;
324}
325
326static struct platform_driver xxs1500_pcmcia_socket_driver = {
327 .driver = {
328 .name = "xxs1500_pcmcia",
329 .owner = THIS_MODULE,
330 },
331 .probe = xxs1500_pcmcia_probe,
332 .remove = __devexit_p(xxs1500_pcmcia_remove),
333};
334
335int __init xxs1500_pcmcia_socket_load(void)
336{
337 return platform_driver_register(&xxs1500_pcmcia_socket_driver);
338}
339
340void __exit xxs1500_pcmcia_socket_unload(void)
341{
342 platform_driver_unregister(&xxs1500_pcmcia_socket_driver);
343}
344
345module_init(xxs1500_pcmcia_socket_load);
346module_exit(xxs1500_pcmcia_socket_unload);
347
348MODULE_LICENSE("GPL");
349MODULE_DESCRIPTION("PCMCIA Socket Services for MyCable XXS1500 systems");
350MODULE_AUTHOR("Manuel Lauss");
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 1f2039d5e966..b85375f87622 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -37,6 +37,11 @@ static int pwr_irqs_off;
37module_param(pwr_irqs_off, bool, 0644); 37module_param(pwr_irqs_off, bool, 0644);
38MODULE_PARM_DESC(pwr_irqs_off, "Force IRQs off during power-on of slot. Use only when seeing IRQ storms!"); 38MODULE_PARM_DESC(pwr_irqs_off, "Force IRQs off during power-on of slot. Use only when seeing IRQ storms!");
39 39
40static char o2_speedup[] = "default";
41module_param_string(o2_speedup, o2_speedup, sizeof(o2_speedup), 0444);
42MODULE_PARM_DESC(o2_speedup, "Use prefetch/burst for O2-bridges: 'on', 'off' "
43 "or 'default' (uses recommended behaviour for the detected bridge)");
44
40#define debug(x, s, args...) dev_dbg(&s->dev->dev, x, ##args) 45#define debug(x, s, args...) dev_dbg(&s->dev->dev, x, ##args)
41 46
42/* Don't ask.. */ 47/* Don't ask.. */
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index e82d8c9c6cda..95a689befc84 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -532,7 +532,7 @@ static void ps3av_set_videomode_packet(u32 id)
532 res = ps3av_cmd_avb_param(&avb_param, len); 532 res = ps3av_cmd_avb_param(&avb_param, len);
533 if (res == PS3AV_STATUS_NO_SYNC_HEAD) 533 if (res == PS3AV_STATUS_NO_SYNC_HEAD)
534 printk(KERN_WARNING 534 printk(KERN_WARNING
535 "%s: Command failed. Please try your request again. \n", 535 "%s: Command failed. Please try your request again.\n",
536 __func__); 536 __func__);
537 else if (res) 537 else if (res)
538 dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n"); 538 dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 8167e9e6827a..2bb8a8b7ffaf 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -868,4 +868,14 @@ config RTC_DRV_MC13783
868 help 868 help
869 This enables support for the Freescale MC13783 PMIC RTC 869 This enables support for the Freescale MC13783 PMIC RTC
870 870
871config RTC_DRV_MPC5121
872 tristate "Freescale MPC5121 built-in RTC"
873 depends on PPC_MPC512x && RTC_CLASS
874 help
875 If you say yes here you will get support for the
876 built-in RTC of the MPC5121.
877
878 This driver can also be built as a module. If so, the module
879 will be called rtc-mpc5121.
880
871endif # RTC_CLASS 881endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index e5160fddc446..b7148afb8f55 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
55obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o 55obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
56obj-$(CONFIG_RTC_DRV_MC13783) += rtc-mc13783.o 56obj-$(CONFIG_RTC_DRV_MC13783) += rtc-mc13783.o
57obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o 57obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
58obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
58obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o 59obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
59obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o 60obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
60obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o 61obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
new file mode 100644
index 000000000000..4313ca03a96d
--- /dev/null
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -0,0 +1,387 @@
1/*
2 * Real-time clock driver for MPC5121
3 *
4 * Copyright 2007, Domen Puncer <domen.puncer@telargo.com>
5 * Copyright 2008, Freescale Semiconductor, Inc. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/rtc.h>
15#include <linux/of_device.h>
16#include <linux/of_platform.h>
17#include <linux/io.h>
18
19struct mpc5121_rtc_regs {
20 u8 set_time; /* RTC + 0x00 */
21 u8 hour_set; /* RTC + 0x01 */
22 u8 minute_set; /* RTC + 0x02 */
23 u8 second_set; /* RTC + 0x03 */
24
25 u8 set_date; /* RTC + 0x04 */
26 u8 month_set; /* RTC + 0x05 */
27 u8 weekday_set; /* RTC + 0x06 */
28 u8 date_set; /* RTC + 0x07 */
29
30 u8 write_sw; /* RTC + 0x08 */
31 u8 sw_set; /* RTC + 0x09 */
32 u16 year_set; /* RTC + 0x0a */
33
34 u8 alm_enable; /* RTC + 0x0c */
35 u8 alm_hour_set; /* RTC + 0x0d */
36 u8 alm_min_set; /* RTC + 0x0e */
37 u8 int_enable; /* RTC + 0x0f */
38
39 u8 reserved1;
40 u8 hour; /* RTC + 0x11 */
41 u8 minute; /* RTC + 0x12 */
42 u8 second; /* RTC + 0x13 */
43
44 u8 month; /* RTC + 0x14 */
45 u8 wday_mday; /* RTC + 0x15 */
46 u16 year; /* RTC + 0x16 */
47
48 u8 int_alm; /* RTC + 0x18 */
49 u8 int_sw; /* RTC + 0x19 */
50 u8 alm_status; /* RTC + 0x1a */
51 u8 sw_minute; /* RTC + 0x1b */
52
53 u8 bus_error_1; /* RTC + 0x1c */
54 u8 int_day; /* RTC + 0x1d */
55 u8 int_min; /* RTC + 0x1e */
56 u8 int_sec; /* RTC + 0x1f */
57
58 /*
59 * target_time:
60 * intended to be used for hibernation, but hibernation
61 * does not work on silicon rev 1.5, so it is used for
62 * non-volatile storage of the offset between the actual_time
63 * register and Linux time
64 */
65 u32 target_time; /* RTC + 0x20 */
66 /*
67 * actual_time:
68 * read-only count of seconds since VBAT_RTC was last connected
69 */
70 u32 actual_time; /* RTC + 0x24 */
71 u32 keep_alive; /* RTC + 0x28 */
72};
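
With the scheme described for target_time above, the driver's read_time()
returns actual_time + target_time, and set_time() stores wall_time -
actual_time back into target_time. A tiny stand-alone check of that round
trip (the numbers are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t actual_time = 1000;        /* seconds since VBAT_RTC was connected */
	uint32_t wall        = 1267000000;  /* UNIX time handed to set_time()       */

	uint32_t target_time = wall - actual_time;         /* stored by set_time()    */
	uint32_t readback    = actual_time + target_time;  /* computed by read_time() */

	printf("target_time=%lu, read back %lu (matches: %s)\n",
	       (unsigned long)target_time, (unsigned long)readback,
	       readback == wall ? "yes" : "no");
	return 0;
}
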
73
74struct mpc5121_rtc_data {
75 unsigned irq;
76 unsigned irq_periodic;
77 struct mpc5121_rtc_regs __iomem *regs;
78 struct rtc_device *rtc;
79 struct rtc_wkalrm wkalarm;
80};
81
82/*
83 * Update second/minute/hour registers.
84 *
85 * This is just so alarm will work.
86 */
87static void mpc5121_rtc_update_smh(struct mpc5121_rtc_regs __iomem *regs,
88 struct rtc_time *tm)
89{
90 out_8(&regs->second_set, tm->tm_sec);
91 out_8(&regs->minute_set, tm->tm_min);
92 out_8(&regs->hour_set, tm->tm_hour);
93
94 /* set time sequence */
95 out_8(&regs->set_time, 0x1);
96 out_8(&regs->set_time, 0x3);
97 out_8(&regs->set_time, 0x1);
98 out_8(&regs->set_time, 0x0);
99}
100
101static int mpc5121_rtc_read_time(struct device *dev, struct rtc_time *tm)
102{
103 struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
104 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
105 unsigned long now;
106
107 /*
108 * linux time is actual_time plus the offset saved in target_time
109 */
110 now = in_be32(&regs->actual_time) + in_be32(&regs->target_time);
111
112 rtc_time_to_tm(now, tm);
113
114 /*
115 * update second minute hour registers
116 * so alarms will work
117 */
118 mpc5121_rtc_update_smh(regs, tm);
119
120 return rtc_valid_tm(tm);
121}
122
123static int mpc5121_rtc_set_time(struct device *dev, struct rtc_time *tm)
124{
125 struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
126 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
127 int ret;
128 unsigned long now;
129
130 /*
131 * The actual_time register is read only so we write the offset
132 * between it and linux time to the target_time register.
133 */
134 ret = rtc_tm_to_time(tm, &now);
135 if (ret == 0)
136 out_be32(&regs->target_time, now - in_be32(&regs->actual_time));
137
138 /*
139 * update second minute hour registers
140 * so alarms will work
141 */
142 mpc5121_rtc_update_smh(regs, tm);
143
144 return 0;
145}
146
147static int mpc5121_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
148{
149 struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
150 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
151
152 *alarm = rtc->wkalarm;
153
154 alarm->pending = in_8(&regs->alm_status);
155
156 return 0;
157}
158
159static int mpc5121_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
160{
161 struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
162 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
163
164 /*
165 * the alarm has no seconds field, so round up to the next minute
166 */
167 if (alarm->time.tm_sec) {
168 alarm->time.tm_sec = 0;
169 alarm->time.tm_min++;
170 if (alarm->time.tm_min >= 60) {
171 alarm->time.tm_min = 0;
172 alarm->time.tm_hour++;
173 if (alarm->time.tm_hour >= 24)
174 alarm->time.tm_hour = 0;
175 }
176 }
177
178 alarm->time.tm_mday = -1;
179 alarm->time.tm_mon = -1;
180 alarm->time.tm_year = -1;
181
182 out_8(&regs->alm_min_set, alarm->time.tm_min);
183 out_8(&regs->alm_hour_set, alarm->time.tm_hour);
184
185 out_8(&regs->alm_enable, alarm->enabled);
186
187 rtc->wkalarm = *alarm;
188 return 0;
189}
190
191static irqreturn_t mpc5121_rtc_handler(int irq, void *dev)
192{
193 struct mpc5121_rtc_data *rtc = dev_get_drvdata((struct device *)dev);
194 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
195
196 if (in_8(&regs->int_alm)) {
197 /* acknowledge and clear status */
198 out_8(&regs->int_alm, 1);
199 out_8(&regs->alm_status, 1);
200
201 rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
202 return IRQ_HANDLED;
203 }
204
205 return IRQ_NONE;
206}
207
208static irqreturn_t mpc5121_rtc_handler_upd(int irq, void *dev)
209{
210 struct mpc5121_rtc_data *rtc = dev_get_drvdata((struct device *)dev);
211 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
212
213 if (in_8(&regs->int_sec) && (in_8(&regs->int_enable) & 0x1)) {
214 /* acknowledge */
215 out_8(&regs->int_sec, 1);
216
217 rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_UF);
218 return IRQ_HANDLED;
219 }
220
221 return IRQ_NONE;
222}
223
224static int mpc5121_rtc_alarm_irq_enable(struct device *dev,
225 unsigned int enabled)
226{
227 struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
228 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
229 int val;
230
231 if (enabled)
232 val = 1;
233 else
234 val = 0;
235
236 out_8(&regs->alm_enable, val);
237 rtc->wkalarm.enabled = val;
238
239 return 0;
240}
241
242static int mpc5121_rtc_update_irq_enable(struct device *dev,
243 unsigned int enabled)
244{
245 struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
246 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
247 int val;
248
249 val = in_8(&regs->int_enable);
250
251 if (enabled)
252 val = (val & ~0x8) | 0x1;
253 else
254 val &= ~0x1;
255
256 out_8(&regs->int_enable, val);
257
258 return 0;
259}
260
261static const struct rtc_class_ops mpc5121_rtc_ops = {
262 .read_time = mpc5121_rtc_read_time,
263 .set_time = mpc5121_rtc_set_time,
264 .read_alarm = mpc5121_rtc_read_alarm,
265 .set_alarm = mpc5121_rtc_set_alarm,
266 .alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
267 .update_irq_enable = mpc5121_rtc_update_irq_enable,
268};
269
270static int __devinit mpc5121_rtc_probe(struct of_device *op,
271 const struct of_device_id *match)
272{
273 struct mpc5121_rtc_data *rtc;
274 int err = 0;
275 u32 ka;
276
277 rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
278 if (!rtc)
279 return -ENOMEM;
280
281 rtc->regs = of_iomap(op->node, 0);
282 if (!rtc->regs) {
283 dev_err(&op->dev, "%s: couldn't map io space\n", __func__);
284 err = -ENOSYS;
285 goto out_free;
286 }
287
288 device_init_wakeup(&op->dev, 1);
289
290 dev_set_drvdata(&op->dev, rtc);
291
292 rtc->irq = irq_of_parse_and_map(op->node, 1);
293 err = request_irq(rtc->irq, mpc5121_rtc_handler, IRQF_DISABLED,
294 "mpc5121-rtc", &op->dev);
295 if (err) {
296 dev_err(&op->dev, "%s: could not request irq: %i\n",
297 __func__, rtc->irq);
298 goto out_dispose;
299 }
300
301 rtc->irq_periodic = irq_of_parse_and_map(op->node, 0);
302 err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd,
303 IRQF_DISABLED, "mpc5121-rtc_upd", &op->dev);
304 if (err) {
305 dev_err(&op->dev, "%s: could not request irq: %i\n",
306 __func__, rtc->irq_periodic);
307 goto out_dispose2;
308 }
309
310 ka = in_be32(&rtc->regs->keep_alive);
311 if (ka & 0x02) {
312 dev_warn(&op->dev,
313 "mpc5121-rtc: Battery or oscillator failure!\n");
314 out_be32(&rtc->regs->keep_alive, ka);
315 }
316
317 rtc->rtc = rtc_device_register("mpc5121-rtc", &op->dev,
318 &mpc5121_rtc_ops, THIS_MODULE);
319 if (IS_ERR(rtc->rtc)) {
320 err = PTR_ERR(rtc->rtc);
321 goto out_free_irq;
322 }
323
324 return 0;
325
326out_free_irq:
327 free_irq(rtc->irq_periodic, &op->dev);
328out_dispose2:
329 irq_dispose_mapping(rtc->irq_periodic);
330 free_irq(rtc->irq, &op->dev);
331out_dispose:
332 irq_dispose_mapping(rtc->irq);
333 iounmap(rtc->regs);
334out_free:
335 kfree(rtc);
336
337 return err;
338}
339
340static int __devexit mpc5121_rtc_remove(struct of_device *op)
341{
342 struct mpc5121_rtc_data *rtc = dev_get_drvdata(&op->dev);
343 struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
344
345 /* disable interrupt, so there are no nasty surprises */
346 out_8(&regs->alm_enable, 0);
347 out_8(&regs->int_enable, in_8(&regs->int_enable) & ~0x1);
348
349 rtc_device_unregister(rtc->rtc);
350 iounmap(rtc->regs);
351 free_irq(rtc->irq, &op->dev);
352 free_irq(rtc->irq_periodic, &op->dev);
353 irq_dispose_mapping(rtc->irq);
354 irq_dispose_mapping(rtc->irq_periodic);
355 dev_set_drvdata(&op->dev, NULL);
356 kfree(rtc);
357
358 return 0;
359}
360
361static struct of_device_id mpc5121_rtc_match[] __devinitdata = {
362 { .compatible = "fsl,mpc5121-rtc", },
363 {},
364};
365
366static struct of_platform_driver mpc5121_rtc_driver = {
367 .owner = THIS_MODULE,
368 .name = "mpc5121-rtc",
369 .match_table = mpc5121_rtc_match,
370 .probe = mpc5121_rtc_probe,
371 .remove = __devexit_p(mpc5121_rtc_remove),
372};
373
374static int __init mpc5121_rtc_init(void)
375{
376 return of_register_platform_driver(&mpc5121_rtc_driver);
377}
378module_init(mpc5121_rtc_init);
379
380static void __exit mpc5121_rtc_exit(void)
381{
382 of_unregister_platform_driver(&mpc5121_rtc_driver);
383}
384module_exit(mpc5121_rtc_exit);
385
386MODULE_LICENSE("GPL");
387MODULE_AUTHOR("John Rigby <jcrigby@gmail.com>");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5905936c7c60..9ab1ae40565f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -20,6 +20,7 @@
20#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
21#include <linux/hdreg.h> 21#include <linux/hdreg.h>
22#include <linux/async.h> 22#include <linux/async.h>
23#include <linux/mutex.h>
23 24
24#include <asm/ccwdev.h> 25#include <asm/ccwdev.h>
25#include <asm/ebcdic.h> 26#include <asm/ebcdic.h>
@@ -112,6 +113,7 @@ struct dasd_device *dasd_alloc_device(void)
112 INIT_WORK(&device->restore_device, do_restore_device); 113 INIT_WORK(&device->restore_device, do_restore_device);
113 device->state = DASD_STATE_NEW; 114 device->state = DASD_STATE_NEW;
114 device->target = DASD_STATE_NEW; 115 device->target = DASD_STATE_NEW;
116 mutex_init(&device->state_mutex);
115 117
116 return device; 118 return device;
117} 119}
@@ -321,8 +323,8 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
321 device->state = DASD_STATE_READY; 323 device->state = DASD_STATE_READY;
322 return rc; 324 return rc;
323 } 325 }
324 dasd_destroy_partitions(block);
325 dasd_flush_request_queue(block); 326 dasd_flush_request_queue(block);
327 dasd_destroy_partitions(block);
326 block->blocks = 0; 328 block->blocks = 0;
327 block->bp_block = 0; 329 block->bp_block = 0;
328 block->s2b_shift = 0; 330 block->s2b_shift = 0;
@@ -484,10 +486,8 @@ static void dasd_change_state(struct dasd_device *device)
484 if (rc) 486 if (rc)
485 device->target = device->state; 487 device->target = device->state;
486 488
487 if (device->state == device->target) { 489 if (device->state == device->target)
488 wake_up(&dasd_init_waitq); 490 wake_up(&dasd_init_waitq);
489 dasd_put_device(device);
490 }
491 491
492 /* let user-space know that the device status changed */ 492 /* let user-space know that the device status changed */
493 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 493 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
@@ -502,7 +502,9 @@ static void dasd_change_state(struct dasd_device *device)
502static void do_kick_device(struct work_struct *work) 502static void do_kick_device(struct work_struct *work)
503{ 503{
504 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 504 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
505 mutex_lock(&device->state_mutex);
505 dasd_change_state(device); 506 dasd_change_state(device);
507 mutex_unlock(&device->state_mutex);
506 dasd_schedule_device_bh(device); 508 dasd_schedule_device_bh(device);
507 dasd_put_device(device); 509 dasd_put_device(device);
508} 510}
@@ -539,18 +541,19 @@ void dasd_restore_device(struct dasd_device *device)
539void dasd_set_target_state(struct dasd_device *device, int target) 541void dasd_set_target_state(struct dasd_device *device, int target)
540{ 542{
541 dasd_get_device(device); 543 dasd_get_device(device);
544 mutex_lock(&device->state_mutex);
542 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 545 /* If we are in probeonly mode stop at DASD_STATE_READY. */
543 if (dasd_probeonly && target > DASD_STATE_READY) 546 if (dasd_probeonly && target > DASD_STATE_READY)
544 target = DASD_STATE_READY; 547 target = DASD_STATE_READY;
545 if (device->target != target) { 548 if (device->target != target) {
546 if (device->state == target) { 549 if (device->state == target)
547 wake_up(&dasd_init_waitq); 550 wake_up(&dasd_init_waitq);
548 dasd_put_device(device);
549 }
550 device->target = target; 551 device->target = target;
551 } 552 }
552 if (device->state != device->target) 553 if (device->state != device->target)
553 dasd_change_state(device); 554 dasd_change_state(device);
555 mutex_unlock(&device->state_mutex);
556 dasd_put_device(device);
554} 557}
555 558
556/* 559/*
@@ -1000,12 +1003,20 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
1000 return; 1003 return;
1001 } 1004 }
1002 1005
1003 device = (struct dasd_device *) cqr->startdev; 1006 device = dasd_device_from_cdev_locked(cdev);
1004 if (device == NULL || 1007 if (IS_ERR(device)) {
1005 device != dasd_device_from_cdev_locked(cdev) || 1008 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1006 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1009 "unable to get device from cdev");
1010 return;
1011 }
1012
1013 if (!cqr->startdev ||
1014 device != cqr->startdev ||
1015 strncmp(cqr->startdev->discipline->ebcname,
1016 (char *) &cqr->magic, 4)) {
1007 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1017 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1008 "invalid device in request"); 1018 "invalid device in request");
1019 dasd_put_device(device);
1009 return; 1020 return;
1010 } 1021 }
1011 1022
@@ -1692,7 +1703,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
1692 cqr, rc); 1703 cqr, rc);
1693 } else { 1704 } else {
1694 cqr->stopclk = get_clock(); 1705 cqr->stopclk = get_clock();
1695 rc = 1;
1696 } 1706 }
1697 break; 1707 break;
1698 default: /* already finished or clear pending - do nothing */ 1708 default: /* already finished or clear pending - do nothing */
@@ -2170,9 +2180,13 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2170static int dasd_open(struct block_device *bdev, fmode_t mode) 2180static int dasd_open(struct block_device *bdev, fmode_t mode)
2171{ 2181{
2172 struct dasd_block *block = bdev->bd_disk->private_data; 2182 struct dasd_block *block = bdev->bd_disk->private_data;
2173 struct dasd_device *base = block->base; 2183 struct dasd_device *base;
2174 int rc; 2184 int rc;
2175 2185
2186 if (!block)
2187 return -ENODEV;
2188
2189 base = block->base;
2176 atomic_inc(&block->open_count); 2190 atomic_inc(&block->open_count);
2177 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 2191 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2178 rc = -ENODEV; 2192 rc = -ENODEV;
@@ -2285,11 +2299,6 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2285 if (ret) 2299 if (ret)
2286 pr_warning("%s: Setting the DASD online failed with rc=%d\n", 2300 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2287 dev_name(&cdev->dev), ret); 2301 dev_name(&cdev->dev), ret);
2288 else {
2289 struct dasd_device *device = dasd_device_from_cdev(cdev);
2290 wait_event(dasd_init_waitq, _wait_for_device(device));
2291 dasd_put_device(device);
2292 }
2293} 2302}
2294 2303
2295/* 2304/*
@@ -2424,6 +2433,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
2424 } else 2433 } else
2425 pr_debug("dasd_generic device %s found\n", 2434 pr_debug("dasd_generic device %s found\n",
2426 dev_name(&cdev->dev)); 2435 dev_name(&cdev->dev));
2436
2437 wait_event(dasd_init_waitq, _wait_for_device(device));
2438
2427 dasd_put_device(device); 2439 dasd_put_device(device);
2428 return rc; 2440 return rc;
2429} 2441}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 4cac5b54f26a..d49766f3b940 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -874,12 +874,19 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
874 ssize_t len; 874 ssize_t len;
875 875
876 device = dasd_device_from_cdev(to_ccwdev(dev)); 876 device = dasd_device_from_cdev(to_ccwdev(dev));
877 if (!IS_ERR(device) && device->discipline) { 877 if (IS_ERR(device))
878 goto out;
879 else if (!device->discipline) {
880 dasd_put_device(device);
881 goto out;
882 } else {
878 len = snprintf(buf, PAGE_SIZE, "%s\n", 883 len = snprintf(buf, PAGE_SIZE, "%s\n",
879 device->discipline->name); 884 device->discipline->name);
880 dasd_put_device(device); 885 dasd_put_device(device);
881 } else 886 return len;
882 len = snprintf(buf, PAGE_SIZE, "none\n"); 887 }
888out:
889 len = snprintf(buf, PAGE_SIZE, "none\n");
883 return len; 890 return len;
884} 891}
885 892
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d3198303b93c..94f92a1247f2 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -88,6 +88,7 @@ void dasd_gendisk_free(struct dasd_block *block)
88 if (block->gdp) { 88 if (block->gdp) {
89 del_gendisk(block->gdp); 89 del_gendisk(block->gdp);
90 block->gdp->queue = NULL; 90 block->gdp->queue = NULL;
91 block->gdp->private_data = NULL;
91 put_disk(block->gdp); 92 put_disk(block->gdp);
92 block->gdp = NULL; 93 block->gdp = NULL;
93 } 94 }
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index e4c2143dabf6..ed73ce550822 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -368,6 +368,7 @@ struct dasd_device {
368 368
369 /* Device state and target state. */ 369 /* Device state and target state. */
370 int state, target; 370 int state, target;
371 struct mutex state_mutex;
371 int stopped; /* device (ccw_device_start) was stopped */ 372 int stopped; /* device (ccw_device_start) was stopped */
372 373
373 /* reference count. */ 374 /* reference count. */
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 71f95f54866f..f13a0bdd148c 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -165,51 +165,32 @@ static const struct file_operations dasd_devices_file_ops = {
165 .release = seq_release, 165 .release = seq_release,
166}; 166};
167 167
168static int
169dasd_calc_metrics(char *page, char **start, off_t off,
170 int count, int *eof, int len)
171{
172 len = (len > off) ? len - off : 0;
173 if (len > count)
174 len = count;
175 if (len < count)
176 *eof = 1;
177 *start = page + off;
178 return len;
179}
180
181#ifdef CONFIG_DASD_PROFILE 168#ifdef CONFIG_DASD_PROFILE
182static char * 169static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
183dasd_statistics_array(char *str, unsigned int *array, int factor)
184{ 170{
185 int i; 171 int i;
186 172
187 for (i = 0; i < 32; i++) { 173 for (i = 0; i < 32; i++) {
188 str += sprintf(str, "%7d ", array[i] / factor); 174 seq_printf(m, "%7d ", array[i] / factor);
189 if (i == 15) 175 if (i == 15)
190 str += sprintf(str, "\n"); 176 seq_putc(m, '\n');
191 } 177 }
192 str += sprintf(str,"\n"); 178 seq_putc(m, '\n');
193 return str;
194} 179}
195#endif /* CONFIG_DASD_PROFILE */ 180#endif /* CONFIG_DASD_PROFILE */
196 181
197static int 182static int dasd_stats_proc_show(struct seq_file *m, void *v)
198dasd_statistics_read(char *page, char **start, off_t off,
199 int count, int *eof, void *data)
200{ 183{
201 unsigned long len;
202#ifdef CONFIG_DASD_PROFILE 184#ifdef CONFIG_DASD_PROFILE
203 struct dasd_profile_info_t *prof; 185 struct dasd_profile_info_t *prof;
204 char *str;
205 int factor; 186 int factor;
206 187
207 /* check for active profiling */ 188 /* check for active profiling */
208 if (dasd_profile_level == DASD_PROFILE_OFF) { 189 if (dasd_profile_level == DASD_PROFILE_OFF) {
209 len = sprintf(page, "Statistics are off - they might be " 190 seq_printf(m, "Statistics are off - they might be "
210 "switched on using 'echo set on > " 191 "switched on using 'echo set on > "
211 "/proc/dasd/statistics'\n"); 192 "/proc/dasd/statistics'\n");
212 return dasd_calc_metrics(page, start, off, count, eof, len); 193 return 0;
213 } 194 }
214 195
215 prof = &dasd_global_profile; 196 prof = &dasd_global_profile;
@@ -217,47 +198,49 @@ dasd_statistics_read(char *page, char **start, off_t off,
217 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; 198 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
218 factor *= 10); 199 factor *= 10);
219 200
220 str = page; 201 seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs);
221 str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); 202 seq_printf(m, "with %u sectors(512B each)\n",
222 str += sprintf(str, "with %u sectors(512B each)\n",
223 prof->dasd_io_sects); 203 prof->dasd_io_sects);
224 str += sprintf(str, "Scale Factor is %d\n", factor); 204 seq_printf(m, "Scale Factor is %d\n", factor);
225 str += sprintf(str, 205 seq_printf(m,
226 " __<4 ___8 __16 __32 __64 _128 " 206 " __<4 ___8 __16 __32 __64 _128 "
227 " _256 _512 __1k __2k __4k __8k " 207 " _256 _512 __1k __2k __4k __8k "
228 " _16k _32k _64k 128k\n"); 208 " _16k _32k _64k 128k\n");
229 str += sprintf(str, 209 seq_printf(m,
230 " _256 _512 __1M __2M __4M __8M " 210 " _256 _512 __1M __2M __4M __8M "
231 " _16M _32M _64M 128M 256M 512M " 211 " _16M _32M _64M 128M 256M 512M "
232 " __1G __2G __4G " " _>4G\n"); 212 " __1G __2G __4G " " _>4G\n");
233 213
234 str += sprintf(str, "Histogram of sizes (512B secs)\n"); 214 seq_printf(m, "Histogram of sizes (512B secs)\n");
235 str = dasd_statistics_array(str, prof->dasd_io_secs, factor); 215 dasd_statistics_array(m, prof->dasd_io_secs, factor);
236 str += sprintf(str, "Histogram of I/O times (microseconds)\n"); 216 seq_printf(m, "Histogram of I/O times (microseconds)\n");
237 str = dasd_statistics_array(str, prof->dasd_io_times, factor); 217 dasd_statistics_array(m, prof->dasd_io_times, factor);
238 str += sprintf(str, "Histogram of I/O times per sector\n"); 218 seq_printf(m, "Histogram of I/O times per sector\n");
239 str = dasd_statistics_array(str, prof->dasd_io_timps, factor); 219 dasd_statistics_array(m, prof->dasd_io_timps, factor);
240 str += sprintf(str, "Histogram of I/O time till ssch\n"); 220 seq_printf(m, "Histogram of I/O time till ssch\n");
241 str = dasd_statistics_array(str, prof->dasd_io_time1, factor); 221 dasd_statistics_array(m, prof->dasd_io_time1, factor);
242 str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); 222 seq_printf(m, "Histogram of I/O time between ssch and irq\n");
243 str = dasd_statistics_array(str, prof->dasd_io_time2, factor); 223 dasd_statistics_array(m, prof->dasd_io_time2, factor);
244 str += sprintf(str, "Histogram of I/O time between ssch " 224 seq_printf(m, "Histogram of I/O time between ssch "
245 "and irq per sector\n"); 225 "and irq per sector\n");
246 str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); 226 dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
247 str += sprintf(str, "Histogram of I/O time between irq and end\n"); 227 seq_printf(m, "Histogram of I/O time between irq and end\n");
248 str = dasd_statistics_array(str, prof->dasd_io_time3, factor); 228 dasd_statistics_array(m, prof->dasd_io_time3, factor);
249 str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); 229 seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
250 str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); 230 dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
251 len = str - page;
252#else 231#else
253 len = sprintf(page, "Statistics are not activated in this kernel\n"); 232 seq_printf(m, "Statistics are not activated in this kernel\n");
254#endif 233#endif
255 return dasd_calc_metrics(page, start, off, count, eof, len); 234 return 0;
256} 235}
257 236
258static int 237static int dasd_stats_proc_open(struct inode *inode, struct file *file)
259dasd_statistics_write(struct file *file, const char __user *user_buf, 238{
260 unsigned long user_len, void *data) 239 return single_open(file, dasd_stats_proc_show, NULL);
240}
241
242static ssize_t dasd_stats_proc_write(struct file *file,
243 const char __user *user_buf, size_t user_len, loff_t *pos)
261{ 244{
262#ifdef CONFIG_DASD_PROFILE 245#ifdef CONFIG_DASD_PROFILE
263 char *buffer, *str; 246 char *buffer, *str;
@@ -308,6 +291,15 @@ out_error:
308#endif /* CONFIG_DASD_PROFILE */ 291#endif /* CONFIG_DASD_PROFILE */
309} 292}
310 293
294static const struct file_operations dasd_stats_proc_fops = {
295 .owner = THIS_MODULE,
296 .open = dasd_stats_proc_open,
297 .read = seq_read,
298 .llseek = seq_lseek,
299 .release = single_release,
300 .write = dasd_stats_proc_write,
301};
302
311/* 303/*
312 * Create dasd proc-fs entries. 304 * Create dasd proc-fs entries.
313 * In case creation failed, cleanup and return -ENOENT. 305 * In case creation failed, cleanup and return -ENOENT.
@@ -324,13 +316,12 @@ dasd_proc_init(void)
324 &dasd_devices_file_ops); 316 &dasd_devices_file_ops);
325 if (!dasd_devices_entry) 317 if (!dasd_devices_entry)
326 goto out_nodevices; 318 goto out_nodevices;
327 dasd_statistics_entry = create_proc_entry("statistics", 319 dasd_statistics_entry = proc_create("statistics",
328 S_IFREG | S_IRUGO | S_IWUSR, 320 S_IFREG | S_IRUGO | S_IWUSR,
329 dasd_proc_root_entry); 321 dasd_proc_root_entry,
322 &dasd_stats_proc_fops);
330 if (!dasd_statistics_entry) 323 if (!dasd_statistics_entry)
331 goto out_nostatistics; 324 goto out_nostatistics;
332 dasd_statistics_entry->read_proc = dasd_statistics_read;
333 dasd_statistics_entry->write_proc = dasd_statistics_write;
334 return 0; 325 return 0;
335 326
336 out_nostatistics: 327 out_nostatistics:
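
The hunks above convert the DASD statistics file from the old read_proc/write_proc callbacks to the seq_file single_open() pattern and register it with proc_create() instead of create_proc_entry(). A minimal sketch of that pattern for a hypothetical proc file, using the file_operations based interface of this kernel generation (names are illustrative, not part of the patch):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Show callback: emits the whole file content into the seq_file buffer. */
static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "value: %d\n", 42);
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = demo_proc_open,
	.read	 = seq_read,	/* seq_file handles buffering and offsets */
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	/* proc_create() wires up the fops in one call; NULL parent = /proc */
	if (!proc_create("demo_stats", S_IFREG | S_IRUGO, NULL,
			 &demo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_stats", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Compared with the removed code, the show callback no longer tracks page offsets or computes len by hand; dasd_calc_metrics() and the manual length bookkeeping simply disappear.
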
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 82daa3c1dc9c..3438658b66b7 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/miscdevice.h> 16#include <linux/miscdevice.h>
17#include <linux/debugfs.h> 17#include <linux/debugfs.h>
18#include <asm/asm-offsets.h>
18#include <asm/ipl.h> 19#include <asm/ipl.h>
19#include <asm/sclp.h> 20#include <asm/sclp.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
@@ -40,12 +41,12 @@ enum arch_id {
40/* dump system info */ 41/* dump system info */
41 42
42struct sys_info { 43struct sys_info {
43 enum arch_id arch; 44 enum arch_id arch;
44 unsigned long sa_base; 45 unsigned long sa_base;
45 u32 sa_size; 46 u32 sa_size;
46 int cpu_map[NR_CPUS]; 47 int cpu_map[NR_CPUS];
47 unsigned long mem_size; 48 unsigned long mem_size;
48 union save_area lc_mask; 49 struct save_area lc_mask;
49}; 50};
50 51
51struct ipib_info { 52struct ipib_info {
@@ -183,52 +184,9 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
183 return 0; 184 return 0;
184} 185}
185 186
186#ifdef __s390x__
187/*
188 * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
189 */
190static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
191 int cpu)
192{
193 int i;
194
195 for (i = 0; i < 16; i++) {
196 out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
197 out->s390.acc_regs[i] = in->s390x.acc_regs[i];
198 out->s390.ctrl_regs[i] =
199 in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
200 }
201 /* locore for 31 bit has only space for fpregs 0,2,4,6 */
202 out->s390.fp_regs[0] = in->s390x.fp_regs[0];
203 out->s390.fp_regs[1] = in->s390x.fp_regs[2];
204 out->s390.fp_regs[2] = in->s390x.fp_regs[4];
205 out->s390.fp_regs[3] = in->s390x.fp_regs[6];
206 memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
207 out->s390.psw[1] |= 0x8; /* set bit 12 */
208 memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
209 out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
210 out->s390.pref_reg = in->s390x.pref_reg;
211 out->s390.timer = in->s390x.timer;
212 out->s390.clk_cmp = in->s390x.clk_cmp;
213}
214
215static void __init s390x_to_s390_save_areas(void)
216{
217 int i = 1;
218 static union save_area tmp;
219
220 while (zfcpdump_save_areas[i]) {
221 s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
222 memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
223 i++;
224 }
225}
226
227#endif /* __s390x__ */
228
229static int __init init_cpu_info(enum arch_id arch) 187static int __init init_cpu_info(enum arch_id arch)
230{ 188{
231 union save_area *sa; 189 struct save_area *sa;
232 190
233 /* get info for boot cpu from lowcore, stored in the HSA */ 191 /* get info for boot cpu from lowcore, stored in the HSA */
234 192
@@ -241,20 +199,12 @@ static int __init init_cpu_info(enum arch_id arch)
241 return -EIO; 199 return -EIO;
242 } 200 }
243 zfcpdump_save_areas[0] = sa; 201 zfcpdump_save_areas[0] = sa;
244
245#ifdef __s390x__
246 /* convert s390x regs to s390, if we are dumping an s390 Linux */
247
248 if (arch == ARCH_S390)
249 s390x_to_s390_save_areas();
250#endif
251
252 return 0; 202 return 0;
253} 203}
254 204
255static DEFINE_MUTEX(zcore_mutex); 205static DEFINE_MUTEX(zcore_mutex);
256 206
257#define DUMP_VERSION 0x3 207#define DUMP_VERSION 0x5
258#define DUMP_MAGIC 0xa8190173618f23fdULL 208#define DUMP_MAGIC 0xa8190173618f23fdULL
259#define DUMP_ARCH_S390X 2 209#define DUMP_ARCH_S390X 2
260#define DUMP_ARCH_S390 1 210#define DUMP_ARCH_S390 1
@@ -279,7 +229,14 @@ struct zcore_header {
279 u32 volnr; 229 u32 volnr;
280 u32 build_arch; 230 u32 build_arch;
281 u64 rmem_size; 231 u64 rmem_size;
282 char pad2[4016]; 232 u8 mvdump;
233 u16 cpu_cnt;
234 u16 real_cpu_cnt;
235 u8 end_pad1[0x200-0x061];
236 u64 mvdump_sign;
237 u64 mvdump_zipl_time;
238 u8 end_pad2[0x800-0x210];
239 u32 lc_vec[512];
283} __attribute__((packed,__aligned__(16))); 240} __attribute__((packed,__aligned__(16)));
284 241
285static struct zcore_header zcore_header = { 242static struct zcore_header zcore_header = {
@@ -289,7 +246,7 @@ static struct zcore_header zcore_header = {
289 .dump_level = 0, 246 .dump_level = 0,
290 .page_size = PAGE_SIZE, 247 .page_size = PAGE_SIZE,
291 .mem_start = 0, 248 .mem_start = 0,
292#ifdef __s390x__ 249#ifdef CONFIG_64BIT
293 .build_arch = DUMP_ARCH_S390X, 250 .build_arch = DUMP_ARCH_S390X,
294#else 251#else
295 .build_arch = DUMP_ARCH_S390, 252 .build_arch = DUMP_ARCH_S390,
@@ -340,11 +297,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
340 unsigned long prefix; 297 unsigned long prefix;
341 unsigned long sa_off, len, buf_off; 298 unsigned long sa_off, len, buf_off;
342 299
343 if (sys_info.arch == ARCH_S390) 300 prefix = zfcpdump_save_areas[i]->pref_reg;
344 prefix = zfcpdump_save_areas[i]->s390.pref_reg;
345 else
346 prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
347
348 sa_start = prefix + sys_info.sa_base; 301 sa_start = prefix + sys_info.sa_base;
349 sa_end = prefix + sys_info.sa_base + sys_info.sa_size; 302 sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
350 303
@@ -561,34 +514,39 @@ static const struct file_operations zcore_reipl_fops = {
561 .release = zcore_reipl_release, 514 .release = zcore_reipl_release,
562}; 515};
563 516
517#ifdef CONFIG_32BIT
564 518
565static void __init set_s390_lc_mask(union save_area *map) 519static void __init set_lc_mask(struct save_area *map)
566{ 520{
567 memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); 521 memset(&map->ext_save, 0xff, sizeof(map->ext_save));
568 memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); 522 memset(&map->timer, 0xff, sizeof(map->timer));
569 memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); 523 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
570 memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); 524 memset(&map->psw, 0xff, sizeof(map->psw));
571 memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); 525 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
572 memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); 526 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
573 memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); 527 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
574 memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); 528 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
575 memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); 529 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
576} 530}
577 531
578static void __init set_s390x_lc_mask(union save_area *map) 532#else /* CONFIG_32BIT */
533
534static void __init set_lc_mask(struct save_area *map)
579{ 535{
580 memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); 536 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
581 memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); 537 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
582 memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); 538 memset(&map->psw, 0xff, sizeof(map->psw));
583 memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); 539 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
584 memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); 540 memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
585 memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); 541 memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
586 memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); 542 memset(&map->timer, 0xff, sizeof(map->timer));
587 memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); 543 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
588 memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); 544 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
589 memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); 545 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
590} 546}
591 547
548#endif /* CONFIG_32BIT */
549
592/* 550/*
593 * Initialize dump globals for a given architecture 551 * Initialize dump globals for a given architecture
594 */ 552 */
@@ -599,21 +557,18 @@ static int __init sys_info_init(enum arch_id arch)
599 switch (arch) { 557 switch (arch) {
600 case ARCH_S390X: 558 case ARCH_S390X:
601 pr_alert("DETECTED 'S390X (64 bit) OS'\n"); 559 pr_alert("DETECTED 'S390X (64 bit) OS'\n");
602 sys_info.sa_base = SAVE_AREA_BASE_S390X;
603 sys_info.sa_size = sizeof(struct save_area_s390x);
604 set_s390x_lc_mask(&sys_info.lc_mask);
605 break; 560 break;
606 case ARCH_S390: 561 case ARCH_S390:
607 pr_alert("DETECTED 'S390 (32 bit) OS'\n"); 562 pr_alert("DETECTED 'S390 (32 bit) OS'\n");
608 sys_info.sa_base = SAVE_AREA_BASE_S390;
609 sys_info.sa_size = sizeof(struct save_area_s390);
610 set_s390_lc_mask(&sys_info.lc_mask);
611 break; 563 break;
612 default: 564 default:
613 pr_alert("0x%x is an unknown architecture.\n",arch); 565 pr_alert("0x%x is an unknown architecture.\n",arch);
614 return -EINVAL; 566 return -EINVAL;
615 } 567 }
568 sys_info.sa_base = SAVE_AREA_BASE;
569 sys_info.sa_size = sizeof(struct save_area);
616 sys_info.arch = arch; 570 sys_info.arch = arch;
571 set_lc_mask(&sys_info.lc_mask);
617 rc = init_cpu_info(arch); 572 rc = init_cpu_info(arch);
618 if (rc) 573 if (rc)
619 return rc; 574 return rc;
@@ -660,8 +615,9 @@ static int __init get_mem_size(unsigned long *mem)
660 615
661static int __init zcore_header_init(int arch, struct zcore_header *hdr) 616static int __init zcore_header_init(int arch, struct zcore_header *hdr)
662{ 617{
663 int rc; 618 int rc, i;
664 unsigned long memory = 0; 619 unsigned long memory = 0;
620 u32 prefix;
665 621
666 if (arch == ARCH_S390X) 622 if (arch == ARCH_S390X)
667 hdr->arch_id = DUMP_ARCH_S390X; 623 hdr->arch_id = DUMP_ARCH_S390X;
@@ -676,6 +632,14 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
676 hdr->num_pages = memory / PAGE_SIZE; 632 hdr->num_pages = memory / PAGE_SIZE;
677 hdr->tod = get_clock(); 633 hdr->tod = get_clock();
678 get_cpu_id(&hdr->cpu_id); 634 get_cpu_id(&hdr->cpu_id);
635 for (i = 0; zfcpdump_save_areas[i]; i++) {
636 prefix = zfcpdump_save_areas[i]->pref_reg;
637 hdr->real_cpu_cnt++;
638 if (!prefix)
639 continue;
640 hdr->lc_vec[hdr->cpu_cnt] = prefix;
641 hdr->cpu_cnt++;
642 }
679 return 0; 643 return 0;
680} 644}
681 645
@@ -741,14 +705,21 @@ static int __init zcore_init(void)
741 if (rc) 705 if (rc)
742 goto fail; 706 goto fail;
743 707
744#ifndef __s390x__ 708#ifdef CONFIG_64BIT
709 if (arch == ARCH_S390) {
710 pr_alert("The 64-bit dump tool cannot be used for a "
711 "32-bit system\n");
712 rc = -EINVAL;
713 goto fail;
714 }
715#else /* CONFIG_64BIT */
745 if (arch == ARCH_S390X) { 716 if (arch == ARCH_S390X) {
746 pr_alert("The 32-bit dump tool cannot be used for a " 717 pr_alert("The 32-bit dump tool cannot be used for a "
747 "64-bit system\n"); 718 "64-bit system\n");
748 rc = -EINVAL; 719 rc = -EINVAL;
749 goto fail; 720 goto fail;
750 } 721 }
751#endif 722#endif /* CONFIG_64BIT */
752 723
753 rc = sys_info_init(arch); 724 rc = sys_info_init(arch);
754 if (rc) 725 if (rc)
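
Besides dropping the 31-bit conversion helpers, the zcore changes above bump DUMP_VERSION to 5 and extend zcore_header with cpu_cnt, real_cpu_cnt and the lc_vec[] prefix-register vector that zcore_header_init() now fills from the collected save areas. Reduced to a standalone sketch (types and names simplified for illustration, not taken from the patch), the bookkeeping is:

/*
 * Every stored save area counts towards real_cpu_cnt; only CPUs that left
 * a non-zero prefix register get a slot in lc_vec[].
 */
struct demo_hdr {
	unsigned short cpu_cnt;		/* CPUs with a usable prefix register */
	unsigned short real_cpu_cnt;	/* all CPUs that left a save area */
	unsigned int lc_vec[512];	/* lowcore (prefix) addresses */
};

static void demo_fill_cpu_info(struct demo_hdr *hdr,
			       const unsigned int *prefix_regs, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		hdr->real_cpu_cnt++;
		if (!prefix_regs[i])
			continue;	/* no prefix stored, skip lc_vec entry */
		hdr->lc_vec[hdr->cpu_cnt++] = prefix_regs[i];
	}
}
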
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 7a28a3029a3f..37df42af05ec 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -224,8 +224,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
224 */ 224 */
225void ccw_request_handler(struct ccw_device *cdev) 225void ccw_request_handler(struct ccw_device *cdev)
226{ 226{
227 struct irb *irb = (struct irb *)&S390_lowcore.irb;
227 struct ccw_request *req = &cdev->private->req; 228 struct ccw_request *req = &cdev->private->req;
228 struct irb *irb = (struct irb *) __LC_IRB;
229 enum io_status status; 229 enum io_status status;
230 int rc = -EOPNOTSUPP; 230 int rc = -EOPNOTSUPP;
231 231
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ecd3e567648..4038f5b4f144 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -574,7 +574,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
574 secm_area->request.length = 0x0050; 574 secm_area->request.length = 0x0050;
575 secm_area->request.code = 0x0016; 575 secm_area->request.code = 0x0016;
576 576
577 secm_area->key = PAGE_DEFAULT_KEY; 577 secm_area->key = PAGE_DEFAULT_KEY >> 4;
578 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; 578 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
579 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; 579 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
580 580
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index c84ac9443079..852612f5dba0 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -51,7 +51,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
51{ 51{
52 struct chsc_private *private = sch->private; 52 struct chsc_private *private = sch->private;
53 struct chsc_request *request = private->request; 53 struct chsc_request *request = private->request;
54 struct irb *irb = (struct irb *)__LC_IRB; 54 struct irb *irb = (struct irb *)&S390_lowcore.irb;
55 55
56 CHSC_LOG(4, "irb"); 56 CHSC_LOG(4, "irb");
57 CHSC_LOG_HEX(4, irb, sizeof(*irb)); 57 CHSC_LOG_HEX(4, irb, sizeof(*irb));
@@ -237,7 +237,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
237 int ret = -ENODEV; 237 int ret = -ENODEV;
238 char dbf[10]; 238 char dbf[10];
239 239
240 chsc_area->header.key = PAGE_DEFAULT_KEY; 240 chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
241 while ((sch = chsc_get_next_subchannel(sch))) { 241 while ((sch = chsc_get_next_subchannel(sch))) {
242 spin_lock(sch->lock); 242 spin_lock(sch->lock);
243 private = sch->private; 243 private = sch->private;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 126f240715a4..f736cdcf08ad 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -625,8 +625,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
625 /* 625 /*
626 * Get interrupt information from lowcore 626 * Get interrupt information from lowcore
627 */ 627 */
628 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 628 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
629 irb = (struct irb *) __LC_IRB; 629 irb = (struct irb *)&S390_lowcore.irb;
630 do { 630 do {
631 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; 631 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
632 /* 632 /*
@@ -661,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
661 * We don't do this for VM because a tpi drops the cpu 661 * We don't do this for VM because a tpi drops the cpu
662 * out of the sie which costs more cycles than it saves. 662 * out of the sie which costs more cycles than it saves.
663 */ 663 */
664 } while (!MACHINE_IS_VM && tpi (NULL) != 0); 664 } while (MACHINE_IS_LPAR && tpi(NULL) != 0);
665 irq_exit(); 665 irq_exit();
666 set_irq_regs(old_regs); 666 set_irq_regs(old_regs);
667} 667}
@@ -682,10 +682,10 @@ static int cio_tpi(void)
682 struct irb *irb; 682 struct irb *irb;
683 int irq_context; 683 int irq_context;
684 684
685 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 685 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
686 if (tpi(NULL) != 1) 686 if (tpi(NULL) != 1)
687 return 0; 687 return 0;
688 irb = (struct irb *) __LC_IRB; 688 irb = (struct irb *)&S390_lowcore.irb;
689 /* Store interrupt response block to lowcore. */ 689 /* Store interrupt response block to lowcore. */
690 if (tsch(tpi_info->schid, irb) != 0) 690 if (tsch(tpi_info->schid, irb) != 0)
691 /* Not status pending or not operational. */ 691 /* Not status pending or not operational. */
@@ -885,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
885 struct tpi_info ti; 885 struct tpi_info ti;
886 886
887 if (tpi(&ti)) { 887 if (tpi(&ti)) {
888 tsch(ti.schid, (struct irb *)__LC_IRB); 888 tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
889 if (schid_equal(&ti.schid, &schid)) 889 if (schid_equal(&ti.schid, &schid))
890 return 0; 890 return 0;
891 } 891 }
@@ -1083,7 +1083,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1083 struct subchannel_id schid; 1083 struct subchannel_id schid;
1084 struct schib schib; 1084 struct schib schib;
1085 1085
1086 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1086 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
1087 if (!schid.one) 1087 if (!schid.one)
1088 return -ENODEV; 1088 return -ENODEV;
1089 if (stsch(schid, &schib)) 1089 if (stsch(schid, &schib))
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d157665d0e76..425f741a280c 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -8,15 +8,16 @@
8 * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */ 9 */
10 10
11#include <linux/semaphore.h>
12#include <linux/mutex.h> 11#include <linux/mutex.h>
13#include <linux/kthread.h> 12#include <linux/kthread.h>
14#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/wait.h>
15#include <asm/crw.h> 15#include <asm/crw.h>
16 16
17static struct semaphore crw_semaphore;
18static DEFINE_MUTEX(crw_handler_mutex); 17static DEFINE_MUTEX(crw_handler_mutex);
19static crw_handler_t crw_handlers[NR_RSCS]; 18static crw_handler_t crw_handlers[NR_RSCS];
19static atomic_t crw_nr_req = ATOMIC_INIT(0);
20static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
20 21
21/** 22/**
22 * crw_register_handler() - register a channel report word handler 23 * crw_register_handler() - register a channel report word handler
@@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc)
59static int crw_collect_info(void *unused) 60static int crw_collect_info(void *unused)
60{ 61{
61 struct crw crw[2]; 62 struct crw crw[2];
62 int ccode; 63 int ccode, signal;
63 unsigned int chain; 64 unsigned int chain;
64 int ignore;
65 65
66repeat: 66repeat:
67 ignore = down_interruptible(&crw_semaphore); 67 signal = wait_event_interruptible(crw_handler_wait_q,
68 atomic_read(&crw_nr_req) > 0);
69 if (unlikely(signal))
70 atomic_inc(&crw_nr_req);
68 chain = 0; 71 chain = 0;
69 while (1) { 72 while (1) {
70 crw_handler_t handler; 73 crw_handler_t handler;
@@ -122,25 +125,23 @@ repeat:
122 /* chain is always 0 or 1 here. */ 125 /* chain is always 0 or 1 here. */
123 chain = crw[chain].chn ? chain + 1 : 0; 126 chain = crw[chain].chn ? chain + 1 : 0;
124 } 127 }
128 if (atomic_dec_and_test(&crw_nr_req))
129 wake_up(&crw_handler_wait_q);
125 goto repeat; 130 goto repeat;
126 return 0; 131 return 0;
127} 132}
128 133
129void crw_handle_channel_report(void) 134void crw_handle_channel_report(void)
130{ 135{
131 up(&crw_semaphore); 136 atomic_inc(&crw_nr_req);
137 wake_up(&crw_handler_wait_q);
132} 138}
133 139
134/* 140void crw_wait_for_channel_report(void)
135 * Separate initcall needed for semaphore initialization since
136 * crw_handle_channel_report might be called before crw_machine_check_init.
137 */
138static int __init crw_init_semaphore(void)
139{ 141{
140 init_MUTEX_LOCKED(&crw_semaphore); 142 crw_handle_channel_report();
141 return 0; 143 wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
142} 144}
143pure_initcall(crw_init_semaphore);
144 145
145/* 146/*
146 * Machine checks for the channel subsystem must be enabled 147 * Machine checks for the channel subsystem must be enabled
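
The crw.c rework above drops the permanently locked semaphore in favour of a wait queue plus an atomic request counter, which is also what makes the new crw_wait_for_channel_report() flush operation possible. The producer/consumer shape of that pattern, reduced to a self-contained sketch with made-up names (crw.c itself uses the interruptible wait variant):

#include <linux/wait.h>
#include <linux/kthread.h>
#include <asm/atomic.h>

static atomic_t demo_nr_req = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_wait_q);

/* Producer side: note one more pending request and wake the worker. */
static void demo_post_request(void)
{
	atomic_inc(&demo_nr_req);
	wake_up(&demo_wait_q);
}

/* Worker thread: sleep until work is posted, process it, then report back. */
static int demo_thread(void *unused)
{
	while (!kthread_should_stop()) {
		wait_event(demo_wait_q, atomic_read(&demo_nr_req) > 0);
		/* ... collect and dispatch the pending requests here ... */
		if (atomic_dec_and_test(&demo_nr_req))
			wake_up(&demo_wait_q);	/* notify waiters below */
	}
	return 0;
}

/* Flush: post a (possibly spurious) request and wait until all are drained. */
static void demo_wait_for_completion(void)
{
	demo_post_request();
	wait_event(demo_wait_q, atomic_read(&demo_nr_req) == 0);
}
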
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..2769da54f2b9 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,6 +18,7 @@
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/suspend.h> 20#include <linux/suspend.h>
21#include <linux/proc_fs.h>
21#include <asm/isc.h> 22#include <asm/isc.h>
22#include <asm/crw.h> 23#include <asm/crw.h>
23 24
@@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
232 if (!get_device(&sch->dev)) 233 if (!get_device(&sch->dev))
233 return; 234 return;
234 sch->todo = todo; 235 sch->todo = todo;
235 if (!queue_work(slow_path_wq, &sch->todo_work)) { 236 if (!queue_work(cio_work_q, &sch->todo_work)) {
236 /* Already queued, release workqueue ref. */ 237 /* Already queued, release workqueue ref. */
237 put_device(&sch->dev); 238 put_device(&sch->dev);
238 } 239 }
@@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused)
543} 544}
544 545
545static DECLARE_WORK(slow_path_work, css_slow_path_func); 546static DECLARE_WORK(slow_path_work, css_slow_path_func);
546struct workqueue_struct *slow_path_wq; 547struct workqueue_struct *cio_work_q;
547 548
548void css_schedule_eval(struct subchannel_id schid) 549void css_schedule_eval(struct subchannel_id schid)
549{ 550{
@@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid)
552 spin_lock_irqsave(&slow_subchannel_lock, flags); 553 spin_lock_irqsave(&slow_subchannel_lock, flags);
553 idset_sch_add(slow_subchannel_set, schid); 554 idset_sch_add(slow_subchannel_set, schid);
554 atomic_set(&css_eval_scheduled, 1); 555 atomic_set(&css_eval_scheduled, 1);
555 queue_work(slow_path_wq, &slow_path_work); 556 queue_work(cio_work_q, &slow_path_work);
556 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 557 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
557} 558}
558 559
@@ -563,7 +564,7 @@ void css_schedule_eval_all(void)
563 spin_lock_irqsave(&slow_subchannel_lock, flags); 564 spin_lock_irqsave(&slow_subchannel_lock, flags);
564 idset_fill(slow_subchannel_set); 565 idset_fill(slow_subchannel_set);
565 atomic_set(&css_eval_scheduled, 1); 566 atomic_set(&css_eval_scheduled, 1);
566 queue_work(slow_path_wq, &slow_path_work); 567 queue_work(cio_work_q, &slow_path_work);
567 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 568 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
568} 569}
569 570
@@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void)
594 spin_lock_irqsave(&slow_subchannel_lock, flags); 595 spin_lock_irqsave(&slow_subchannel_lock, flags);
595 idset_add_set(slow_subchannel_set, unreg_set); 596 idset_add_set(slow_subchannel_set, unreg_set);
596 atomic_set(&css_eval_scheduled, 1); 597 atomic_set(&css_eval_scheduled, 1);
597 queue_work(slow_path_wq, &slow_path_work); 598 queue_work(cio_work_q, &slow_path_work);
598 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 599 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
599 idset_free(unreg_set); 600 idset_free(unreg_set);
600} 601}
601 602
602void css_wait_for_slow_path(void) 603void css_wait_for_slow_path(void)
603{ 604{
604 flush_workqueue(slow_path_wq); 605 flush_workqueue(cio_work_q);
605} 606}
606 607
607/* Schedule reprobing of all unregistered subchannels. */ 608/* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void)
992 ret = css_bus_init(); 993 ret = css_bus_init();
993 if (ret) 994 if (ret)
994 return ret; 995 return ret;
995 996 cio_work_q = create_singlethread_workqueue("cio");
997 if (!cio_work_q) {
998 ret = -ENOMEM;
999 goto out_bus;
1000 }
996 ret = io_subchannel_init(); 1001 ret = io_subchannel_init();
997 if (ret) 1002 if (ret)
998 css_bus_cleanup(); 1003 goto out_wq;
999 1004
1000 return ret; 1005 return ret;
1006out_wq:
1007 destroy_workqueue(cio_work_q);
1008out_bus:
1009 css_bus_cleanup();
1010 return ret;
1001} 1011}
1002subsys_initcall(channel_subsystem_init); 1012subsys_initcall(channel_subsystem_init);
1003 1013
@@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused)
1006 struct css_driver *cssdrv = to_cssdriver(drv); 1016 struct css_driver *cssdrv = to_cssdriver(drv);
1007 1017
1008 if (cssdrv->settle) 1018 if (cssdrv->settle)
1009 cssdrv->settle(); 1019 return cssdrv->settle();
1010 return 0; 1020 return 0;
1011} 1021}
1012 1022
1023int css_complete_work(void)
1024{
1025 int ret;
1026
1027 /* Wait for the evaluation of subchannels to finish. */
1028 ret = wait_event_interruptible(css_eval_wq,
1029 atomic_read(&css_eval_scheduled) == 0);
1030 if (ret)
1031 return -EINTR;
1032 flush_workqueue(cio_work_q);
1033 /* Wait for the subchannel type specific initialization to finish */
1034 return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1035}
1036
1037
1013/* 1038/*
1014 * Wait for the initialization of devices to finish, to make sure we are 1039 * Wait for the initialization of devices to finish, to make sure we are
1015 * done with our setup if the search for the root device starts. 1040 * done with our setup if the search for the root device starts.
@@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void)
1018{ 1043{
1019 /* Start initial subchannel evaluation. */ 1044 /* Start initial subchannel evaluation. */
1020 css_schedule_eval_all(); 1045 css_schedule_eval_all();
1021 /* Wait for the evaluation of subchannels to finish. */ 1046 css_complete_work();
1022 wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); 1047 return 0;
1023 /* Wait for the subchannel type specific initialization to finish */
1024 return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1025} 1048}
1026subsys_initcall_sync(channel_subsystem_init_sync); 1049subsys_initcall_sync(channel_subsystem_init_sync);
1027 1050
1051#ifdef CONFIG_PROC_FS
1052static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1053 size_t count, loff_t *ppos)
1054{
1055 int ret;
1056
1057 /* Handle pending CRW's. */
1058 crw_wait_for_channel_report();
1059 ret = css_complete_work();
1060
1061 return ret ? ret : count;
1062}
1063
1064static const struct file_operations cio_settle_proc_fops = {
1065 .write = cio_settle_write,
1066};
1067
1068static int __init cio_settle_init(void)
1069{
1070 struct proc_dir_entry *entry;
1071
1072 entry = proc_create("cio_settle", S_IWUSR, NULL,
1073 &cio_settle_proc_fops);
1074 if (!entry)
1075 return -ENOMEM;
1076 return 0;
1077}
1078device_initcall(cio_settle_init);
1079#endif /*CONFIG_PROC_FS*/
1080
1028int sch_is_pseudo_sch(struct subchannel *sch) 1081int sch_is_pseudo_sch(struct subchannel *sch)
1029{ 1082{
1030 return sch == to_css(sch->dev.parent)->pseudo_subchannel; 1083 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
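
The new /proc/cio_settle interface gives user space a synchronous way to wait for the common I/O layer to become idle: a write first drains pending channel report words via crw_wait_for_channel_report() and then runs css_complete_work(), so the write call only returns once subchannel evaluation and the cio work queue have settled. Any write triggers it, e.g. "echo 1 > /proc/cio_settle" from a shell, or, as an illustrative user-space sketch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/cio_settle", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/cio_settle");
		return 1;
	}
	/* write() blocks until the cio layer has settled, then returns 1 */
	if (write(fd, "1", 1) != 1) {
		perror("write /proc/cio_settle");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
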
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index fe84b92cde60..7e37886de231 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -95,7 +95,7 @@ struct css_driver {
95 int (*freeze)(struct subchannel *); 95 int (*freeze)(struct subchannel *);
96 int (*thaw) (struct subchannel *); 96 int (*thaw) (struct subchannel *);
97 int (*restore)(struct subchannel *); 97 int (*restore)(struct subchannel *);
98 void (*settle)(void); 98 int (*settle)(void);
99 const char *name; 99 const char *name;
100}; 100};
101 101
@@ -146,12 +146,13 @@ extern struct channel_subsystem *channel_subsystems[];
146/* Helper functions to build lists for the slow path. */ 146/* Helper functions to build lists for the slow path. */
147void css_schedule_eval(struct subchannel_id schid); 147void css_schedule_eval(struct subchannel_id schid);
148void css_schedule_eval_all(void); 148void css_schedule_eval_all(void);
149int css_complete_work(void);
149 150
150int sch_is_pseudo_sch(struct subchannel *); 151int sch_is_pseudo_sch(struct subchannel *);
151struct schib; 152struct schib;
152int css_sch_is_valid(struct schib *); 153int css_sch_is_valid(struct schib *);
153 154
154extern struct workqueue_struct *slow_path_wq; 155extern struct workqueue_struct *cio_work_q;
155void css_wait_for_slow_path(void); 156void css_wait_for_slow_path(void);
156void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo); 157void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
157#endif 158#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a6c7d5426fb2..c6abb75c4615 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
136static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, 136static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
137 int); 137 int);
138static void recovery_func(unsigned long data); 138static void recovery_func(unsigned long data);
139struct workqueue_struct *ccw_device_work;
140wait_queue_head_t ccw_device_init_wq; 139wait_queue_head_t ccw_device_init_wq;
141atomic_t ccw_device_init_count; 140atomic_t ccw_device_init_count;
142 141
@@ -159,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch)
159 return 0; 158 return 0;
160} 159}
161 160
162static void io_subchannel_settle(void) 161static int io_subchannel_settle(void)
163{ 162{
164 wait_event(ccw_device_init_wq, 163 int ret;
165 atomic_read(&ccw_device_init_count) == 0); 164
166 flush_workqueue(ccw_device_work); 165 ret = wait_event_interruptible(ccw_device_init_wq,
166 atomic_read(&ccw_device_init_count) == 0);
167 if (ret)
168 return -EINTR;
169 flush_workqueue(cio_work_q);
170 return 0;
167} 171}
168 172
169static struct css_driver io_subchannel_driver = { 173static struct css_driver io_subchannel_driver = {
@@ -188,27 +192,13 @@ int __init io_subchannel_init(void)
188 atomic_set(&ccw_device_init_count, 0); 192 atomic_set(&ccw_device_init_count, 0);
189 setup_timer(&recovery_timer, recovery_func, 0); 193 setup_timer(&recovery_timer, recovery_func, 0);
190 194
191 ccw_device_work = create_singlethread_workqueue("cio"); 195 ret = bus_register(&ccw_bus_type);
192 if (!ccw_device_work) 196 if (ret)
193 return -ENOMEM; 197 return ret;
194 slow_path_wq = create_singlethread_workqueue("kslowcrw");
195 if (!slow_path_wq) {
196 ret = -ENOMEM;
197 goto out_err;
198 }
199 if ((ret = bus_register (&ccw_bus_type)))
200 goto out_err;
201
202 ret = css_driver_register(&io_subchannel_driver); 198 ret = css_driver_register(&io_subchannel_driver);
203 if (ret) 199 if (ret)
204 goto out_err; 200 bus_unregister(&ccw_bus_type);
205 201
206 return 0;
207out_err:
208 if (ccw_device_work)
209 destroy_workqueue(ccw_device_work);
210 if (slow_path_wq)
211 destroy_workqueue(slow_path_wq);
212 return ret; 202 return ret;
213} 203}
214 204
@@ -1348,7 +1338,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
1348 /* Not operational. */ 1338 /* Not operational. */
1349 if (!cdev) 1339 if (!cdev)
1350 return IO_SCH_UNREG; 1340 return IO_SCH_UNREG;
1351 if (!ccw_device_notify(cdev, CIO_GONE)) 1341 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1352 return IO_SCH_UNREG; 1342 return IO_SCH_UNREG;
1353 return IO_SCH_ORPH_UNREG; 1343 return IO_SCH_ORPH_UNREG;
1354 } 1344 }
@@ -1356,12 +1346,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
1356 if (!cdev) 1346 if (!cdev)
1357 return IO_SCH_ATTACH; 1347 return IO_SCH_ATTACH;
1358 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1348 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1359 if (!ccw_device_notify(cdev, CIO_GONE)) 1349 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1360 return IO_SCH_UNREG_ATTACH; 1350 return IO_SCH_UNREG_ATTACH;
1361 return IO_SCH_ORPH_ATTACH; 1351 return IO_SCH_ORPH_ATTACH;
1362 } 1352 }
1363 if ((sch->schib.pmcw.pam & sch->opm) == 0) { 1353 if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1364 if (!ccw_device_notify(cdev, CIO_NO_PATH)) 1354 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
1365 return IO_SCH_UNREG; 1355 return IO_SCH_UNREG;
1366 return IO_SCH_DISC; 1356 return IO_SCH_DISC;
1367 } 1357 }
@@ -1410,6 +1400,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1410 rc = 0; 1400 rc = 0;
1411 goto out_unlock; 1401 goto out_unlock;
1412 case IO_SCH_VERIFY: 1402 case IO_SCH_VERIFY:
1403 if (cdev->private->flags.resuming == 1) {
1404 if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
1405 ccw_device_set_notoper(cdev);
1406 break;
1407 }
1408 }
1413 /* Trigger path verification. */ 1409 /* Trigger path verification. */
1414 io_subchannel_verify(sch); 1410 io_subchannel_verify(sch);
1415 rc = 0; 1411 rc = 0;
@@ -1448,7 +1444,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1448 break; 1444 break;
1449 case IO_SCH_UNREG_ATTACH: 1445 case IO_SCH_UNREG_ATTACH:
1450 /* Unregister ccw device. */ 1446 /* Unregister ccw device. */
1451 ccw_device_unregister(cdev); 1447 if (!cdev->private->flags.resuming)
1448 ccw_device_unregister(cdev);
1452 break; 1449 break;
1453 default: 1450 default:
1454 break; 1451 break;
@@ -1457,7 +1454,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1457 switch (action) { 1454 switch (action) {
1458 case IO_SCH_ORPH_UNREG: 1455 case IO_SCH_ORPH_UNREG:
1459 case IO_SCH_UNREG: 1456 case IO_SCH_UNREG:
1460 css_sch_device_unregister(sch); 1457 if (!cdev || !cdev->private->flags.resuming)
1458 css_sch_device_unregister(sch);
1461 break; 1459 break;
1462 case IO_SCH_ORPH_ATTACH: 1460 case IO_SCH_ORPH_ATTACH:
1463 case IO_SCH_UNREG_ATTACH: 1461 case IO_SCH_UNREG_ATTACH:
@@ -1779,26 +1777,42 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
1779{ 1777{
1780 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1778 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1781 1779
1782 if (cio_is_console(sch->schid)) 1780 spin_lock_irq(sch->lock);
1783 goto out; 1781 if (cio_is_console(sch->schid)) {
1782 cio_enable_subchannel(sch, (u32)(addr_t)sch);
1783 goto out_unlock;
1784 }
1784 /* 1785 /*
1785 * While we were sleeping, devices may have gone or become 1786 * While we were sleeping, devices may have gone or become
1786 * available again. Kick re-detection. 1787 * available again. Kick re-detection.
1787 */ 1788 */
1788 spin_lock_irq(sch->lock);
1789 cdev->private->flags.resuming = 1; 1789 cdev->private->flags.resuming = 1;
1790 css_schedule_eval(sch->schid);
1791 spin_unlock_irq(sch->lock);
1792 css_complete_work();
1793
1794 /* cdev may have been moved to a different subchannel. */
1795 sch = to_subchannel(cdev->dev.parent);
1796 spin_lock_irq(sch->lock);
1797 if (cdev->private->state != DEV_STATE_ONLINE &&
1798 cdev->private->state != DEV_STATE_OFFLINE)
1799 goto out_unlock;
1800
1790 ccw_device_recognition(cdev); 1801 ccw_device_recognition(cdev);
1791 spin_unlock_irq(sch->lock); 1802 spin_unlock_irq(sch->lock);
1792 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || 1803 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
1793 cdev->private->state == DEV_STATE_DISCONNECTED); 1804 cdev->private->state == DEV_STATE_DISCONNECTED);
1794out: 1805 spin_lock_irq(sch->lock);
1806
1807out_unlock:
1795 cdev->private->flags.resuming = 0; 1808 cdev->private->flags.resuming = 0;
1809 spin_unlock_irq(sch->lock);
1796} 1810}
1797 1811
1798static int resume_handle_boxed(struct ccw_device *cdev) 1812static int resume_handle_boxed(struct ccw_device *cdev)
1799{ 1813{
1800 cdev->private->state = DEV_STATE_BOXED; 1814 cdev->private->state = DEV_STATE_BOXED;
1801 if (ccw_device_notify(cdev, CIO_BOXED)) 1815 if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
1802 return 0; 1816 return 0;
1803 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 1817 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1804 return -ENODEV; 1818 return -ENODEV;
@@ -1807,7 +1821,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
1807static int resume_handle_disc(struct ccw_device *cdev) 1821static int resume_handle_disc(struct ccw_device *cdev)
1808{ 1822{
1809 cdev->private->state = DEV_STATE_DISCONNECTED; 1823 cdev->private->state = DEV_STATE_DISCONNECTED;
1810 if (ccw_device_notify(cdev, CIO_GONE)) 1824 if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
1811 return 0; 1825 return 0;
1812 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 1826 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1813 return -ENODEV; 1827 return -ENODEV;
@@ -1816,40 +1830,31 @@ static int resume_handle_disc(struct ccw_device *cdev)
1816static int ccw_device_pm_restore(struct device *dev) 1830static int ccw_device_pm_restore(struct device *dev)
1817{ 1831{
1818 struct ccw_device *cdev = to_ccwdev(dev); 1832 struct ccw_device *cdev = to_ccwdev(dev);
1819 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1833 struct subchannel *sch;
1820 int ret = 0, cm_enabled; 1834 int ret = 0;
1821 1835
1822 __ccw_device_pm_restore(cdev); 1836 __ccw_device_pm_restore(cdev);
1837 sch = to_subchannel(cdev->dev.parent);
1823 spin_lock_irq(sch->lock); 1838 spin_lock_irq(sch->lock);
1824 if (cio_is_console(sch->schid)) { 1839 if (cio_is_console(sch->schid))
1825 cio_enable_subchannel(sch, (u32)(addr_t)sch);
1826 spin_unlock_irq(sch->lock);
1827 goto out_restore; 1840 goto out_restore;
1828 } 1841
1829 cdev->private->flags.donotify = 0;
1830 /* check recognition results */ 1842 /* check recognition results */
1831 switch (cdev->private->state) { 1843 switch (cdev->private->state) {
1832 case DEV_STATE_OFFLINE: 1844 case DEV_STATE_OFFLINE:
1845 case DEV_STATE_ONLINE:
1846 cdev->private->flags.donotify = 0;
1833 break; 1847 break;
1834 case DEV_STATE_BOXED: 1848 case DEV_STATE_BOXED:
1835 ret = resume_handle_boxed(cdev); 1849 ret = resume_handle_boxed(cdev);
1836 spin_unlock_irq(sch->lock);
1837 if (ret) 1850 if (ret)
1838 goto out; 1851 goto out_unlock;
1839 goto out_restore; 1852 goto out_restore;
1840 case DEV_STATE_DISCONNECTED:
1841 goto out_disc_unlock;
1842 default: 1853 default:
1843 goto out_unreg_unlock; 1854 ret = resume_handle_disc(cdev);
1844 } 1855 if (ret)
1845 /* check if the device id has changed */ 1856 goto out_unlock;
1846 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1857 goto out_restore;
1847 CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
1848 "changed from %04x to %04x)\n",
1849 sch->schid.ssid, sch->schid.sch_no,
1850 cdev->private->dev_id.devno,
1851 sch->schib.pmcw.dev);
1852 goto out_unreg_unlock;
1853 } 1858 }
1854 /* check if the device type has changed */ 1859 /* check if the device type has changed */
1855 if (!ccw_device_test_sense_data(cdev)) { 1860 if (!ccw_device_test_sense_data(cdev)) {
@@ -1858,24 +1863,30 @@ static int ccw_device_pm_restore(struct device *dev)
1858 ret = -ENODEV; 1863 ret = -ENODEV;
1859 goto out_unlock; 1864 goto out_unlock;
1860 } 1865 }
1861 if (!cdev->online) { 1866 if (!cdev->online)
1862 ret = 0;
1863 goto out_unlock; 1867 goto out_unlock;
1864 }
1865 ret = ccw_device_online(cdev);
1866 if (ret)
1867 goto out_disc_unlock;
1868 1868
1869 cm_enabled = cdev->private->cmb != NULL; 1869 if (ccw_device_online(cdev)) {
1870 ret = resume_handle_disc(cdev);
1871 if (ret)
1872 goto out_unlock;
1873 goto out_restore;
1874 }
1870 spin_unlock_irq(sch->lock); 1875 spin_unlock_irq(sch->lock);
1871
1872 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 1876 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
1873 if (cdev->private->state != DEV_STATE_ONLINE) { 1877 spin_lock_irq(sch->lock);
1874 spin_lock_irq(sch->lock); 1878
1875 goto out_disc_unlock; 1879 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
1880 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1881 ret = -ENODEV;
1882 goto out_unlock;
1876 } 1883 }
1877 if (cm_enabled) { 1884
1885 /* reenable cmf, if needed */
1886 if (cdev->private->cmb) {
1887 spin_unlock_irq(sch->lock);
1878 ret = ccw_set_cmf(cdev, 1); 1888 ret = ccw_set_cmf(cdev, 1);
1889 spin_lock_irq(sch->lock);
1879 if (ret) { 1890 if (ret) {
1880 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " 1891 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
1881 "(rc=%d)\n", cdev->private->dev_id.ssid, 1892 "(rc=%d)\n", cdev->private->dev_id.ssid,
@@ -1885,21 +1896,11 @@ static int ccw_device_pm_restore(struct device *dev)
1885 } 1896 }
1886 1897
1887out_restore: 1898out_restore:
1899 spin_unlock_irq(sch->lock);
1888 if (cdev->online && cdev->drv && cdev->drv->restore) 1900 if (cdev->online && cdev->drv && cdev->drv->restore)
1889 ret = cdev->drv->restore(cdev); 1901 ret = cdev->drv->restore(cdev);
1890out:
1891 return ret; 1902 return ret;
1892 1903
1893out_disc_unlock:
1894 ret = resume_handle_disc(cdev);
1895 spin_unlock_irq(sch->lock);
1896 if (ret)
1897 return ret;
1898 goto out_restore;
1899
1900out_unreg_unlock:
1901 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
1902 ret = -ENODEV;
1903out_unlock: 1904out_unlock:
1904 spin_unlock_irq(sch->lock); 1905 spin_unlock_irq(sch->lock);
1905 return ret; 1906 return ret;
@@ -2028,7 +2029,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2028 /* Get workqueue ref. */ 2029 /* Get workqueue ref. */
2029 if (!get_device(&cdev->dev)) 2030 if (!get_device(&cdev->dev))
2030 return; 2031 return;
2031 if (!queue_work(slow_path_wq, &cdev->private->todo_work)) { 2032 if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
2032 /* Already queued, release workqueue ref. */ 2033 /* Already queued, release workqueue ref. */
2033 put_device(&cdev->dev); 2034 put_device(&cdev->dev);
2034 } 2035 }
@@ -2041,5 +2042,4 @@ EXPORT_SYMBOL(ccw_driver_register);
2041EXPORT_SYMBOL(ccw_driver_unregister); 2042EXPORT_SYMBOL(ccw_driver_unregister);
2042EXPORT_SYMBOL(get_ccwdev_by_busid); 2043EXPORT_SYMBOL(get_ccwdev_by_busid);
2043EXPORT_SYMBOL(ccw_bus_type); 2044EXPORT_SYMBOL(ccw_bus_type);
2044EXPORT_SYMBOL(ccw_device_work);
2045EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); 2045EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
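
With the dedicated ccw_device_work and kslowcrw workqueues gone, both css_sched_sch_todo() and ccw_device_sched_todo() now queue their todo work on the single cio_work_q created in channel_subsystem_init(). Both sites use the same reference-counting idiom when queueing: pin the device first, and give the reference back if the work item was already pending. A minimal sketch of that idiom (illustrative names, not from the patch):

#include <linux/device.h>
#include <linux/workqueue.h>

static void demo_sched_todo(struct device *dev, struct work_struct *work,
			    struct workqueue_struct *wq)
{
	if (!get_device(dev))		/* pin the device for the worker */
		return;
	if (!queue_work(wq, work)) {
		/* already queued once; drop the extra reference again */
		put_device(dev);
	}
}

/* The matching work function drops the reference when it has finished: */
static void demo_todo_done(struct device *dev)
{
	put_device(dev);
}
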
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index bcfe13e42638..379de2d1ec49 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -4,7 +4,7 @@
4#include <asm/ccwdev.h> 4#include <asm/ccwdev.h>
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <linux/wait.h> 6#include <linux/wait.h>
7 7#include <linux/notifier.h>
8#include "io_sch.h" 8#include "io_sch.h"
9 9
10/* 10/*
@@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
71 cdev->private->state == DEV_STATE_BOXED); 71 cdev->private->state == DEV_STATE_BOXED);
72} 72}
73 73
74extern struct workqueue_struct *ccw_device_work;
75extern wait_queue_head_t ccw_device_init_wq; 74extern wait_queue_head_t ccw_device_init_wq;
76extern atomic_t ccw_device_init_count; 75extern atomic_t ccw_device_init_count;
77int __init io_subchannel_init(void); 76int __init io_subchannel_init(void);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ae760658a131..c56ab94612f9 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -313,21 +313,43 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
313 } 313 }
314} 314}
315 315
316/**
317 * ccw_device_notify() - inform the device's driver about an event
318 * @cdev: device for which an event occurred
319 * @event: event that occurred
320 *
321 * Returns:
322 * -%EINVAL if the device is offline or has no driver.
323 * -%EOPNOTSUPP if the device's driver has no notifier registered.
324 * %NOTIFY_OK if the driver wants to keep the device.
325 * %NOTIFY_BAD if the driver doesn't want to keep the device.
326 */
316int ccw_device_notify(struct ccw_device *cdev, int event) 327int ccw_device_notify(struct ccw_device *cdev, int event)
317{ 328{
329 int ret = -EINVAL;
330
318 if (!cdev->drv) 331 if (!cdev->drv)
319 return 0; 332 goto out;
320 if (!cdev->online) 333 if (!cdev->online)
321 return 0; 334 goto out;
322 CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", 335 CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
323 cdev->private->dev_id.ssid, cdev->private->dev_id.devno, 336 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
324 event); 337 event);
325 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 338 if (!cdev->drv->notify) {
339 ret = -EOPNOTSUPP;
340 goto out;
341 }
342 if (cdev->drv->notify(cdev, event))
343 ret = NOTIFY_OK;
344 else
345 ret = NOTIFY_BAD;
346out:
347 return ret;
326} 348}
327 349
328static void ccw_device_oper_notify(struct ccw_device *cdev) 350static void ccw_device_oper_notify(struct ccw_device *cdev)
329{ 351{
330 if (ccw_device_notify(cdev, CIO_OPER)) { 352 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
331 /* Reenable channel measurements, if needed. */ 353 /* Reenable channel measurements, if needed. */
332 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); 354 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
333 return; 355 return;
@@ -361,14 +383,15 @@ ccw_device_done(struct ccw_device *cdev, int state)
361 case DEV_STATE_BOXED: 383 case DEV_STATE_BOXED:
362 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", 384 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
363 cdev->private->dev_id.devno, sch->schid.sch_no); 385 cdev->private->dev_id.devno, sch->schid.sch_no);
364 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) 386 if (cdev->online &&
387 ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
365 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 388 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
366 cdev->private->flags.donotify = 0; 389 cdev->private->flags.donotify = 0;
367 break; 390 break;
368 case DEV_STATE_NOT_OPER: 391 case DEV_STATE_NOT_OPER:
369 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", 392 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
370 cdev->private->dev_id.devno, sch->schid.sch_no); 393 cdev->private->dev_id.devno, sch->schid.sch_no);
371 if (!ccw_device_notify(cdev, CIO_GONE)) 394 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
372 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 395 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
373 else 396 else
374 ccw_device_set_disconnected(cdev); 397 ccw_device_set_disconnected(cdev);
@@ -378,7 +401,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
378 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " 401 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
379 "%04x\n", cdev->private->dev_id.devno, 402 "%04x\n", cdev->private->dev_id.devno,
380 sch->schid.sch_no); 403 sch->schid.sch_no);
381 if (!ccw_device_notify(cdev, CIO_NO_PATH)) 404 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
382 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 405 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
383 else 406 else
384 ccw_device_set_disconnected(cdev); 407 ccw_device_set_disconnected(cdev);
@@ -586,7 +609,7 @@ ccw_device_offline(struct ccw_device *cdev)
586static void ccw_device_generic_notoper(struct ccw_device *cdev, 609static void ccw_device_generic_notoper(struct ccw_device *cdev,
587 enum dev_event dev_event) 610 enum dev_event dev_event)
588{ 611{
589 if (!ccw_device_notify(cdev, CIO_GONE)) 612 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
590 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 613 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
591 else 614 else
592 ccw_device_set_disconnected(cdev); 615 ccw_device_set_disconnected(cdev);
@@ -667,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
667 struct irb *irb; 690 struct irb *irb;
668 int is_cmd; 691 int is_cmd;
669 692
670 irb = (struct irb *) __LC_IRB; 693 irb = (struct irb *)&S390_lowcore.irb;
671 is_cmd = !scsw_is_tm(&irb->scsw); 694 is_cmd = !scsw_is_tm(&irb->scsw);
672 /* Check for unsolicited interrupt. */ 695 /* Check for unsolicited interrupt. */
673 if (!scsw_is_solicited(&irb->scsw)) { 696 if (!scsw_is_solicited(&irb->scsw)) {
@@ -732,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
732{ 755{
733 struct irb *irb; 756 struct irb *irb;
734 757
735 irb = (struct irb *) __LC_IRB; 758 irb = (struct irb *)&S390_lowcore.irb;
736 /* Check for unsolicited interrupt. */ 759 /* Check for unsolicited interrupt. */
737 if (scsw_stctl(&irb->scsw) == 760 if (scsw_stctl(&irb->scsw) ==
738 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 761 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 44f2f6a97f33..48aa0647432b 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,18 +208,27 @@ struct qdio_dev_perf_stat {
208 unsigned int eqbs_partial; 208 unsigned int eqbs_partial;
209 unsigned int sqbs; 209 unsigned int sqbs;
210 unsigned int sqbs_partial; 210 unsigned int sqbs_partial;
211} ____cacheline_aligned;
212
213struct qdio_queue_perf_stat {
214 /*
215 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
216 * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
217 * aka 127 SBALs found.
218 */
219 unsigned int nr_sbals[8];
220 unsigned int nr_sbal_error;
221 unsigned int nr_sbal_nop;
222 unsigned int nr_sbal_total;
211}; 223};
212 224
213struct qdio_input_q { 225struct qdio_input_q {
214 /* input buffer acknowledgement flag */ 226 /* input buffer acknowledgement flag */
215 int polling; 227 int polling;
216
217 /* first ACK'ed buffer */ 228 /* first ACK'ed buffer */
218 int ack_start; 229 int ack_start;
219
220 /* how much sbals are acknowledged with qebsm */ 230 /* how much sbals are acknowledged with qebsm */
221 int ack_count; 231 int ack_count;
222
223 /* last time of noticing incoming data */ 232 /* last time of noticing incoming data */
224 u64 timestamp; 233 u64 timestamp;
225}; 234};
@@ -227,40 +236,27 @@ struct qdio_input_q {
227struct qdio_output_q { 236struct qdio_output_q {
228 /* PCIs are enabled for the queue */ 237 /* PCIs are enabled for the queue */
229 int pci_out_enabled; 238 int pci_out_enabled;
230
231 /* IQDIO: output multiple buffers (enhanced SIGA) */ 239 /* IQDIO: output multiple buffers (enhanced SIGA) */
232 int use_enh_siga; 240 int use_enh_siga;
233
234 /* timer to check for more outbound work */ 241 /* timer to check for more outbound work */
235 struct timer_list timer; 242 struct timer_list timer;
236}; 243};
237 244
245/*
246 * Note on cache alignment: grouped slsb and write mostly data at the beginning
247 * sbal[] is read-only and starts on a new cacheline followed by read mostly.
248 */
238struct qdio_q { 249struct qdio_q {
239 struct slsb slsb; 250 struct slsb slsb;
251
240 union { 252 union {
241 struct qdio_input_q in; 253 struct qdio_input_q in;
242 struct qdio_output_q out; 254 struct qdio_output_q out;
243 } u; 255 } u;
244 256
245 /* queue number */
246 int nr;
247
248 /* bitmask of queue number */
249 int mask;
250
251 /* input or output queue */
252 int is_input_q;
253
254 /* list of thinint input queues */
255 struct list_head entry;
256
257 /* upper-layer program handler */
258 qdio_handler_t (*handler);
259
260 /* 257 /*
261 * inbound: next buffer the program should check for 258 * inbound: next buffer the program should check for
262 * outbound: next buffer to check for having been processed 259 * outbound: next buffer to check if adapter processed it
263 * by the card
264 */ 260 */
265 int first_to_check; 261 int first_to_check;
266 262
@@ -273,16 +269,32 @@ struct qdio_q {
273 /* number of buffers in use by the adapter */ 269 /* number of buffers in use by the adapter */
274 atomic_t nr_buf_used; 270 atomic_t nr_buf_used;
275 271
276 struct qdio_irq *irq_ptr;
277 struct dentry *debugfs_q;
278 struct tasklet_struct tasklet;
279
280 /* error condition during a data transfer */ 272 /* error condition during a data transfer */
281 unsigned int qdio_error; 273 unsigned int qdio_error;
282 274
283 struct sl *sl; 275 struct tasklet_struct tasklet;
284 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; 276 struct qdio_queue_perf_stat q_stats;
277
278 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
279
280 /* queue number */
281 int nr;
282
283 /* bitmask of queue number */
284 int mask;
285
286 /* input or output queue */
287 int is_input_q;
288
289 /* list of thinint input queues */
290 struct list_head entry;
285 291
292 /* upper-layer program handler */
293 qdio_handler_t (*handler);
294
295 struct dentry *debugfs_q;
296 struct qdio_irq *irq_ptr;
297 struct sl *sl;
286 /* 298 /*
287 * Warning: Leave this member at the end so it won't be cleared in 299 * Warning: Leave this member at the end so it won't be cleared in
288 * qdio_fill_qs. A page is allocated under this pointer and used for 300 * qdio_fill_qs. A page is allocated under this pointer and used for
@@ -317,12 +329,8 @@ struct qdio_irq {
317 struct qdio_ssqd_desc ssqd_desc; 329 struct qdio_ssqd_desc ssqd_desc;
318 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); 330 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
319 331
320 struct qdio_dev_perf_stat perf_stat;
321 int perf_stat_enabled; 332 int perf_stat_enabled;
322 /* 333
323 * Warning: Leave these members together at the end so they won't be
324 * cleared in qdio_setup_irq.
325 */
326 struct qdr *qdr; 334 struct qdr *qdr;
327 unsigned long chsc_page; 335 unsigned long chsc_page;
328 336
@@ -331,6 +339,7 @@ struct qdio_irq {
331 339
332 debug_info_t *debug_area; 340 debug_info_t *debug_area;
333 struct mutex setup_mutex; 341 struct mutex setup_mutex;
342 struct qdio_dev_perf_stat perf_stat;
334}; 343};
335 344
336/* helper functions */ 345/* helper functions */
@@ -341,9 +350,20 @@ struct qdio_irq {
341 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ 350 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
342 css_general_characteristics.aif_osa) 351 css_general_characteristics.aif_osa)
343 352
344#define qperf(qdev,attr) qdev->perf_stat.attr 353#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr))
345#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \ 354
346 q->irq_ptr->perf_stat.attr++ 355#define qperf_inc(__q, __attr) \
356({ \
357 struct qdio_irq *qdev = (__q)->irq_ptr; \
358 if (qdev->perf_stat_enabled) \
359 (qdev->perf_stat.__attr)++; \
360})
361
362static inline void account_sbals_error(struct qdio_q *q, int count)
363{
364 q->q_stats.nr_sbal_error += count;
365 q->q_stats.nr_sbal_total += count;
366}
347 367
348/* the highest iqdio queue is used for multicast */ 368/* the highest iqdio queue is used for multicast */
349static inline int multicast_outbound(struct qdio_q *q) 369static inline int multicast_outbound(struct qdio_q *q)
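
The new qdio_queue_perf_stat histogram sorts the number of SBALs found per queue scan into order-2 buckets (1, 2-3, 4-7, ..., 64-127), and because at most 127 SBALs are ever scanned, the eighth slot doubles as the "queue full" counter. account_sbals() in qdio_main.c further down computes the bucket as the position of the highest set bit; a standalone sketch of that mapping, with a few worked values:

/*
 * Map a per-scan SBAL count (1..127) to its order-2 bucket index, the way
 * account_sbals() does: bucket = floor(log2(count)), except that a full
 * scan of 127 SBALs (QDIO_MAX_BUFFERS_MASK) is booked in the last bucket.
 */
static inline int sbal_bucket(int count)
{
	int pos = 0;

	if (count == 127)		/* queue full, reuse the "128" slot */
		return 7;
	while (count >>= 1)
		pos++;
	return pos;
}

/*
 * Examples: sbal_bucket(1) == 0, sbal_bucket(3) == 1, sbal_bucket(48) == 5,
 * sbal_bucket(100) == 6, sbal_bucket(127) == 7.
 */
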
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f49761ff9a00..c94eb2a0fa2e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -60,7 +60,7 @@ static int qstat_show(struct seq_file *m, void *v)
60 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); 60 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
61 seq_printf(m, "polling: %d ack start: %d ack count: %d\n", 61 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
62 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); 62 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
63 seq_printf(m, "slsb buffer states:\n"); 63 seq_printf(m, "SBAL states:\n");
64 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 64 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
65 65
66 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { 66 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
@@ -97,6 +97,20 @@ static int qstat_show(struct seq_file *m, void *v)
97 } 97 }
98 seq_printf(m, "\n"); 98 seq_printf(m, "\n");
99 seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); 99 seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
100
101 seq_printf(m, "\nSBAL statistics:");
102 if (!q->irq_ptr->perf_stat_enabled) {
103 seq_printf(m, " disabled\n");
104 return 0;
105 }
106
107 seq_printf(m, "\n1 2.. 4.. 8.. "
108 "16.. 32.. 64.. 127\n");
109 for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
110 seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
111 seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
112 q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
113 q->q_stats.nr_sbal_total);
100 return 0; 114 return 0;
101} 115}
102 116
@@ -181,9 +195,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
181{ 195{
182 struct seq_file *seq = file->private_data; 196 struct seq_file *seq = file->private_data;
183 struct qdio_irq *irq_ptr = seq->private; 197 struct qdio_irq *irq_ptr = seq->private;
198 struct qdio_q *q;
184 unsigned long val; 199 unsigned long val;
185 char buf[8]; 200 char buf[8];
186 int ret; 201 int ret, i;
187 202
188 if (!irq_ptr) 203 if (!irq_ptr)
189 return 0; 204 return 0;
@@ -201,6 +216,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
201 case 0: 216 case 0:
202 irq_ptr->perf_stat_enabled = 0; 217 irq_ptr->perf_stat_enabled = 0;
203 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); 218 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
219 for_each_input_queue(irq_ptr, q, i)
220 memset(&q->q_stats, 0, sizeof(q->q_stats));
221 for_each_output_queue(irq_ptr, q, i)
222 memset(&q->q_stats, 0, sizeof(q->q_stats));
204 break; 223 break;
205 case 1: 224 case 1:
206 irq_ptr->perf_stat_enabled = 1; 225 irq_ptr->perf_stat_enabled = 1;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 62b654af9237..232ef047ba34 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -392,6 +392,20 @@ static inline void qdio_stop_polling(struct qdio_q *q)
392 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); 392 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
393} 393}
394 394
395static inline void account_sbals(struct qdio_q *q, int count)
396{
397 int pos = 0;
398
399 q->q_stats.nr_sbal_total += count;
400 if (count == QDIO_MAX_BUFFERS_MASK) {
401 q->q_stats.nr_sbals[7]++;
402 return;
403 }
404 while (count >>= 1)
405 pos++;
406 q->q_stats.nr_sbals[pos]++;
407}
408
395static void announce_buffer_error(struct qdio_q *q, int count) 409static void announce_buffer_error(struct qdio_q *q, int count)
396{ 410{
397 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 411 q->qdio_error |= QDIO_ERROR_SLSB_STATE;
@@ -487,16 +501,22 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
487 q->first_to_check = add_buf(q->first_to_check, count); 501 q->first_to_check = add_buf(q->first_to_check, count);
488 if (atomic_sub(count, &q->nr_buf_used) == 0) 502 if (atomic_sub(count, &q->nr_buf_used) == 0)
489 qperf_inc(q, inbound_queue_full); 503 qperf_inc(q, inbound_queue_full);
504 if (q->irq_ptr->perf_stat_enabled)
505 account_sbals(q, count);
490 break; 506 break;
491 case SLSB_P_INPUT_ERROR: 507 case SLSB_P_INPUT_ERROR:
492 announce_buffer_error(q, count); 508 announce_buffer_error(q, count);
493 /* process the buffer, the upper layer will take care of it */ 509 /* process the buffer, the upper layer will take care of it */
494 q->first_to_check = add_buf(q->first_to_check, count); 510 q->first_to_check = add_buf(q->first_to_check, count);
495 atomic_sub(count, &q->nr_buf_used); 511 atomic_sub(count, &q->nr_buf_used);
512 if (q->irq_ptr->perf_stat_enabled)
513 account_sbals_error(q, count);
496 break; 514 break;
497 case SLSB_CU_INPUT_EMPTY: 515 case SLSB_CU_INPUT_EMPTY:
498 case SLSB_P_INPUT_NOT_INIT: 516 case SLSB_P_INPUT_NOT_INIT:
499 case SLSB_P_INPUT_ACK: 517 case SLSB_P_INPUT_ACK:
518 if (q->irq_ptr->perf_stat_enabled)
519 q->q_stats.nr_sbal_nop++;
500 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); 520 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
501 break; 521 break;
502 default: 522 default:
@@ -514,7 +534,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
514 534
515 if ((bufnr != q->last_move) || q->qdio_error) { 535 if ((bufnr != q->last_move) || q->qdio_error) {
516 q->last_move = bufnr; 536 q->last_move = bufnr;
517 if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) 537 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
518 q->u.in.timestamp = get_usecs(); 538 q->u.in.timestamp = get_usecs();
519 return 1; 539 return 1;
520 } else 540 } else
@@ -643,15 +663,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
643 663
644 atomic_sub(count, &q->nr_buf_used); 664 atomic_sub(count, &q->nr_buf_used);
645 q->first_to_check = add_buf(q->first_to_check, count); 665 q->first_to_check = add_buf(q->first_to_check, count);
666 if (q->irq_ptr->perf_stat_enabled)
667 account_sbals(q, count);
646 break; 668 break;
647 case SLSB_P_OUTPUT_ERROR: 669 case SLSB_P_OUTPUT_ERROR:
648 announce_buffer_error(q, count); 670 announce_buffer_error(q, count);
649 /* process the buffer, the upper layer will take care of it */ 671 /* process the buffer, the upper layer will take care of it */
650 q->first_to_check = add_buf(q->first_to_check, count); 672 q->first_to_check = add_buf(q->first_to_check, count);
651 atomic_sub(count, &q->nr_buf_used); 673 atomic_sub(count, &q->nr_buf_used);
674 if (q->irq_ptr->perf_stat_enabled)
675 account_sbals_error(q, count);
652 break; 676 break;
653 case SLSB_CU_OUTPUT_PRIMED: 677 case SLSB_CU_OUTPUT_PRIMED:
654 /* the adapter has not fetched the output yet */ 678 /* the adapter has not fetched the output yet */
679 if (q->irq_ptr->perf_stat_enabled)
680 q->q_stats.nr_sbal_nop++;
655 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); 681 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
656 break; 682 break;
657 case SLSB_P_OUTPUT_NOT_INIT: 683 case SLSB_P_OUTPUT_NOT_INIT:
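
account_sbals() above buckets each processed batch of SBALs by floor(log2(count)) into nr_sbals[0..7], with a completely full ring (count == QDIO_MAX_BUFFERS_MASK, i.e. 127) forced into the last bucket; this is what the new "1 2.. 4.. 8.. 16.. 32.. 64.. 127" row in qdio_debug.c reports. A stand-alone sketch of just the bucketing, assuming the 128-entry ring:

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q  128
    #define QDIO_MAX_BUFFERS_MASK   (QDIO_MAX_BUFFERS_PER_Q - 1)

    /* Same bucketing as account_sbals(): index = floor(log2(count)),
     * except that a completely full ring goes into the last bucket. */
    static int sbal_bucket(int count)
    {
            int pos = 0;

            if (count == QDIO_MAX_BUFFERS_MASK)
                    return 7;
            while (count >>= 1)
                    pos++;
            return pos;
    }

    int main(void)
    {
            int samples[] = { 1, 2, 3, 4, 15, 16, 64, 126, 127 };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("count %3d -> nr_sbals[%d]\n",
                           samples[i], sbal_bucket(samples[i]));
            return 0;
    }
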
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 8c2dea5fa2b4..7f4a75465140 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -333,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
333 irq_ptr->qdr->qdf0[i + nr].slsba = 333 irq_ptr->qdr->qdf0[i + nr].slsba =
334 (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; 334 (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
335 335
336 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; 336 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
337 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; 337 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
338 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; 338 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
339 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; 339 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
340} 340}
341 341
342static void setup_qdr(struct qdio_irq *irq_ptr, 342static void setup_qdr(struct qdio_irq *irq_ptr,
@@ -350,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
350 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ 350 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
351 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; 351 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
352 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; 352 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
353 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; 353 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
354 354
355 for (i = 0; i < qdio_init->no_input_qs; i++) 355 for (i = 0; i < qdio_init->no_input_qs; i++)
356 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); 356 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
@@ -382,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
382 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 382 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
383 int rc; 383 int rc;
384 384
385 memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); 385 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
386 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
387 memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
388 memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
389 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
390
391 irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
392 irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
393
386 /* wipes qib.ac, required by ar7063 */ 394 /* wipes qib.ac, required by ar7063 */
387 memset(irq_ptr->qdr, 0, sizeof(struct qdr)); 395 memset(irq_ptr->qdr, 0, sizeof(struct qdr));
388 396
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 091d904d3182..9942c1031b25 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -198,8 +198,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
198 .code = 0x0021, 198 .code = 0x0021,
199 }; 199 };
200 scssc_area->operation_code = 0; 200 scssc_area->operation_code = 0;
201 scssc_area->ks = PAGE_DEFAULT_KEY; 201 scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
202 scssc_area->kc = PAGE_DEFAULT_KEY; 202 scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
203 scssc_area->isc = QDIO_AIRQ_ISC; 203 scssc_area->isc = QDIO_AIRQ_ISC;
204 scssc_area->schid = irq_ptr->schid; 204 scssc_area->schid = irq_ptr->schid;
205 205
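
All of the PAGE_DEFAULT_KEY changes above feed 4-bit key fields (akey/bkey/ckey/dkey, qkey, ks/kc). On s390 that macro carries the storage access key already shifted into bits 4-7 of a key byte (roughly PAGE_DEFAULT_ACC << 4), so it has to be shifted back down before it fits a 4-bit field. A schematic illustration; the macro values below are assumptions chosen for the demo, not copied from the kernel headers:

    #include <stdio.h>

    /* Assumed layout: the access-control bits sit in bits 4-7 of the
     * storage key byte, so PAGE_DEFAULT_KEY is the key shifted left by 4.
     * A non-zero key is used here only to make the truncation visible. */
    #define PAGE_DEFAULT_ACC   6                      /* demo value */
    #define PAGE_DEFAULT_KEY   (PAGE_DEFAULT_ACC << 4)

    struct qdesfmt0_sketch {
            unsigned int akey : 4;    /* 4-bit field, expects the raw key */
    };

    int main(void)
    {
            struct qdesfmt0_sketch d;

            d.akey = PAGE_DEFAULT_KEY;        /* old code: keeps low 4 bits only */
            printf("unshifted: %u\n", (unsigned int)d.akey);

            d.akey = PAGE_DEFAULT_KEY >> 4;   /* patched code */
            printf("shifted:   %u\n", (unsigned int)d.akey);
            return 0;
    }
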
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index c68be24e27d9..ba50fe02e572 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -33,6 +33,7 @@
33#include <linux/miscdevice.h> 33#include <linux/miscdevice.h>
34#include <linux/fs.h> 34#include <linux/fs.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
36#include <linux/compat.h> 37#include <linux/compat.h>
37#include <linux/smp_lock.h> 38#include <linux/smp_lock.h>
38#include <asm/atomic.h> 39#include <asm/atomic.h>
@@ -912,126 +913,105 @@ static struct miscdevice zcrypt_misc_device = {
912 */ 913 */
913static struct proc_dir_entry *zcrypt_entry; 914static struct proc_dir_entry *zcrypt_entry;
914 915
915static int sprintcl(unsigned char *outaddr, unsigned char *addr, 916static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
916 unsigned int len)
917{ 917{
918 int hl, i; 918 int i;
919 919
920 hl = 0;
921 for (i = 0; i < len; i++) 920 for (i = 0; i < len; i++)
922 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); 921 seq_printf(m, "%01x", (unsigned int) addr[i]);
923 hl += sprintf(outaddr+hl, " "); 922 seq_putc(m, ' ');
924 return hl;
925} 923}
926 924
927static int sprintrw(unsigned char *outaddr, unsigned char *addr, 925static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
928 unsigned int len)
929{ 926{
930 int hl, inl, c, cx; 927 int inl, c, cx;
931 928
932 hl = sprintf(outaddr, " "); 929 seq_printf(m, " ");
933 inl = 0; 930 inl = 0;
934 for (c = 0; c < (len / 16); c++) { 931 for (c = 0; c < (len / 16); c++) {
935 hl += sprintcl(outaddr+hl, addr+inl, 16); 932 sprintcl(m, addr+inl, 16);
936 inl += 16; 933 inl += 16;
937 } 934 }
938 cx = len%16; 935 cx = len%16;
939 if (cx) { 936 if (cx) {
940 hl += sprintcl(outaddr+hl, addr+inl, cx); 937 sprintcl(m, addr+inl, cx);
941 inl += cx; 938 inl += cx;
942 } 939 }
943 hl += sprintf(outaddr+hl, "\n"); 940 seq_putc(m, '\n');
944 return hl;
945} 941}
946 942
947static int sprinthx(unsigned char *title, unsigned char *outaddr, 943static void sprinthx(unsigned char *title, struct seq_file *m,
948 unsigned char *addr, unsigned int len) 944 unsigned char *addr, unsigned int len)
949{ 945{
950 int hl, inl, r, rx; 946 int inl, r, rx;
951 947
952 hl = sprintf(outaddr, "\n%s\n", title); 948 seq_printf(m, "\n%s\n", title);
953 inl = 0; 949 inl = 0;
954 for (r = 0; r < (len / 64); r++) { 950 for (r = 0; r < (len / 64); r++) {
955 hl += sprintrw(outaddr+hl, addr+inl, 64); 951 sprintrw(m, addr+inl, 64);
956 inl += 64; 952 inl += 64;
957 } 953 }
958 rx = len % 64; 954 rx = len % 64;
959 if (rx) { 955 if (rx) {
960 hl += sprintrw(outaddr+hl, addr+inl, rx); 956 sprintrw(m, addr+inl, rx);
961 inl += rx; 957 inl += rx;
962 } 958 }
963 hl += sprintf(outaddr+hl, "\n"); 959 seq_putc(m, '\n');
964 return hl;
965} 960}
966 961
967static int sprinthx4(unsigned char *title, unsigned char *outaddr, 962static void sprinthx4(unsigned char *title, struct seq_file *m,
968 unsigned int *array, unsigned int len) 963 unsigned int *array, unsigned int len)
969{ 964{
970 int hl, r; 965 int r;
971 966
972 hl = sprintf(outaddr, "\n%s\n", title); 967 seq_printf(m, "\n%s\n", title);
973 for (r = 0; r < len; r++) { 968 for (r = 0; r < len; r++) {
974 if ((r % 8) == 0) 969 if ((r % 8) == 0)
975 hl += sprintf(outaddr+hl, " "); 970 seq_printf(m, " ");
976 hl += sprintf(outaddr+hl, "%08X ", array[r]); 971 seq_printf(m, "%08X ", array[r]);
977 if ((r % 8) == 7) 972 if ((r % 8) == 7)
978 hl += sprintf(outaddr+hl, "\n"); 973 seq_putc(m, '\n');
979 } 974 }
980 hl += sprintf(outaddr+hl, "\n"); 975 seq_putc(m, '\n');
981 return hl;
982} 976}
983 977
984static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, 978static int zcrypt_proc_show(struct seq_file *m, void *v)
985 int count, int *eof, void *data)
986{ 979{
987 unsigned char *workarea; 980 char workarea[sizeof(int) * AP_DEVICES];
988 int len; 981
989 982 seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
990 len = 0; 983 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
991 984 seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
992 /* resp_buff is a page. Use the right half for a work area */ 985 seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
993 workarea = resp_buff + 2000; 986 seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
994 len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", 987 seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
995 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); 988 seq_printf(m, "PCIXCC MCL2 count: %d\n",
996 len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", 989 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
997 ap_domain_index); 990 seq_printf(m, "PCIXCC MCL3 count: %d\n",
998 len += sprintf(resp_buff + len, "Total device count: %d\n", 991 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
999 zcrypt_device_count); 992 seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
1000 len += sprintf(resp_buff + len, "PCICA count: %d\n", 993 seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
1001 zcrypt_count_type(ZCRYPT_PCICA)); 994 seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
1002 len += sprintf(resp_buff + len, "PCICC count: %d\n", 995 seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
1003 zcrypt_count_type(ZCRYPT_PCICC)); 996 seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
1004 len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", 997 seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
1005 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); 998 seq_printf(m, "Total open handles: %d\n\n",
1006 len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", 999 atomic_read(&zcrypt_open_count));
1007 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
1008 len += sprintf(resp_buff + len, "CEX2C count: %d\n",
1009 zcrypt_count_type(ZCRYPT_CEX2C));
1010 len += sprintf(resp_buff + len, "CEX2A count: %d\n",
1011 zcrypt_count_type(ZCRYPT_CEX2A));
1012 len += sprintf(resp_buff + len, "CEX3C count: %d\n",
1013 zcrypt_count_type(ZCRYPT_CEX3C));
1014 len += sprintf(resp_buff + len, "CEX3A count: %d\n",
1015 zcrypt_count_type(ZCRYPT_CEX3A));
1016 len += sprintf(resp_buff + len, "requestq count: %d\n",
1017 zcrypt_requestq_count());
1018 len += sprintf(resp_buff + len, "pendingq count: %d\n",
1019 zcrypt_pendingq_count());
1020 len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
1021 atomic_read(&zcrypt_open_count));
1022 zcrypt_status_mask(workarea); 1000 zcrypt_status_mask(workarea);
1023 len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " 1001 sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
1024 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", 1002 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
1025 resp_buff+len, workarea, AP_DEVICES); 1003 m, workarea, AP_DEVICES);
1026 zcrypt_qdepth_mask(workarea); 1004 zcrypt_qdepth_mask(workarea);
1027 len += sprinthx("Waiting work element counts", 1005 sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
1028 resp_buff+len, workarea, AP_DEVICES);
1029 zcrypt_perdev_reqcnt((int *) workarea); 1006 zcrypt_perdev_reqcnt((int *) workarea);
1030 len += sprinthx4("Per-device successfully completed request counts", 1007 sprinthx4("Per-device successfully completed request counts",
1031 resp_buff+len,(unsigned int *) workarea, AP_DEVICES); 1008 m, (unsigned int *) workarea, AP_DEVICES);
1032 *eof = 1; 1009 return 0;
1033 memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); 1010}
1034 return len; 1011
1012static int zcrypt_proc_open(struct inode *inode, struct file *file)
1013{
1014 return single_open(file, zcrypt_proc_show, NULL);
1035} 1015}
1036 1016
1037static void zcrypt_disable_card(int index) 1017static void zcrypt_disable_card(int index)
@@ -1061,11 +1041,11 @@ static void zcrypt_enable_card(int index)
1061 spin_unlock_bh(&zcrypt_device_lock); 1041 spin_unlock_bh(&zcrypt_device_lock);
1062} 1042}
1063 1043
1064static int zcrypt_status_write(struct file *file, const char __user *buffer, 1044static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
1065 unsigned long count, void *data) 1045 size_t count, loff_t *pos)
1066{ 1046{
1067 unsigned char *lbuf, *ptr; 1047 unsigned char *lbuf, *ptr;
1068 unsigned long local_count; 1048 size_t local_count;
1069 int j; 1049 int j;
1070 1050
1071 if (count <= 0) 1051 if (count <= 0)
@@ -1115,6 +1095,15 @@ out:
1115 return count; 1095 return count;
1116} 1096}
1117 1097
1098static const struct file_operations zcrypt_proc_fops = {
1099 .owner = THIS_MODULE,
1100 .open = zcrypt_proc_open,
1101 .read = seq_read,
1102 .llseek = seq_lseek,
1103 .release = single_release,
1104 .write = zcrypt_proc_write,
1105};
1106
1118static int zcrypt_rng_device_count; 1107static int zcrypt_rng_device_count;
1119static u32 *zcrypt_rng_buffer; 1108static u32 *zcrypt_rng_buffer;
1120static int zcrypt_rng_buffer_index; 1109static int zcrypt_rng_buffer_index;
@@ -1197,14 +1186,11 @@ int __init zcrypt_api_init(void)
1197 goto out; 1186 goto out;
1198 1187
1199 /* Set up the proc file system */ 1188 /* Set up the proc file system */
1200 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); 1189 zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
1201 if (!zcrypt_entry) { 1190 if (!zcrypt_entry) {
1202 rc = -ENOMEM; 1191 rc = -ENOMEM;
1203 goto out_misc; 1192 goto out_misc;
1204 } 1193 }
1205 zcrypt_entry->data = NULL;
1206 zcrypt_entry->read_proc = zcrypt_status_read;
1207 zcrypt_entry->write_proc = zcrypt_status_write;
1208 1194
1209 return 0; 1195 return 0;
1210 1196
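
The zcrypt change above is the usual conversion from a read_proc handler that sprintf()s into a page buffer to the seq_file single_open() pattern: the show routine only emits text, and seq_file handles buffering, offsets and short reads. A minimal, hypothetical /proc entry using the same plumbing (the entry name and contents are made up; the seq_file and proc_create() calls are the same API the patch uses):

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* show() receives a struct seq_file and just emits text; seq_file does
     * the buffer management the old read_proc handlers did by hand. */
    static int example_proc_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "example value: %d\n", 42);
            return 0;
    }

    static int example_proc_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_proc_show, NULL);
    }

    static const struct file_operations example_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = example_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init example_init(void)
    {
            if (!proc_create("driver/example", 0444, NULL, &example_proc_fops))
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            remove_proc_entry("driver/example", NULL);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
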
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 2930fc763ac5..b2fc4fd63f7f 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -340,11 +340,11 @@ static void kvm_extint_handler(u16 code)
340 return; 340 return;
341 341
342 /* The LSB might be overloaded, we have to mask it */ 342 /* The LSB might be overloaded, we have to mask it */
343 vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL); 343 vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL);
344 344
345 /* We use the LSB of extparam, to decide, if this interrupt is a config 345 /* We use the LSB of extparam, to decide, if this interrupt is a config
346 * change or a "standard" interrupt */ 346 * change or a "standard" interrupt */
347 config_changed = (*(int *) __LC_EXT_PARAMS & 1); 347 config_changed = S390_lowcore.ext_params & 1;
348 348
349 if (config_changed) { 349 if (config_changed) {
350 struct virtio_driver *drv; 350 struct virtio_driver *drv;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index e9b15c3746fa..a81ff7bc5fa1 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1217,12 +1217,6 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
1217 } 1217 }
1218#endif 1218#endif
1219 1219
1220#ifdef CONFIG_SERIAL_8250_AU1X00
1221 /* if access method is AU, it is a 16550 with a quirk */
1222 if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
1223 up->bugs |= UART_BUG_NOMSR;
1224#endif
1225
1226 serial_outp(up, UART_LCR, save_lcr); 1220 serial_outp(up, UART_LCR, save_lcr);
1227 1221
1228 if (up->capabilities != uart_config[up->port.type].flags) { 1222 if (up->capabilities != uart_config[up->port.type].flags) {
@@ -2428,7 +2422,7 @@ serial8250_pm(struct uart_port *port, unsigned int state,
2428static unsigned int serial8250_port_size(struct uart_8250_port *pt) 2422static unsigned int serial8250_port_size(struct uart_8250_port *pt)
2429{ 2423{
2430 if (pt->port.iotype == UPIO_AU) 2424 if (pt->port.iotype == UPIO_AU)
2431 return 0x100000; 2425 return 0x1000;
2432#ifdef CONFIG_ARCH_OMAP 2426#ifdef CONFIG_ARCH_OMAP
2433 if (is_omap_port(pt)) 2427 if (is_omap_port(pt))
2434 return 0x16 << pt->port.regshift; 2428 return 0x16 << pt->port.regshift;
@@ -2585,6 +2579,13 @@ static void serial8250_config_port(struct uart_port *port, int flags)
2585 2579
2586 if (flags & UART_CONFIG_TYPE) 2580 if (flags & UART_CONFIG_TYPE)
2587 autoconfig(up, probeflags); 2581 autoconfig(up, probeflags);
2582
2583#ifdef CONFIG_SERIAL_8250_AU1X00
2584 /* if access method is AU, it is a 16550 with a quirk */
2585 if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
2586 up->bugs |= UART_BUG_NOMSR;
2587#endif
2588
2588 if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ) 2589 if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
2589 autoconfig_irq(up); 2590 autoconfig_irq(up);
2590 2591
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 7ce9e9f567a3..3119fddaedb5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -74,6 +74,7 @@
74#include <linux/io.h> 74#include <linux/io.h>
75#include <linux/of.h> 75#include <linux/of.h>
76#include <linux/of_platform.h> 76#include <linux/of_platform.h>
77#include <linux/clk.h>
77 78
78#include <asm/mpc52xx.h> 79#include <asm/mpc52xx.h>
79#include <asm/mpc52xx_psc.h> 80#include <asm/mpc52xx_psc.h>
@@ -113,6 +114,7 @@ static void mpc52xx_uart_of_enumerate(void);
113 114
114/* Forward declaration of the interruption handling routine */ 115/* Forward declaration of the interruption handling routine */
115static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id); 116static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id);
117static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port);
116 118
117 119
118/* Simple macro to test if a port is console or not. This one is taken 120/* Simple macro to test if a port is console or not. This one is taken
@@ -145,6 +147,11 @@ struct psc_ops {
145 void (*cw_disable_ints)(struct uart_port *port); 147 void (*cw_disable_ints)(struct uart_port *port);
146 void (*cw_restore_ints)(struct uart_port *port); 148 void (*cw_restore_ints)(struct uart_port *port);
147 unsigned long (*getuartclk)(void *p); 149 unsigned long (*getuartclk)(void *p);
150 int (*clock)(struct uart_port *port, int enable);
151 int (*fifoc_init)(void);
152 void (*fifoc_uninit)(void);
153 void (*get_irq)(struct uart_port *, struct device_node *);
154 irqreturn_t (*handle_irq)(struct uart_port *port);
148}; 155};
149 156
150#ifdef CONFIG_PPC_MPC52xx 157#ifdef CONFIG_PPC_MPC52xx
@@ -256,6 +263,18 @@ static unsigned long mpc52xx_getuartclk(void *p)
256 return mpc5xxx_get_bus_frequency(p) / 2; 263 return mpc5xxx_get_bus_frequency(p) / 2;
257} 264}
258 265
266static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np)
267{
268 port->irqflags = IRQF_DISABLED;
269 port->irq = irq_of_parse_and_map(np, 0);
270}
271
272/* 52xx specific interrupt handler. The caller holds the port lock */
273static irqreturn_t mpc52xx_psc_handle_irq(struct uart_port *port)
274{
275 return mpc5xxx_uart_process_int(port);
276}
277
259static struct psc_ops mpc52xx_psc_ops = { 278static struct psc_ops mpc52xx_psc_ops = {
260 .fifo_init = mpc52xx_psc_fifo_init, 279 .fifo_init = mpc52xx_psc_fifo_init,
261 .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy, 280 .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
@@ -273,14 +292,32 @@ static struct psc_ops mpc52xx_psc_ops = {
273 .cw_disable_ints = mpc52xx_psc_cw_disable_ints, 292 .cw_disable_ints = mpc52xx_psc_cw_disable_ints,
274 .cw_restore_ints = mpc52xx_psc_cw_restore_ints, 293 .cw_restore_ints = mpc52xx_psc_cw_restore_ints,
275 .getuartclk = mpc52xx_getuartclk, 294 .getuartclk = mpc52xx_getuartclk,
295 .get_irq = mpc52xx_psc_get_irq,
296 .handle_irq = mpc52xx_psc_handle_irq,
276}; 297};
277 298
278#endif /* CONFIG_MPC52xx */ 299#endif /* CONFIG_MPC52xx */
279 300
280#ifdef CONFIG_PPC_MPC512x 301#ifdef CONFIG_PPC_MPC512x
281#define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1)) 302#define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1))
303
304/* PSC FIFO Controller for mpc512x */
305struct psc_fifoc {
306 u32 fifoc_cmd;
307 u32 fifoc_int;
308 u32 fifoc_dma;
309 u32 fifoc_axe;
310 u32 fifoc_debug;
311};
312
313static struct psc_fifoc __iomem *psc_fifoc;
314static unsigned int psc_fifoc_irq;
315
282static void mpc512x_psc_fifo_init(struct uart_port *port) 316static void mpc512x_psc_fifo_init(struct uart_port *port)
283{ 317{
318 /* /32 prescaler */
319 out_be16(&PSC(port)->mpc52xx_psc_clock_select, 0xdd00);
320
284 out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE); 321 out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE);
285 out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE); 322 out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
286 out_be32(&FIFO_512x(port)->txalarm, 1); 323 out_be32(&FIFO_512x(port)->txalarm, 1);
@@ -393,6 +430,160 @@ static unsigned long mpc512x_getuartclk(void *p)
393 return mpc5xxx_get_bus_frequency(p); 430 return mpc5xxx_get_bus_frequency(p);
394} 431}
395 432
433#define DEFAULT_FIFO_SIZE 16
434
435static unsigned int __init get_fifo_size(struct device_node *np,
436 char *fifo_name)
437{
438 const unsigned int *fp;
439
440 fp = of_get_property(np, fifo_name, NULL);
441 if (fp)
442 return *fp;
443
444 pr_warning("no %s property in %s node, defaulting to %d\n",
445 fifo_name, np->full_name, DEFAULT_FIFO_SIZE);
446
447 return DEFAULT_FIFO_SIZE;
448}
449
450#define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \
451 ((u32)(_base) + sizeof(struct mpc52xx_psc)))
452
453/* Init PSC FIFO Controller */
454static int __init mpc512x_psc_fifoc_init(void)
455{
456 struct device_node *np;
457 void __iomem *psc;
458 unsigned int tx_fifo_size;
459 unsigned int rx_fifo_size;
460 int fifobase = 0; /* current fifo address in 32 bit words */
461
462 np = of_find_compatible_node(NULL, NULL,
463 "fsl,mpc5121-psc-fifo");
464 if (!np) {
465 pr_err("%s: Can't find FIFOC node\n", __func__);
466 return -ENODEV;
467 }
468
469 psc_fifoc = of_iomap(np, 0);
470 if (!psc_fifoc) {
471 pr_err("%s: Can't map FIFOC\n", __func__);
472 return -ENODEV;
473 }
474
475 psc_fifoc_irq = irq_of_parse_and_map(np, 0);
476 of_node_put(np);
477 if (psc_fifoc_irq == NO_IRQ) {
478 pr_err("%s: Can't get FIFOC irq\n", __func__);
479 iounmap(psc_fifoc);
480 return -ENODEV;
481 }
482
483 for_each_compatible_node(np, NULL, "fsl,mpc5121-psc-uart") {
484 tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
485 rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");
486
487 /* size in register is in 4 byte units */
488 tx_fifo_size /= 4;
489 rx_fifo_size /= 4;
490 if (!tx_fifo_size)
491 tx_fifo_size = 1;
492 if (!rx_fifo_size)
493 rx_fifo_size = 1;
494
495 psc = of_iomap(np, 0);
496 if (!psc) {
497 pr_err("%s: Can't map %s device\n",
498 __func__, np->full_name);
499 continue;
500 }
501
502 /* FIFO space is 4KiB, check if requested size is available */
503 if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) {
504 pr_err("%s: no fifo space available for %s\n",
505 __func__, np->full_name);
506 iounmap(psc);
507 /*
508 * chances are that another device requests less
509 * fifo space, so we continue.
510 */
511 continue;
512 }
513 /* set tx and rx fifo size registers */
514 out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size);
515 fifobase += tx_fifo_size;
516 out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size);
517 fifobase += rx_fifo_size;
518
519 /* reset and enable the slices */
520 out_be32(&FIFOC(psc)->txcmd, 0x80);
521 out_be32(&FIFOC(psc)->txcmd, 0x01);
522 out_be32(&FIFOC(psc)->rxcmd, 0x80);
523 out_be32(&FIFOC(psc)->rxcmd, 0x01);
524
525 iounmap(psc);
526 }
527
528 return 0;
529}
530
531static void __exit mpc512x_psc_fifoc_uninit(void)
532{
533 iounmap(psc_fifoc);
534}
535
536/* 512x specific interrupt handler. The caller holds the port lock */
537static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port)
538{
539 unsigned long fifoc_int;
540 int psc_num;
541
542 /* Read pending PSC FIFOC interrupts */
543 fifoc_int = in_be32(&psc_fifoc->fifoc_int);
544
545 /* Check if it is an interrupt for this port */
546 psc_num = (port->mapbase & 0xf00) >> 8;
547 if (test_bit(psc_num, &fifoc_int) ||
548 test_bit(psc_num + 16, &fifoc_int))
549 return mpc5xxx_uart_process_int(port);
550
551 return IRQ_NONE;
552}
553
554static int mpc512x_psc_clock(struct uart_port *port, int enable)
555{
556 struct clk *psc_clk;
557 int psc_num;
558 char clk_name[10];
559
560 if (uart_console(port))
561 return 0;
562
563 psc_num = (port->mapbase & 0xf00) >> 8;
564 snprintf(clk_name, sizeof(clk_name), "psc%d_clk", psc_num);
565 psc_clk = clk_get(port->dev, clk_name);
566 if (IS_ERR(psc_clk)) {
567 dev_err(port->dev, "Failed to get PSC clock entry!\n");
568 return -ENODEV;
569 }
570
571 dev_dbg(port->dev, "%s %sable\n", clk_name, enable ? "en" : "dis");
572
573 if (enable)
574 clk_enable(psc_clk);
575 else
576 clk_disable(psc_clk);
577
578 return 0;
579}
580
581static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np)
582{
583 port->irqflags = IRQF_SHARED;
584 port->irq = psc_fifoc_irq;
585}
586
396static struct psc_ops mpc512x_psc_ops = { 587static struct psc_ops mpc512x_psc_ops = {
397 .fifo_init = mpc512x_psc_fifo_init, 588 .fifo_init = mpc512x_psc_fifo_init,
398 .raw_rx_rdy = mpc512x_psc_raw_rx_rdy, 589 .raw_rx_rdy = mpc512x_psc_raw_rx_rdy,
@@ -410,6 +601,11 @@ static struct psc_ops mpc512x_psc_ops = {
410 .cw_disable_ints = mpc512x_psc_cw_disable_ints, 601 .cw_disable_ints = mpc512x_psc_cw_disable_ints,
411 .cw_restore_ints = mpc512x_psc_cw_restore_ints, 602 .cw_restore_ints = mpc512x_psc_cw_restore_ints,
412 .getuartclk = mpc512x_getuartclk, 603 .getuartclk = mpc512x_getuartclk,
604 .clock = mpc512x_psc_clock,
605 .fifoc_init = mpc512x_psc_fifoc_init,
606 .fifoc_uninit = mpc512x_psc_fifoc_uninit,
607 .get_irq = mpc512x_psc_get_irq,
608 .handle_irq = mpc512x_psc_handle_irq,
413}; 609};
414#endif 610#endif
415 611
@@ -519,10 +715,15 @@ mpc52xx_uart_startup(struct uart_port *port)
519 struct mpc52xx_psc __iomem *psc = PSC(port); 715 struct mpc52xx_psc __iomem *psc = PSC(port);
520 int ret; 716 int ret;
521 717
718 if (psc_ops->clock) {
719 ret = psc_ops->clock(port, 1);
720 if (ret)
721 return ret;
722 }
723
522 /* Request IRQ */ 724 /* Request IRQ */
523 ret = request_irq(port->irq, mpc52xx_uart_int, 725 ret = request_irq(port->irq, mpc52xx_uart_int,
524 IRQF_DISABLED | IRQF_SAMPLE_RANDOM, 726 port->irqflags, "mpc52xx_psc_uart", port);
525 "mpc52xx_psc_uart", port);
526 if (ret) 727 if (ret)
527 return ret; 728 return ret;
528 729
@@ -553,6 +754,9 @@ mpc52xx_uart_shutdown(struct uart_port *port)
553 port->read_status_mask = 0; 754 port->read_status_mask = 0;
554 out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); 755 out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
555 756
757 if (psc_ops->clock)
758 psc_ops->clock(port, 0);
759
556 /* Release interrupt */ 760 /* Release interrupt */
557 free_irq(port->irq, port); 761 free_irq(port->irq, port);
558} 762}
@@ -851,15 +1055,12 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
851} 1055}
852 1056
853static irqreturn_t 1057static irqreturn_t
854mpc52xx_uart_int(int irq, void *dev_id) 1058mpc5xxx_uart_process_int(struct uart_port *port)
855{ 1059{
856 struct uart_port *port = dev_id;
857 unsigned long pass = ISR_PASS_LIMIT; 1060 unsigned long pass = ISR_PASS_LIMIT;
858 unsigned int keepgoing; 1061 unsigned int keepgoing;
859 u8 status; 1062 u8 status;
860 1063
861 spin_lock(&port->lock);
862
863 /* While we have stuff to do, we continue */ 1064 /* While we have stuff to do, we continue */
864 do { 1065 do {
865 /* If we don't find anything to do, we stop */ 1066 /* If we don't find anything to do, we stop */
@@ -886,11 +1087,23 @@ mpc52xx_uart_int(int irq, void *dev_id)
886 1087
887 } while (keepgoing); 1088 } while (keepgoing);
888 1089
889 spin_unlock(&port->lock);
890
891 return IRQ_HANDLED; 1090 return IRQ_HANDLED;
892} 1091}
893 1092
1093static irqreturn_t
1094mpc52xx_uart_int(int irq, void *dev_id)
1095{
1096 struct uart_port *port = dev_id;
1097 irqreturn_t ret;
1098
1099 spin_lock(&port->lock);
1100
1101 ret = psc_ops->handle_irq(port);
1102
1103 spin_unlock(&port->lock);
1104
1105 return ret;
1106}
894 1107
895/* ======================================================================== */ 1108/* ======================================================================== */
896/* Console ( if applicable ) */ 1109/* Console ( if applicable ) */
@@ -1152,7 +1365,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
1152 return -EINVAL; 1365 return -EINVAL;
1153 } 1366 }
1154 1367
1155 port->irq = irq_of_parse_and_map(op->node, 0); 1368 psc_ops->get_irq(port, op->node);
1156 if (port->irq == NO_IRQ) { 1369 if (port->irq == NO_IRQ) {
1157 dev_dbg(&op->dev, "Could not get irq\n"); 1370 dev_dbg(&op->dev, "Could not get irq\n");
1158 return -EINVAL; 1371 return -EINVAL;
@@ -1163,10 +1376,8 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
1163 1376
1164 /* Add the port to the uart sub-system */ 1377 /* Add the port to the uart sub-system */
1165 ret = uart_add_one_port(&mpc52xx_uart_driver, port); 1378 ret = uart_add_one_port(&mpc52xx_uart_driver, port);
1166 if (ret) { 1379 if (ret)
1167 irq_dispose_mapping(port->irq);
1168 return ret; 1380 return ret;
1169 }
1170 1381
1171 dev_set_drvdata(&op->dev, (void *)port); 1382 dev_set_drvdata(&op->dev, (void *)port);
1172 return 0; 1383 return 0;
@@ -1178,10 +1389,8 @@ mpc52xx_uart_of_remove(struct of_device *op)
1178 struct uart_port *port = dev_get_drvdata(&op->dev); 1389 struct uart_port *port = dev_get_drvdata(&op->dev);
1179 dev_set_drvdata(&op->dev, NULL); 1390 dev_set_drvdata(&op->dev, NULL);
1180 1391
1181 if (port) { 1392 if (port)
1182 uart_remove_one_port(&mpc52xx_uart_driver, port); 1393 uart_remove_one_port(&mpc52xx_uart_driver, port);
1183 irq_dispose_mapping(port->irq);
1184 }
1185 1394
1186 return 0; 1395 return 0;
1187} 1396}
@@ -1288,6 +1497,15 @@ mpc52xx_uart_init(void)
1288 1497
1289 mpc52xx_uart_of_enumerate(); 1498 mpc52xx_uart_of_enumerate();
1290 1499
1500 /*
1501 * Map the PSC FIFO Controller and init if on MPC512x.
1502 */
1503 if (psc_ops->fifoc_init) {
1504 ret = psc_ops->fifoc_init();
1505 if (ret)
1506 return ret;
1507 }
1508
1291 ret = of_register_platform_driver(&mpc52xx_uart_of_driver); 1509 ret = of_register_platform_driver(&mpc52xx_uart_of_driver);
1292 if (ret) { 1510 if (ret) {
1293 printk(KERN_ERR "%s: of_register_platform_driver failed (%i)\n", 1511 printk(KERN_ERR "%s: of_register_platform_driver failed (%i)\n",
@@ -1302,6 +1520,9 @@ mpc52xx_uart_init(void)
1302static void __exit 1520static void __exit
1303mpc52xx_uart_exit(void) 1521mpc52xx_uart_exit(void)
1304{ 1522{
1523 if (psc_ops->fifoc_uninit)
1524 psc_ops->fifoc_uninit();
1525
1305 of_unregister_platform_driver(&mpc52xx_uart_of_driver); 1526 of_unregister_platform_driver(&mpc52xx_uart_of_driver);
1306 uart_unregister_driver(&mpc52xx_uart_driver); 1527 uart_unregister_driver(&mpc52xx_uart_driver);
1307} 1528}
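
mpc512x_psc_clock() above gates the per-PSC clock around uart startup/shutdown through the generic clk API, looking the clock up by a "psc%d_clk" name derived from the port's mapbase. A reduced sketch of that enable/disable shape with a made-up helper name; note the patch itself keeps no clk handle and never calls clk_put(), while this sketch drops the reference after use:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* Hypothetical helper: look up a named clock for a device and gate it.
     * Mirrors the shape of mpc512x_psc_clock() in the patch above. */
    static int example_gate_clock(struct device *dev, const char *name, int enable)
    {
            struct clk *clk;

            clk = clk_get(dev, name);
            if (IS_ERR(clk)) {
                    dev_err(dev, "failed to get clock %s\n", name);
                    return PTR_ERR(clk);
            }

            if (enable)
                    clk_enable(clk);
            else
                    clk_disable(clk);

            clk_put(clk);
            return 0;
    }

A driver would more commonly cache the struct clk pointer in its private data instead of looking it up on every open and close.
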
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 95421fa3b304..e91db4b38012 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -696,11 +696,11 @@ static int serial_config(struct pcmcia_device * link)
696 info->multi = info->quirk->multi; 696 info->multi = info->quirk->multi;
697 697
698 if (info->multi > 1) 698 if (info->multi > 1)
699 multi_config(link); 699 i = multi_config(link);
700 else 700 else
701 simple_config(link); 701 i = simple_config(link);
702 702
703 if (info->ndev == 0) 703 if (i || info->ndev == 0)
704 goto failed; 704 goto failed;
705 705
706 /* 706 /*
@@ -715,6 +715,7 @@ static int serial_config(struct pcmcia_device * link)
715 return 0; 715 return 0;
716 716
717failed: 717failed:
718 dev_warn(&link->dev, "serial_cs: failed to initialize\n");
718 serial_remove(link); 719 serial_remove(link);
719 return -ENODEV; 720 return -ENODEV;
720} 721}
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index cfd5ff9508fa..ba8ac4f599d3 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -412,11 +412,13 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
412 } 412 }
413 413
414 /* put buffers on the ring */ 414 /* put buffers on the ring */
415 res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, hw->rx, t->len); 415 res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
416 t->len, DDMA_FLAGS_IE);
416 if (!res) 417 if (!res)
417 dev_err(hw->dev, "rx dma put dest error\n"); 418 dev_err(hw->dev, "rx dma put dest error\n");
418 419
419 res = au1xxx_dbdma_put_source(hw->dma_tx_ch, (void *)hw->tx, t->len); 420 res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
421 t->len, DDMA_FLAGS_IE);
420 if (!res) 422 if (!res)
421 dev_err(hw->dev, "tx dma put source error\n"); 423 dev_err(hw->dev, "tx dma put source error\n");
422 424
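
The au1550_spi fix above hands the dbdma ring physical addresses (via virt_to_phys()) plus an interrupt-enable flag instead of raw kernel virtual pointers. As a generic illustration of why descriptor-based DMA engines need that translation, here is a purely hypothetical descriptor fill; struct dma_desc_sketch, DESC_FLAG_IE and fill_descriptor() are invented for the example and are not the Alchemy dbdma API:

    #include <linux/io.h>        /* virt_to_phys() */
    #include <linux/types.h>

    /* Hypothetical descriptor layout as a DMA engine might read it. */
    struct dma_desc_sketch {
            u32 source;          /* physical address of the buffer */
            u32 count;           /* transfer length in bytes */
            u32 flags;           /* e.g. interrupt-on-completion */
    };

    #define DESC_FLAG_IE 0x1     /* made-up flag bit */

    /* The driver holds a kernel virtual address; the engine needs the
     * physical one, hence the virt_to_phys() translation. */
    static void fill_descriptor(struct dma_desc_sketch *d, void *buf, size_t len)
    {
            d->source = (u32)virt_to_phys(buf);
            d->count  = (u32)len;
            d->flags  = DESC_FLAG_IE;
    }
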
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index c0a583cc2227..87447c102fa0 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -14,7 +14,6 @@ obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
14octeon-ethernet-objs := ethernet.o 14octeon-ethernet-objs := ethernet.o
15octeon-ethernet-objs += ethernet-mdio.o 15octeon-ethernet-objs += ethernet-mdio.o
16octeon-ethernet-objs += ethernet-mem.o 16octeon-ethernet-objs += ethernet-mem.o
17octeon-ethernet-objs += ethernet-proc.o
18octeon-ethernet-objs += ethernet-rgmii.o 17octeon-ethernet-objs += ethernet-rgmii.o
19octeon-ethernet-objs += ethernet-rx.o 18octeon-ethernet-objs += ethernet-rx.o
20octeon-ethernet-objs += ethernet-sgmii.o 19octeon-ethernet-objs += ethernet-sgmii.o
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index f13131b03c33..6a2cd50a17df 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -41,17 +41,10 @@
41 * Tells the driver to populate the packet buffers with kernel skbuffs. 41 * Tells the driver to populate the packet buffers with kernel skbuffs.
42 * This allows the driver to receive packets without copying them. It also 42 * This allows the driver to receive packets without copying them. It also
43 * means that 32bit userspace can't access the packet buffers. 43 * means that 32bit userspace can't access the packet buffers.
44 * USE_32BIT_SHARED
45 * This define tells the driver to allocate memory for buffers from the
46 * 32bit sahred region instead of the kernel memory space.
47 * USE_HW_TCPUDP_CHECKSUM 44 * USE_HW_TCPUDP_CHECKSUM
48 * Controls if the Octeon TCP/UDP checksum engine is used for packet 45 * Controls if the Octeon TCP/UDP checksum engine is used for packet
49 * output. If this is zero, the kernel will perform the checksum in 46 * output. If this is zero, the kernel will perform the checksum in
50 * software. 47 * software.
51 * USE_MULTICORE_RECEIVE
52 * Process receive interrupts on multiple cores. This spreads the network
53 * load across the first 8 processors. If ths is zero, only one core
54 * processes incomming packets.
55 * USE_ASYNC_IOBDMA 48 * USE_ASYNC_IOBDMA
56 * Use asynchronous IO access to hardware. This uses Octeon's asynchronous 49 * Use asynchronous IO access to hardware. This uses Octeon's asynchronous
57 * IOBDMAs to issue IO accesses without stalling. Set this to zero 50 * IOBDMAs to issue IO accesses without stalling. Set this to zero
@@ -75,29 +68,15 @@
75#define CONFIG_CAVIUM_RESERVE32 0 68#define CONFIG_CAVIUM_RESERVE32 0
76#endif 69#endif
77 70
78#if CONFIG_CAVIUM_RESERVE32
79#define USE_32BIT_SHARED 1
80#define USE_SKBUFFS_IN_HW 0
81#define REUSE_SKBUFFS_WITHOUT_FREE 0
82#else
83#define USE_32BIT_SHARED 0
84#define USE_SKBUFFS_IN_HW 1 71#define USE_SKBUFFS_IN_HW 1
85#ifdef CONFIG_NETFILTER 72#ifdef CONFIG_NETFILTER
86#define REUSE_SKBUFFS_WITHOUT_FREE 0 73#define REUSE_SKBUFFS_WITHOUT_FREE 0
87#else 74#else
88#define REUSE_SKBUFFS_WITHOUT_FREE 1 75#define REUSE_SKBUFFS_WITHOUT_FREE 1
89#endif 76#endif
90#endif
91
92/* Max interrupts per second per core */
93#define INTERRUPT_LIMIT 10000
94 77
95/* Don't limit the number of interrupts */
96/*#define INTERRUPT_LIMIT 0 */
97#define USE_HW_TCPUDP_CHECKSUM 1 78#define USE_HW_TCPUDP_CHECKSUM 1
98 79
99#define USE_MULTICORE_RECEIVE 1
100
101/* Enable Random Early Dropping under load */ 80/* Enable Random Early Dropping under load */
102#define USE_RED 1 81#define USE_RED 1
103#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0) 82#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
@@ -115,21 +94,12 @@
115/* Use this to not have FPA frees control L2 */ 94/* Use this to not have FPA frees control L2 */
116/*#define DONT_WRITEBACK(x) 0 */ 95/*#define DONT_WRITEBACK(x) 0 */
117 96
118/* Maximum number of packets to process per interrupt. */
119#define MAX_RX_PACKETS 120
120/* Maximum number of SKBs to try to free per xmit packet. */ 97/* Maximum number of SKBs to try to free per xmit packet. */
121#define MAX_SKB_TO_FREE 10
122#define MAX_OUT_QUEUE_DEPTH 1000 98#define MAX_OUT_QUEUE_DEPTH 1000
123 99
124#ifndef CONFIG_SMP 100#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(uint32_t))
125#undef USE_MULTICORE_RECEIVE 101#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(uint32_t))
126#define USE_MULTICORE_RECEIVE 0
127#endif
128
129#define IP_PROTOCOL_TCP 6
130#define IP_PROTOCOL_UDP 0x11
131 102
132#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t))
133#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1) 103#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
134 104
135 105
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 05a5cc0f43ed..7e0be8d00dc3 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -96,11 +96,11 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
96}; 96};
97 97
98/** 98/**
99 * IOCTL support for PHY control 99 * cvm_oct_ioctl - IOCTL support for PHY control
100 *
101 * @dev: Device to change 100 * @dev: Device to change
102 * @rq: the request 101 * @rq: the request
103 * @cmd: the command 102 * @cmd: the command
103 *
104 * Returns Zero on success 104 * Returns Zero on success
105 */ 105 */
106int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 106int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -153,7 +153,7 @@ static void cvm_oct_adjust_link(struct net_device *dev)
153 153
154 154
155/** 155/**
156 * Setup the PHY 156 * cvm_oct_phy_setup_device - setup the PHY
157 * 157 *
158 * @dev: Device to setup 158 * @dev: Device to setup
159 * 159 *
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index 55d0614a7cd9..a417d4fce12c 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -32,7 +32,6 @@
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/seq_file.h> 35#include <linux/seq_file.h>
37#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
38#include <net/dst.h> 37#include <net/dst.h>
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index b595903e2af1..00cc91df6b46 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -26,8 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h>
31 29
32#include <asm/octeon/octeon.h> 30#include <asm/octeon/octeon.h>
33 31
@@ -36,18 +34,19 @@
36#include "cvmx-fpa.h" 34#include "cvmx-fpa.h"
37 35
38/** 36/**
39 * Fill the supplied hardware pool with skbuffs 37 * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
40 *
41 * @pool: Pool to allocate an skbuff for 38 * @pool: Pool to allocate an skbuff for
42 * @size: Size of the buffer needed for the pool 39 * @size: Size of the buffer needed for the pool
43 * @elements: Number of buffers to allocate 40 * @elements: Number of buffers to allocate
41 *
42 * Returns the actual number of buffers allocated.
44 */ 43 */
45static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) 44static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
46{ 45{
47 int freed = elements; 46 int freed = elements;
48 while (freed) { 47 while (freed) {
49 48
50 struct sk_buff *skb = dev_alloc_skb(size + 128); 49 struct sk_buff *skb = dev_alloc_skb(size + 256);
51 if (unlikely(skb == NULL)) { 50 if (unlikely(skb == NULL)) {
52 pr_warning 51 pr_warning
53 ("Failed to allocate skb for hardware pool %d\n", 52 ("Failed to allocate skb for hardware pool %d\n",
@@ -55,7 +54,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
55 break; 54 break;
56 } 55 }
57 56
58 skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f)); 57 skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
59 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; 58 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
60 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128)); 59 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
61 freed--; 60 freed--;
@@ -64,8 +63,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
64} 63}
65 64
66/** 65/**
 67 * Free the supplied hardware pool of skbuffs		 66 * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
68 *
69 * @pool: Pool to allocate an skbuff for 67 * @pool: Pool to allocate an skbuff for
70 * @size: Size of the buffer needed for the pool 68 * @size: Size of the buffer needed for the pool
71 * @elements: Number of buffers to allocate 69 * @elements: Number of buffers to allocate
@@ -93,96 +91,76 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
93} 91}
94 92
95/** 93/**
96 * This function fills a hardware pool with memory. Depending 94 * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
97 * on the config defines, this memory might come from the
98 * kernel or global 32bit memory allocated with
99 * cvmx_bootmem_alloc.
100 *
101 * @pool: Pool to populate 95 * @pool: Pool to populate
102 * @size: Size of each buffer in the pool 96 * @size: Size of each buffer in the pool
103 * @elements: Number of buffers to allocate 97 * @elements: Number of buffers to allocate
98 *
99 * Returns the actual number of buffers allocated.
104 */ 100 */
105static int cvm_oct_fill_hw_memory(int pool, int size, int elements) 101static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
106{ 102{
107 char *memory; 103 char *memory;
104 char *fpa;
108 int freed = elements; 105 int freed = elements;
109 106
110 if (USE_32BIT_SHARED) { 107 while (freed) {
111 extern uint64_t octeon_reserve32_memory; 108 /*
112 109 * FPA memory must be 128 byte aligned. Since we are
113 memory = 110 * aligning we need to save the original pointer so we
114 cvmx_bootmem_alloc_range(elements * size, 128, 111 * can feed it to kfree when the memory is returned to
115 octeon_reserve32_memory, 112 * the kernel.
116 octeon_reserve32_memory + 113 *
117 (CONFIG_CAVIUM_RESERVE32 << 20) - 114 * We allocate an extra 256 bytes to allow for
118 1); 115 * alignment and space for the original pointer saved
119 if (memory == NULL) 116 * just before the block.
120 panic("Unable to allocate %u bytes for FPA pool %d\n", 117 */
121 elements * size, pool); 118 memory = kmalloc(size + 256, GFP_ATOMIC);
122 119 if (unlikely(memory == NULL)) {
123 pr_notice("Memory range %p - %p reserved for " 120 pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
124 "hardware\n", memory, 121 elements * size, pool);
125 memory + elements * size - 1); 122 break;
126
127 while (freed) {
128 cvmx_fpa_free(memory, pool, 0);
129 memory += size;
130 freed--;
131 }
132 } else {
133 while (freed) {
134 /* We need to force alignment to 128 bytes here */
135 memory = kmalloc(size + 127, GFP_ATOMIC);
136 if (unlikely(memory == NULL)) {
137 pr_warning("Unable to allocate %u bytes for "
138 "FPA pool %d\n",
139 elements * size, pool);
140 break;
141 }
142 memory = (char *)(((unsigned long)memory + 127) & -128);
143 cvmx_fpa_free(memory, pool, 0);
144 freed--;
145 } 123 }
124 fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
125 *((char **)fpa - 1) = memory;
126 cvmx_fpa_free(fpa, pool, 0);
127 freed--;
146 } 128 }
147 return elements - freed; 129 return elements - freed;
148} 130}
149 131
150/** 132/**
151 * Free memory previously allocated with cvm_oct_fill_hw_memory 133 * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
152 *
153 * @pool: FPA pool to free 134 * @pool: FPA pool to free
154 * @size: Size of each buffer in the pool 135 * @size: Size of each buffer in the pool
155 * @elements: Number of buffers that should be in the pool 136 * @elements: Number of buffers that should be in the pool
156 */ 137 */
157static void cvm_oct_free_hw_memory(int pool, int size, int elements) 138static void cvm_oct_free_hw_memory(int pool, int size, int elements)
158{ 139{
159 if (USE_32BIT_SHARED) { 140 char *memory;
160 pr_warning("Warning: 32 shared memory is not freeable\n"); 141 char *fpa;
161 } else { 142 do {
162 char *memory; 143 fpa = cvmx_fpa_alloc(pool);
163 do { 144 if (fpa) {
164 memory = cvmx_fpa_alloc(pool); 145 elements--;
165 if (memory) { 146 fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
166 elements--; 147 memory = *((char **)fpa - 1);
167 kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); 148 kfree(memory);
168 } 149 }
169 } while (memory); 150 } while (fpa);
170 151
171 if (elements < 0) 152 if (elements < 0)
172 pr_warning("Freeing of pool %u had too many " 153 pr_warning("Freeing of pool %u had too many buffers (%d)\n",
173 "buffers (%d)\n", 154 pool, elements);
174 pool, elements); 155 else if (elements > 0)
175 else if (elements > 0) 156 pr_warning("Warning: Freeing of pool %u is missing %d buffers\n",
176 pr_warning("Warning: Freeing of pool %u is " 157 pool, elements);
177 "missing %d buffers\n",
178 pool, elements);
179 }
180} 158}
181 159
182int cvm_oct_mem_fill_fpa(int pool, int size, int elements) 160int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
183{ 161{
184 int freed; 162 int freed;
185 if (USE_SKBUFFS_IN_HW) 163 if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL)
186 freed = cvm_oct_fill_hw_skbuff(pool, size, elements); 164 freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
187 else 165 else
188 freed = cvm_oct_fill_hw_memory(pool, size, elements); 166 freed = cvm_oct_fill_hw_memory(pool, size, elements);
@@ -191,7 +169,7 @@ int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
191 169
192void cvm_oct_mem_empty_fpa(int pool, int size, int elements) 170void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
193{ 171{
194 if (USE_SKBUFFS_IN_HW) 172 if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL)
195 cvm_oct_free_hw_skbuff(pool, size, elements); 173 cvm_oct_free_hw_skbuff(pool, size, elements);
196 else 174 else
197 cvm_oct_free_hw_memory(pool, size, elements); 175 cvm_oct_free_hw_memory(pool, size, elements);
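
The reworked cvm_oct_fill_hw_memory()/cvm_oct_free_hw_memory() pair above over-allocates by 256 bytes, hands the FPA a 128-byte-aligned block, and stashes the original kmalloc() pointer in the slot just below that block so it can be recovered and kfree()d later. The same idiom in a small stand-alone user-space form, with malloc()/free() standing in for kmalloc()/kfree():

    #include <stdio.h>
    #include <stdlib.h>

    /* Return a 128-byte-aligned block; the original malloc() pointer is
     * stored in the pointer slot immediately before the aligned block. */
    static void *alloc_aligned128(size_t size)
    {
            char *memory, *aligned;

            /* extra 256 bytes: room for alignment plus the saved pointer */
            memory = malloc(size + 256);
            if (!memory)
                    return NULL;
            aligned = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
            *((char **)aligned - 1) = memory;     /* stash original pointer */
            return aligned;
    }

    static void free_aligned128(void *aligned)
    {
            free(*((char **)aligned - 1));        /* recover it and free */
    }

    int main(void)
    {
            void *p = alloc_aligned128(2048);

            if (!p)
                    return 1;
            printf("aligned block at %p (128-byte aligned: %s)\n",
                   p, ((unsigned long)p & 0x7f) ? "no" : "yes");
            free_aligned128(p);
            return 0;
    }
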
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
deleted file mode 100644
index 16308d484d3b..000000000000
--- a/drivers/staging/octeon/ethernet-proc.c
+++ /dev/null
@@ -1,144 +0,0 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/seq_file.h>
29#include <linux/proc_fs.h>
30#include <net/dst.h>
31
32#include <asm/octeon/octeon.h>
33
34#include "octeon-ethernet.h"
35#include "ethernet-defines.h"
36
37#include "cvmx-helper.h"
38#include "cvmx-pip.h"
39
40/**
41 * User is reading /proc/octeon_ethernet_stats
42 *
43 * @m:
44 * @v:
45 * Returns
46 */
47static int cvm_oct_stats_show(struct seq_file *m, void *v)
48{
49 struct octeon_ethernet *priv;
50 int port;
51
52 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
53
54 if (cvm_oct_device[port]) {
55 priv = netdev_priv(cvm_oct_device[port]);
56
57 seq_printf(m, "\nOcteon Port %d (%s)\n", port,
58 cvm_oct_device[port]->name);
59 seq_printf(m,
60 "rx_packets: %12lu\t"
61 "tx_packets: %12lu\n",
62 priv->stats.rx_packets,
63 priv->stats.tx_packets);
64 seq_printf(m,
65 "rx_bytes: %12lu\t"
66 "tx_bytes: %12lu\n",
67 priv->stats.rx_bytes, priv->stats.tx_bytes);
68 seq_printf(m,
69 "rx_errors: %12lu\t"
70 "tx_errors: %12lu\n",
71 priv->stats.rx_errors,
72 priv->stats.tx_errors);
73 seq_printf(m,
74 "rx_dropped: %12lu\t"
75 "tx_dropped: %12lu\n",
76 priv->stats.rx_dropped,
77 priv->stats.tx_dropped);
78 seq_printf(m,
79 "rx_length_errors: %12lu\t"
80 "tx_aborted_errors: %12lu\n",
81 priv->stats.rx_length_errors,
82 priv->stats.tx_aborted_errors);
83 seq_printf(m,
84 "rx_over_errors: %12lu\t"
85 "tx_carrier_errors: %12lu\n",
86 priv->stats.rx_over_errors,
87 priv->stats.tx_carrier_errors);
88 seq_printf(m,
89 "rx_crc_errors: %12lu\t"
90 "tx_fifo_errors: %12lu\n",
91 priv->stats.rx_crc_errors,
92 priv->stats.tx_fifo_errors);
93 seq_printf(m,
94 "rx_frame_errors: %12lu\t"
95 "tx_heartbeat_errors: %12lu\n",
96 priv->stats.rx_frame_errors,
97 priv->stats.tx_heartbeat_errors);
98 seq_printf(m,
99 "rx_fifo_errors: %12lu\t"
100 "tx_window_errors: %12lu\n",
101 priv->stats.rx_fifo_errors,
102 priv->stats.tx_window_errors);
103 seq_printf(m,
104 "rx_missed_errors: %12lu\t"
105 "multicast: %12lu\n",
106 priv->stats.rx_missed_errors,
107 priv->stats.multicast);
108 }
109 }
110
111 return 0;
112}
113
114/**
115 * /proc/octeon_ethernet_stats was openned. Use the single_open iterator
116 *
117 * @inode:
118 * @file:
119 * Returns
120 */
121static int cvm_oct_stats_open(struct inode *inode, struct file *file)
122{
123 return single_open(file, cvm_oct_stats_show, NULL);
124}
125
126static const struct file_operations cvm_oct_stats_operations = {
127 .open = cvm_oct_stats_open,
128 .read = seq_read,
129 .llseek = seq_lseek,
130 .release = single_release,
131};
132
133void cvm_oct_proc_initialize(void)
134{
135 struct proc_dir_entry *entry =
136 create_proc_entry("octeon_ethernet_stats", 0, NULL);
137 if (entry)
138 entry->proc_fops = &cvm_oct_stats_operations;
139}
140
141void cvm_oct_proc_shutdown(void)
142{
143 remove_proc_entry("octeon_ethernet_stats", NULL);
144}
diff --git a/drivers/staging/octeon/ethernet-proc.h b/drivers/staging/octeon/ethernet-proc.h
deleted file mode 100644
index 82c7d9f78bc4..000000000000
--- a/drivers/staging/octeon/ethernet-proc.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27
28void cvm_oct_proc_initialize(void);
29void cvm_oct_proc_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 3820f1ec11d1..a0d4d4b98bdc 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,7 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h> 29#include <linux/phy.h>
30#include <net/dst.h> 30#include <net/dst.h>
31 31
32#include <asm/octeon/octeon.h> 32#include <asm/octeon/octeon.h>
@@ -48,14 +48,20 @@ static int number_rgmii_ports;
48static void cvm_oct_rgmii_poll(struct net_device *dev) 48static void cvm_oct_rgmii_poll(struct net_device *dev)
49{ 49{
50 struct octeon_ethernet *priv = netdev_priv(dev); 50 struct octeon_ethernet *priv = netdev_priv(dev);
51 unsigned long flags; 51 unsigned long flags = 0;
52 cvmx_helper_link_info_t link_info; 52 cvmx_helper_link_info_t link_info;
53 int use_global_register_lock = (priv->phydev == NULL);
53 54
54 /* 55 BUG_ON(in_interrupt());
55 * Take the global register lock since we are going to touch 56 if (use_global_register_lock) {
56 * registers that affect more than one port. 57 /*
57 */ 58 * Take the global register lock since we are going to
58 spin_lock_irqsave(&global_register_lock, flags); 59 * touch registers that affect more than one port.
60 */
61 spin_lock_irqsave(&global_register_lock, flags);
62 } else {
63 mutex_lock(&priv->phydev->bus->mdio_lock);
64 }
59 65
60 link_info = cvmx_helper_link_get(priv->port); 66 link_info = cvmx_helper_link_get(priv->port);
61 if (link_info.u64 == priv->link_info) { 67 if (link_info.u64 == priv->link_info) {
@@ -115,7 +121,11 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
115 dev->name); 121 dev->name);
116 } 122 }
117 } 123 }
118 spin_unlock_irqrestore(&global_register_lock, flags); 124
125 if (use_global_register_lock)
126 spin_unlock_irqrestore(&global_register_lock, flags);
127 else
128 mutex_unlock(&priv->phydev->bus->mdio_lock);
119 return; 129 return;
120 } 130 }
121 131
@@ -151,7 +161,12 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
151 link_info = cvmx_helper_link_autoconf(priv->port); 161 link_info = cvmx_helper_link_autoconf(priv->port);
152 priv->link_info = link_info.u64; 162 priv->link_info = link_info.u64;
153 } 163 }
154 spin_unlock_irqrestore(&global_register_lock, flags); 164
165 if (use_global_register_lock)
166 spin_unlock_irqrestore(&global_register_lock, flags);
167 else {
168 mutex_unlock(&priv->phydev->bus->mdio_lock);
169 }
155 170
156 if (priv->phydev == NULL) { 171 if (priv->phydev == NULL) {
157 /* Tell core. */ 172 /* Tell core. */
@@ -213,8 +228,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
213 struct net_device *dev = 228 struct net_device *dev =
214 cvm_oct_device[cvmx_helper_get_ipd_port 229 cvm_oct_device[cvmx_helper_get_ipd_port
215 (interface, index)]; 230 (interface, index)];
216 if (dev) 231 struct octeon_ethernet *priv = netdev_priv(dev);
217 cvm_oct_rgmii_poll(dev); 232
233 if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
234 queue_work(cvm_oct_poll_queue, &priv->port_work);
235
218 gmx_rx_int_reg.u64 = 0; 236 gmx_rx_int_reg.u64 = 0;
219 gmx_rx_int_reg.s.phy_dupx = 1; 237 gmx_rx_int_reg.s.phy_dupx = 1;
220 gmx_rx_int_reg.s.phy_link = 1; 238 gmx_rx_int_reg.s.phy_link = 1;
@@ -252,8 +270,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
252 struct net_device *dev = 270 struct net_device *dev =
253 cvm_oct_device[cvmx_helper_get_ipd_port 271 cvm_oct_device[cvmx_helper_get_ipd_port
254 (interface, index)]; 272 (interface, index)];
255 if (dev) 273 struct octeon_ethernet *priv = netdev_priv(dev);
256 cvm_oct_rgmii_poll(dev); 274
275 if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
276 queue_work(cvm_oct_poll_queue, &priv->port_work);
277
257 gmx_rx_int_reg.u64 = 0; 278 gmx_rx_int_reg.u64 = 0;
258 gmx_rx_int_reg.s.phy_dupx = 1; 279 gmx_rx_int_reg.s.phy_dupx = 1;
259 gmx_rx_int_reg.s.phy_link = 1; 280 gmx_rx_int_reg.s.phy_link = 1;
@@ -302,6 +323,12 @@ int cvm_oct_rgmii_stop(struct net_device *dev)
302 return 0; 323 return 0;
303} 324}
304 325
326static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
327{
328 struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
329 cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
330}
331
305int cvm_oct_rgmii_init(struct net_device *dev) 332int cvm_oct_rgmii_init(struct net_device *dev)
306{ 333{
307 struct octeon_ethernet *priv = netdev_priv(dev); 334 struct octeon_ethernet *priv = netdev_priv(dev);
@@ -309,7 +336,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
309 336
310 cvm_oct_common_init(dev); 337 cvm_oct_common_init(dev);
311 dev->netdev_ops->ndo_stop(dev); 338 dev->netdev_ops->ndo_stop(dev);
312 339 INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
313 /* 340 /*
314 * Due to GMX errata in CN3XXX series chips, it is necessary 341 * Due to GMX errata in CN3XXX series chips, it is necessary
315 * to take the link down immediately when the PHY changes 342 * to take the link down immediately when the PHY changes
@@ -397,4 +424,5 @@ void cvm_oct_rgmii_uninit(struct net_device *dev)
397 number_rgmii_ports--; 424 number_rgmii_ports--;
398 if (number_rgmii_ports == 0) 425 if (number_rgmii_ports == 0)
399 free_irq(OCTEON_IRQ_RML, &number_rgmii_ports); 426 free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
427 cancel_work_sync(&priv->port_work);
400} 428}
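
The RGMII hunks above move link polling out of the RML hard interrupt and into a per-port work item, since the poll path may now sleep on the MDIO bus mutex. A generic sketch of that defer-to-workqueue pattern, with hypothetical demo_* names rather than the driver's structures, could look like this:

/*
 * Sketch of deferring sleepable work from a hard IRQ to process
 * context, as the RGMII changes above do with port_work.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct demo_port {
	struct work_struct poll_work;
	struct mutex lock;	/* may sleep, so unusable in IRQ context */
};

static void demo_poll(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port, poll_work);

	mutex_lock(&port->lock);	/* safe: we run in process context */
	/* ... read link state, reconfigure the hardware ... */
	mutex_unlock(&port->lock);
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_port *port = dev_id;

	/* Cannot take a mutex here; hand the work to a workqueue. */
	schedule_work(&port->poll_work);
	return IRQ_HANDLED;
}

static void demo_port_init(struct demo_port *port)
{
	mutex_init(&port->lock);
	INIT_WORK(&port->poll_work, demo_poll);
}

static void demo_port_exit(struct demo_port *port)
{
	cancel_work_sync(&port->poll_work);
}
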
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1b237b7e689d..cb38f9eb2cc0 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -27,16 +27,14 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/cache.h> 29#include <linux/cache.h>
30#include <linux/cpumask.h>
30#include <linux/netdevice.h> 31#include <linux/netdevice.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
33#include <linux/ip.h> 34#include <linux/ip.h>
34#include <linux/string.h> 35#include <linux/string.h>
35#include <linux/prefetch.h> 36#include <linux/prefetch.h>
36#include <linux/ethtool.h> 37#include <linux/smp.h>
37#include <linux/mii.h>
38#include <linux/seq_file.h>
39#include <linux/proc_fs.h>
40#include <net/dst.h> 38#include <net/dst.h>
41#ifdef CONFIG_XFRM 39#ifdef CONFIG_XFRM
42#include <linux/xfrm.h> 40#include <linux/xfrm.h>
@@ -48,8 +46,9 @@
48#include <asm/octeon/octeon.h> 46#include <asm/octeon/octeon.h>
49 47
50#include "ethernet-defines.h" 48#include "ethernet-defines.h"
51#include "octeon-ethernet.h"
52#include "ethernet-mem.h" 49#include "ethernet-mem.h"
50#include "ethernet-rx.h"
51#include "octeon-ethernet.h"
53#include "ethernet-util.h" 52#include "ethernet-util.h"
54 53
55#include "cvmx-helper.h" 54#include "cvmx-helper.h"
@@ -61,62 +60,88 @@
61 60
62#include "cvmx-gmxx-defs.h" 61#include "cvmx-gmxx-defs.h"
63 62
64struct cvm_tasklet_wrapper { 63struct cvm_napi_wrapper {
65 struct tasklet_struct t; 64 struct napi_struct napi;
66}; 65} ____cacheline_aligned_in_smp;
67 66
68/* 67static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
69 * Aligning the tasklet_struct on cachline boundries seems to decrease
70 * throughput even though in theory it would reduce contantion on the
71 * cache lines containing the locks.
72 */
73 68
74static struct cvm_tasklet_wrapper cvm_oct_tasklet[NR_CPUS]; 69struct cvm_oct_core_state {
70 int baseline_cores;
71 /*
72 * The number of additional cores that could be processing
73 * input packtes.
74 */
75 atomic_t available_cores;
76 cpumask_t cpu_state;
77} ____cacheline_aligned_in_smp;
75 78
76/** 79static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
77 * Interrupt handler. The interrupt occurs whenever the POW 80
78 * transitions from 0->1 packets in our group. 81static void cvm_oct_enable_napi(void *_)
79 *
80 * @cpl:
81 * @dev_id:
82 * @regs:
83 * Returns
84 */
85irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
86{ 82{
87 /* Acknowledge the interrupt */ 83 int cpu = smp_processor_id();
88 if (INTERRUPT_LIMIT) 84 napi_schedule(&cvm_oct_napi[cpu].napi);
89 cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group); 85}
90 else 86
91 cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group); 87static void cvm_oct_enable_one_cpu(void)
92 preempt_disable(); 88{
93 tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t); 89 int v;
94 preempt_enable(); 90 int cpu;
95 return IRQ_HANDLED; 91
92 /* Check to see if more CPUs are available for receive processing... */
93 v = atomic_sub_if_positive(1, &core_state.available_cores);
94 if (v < 0)
95 return;
96
97 /* ... if a CPU is available, turn on NAPI polling for that CPU. */
98 for_each_online_cpu(cpu) {
99 if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
100 v = smp_call_function_single(cpu, cvm_oct_enable_napi,
101 NULL, 0);
102 if (v)
103 panic("Can't enable NAPI.");
104 break;
105 }
106 }
107}
108
109static void cvm_oct_no_more_work(void)
110{
111 int cpu = smp_processor_id();
112
113 /*
114 * CPU zero is special. It always has the irq enabled when
115 * waiting for incoming packets.
116 */
117 if (cpu == 0) {
118 enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
119 return;
120 }
121
122 cpu_clear(cpu, core_state.cpu_state);
123 atomic_add(1, &core_state.available_cores);
96} 124}
97 125
98#ifdef CONFIG_NET_POLL_CONTROLLER
99/** 126/**
100 * This is called when the kernel needs to manually poll the 127 * cvm_oct_do_interrupt - interrupt handler.
101 * device. For Octeon, this is simply calling the interrupt 128 *
102 * handler. We actually poll all the devices, not just the 129 * The interrupt occurs whenever the POW has packets in our group.
103 * one supplied.
104 * 130 *
105 * @dev: Device to poll. Unused
106 */ 131 */
107void cvm_oct_poll_controller(struct net_device *dev) 132static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
108{ 133{
109 preempt_disable(); 134 /* Disable the IRQ and start napi_poll. */
110 tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t); 135 disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
111 preempt_enable(); 136 cvm_oct_enable_napi(NULL);
137
138 return IRQ_HANDLED;
112} 139}
113#endif
114 140
115/** 141/**
116 * This is called on receive errors, and determines if the packet 142 * cvm_oct_check_rcv_error - process receive errors
117 * can be dropped early-on in cvm_oct_tasklet_rx().
118 *
119 * @work: Work queue entry pointing to the packet. 143 * @work: Work queue entry pointing to the packet.
144 *
120 * Returns Non-zero if the packet can be dropped, zero otherwise. 145 * Returns Non-zero if the packet can be dropped, zero otherwise.
121 */ 146 */
122static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) 147static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
@@ -199,19 +224,20 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
199} 224}
200 225
201/** 226/**
202 * Tasklet function that is scheduled on a core when an interrupt occurs. 227 * cvm_oct_napi_poll - the NAPI poll function.
228 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
229 * @budget: Maximum number of packets to receive.
203 * 230 *
204 * @unused: 231 * Returns the number of packets processed.
205 */ 232 */
206void cvm_oct_tasklet_rx(unsigned long unused) 233static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
207{ 234{
208 const int coreid = cvmx_get_core_num(); 235 const int coreid = cvmx_get_core_num();
209 uint64_t old_group_mask; 236 uint64_t old_group_mask;
210 uint64_t old_scratch; 237 uint64_t old_scratch;
211 int rx_count = 0; 238 int rx_count = 0;
212 int number_to_free; 239 int did_work_request = 0;
213 int num_freed; 240 int packet_not_copied;
214 int packet_not_copied;
215 241
216 /* Prefetch cvm_oct_device since we know we need it soon */ 242 /* Prefetch cvm_oct_device since we know we need it soon */
217 prefetch(cvm_oct_device); 243 prefetch(cvm_oct_device);
@@ -227,59 +253,63 @@ void cvm_oct_tasklet_rx(unsigned long unused)
227 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), 253 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
228 (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); 254 (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
229 255
230 if (USE_ASYNC_IOBDMA) 256 if (USE_ASYNC_IOBDMA) {
231 cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); 257 cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
258 did_work_request = 1;
259 }
232 260
233 while (1) { 261 while (rx_count < budget) {
234 struct sk_buff *skb = NULL; 262 struct sk_buff *skb = NULL;
263 struct sk_buff **pskb = NULL;
235 int skb_in_hw; 264 int skb_in_hw;
236 cvmx_wqe_t *work; 265 cvmx_wqe_t *work;
237 266
238 if (USE_ASYNC_IOBDMA) { 267 if (USE_ASYNC_IOBDMA && did_work_request)
239 work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); 268 work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
240 } else { 269 else
241 if ((INTERRUPT_LIMIT == 0) 270 work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
242 || likely(rx_count < MAX_RX_PACKETS)) 271
243 work =
244 cvmx_pow_work_request_sync
245 (CVMX_POW_NO_WAIT);
246 else
247 work = NULL;
248 }
249 prefetch(work); 272 prefetch(work);
250 if (work == NULL) 273 did_work_request = 0;
274 if (work == NULL) {
275 union cvmx_pow_wq_int wq_int;
276 wq_int.u64 = 0;
277 wq_int.s.iq_dis = 1 << pow_receive_group;
278 wq_int.s.wq_int = 1 << pow_receive_group;
279 cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
251 break; 280 break;
281 }
282 pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
283 prefetch(pskb);
252 284
253 /* 285 if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
254 * Limit each core to processing MAX_RX_PACKETS 286 cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
255 * packets without a break. This way the RX can't 287 did_work_request = 1;
256 * starve the TX task. 288 }
257 */ 289
258 if (USE_ASYNC_IOBDMA) { 290 if (rx_count == 0) {
259 291 /*
260 if ((INTERRUPT_LIMIT == 0) 292 * First time through, see if there is enough
261 || likely(rx_count < MAX_RX_PACKETS)) 293 * work waiting to merit waking another
262 cvmx_pow_work_request_async_nocheck 294 * CPU.
263 (CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); 295 */
264 else { 296 union cvmx_pow_wq_int_cntx counts;
265 cvmx_scratch_write64(CVMX_SCR_SCRATCH, 297 int backlog;
266 0x8000000000000000ull); 298 int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
267 cvmx_pow_tag_sw_null_nocheck(); 299 counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
268 } 300 backlog = counts.s.iq_cnt + counts.s.ds_cnt;
301 if (backlog > budget * cores_in_use && napi != NULL)
302 cvm_oct_enable_one_cpu();
269 } 303 }
270 304
271 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; 305 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
272 if (likely(skb_in_hw)) { 306 if (likely(skb_in_hw)) {
273 skb = 307 skb = *pskb;
274 *(struct sk_buff
275 **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
276 sizeof(void *));
277 prefetch(&skb->head); 308 prefetch(&skb->head);
278 prefetch(&skb->len); 309 prefetch(&skb->len);
279 } 310 }
280 prefetch(cvm_oct_device[work->ipprt]); 311 prefetch(cvm_oct_device[work->ipprt]);
281 312
282 rx_count++;
283 /* Immediately throw away all packets with receive errors */ 313 /* Immediately throw away all packets with receive errors */
284 if (unlikely(work->word2.snoip.rcv_error)) { 314 if (unlikely(work->word2.snoip.rcv_error)) {
285 if (cvm_oct_check_rcv_error(work)) 315 if (cvm_oct_check_rcv_error(work))
@@ -292,39 +322,27 @@ void cvm_oct_tasklet_rx(unsigned long unused)
292 * buffer. 322 * buffer.
293 */ 323 */
294 if (likely(skb_in_hw)) { 324 if (likely(skb_in_hw)) {
295 /* 325 skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
296 * This calculation was changed in case the
297 * skb header is using a different address
298 * aliasing type than the buffer. It doesn't
299 * make any differnece now, but the new one is
300 * more correct.
301 */
302 skb->data =
303 skb->head + work->packet_ptr.s.addr -
304 cvmx_ptr_to_phys(skb->head);
305 prefetch(skb->data); 326 prefetch(skb->data);
306 skb->len = work->len; 327 skb->len = work->len;
307 skb_set_tail_pointer(skb, skb->len); 328 skb_set_tail_pointer(skb, skb->len);
308 packet_not_copied = 1; 329 packet_not_copied = 1;
309 } else { 330 } else {
310
311 /* 331 /*
312 * We have to copy the packet. First allocate 332 * We have to copy the packet. First allocate
313 * an skbuff for it. 333 * an skbuff for it.
314 */ 334 */
315 skb = dev_alloc_skb(work->len); 335 skb = dev_alloc_skb(work->len);
316 if (!skb) { 336 if (!skb) {
317 DEBUGPRINT("Port %d failed to allocate " 337 DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n",
318 "skbuff, packet dropped\n", 338 work->ipprt);
319 work->ipprt);
320 cvm_oct_free_work(work); 339 cvm_oct_free_work(work);
321 continue; 340 continue;
322 } 341 }
323 342
324 /* 343 /*
325 * Check if we've received a packet that was 344 * Check if we've received a packet that was
326 * entirely stored in the work entry. This is 345 * entirely stored in the work entry.
327 * untested.
328 */ 346 */
329 if (unlikely(work->word2.s.bufs == 0)) { 347 if (unlikely(work->word2.s.bufs == 0)) {
330 uint8_t *ptr = work->packet_data; 348 uint8_t *ptr = work->packet_data;
@@ -343,15 +361,13 @@ void cvm_oct_tasklet_rx(unsigned long unused)
343 /* No packet buffers to free */ 361 /* No packet buffers to free */
344 } else { 362 } else {
345 int segments = work->word2.s.bufs; 363 int segments = work->word2.s.bufs;
346 union cvmx_buf_ptr segment_ptr = 364 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
347 work->packet_ptr;
348 int len = work->len; 365 int len = work->len;
349 366
350 while (segments--) { 367 while (segments--) {
351 union cvmx_buf_ptr next_ptr = 368 union cvmx_buf_ptr next_ptr =
352 *(union cvmx_buf_ptr *) 369 *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
353 cvmx_phys_to_ptr(segment_ptr.s. 370
354 addr - 8);
355 /* 371 /*
356 * Octeon Errata PKI-100: The segment size is 372 * Octeon Errata PKI-100: The segment size is
357 * wrong. Until it is fixed, calculate the 373 * wrong. Until it is fixed, calculate the
@@ -361,22 +377,18 @@ void cvm_oct_tasklet_rx(unsigned long unused)
361 * one: int segment_size = 377 * one: int segment_size =
362 * segment_ptr.s.size; 378 * segment_ptr.s.size;
363 */ 379 */
364 int segment_size = 380 int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
365 CVMX_FPA_PACKET_POOL_SIZE - 381 (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
366 (segment_ptr.s.addr - 382 /*
367 (((segment_ptr.s.addr >> 7) - 383 * Don't copy more than what
368 segment_ptr.s.back) << 7)); 384 * is left in the packet.
369 /* Don't copy more than what is left 385 */
370 in the packet */
371 if (segment_size > len) 386 if (segment_size > len)
372 segment_size = len; 387 segment_size = len;
373 /* Copy the data into the packet */ 388 /* Copy the data into the packet */
374 memcpy(skb_put(skb, segment_size), 389 memcpy(skb_put(skb, segment_size),
375 cvmx_phys_to_ptr(segment_ptr.s. 390 cvmx_phys_to_ptr(segment_ptr.s.addr),
376 addr),
377 segment_size); 391 segment_size);
378 /* Reduce the amount of bytes left
379 to copy */
380 len -= segment_size; 392 len -= segment_size;
381 segment_ptr = next_ptr; 393 segment_ptr = next_ptr;
382 } 394 }
@@ -389,16 +401,15 @@ void cvm_oct_tasklet_rx(unsigned long unused)
389 struct net_device *dev = cvm_oct_device[work->ipprt]; 401 struct net_device *dev = cvm_oct_device[work->ipprt];
390 struct octeon_ethernet *priv = netdev_priv(dev); 402 struct octeon_ethernet *priv = netdev_priv(dev);
391 403
392 /* Only accept packets for devices 404 /*
393 that are currently up */ 405 * Only accept packets for devices that are
406 * currently up.
407 */
394 if (likely(dev->flags & IFF_UP)) { 408 if (likely(dev->flags & IFF_UP)) {
395 skb->protocol = eth_type_trans(skb, dev); 409 skb->protocol = eth_type_trans(skb, dev);
396 skb->dev = dev; 410 skb->dev = dev;
397 411
398 if (unlikely 412 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
399 (work->word2.s.not_IP
400 || work->word2.s.IP_exc
401 || work->word2.s.L4_error))
402 skb->ip_summed = CHECKSUM_NONE; 413 skb->ip_summed = CHECKSUM_NONE;
403 else 414 else
404 skb->ip_summed = CHECKSUM_UNNECESSARY; 415 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -414,15 +425,13 @@ void cvm_oct_tasklet_rx(unsigned long unused)
414#endif 425#endif
415 } 426 }
416 netif_receive_skb(skb); 427 netif_receive_skb(skb);
428 rx_count++;
417 } else { 429 } else {
430 /* Drop any packet received for a device that isn't up */
418 /* 431 /*
419 * Drop any packet received for a 432 DEBUGPRINT("%s: Device not up, packet dropped\n",
420 * device that isn't up. 433 dev->name);
421 */ 434 */
422 /*
423 DEBUGPRINT("%s: Device not up, packet dropped\n",
424 dev->name);
425 */
426#ifdef CONFIG_64BIT 435#ifdef CONFIG_64BIT
427 atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); 436 atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
428#else 437#else
@@ -435,9 +444,8 @@ void cvm_oct_tasklet_rx(unsigned long unused)
435 * Drop any packet received for a device that 444 * Drop any packet received for a device that
436 * doesn't exist. 445 * doesn't exist.
437 */ 446 */
438 DEBUGPRINT("Port %d not controlled by Linux, packet " 447 DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n",
439 "dropped\n", 448 work->ipprt);
440 work->ipprt);
441 dev_kfree_skb_irq(skb); 449 dev_kfree_skb_irq(skb);
442 } 450 }
443 /* 451 /*
@@ -459,47 +467,93 @@ void cvm_oct_tasklet_rx(unsigned long unused)
459 cvm_oct_free_work(work); 467 cvm_oct_free_work(work);
460 } 468 }
461 } 469 }
462
463 /* Restore the original POW group mask */ 470 /* Restore the original POW group mask */
464 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); 471 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
465 if (USE_ASYNC_IOBDMA) { 472 if (USE_ASYNC_IOBDMA) {
466 /* Restore the scratch area */ 473 /* Restore the scratch area */
467 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); 474 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
468 } 475 }
476 cvm_oct_rx_refill_pool(0);
469 477
470 if (USE_SKBUFFS_IN_HW) { 478 if (rx_count < budget && napi != NULL) {
471 /* Refill the packet buffer pool */ 479 /* No more work */
472 number_to_free = 480 napi_complete(napi);
473 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 481 cvm_oct_no_more_work();
474
475 if (number_to_free > 0) {
476 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
477 -number_to_free);
478 num_freed =
479 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
480 CVMX_FPA_PACKET_POOL_SIZE,
481 number_to_free);
482 if (num_freed != number_to_free) {
483 cvmx_fau_atomic_add32
484 (FAU_NUM_PACKET_BUFFERS_TO_FREE,
485 number_to_free - num_freed);
486 }
487 }
488 } 482 }
483 return rx_count;
489} 484}
490 485
486#ifdef CONFIG_NET_POLL_CONTROLLER
487/**
488 * cvm_oct_poll_controller - poll for receive packets
489 * device.
490 *
491 * @dev: Device to poll. Unused
492 */
493void cvm_oct_poll_controller(struct net_device *dev)
494{
495 cvm_oct_napi_poll(NULL, 16);
496}
497#endif
498
491void cvm_oct_rx_initialize(void) 499void cvm_oct_rx_initialize(void)
492{ 500{
493 int i; 501 int i;
494 /* Initialize all of the tasklets */ 502 struct net_device *dev_for_napi = NULL;
495 for (i = 0; i < NR_CPUS; i++) 503 union cvmx_pow_wq_int_thrx int_thr;
496 tasklet_init(&cvm_oct_tasklet[i].t, cvm_oct_tasklet_rx, 0); 504 union cvmx_pow_wq_int_pc int_pc;
505
506 for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
507 if (cvm_oct_device[i]) {
508 dev_for_napi = cvm_oct_device[i];
509 break;
510 }
511 }
512
513 if (NULL == dev_for_napi)
514 panic("No net_devices were allocated.");
515
516 if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
517 atomic_set(&core_state.available_cores, max_rx_cpus);
518 else
519 atomic_set(&core_state.available_cores, num_online_cpus());
520 core_state.baseline_cores = atomic_read(&core_state.available_cores);
521
522 core_state.cpu_state = CPU_MASK_NONE;
523 for_each_possible_cpu(i) {
524 netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
525 cvm_oct_napi_poll, rx_napi_weight);
526 napi_enable(&cvm_oct_napi[i].napi);
527 }
528 /* Register an IRQ handler to receive POW interrupts */
529 i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
530 cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
531
532 if (i)
533 panic("Could not acquire Ethernet IRQ %d\n",
534 OCTEON_IRQ_WORKQ0 + pow_receive_group);
535
536 disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
537
538 int_thr.u64 = 0;
539 int_thr.s.tc_en = 1;
540 int_thr.s.tc_thr = 1;
541 /* Enable POW interrupt when our port has at least one packet */
542 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
543
544 int_pc.u64 = 0;
545 int_pc.s.pc_thr = 5;
546 cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
547
548
549 /* Schedule NAPI now. This will indirectly enable interrupts. */
550 cvm_oct_enable_one_cpu();
497} 551}
498 552
499void cvm_oct_rx_shutdown(void) 553void cvm_oct_rx_shutdown(void)
500{ 554{
501 int i; 555 int i;
502 /* Shutdown all of the tasklets */ 556 /* Shutdown all of the NAPIs */
503 for (i = 0; i < NR_CPUS; i++) 557 for_each_possible_cpu(i)
504 tasklet_kill(&cvm_oct_tasklet[i].t); 558 netif_napi_del(&cvm_oct_napi[i].napi);
505} 559}
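
The receive path above replaces per-CPU tasklets with NAPI. A stripped-down sketch of the same interrupt/poll hand-off, with hypothetical demo_* names and a placeholder demo_rx_one(), might be:

/*
 * Generic NAPI skeleton illustrating the structure the rx path above
 * now follows: IRQ off -> napi_schedule -> poll up to budget ->
 * napi_complete -> IRQ back on.
 */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct demo_priv {
	struct napi_struct napi;
	struct net_device *dev;
	int irq;
};

static int demo_rx_one(struct demo_priv *p)
{
	/* Pull one packet from the hardware; return 0 when none left. */
	return 0;
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_priv *p = container_of(napi, struct demo_priv, napi);
	int done = 0;

	while (done < budget && demo_rx_one(p))
		done++;

	if (done < budget) {
		/* No more work: leave polling mode and re-arm the IRQ. */
		napi_complete(napi);
		enable_irq(p->irq);
	}
	return done;
}

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	struct demo_priv *p = dev_id;

	/* Mask further interrupts and let the poll loop take over. */
	disable_irq_nosync(p->irq);
	napi_schedule(&p->napi);
	return IRQ_HANDLED;
}

static void demo_setup(struct demo_priv *p)
{
	netif_napi_add(p->dev, &p->napi, demo_poll, 32);
	napi_enable(&p->napi);
}
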
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a9b72b87a7a6..a0743b85d54e 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -24,10 +24,29 @@
24 * This file may also be available under a different license from Cavium. 24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26*********************************************************************/ 26*********************************************************************/
27#include "cvmx-fau.h"
27 28
28irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id);
29void cvm_oct_poll_controller(struct net_device *dev); 29void cvm_oct_poll_controller(struct net_device *dev);
30void cvm_oct_tasklet_rx(unsigned long unused);
31
32void cvm_oct_rx_initialize(void); 30void cvm_oct_rx_initialize(void);
33void cvm_oct_rx_shutdown(void); 31void cvm_oct_rx_shutdown(void);
32
33static inline void cvm_oct_rx_refill_pool(int fill_threshold)
34{
35 int number_to_free;
36 int num_freed;
37 /* Refill the packet buffer pool */
38 number_to_free =
39 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
40
41 if (number_to_free > fill_threshold) {
42 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
43 -number_to_free);
44 num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
45 CVMX_FPA_PACKET_POOL_SIZE,
46 number_to_free);
47 if (num_freed != number_to_free) {
48 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
49 number_to_free - num_freed);
50 }
51 }
52}
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 6061d01eca2d..2d8589eb461e 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -26,7 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h> 29#include <net/dst.h>
31 30
32#include <asm/octeon/octeon.h> 31#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 00dc0f4bad19..b58b8971f939 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -26,7 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h> 29#include <net/dst.h>
31 30
32#include <asm/octeon/octeon.h> 31#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 535294105f65..afc2b734d554 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -31,10 +31,6 @@
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/seq_file.h>
37#include <linux/proc_fs.h>
38#include <net/dst.h> 34#include <net/dst.h>
39#ifdef CONFIG_XFRM 35#ifdef CONFIG_XFRM
40#include <linux/xfrm.h> 36#include <linux/xfrm.h>
@@ -52,11 +48,14 @@
52 48
53#include "cvmx-wqe.h" 49#include "cvmx-wqe.h"
54#include "cvmx-fau.h" 50#include "cvmx-fau.h"
51#include "cvmx-pip.h"
55#include "cvmx-pko.h" 52#include "cvmx-pko.h"
56#include "cvmx-helper.h" 53#include "cvmx-helper.h"
57 54
58#include "cvmx-gmxx-defs.h" 55#include "cvmx-gmxx-defs.h"
59 56
57#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
58
60/* 59/*
61 * You can define GET_SKBUFF_QOS() to override how the skbuff output 60 * You can define GET_SKBUFF_QOS() to override how the skbuff output
62 * function determines which output queue is used. The default 61 * function determines which output queue is used. The default
@@ -68,12 +67,81 @@
68#define GET_SKBUFF_QOS(skb) 0 67#define GET_SKBUFF_QOS(skb) 0
69#endif 68#endif
70 69
70static void cvm_oct_tx_do_cleanup(unsigned long arg);
71static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
72
73/* Maximum number of SKBs to try to free per xmit packet. */
74#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
75
76static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
77{
78 int32_t undo;
79 undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
80 if (undo > 0)
81 cvmx_fau_atomic_add32(fau, -undo);
82 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
83 return skb_to_free;
84}
85
86static void cvm_oct_kick_tx_poll_watchdog(void)
87{
88 union cvmx_ciu_timx ciu_timx;
89 ciu_timx.u64 = 0;
90 ciu_timx.s.one_shot = 1;
91 ciu_timx.s.len = cvm_oct_tx_poll_interval;
92 cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
93}
94
95void cvm_oct_free_tx_skbs(struct net_device *dev)
96{
97 int32_t skb_to_free;
98 int qos, queues_per_port;
99 int total_freed = 0;
100 int total_remaining = 0;
101 unsigned long flags;
102 struct octeon_ethernet *priv = netdev_priv(dev);
103
104 queues_per_port = cvmx_pko_get_num_queues(priv->port);
105 /* Drain any pending packets in the free list */
106 for (qos = 0; qos < queues_per_port; qos++) {
107 if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
108 continue;
109 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
110 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
111
112
113 total_freed += skb_to_free;
114 if (skb_to_free > 0) {
115 struct sk_buff *to_free_list = NULL;
116 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
117 while (skb_to_free > 0) {
118 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
119 t->next = to_free_list;
120 to_free_list = t;
121 skb_to_free--;
122 }
123 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
124 /* Do the actual freeing outside of the lock. */
125 while (to_free_list) {
126 struct sk_buff *t = to_free_list;
127 to_free_list = to_free_list->next;
128 dev_kfree_skb_any(t);
129 }
130 }
131 total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
132 }
133 if (total_freed >= 0 && netif_queue_stopped(dev))
134 netif_wake_queue(dev);
135 if (total_remaining)
136 cvm_oct_kick_tx_poll_watchdog();
137}
138
71/** 139/**
72 * Packet transmit 140 * cvm_oct_xmit - transmit a packet
73 *
74 * @skb: Packet to send 141 * @skb: Packet to send
75 * @dev: Device info structure 142 * @dev: Device info structure
76 * Returns Always returns zero 143 *
144 * Returns Always returns NETDEV_TX_OK
77 */ 145 */
78int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) 146int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
79{ 147{
@@ -81,13 +149,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
81 union cvmx_buf_ptr hw_buffer; 149 union cvmx_buf_ptr hw_buffer;
82 uint64_t old_scratch; 150 uint64_t old_scratch;
83 uint64_t old_scratch2; 151 uint64_t old_scratch2;
84 int dropped;
85 int qos; 152 int qos;
86 int queue_it_up; 153 int i;
154 enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
87 struct octeon_ethernet *priv = netdev_priv(dev); 155 struct octeon_ethernet *priv = netdev_priv(dev);
156 struct sk_buff *to_free_list;
88 int32_t skb_to_free; 157 int32_t skb_to_free;
89 int32_t undo;
90 int32_t buffers_to_free; 158 int32_t buffers_to_free;
159 u32 total_to_clean;
160 unsigned long flags;
91#if REUSE_SKBUFFS_WITHOUT_FREE 161#if REUSE_SKBUFFS_WITHOUT_FREE
92 unsigned char *fpa_head; 162 unsigned char *fpa_head;
93#endif 163#endif
@@ -98,9 +168,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
98 */ 168 */
99 prefetch(priv); 169 prefetch(priv);
100 170
101 /* Start off assuming no drop */
102 dropped = 0;
103
104 /* 171 /*
105 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to 172 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
106 * completely remove "qos" in the event neither interface 173 * completely remove "qos" in the event neither interface
@@ -135,6 +202,28 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
135 } 202 }
136 203
137 /* 204 /*
205 * We have space for 6 segment pointers. If there will be more
206 * than that, we must linearize.
207 */
208 if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
209 if (unlikely(__skb_linearize(skb))) {
210 queue_type = QUEUE_DROP;
211 if (USE_ASYNC_IOBDMA) {
212 /* Get the number of skbuffs in use by the hardware */
213 CVMX_SYNCIOBDMA;
214 skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
215 } else {
216 /* Get the number of skbuffs in use by the hardware */
217 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
218 MAX_SKB_TO_FREE);
219 }
220 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
221 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
222 goto skip_xmit;
223 }
224 }
225
226 /*
138 * The CN3XXX series of parts has an errata (GMX-401) which 227 * The CN3XXX series of parts has an errata (GMX-401) which
139 * causes the GMX block to hang if a collision occurs towards 228 * causes the GMX block to hang if a collision occurs towards
140 * the end of a <68 byte packet. As a workaround for this, we 229 * the end of a <68 byte packet. As a workaround for this, we
@@ -162,13 +251,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
162 } 251 }
163 } 252 }
164 253
165 /* Build the PKO buffer pointer */
166 hw_buffer.u64 = 0;
167 hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
168 hw_buffer.s.pool = 0;
169 hw_buffer.s.size =
170 (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;
171
172 /* Build the PKO command */ 254 /* Build the PKO command */
173 pko_command.u64 = 0; 255 pko_command.u64 = 0;
174 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ 256 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
@@ -178,7 +260,31 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
178 pko_command.s.subone0 = 1; 260 pko_command.s.subone0 = 1;
179 261
180 pko_command.s.dontfree = 1; 262 pko_command.s.dontfree = 1;
181 pko_command.s.reg0 = priv->fau + qos * 4; 263
264 /* Build the PKO buffer pointer */
265 hw_buffer.u64 = 0;
266 if (skb_shinfo(skb)->nr_frags == 0) {
267 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
268 hw_buffer.s.pool = 0;
269 hw_buffer.s.size = skb->len;
270 } else {
271 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
272 hw_buffer.s.pool = 0;
273 hw_buffer.s.size = skb_headlen(skb);
274 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
275 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
276 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
277 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
278 hw_buffer.s.size = fs->size;
279 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
280 }
281 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
282 hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
283 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
284 pko_command.s.gather = 1;
285 goto dont_put_skbuff_in_hw;
286 }
287
182 /* 288 /*
183 * See if we can put this skb in the FPA pool. Any strange 289 * See if we can put this skb in the FPA pool. Any strange
184 * behavior from the Linux networking stack will most likely 290 * behavior from the Linux networking stack will most likely
@@ -190,7 +296,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
190 * shown a 25% increase in performance under some loads. 296 * shown a 25% increase in performance under some loads.
191 */ 297 */
192#if REUSE_SKBUFFS_WITHOUT_FREE 298#if REUSE_SKBUFFS_WITHOUT_FREE
193 fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f); 299 fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
194 if (unlikely(skb->data < fpa_head)) { 300 if (unlikely(skb->data < fpa_head)) {
195 /* 301 /*
196 * printk("TX buffer beginning can't meet FPA 302 * printk("TX buffer beginning can't meet FPA
@@ -248,10 +354,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
248 * We can use this buffer in the FPA. We don't need the FAU 354 * We can use this buffer in the FPA. We don't need the FAU
249 * update anymore 355 * update anymore
250 */ 356 */
251 pko_command.s.reg0 = 0;
252 pko_command.s.dontfree = 0; 357 pko_command.s.dontfree = 0;
253 358
254 hw_buffer.s.back = (skb->data - fpa_head) >> 7; 359 hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
255 *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; 360 *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
256 361
257 /* 362 /*
@@ -272,16 +377,16 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
272 skb->tc_verd = 0; 377 skb->tc_verd = 0;
273#endif /* CONFIG_NET_CLS_ACT */ 378#endif /* CONFIG_NET_CLS_ACT */
274#endif /* CONFIG_NET_SCHED */ 379#endif /* CONFIG_NET_SCHED */
380#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
275 381
276dont_put_skbuff_in_hw: 382dont_put_skbuff_in_hw:
277#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
278 383
279 /* Check if we can use the hardware checksumming */ 384 /* Check if we can use the hardware checksumming */
280 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && 385 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
281 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) && 386 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
282 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)) 387 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
283 && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) 388 && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
284 || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) { 389 || (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
285 /* Use hardware checksum calc */ 390 /* Use hardware checksum calc */
286 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; 391 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
287 } 392 }
@@ -299,89 +404,116 @@ dont_put_skbuff_in_hw:
299 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 404 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
300 } 405 }
301 406
302 /* 407 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
303 * We try to claim MAX_SKB_TO_FREE buffers. If there were not
304 * that many available, we have to un-claim (undo) any that
305 * were in excess. If skb_to_free is positive we will free
306 * that many buffers.
307 */
308 undo = skb_to_free > 0 ?
309 MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
310 if (undo > 0)
311 cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
312 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
313 MAX_SKB_TO_FREE : -skb_to_free;
314 408
315 /* 409 /*
316 * If we're sending faster than the receive can free them then 410 * If we're sending faster than the receive can free them then
317 * don't do the HW free. 411 * don't do the HW free.
318 */ 412 */
319 if ((buffers_to_free < -100) && !pko_command.s.dontfree) { 413 if ((buffers_to_free < -100) && !pko_command.s.dontfree)
320 pko_command.s.dontfree = 1; 414 pko_command.s.dontfree = 1;
321 pko_command.s.reg0 = priv->fau + qos * 4; 415
416 if (pko_command.s.dontfree) {
417 queue_type = QUEUE_CORE;
418 pko_command.s.reg0 = priv->fau+qos*4;
419 } else {
420 queue_type = QUEUE_HW;
322 } 421 }
422 if (USE_ASYNC_IOBDMA)
423 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
323 424
324 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, 425 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
325 CVMX_PKO_LOCK_CMD_QUEUE);
326 426
327 /* Drop this packet if we have too many already queued to the HW */ 427 /* Drop this packet if we have too many already queued to the HW */
328 if (unlikely 428 if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
329 (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { 429 if (dev->tx_queue_len != 0) {
330 /* 430 /* Drop the lock when notifying the core. */
331 DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name); 431 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
332 */ 432 netif_stop_queue(dev);
333 dropped = 1; 433 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
434 } else {
435 /* If not using normal queueing. */
436 queue_type = QUEUE_DROP;
437 goto skip_xmit;
438 }
334 } 439 }
440
441 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
442 CVMX_PKO_LOCK_NONE);
443
335 /* Send the packet to the output queue */ 444 /* Send the packet to the output queue */
336 else if (unlikely 445 if (unlikely(cvmx_pko_send_packet_finish(priv->port,
337 (cvmx_pko_send_packet_finish 446 priv->queue + qos,
338 (priv->port, priv->queue + qos, pko_command, hw_buffer, 447 pko_command, hw_buffer,
339 CVMX_PKO_LOCK_CMD_QUEUE))) { 448 CVMX_PKO_LOCK_NONE))) {
340 DEBUGPRINT("%s: Failed to send the packet\n", dev->name); 449 DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
341 dropped = 1; 450 queue_type = QUEUE_DROP;
451 }
452skip_xmit:
453 to_free_list = NULL;
454
455 switch (queue_type) {
456 case QUEUE_DROP:
457 skb->next = to_free_list;
458 to_free_list = skb;
459 priv->stats.tx_dropped++;
460 break;
461 case QUEUE_HW:
462 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
463 break;
464 case QUEUE_CORE:
465 __skb_queue_tail(&priv->tx_free_list[qos], skb);
466 break;
467 default:
468 BUG();
469 }
470
471 while (skb_to_free > 0) {
472 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
473 t->next = to_free_list;
474 to_free_list = t;
475 skb_to_free--;
476 }
477
478 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
479
480 /* Do the actual freeing outside of the lock. */
481 while (to_free_list) {
482 struct sk_buff *t = to_free_list;
483 to_free_list = to_free_list->next;
484 dev_kfree_skb_any(t);
342 } 485 }
343 486
344 if (USE_ASYNC_IOBDMA) { 487 if (USE_ASYNC_IOBDMA) {
488 CVMX_SYNCIOBDMA;
489 total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
345 /* Restore the scratch area */ 490 /* Restore the scratch area */
346 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); 491 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
347 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); 492 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
348 }
349
350 queue_it_up = 0;
351 if (unlikely(dropped)) {
352 dev_kfree_skb_any(skb);
353 priv->stats.tx_dropped++;
354 } else { 493 } else {
355 if (USE_SKBUFFS_IN_HW) { 494 total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
356 /* Put this packet on the queue to be freed later */
357 if (pko_command.s.dontfree)
358 queue_it_up = 1;
359 else
360 cvmx_fau_atomic_add32
361 (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
362 } else {
363 /* Put this packet on the queue to be freed later */
364 queue_it_up = 1;
365 }
366 } 495 }
367 496
368 if (queue_it_up) { 497 if (total_to_clean & 0x3ff) {
369 spin_lock(&priv->tx_free_list[qos].lock); 498 /*
370 __skb_queue_tail(&priv->tx_free_list[qos], skb); 499 * Schedule the cleanup tasklet every 1024 packets for
371 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0); 500 * the pathological case of high traffic on one port
372 spin_unlock(&priv->tx_free_list[qos].lock); 501 * delaying clean up of packets on a different port
373 } else { 502 * that is blocked waiting for the cleanup.
374 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); 503 */
504 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
375 } 505 }
376 506
377 return 0; 507 cvm_oct_kick_tx_poll_watchdog();
508
509 return NETDEV_TX_OK;
378} 510}
379 511
380/** 512/**
381 * Packet transmit to the POW 513 * cvm_oct_xmit_pow - transmit a packet to the POW
382 *
383 * @skb: Packet to send 514 * @skb: Packet to send
384 * @dev: Device info structure 515 * @dev: Device info structure
516
385 * Returns Always returns zero 517 * Returns Always returns zero
386 */ 518 */
387int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) 519int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
@@ -459,8 +591,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
459 work->word2.s.dec_ipcomp = 0; /* FIXME */ 591 work->word2.s.dec_ipcomp = 0; /* FIXME */
460#endif 592#endif
461 work->word2.s.tcp_or_udp = 593 work->word2.s.tcp_or_udp =
462 (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) 594 (ip_hdr(skb)->protocol == IPPROTO_TCP)
463 || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP); 595 || (ip_hdr(skb)->protocol == IPPROTO_UDP);
464#if 0 596#if 0
465 /* FIXME */ 597 /* FIXME */
466 work->word2.s.dec_ipsec = 0; 598 work->word2.s.dec_ipsec = 0;
@@ -529,116 +661,63 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
529} 661}
530 662
531/** 663/**
532 * Transmit a work queue entry out of the ethernet port. Both 664 * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX.
533 * the work queue entry and the packet data can optionally be 665 * @dev: Device being shutdown
534 * freed. The work will be freed on error as well.
535 *
536 * @dev: Device to transmit out.
537 * @work_queue_entry:
538 * Work queue entry to send
539 * @do_free: True if the work queue entry and packet data should be
540 * freed. If false, neither will be freed.
541 * @qos: Index into the queues for this port to transmit on. This
542 * is used to implement QoS if their are multiple queues per
543 * port. This parameter must be between 0 and the number of
544 * queues per port minus 1. Values outside of this range will
545 * be change to zero.
546 * 666 *
547 * Returns Zero on success, negative on failure.
548 */ 667 */
549int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, 668void cvm_oct_tx_shutdown_dev(struct net_device *dev)
550 int do_free, int qos)
551{ 669{
552 unsigned long flags;
553 union cvmx_buf_ptr hw_buffer;
554 cvmx_pko_command_word0_t pko_command;
555 int dropped;
556 struct octeon_ethernet *priv = netdev_priv(dev); 670 struct octeon_ethernet *priv = netdev_priv(dev);
557 cvmx_wqe_t *work = work_queue_entry; 671 unsigned long flags;
672 int qos;
558 673
559 if (!(dev->flags & IFF_UP)) { 674 for (qos = 0; qos < 16; qos++) {
560 DEBUGPRINT("%s: Device not up\n", dev->name); 675 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
561 if (do_free) 676 while (skb_queue_len(&priv->tx_free_list[qos]))
562 cvm_oct_free_work(work); 677 dev_kfree_skb_any(__skb_dequeue
563 return -1; 678 (&priv->tx_free_list[qos]));
679 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
564 } 680 }
681}
565 682
566 /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely 683static void cvm_oct_tx_do_cleanup(unsigned long arg)
567 remove "qos" in the event neither interface supports 684{
568 multiple queues per port */ 685 int port;
569 if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
570 (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
571 if (qos <= 0)
572 qos = 0;
573 else if (qos >= cvmx_pko_get_num_queues(priv->port))
574 qos = 0;
575 } else
576 qos = 0;
577
578 /* Start off assuming no drop */
579 dropped = 0;
580
581 local_irq_save(flags);
582 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
583 CVMX_PKO_LOCK_CMD_QUEUE);
584
585 /* Build the PKO buffer pointer */
586 hw_buffer.u64 = 0;
587 hw_buffer.s.addr = work->packet_ptr.s.addr;
588 hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
589 hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
590 hw_buffer.s.back = work->packet_ptr.s.back;
591 686
592 /* Build the PKO command */ 687 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
593 pko_command.u64 = 0; 688 if (cvm_oct_device[port]) {
594 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ 689 struct net_device *dev = cvm_oct_device[port];
595 pko_command.s.dontfree = !do_free; 690 cvm_oct_free_tx_skbs(dev);
596 pko_command.s.segs = work->word2.s.bufs; 691 }
597 pko_command.s.total_bytes = work->len; 692 }
693}
598 694
599 /* Check if we can use the hardware checksumming */ 695static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
600 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc)) 696{
601 pko_command.s.ipoffp1 = 0; 697 /* Disable the interrupt. */
602 else 698 cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
603 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; 699 /* Do the work in the tasklet. */
700 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
701 return IRQ_HANDLED;
702}
604 703
605 /* Send the packet to the output queue */ 704void cvm_oct_tx_initialize(void)
606 if (unlikely 705{
607 (cvmx_pko_send_packet_finish 706 int i;
608 (priv->port, priv->queue + qos, pko_command, hw_buffer,
609 CVMX_PKO_LOCK_CMD_QUEUE))) {
610 DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
611 dropped = -1;
612 }
613 local_irq_restore(flags);
614 707
615 if (unlikely(dropped)) { 708 /* Disable the interrupt. */
616 if (do_free) 709 cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
617 cvm_oct_free_work(work); 710 /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
618 priv->stats.tx_dropped++; 711 i = request_irq(OCTEON_IRQ_TIMER1,
619 } else if (do_free) 712 cvm_oct_tx_cleanup_watchdog, 0,
620 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); 713 "Ethernet", cvm_oct_device);
621 714
622 return dropped; 715 if (i)
716 panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
623} 717}
624EXPORT_SYMBOL(cvm_oct_transmit_qos);
625 718
626/** 719void cvm_oct_tx_shutdown(void)
627 * This function frees all skb that are currently queued for TX.
628 *
629 * @dev: Device being shutdown
630 */
631void cvm_oct_tx_shutdown(struct net_device *dev)
632{ 720{
633 struct octeon_ethernet *priv = netdev_priv(dev); 721 /* Free the interrupt handler */
634 unsigned long flags; 722 free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
635 int qos;
636
637 for (qos = 0; qos < 16; qos++) {
638 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
639 while (skb_queue_len(&priv->tx_free_list[qos]))
640 dev_kfree_skb_any(__skb_dequeue
641 (&priv->tx_free_list[qos]));
642 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
643 }
644} 723}
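
The transmit cleanup above hinges on cvm_oct_adjust_skb_to_free(): the per-queue FAU counter holds minus the number of completed transmits, the caller optimistically claims MAX_SKB_TO_FREE of them, gives back whatever it over-claimed, and clamps the result. A small userspace illustration of that arithmetic, with a plain C11 atomic standing in for the FAU register, could be:

/*
 * Userspace illustration of the "claim MAX, then undo the surplus"
 * bookkeeping done by cvm_oct_adjust_skb_to_free() above.  The
 * hardware is modelled as decrementing the counter once per
 * completed transmit.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_SKB_TO_FREE 32

static atomic_int fau;	/* holds minus the number of reclaimable skbs */

static int claim_completions(void)
{
	int old = atomic_fetch_add(&fau, MAX_SKB_TO_FREE);
	int undo = old > 0 ? MAX_SKB_TO_FREE : old + MAX_SKB_TO_FREE;

	if (undo > 0)		/* give back whatever we over-claimed */
		atomic_fetch_add(&fau, -undo);

	/* Number of skbs we may actually free now, clamped to MAX. */
	return -old > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -old;
}

int main(void)
{
	atomic_store(&fau, -5);		/* pretend 5 transmits completed */
	printf("freeable: %d, fau now: %d\n", claim_completions(),
	       atomic_load(&fau));	/* freeable: 5, fau now: 0 */

	atomic_store(&fau, -100);	/* more completions than MAX */
	printf("freeable: %d, fau now: %d\n", claim_completions(),
	       atomic_load(&fau));	/* freeable: 32, fau now: -68 */
	return 0;
}
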
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index c0bebf750bc0..547680c6c371 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -29,29 +29,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
29int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); 29int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, 30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
31 int do_free, int qos); 31 int do_free, int qos);
32void cvm_oct_tx_shutdown(struct net_device *dev); 32void cvm_oct_tx_initialize(void);
33 33void cvm_oct_tx_shutdown(void);
34/** 34void cvm_oct_tx_shutdown_dev(struct net_device *dev);
35 * Free dead transmit skbs.
36 *
37 * @priv: The driver data
38 * @skb_to_free: The number of SKBs to free (free none if negative).
39 * @qos: The queue to free from.
40 * @take_lock: If true, acquire the skb list lock.
41 */
42static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
43 int skb_to_free,
44 int qos, int take_lock)
45{
46 /* Free skbuffs not in use by the hardware. */
47 if (skb_to_free > 0) {
48 if (take_lock)
49 spin_lock(&priv->tx_free_list[qos].lock);
50 while (skb_to_free > 0) {
51 dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
52 skb_to_free--;
53 }
54 if (take_lock)
55 spin_unlock(&priv->tx_free_list[qos].lock);
56 }
57}
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 37b665918000..23467563fe57 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -30,10 +30,9 @@
30 } while (0) 30 } while (0)
31 31
32/** 32/**
33 * Given a packet data address, return a pointer to the 33 * cvm_oct_get_buffer_ptr - convert packet data address to pointer
34 * beginning of the packet buffer.
35 *
36 * @packet_ptr: Packet data hardware address 34 * @packet_ptr: Packet data hardware address
35 *
37 * Returns Packet buffer pointer 36 * Returns Packet buffer pointer
38 */ 37 */
39static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) 38static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
@@ -43,9 +42,7 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
43} 42}
44 43
45/** 44/**
46 * Given an IPD/PKO port number, return the logical interface it is	45 * INTERFACE - convert IPD port to logical interface
47 * on.
48 *
49 * @ipd_port: Port to check 46 * @ipd_port: Port to check
50 * 47 *
51 * Returns Logical interface 48 * Returns Logical interface
@@ -65,9 +62,7 @@ static inline int INTERFACE(int ipd_port)
65} 62}
66 63
67/** 64/**
68 * Given an IPD/PKO port number, return the port's index on a 65 * INDEX - convert IPD/PKO port number to the port's interface index
69 * logical interface.
70 *
71 * @ipd_port: Port to check 66 * @ipd_port: Port to check
72 * 67 *
73 * Returns Index into interface port list 68 * Returns Index into interface port list
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index ee3dc41b2c53..3fca1cc31ed8 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -26,7 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h> 29#include <net/dst.h>
31 30
32#include <asm/octeon/octeon.h> 31#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 4cfd4b136b32..02b63678811a 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -29,7 +29,6 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/delay.h>
33#include <linux/phy.h> 32#include <linux/phy.h>
34 33
35#include <net/dst.h> 34#include <net/dst.h>
@@ -43,8 +42,6 @@
43#include "ethernet-tx.h" 42#include "ethernet-tx.h"
44#include "ethernet-mdio.h" 43#include "ethernet-mdio.h"
45#include "ethernet-util.h" 44#include "ethernet-util.h"
46#include "ethernet-proc.h"
47
48 45
49#include "cvmx-pip.h" 46#include "cvmx-pip.h"
50#include "cvmx-pko.h" 47#include "cvmx-pko.h"
@@ -104,13 +101,15 @@ MODULE_PARM_DESC(pow_send_list, "\n"
104 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" 101 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
105 "\tusing the pow_send_group."); 102 "\tusing the pow_send_group.");
106 103
107static int disable_core_queueing = 1; 104int max_rx_cpus = -1;
108module_param(disable_core_queueing, int, 0444); 105module_param(max_rx_cpus, int, 0444);
109MODULE_PARM_DESC(disable_core_queueing, "\n" 106MODULE_PARM_DESC(max_rx_cpus, "\n"
110 "\tWhen set the networking core's tx_queue_len is set to zero. This\n" 107 "\t\tThe maximum number of CPUs to use for packet reception.\n"
111 "\tallows packets to be sent without lock contention in the packet\n" 108 "\t\tUse -1 to use all available CPUs.");
112 "\tscheduler resulting in some cases in improved throughput.\n");
113 109
110int rx_napi_weight = 32;
111module_param(rx_napi_weight, int, 0444);
112MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
114 113
115/* 114/*
116 * The offset from mac_addr_base that should be used for the next port 115 * The offset from mac_addr_base that should be used for the next port
@@ -122,9 +121,16 @@ MODULE_PARM_DESC(disable_core_queueing, "\n"
122static unsigned int cvm_oct_mac_addr_offset; 121static unsigned int cvm_oct_mac_addr_offset;
123 122
124/** 123/**
125 * Periodic timer to check auto negotiation 124 * cvm_oct_poll_queue - Workqueue for polling operations.
125 */
126struct workqueue_struct *cvm_oct_poll_queue;
127
128/**
129 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
130 *
131 * Set to one right before cvm_oct_poll_queue is destroyed.
126 */ 132 */
127static struct timer_list cvm_oct_poll_timer; 133atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
128 134
129/** 135/**
130 * Array of every ethernet device owned by this driver indexed by 136 * Array of every ethernet device owned by this driver indexed by
@@ -132,65 +138,44 @@ static struct timer_list cvm_oct_poll_timer;
132 */ 138 */
133struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; 139struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
134 140
135/** 141u64 cvm_oct_tx_poll_interval;
136 * Periodic timer tick for slow management operations 142
137 * 143static void cvm_oct_rx_refill_worker(struct work_struct *work);
138 * @arg: Device to check 144static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
139 */ 145
140static void cvm_do_timer(unsigned long arg) 146static void cvm_oct_rx_refill_worker(struct work_struct *work)
141{ 147{
142 int32_t skb_to_free, undo; 148 /*
143 int queues_per_port; 149 * FPA 0 may have been drained, try to refill it if we need
144 int qos; 150 * more than num_packet_buffers / 2, otherwise normal receive
145 struct octeon_ethernet *priv; 151 * processing will refill it. If it were drained, no packets
146 static int port; 152 * could be received so cvm_oct_napi_poll would never be
153 * invoked to do the refill.
154 */
155 cvm_oct_rx_refill_pool(num_packet_buffers / 2);
147 156
148 if (port >= CVMX_PIP_NUM_INPUT_PORTS) { 157 if (!atomic_read(&cvm_oct_poll_queue_stopping))
149 /* 158 queue_delayed_work(cvm_oct_poll_queue,
150 * All ports have been polled. Start the next 159 &cvm_oct_rx_refill_work, HZ);
151 * iteration through the ports in one second. 160}
152 */ 161
153 port = 0; 162static void cvm_oct_periodic_worker(struct work_struct *work)
154 mod_timer(&cvm_oct_poll_timer, jiffies + HZ); 163{
155 return; 164 struct octeon_ethernet *priv = container_of(work,
156 } 165 struct octeon_ethernet,
157 if (!cvm_oct_device[port]) 166 port_periodic_work.work);
158 goto out;
159 167
160 priv = netdev_priv(cvm_oct_device[port]);
161 if (priv->poll) 168 if (priv->poll)
162 priv->poll(cvm_oct_device[port]); 169 priv->poll(cvm_oct_device[priv->port]);
163
164 queues_per_port = cvmx_pko_get_num_queues(port);
165 /* Drain any pending packets in the free list */
166 for (qos = 0; qos < queues_per_port; qos++) {
167 if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
168 continue;
169 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
170 MAX_SKB_TO_FREE);
171 undo = skb_to_free > 0 ?
172 MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
173 if (undo > 0)
174 cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
175 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
176 MAX_SKB_TO_FREE : -skb_to_free;
177 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
178 }
179 cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
180 170
181out: 171 cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
182 port++; 172
183 /* Poll the next port in a 50th of a second. 173 if (!atomic_read(&cvm_oct_poll_queue_stopping))
184 This spreads the polling of ports out a little bit */ 174 queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
185 mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); 175 }
186}
187 176
188/**
189 * Configure common hardware for all interfaces
190 */
191static __init void cvm_oct_configure_common_hw(void) 177static __init void cvm_oct_configure_common_hw(void)
192{ 178{
193 int r;
194 /* Setup the FPA */ 179 /* Setup the FPA */
195 cvmx_fpa_enable(); 180 cvmx_fpa_enable();
196 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, 181 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
@@ -205,28 +190,13 @@ static __init void cvm_oct_configure_common_hw(void)
205 cvmx_helper_setup_red(num_packet_buffers / 4, 190 cvmx_helper_setup_red(num_packet_buffers / 4,
206 num_packet_buffers / 8); 191 num_packet_buffers / 8);
207 192
208 /* Enable the MII interface */
209 if (!octeon_is_simulation())
210 cvmx_write_csr(CVMX_SMIX_EN(0), 1);
211
212 /* Register an IRQ hander for to receive POW interrupts */
213 r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
214 cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
215 cvm_oct_device);
216
217#if defined(CONFIG_SMP) && 0
218 if (USE_MULTICORE_RECEIVE) {
219 irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
220 cpu_online_mask);
221 }
222#endif
223} 193}
224 194
225/** 195/**
226 * Free a work queue entry received in a intercept callback.	196 * cvm_oct_free_work - Free a work queue entry
197 *
198 * @work_queue_entry: Work queue entry to free
227 * 199 *
228 * @work_queue_entry:
229 * Work queue entry to free
230 * Returns Zero on success, Negative on failure. 200 * Returns Zero on success, Negative on failure.
231 */ 201 */
232int cvm_oct_free_work(void *work_queue_entry) 202int cvm_oct_free_work(void *work_queue_entry)
@@ -253,9 +223,9 @@ int cvm_oct_free_work(void *work_queue_entry)
253EXPORT_SYMBOL(cvm_oct_free_work); 223EXPORT_SYMBOL(cvm_oct_free_work);
254 224
255/** 225/**
256 * Get the low level ethernet statistics 226 * cvm_oct_common_get_stats - get the low level ethernet statistics
257 *
258 * @dev: Device to get the statistics from 227 * @dev: Device to get the statistics from
228 *
259 * Returns Pointer to the statistics 229 * Returns Pointer to the statistics
260 */ 230 */
261static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) 231static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
@@ -299,8 +269,7 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
299} 269}
300 270
301/** 271/**
302 * Change the link MTU. Unimplemented 272 * cvm_oct_common_change_mtu - change the link MTU
303 *
304 * @dev: Device to change 273 * @dev: Device to change
305 * @new_mtu: The new MTU 274 * @new_mtu: The new MTU
306 * 275 *
@@ -364,8 +333,7 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
364} 333}
365 334
366/** 335/**
367 * Set the multicast list. Currently unimplemented. 336 * cvm_oct_common_set_multicast_list - set the multicast list
368 *
369 * @dev: Device to work on 337 * @dev: Device to work on
370 */ 338 */
371static void cvm_oct_common_set_multicast_list(struct net_device *dev) 339static void cvm_oct_common_set_multicast_list(struct net_device *dev)
@@ -420,10 +388,10 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
420} 388}
421 389
422/** 390/**
423 * Set the hardware MAC address for a device 391 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
424 * 392 * @dev: The device in question.
425 * @dev: Device to change the MAC address for 393 * @addr: Address structure to change it too.
426 * @addr:    Address structure to change it too. MAC address is addr + 2.	393 * @addr:    Address structure to change it to.
427 * Returns Zero on success 395 * Returns Zero on success
428 */ 396 */
429static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) 397static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
@@ -470,9 +438,9 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
470} 438}
471 439
472/** 440/**
473 * Per network device initialization 441 * cvm_oct_common_init - per network device initialization
474 *
475 * @dev: Device to initialize 442 * @dev: Device to initialize
443 *
476 * Returns Zero on success 444 * Returns Zero on success
477 */ 445 */
478int cvm_oct_common_init(struct net_device *dev) 446int cvm_oct_common_init(struct net_device *dev)
@@ -510,8 +478,11 @@ int cvm_oct_common_init(struct net_device *dev)
510 && (always_use_pow || strstr(pow_send_list, dev->name))) 478 && (always_use_pow || strstr(pow_send_list, dev->name)))
511 priv->queue = -1; 479 priv->queue = -1;
512 480
513 if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM) 481 if (priv->queue != -1) {
514 dev->features |= NETIF_F_IP_CSUM; 482 dev->features |= NETIF_F_SG;
483 if (USE_HW_TCPUDP_CHECKSUM)
484 dev->features |= NETIF_F_IP_CSUM;
485 }
515 486
516 /* We do our own locking, Linux doesn't need to */ 487 /* We do our own locking, Linux doesn't need to */
517 dev->features |= NETIF_F_LLTX; 488 dev->features |= NETIF_F_LLTX;
@@ -625,12 +596,6 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
625 596
626extern void octeon_mdiobus_force_mod_depencency(void); 597extern void octeon_mdiobus_force_mod_depencency(void);
627 598
628/**
629 * Module/ driver initialization. Creates the linux network
630 * devices.
631 *
632 * Returns Zero on success
633 */
634static int __init cvm_oct_init_module(void) 599static int __init cvm_oct_init_module(void)
635{ 600{
636 int num_interfaces; 601 int num_interfaces;
@@ -648,8 +613,12 @@ static int __init cvm_oct_init_module(void)
648 else 613 else
649 cvm_oct_mac_addr_offset = 0; 614 cvm_oct_mac_addr_offset = 0;
650 615
651 cvm_oct_proc_initialize(); 616 cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
652 cvm_oct_rx_initialize(); 617 if (cvm_oct_poll_queue == NULL) {
618 pr_err("octeon-ethernet: Cannot create workqueue");
619 return -ENOMEM;
620 }
621
653 cvm_oct_configure_common_hw(); 622 cvm_oct_configure_common_hw();
654 623
655 cvmx_helper_initialize_packet_io_global(); 624 cvmx_helper_initialize_packet_io_global();
@@ -682,6 +651,9 @@ static int __init cvm_oct_init_module(void)
682 */ 651 */
683 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 652 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
684 653
654 /* Initialize the FAU used for counting tx SKBs that need to be freed */
655 cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
656
685 if ((pow_send_group != -1)) { 657 if ((pow_send_group != -1)) {
686 struct net_device *dev; 658 struct net_device *dev;
687 pr_info("\tConfiguring device for POW only access\n"); 659 pr_info("\tConfiguring device for POW only access\n");
@@ -689,7 +661,6 @@ static int __init cvm_oct_init_module(void)
689 if (dev) { 661 if (dev) {
690 /* Initialize the device private structure. */ 662 /* Initialize the device private structure. */
691 struct octeon_ethernet *priv = netdev_priv(dev); 663 struct octeon_ethernet *priv = netdev_priv(dev);
692 memset(priv, 0, sizeof(struct octeon_ethernet));
693 664
694 dev->netdev_ops = &cvm_oct_pow_netdev_ops; 665 dev->netdev_ops = &cvm_oct_pow_netdev_ops;
695 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; 666 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
@@ -700,19 +671,16 @@ static int __init cvm_oct_init_module(void)
700 skb_queue_head_init(&priv->tx_free_list[qos]); 671 skb_queue_head_init(&priv->tx_free_list[qos]);
701 672
702 if (register_netdev(dev) < 0) { 673 if (register_netdev(dev) < 0) {
703 pr_err("Failed to register ethernet " 674 pr_err("Failed to register ethernet device for POW\n");
704 "device for POW\n");
705 kfree(dev); 675 kfree(dev);
706 } else { 676 } else {
707 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev; 677 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
708 pr_info("%s: POW send group %d, receive " 678 pr_info("%s: POW send group %d, receive group %d\n",
709 "group %d\n", 679 dev->name, pow_send_group,
710 dev->name, pow_send_group, 680 pow_receive_group);
711 pow_receive_group);
712 } 681 }
713 } else { 682 } else {
714 pr_err("Failed to allocate ethernet device " 683 pr_err("Failed to allocate ethernet device for POW\n");
715 "for POW\n");
716 } 684 }
717 } 685 }
718 686
@@ -730,17 +698,15 @@ static int __init cvm_oct_init_module(void)
730 struct net_device *dev = 698 struct net_device *dev =
731 alloc_etherdev(sizeof(struct octeon_ethernet)); 699 alloc_etherdev(sizeof(struct octeon_ethernet));
732 if (!dev) { 700 if (!dev) {
733 pr_err("Failed to allocate ethernet device " 701 pr_err("Failed to allocate ethernet device for port %d\n", port);
734 "for port %d\n", port);
735 continue; 702 continue;
736 } 703 }
737 if (disable_core_queueing)
738 dev->tx_queue_len = 0;
739 704
740 /* Initialize the device private structure. */ 705 /* Initialize the device private structure. */
741 priv = netdev_priv(dev); 706 priv = netdev_priv(dev);
742 memset(priv, 0, sizeof(struct octeon_ethernet));
743 707
708 INIT_DELAYED_WORK(&priv->port_periodic_work,
709 cvm_oct_periodic_worker);
744 priv->imode = imode; 710 priv->imode = imode;
745 priv->port = port; 711 priv->port = port;
746 priv->queue = cvmx_pko_get_base_queue(priv->port); 712 priv->queue = cvmx_pko_get_base_queue(priv->port);
@@ -803,44 +769,25 @@ static int __init cvm_oct_init_module(void)
803 fau -= 769 fau -=
804 cvmx_pko_get_num_queues(priv->port) * 770 cvmx_pko_get_num_queues(priv->port) *
805 sizeof(uint32_t); 771 sizeof(uint32_t);
772 queue_delayed_work(cvm_oct_poll_queue,
773 &priv->port_periodic_work, HZ);
806 } 774 }
807 } 775 }
808 } 776 }
809 777
810 if (INTERRUPT_LIMIT) { 778 cvm_oct_tx_initialize();
811 /* 779 cvm_oct_rx_initialize();
812 * Set the POW timer rate to give an interrupt at most
813 * INTERRUPT_LIMIT times per second.
814 */
815 cvmx_write_csr(CVMX_POW_WQ_INT_PC,
816 octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
817 16 * 256) << 8);
818 780
819 /* 781 /*
820	 * Enable POW timer interrupt.  It will count when	782	 * 150 us: about 10 1500-byte packets at 1GE.
821 * there are packets available. 783 */
822 */ 784 cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
823 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
824 0x1ful << 24);
825 } else {
826 /* Enable POW interrupt when our port has at least one packet */
827 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
828 }
829 785
830 /* Enable the poll timer for checking RGMII status */ 786 queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
831 init_timer(&cvm_oct_poll_timer);
832 cvm_oct_poll_timer.data = 0;
833 cvm_oct_poll_timer.function = cvm_do_timer;
834 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
835 787
836 return 0; 788 return 0;
837} 789}
838 790
839/**
840 * Module / driver shutdown
841 *
842 * Returns Zero on success
843 */
844static void __exit cvm_oct_cleanup_module(void) 791static void __exit cvm_oct_cleanup_module(void)
845{ 792{
846 int port; 793 int port;
@@ -853,22 +800,31 @@ static void __exit cvm_oct_cleanup_module(void)
853 /* Free the interrupt handler */ 800 /* Free the interrupt handler */
854 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device); 801 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
855 802
856 del_timer(&cvm_oct_poll_timer); 803 atomic_inc_return(&cvm_oct_poll_queue_stopping);
804 cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
805
857 cvm_oct_rx_shutdown(); 806 cvm_oct_rx_shutdown();
807 cvm_oct_tx_shutdown();
808
858 cvmx_pko_disable(); 809 cvmx_pko_disable();
859 810
860 /* Free the ethernet devices */ 811 /* Free the ethernet devices */
861 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { 812 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
862 if (cvm_oct_device[port]) { 813 if (cvm_oct_device[port]) {
863 cvm_oct_tx_shutdown(cvm_oct_device[port]); 814 struct net_device *dev = cvm_oct_device[port];
864 unregister_netdev(cvm_oct_device[port]); 815 struct octeon_ethernet *priv = netdev_priv(dev);
865 kfree(cvm_oct_device[port]); 816 cancel_delayed_work_sync(&priv->port_periodic_work);
817
818 cvm_oct_tx_shutdown_dev(dev);
819 unregister_netdev(dev);
820 kfree(dev);
866 cvm_oct_device[port] = NULL; 821 cvm_oct_device[port] = NULL;
867 } 822 }
868 } 823 }
869 824
825 destroy_workqueue(cvm_oct_poll_queue);
826
870 cvmx_pko_shutdown(); 827 cvmx_pko_shutdown();
871 cvm_oct_proc_shutdown();
872 828
873 cvmx_ipd_free_ptr(); 829 cvmx_ipd_free_ptr();
874 830
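The ethernet.c changes above replace the single cvm_do_timer with delayed work items running on a private single-threaded workqueue, gated by an atomic stop flag so the work stops rescheduling itself during module unload. A stripped-down sketch of that pattern using only the standard workqueue API seen in the hunks; all example_* names are hypothetical:

#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;
static atomic_t example_stopping = ATOMIC_INIT(0);

static void example_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_work, example_worker);

static void example_worker(struct work_struct *work)
{
	/* ... periodic maintenance goes here ... */

	/* Re-arm ourselves unless shutdown has begun. */
	if (!atomic_read(&example_stopping))
		queue_delayed_work(example_wq, &example_work, HZ);
}

static int example_start(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	queue_delayed_work(example_wq, &example_work, HZ);
	return 0;
}

static void example_stop(void)
{
	atomic_inc(&example_stopping);	/* stop re-arming first */
	cancel_delayed_work_sync(&example_work);
	destroy_workqueue(example_wq);
}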
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 402a15b9bb0e..d58192563552 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -57,58 +57,12 @@ struct octeon_ethernet {
57 uint64_t link_info; 57 uint64_t link_info;
58 /* Called periodically to check link status */ 58 /* Called periodically to check link status */
59 void (*poll) (struct net_device *dev); 59 void (*poll) (struct net_device *dev);
60 struct delayed_work port_periodic_work;
61 struct work_struct port_work; /* may be unused. */
60}; 62};
61 63
62/**
63 * Free a work queue entry received in a intercept callback.
64 *
65 * @work_queue_entry:
66 * Work queue entry to free
67 * Returns Zero on success, Negative on failure.
68 */
69int cvm_oct_free_work(void *work_queue_entry); 64int cvm_oct_free_work(void *work_queue_entry);
70 65
71/**
72 * Transmit a work queue entry out of the ethernet port. Both
73 * the work queue entry and the packet data can optionally be
74 * freed. The work will be freed on error as well.
75 *
76 * @dev: Device to transmit out.
77 * @work_queue_entry:
78 * Work queue entry to send
79 * @do_free: True if the work queue entry and packet data should be
80 * freed. If false, neither will be freed.
81 * @qos: Index into the queues for this port to transmit on. This
82 * is used to implement QoS if their are multiple queues per
83 * port. This parameter must be between 0 and the number of
84 * queues per port minus 1. Values outside of this range will
85 * be change to zero.
86 *
87 * Returns Zero on success, negative on failure.
88 */
89int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
90 int do_free, int qos);
91
92/**
93 * Transmit a work queue entry out of the ethernet port. Both
94 * the work queue entry and the packet data can optionally be
95 * freed. The work will be freed on error as well. This simply
96 * wraps cvmx_oct_transmit_qos() for backwards compatability.
97 *
98 * @dev: Device to transmit out.
99 * @work_queue_entry:
100 * Work queue entry to send
101 * @do_free: True if the work queue entry and packet data should be
102 * freed. If false, neither will be freed.
103 *
104 * Returns Zero on success, negative on failure.
105 */
106static inline int cvm_oct_transmit(struct net_device *dev,
107 void *work_queue_entry, int do_free)
108{
109 return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0);
110}
111
112extern int cvm_oct_rgmii_init(struct net_device *dev); 66extern int cvm_oct_rgmii_init(struct net_device *dev);
113extern void cvm_oct_rgmii_uninit(struct net_device *dev); 67extern void cvm_oct_rgmii_uninit(struct net_device *dev);
114extern int cvm_oct_rgmii_open(struct net_device *dev); 68extern int cvm_oct_rgmii_open(struct net_device *dev);
@@ -134,5 +88,11 @@ extern int pow_send_group;
134extern int pow_receive_group; 88extern int pow_receive_group;
135extern char pow_send_list[]; 89extern char pow_send_list[];
136extern struct net_device *cvm_oct_device[]; 90extern struct net_device *cvm_oct_device[];
91extern struct workqueue_struct *cvm_oct_poll_queue;
92extern atomic_t cvm_oct_poll_queue_stopping;
93extern u64 cvm_oct_tx_poll_interval;
94
95extern int max_rx_cpus;
96extern int rx_napi_weight;
137 97
138#endif 98#endif
diff --git a/drivers/staging/sm7xx/smtc2d.c b/drivers/staging/sm7xx/smtc2d.c
index 133b86c6a678..2fff0a0052d1 100644
--- a/drivers/staging/sm7xx/smtc2d.c
+++ b/drivers/staging/sm7xx/smtc2d.c
@@ -5,7 +5,7 @@
5 * Author: Boyod boyod.yang@siliconmotion.com.cn 5 * Author: Boyod boyod.yang@siliconmotion.com.cn
6 * 6 *
7 * Copyright (C) 2009 Lemote, Inc. 7 * Copyright (C) 2009 Lemote, Inc.
8 * Author: Wu Zhangjin, wuzj@lemote.com 8 * Author: Wu Zhangjin, wuzhangjin@gmail.com
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for 11 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtc2d.h b/drivers/staging/sm7xx/smtc2d.h
index 38d0c335322b..02b4fa29136c 100644
--- a/drivers/staging/sm7xx/smtc2d.h
+++ b/drivers/staging/sm7xx/smtc2d.h
@@ -5,7 +5,7 @@
5 * Author: Ge Wang, gewang@siliconmotion.com 5 * Author: Ge Wang, gewang@siliconmotion.com
6 * 6 *
7 * Copyright (C) 2009 Lemote, Inc. 7 * Copyright (C) 2009 Lemote, Inc.
8 * Author: Wu Zhangjin, wuzj@lemote.com 8 * Author: Wu Zhangjin, wuzhangjin@gmail.com
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for 11 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 161dbc9c1397..a4f6f49aef48 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -6,7 +6,7 @@
6 * Boyod boyod.yang@siliconmotion.com.cn 6 * Boyod boyod.yang@siliconmotion.com.cn
7 * 7 *
8 * Copyright (C) 2009 Lemote, Inc. 8 * Copyright (C) 2009 Lemote, Inc.
9 * Author: Wu Zhangjin, wuzj@lemote.com 9 * Author: Wu Zhangjin, wuzhangjin@gmail.com
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive for 12 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
index 7f2c34138215..7ee565c2c952 100644
--- a/drivers/staging/sm7xx/smtcfb.h
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -6,7 +6,7 @@
6 * Boyod boyod.yang@siliconmotion.com.cn 6 * Boyod boyod.yang@siliconmotion.com.cn
7 * 7 *
8 * Copyright (C) 2009 Lemote, Inc. 8 * Copyright (C) 2009 Lemote, Inc.
9 * Author: Wu Zhangjin, wuzj@lemote.com 9 * Author: Wu Zhangjin, wuzhangjin@gmail.com
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive for 12 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 72d68b3dc478..4637bcbe03a4 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1633,6 +1633,11 @@ static int __init fsl_diu_setup(char *options)
1633#endif 1633#endif
1634 1634
1635static struct of_device_id fsl_diu_match[] = { 1635static struct of_device_id fsl_diu_match[] = {
1636#ifdef CONFIG_PPC_MPC512x
1637 {
1638 .compatible = "fsl,mpc5121-diu",
1639 },
1640#endif
1636 { 1641 {
1637 .compatible = "fsl,diu", 1642 .compatible = "fsl,diu",
1638 }, 1643 },
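The fsl-diu-fb change only adds an MPC5121 compatible entry, guarded by CONFIG_PPC_MPC512x, to the existing fsl_diu_match table, so the existing device-tree matching picks up "fsl,mpc5121-diu" nodes with no other code changes. A hedged sketch of how such a table is typically consulted (the helper name example_is_diu_node is hypothetical and not part of the driver):

#include <linux/types.h>
#include <linux/of.h>

/* True if the device node matches any entry in fsl_diu_match. */
static bool example_is_diu_node(struct device_node *np)
{
	return of_match_node(fsl_diu_match, np) != NULL;
}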
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 2e94b71b20d9..2bb95cd308c1 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -34,6 +34,7 @@
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35#include <linux/io.h> 35#include <linux/io.h>
36#include <linux/uaccess.h> 36#include <linux/uaccess.h>
37#include <linux/clk.h>
37 38
38#include <asm/addrspace.h> 39#include <asm/addrspace.h>
39#include <asm/mach-ar7/ar7.h> 40#include <asm/mach-ar7/ar7.h>
@@ -80,6 +81,8 @@ static struct resource *ar7_regs_wdt;
80/* Pointer to the remapped WDT IO space */ 81/* Pointer to the remapped WDT IO space */
81static struct ar7_wdt *ar7_wdt; 82static struct ar7_wdt *ar7_wdt;
82 83
84static struct clk *vbus_clk;
85
83static void ar7_wdt_kick(u32 value) 86static void ar7_wdt_kick(u32 value)
84{ 87{
85 WRITE_REG(ar7_wdt->kick_lock, 0x5555); 88 WRITE_REG(ar7_wdt->kick_lock, 0x5555);
@@ -138,17 +141,19 @@ static void ar7_wdt_disable(u32 value)
138static void ar7_wdt_update_margin(int new_margin) 141static void ar7_wdt_update_margin(int new_margin)
139{ 142{
140 u32 change; 143 u32 change;
144 u32 vbus_rate;
141 145
142 change = new_margin * (ar7_vbus_freq() / prescale_value); 146 vbus_rate = clk_get_rate(vbus_clk);
147 change = new_margin * (vbus_rate / prescale_value);
143 if (change < 1) 148 if (change < 1)
144 change = 1; 149 change = 1;
145 if (change > 0xffff) 150 if (change > 0xffff)
146 change = 0xffff; 151 change = 0xffff;
147 ar7_wdt_change(change); 152 ar7_wdt_change(change);
148 margin = change * prescale_value / ar7_vbus_freq(); 153 margin = change * prescale_value / vbus_rate;
149 printk(KERN_INFO DRVNAME 154 printk(KERN_INFO DRVNAME
150 ": timer margin %d seconds (prescale %d, change %d, freq %d)\n", 155 ": timer margin %d seconds (prescale %d, change %d, freq %d)\n",
151 margin, prescale_value, change, ar7_vbus_freq()); 156 margin, prescale_value, change, vbus_rate);
152} 157}
153 158
154static void ar7_wdt_enable_wdt(void) 159static void ar7_wdt_enable_wdt(void)
@@ -298,6 +303,13 @@ static int __devinit ar7_wdt_probe(struct platform_device *pdev)
298 goto out_mem_region; 303 goto out_mem_region;
299 } 304 }
300 305
306 vbus_clk = clk_get(NULL, "vbus");
307 if (IS_ERR(vbus_clk)) {
308 printk(KERN_ERR DRVNAME ": could not get vbus clock\n");
309 rc = PTR_ERR(vbus_clk);
310 goto out_mem_region;
311 }
312
301 ar7_wdt_disable_wdt(); 313 ar7_wdt_disable_wdt();
302 ar7_wdt_prescale(prescale_value); 314 ar7_wdt_prescale(prescale_value);
303 ar7_wdt_update_margin(margin); 315 ar7_wdt_update_margin(margin);