Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 23
-rw-r--r--  drivers/ieee1394/dv1394.c | 2
-rw-r--r--  drivers/ieee1394/iso.h | 2
-rw-r--r--  drivers/ieee1394/ohci1394.c | 34
-rw-r--r--  drivers/ieee1394/raw1394.c | 9
-rw-r--r--  drivers/ieee1394/video1394.c | 2
-rw-r--r--  drivers/input/Kconfig | 9
-rw-r--r--  drivers/input/Makefile | 2
-rw-r--r--  drivers/input/xen-kbdfront.c | 340
-rw-r--r--  drivers/md/Makefile | 6
-rw-r--r--  drivers/md/dm-exception-store.c | 10
-rw-r--r--  drivers/md/dm-io.c | 38
-rw-r--r--  drivers/md/dm-io.h | 79
-rw-r--r--  drivers/md/dm-kcopyd.c (renamed from drivers/md/kcopyd.c) | 298
-rw-r--r--  drivers/md/dm-log.c | 254
-rw-r--r--  drivers/md/dm-log.h | 131
-rw-r--r--  drivers/md/dm-raid1.c | 132
-rw-r--r--  drivers/md/dm-snap.c | 22
-rw-r--r--  drivers/md/dm-snap.h | 4
-rw-r--r--  drivers/md/dm-table.c | 42
-rw-r--r--  drivers/md/dm.c | 16
-rw-r--r--  drivers/md/dm.h | 98
-rw-r--r--  drivers/md/kcopyd.h | 42
-rw-r--r--  drivers/mtd/Kconfig | 6
-rw-r--r--  drivers/mtd/Makefile | 1
-rw-r--r--  drivers/mtd/ar7part.c | 151
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 19
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 30
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 15
-rw-r--r--  drivers/mtd/chips/cfi_probe.c | 7
-rw-r--r--  drivers/mtd/chips/cfi_util.c | 2
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 73
-rw-r--r--  drivers/mtd/cmdlinepart.c | 15
-rw-r--r--  drivers/mtd/devices/Kconfig | 7
-rw-r--r--  drivers/mtd/devices/block2mtd.c | 10
-rw-r--r--  drivers/mtd/devices/lart.c | 16
-rw-r--r--  drivers/mtd/devices/m25p80.c | 40
-rw-r--r--  drivers/mtd/devices/mtdram.c | 1
-rw-r--r--  drivers/mtd/devices/phram.c | 2
-rw-r--r--  drivers/mtd/ftl.c | 6
-rw-r--r--  drivers/mtd/inftlmount.c | 5
-rw-r--r--  drivers/mtd/maps/Kconfig | 3
-rw-r--r--  drivers/mtd/maps/bast-flash.c | 5
-rw-r--r--  drivers/mtd/maps/ck804xrom.c | 89
-rw-r--r--  drivers/mtd/maps/integrator-flash.c | 2
-rw-r--r--  drivers/mtd/maps/ixp2000.c | 3
-rw-r--r--  drivers/mtd/maps/ixp4xx.c | 2
-rw-r--r--  drivers/mtd/maps/omap_nor.c | 12
-rw-r--r--  drivers/mtd/maps/pcmciamtd.c | 2
-rw-r--r--  drivers/mtd/maps/physmap.c | 8
-rw-r--r--  drivers/mtd/maps/plat-ram.c | 50
-rw-r--r--  drivers/mtd/maps/pmcmsp-flash.c | 2
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c | 2
-rw-r--r--  drivers/mtd/maps/sharpsl-flash.c | 2
-rw-r--r--  drivers/mtd/maps/tqm8xxl.c | 6
-rw-r--r--  drivers/mtd/mtdoops.c | 2
-rw-r--r--  drivers/mtd/nand/Kconfig | 56
-rw-r--r--  drivers/mtd/nand/Makefile | 2
-rw-r--r--  drivers/mtd/nand/at91_nand.c | 370
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c | 17
-rw-r--r--  drivers/mtd/nand/cs553x_nand.c | 2
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 39
-rw-r--r--  drivers/mtd/nand/fsl_upm.c | 291
-rw-r--r--  drivers/mtd/nand/nand_base.c | 21
-rw-r--r--  drivers/mtd/nand/ndfc.c | 2
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 1
-rw-r--r--  drivers/mtd/nand/plat_nand.c | 2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 1249
-rw-r--r--  drivers/mtd/nand/rtc_from4.c | 50
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 73
-rw-r--r--  drivers/mtd/nftlmount.c | 5
-rw-r--r--  drivers/mtd/ofpart.c | 2
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 51
-rw-r--r--  drivers/mtd/onenand/onenand_bbt.c | 3
-rw-r--r--  drivers/mtd/rfd_ftl.c | 2
-rw-r--r--  drivers/mtd/ubi/Kconfig | 9
-rw-r--r--  drivers/mtd/ubi/build.c | 40
-rw-r--r--  drivers/mtd/ubi/debug.h | 4
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 5
-rw-r--r--  drivers/mtd/ubi/io.c | 4
-rw-r--r--  drivers/mtd/ubi/scan.c | 41
-rw-r--r--  drivers/mtd/ubi/scan.h | 2
-rw-r--r--  drivers/mtd/ubi/ubi-media.h | 372
-rw-r--r--  drivers/mtd/ubi/ubi.h | 7
-rw-r--r--  drivers/net/arm/at91_ether.c | 1
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/atlx/atl1.c | 138
-rw-r--r--  drivers/net/atlx/atlx.c | 177
-rw-r--r--  drivers/net/ax88796.c | 1
-rw-r--r--  drivers/net/bfin_mac.c | 7
-rw-r--r--  drivers/net/cpmac.c | 2
-rw-r--r--  drivers/net/dm9000.c | 1
-rw-r--r--  drivers/net/e1000e/82571.c | 6
-rw-r--r--  drivers/net/e1000e/defines.h | 3
-rw-r--r--  drivers/net/e1000e/e1000.h | 34
-rw-r--r--  drivers/net/e1000e/es2lan.c | 129
-rw-r--r--  drivers/net/e1000e/ethtool.c | 49
-rw-r--r--  drivers/net/e1000e/hw.h | 12
-rw-r--r--  drivers/net/e1000e/netdev.c | 159
-rw-r--r--  drivers/net/e1000e/phy.c | 73
-rw-r--r--  drivers/net/ehea/ehea_main.c | 4
-rw-r--r--  drivers/net/forcedeth.c | 432
-rw-r--r--  drivers/net/gianfar.c | 104
-rw-r--r--  drivers/net/ibm_newemac/core.c | 83
-rw-r--r--  drivers/net/ibm_newemac/core.h | 14
-rw-r--r--  drivers/net/ibm_newemac/mal.c | 20
-rw-r--r--  drivers/net/ibm_newemac/rgmii.c | 2
-rw-r--r--  drivers/net/ibm_newemac/tah.c | 2
-rw-r--r--  drivers/net/ibm_newemac/zmii.c | 2
-rw-r--r--  drivers/net/igb/igb_main.c | 2
-rw-r--r--  drivers/net/irda/ali-ircc.c | 2
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 2
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/jazzsonic.c | 2
-rw-r--r--  drivers/net/korina.c | 39
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/meth.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 5
-rw-r--r--  drivers/net/netx-eth.c | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 15
-rw-r--r--  drivers/net/niu.c | 371
-rw-r--r--  drivers/net/niu.h | 12
-rw-r--r--  drivers/net/phy/mdio_bus.c | 3
-rw-r--r--  drivers/net/phy/phy.c | 4
-rw-r--r--  drivers/net/phy/phy_device.c | 129
-rw-r--r--  drivers/net/s2io.c | 128
-rw-r--r--  drivers/net/s2io.h | 9
-rw-r--r--  drivers/net/sgiseeq.c | 4
-rw-r--r--  drivers/net/smc911x.c | 8
-rw-r--r--  drivers/net/smc91x.c | 2
-rw-r--r--  drivers/net/sni_82596.c | 2
-rw-r--r--  drivers/net/tehuti.c | 15
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/net/tsi108_eth.c | 2
-rw-r--r--  drivers/net/typhoon.c | 1
-rw-r--r--  drivers/net/ucc_geth.c | 2
-rw-r--r--  drivers/net/via-velocity.c | 46
-rw-r--r--  drivers/net/wan/c101.c | 6
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 4
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/video/Kconfig | 14
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/xen-fbfront.c | 550
-rw-r--r--  drivers/xen/Kconfig | 19
-rw-r--r--  drivers/xen/Makefile | 4
-rw-r--r--  drivers/xen/balloon.c | 712
-rw-r--r--  drivers/xen/events.c | 674
-rw-r--r--  drivers/xen/features.c | 29
-rw-r--r--  drivers/xen/grant-table.c | 37
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 6
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 32
-rw-r--r--  drivers/xen/xencomm.c | 232
154 files changed, 7711 insertions, 1800 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3a0e3549739f..80f0ec91e2cf 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -97,4 +97,6 @@ source "drivers/dca/Kconfig"
 source "drivers/auxdisplay/Kconfig"
 
 source "drivers/uio/Kconfig"
+
+source "drivers/xen/Kconfig"
 endmenu
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9c6f3f99208d..d771da816d95 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -47,6 +47,7 @@
 
 #include <xen/interface/grant_table.h>
 #include <xen/interface/io/blkif.h>
+#include <xen/interface/io/protocols.h>
 
 #include <asm/xen/hypervisor.h>
 
@@ -74,7 +75,6 @@ static struct block_device_operations xlvbd_block_fops;
 struct blkfront_info
 {
 	struct xenbus_device *xbdev;
-	dev_t dev;
 	struct gendisk *gd;
 	int vdevice;
 	blkif_vdev_t handle;
@@ -88,6 +88,7 @@ struct blkfront_info
 	struct blk_shadow shadow[BLK_RING_SIZE];
 	unsigned long shadow_free;
 	int feature_barrier;
+	int is_ready;
 
 	/**
 	 * The number of people holding this device open.  We won't allow a
@@ -614,6 +615,12 @@ again:
 		message = "writing event-channel";
 		goto abort_transaction;
 	}
+	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
+			    XEN_IO_PROTO_ABI_NATIVE);
+	if (err) {
+		message = "writing protocol";
+		goto abort_transaction;
+	}
 
 	err = xenbus_transaction_end(xbt, 0);
 	if (err) {
@@ -833,6 +840,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	spin_unlock_irq(&blkif_io_lock);
 
 	add_disk(info->gd);
+
+	info->is_ready = 1;
 }
 
 /**
@@ -896,7 +905,7 @@ static void backend_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateClosing:
-		bd = bdget(info->dev);
+		bd = bdget_disk(info->gd, 0);
 		if (bd == NULL)
 			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
 
@@ -925,6 +934,13 @@ static int blkfront_remove(struct xenbus_device *dev)
 	return 0;
 }
 
+static int blkfront_is_ready(struct xenbus_device *dev)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+
+	return info->is_ready;
+}
+
 static int blkif_open(struct inode *inode, struct file *filep)
 {
 	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
@@ -971,6 +987,7 @@ static struct xenbus_driver blkfront = {
 	.remove = blkfront_remove,
 	.resume = blkfront_resume,
 	.otherend_changed = backend_changed,
+	.is_ready = blkfront_is_ready,
 };
 
 static int __init xlblk_init(void)
@@ -998,3 +1015,5 @@ module_exit(xlblk_exit);
 MODULE_DESCRIPTION("Xen virtual block device frontend");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
+MODULE_ALIAS("xen:vbd");
+MODULE_ALIAS("xenblk");
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 6228fadacd38..9d19aec5820a 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2167,6 +2167,7 @@ static const struct file_operations dv1394_fops=
 /*
  * Export information about protocols/devices supported by this driver.
  */
+#ifdef MODULE
 static struct ieee1394_device_id dv1394_id_table[] = {
 	{
 		.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
@@ -2177,6 +2178,7 @@ static struct ieee1394_device_id dv1394_id_table[] = {
 };
 
 MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
+#endif /* MODULE */
 
 static struct hpsb_protocol_driver dv1394_driver = {
 	.name = "dv1394",
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
index b94e55e6eaa5..b5de5f21ef78 100644
--- a/drivers/ieee1394/iso.h
+++ b/drivers/ieee1394/iso.h
@@ -123,6 +123,8 @@ struct hpsb_iso {
 
 	/* how many times the buffer has overflowed or underflowed */
 	atomic_t overflows;
+	/* how many cycles were skipped for a given context */
+	atomic_t skips;
 
 	/* Current number of bytes lost in discarded packets */
 	int bytes_discarded;
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 0690469fcecf..e509e13cb7a7 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1723,6 +1723,8 @@ struct ohci_iso_xmit {
 	struct dma_prog_region prog;
 	struct ohci1394_iso_tasklet task;
 	int task_active;
+	int last_cycle;
+	atomic_t skips;
 
 	u32 ContextControlSet;
 	u32 ContextControlClear;
@@ -1759,6 +1761,8 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
 	iso->hostdata = xmit;
 	xmit->ohci = iso->host->hostdata;
 	xmit->task_active = 0;
+	xmit->last_cycle = -1;
+	atomic_set(&iso->skips, 0);
 
 	dma_prog_region_init(&xmit->prog);
 
@@ -1856,6 +1860,26 @@ static void ohci_iso_xmit_task(unsigned long data)
 	/* parse cycle */
 	cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
 
+	if (xmit->last_cycle > -1) {
+		int cycle_diff = cycle - xmit->last_cycle;
+		int skip;
+
+		/* unwrap */
+		if (cycle_diff < 0) {
+			cycle_diff += 8000;
+			if (cycle_diff < 0)
+				PRINT(KERN_ERR, "bogus cycle diff %d\n",
+				      cycle_diff);
+		}
+
+		skip = cycle_diff - 1;
+		if (skip > 0) {
+			DBGMSG("skipped %d cycles without packet loss", skip);
+			atomic_add(skip, &iso->skips);
+		}
+	}
+	xmit->last_cycle = cycle;
+
 	/* tell the subsystem the packet has gone out */
 	hpsb_iso_packet_sent(iso, cycle, event != 0x11);
 
@@ -1943,6 +1967,16 @@ static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info
 	prev->output_last.branchAddress = cpu_to_le32(
 		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
 
+	/*
+	 * Link the skip address to this descriptor itself.  This causes a
+	 * context to skip a cycle whenever lost cycles or FIFO overruns occur,
+	 * without dropping the data at that point; the application should then
+	 * decide whether this is an error condition or not.  Some protocols
+	 * can deal with this by dropping some rate-matching padding packets.
+	 */
+	next->output_more_immediate.branchAddress =
+			prev->output_last.branchAddress;
+
 	/* disable interrupt, unless required by the IRQ interval */
 	if (prev_i % iso->irq_interval) {
 		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
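
The skip accounting added to ohci_iso_xmit_task() above relies on the OHCI isochronous cycle counter being 13 bits wide and wrapping every 8000 cycles (one second of 125 us cycles), so a negative difference between two completion cycles means the counter wrapped exactly once. A minimal standalone sketch of that unwrap arithmetic (the helper and the sample values are illustrative, not from the patch):

#include <stdio.h>

/* Mirror of the unwrap logic: cycles run 0..7999, so a negative
 * difference means we crossed the wrap point exactly once. */
static int cycles_skipped(int last_cycle, int cycle)
{
	int cycle_diff = cycle - last_cycle;

	if (cycle_diff < 0)
		cycle_diff += 8000;	/* unwrap across the 8000 boundary */
	return cycle_diff - 1;		/* consecutive cycles skip nothing */
}

int main(void)
{
	printf("%d\n", cycles_skipped(100, 101));	/* 0: back-to-back */
	printf("%d\n", cycles_skipped(100, 105));	/* 4 cycles skipped */
	printf("%d\n", cycles_skipped(7999, 3));	/* 3: wrapped counter */
	return 0;
}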
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 04e96ba56e09..ec2a0adbedb2 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2356,13 +2356,16 @@ static void rawiso_activity_cb(struct hpsb_iso *iso)
 static void raw1394_iso_fill_status(struct hpsb_iso *iso,
 				    struct raw1394_iso_status *stat)
 {
+	int overflows = atomic_read(&iso->overflows);
+	int skips = atomic_read(&iso->skips);
+
 	stat->config.data_buf_size = iso->buf_size;
 	stat->config.buf_packets = iso->buf_packets;
 	stat->config.channel = iso->channel;
 	stat->config.speed = iso->speed;
 	stat->config.irq_interval = iso->irq_interval;
 	stat->n_packets = hpsb_iso_n_ready(iso);
-	stat->overflows = atomic_read(&iso->overflows);
+	stat->overflows = ((skips & 0xFFFF) << 16) | ((overflows & 0xFFFF));
 	stat->xmit_cycle = iso->xmit_cycle;
 }
 
@@ -2437,6 +2440,8 @@ static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr)
 
 	/* reset overflow counter */
 	atomic_set(&iso->overflows, 0);
+	/* reset skip counter */
+	atomic_set(&iso->skips, 0);
 
 	return 0;
 }
@@ -2935,6 +2940,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
 /*
  * Export information about protocols/devices supported by this driver.
  */
+#ifdef MODULE
 static struct ieee1394_device_id raw1394_id_table[] = {
 	{
 		.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
@@ -2956,6 +2962,7 @@ static struct ieee1394_device_id raw1394_id_table[] = {
 };
 
 MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
+#endif /* MODULE */
 
 static struct hpsb_protocol_driver raw1394_driver = {
 	.name = "raw1394",
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index e03024eeeac1..e24772d336e1 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1293,6 +1293,7 @@ static const struct file_operations video1394_fops=
 /*
  * Export information about protocols/devices supported by this driver.
  */
+#ifdef MODULE
 static struct ieee1394_device_id video1394_id_table[] = {
 	{
 		.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
@@ -1313,6 +1314,7 @@ static struct ieee1394_device_id video1394_id_table[] = {
 };
 
 MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
+#endif /* MODULE */
 
 static struct hpsb_protocol_driver video1394_driver = {
 	.name = VIDEO1394_DRIVER_NAME,
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 9dea14db724c..5f9d860925a1 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -149,6 +149,15 @@ config INPUT_APMPOWER
 	  To compile this driver as a module, choose M here: the
 	  module will be called apm-power.
 
+config XEN_KBDDEV_FRONTEND
+	tristate "Xen virtual keyboard and mouse support"
+	depends on XEN_FBDEV_FRONTEND
+	default y
+	help
+	  This driver implements the front-end of the Xen virtual
+	  keyboard and mouse device driver.  It communicates with a back-end
+	  in another domain.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 2ae87b19caa8..98c4f9a77876 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -23,3 +23,5 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
 obj-$(CONFIG_INPUT_MISC)	+= misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)	+= apm-power.o
+
+obj-$(CONFIG_XEN_KBDDEV_FRONTEND)	+= xen-kbdfront.o
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
new file mode 100644
index 000000000000..0f47f4697cdf
--- /dev/null
+++ b/drivers/input/xen-kbdfront.c
@@ -0,0 +1,340 @@
+/*
+ * Xen para-virtual input device
+ *
+ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
+ * Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
+ *
+ * Based on linux/drivers/input/mouse/sermouse.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+/*
+ * TODO:
+ *
+ * Switch to grant tables together with xen-fbfront.c.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/interface/io/fbif.h>
+#include <xen/interface/io/kbdif.h>
+#include <xen/xenbus.h>
+
+struct xenkbd_info {
+	struct input_dev *kbd;
+	struct input_dev *ptr;
+	struct xenkbd_page *page;
+	int irq;
+	struct xenbus_device *xbdev;
+	char phys[32];
+};
+
+static int xenkbd_remove(struct xenbus_device *);
+static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
+static void xenkbd_disconnect_backend(struct xenkbd_info *);
+
+/*
+ * Note: if you need to send out events, see xenfb_do_update() for how
+ * to do that.
+ */
+
+static irqreturn_t input_handler(int rq, void *dev_id)
+{
+	struct xenkbd_info *info = dev_id;
+	struct xenkbd_page *page = info->page;
+	__u32 cons, prod;
+
+	prod = page->in_prod;
+	if (prod == page->in_cons)
+		return IRQ_HANDLED;
+	rmb();			/* ensure we see ring contents up to prod */
+	for (cons = page->in_cons; cons != prod; cons++) {
+		union xenkbd_in_event *event;
+		struct input_dev *dev;
+		event = &XENKBD_IN_RING_REF(page, cons);
+
+		dev = info->ptr;
+		switch (event->type) {
+		case XENKBD_TYPE_MOTION:
+			input_report_rel(dev, REL_X, event->motion.rel_x);
+			input_report_rel(dev, REL_Y, event->motion.rel_y);
+			break;
+		case XENKBD_TYPE_KEY:
+			dev = NULL;
+			if (test_bit(event->key.keycode, info->kbd->keybit))
+				dev = info->kbd;
+			if (test_bit(event->key.keycode, info->ptr->keybit))
+				dev = info->ptr;
+			if (dev)
+				input_report_key(dev, event->key.keycode,
+						 event->key.pressed);
+			else
+				printk(KERN_WARNING
+				       "xenkbd: unhandled keycode 0x%x\n",
+				       event->key.keycode);
+			break;
+		case XENKBD_TYPE_POS:
+			input_report_abs(dev, ABS_X, event->pos.abs_x);
+			input_report_abs(dev, ABS_Y, event->pos.abs_y);
+			break;
+		}
+		if (dev)
+			input_sync(dev);
+	}
+	mb();			/* ensure we got ring contents */
+	page->in_cons = cons;
+	notify_remote_via_irq(info->irq);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit xenkbd_probe(struct xenbus_device *dev,
+				  const struct xenbus_device_id *id)
+{
+	int ret, i;
+	struct xenkbd_info *info;
+	struct input_dev *kbd, *ptr;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+		return -ENOMEM;
+	}
+	dev->dev.driver_data = info;
+	info->xbdev = dev;
+	info->irq = -1;
+	snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
+
+	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	if (!info->page)
+		goto error_nomem;
+
+	/* keyboard */
+	kbd = input_allocate_device();
+	if (!kbd)
+		goto error_nomem;
+	kbd->name = "Xen Virtual Keyboard";
+	kbd->phys = info->phys;
+	kbd->id.bustype = BUS_PCI;
+	kbd->id.vendor = 0x5853;
+	kbd->id.product = 0xffff;
+	kbd->evbit[0] = BIT(EV_KEY);
+	for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
+		set_bit(i, kbd->keybit);
+	for (i = KEY_OK; i < KEY_MAX; i++)
+		set_bit(i, kbd->keybit);
+
+	ret = input_register_device(kbd);
+	if (ret) {
+		input_free_device(kbd);
+		xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
+		goto error;
+	}
+	info->kbd = kbd;
+
+	/* pointing device */
+	ptr = input_allocate_device();
+	if (!ptr)
+		goto error_nomem;
+	ptr->name = "Xen Virtual Pointer";
+	ptr->phys = info->phys;
+	ptr->id.bustype = BUS_PCI;
+	ptr->id.vendor = 0x5853;
+	ptr->id.product = 0xfffe;
+	ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
+	for (i = BTN_LEFT; i <= BTN_TASK; i++)
+		set_bit(i, ptr->keybit);
+	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
+	input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
+	input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
+
+	ret = input_register_device(ptr);
+	if (ret) {
+		input_free_device(ptr);
+		xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
+		goto error;
+	}
+	info->ptr = ptr;
+
+	ret = xenkbd_connect_backend(dev, info);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+ error_nomem:
+	ret = -ENOMEM;
+	xenbus_dev_fatal(dev, ret, "allocating device memory");
+ error:
+	xenkbd_remove(dev);
+	return ret;
+}
+
+static int xenkbd_resume(struct xenbus_device *dev)
+{
+	struct xenkbd_info *info = dev->dev.driver_data;
+
+	xenkbd_disconnect_backend(info);
+	memset(info->page, 0, PAGE_SIZE);
+	return xenkbd_connect_backend(dev, info);
+}
+
+static int xenkbd_remove(struct xenbus_device *dev)
+{
+	struct xenkbd_info *info = dev->dev.driver_data;
+
+	xenkbd_disconnect_backend(info);
+	if (info->kbd)
+		input_unregister_device(info->kbd);
+	if (info->ptr)
+		input_unregister_device(info->ptr);
+	free_page((unsigned long)info->page);
+	kfree(info);
+	return 0;
+}
+
+static int xenkbd_connect_backend(struct xenbus_device *dev,
+				  struct xenkbd_info *info)
+{
+	int ret, evtchn;
+	struct xenbus_transaction xbt;
+
+	ret = xenbus_alloc_evtchn(dev, &evtchn);
+	if (ret)
+		return ret;
+	ret = bind_evtchn_to_irqhandler(evtchn, input_handler,
+					0, dev->devicetype, info);
+	if (ret < 0) {
+		xenbus_free_evtchn(dev, evtchn);
+		xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
+		return ret;
+	}
+	info->irq = ret;
+
+ again:
+	ret = xenbus_transaction_start(&xbt);
+	if (ret) {
+		xenbus_dev_fatal(dev, ret, "starting transaction");
+		return ret;
+	}
+	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
+			    virt_to_mfn(info->page));
+	if (ret)
+		goto error_xenbus;
+	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+			    evtchn);
+	if (ret)
+		goto error_xenbus;
+	ret = xenbus_transaction_end(xbt, 0);
+	if (ret) {
+		if (ret == -EAGAIN)
+			goto again;
+		xenbus_dev_fatal(dev, ret, "completing transaction");
+		return ret;
+	}
+
+	xenbus_switch_state(dev, XenbusStateInitialised);
+	return 0;
+
+ error_xenbus:
+	xenbus_transaction_end(xbt, 1);
+	xenbus_dev_fatal(dev, ret, "writing xenstore");
+	return ret;
+}
+
+static void xenkbd_disconnect_backend(struct xenkbd_info *info)
+{
+	if (info->irq >= 0)
+		unbind_from_irqhandler(info->irq, info);
+	info->irq = -1;
+}
+
+static void xenkbd_backend_changed(struct xenbus_device *dev,
+				   enum xenbus_state backend_state)
+{
+	struct xenkbd_info *info = dev->dev.driver_data;
+	int ret, val;
+
+	switch (backend_state) {
+	case XenbusStateInitialising:
+	case XenbusStateInitialised:
+	case XenbusStateUnknown:
+	case XenbusStateClosed:
+		break;
+
+	case XenbusStateInitWait:
+InitWait:
+		ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+				   "feature-abs-pointer", "%d", &val);
+		if (ret < 0)
+			val = 0;
+		if (val) {
+			ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+					    "request-abs-pointer", "1");
+			if (ret)
+				printk(KERN_WARNING
+				       "xenkbd: can't request abs-pointer");
+		}
+		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateConnected:
+		/*
+		 * Work around xenbus race condition: If backend goes
+		 * through InitWait to Connected fast enough, we can
+		 * get Connected twice here.
+		 */
+		if (dev->state != XenbusStateConnected)
+			goto InitWait; /* no InitWait seen yet, fudge it */
+		break;
+
+	case XenbusStateClosing:
+		xenbus_frontend_closed(dev);
+		break;
+	}
+}
+
+static struct xenbus_device_id xenkbd_ids[] = {
+	{ "vkbd" },
+	{ "" }
+};
+
+static struct xenbus_driver xenkbd = {
+	.name = "vkbd",
+	.owner = THIS_MODULE,
+	.ids = xenkbd_ids,
+	.probe = xenkbd_probe,
+	.remove = xenkbd_remove,
+	.resume = xenkbd_resume,
+	.otherend_changed = xenkbd_backend_changed,
+};
+
+static int __init xenkbd_init(void)
+{
+	if (!is_running_on_xen())
+		return -ENODEV;
+
+	/* Nothing to do if running in dom0. */
+	if (is_initial_xendomain())
+		return -ENODEV;
+
+	return xenbus_register_frontend(&xenkbd);
+}
+
+static void __exit xenkbd_cleanup(void)
+{
+	xenbus_unregister_driver(&xenkbd);
+}
+
+module_init(xenkbd_init);
+module_exit(xenkbd_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d9aa7edb8780..7be09eeea293 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -3,10 +3,10 @@
 #
 
 dm-mod-objs	:= dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-		   dm-ioctl.o dm-io.o kcopyd.o
+		   dm-ioctl.o dm-io.o dm-kcopyd.o
 dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
 dm-snapshot-objs := dm-snap.o dm-exception-store.o
-dm-mirror-objs	:= dm-log.o dm-raid1.o
+dm-mirror-objs	:= dm-raid1.o
 dm-rdac-objs	:= dm-mpath-rdac.o
 dm-hp-sw-objs	:= dm-mpath-hp-sw.o
 md-mod-objs	:= md.o bitmap.o
@@ -39,7 +39,7 @@ obj-$(CONFIG_DM_MULTIPATH_EMC)	+= dm-emc.o
 obj-$(CONFIG_DM_MULTIPATH_HP)	+= dm-hp-sw.o
 obj-$(CONFIG_DM_MULTIPATH_RDAC)	+= dm-rdac.o
 obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
-obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o
+obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
 
 quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 5bbce29f143a..41f408068a7c 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -9,13 +9,13 @@
 
 #include "dm.h"
 #include "dm-snap.h"
-#include "dm-io.h"
-#include "kcopyd.h"
 
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
 
 #define DM_MSG_PREFIX "snapshots"
 #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
@@ -131,7 +131,7 @@ struct pstore {
 
 static unsigned sectors_to_pages(unsigned sectors)
 {
-	return sectors / (PAGE_SIZE >> 9);
+	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
 }
 
 static int alloc_area(struct pstore *ps)
@@ -159,7 +159,7 @@ static void free_area(struct pstore *ps)
 }
 
 struct mdata_req {
-	struct io_region *where;
+	struct dm_io_region *where;
 	struct dm_io_request *io_req;
 	struct work_struct work;
 	int result;
@@ -177,7 +177,7 @@ static void do_metadata(struct work_struct *work)
  */
 static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 {
-	struct io_region where = {
+	struct dm_io_region where = {
 		.bdev = ps->snap->cow->bdev,
 		.sector = ps->snap->chunk_size * chunk,
 		.count = ps->snap->chunk_size,
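
The sectors_to_pages() change above swaps truncating division for round-up division. With 4 KiB pages there are PAGE_SIZE >> 9 = 8 sectors per page, so the old expression under-allocated whenever the sector count was not a multiple of 8. A small standalone illustration (userspace, assuming 4 KiB pages; the values are illustrative):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned sectors = 9;	/* spans two pages, not one */

	/* old: 9 / 8 = 1 page, losing the ninth sector */
	printf("truncating: %u\n", sectors / (PAGE_SIZE >> 9));
	/* new: (9 + 7) / 8 = 2 pages */
	printf("round-up:   %u\n", DIV_ROUND_UP(sectors, PAGE_SIZE >> 9));
	return 0;
}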
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 8f25f628ef16..4789c42d9a3a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,13 +5,14 @@
  * This file is released under the GPL.
  */
 
-#include "dm-io.h"
+#include "dm.h"
 
 #include <linux/bio.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
 
 struct dm_io_client {
 	mempool_t *pool;
@@ -20,7 +21,7 @@ struct dm_io_client {
 
 /* FIXME: can we shrink this ? */
 struct io {
-	unsigned long error;
+	unsigned long error_bits;
 	atomic_t count;
 	struct task_struct *sleeper;
 	struct dm_io_client *client;
@@ -107,14 +108,14 @@ static inline unsigned bio_get_region(struct bio *bio)
 static void dec_count(struct io *io, unsigned int region, int error)
 {
 	if (error)
-		set_bit(region, &io->error);
+		set_bit(region, &io->error_bits);
 
 	if (atomic_dec_and_test(&io->count)) {
 		if (io->sleeper)
 			wake_up_process(io->sleeper);
 
 		else {
-			unsigned long r = io->error;
+			unsigned long r = io->error_bits;
 			io_notify_fn fn = io->callback;
 			void *context = io->context;
 
@@ -271,7 +272,7 @@ static void km_dp_init(struct dpages *dp, void *data)
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
-static void do_region(int rw, unsigned int region, struct io_region *where,
+static void do_region(int rw, unsigned region, struct dm_io_region *where,
 		      struct dpages *dp, struct io *io)
 {
 	struct bio *bio;
@@ -320,7 +321,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 }
 
 static void dispatch_io(int rw, unsigned int num_regions,
-			struct io_region *where, struct dpages *dp,
+			struct dm_io_region *where, struct dpages *dp,
 			struct io *io, int sync)
 {
 	int i;
@@ -347,17 +348,17 @@ static void dispatch_io(int rw, unsigned int num_regions,
 }
 
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-		   struct io_region *where, int rw, struct dpages *dp,
+		   struct dm_io_region *where, int rw, struct dpages *dp,
 		   unsigned long *error_bits)
 {
 	struct io io;
 
-	if (num_regions > 1 && rw != WRITE) {
+	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
 		WARN_ON(1);
 		return -EIO;
 	}
 
-	io.error = 0;
+	io.error_bits = 0;
 	atomic_set(&io.count, 1); /* see dispatch_io() */
 	io.sleeper = current;
 	io.client = client;
@@ -378,25 +379,25 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 		return -EINTR;
 
 	if (error_bits)
-		*error_bits = io.error;
+		*error_bits = io.error_bits;
 
-	return io.error ? -EIO : 0;
+	return io.error_bits ? -EIO : 0;
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
-		    struct io_region *where, int rw, struct dpages *dp,
+		    struct dm_io_region *where, int rw, struct dpages *dp,
 		    io_notify_fn fn, void *context)
 {
 	struct io *io;
 
-	if (num_regions > 1 && rw != WRITE) {
+	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
 		WARN_ON(1);
 		fn(1, context);
 		return -EIO;
 	}
 
 	io = mempool_alloc(client->pool, GFP_NOIO);
-	io->error = 0;
+	io->error_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
 	io->sleeper = NULL;
 	io->client = client;
@@ -435,10 +436,15 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 }
 
 /*
- * New collapsed (a)synchronous interface
+ * New collapsed (a)synchronous interface.
+ *
+ * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
+ * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
+ * io_req->bi_rw.  If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
-	  struct io_region *where, unsigned long *sync_error_bits)
+	  struct dm_io_region *where, unsigned long *sync_error_bits)
 {
 	int r;
 	struct dpages dp;
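
For context on the interface these renames touch, here is a hedged sketch of one synchronous read through dm_io(), using the dm_io_request and dm_io_region types the patch settles on. The field usage follows the deleted dm-io.h reproduced below (a NULL notify.fn makes the call synchronous); the helper and its parameters are illustrative, not code from the patch:

/* Hypothetical caller: read the first 4 KiB of bdev into buf through
 * a previously created dm_io_client.  One region only, so the
 * "multiple regions only for writes" rule enforced by sync_io()
 * is not hit. */
static int read_first_page(struct dm_io_client *client,
			   struct block_device *bdev, void *buf)
{
	unsigned long error_bits = 0;
	struct dm_io_region where = {
		.bdev = bdev,
		.sector = 0,
		.count = 8,		/* 8 x 512-byte sectors = 4 KiB */
	};
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_KMEM,	/* buf is plain kernel memory */
		.mem.ptr.addr = buf,
		.notify.fn = NULL,	/* NULL => synchronous dm_io() */
		.client = client,
	};

	return dm_io(&io_req, 1, &where, &error_bits);
}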
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
deleted file mode 100644
index f647e2cceaa6..000000000000
--- a/drivers/md/dm-io.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2003 Sistina Software
- *
- * This file is released under the GPL.
- */
-
-#ifndef _DM_IO_H
-#define _DM_IO_H
-
-#include "dm.h"
-
-struct io_region {
-	struct block_device *bdev;
-	sector_t sector;
-	sector_t count;		/* If this is zero the region is ignored. */
-};
-
-struct page_list {
-	struct page_list *next;
-	struct page *page;
-};
-
-typedef void (*io_notify_fn)(unsigned long error, void *context);
-
-enum dm_io_mem_type {
-	DM_IO_PAGE_LIST,/* Page list */
-	DM_IO_BVEC,	/* Bio vector */
-	DM_IO_VMA,	/* Virtual memory area */
-	DM_IO_KMEM,	/* Kernel memory */
-};
-
-struct dm_io_memory {
-	enum dm_io_mem_type type;
-
-	union {
-		struct page_list *pl;
-		struct bio_vec *bvec;
-		void *vma;
-		void *addr;
-	} ptr;
-
-	unsigned offset;
-};
-
-struct dm_io_notify {
-	io_notify_fn fn;	/* Callback for asynchronous requests */
-	void *context;		/* Passed to callback */
-};
-
-/*
- * IO request structure
- */
-struct dm_io_client;
-struct dm_io_request {
-	int bi_rw;			/* READ|WRITE - not READA */
-	struct dm_io_memory mem;	/* Memory to use for io */
-	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
-	struct dm_io_client *client;	/* Client memory handler */
-};
-
-/*
- * For async io calls, users can alternatively use the dm_io() function below
- * and dm_io_client_create() to create private mempools for the client.
- *
- * Create/destroy may block.
- */
-struct dm_io_client *dm_io_client_create(unsigned num_pages);
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
-void dm_io_client_destroy(struct dm_io_client *client);
-
-/*
- * IO interface using private per-client pools.
- * Each bit in the optional 'sync_error_bits' bitset indicates whether an
- * error occurred doing io to the corresponding region.
- */
-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
-	  struct io_region *region, unsigned long *sync_error_bits);
-
-#endif
diff --git a/drivers/md/kcopyd.c b/drivers/md/dm-kcopyd.c
index e76b52ade690..996802b8a452 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -9,9 +9,8 @@
  * completion notification.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/atomic.h>
-
 #include <linux/blkdev.h>
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -23,24 +22,15 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/dm-kcopyd.h>
 
-#include "kcopyd.h"
-
-static struct workqueue_struct *_kcopyd_wq;
-static struct work_struct _kcopyd_work;
-
-static void wake(void)
-{
-	queue_work(_kcopyd_wq, &_kcopyd_work);
-}
+#include "dm.h"
 
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
-struct kcopyd_client {
-	struct list_head list;
-
+struct dm_kcopyd_client {
 	spinlock_t lock;
 	struct page_list *pages;
 	unsigned int nr_pages;
@@ -50,8 +40,32 @@ struct kcopyd_client {
 
 	wait_queue_head_t destroyq;
 	atomic_t nr_jobs;
+
+	mempool_t *job_pool;
+
+	struct workqueue_struct *kcopyd_wq;
+	struct work_struct kcopyd_work;
+
+/*
+ * We maintain three lists of jobs:
+ *
+ * i) jobs waiting for pages
+ * ii) jobs that have pages, and are waiting for the io to be issued.
+ * iii) jobs that have completed.
+ *
+ * All three of these are protected by job_lock.
+ */
+	spinlock_t job_lock;
+	struct list_head complete_jobs;
+	struct list_head io_jobs;
+	struct list_head pages_jobs;
 };
 
+static void wake(struct dm_kcopyd_client *kc)
+{
+	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
+}
+
 static struct page_list *alloc_pl(void)
 {
 	struct page_list *pl;
@@ -75,7 +89,7 @@ static void free_pl(struct page_list *pl)
 	kfree(pl);
 }
 
-static int kcopyd_get_pages(struct kcopyd_client *kc,
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
 			    unsigned int nr, struct page_list **pages)
 {
 	struct page_list *pl;
@@ -98,7 +112,7 @@ static int kcopyd_get_pages(struct kcopyd_client *kc,
 	return 0;
 }
 
-static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 {
 	struct page_list *cursor;
 
@@ -126,7 +140,7 @@ static void drop_pages(struct page_list *pl)
 	}
 }
 
-static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
+static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 {
 	unsigned int i;
 	struct page_list *pl = NULL, *next;
@@ -147,7 +161,7 @@ static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
 	return 0;
 }
 
-static void client_free_pages(struct kcopyd_client *kc)
+static void client_free_pages(struct dm_kcopyd_client *kc)
 {
 	BUG_ON(kc->nr_free_pages != kc->nr_pages);
 	drop_pages(kc->pages);
@@ -161,7 +175,7 @@ static void client_free_pages(struct kcopyd_client *kc)
  * ever having to do io (which could cause a deadlock).
  *---------------------------------------------------------------*/
 struct kcopyd_job {
-	struct kcopyd_client *kc;
+	struct dm_kcopyd_client *kc;
 	struct list_head list;
 	unsigned long flags;
 
@@ -175,13 +189,13 @@ struct kcopyd_job {
 	 * Either READ or WRITE
 	 */
 	int rw;
-	struct io_region source;
+	struct dm_io_region source;
 
 	/*
 	 * The destinations for the transfer.
 	 */
 	unsigned int num_dests;
-	struct io_region dests[KCOPYD_MAX_REGIONS];
+	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
 
 	sector_t offset;
 	unsigned int nr_pages;
@@ -191,7 +205,7 @@ struct kcopyd_job {
 	 * Set this to ensure you are notified when the job has
 	 * completed.  'context' is for callback to use.
 	 */
-	kcopyd_notify_fn fn;
+	dm_kcopyd_notify_fn fn;
 	void *context;
 
 	/*
@@ -207,47 +221,19 @@ struct kcopyd_job {
 #define MIN_JOBS 512
 
 static struct kmem_cache *_job_cache;
-static mempool_t *_job_pool;
 
-/*
- * We maintain three lists of jobs:
- *
- * i) jobs waiting for pages
- * ii) jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
- *
- * All three of these are protected by job_lock.
- */
-static DEFINE_SPINLOCK(_job_lock);
-
-static LIST_HEAD(_complete_jobs);
-static LIST_HEAD(_io_jobs);
-static LIST_HEAD(_pages_jobs);
-
-static int jobs_init(void)
+int __init dm_kcopyd_init(void)
 {
 	_job_cache = KMEM_CACHE(kcopyd_job, 0);
 	if (!_job_cache)
 		return -ENOMEM;
 
-	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
-	if (!_job_pool) {
-		kmem_cache_destroy(_job_cache);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
-static void jobs_exit(void)
+void dm_kcopyd_exit(void)
 {
-	BUG_ON(!list_empty(&_complete_jobs));
-	BUG_ON(!list_empty(&_io_jobs));
-	BUG_ON(!list_empty(&_pages_jobs));
-
-	mempool_destroy(_job_pool);
 	kmem_cache_destroy(_job_cache);
-	_job_pool = NULL;
 	_job_cache = NULL;
 }
 
@@ -255,18 +241,19 @@ static void jobs_exit(void)
  * Functions to push and pop a job onto the head of a given job
  * list.
  */
-static struct kcopyd_job *pop(struct list_head *jobs)
+static struct kcopyd_job *pop(struct list_head *jobs,
+			      struct dm_kcopyd_client *kc)
 {
 	struct kcopyd_job *job = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&_job_lock, flags);
+	spin_lock_irqsave(&kc->job_lock, flags);
 
 	if (!list_empty(jobs)) {
 		job = list_entry(jobs->next, struct kcopyd_job, list);
 		list_del(&job->list);
 	}
-	spin_unlock_irqrestore(&_job_lock, flags);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
 
 	return job;
 }
@@ -274,10 +261,11 @@ static struct kcopyd_job *pop(struct list_head *jobs)
 static void push(struct list_head *jobs, struct kcopyd_job *job)
 {
 	unsigned long flags;
+	struct dm_kcopyd_client *kc = job->kc;
 
-	spin_lock_irqsave(&_job_lock, flags);
+	spin_lock_irqsave(&kc->job_lock, flags);
 	list_add_tail(&job->list, jobs);
-	spin_unlock_irqrestore(&_job_lock, flags);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
 }
 
 /*
@@ -294,11 +282,11 @@ static int run_complete_job(struct kcopyd_job *job)
 	void *context = job->context;
 	int read_err = job->read_err;
 	unsigned long write_err = job->write_err;
-	kcopyd_notify_fn fn = job->fn;
-	struct kcopyd_client *kc = job->kc;
+	dm_kcopyd_notify_fn fn = job->fn;
+	struct dm_kcopyd_client *kc = job->kc;
 
 	kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, _job_pool);
+	mempool_free(job, kc->job_pool);
 	fn(read_err, write_err, context);
 
 	if (atomic_dec_and_test(&kc->nr_jobs))
@@ -310,6 +298,7 @@ static int run_complete_job(struct kcopyd_job *job)
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct dm_kcopyd_client *kc = job->kc;
 
 	if (error) {
 		if (job->rw == WRITE)
@@ -317,22 +306,22 @@ static void complete_io(unsigned long error, void *context)
 		else
 			job->read_err = 1;
 
-		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
-			push(&_complete_jobs, job);
-			wake();
+		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+			push(&kc->complete_jobs, job);
+			wake(kc);
 			return;
 		}
 	}
 
 	if (job->rw == WRITE)
-		push(&_complete_jobs, job);
+		push(&kc->complete_jobs, job);
 
 	else {
 		job->rw = WRITE;
-		push(&_io_jobs, job);
+		push(&kc->io_jobs, job);
 	}
 
-	wake();
+	wake(kc);
 }
 
 /*
@@ -343,7 +332,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw,
+		.bi_rw = job->rw | (1 << BIO_RW_SYNC),
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
@@ -369,7 +358,7 @@ static int run_pages_job(struct kcopyd_job *job)
 	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
 	if (!r) {
 		/* this job is ready for io */
-		push(&_io_jobs, job);
+		push(&job->kc->io_jobs, job);
 		return 0;
 	}
 
@@ -384,12 +373,13 @@ static int run_pages_job(struct kcopyd_job *job)
  * Run through a list for as long as possible.  Returns the count
  * of successful jobs.
  */
-static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
+static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
+			int (*fn) (struct kcopyd_job *))
 {
 	struct kcopyd_job *job;
 	int r, count = 0;
 
-	while ((job = pop(jobs))) {
+	while ((job = pop(jobs, kc))) {
 
 		r = fn(job);
 
@@ -399,7 +389,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 				job->write_err = (unsigned long) -1L;
 			else
 				job->read_err = 1;
-			push(&_complete_jobs, job);
+			push(&kc->complete_jobs, job);
 			break;
 		}
 
@@ -421,8 +411,11 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(struct work_struct *ignored)
+static void do_work(struct work_struct *work)
 {
+	struct dm_kcopyd_client *kc = container_of(work,
+					struct dm_kcopyd_client, kcopyd_work);
+
 	/*
 	 * The order that these are called is *very* important.
 	 * complete jobs can free some pages for pages jobs.
@@ -430,9 +423,9 @@ static void do_work(struct work_struct *work)
 	 * list.  io jobs call wake when they complete and it all
 	 * starts again.
 	 */
-	process_jobs(&_complete_jobs, run_complete_job);
-	process_jobs(&_pages_jobs, run_pages_job);
-	process_jobs(&_io_jobs, run_io_job);
+	process_jobs(&kc->complete_jobs, kc, run_complete_job);
+	process_jobs(&kc->pages_jobs, kc, run_pages_job);
+	process_jobs(&kc->io_jobs, kc, run_io_job);
 }
 
 /*
@@ -442,9 +435,10 @@ static void do_work(struct work_struct *work)
 */
 static void dispatch_job(struct kcopyd_job *job)
 {
-	atomic_inc(&job->kc->nr_jobs);
-	push(&_pages_jobs, job);
-	wake();
+	struct dm_kcopyd_client *kc = job->kc;
+	atomic_inc(&kc->nr_jobs);
+	push(&kc->pages_jobs, job);
+	wake(kc);
 }
 
 #define SUB_JOB_SIZE 128
@@ -469,7 +463,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 	 * Only dispatch more work if there hasn't been an error.
 	 */
 	if ((!job->read_err && !job->write_err) ||
-	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
+	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
 		/* get the next chunk of work */
 		progress = job->progress;
 		count = job->source.count - progress;
@@ -484,7 +478,8 @@ static void segment_complete(int read_err, unsigned long write_err,
 
 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
+		struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
+							   GFP_NOIO);
 
 		*sub_job = *job;
 		sub_job->source.sector += progress;
@@ -508,7 +503,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 	 * after we've completed.
 	 */
 	job->fn(read_err, write_err, job->context);
-	mempool_free(job, _job_pool);
+	mempool_free(job, job->kc->job_pool);
 	}
 }
 
@@ -526,16 +521,16 @@ static void split_job(struct kcopyd_job *job)
 	segment_complete(0, 0u, job);
 }
 
-int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
-		unsigned int num_dests, struct io_region *dests,
-		unsigned int flags, kcopyd_notify_fn fn, void *context)
+int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+		   unsigned int num_dests, struct dm_io_region *dests,
+		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
 {
 	struct kcopyd_job *job;
 
 	/*
 	 * Allocate a new job.
 	 */
-	job = mempool_alloc(_job_pool, GFP_NOIO);
+	job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
 	/*
 	 * set up for the read.
@@ -569,6 +564,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 
 	return 0;
 }
+EXPORT_SYMBOL(dm_kcopyd_copy);
 
 /*
  * Cancels a kcopyd job, eg. someone might be deactivating a
@@ -583,126 +579,76 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
583#endif /* 0 */ 579#endif /* 0 */
584 580
585/*----------------------------------------------------------------- 581/*-----------------------------------------------------------------
586 * Unit setup 582 * Client setup
587 *---------------------------------------------------------------*/ 583 *---------------------------------------------------------------*/
588static DEFINE_MUTEX(_client_lock); 584int dm_kcopyd_client_create(unsigned int nr_pages,
589static LIST_HEAD(_clients); 585 struct dm_kcopyd_client **result)
590
591static void client_add(struct kcopyd_client *kc)
592{ 586{
593 mutex_lock(&_client_lock); 587 int r = -ENOMEM;
594 list_add(&kc->list, &_clients); 588 struct dm_kcopyd_client *kc;
595 mutex_unlock(&_client_lock);
596}
597
598static void client_del(struct kcopyd_client *kc)
599{
600 mutex_lock(&_client_lock);
601 list_del(&kc->list);
602 mutex_unlock(&_client_lock);
603}
604
605static DEFINE_MUTEX(kcopyd_init_lock);
606static int kcopyd_clients = 0;
607 589
608static int kcopyd_init(void) 590 kc = kmalloc(sizeof(*kc), GFP_KERNEL);
609{ 591 if (!kc)
610 int r;
611
612 mutex_lock(&kcopyd_init_lock);
613
614 if (kcopyd_clients) {
615 /* Already initialized. */
616 kcopyd_clients++;
617 mutex_unlock(&kcopyd_init_lock);
618 return 0;
619 }
620
621 r = jobs_init();
622 if (r) {
623 mutex_unlock(&kcopyd_init_lock);
624 return r;
625 }
626
627 _kcopyd_wq = create_singlethread_workqueue("kcopyd");
628 if (!_kcopyd_wq) {
629 jobs_exit();
630 mutex_unlock(&kcopyd_init_lock);
631 return -ENOMEM; 592 return -ENOMEM;
632 }
633
634 kcopyd_clients++;
635 INIT_WORK(&_kcopyd_work, do_work);
636 mutex_unlock(&kcopyd_init_lock);
637 return 0;
638}
639 593
640static void kcopyd_exit(void) 594 spin_lock_init(&kc->lock);
641{ 595 spin_lock_init(&kc->job_lock);
642 mutex_lock(&kcopyd_init_lock); 596 INIT_LIST_HEAD(&kc->complete_jobs);
643 kcopyd_clients--; 597 INIT_LIST_HEAD(&kc->io_jobs);
644 if (!kcopyd_clients) { 598 INIT_LIST_HEAD(&kc->pages_jobs);
645 jobs_exit();
646 destroy_workqueue(_kcopyd_wq);
647 _kcopyd_wq = NULL;
648 }
649 mutex_unlock(&kcopyd_init_lock);
650}
651
652int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
653{
654 int r = 0;
655 struct kcopyd_client *kc;
656 599
657 r = kcopyd_init(); 600 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
658 if (r) 601 if (!kc->job_pool)
659 return r; 602 goto bad_slab;
660 603
661 kc = kmalloc(sizeof(*kc), GFP_KERNEL); 604 INIT_WORK(&kc->kcopyd_work, do_work);
662 if (!kc) { 605 kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
663 kcopyd_exit(); 606 if (!kc->kcopyd_wq)
664 return -ENOMEM; 607 goto bad_workqueue;
665 }
666 608
667 spin_lock_init(&kc->lock);
668 kc->pages = NULL; 609 kc->pages = NULL;
669 kc->nr_pages = kc->nr_free_pages = 0; 610 kc->nr_pages = kc->nr_free_pages = 0;
670 r = client_alloc_pages(kc, nr_pages); 611 r = client_alloc_pages(kc, nr_pages);
671 if (r) { 612 if (r)
672 kfree(kc); 613 goto bad_client_pages;
673 kcopyd_exit();
674 return r;
675 }
676 614
677 kc->io_client = dm_io_client_create(nr_pages); 615 kc->io_client = dm_io_client_create(nr_pages);
678 if (IS_ERR(kc->io_client)) { 616 if (IS_ERR(kc->io_client)) {
679 r = PTR_ERR(kc->io_client); 617 r = PTR_ERR(kc->io_client);
680 client_free_pages(kc); 618 goto bad_io_client;
681 kfree(kc);
682 kcopyd_exit();
683 return r;
684 } 619 }
685 620
686 init_waitqueue_head(&kc->destroyq); 621 init_waitqueue_head(&kc->destroyq);
687 atomic_set(&kc->nr_jobs, 0); 622 atomic_set(&kc->nr_jobs, 0);
688 623
689 client_add(kc);
690 *result = kc; 624 *result = kc;
691 return 0; 625 return 0;
626
627bad_io_client:
628 client_free_pages(kc);
629bad_client_pages:
630 destroy_workqueue(kc->kcopyd_wq);
631bad_workqueue:
632 mempool_destroy(kc->job_pool);
633bad_slab:
634 kfree(kc);
635
636 return r;
692} 637}
638EXPORT_SYMBOL(dm_kcopyd_client_create);
693 639
694void kcopyd_client_destroy(struct kcopyd_client *kc) 640void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
695{ 641{
696 /* Wait for completion of all jobs submitted by this client. */ 642 /* Wait for completion of all jobs submitted by this client. */
697 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); 643 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
698 644
645 BUG_ON(!list_empty(&kc->complete_jobs));
646 BUG_ON(!list_empty(&kc->io_jobs));
647 BUG_ON(!list_empty(&kc->pages_jobs));
648 destroy_workqueue(kc->kcopyd_wq);
699 dm_io_client_destroy(kc->io_client); 649 dm_io_client_destroy(kc->io_client);
700 client_free_pages(kc); 650 client_free_pages(kc);
701 client_del(kc); 651 mempool_destroy(kc->job_pool);
702 kfree(kc); 652 kfree(kc);
703 kcopyd_exit();
704} 653}
705 654EXPORT_SYMBOL(dm_kcopyd_client_destroy);
706EXPORT_SYMBOL(kcopyd_client_create);
707EXPORT_SYMBOL(kcopyd_client_destroy);
708EXPORT_SYMBOL(kcopyd_copy);
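
To make the renamed per-client interface above concrete, here is a minimal sketch of a caller; the surrounding names (struct my_ctx, my_copy_done, handle_failure) are invented for illustration, while the dm_kcopyd_* calls and the callback signature are exactly the ones this file now exports:

/* Sketch only: a hypothetical target copying one region to one mirror. */
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

struct my_ctx {
	struct dm_kcopyd_client *kc;
};

/* Matches dm_kcopyd_notify_fn; runs from the client's own workqueue
 * once every destination write has completed or failed. */
static void my_copy_done(int read_err, unsigned long write_err, void *context)
{
	struct my_ctx *ctx = context;

	if (read_err || write_err)	/* write_err: one bit per destination */
		handle_failure(ctx);	/* hypothetical error path */
}

static int my_copy(struct my_ctx *ctx, struct dm_io_region *from,
		   struct dm_io_region *to)
{
	unsigned long flags = 0;

	/* Keep writing the remaining destinations even if one fails,
	 * as dm-raid1's recover() does below. */
	set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	return dm_kcopyd_copy(ctx->kc, from, 1, to, flags,
			      my_copy_done, ctx);
}

The client itself would come from dm_kcopyd_client_create(nr_pages, &ctx->kc) in the target constructor and be released with dm_kcopyd_client_destroy(ctx->kc), which now also tears down the per-client workqueue and asserts that all three job lists are empty.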
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 2a74b2142f50..67a6f31b7fc3 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the LGPL.
  */
@@ -8,64 +9,58 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/dm-io.h>
+#include <linux/dm-dirty-log.h>
 
-#include "dm-log.h"
-#include "dm-io.h"
+#include "dm.h"
 
-#define DM_MSG_PREFIX "mirror log"
+#define DM_MSG_PREFIX "dirty region log"
 
-static LIST_HEAD(_log_types);
-static DEFINE_SPINLOCK(_lock);
+struct dm_dirty_log_internal {
+	struct dm_dirty_log_type *type;
 
-int dm_register_dirty_log_type(struct dirty_log_type *type)
-{
-	spin_lock(&_lock);
-	type->use_count = 0;
-	list_add(&type->list, &_log_types);
-	spin_unlock(&_lock);
+	struct list_head list;
+	long use;
+};
 
-	return 0;
-}
+static LIST_HEAD(_log_types);
+static DEFINE_SPINLOCK(_lock);
 
-int dm_unregister_dirty_log_type(struct dirty_log_type *type)
+static struct dm_dirty_log_internal *__find_dirty_log_type(const char *name)
 {
-	spin_lock(&_lock);
-
-	if (type->use_count)
-		DMWARN("Attempt to unregister a log type that is still in use");
-	else
-		list_del(&type->list);
+	struct dm_dirty_log_internal *log_type;
 
-	spin_unlock(&_lock);
+	list_for_each_entry(log_type, &_log_types, list)
+		if (!strcmp(name, log_type->type->name))
+			return log_type;
 
-	return 0;
+	return NULL;
 }
 
-static struct dirty_log_type *_get_type(const char *type_name)
+static struct dm_dirty_log_internal *_get_dirty_log_type(const char *name)
 {
-	struct dirty_log_type *type;
+	struct dm_dirty_log_internal *log_type;
 
 	spin_lock(&_lock);
-	list_for_each_entry (type, &_log_types, list)
-		if (!strcmp(type_name, type->name)) {
-			if (!type->use_count && !try_module_get(type->module)) {
-				spin_unlock(&_lock);
-				return NULL;
-			}
-			type->use_count++;
-			spin_unlock(&_lock);
-			return type;
-		}
 
-	spin_unlock(&_lock);
-	return NULL;
+	log_type = __find_dirty_log_type(name);
+	if (log_type) {
+		if (!log_type->use && !try_module_get(log_type->type->module))
+			log_type = NULL;
+		else
+			log_type->use++;
+	}
+
+	spin_unlock(&_lock);
+
+	return log_type;
 }
 
 /*
  * get_type
  * @type_name
  *
- * Attempt to retrieve the dirty_log_type by name. If not already
+ * Attempt to retrieve the dm_dirty_log_type by name. If not already
  * available, attempt to load the appropriate module.
  *
  * Log modules are named "dm-log-" followed by the 'type_name'.
@@ -78,14 +73,17 @@ static struct dirty_log_type *_get_type(const char *type_name)
  *
  * Returns: dirty_log_type* on success, NULL on failure
  */
-static struct dirty_log_type *get_type(const char *type_name)
+static struct dm_dirty_log_type *get_type(const char *type_name)
 {
 	char *p, *type_name_dup;
-	struct dirty_log_type *type;
+	struct dm_dirty_log_internal *log_type;
+
+	if (!type_name)
+		return NULL;
 
-	type = _get_type(type_name);
-	if (type)
-		return type;
+	log_type = _get_dirty_log_type(type_name);
+	if (log_type)
+		return log_type->type;
 
 	type_name_dup = kstrdup(type_name, GFP_KERNEL);
 	if (!type_name_dup) {
@@ -95,34 +93,106 @@ static struct dirty_log_type *get_type(const char *type_name)
 	}
 
 	while (request_module("dm-log-%s", type_name_dup) ||
-	       !(type = _get_type(type_name))) {
+	       !(log_type = _get_dirty_log_type(type_name))) {
 		p = strrchr(type_name_dup, '-');
 		if (!p)
 			break;
 		p[0] = '\0';
 	}
 
-	if (!type)
+	if (!log_type)
 		DMWARN("Module for logging type \"%s\" not found.", type_name);
 
 	kfree(type_name_dup);
 
-	return type;
+	return log_type ? log_type->type : NULL;
 }
 
-static void put_type(struct dirty_log_type *type)
+static void put_type(struct dm_dirty_log_type *type)
 {
+	struct dm_dirty_log_internal *log_type;
+
+	if (!type)
+		return;
+
 	spin_lock(&_lock);
-	if (!--type->use_count)
+	log_type = __find_dirty_log_type(type->name);
+	if (!log_type)
+		goto out;
+
+	if (!--log_type->use)
 		module_put(type->module);
+
+	BUG_ON(log_type->use < 0);
+
+out:
 	spin_unlock(&_lock);
 }
 
-struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
-				      unsigned int argc, char **argv)
+static struct dm_dirty_log_internal *_alloc_dirty_log_type(struct dm_dirty_log_type *type)
 {
-	struct dirty_log_type *type;
-	struct dirty_log *log;
+	struct dm_dirty_log_internal *log_type = kzalloc(sizeof(*log_type),
+							 GFP_KERNEL);
+
+	if (log_type)
+		log_type->type = type;
+
+	return log_type;
+}
+
+int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
+{
+	struct dm_dirty_log_internal *log_type = _alloc_dirty_log_type(type);
+	int r = 0;
+
+	if (!log_type)
+		return -ENOMEM;
+
+	spin_lock(&_lock);
+	if (!__find_dirty_log_type(type->name))
+		list_add(&log_type->list, &_log_types);
+	else {
+		kfree(log_type);
+		r = -EEXIST;
+	}
+	spin_unlock(&_lock);
+
+	return r;
+}
+EXPORT_SYMBOL(dm_dirty_log_type_register);
+
+int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
+{
+	struct dm_dirty_log_internal *log_type;
+
+	spin_lock(&_lock);
+
+	log_type = __find_dirty_log_type(type->name);
+	if (!log_type) {
+		spin_unlock(&_lock);
+		return -EINVAL;
+	}
+
+	if (log_type->use) {
+		spin_unlock(&_lock);
+		return -ETXTBSY;
+	}
+
+	list_del(&log_type->list);
+
+	spin_unlock(&_lock);
+	kfree(log_type);
+
+	return 0;
+}
+EXPORT_SYMBOL(dm_dirty_log_type_unregister);
+
+struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
+					 struct dm_target *ti,
+					 unsigned int argc, char **argv)
+{
+	struct dm_dirty_log_type *type;
+	struct dm_dirty_log *log;
 
 	log = kmalloc(sizeof(*log), GFP_KERNEL);
 	if (!log)
@@ -143,13 +213,15 @@ struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *t
 
 	return log;
 }
+EXPORT_SYMBOL(dm_dirty_log_create);
 
-void dm_destroy_dirty_log(struct dirty_log *log)
+void dm_dirty_log_destroy(struct dm_dirty_log *log)
 {
 	log->type->dtr(log);
 	put_type(log->type);
 	kfree(log);
 }
+EXPORT_SYMBOL(dm_dirty_log_destroy);
 
 /*-----------------------------------------------------------------
  * Persistent and core logs share a lot of their implementation.
@@ -207,7 +279,7 @@ struct log_c {
 	struct dm_dev *log_dev;
 	struct log_header header;
 
-	struct io_region header_location;
+	struct dm_io_region header_location;
 	struct log_header *disk_header;
 };
 
@@ -215,7 +287,7 @@ struct log_c {
  * The touched member needs to be updated every time we access
  * one of the bitsets.
  */
 static inline int log_test_bit(uint32_t *bs, unsigned bit)
 {
 	return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
 }
@@ -302,7 +374,7 @@ static inline int write_header(struct log_c *log)
  * argv contains region_size followed optionally by [no]sync
  *--------------------------------------------------------------*/
 #define BYTE_SHIFT 3
-static int create_log_context(struct dirty_log *log, struct dm_target *ti,
+static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 			      unsigned int argc, char **argv,
 			      struct dm_dev *dev)
 {
@@ -315,7 +387,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
 	int r;
 
 	if (argc < 1 || argc > 2) {
-		DMWARN("wrong number of arguments to mirror log");
+		DMWARN("wrong number of arguments to dirty region log");
 		return -EINVAL;
 	}
 
@@ -325,8 +397,8 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
 		else if (!strcmp(argv[1], "nosync"))
 			sync = NOSYNC;
 		else {
-			DMWARN("unrecognised sync argument to mirror log: %s",
-			       argv[1]);
+			DMWARN("unrecognised sync argument to "
+			       "dirty region log: %s", argv[1]);
 			return -EINVAL;
 		}
 	}
@@ -434,7 +506,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
 	return 0;
 }
 
-static int core_ctr(struct dirty_log *log, struct dm_target *ti,
+static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 		    unsigned int argc, char **argv)
 {
 	return create_log_context(log, ti, argc, argv, NULL);
@@ -447,7 +519,7 @@ static void destroy_log_context(struct log_c *lc)
 	kfree(lc);
 }
 
-static void core_dtr(struct dirty_log *log)
+static void core_dtr(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 
@@ -460,14 +532,14 @@ static void core_dtr(struct dirty_log *log)
  *
  * argv contains log_device region_size followed optionally by [no]sync
  *--------------------------------------------------------------*/
-static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
+static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 		    unsigned int argc, char **argv)
 {
 	int r;
 	struct dm_dev *dev;
 
 	if (argc < 2 || argc > 3) {
-		DMWARN("wrong number of arguments to disk mirror log");
+		DMWARN("wrong number of arguments to disk dirty region log");
 		return -EINVAL;
 	}
 
@@ -485,7 +557,7 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
 	return 0;
 }
 
-static void disk_dtr(struct dirty_log *log)
+static void disk_dtr(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 
@@ -514,7 +586,7 @@ static void fail_log_device(struct log_c *lc)
 	dm_table_event(lc->ti->table);
 }
 
-static int disk_resume(struct dirty_log *log)
+static int disk_resume(struct dm_dirty_log *log)
 {
 	int r;
 	unsigned i;
@@ -524,7 +596,7 @@ static int disk_resume(struct dirty_log *log)
 	/* read the disk header */
 	r = read_header(lc);
 	if (r) {
-		DMWARN("%s: Failed to read header on mirror log device",
+		DMWARN("%s: Failed to read header on dirty region log device",
 		       lc->log_dev->name);
 		fail_log_device(lc);
 		/*
@@ -562,7 +634,7 @@ static int disk_resume(struct dirty_log *log)
 	/* write the new header */
 	r = write_header(lc);
 	if (r) {
-		DMWARN("%s: Failed to write header on mirror log device",
+		DMWARN("%s: Failed to write header on dirty region log device",
 		       lc->log_dev->name);
 		fail_log_device(lc);
 	}
@@ -570,38 +642,38 @@ static int disk_resume(struct dirty_log *log)
 	return r;
 }
 
-static uint32_t core_get_region_size(struct dirty_log *log)
+static uint32_t core_get_region_size(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	return lc->region_size;
 }
 
-static int core_resume(struct dirty_log *log)
+static int core_resume(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	lc->sync_search = 0;
 	return 0;
 }
 
-static int core_is_clean(struct dirty_log *log, region_t region)
+static int core_is_clean(struct dm_dirty_log *log, region_t region)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	return log_test_bit(lc->clean_bits, region);
 }
 
-static int core_in_sync(struct dirty_log *log, region_t region, int block)
+static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	return log_test_bit(lc->sync_bits, region);
 }
 
-static int core_flush(struct dirty_log *log)
+static int core_flush(struct dm_dirty_log *log)
 {
 	/* no op */
 	return 0;
 }
 
-static int disk_flush(struct dirty_log *log)
+static int disk_flush(struct dm_dirty_log *log)
 {
 	int r;
 	struct log_c *lc = (struct log_c *) log->context;
@@ -619,19 +691,19 @@ static int disk_flush(struct dirty_log *log)
 	return r;
 }
 
-static void core_mark_region(struct dirty_log *log, region_t region)
+static void core_mark_region(struct dm_dirty_log *log, region_t region)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	log_clear_bit(lc, lc->clean_bits, region);
 }
 
-static void core_clear_region(struct dirty_log *log, region_t region)
+static void core_clear_region(struct dm_dirty_log *log, region_t region)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	log_set_bit(lc, lc->clean_bits, region);
 }
 
-static int core_get_resync_work(struct dirty_log *log, region_t *region)
+static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 
@@ -654,7 +726,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
 	return 1;
 }
 
-static void core_set_region_sync(struct dirty_log *log, region_t region,
+static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
 				 int in_sync)
 {
 	struct log_c *lc = (struct log_c *) log->context;
@@ -669,7 +741,7 @@ static void core_set_region_sync(struct dirty_log *log, region_t region,
 	}
 }
 
-static region_t core_get_sync_count(struct dirty_log *log)
+static region_t core_get_sync_count(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 
@@ -680,7 +752,7 @@ static region_t core_get_sync_count(struct dirty_log *log)
 	if (lc->sync != DEFAULTSYNC) \
 		DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
 
-static int core_status(struct dirty_log *log, status_type_t status,
+static int core_status(struct dm_dirty_log *log, status_type_t status,
 		       char *result, unsigned int maxlen)
 {
 	int sz = 0;
@@ -700,7 +772,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
 	return sz;
 }
 
-static int disk_status(struct dirty_log *log, status_type_t status,
+static int disk_status(struct dm_dirty_log *log, status_type_t status,
 		       char *result, unsigned int maxlen)
 {
 	int sz = 0;
@@ -722,7 +794,7 @@ static int disk_status(struct dirty_log *log, status_type_t status,
 	return sz;
 }
 
-static struct dirty_log_type _core_type = {
+static struct dm_dirty_log_type _core_type = {
 	.name = "core",
 	.module = THIS_MODULE,
 	.ctr = core_ctr,
@@ -740,7 +812,7 @@ static struct dirty_log_type _core_type = {
 	.status = core_status,
 };
 
-static struct dirty_log_type _disk_type = {
+static struct dm_dirty_log_type _disk_type = {
 	.name = "disk",
 	.module = THIS_MODULE,
 	.ctr = disk_ctr,
@@ -763,26 +835,28 @@ int __init dm_dirty_log_init(void)
 {
 	int r;
 
-	r = dm_register_dirty_log_type(&_core_type);
+	r = dm_dirty_log_type_register(&_core_type);
 	if (r)
 		DMWARN("couldn't register core log");
 
-	r = dm_register_dirty_log_type(&_disk_type);
+	r = dm_dirty_log_type_register(&_disk_type);
 	if (r) {
 		DMWARN("couldn't register disk type");
-		dm_unregister_dirty_log_type(&_core_type);
+		dm_dirty_log_type_unregister(&_core_type);
 	}
 
 	return r;
 }
 
-void dm_dirty_log_exit(void)
+void __exit dm_dirty_log_exit(void)
 {
-	dm_unregister_dirty_log_type(&_disk_type);
-	dm_unregister_dirty_log_type(&_core_type);
+	dm_dirty_log_type_unregister(&_disk_type);
+	dm_dirty_log_type_unregister(&_core_type);
 }
 
-EXPORT_SYMBOL(dm_register_dirty_log_type);
-EXPORT_SYMBOL(dm_unregister_dirty_log_type);
-EXPORT_SYMBOL(dm_create_dirty_log);
-EXPORT_SYMBOL(dm_destroy_dirty_log);
+module_init(dm_dirty_log_init);
+module_exit(dm_dirty_log_exit);
+
+MODULE_DESCRIPTION(DM_NAME " dirty region log");
+MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
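
The register/unregister pair above is what external log implementations now link against. A skeleton of such a module, as a sketch: the "nulllog" type and its trivial methods are invented for illustration, and a usable type must fill in every method that the new linux/dm-dirty-log.h header declares.

/* Sketch only: skeleton of an external dirty log type. */
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-dirty-log.h>

static int null_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	log->context = NULL;
	return 0;
}

static void null_dtr(struct dm_dirty_log *log)
{
}

static struct dm_dirty_log_type _null_type = {
	.name = "nulllog",
	.module = THIS_MODULE,
	.ctr = null_ctr,
	.dtr = null_dtr,
	/* is_clean, in_sync, flush, mark/clear_region, get_resync_work,
	 * set_region_sync, get_sync_count, status etc. omitted here */
};

static int __init null_log_init(void)
{
	/* Fails with -EEXIST if the name is already taken. */
	return dm_dirty_log_type_register(&_null_type);
}

static void __exit null_log_exit(void)
{
	/* Fails with -ETXTBSY while the type is still in use. */
	dm_dirty_log_type_unregister(&_null_type);
}

module_init(null_log_init);
module_exit(null_log_exit);
MODULE_LICENSE("GPL");

Built as dm-log-nulllog.ko, such a module would be loaded automatically by get_type()'s request_module("dm-log-%s", ...) fallback.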
diff --git a/drivers/md/dm-log.h b/drivers/md/dm-log.h
deleted file mode 100644
index 3fae87eb5963..000000000000
--- a/drivers/md/dm-log.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) 2003 Sistina Software
- *
- * This file is released under the LGPL.
- */
-
-#ifndef DM_DIRTY_LOG
-#define DM_DIRTY_LOG
-
-#include "dm.h"
-
-typedef sector_t region_t;
-
-struct dirty_log_type;
-
-struct dirty_log {
-	struct dirty_log_type *type;
-	void *context;
-};
-
-struct dirty_log_type {
-	struct list_head list;
-	const char *name;
-	struct module *module;
-	unsigned int use_count;
-
-	int (*ctr)(struct dirty_log *log, struct dm_target *ti,
-		   unsigned int argc, char **argv);
-	void (*dtr)(struct dirty_log *log);
-
-	/*
-	 * There are times when we don't want the log to touch
-	 * the disk.
-	 */
-	int (*presuspend)(struct dirty_log *log);
-	int (*postsuspend)(struct dirty_log *log);
-	int (*resume)(struct dirty_log *log);
-
-	/*
-	 * Retrieves the smallest size of region that the log can
-	 * deal with.
-	 */
-	uint32_t (*get_region_size)(struct dirty_log *log);
-
-	/*
-	 * A predicate to say whether a region is clean or not.
-	 * May block.
-	 */
-	int (*is_clean)(struct dirty_log *log, region_t region);
-
-	/*
-	 * Returns: 0, 1, -EWOULDBLOCK, < 0
-	 *
-	 * A predicate function to check the area given by
-	 * [sector, sector + len) is in sync.
-	 *
-	 * If -EWOULDBLOCK is returned the state of the region is
-	 * unknown, typically this will result in a read being
-	 * passed to a daemon to deal with, since a daemon is
-	 * allowed to block.
-	 */
-	int (*in_sync)(struct dirty_log *log, region_t region, int can_block);
-
-	/*
-	 * Flush the current log state (eg, to disk). This
-	 * function may block.
-	 */
-	int (*flush)(struct dirty_log *log);
-
-	/*
-	 * Mark an area as clean or dirty. These functions may
-	 * block, though for performance reasons blocking should
-	 * be extremely rare (eg, allocating another chunk of
-	 * memory for some reason).
-	 */
-	void (*mark_region)(struct dirty_log *log, region_t region);
-	void (*clear_region)(struct dirty_log *log, region_t region);
-
-	/*
-	 * Returns: <0 (error), 0 (no region), 1 (region)
-	 *
-	 * The mirrord will need perform recovery on regions of
-	 * the mirror that are in the NOSYNC state. This
-	 * function asks the log to tell the caller about the
-	 * next region that this machine should recover.
-	 *
-	 * Do not confuse this function with 'in_sync()', one
-	 * tells you if an area is synchronised, the other
-	 * assigns recovery work.
-	 */
-	int (*get_resync_work)(struct dirty_log *log, region_t *region);
-
-	/*
-	 * This notifies the log that the resync status of a region
-	 * has changed. It also clears the region from the recovering
-	 * list (if present).
-	 */
-	void (*set_region_sync)(struct dirty_log *log,
-				region_t region, int in_sync);
-
-	/*
-	 * Returns the number of regions that are in sync.
-	 */
-	region_t (*get_sync_count)(struct dirty_log *log);
-
-	/*
-	 * Support function for mirror status requests.
-	 */
-	int (*status)(struct dirty_log *log, status_type_t status_type,
-		      char *result, unsigned int maxlen);
-};
-
-int dm_register_dirty_log_type(struct dirty_log_type *type);
-int dm_unregister_dirty_log_type(struct dirty_log_type *type);
-
-
-/*
- * Make sure you use these two functions, rather than calling
- * type->constructor/destructor() directly.
- */
-struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
-				      unsigned int argc, char **argv);
-void dm_destroy_dirty_log(struct dirty_log *log);
-
-/*
- * init/exit functions.
- */
-int dm_dirty_log_init(void);
-void dm_dirty_log_exit(void);
-
-#endif
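
The comments in the header removed above draw a distinction worth keeping in mind: in_sync() only answers a query, while get_resync_work() hands out work. A sketch of the consuming loop, modeled on dm-raid1's do_recovery() below but with region quiescing and error handling stripped out (do_recover_region() is an invented stand-in for the actual copy):

/* Sketch only: draining recovery work from a dirty region log. */
static void recover_some(struct dm_dirty_log *log)
{
	region_t region;

	/* Returns 1 and fills 'region' while NOSYNC regions remain,
	 * 0 when there is currently nothing to recover. */
	while (log->type->get_resync_work(log, &region) == 1) {
		int success = do_recover_region(region); /* hypothetical */

		/* Report the outcome back; unlike in_sync(), this changes
		 * log state and clears the region from the recovering list. */
		log->type->set_region_sync(log, region, success);
	}
}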
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 762cb086bb7f..ff05fe893083 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -7,9 +7,6 @@
 #include "dm.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
-#include "dm-io.h"
-#include "dm-log.h"
-#include "kcopyd.h"
 
@@ -22,6 +19,9 @@
 #include <linux/workqueue.h>
 #include <linux/log2.h>
 #include <linux/hardirq.h>
+#include <linux/dm-io.h>
+#include <linux/dm-dirty-log.h>
+#include <linux/dm-kcopyd.h>
 
 #define DM_MSG_PREFIX "raid1"
 #define DM_IO_PAGES 64
@@ -74,7 +74,7 @@ struct region_hash {
 	unsigned region_shift;
 
 	/* holds persistent region state */
-	struct dirty_log *log;
+	struct dm_dirty_log *log;
 
 	/* hash table */
 	rwlock_t hash_lock;
@@ -133,7 +133,7 @@ struct mirror_set {
 	struct dm_target *ti;
 	struct list_head list;
 	struct region_hash rh;
-	struct kcopyd_client *kcopyd_client;
+	struct dm_kcopyd_client *kcopyd_client;
 	uint64_t features;
 
 	spinlock_t lock;	/* protects the lists */
@@ -154,6 +154,9 @@ struct mirror_set {
 
 	struct workqueue_struct *kmirrord_wq;
 	struct work_struct kmirrord_work;
+	struct timer_list timer;
+	unsigned long timer_pending;
+
 	struct work_struct trigger_event;
 
 	unsigned int nr_mirrors;
@@ -178,13 +181,32 @@ static void wake(struct mirror_set *ms)
 	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
 }
 
+static void delayed_wake_fn(unsigned long data)
+{
+	struct mirror_set *ms = (struct mirror_set *) data;
+
+	clear_bit(0, &ms->timer_pending);
+	wake(ms);
+}
+
+static void delayed_wake(struct mirror_set *ms)
+{
+	if (test_and_set_bit(0, &ms->timer_pending))
+		return;
+
+	ms->timer.expires = jiffies + HZ / 5;
+	ms->timer.data = (unsigned long) ms;
+	ms->timer.function = delayed_wake_fn;
+	add_timer(&ms->timer);
+}
+
 /* FIXME move this */
 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
 
 #define MIN_REGIONS 64
 #define MAX_RECOVERY 1
 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
-		   struct dirty_log *log, uint32_t region_size,
+		   struct dm_dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
 {
 	unsigned int nr_buckets, max_buckets;
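
The delayed_wake() pair added above coalesces wakeups: the first caller arms a one-shot timer roughly 200 ms out (HZ/5), and every caller that arrives while bit 0 of timer_pending is set is absorbed into that single firing. The same idiom in isolation, as a sketch (struct batcher is an invented name; the data/function timer fields are the ones this kernel generation uses, before timer_setup() existed):

/* Sketch only: coalescing a burst of wakeups into one deferred call. */
struct batcher {
	struct timer_list timer;	/* init_timer()ed once at setup */
	unsigned long pending;		/* bit 0: a firing is already queued */
};

static void batcher_fire(unsigned long data)
{
	struct batcher *b = (struct batcher *) data;

	clear_bit(0, &b->pending);
	/* perform the single coalesced wakeup here */
}

static void batcher_kick(struct batcher *b)
{
	if (test_and_set_bit(0, &b->pending))
		return;				/* already armed: coalesce */

	b->timer.expires = jiffies + HZ / 5;	/* ~200 ms */
	b->timer.data = (unsigned long) b;
	b->timer.function = batcher_fire;
	add_timer(&b->timer);
}

do_failures() below switches from wake() to delayed_wake() precisely so that a device stuck returning errors does not spin kmirrord at full speed; mirror_dtr() pairs this with del_timer_sync() before flushing the workqueue.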
@@ -249,7 +271,7 @@ static void rh_exit(struct region_hash *rh)
 	}
 
 	if (rh->log)
-		dm_destroy_dirty_log(rh->log);
+		dm_dirty_log_destroy(rh->log);
 	if (rh->region_pool)
 		mempool_destroy(rh->region_pool);
 	vfree(rh->buckets);
@@ -405,24 +427,22 @@ static void rh_update_states(struct region_hash *rh)
 	write_lock_irq(&rh->hash_lock);
 	spin_lock(&rh->region_lock);
 	if (!list_empty(&rh->clean_regions)) {
-		list_splice(&rh->clean_regions, &clean);
-		INIT_LIST_HEAD(&rh->clean_regions);
+		list_splice_init(&rh->clean_regions, &clean);
 
 		list_for_each_entry(reg, &clean, list)
 			list_del(&reg->hash_list);
 	}
 
 	if (!list_empty(&rh->recovered_regions)) {
-		list_splice(&rh->recovered_regions, &recovered);
-		INIT_LIST_HEAD(&rh->recovered_regions);
+		list_splice_init(&rh->recovered_regions, &recovered);
 
 		list_for_each_entry (reg, &recovered, list)
 			list_del(&reg->hash_list);
 	}
 
 	if (!list_empty(&rh->failed_recovered_regions)) {
-		list_splice(&rh->failed_recovered_regions, &failed_recovered);
-		INIT_LIST_HEAD(&rh->failed_recovered_regions);
+		list_splice_init(&rh->failed_recovered_regions,
+				 &failed_recovered);
 
 		list_for_each_entry(reg, &failed_recovered, list)
 			list_del(&reg->hash_list);
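
The three splice sites above change in lockstep; list_splice_init() from <linux/list.h> simply fuses the splice with re-initialising the source head (which plain list_splice() leaves stale), so the transformation is behavior-preserving:

/* Before: two steps, source head stale until re-initialised. */
list_splice(&rh->clean_regions, &clean);
INIT_LIST_HEAD(&rh->clean_regions);

/* After: one call, same end state. */
list_splice_init(&rh->clean_regions, &clean);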
@@ -790,7 +810,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
 {
 	int r;
 	unsigned int i;
-	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
+	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
 	struct mirror *m;
 	unsigned long flags = 0;
 
@@ -822,9 +842,9 @@ static int recover(struct mirror_set *ms, struct region *reg)
 	}
 
 	/* hand to kcopyd */
-	set_bit(KCOPYD_IGNORE_ERROR, &flags);
-	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
-			recovery_complete, reg);
+	set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
+			   flags, recovery_complete, reg);
 
 	return r;
 }
@@ -833,7 +853,7 @@ static void do_recovery(struct mirror_set *ms)
 {
 	int r;
 	struct region *reg;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 
 	/*
 	 * Start quiescing some regions.
@@ -909,7 +929,7 @@ static void map_bio(struct mirror *m, struct bio *bio)
 	bio->bi_sector = map_sector(m, bio);
 }
 
-static void map_region(struct io_region *io, struct mirror *m,
+static void map_region(struct dm_io_region *io, struct mirror *m,
 		       struct bio *bio)
 {
 	io->bdev = m->dev->bdev;
@@ -951,7 +971,7 @@ static void read_callback(unsigned long error, void *context)
 /* Asynchronous read. */
 static void read_async_bio(struct mirror *m, struct bio *bio)
 {
-	struct io_region io;
+	struct dm_io_region io;
 	struct dm_io_request io_req = {
 		.bi_rw = READ,
 		.mem.type = DM_IO_BVEC,
@@ -1019,7 +1039,7 @@ static void __bio_mark_nosync(struct mirror_set *ms,
 {
 	unsigned long flags;
 	struct region_hash *rh = &ms->rh;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 	struct region *reg;
 	region_t region = bio_to_region(rh, bio);
 	int recovering = 0;
@@ -1107,7 +1127,7 @@ out:
 static void do_write(struct mirror_set *ms, struct bio *bio)
 {
 	unsigned int i;
-	struct io_region io[ms->nr_mirrors], *dest = io;
+	struct dm_io_region io[ms->nr_mirrors], *dest = io;
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_rw = WRITE,
@@ -1182,6 +1202,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 		spin_lock_irq(&ms->lock);
 		bio_list_merge(&ms->failures, &sync);
 		spin_unlock_irq(&ms->lock);
+		wake(ms);
 	} else
 		while ((bio = bio_list_pop(&sync)))
 			do_write(ms, bio);
@@ -1241,7 +1262,7 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
 	bio_list_merge(&ms->failures, failures);
 	spin_unlock_irq(&ms->lock);
 
-	wake(ms);
+	delayed_wake(ms);
 }
 
 static void trigger_event(struct work_struct *work)
@@ -1255,7 +1276,7 @@ static void trigger_event(struct work_struct *work)
 /*-----------------------------------------------------------------
  * kmirrord
  *---------------------------------------------------------------*/
-static int _do_mirror(struct work_struct *work)
+static void do_mirror(struct work_struct *work)
 {
 	struct mirror_set *ms =container_of(work, struct mirror_set,
 					    kmirrord_work);
@@ -1277,23 +1298,7 @@ static int _do_mirror(struct work_struct *work)
 	do_writes(ms, &writes);
 	do_failures(ms, &failures);
 
-	return (ms->failures.head) ? 1 : 0;
-}
-
-static void do_mirror(struct work_struct *work)
-{
-	/*
-	 * If _do_mirror returns 1, we give it
-	 * another shot. This helps for cases like
-	 * 'suspend' where we call flush_workqueue
-	 * and expect all work to be finished. If
-	 * a failure happens during a suspend, we
-	 * couldn't issue a 'wake' because it would
-	 * not be honored. Therefore, we return '1'
-	 * from _do_mirror, and retry here.
-	 */
-	while (_do_mirror(work))
-		schedule();
+	dm_table_unplug_all(ms->ti->table);
 }
 
@@ -1303,7 +1308,7 @@ static void do_mirror(struct work_struct *work)
 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 					uint32_t region_size,
 					struct dm_target *ti,
-					struct dirty_log *dl)
+					struct dm_dirty_log *dl)
 {
 	size_t len;
 	struct mirror_set *ms = NULL;
@@ -1403,12 +1408,12 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
 /*
  * Create dirty log: log_type #log_params <log_params>
  */
-static struct dirty_log *create_dirty_log(struct dm_target *ti,
+static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
 					  unsigned int argc, char **argv,
 					  unsigned int *args_used)
 {
 	unsigned int param_count;
-	struct dirty_log *dl;
+	struct dm_dirty_log *dl;
 
 	if (argc < 2) {
 		ti->error = "Insufficient mirror log arguments";
@@ -1427,7 +1432,7 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
 		return NULL;
 	}
 
-	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
+	dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
 	if (!dl) {
 		ti->error = "Error creating mirror dirty log";
 		return NULL;
@@ -1435,7 +1440,7 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
 
 	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
 		ti->error = "Invalid region size";
-		dm_destroy_dirty_log(dl);
+		dm_dirty_log_destroy(dl);
 		return NULL;
 	}
 
@@ -1496,7 +1501,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	int r;
 	unsigned int nr_mirrors, m, args_used;
 	struct mirror_set *ms;
-	struct dirty_log *dl;
+	struct dm_dirty_log *dl;
 
 	dl = create_dirty_log(ti, argc, argv, &args_used);
 	if (!dl)
@@ -1506,9 +1511,9 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	argc -= args_used;
 
 	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
-	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
+	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
 		ti->error = "Invalid number of mirrors";
-		dm_destroy_dirty_log(dl);
+		dm_dirty_log_destroy(dl);
 		return -EINVAL;
 	}
 
@@ -1516,13 +1521,13 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (argc < nr_mirrors * 2) {
 		ti->error = "Too few mirror arguments";
-		dm_destroy_dirty_log(dl);
+		dm_dirty_log_destroy(dl);
 		return -EINVAL;
 	}
 
 	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
 	if (!ms) {
-		dm_destroy_dirty_log(dl);
+		dm_dirty_log_destroy(dl);
 		return -ENOMEM;
 	}
 
@@ -1547,6 +1552,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_free_context;
 	}
 	INIT_WORK(&ms->kmirrord_work, do_mirror);
+	init_timer(&ms->timer);
+	ms->timer_pending = 0;
 	INIT_WORK(&ms->trigger_event, trigger_event);
 
 	r = parse_features(ms, argc, argv, &args_used);
@@ -1571,7 +1578,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_destroy_wq;
 	}
 
-	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+	r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
 	if (r)
 		goto err_destroy_wq;
 
@@ -1589,8 +1596,9 @@ static void mirror_dtr(struct dm_target *ti)
 {
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
 
+	del_timer_sync(&ms->timer);
 	flush_workqueue(ms->kmirrord_wq);
-	kcopyd_client_destroy(ms->kcopyd_client);
+	dm_kcopyd_client_destroy(ms->kcopyd_client);
 	destroy_workqueue(ms->kmirrord_wq);
 	free_context(ms, ti, ms->nr_mirrors);
 }
@@ -1734,7 +1742,7 @@ out:
 static void mirror_presuspend(struct dm_target *ti)
 {
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 
 	atomic_set(&ms->suspend, 1);
 
@@ -1763,7 +1771,7 @@ static void mirror_presuspend(struct dm_target *ti)
 static void mirror_postsuspend(struct dm_target *ti)
 {
 	struct mirror_set *ms = ti->private;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 
 	if (log->type->postsuspend && log->type->postsuspend(log))
 		/* FIXME: need better error handling */
@@ -1773,7 +1781,7 @@ static void mirror_postsuspend(struct dm_target *ti)
 static void mirror_resume(struct dm_target *ti)
 {
 	struct mirror_set *ms = ti->private;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 
 	atomic_set(&ms->suspend, 0);
 	if (log->type->resume && log->type->resume(log))
@@ -1811,7 +1819,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
 {
 	unsigned int m, sz = 0;
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 	char buffer[ms->nr_mirrors + 1];
 
 	switch (type) {
@@ -1864,15 +1872,9 @@ static int __init dm_mirror_init(void)
 {
 	int r;
 
-	r = dm_dirty_log_init();
-	if (r)
-		return r;
-
 	r = dm_register_target(&mirror_target);
-	if (r < 0) {
+	if (r < 0)
 		DMERR("Failed to register mirror target");
-		dm_dirty_log_exit();
-	}
 
 	return r;
 }
@@ -1884,8 +1886,6 @@ static void __exit dm_mirror_exit(void)
 	r = dm_unregister_target(&mirror_target);
 	if (r < 0)
 		DMERR("unregister failed %d", r);
-
-	dm_dirty_log_exit();
 }
 
 /* Module hooks */
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4dc8a43c034b..1ba8a47d61b1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -18,10 +18,10 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
+#include <linux/dm-kcopyd.h>
 
 #include "dm-snap.h"
 #include "dm-bio-list.h"
-#include "kcopyd.h"
 
 #define DM_MSG_PREFIX "snapshots"
 
@@ -36,9 +36,9 @@
 #define SNAPSHOT_COPY_PRIORITY 2
 
 /*
- * Each snapshot reserves this many pages for io
+ * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
-#define SNAPSHOT_PAGES 256
+#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
 
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
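
Worked through, the new SNAPSHOT_PAGES expression reproduces the old constant on common configurations while scaling with page size (the values below are illustrative arithmetic, not part of the patch):

/*
 * SNAPSHOT_PAGES = ((1UL << 20) >> PAGE_SHIFT) ? : 1
 *
 *   PAGE_SHIFT == 12 (4 KB pages):  1 MB >> 12 = 256 pages (the old value)
 *   PAGE_SHIFT == 16 (64 KB pages): 1 MB >> 16 = 16 pages
 *   PAGE_SHIFT >= 20 (pages >= 1 MB): the shift yields 0 and the GNU
 *   "x ? : y" extension falls back to the minimum of 1 page
 *
 * i.e. roughly 1 MB reserved per snapshot, never less than one page.
 */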
@@ -536,7 +536,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	s->last_percent = 0;
 	init_rwsem(&s->lock);
 	spin_lock_init(&s->pe_lock);
-	s->table = ti->table;
+	s->ti = ti;
 
 	/* Allocate hash table for COW data */
 	if (init_hash_tables(s)) {
@@ -558,7 +558,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad4;
 	}
 
-	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
+	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
 	if (r) {
 		ti->error = "Could not create kcopyd client";
 		goto bad5;
@@ -591,7 +591,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	return 0;
 
  bad6:
-	kcopyd_client_destroy(s->kcopyd_client);
+	dm_kcopyd_client_destroy(s->kcopyd_client);
 
  bad5:
 	s->store.destroy(&s->store);
@@ -613,7 +613,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 static void __free_exceptions(struct dm_snapshot *s)
 {
-	kcopyd_client_destroy(s->kcopyd_client);
+	dm_kcopyd_client_destroy(s->kcopyd_client);
 	s->kcopyd_client = NULL;
 
 	exit_exception_table(&s->pending, pending_cache);
@@ -699,7 +699,7 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
 
 	s->valid = 0;
 
-	dm_table_event(s->table);
+	dm_table_event(s->ti->table);
 }
 
 static void get_pending_exception(struct dm_snap_pending_exception *pe)
@@ -824,7 +824,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
 static void start_copy(struct dm_snap_pending_exception *pe)
 {
 	struct dm_snapshot *s = pe->snap;
-	struct io_region src, dest;
+	struct dm_io_region src, dest;
 	struct block_device *bdev = s->origin->bdev;
 	sector_t dev_size;
 
@@ -839,7 +839,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 	dest.count = src.count;
 
 	/* Hand over to kcopyd */
-	kcopyd_copy(s->kcopyd_client,
-		    &src, 1, &dest, 0, copy_callback, pe);
+	dm_kcopyd_copy(s->kcopyd_client,
+		       &src, 1, &dest, 0, copy_callback, pe);
 }
 
@@ -1060,7 +1060,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
 			goto next_snapshot;
 
 		/* Nothing to do if writing beyond end of snapshot */
-		if (bio->bi_sector >= dm_table_get_size(snap->table))
+		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
 			goto next_snapshot;
 
 		/*
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 93bce5d49742..24f9fb73b982 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -132,7 +132,7 @@ struct exception_store {
 
 struct dm_snapshot {
 	struct rw_semaphore lock;
-	struct dm_table *table;
+	struct dm_target *ti;
 
 	struct dm_dev *origin;
 	struct dm_dev *cow;
@@ -169,7 +169,7 @@ struct dm_snapshot {
 	/* The on disk metadata handler */
 	struct exception_store store;
 
-	struct kcopyd_client *kcopyd_client;
+	struct dm_kcopyd_client *kcopyd_client;
 
 	/* Queue of snapshot writes for ksnapd to flush */
 	struct bio_list queued_bios;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e75b1437b58b..51be53344214 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -245,44 +245,6 @@ int dm_table_create(struct dm_table **result, int mode,
 	return 0;
 }
 
-int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
-{
-	struct dm_table *t;
-	sector_t dev_size = 1;
-	int r;
-
-	/*
-	 * Find current size of device.
-	 * Default to 1 sector if inactive.
-	 */
-	t = dm_get_table(md);
-	if (t) {
-		dev_size = dm_table_get_size(t);
-		dm_table_put(t);
-	}
-
-	r = dm_table_create(&t, FMODE_READ, 1, md);
-	if (r)
-		return r;
-
-	r = dm_table_add_target(t, "error", 0, dev_size, NULL);
-	if (r)
-		goto out;
-
-	r = dm_table_complete(t);
-	if (r)
-		goto out;
-
-	*result = t;
-
-out:
-	if (r)
-		dm_table_put(t);
-
-	return r;
-}
-EXPORT_SYMBOL_GPL(dm_create_error_table);
-
 static void free_devices(struct list_head *devices)
 {
 	struct list_head *tmp, *next;
@@ -954,7 +916,7 @@ void dm_table_presuspend_targets(struct dm_table *t)
 	if (!t)
 		return;
 
-	return suspend_targets(t, 0);
+	suspend_targets(t, 0);
 }
 
 void dm_table_postsuspend_targets(struct dm_table *t)
@@ -962,7 +924,7 @@ void dm_table_postsuspend_targets(struct dm_table *t)
 	if (!t)
 		return;
 
-	return suspend_targets(t, 1);
+	suspend_targets(t, 1);
 }
 
 int dm_table_resume_targets(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6617ce4af095..372369b1cc20 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -204,6 +204,7 @@ static int (*_inits[])(void) __initdata = {
204 dm_target_init, 204 dm_target_init,
205 dm_linear_init, 205 dm_linear_init,
206 dm_stripe_init, 206 dm_stripe_init,
207 dm_kcopyd_init,
207 dm_interface_init, 208 dm_interface_init,
208}; 209};
209 210
@@ -212,6 +213,7 @@ static void (*_exits[])(void) = {
212 dm_target_exit, 213 dm_target_exit,
213 dm_linear_exit, 214 dm_linear_exit,
214 dm_stripe_exit, 215 dm_stripe_exit,
216 dm_kcopyd_exit,
215 dm_interface_exit, 217 dm_interface_exit,
216}; 218};
217 219
@@ -922,7 +924,7 @@ static void free_minor(int minor)
922/* 924/*
923 * See if the device with a specific minor # is free. 925 * See if the device with a specific minor # is free.
924 */ 926 */
925static int specific_minor(struct mapped_device *md, int minor) 927static int specific_minor(int minor)
926{ 928{
927 int r, m; 929 int r, m;
928 930
@@ -955,7 +957,7 @@ out:
955 return r; 957 return r;
956} 958}
957 959
958static int next_free_minor(struct mapped_device *md, int *minor) 960static int next_free_minor(int *minor)
959{ 961{
960 int r, m; 962 int r, m;
961 963
@@ -966,9 +968,8 @@ static int next_free_minor(struct mapped_device *md, int *minor)
966 spin_lock(&_minor_lock); 968 spin_lock(&_minor_lock);
967 969
968 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m); 970 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
969 if (r) { 971 if (r)
970 goto out; 972 goto out;
971 }
972 973
973 if (m >= (1 << MINORBITS)) { 974 if (m >= (1 << MINORBITS)) {
974 idr_remove(&_minor_idr, m); 975 idr_remove(&_minor_idr, m);
@@ -991,7 +992,7 @@ static struct block_device_operations dm_blk_dops;
991static struct mapped_device *alloc_dev(int minor) 992static struct mapped_device *alloc_dev(int minor)
992{ 993{
993 int r; 994 int r;
994 struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); 995 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
995 void *old_md; 996 void *old_md;
996 997
997 if (!md) { 998 if (!md) {
@@ -1004,13 +1005,12 @@ static struct mapped_device *alloc_dev(int minor)
1004 1005
1005 /* get a minor number for the dev */ 1006 /* get a minor number for the dev */
1006 if (minor == DM_ANY_MINOR) 1007 if (minor == DM_ANY_MINOR)
1007 r = next_free_minor(md, &minor); 1008 r = next_free_minor(&minor);
1008 else 1009 else
1009 r = specific_minor(md, minor); 1010 r = specific_minor(minor);
1010 if (r < 0) 1011 if (r < 0)
1011 goto bad_minor; 1012 goto bad_minor;
1012 1013
1013 memset(md, 0, sizeof(*md));
1014 init_rwsem(&md->io_lock); 1014 init_rwsem(&md->io_lock);
1015 mutex_init(&md->suspend_lock); 1015 mutex_init(&md->suspend_lock);
1016 spin_lock_init(&md->pushback_lock); 1016 spin_lock_init(&md->pushback_lock);
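
[Editor's note] Three related cleanups run through dm.c: the unused md argument disappears from specific_minor()/next_free_minor(), single-statement braces go away, and kmalloc()+memset() collapses into kzalloc(), which also guarantees the structure is zeroed before any code touches it. Sketch of the allocation idiom, with a stand-in struct:

#include <linux/slab.h>

/* kzalloc() == kmalloc() + memset(.., 0, ..): the structure comes
 * back fully zeroed (or NULL), so alloc_dev() can drop its separate
 * memset() call. */
struct example_dev {
	int minor;
	void *interface_ptr;
};

static struct example_dev *example_alloc(void)
{
	return kzalloc(sizeof(struct example_dev), GFP_KERNEL);
}
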
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index b4584a39383b..8c03b634e62e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -16,67 +16,6 @@
16#include <linux/blkdev.h> 16#include <linux/blkdev.h>
17#include <linux/hdreg.h> 17#include <linux/hdreg.h>
18 18
19#define DM_NAME "device-mapper"
20
21#define DMERR(f, arg...) \
22 printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
23#define DMERR_LIMIT(f, arg...) \
24 do { \
25 if (printk_ratelimit()) \
26 printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
27 f "\n", ## arg); \
28 } while (0)
29
30#define DMWARN(f, arg...) \
31 printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
32#define DMWARN_LIMIT(f, arg...) \
33 do { \
34 if (printk_ratelimit()) \
35 printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
36 f "\n", ## arg); \
37 } while (0)
38
39#define DMINFO(f, arg...) \
40 printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
41#define DMINFO_LIMIT(f, arg...) \
42 do { \
43 if (printk_ratelimit()) \
44 printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
45 "\n", ## arg); \
46 } while (0)
47
48#ifdef CONFIG_DM_DEBUG
49# define DMDEBUG(f, arg...) \
50 printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
51# define DMDEBUG_LIMIT(f, arg...) \
52 do { \
53 if (printk_ratelimit()) \
54 printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
55 "\n", ## arg); \
56 } while (0)
57#else
58# define DMDEBUG(f, arg...) do {} while (0)
59# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
60#endif
61
62#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
63 0 : scnprintf(result + sz, maxlen - sz, x))
64
65#define SECTOR_SHIFT 9
66
67/*
68 * Definitions of return values from target end_io function.
69 */
70#define DM_ENDIO_INCOMPLETE 1
71#define DM_ENDIO_REQUEUE 2
72
73/*
74 * Definitions of return values from target map function.
75 */
76#define DM_MAPIO_SUBMITTED 0
77#define DM_MAPIO_REMAPPED 1
78#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
79
80/* 19/*
81 * Suspend feature flags 20 * Suspend feature flags
82 */ 21 */
@@ -136,34 +75,6 @@ static inline int array_too_big(unsigned long fixed, unsigned long obj,
136 return (num > (ULONG_MAX - fixed) / obj); 75 return (num > (ULONG_MAX - fixed) / obj);
137} 76}
138 77
139/*
140 * Ceiling(n / sz)
141 */
142#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
143
144#define dm_sector_div_up(n, sz) ( \
145{ \
146 sector_t _r = ((n) + (sz) - 1); \
147 sector_div(_r, (sz)); \
148 _r; \
149} \
150)
151
152/*
153 * ceiling(n / size) * size
154 */
155#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
156
157static inline sector_t to_sector(unsigned long n)
158{
159 return (n >> 9);
160}
161
162static inline unsigned long to_bytes(sector_t n)
163{
164 return (n << 9);
165}
166
167int dm_split_args(int *argc, char ***argvp, char *input); 78int dm_split_args(int *argc, char ***argvp, char *input);
168 79
169/* 80/*
@@ -189,4 +100,13 @@ int dm_lock_for_deletion(struct mapped_device *md);
189 100
190void dm_kobject_uevent(struct mapped_device *md); 101void dm_kobject_uevent(struct mapped_device *md);
191 102
103/*
104 * Dirty log
105 */
106int dm_dirty_log_init(void);
107void dm_dirty_log_exit(void);
108
109int dm_kcopyd_init(void);
110void dm_kcopyd_exit(void);
111
192#endif 112#endif
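
[Editor's note] The big deletion evicts the DMERR/DMWARN/DMINFO/DMDEBUG families, DMEMIT, the sector-arithmetic helpers and the map/end_io return codes from the private dm.h — in later kernels these live in the shared <linux/device-mapper.h>, which is presumably where this series moves them. The declarations added at the bottom pair with the new dm_kcopyd_init/_exit entries in dm.c's initcall arrays. All the removed *_LIMIT macros follow one pattern; a sketch:

#include <linux/kernel.h>

/* Same message as the plain variant, but gated by printk_ratelimit()
 * so an error flood cannot spam the log. */
#define EXAMPLE_ERR_LIMIT(f, arg...)					\
	do {								\
		if (printk_ratelimit())					\
			printk(KERN_ERR "example: " f "\n", ## arg);	\
	} while (0)
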
diff --git a/drivers/md/kcopyd.h b/drivers/md/kcopyd.h
deleted file mode 100644
index 4845f2a0c676..000000000000
--- a/drivers/md/kcopyd.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright (C) 2001 Sistina Software
3 *
4 * This file is released under the GPL.
5 *
6 * Kcopyd provides a simple interface for copying an area of one
7 * block-device to one or more other block-devices, with an asynchronous
8 * completion notification.
9 */
10
11#ifndef DM_KCOPYD_H
12#define DM_KCOPYD_H
13
14#include "dm-io.h"
15
16/* FIXME: make this configurable */
17#define KCOPYD_MAX_REGIONS 8
18
19#define KCOPYD_IGNORE_ERROR 1
20
21/*
22 * To use kcopyd you must first create a kcopyd client object.
23 */
24struct kcopyd_client;
25int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result);
26void kcopyd_client_destroy(struct kcopyd_client *kc);
27
28/*
29 * Submit a copy job to kcopyd. This is built on top of the
30 * previous three fns.
31 *
32 * read_err is a boolean,
33 * write_err is a bitset, with 1 bit for each destination region
34 */
35typedef void (*kcopyd_notify_fn)(int read_err, unsigned long write_err,
36 void *context);
37
38int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
39 unsigned int num_dests, struct io_region *dests,
40 unsigned int flags, kcopyd_notify_fn fn, void *context);
41
42#endif
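
[Editor's note] kcopyd.h disappears as part of the kcopyd -> dm-kcopyd rename visible throughout this patch (dm-kcopyd.c in the diffstat, dm_kcopyd_client in dm-snap.h, dm_kcopyd_init/_exit in dm.h). A hedged usage sketch, assuming the replacement interface mirrors the deleted one one-for-one with a dm_ prefix and struct dm_io_region in place of io_region:

#include <linux/dm-kcopyd.h>	/* assumed home of the renamed interface */

static void copy_done(int read_err, unsigned long write_err, void *context)
{
	/* read_err is a boolean; write_err has one bit per destination */
}

static int start_copy(struct dm_kcopyd_client *kc,
		      struct dm_io_region *src, struct dm_io_region *dest)
{
	return dm_kcopyd_copy(kc, src, 1, dest, 0, copy_done, NULL);
}
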
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index e8503341e3b1..eed06d068fd1 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -158,6 +158,12 @@ config MTD_OF_PARTS
158 the partition map from the children of the flash node, 158 the partition map from the children of the flash node,
159 as described in Documentation/powerpc/booting-without-of.txt. 159 as described in Documentation/powerpc/booting-without-of.txt.
160 160
161config MTD_AR7_PARTS
162 tristate "TI AR7 partitioning support"
163 depends on MTD_PARTITIONS
164 ---help---
165 TI AR7 partitioning support
166
161comment "User Modules And Translation Layers" 167comment "User Modules And Translation Layers"
162 168
163config MTD_CHAR 169config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 538e33d11d46..4b77335715f0 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
11obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 11obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
12obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 12obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
13obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 13obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
14obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
14obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o 15obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
15 16
16# 'Users' - code which presents functionality to userspace. 17# 'Users' - code which presents functionality to userspace.
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
new file mode 100644
index 000000000000..ecf170b55c32
--- /dev/null
+++ b/drivers/mtd/ar7part.c
@@ -0,0 +1,151 @@
1/*
2 * Copyright © 2007 Eugene Konev <ejka@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 *
18 * TI AR7 flash partition table.
19 * Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/slab.h>
25
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/partitions.h>
28#include <linux/bootmem.h>
29#include <linux/magic.h>
30
31#define AR7_PARTS 4
32#define ROOT_OFFSET 0xe0000
33
34#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
35#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
36
37#ifndef SQUASHFS_MAGIC
38#define SQUASHFS_MAGIC 0x73717368
39#endif
40
41struct ar7_bin_rec {
42 unsigned int checksum;
43 unsigned int length;
44 unsigned int address;
45};
46
47static struct mtd_partition ar7_parts[AR7_PARTS];
48
49static int create_mtd_partitions(struct mtd_info *master,
50 struct mtd_partition **pparts,
51 unsigned long origin)
52{
53 struct ar7_bin_rec header;
54 unsigned int offset;
55 size_t len;
56 unsigned int pre_size = master->erasesize, post_size = 0;
57 unsigned int root_offset = ROOT_OFFSET;
58
59 int retries = 10;
60
61 ar7_parts[0].name = "loader";
62 ar7_parts[0].offset = 0;
63 ar7_parts[0].size = master->erasesize;
64 ar7_parts[0].mask_flags = MTD_WRITEABLE;
65
66 ar7_parts[1].name = "config";
67 ar7_parts[1].offset = 0;
68 ar7_parts[1].size = master->erasesize;
69 ar7_parts[1].mask_flags = 0;
70
71 do { /* Try 10 blocks starting from master->erasesize */
72 offset = pre_size;
73 master->read(master, offset,
74 sizeof(header), &len, (uint8_t *)&header);
75 if (!strncmp((char *)&header, "TIENV0.8", 8))
76 ar7_parts[1].offset = pre_size;
77 if (header.checksum == LOADER_MAGIC1)
78 break;
79 if (header.checksum == LOADER_MAGIC2)
80 break;
81 pre_size += master->erasesize;
82 } while (retries--);
83
84 pre_size = offset;
85
86 if (!ar7_parts[1].offset) {
87 ar7_parts[1].offset = master->size - master->erasesize;
88 post_size = master->erasesize;
89 }
90
91 switch (header.checksum) {
92 case LOADER_MAGIC1:
93 while (header.length) {
94 offset += sizeof(header) + header.length;
95 master->read(master, offset, sizeof(header),
96 &len, (uint8_t *)&header);
97 }
98 root_offset = offset + sizeof(header) + 4;
99 break;
100 case LOADER_MAGIC2:
101 while (header.length) {
102 offset += sizeof(header) + header.length;
103 master->read(master, offset, sizeof(header),
104 &len, (uint8_t *)&header);
105 }
106 root_offset = offset + sizeof(header) + 4 + 0xff;
107 root_offset &= ~(uint32_t)0xff;
108 break;
109 default:
110 printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
111 break;
112 }
113
114 master->read(master, root_offset,
115 sizeof(header), &len, (u8 *)&header);
116 if (header.checksum != SQUASHFS_MAGIC) {
117 root_offset += master->erasesize - 1;
118 root_offset &= ~(master->erasesize - 1);
119 }
120
121 ar7_parts[2].name = "linux";
122 ar7_parts[2].offset = pre_size;
123 ar7_parts[2].size = master->size - pre_size - post_size;
124 ar7_parts[2].mask_flags = 0;
125
126 ar7_parts[3].name = "rootfs";
127 ar7_parts[3].offset = root_offset;
128 ar7_parts[3].size = master->size - root_offset - post_size;
129 ar7_parts[3].mask_flags = 0;
130
131 *pparts = ar7_parts;
132 return AR7_PARTS;
133}
134
135static struct mtd_part_parser ar7_parser = {
136 .owner = THIS_MODULE,
137 .parse_fn = create_mtd_partitions,
138 .name = "ar7part",
139};
140
141static int __init ar7_parser_init(void)
142{
143 return register_mtd_parser(&ar7_parser);
144}
145
146module_init(ar7_parser_init);
147
148MODULE_LICENSE("GPL");
149MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
150 "Eugene Konev <ejka@openwrt.org>");
151MODULE_DESCRIPTION("MTD partitioning for TI AR7");
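
[Editor's note] ar7part.c registers itself as an MTD partition parser under the name "ar7part"; a board's map driver would select it by that name through parse_mtd_partitions(). A hedged usage sketch — the hookup below is illustrative and not part of this patch:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char *probes[] = { "ar7part", NULL };	/* matches .name above */

static int example_attach(struct mtd_info *master)
{
	struct mtd_partition *parts;
	int nr = parse_mtd_partitions(master, probes, &parts, 0);

	if (nr <= 0)
		return -ENODEV;	/* parser missing or found nothing */
	return add_mtd_partitions(master, parts, nr);
}
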
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 0080452531d6..e812df607a5c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -384,7 +384,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
384 if (extp_size > 4096) { 384 if (extp_size > 4096) {
385 printk(KERN_ERR 385 printk(KERN_ERR
386 "%s: cfi_pri_intelext is too fat\n", 386 "%s: cfi_pri_intelext is too fat\n",
387 __FUNCTION__); 387 __func__);
388 return NULL; 388 return NULL;
389 } 389 }
390 goto again; 390 goto again;
@@ -619,6 +619,9 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
619 sizeof(struct cfi_intelext_blockinfo); 619 sizeof(struct cfi_intelext_blockinfo);
620 } 620 }
621 621
622 if (!numparts)
623 numparts = 1;
624
622 /* Programming Region info */ 625 /* Programming Region info */
623 if (extp->MinorVersion >= '4') { 626 if (extp->MinorVersion >= '4') {
624 struct cfi_intelext_programming_regioninfo *prinfo; 627 struct cfi_intelext_programming_regioninfo *prinfo;
@@ -641,7 +644,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
641 if ((1 << partshift) < mtd->erasesize) { 644 if ((1 << partshift) < mtd->erasesize) {
642 printk( KERN_ERR 645 printk( KERN_ERR
643 "%s: bad number of hw partitions (%d)\n", 646 "%s: bad number of hw partitions (%d)\n",
644 __FUNCTION__, numparts); 647 __func__, numparts);
645 return -EINVAL; 648 return -EINVAL;
646 } 649 }
647 650
@@ -1071,10 +1074,10 @@ static int __xipram xip_wait_for_operation(
1071 chip->state = newstate; 1074 chip->state = newstate;
1072 map_write(map, CMD(0xff), adr); 1075 map_write(map, CMD(0xff), adr);
1073 (void) map_read(map, adr); 1076 (void) map_read(map, adr);
1074 asm volatile (".rep 8; nop; .endr"); 1077 xip_iprefetch();
1075 local_irq_enable(); 1078 local_irq_enable();
1076 spin_unlock(chip->mutex); 1079 spin_unlock(chip->mutex);
1077 asm volatile (".rep 8; nop; .endr"); 1080 xip_iprefetch();
1078 cond_resched(); 1081 cond_resched();
1079 1082
1080 /* 1083 /*
@@ -2013,7 +2016,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2013 2016
2014#ifdef DEBUG_LOCK_BITS 2017#ifdef DEBUG_LOCK_BITS
2015 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n", 2018 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2016 __FUNCTION__, ofs, len); 2019 __func__, ofs, len);
2017 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 2020 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2018 ofs, len, NULL); 2021 ofs, len, NULL);
2019#endif 2022#endif
@@ -2023,7 +2026,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2023 2026
2024#ifdef DEBUG_LOCK_BITS 2027#ifdef DEBUG_LOCK_BITS
2025 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 2028 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2026 __FUNCTION__, ret); 2029 __func__, ret);
2027 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 2030 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2028 ofs, len, NULL); 2031 ofs, len, NULL);
2029#endif 2032#endif
@@ -2037,7 +2040,7 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2037 2040
2038#ifdef DEBUG_LOCK_BITS 2041#ifdef DEBUG_LOCK_BITS
2039 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n", 2042 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2040 __FUNCTION__, ofs, len); 2043 __func__, ofs, len);
2041 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 2044 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2042 ofs, len, NULL); 2045 ofs, len, NULL);
2043#endif 2046#endif
@@ -2047,7 +2050,7 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2047 2050
2048#ifdef DEBUG_LOCK_BITS 2051#ifdef DEBUG_LOCK_BITS
2049 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 2052 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2050 __FUNCTION__, ret); 2053 __func__, ret);
2051 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 2054 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2052 ofs, len, NULL); 2055 ofs, len, NULL);
2053#endif 2056#endif
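
[Editor's note] Two mechanical substitutions run through this file: the GCC-specific __FUNCTION__ gives way to the C99-standard __func__, and the open-coded `asm volatile (".rep 8; nop; .endr")` instruction-prefetch trick becomes the xip_iprefetch() helper (presumably a macro in <linux/mtd/xip.h> wrapping the same nop run). The one behavioural hunk clamps numparts to at least 1, guarding the later hw-partition arithmetic against a zero count. Illustration of the __func__ change:

#include <linux/kernel.h>

/* __func__ and __FUNCTION__ both expand to the enclosing function's
 * name; only the former is standard C99, so this is a pure
 * portability cleanup. */
static int probe_example(void)
{
	printk(KERN_DEBUG "%s: probing\n", __func__); /* "probe_example: probing" */
	return 0;
}
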
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 458d477614d6..f7fcc6389533 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -220,6 +220,28 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
220 mtd->flags |= MTD_POWERUP_LOCK; 220 mtd->flags |= MTD_POWERUP_LOCK;
221} 221}
222 222
223static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
224{
225 struct map_info *map = mtd->priv;
226 struct cfi_private *cfi = map->fldrv_priv;
227
228 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
229 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
230 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
231 }
232}
233
234static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
235{
236 struct map_info *map = mtd->priv;
237 struct cfi_private *cfi = map->fldrv_priv;
238
239 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
240 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
241 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
242 }
243}
244
223static struct cfi_fixup cfi_fixup_table[] = { 245static struct cfi_fixup cfi_fixup_table[] = {
224 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 246 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
225#ifdef AMD_BOOTLOC_BUG 247#ifdef AMD_BOOTLOC_BUG
@@ -231,6 +253,10 @@ static struct cfi_fixup cfi_fixup_table[] = {
231 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, }, 253 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
232 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, }, 254 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
233 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, }, 255 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
256 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
257 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
258 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
259 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
234#if !FORCE_WORD_WRITE 260#if !FORCE_WORD_WRITE
235 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, 261 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
236#endif 262#endif
@@ -723,10 +749,10 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
723 chip->erase_suspended = 1; 749 chip->erase_suspended = 1;
724 map_write(map, CMD(0xf0), adr); 750 map_write(map, CMD(0xf0), adr);
725 (void) map_read(map, adr); 751 (void) map_read(map, adr);
726 asm volatile (".rep 8; nop; .endr"); 752 xip_iprefetch();
727 local_irq_enable(); 753 local_irq_enable();
728 spin_unlock(chip->mutex); 754 spin_unlock(chip->mutex);
729 asm volatile (".rep 8; nop; .endr"); 755 xip_iprefetch();
730 cond_resched(); 756 cond_resched();
731 757
732 /* 758 /*
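
[Editor's note] The two new fixups correct chips whose CFI tables under-report (S29GL064N) or over-report (S29GL032N) the erase-block count. A CFI erase-region word packs (block count - 1) into its low 16 bits and block-size/256 into the high 16 bits, so toggling bit 6 moves the count between 64 (0x003f) and 128 (0x007f), or 127 (0x007e) and 63 (0x003e) — exactly what the pr_warning() strings say. Decode sketch:

#include <linux/kernel.h>
#include <linux/types.h>

/* Decoding one cfi->cfiq->EraseRegionInfo[] word (per the CFI spec). */
static void decode_erase_region(u32 info)
{
	unsigned int count = (info & 0xffff) + 1;	/* 0x003f -> 64, 0x007f -> 128 */
	unsigned int size = (info >> 16) * 256;		/* bytes per erase block */

	printk(KERN_DEBUG "%u blocks of %u bytes\n", count, size);
}
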
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 492e2ab27420..1b720cc571f3 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
445 retry: 445 retry:
446 446
447#ifdef DEBUG_CFI_FEATURES 447#ifdef DEBUG_CFI_FEATURES
448 printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state); 448 printk("%s: chip->state[%d]\n", __func__, chip->state);
449#endif 449#endif
450 spin_lock_bh(chip->mutex); 450 spin_lock_bh(chip->mutex);
451 451
@@ -463,7 +463,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
463 map_write(map, CMD(0x70), cmd_adr); 463 map_write(map, CMD(0x70), cmd_adr);
464 chip->state = FL_STATUS; 464 chip->state = FL_STATUS;
465#ifdef DEBUG_CFI_FEATURES 465#ifdef DEBUG_CFI_FEATURES
466 printk("%s: 1 status[%x]\n", __FUNCTION__, map_read(map, cmd_adr)); 466 printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
467#endif 467#endif
468 468
469 case FL_STATUS: 469 case FL_STATUS:
@@ -591,7 +591,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
591 /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */ 591 /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
592 if (map_word_bitsset(map, status, CMD(0x3a))) { 592 if (map_word_bitsset(map, status, CMD(0x3a))) {
593#ifdef DEBUG_CFI_FEATURES 593#ifdef DEBUG_CFI_FEATURES
594 printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]); 594 printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
595#endif 595#endif
596 /* clear status */ 596 /* clear status */
597 map_write(map, CMD(0x50), cmd_adr); 597 map_write(map, CMD(0x50), cmd_adr);
@@ -625,9 +625,9 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
625 ofs = to - (chipnum << cfi->chipshift); 625 ofs = to - (chipnum << cfi->chipshift);
626 626
627#ifdef DEBUG_CFI_FEATURES 627#ifdef DEBUG_CFI_FEATURES
628 printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map)); 628 printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
629 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize); 629 printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
630 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len); 630 printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
631#endif 631#endif
632 632
633 /* Write buffer is worth it only if more than one word to write... */ 633 /* Write buffer is worth it only if more than one word to write... */
@@ -893,7 +893,8 @@ retry:
893 return ret; 893 return ret;
894} 894}
895 895
896int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 896static int cfi_staa_erase_varsize(struct mtd_info *mtd,
897 struct erase_info *instr)
897{ struct map_info *map = mtd->priv; 898{ struct map_info *map = mtd->priv;
898 struct cfi_private *cfi = map->fldrv_priv; 899 struct cfi_private *cfi = map->fldrv_priv;
899 unsigned long adr, len; 900 unsigned long adr, len;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index f651b6ef1c5d..a4463a91ce31 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -39,7 +39,7 @@ struct mtd_info *cfi_probe(struct map_info *map);
39#define xip_allowed(base, map) \ 39#define xip_allowed(base, map) \
40do { \ 40do { \
41 (void) map_read(map, base); \ 41 (void) map_read(map, base); \
42 asm volatile (".rep 8; nop; .endr"); \ 42 xip_iprefetch(); \
43 local_irq_enable(); \ 43 local_irq_enable(); \
44} while (0) 44} while (0)
45 45
@@ -232,6 +232,11 @@ static int __xipram cfi_chip_setup(struct map_info *map,
232 cfi->mfr = cfi_read_query16(map, base); 232 cfi->mfr = cfi_read_query16(map, base);
233 cfi->id = cfi_read_query16(map, base + ofs_factor); 233 cfi->id = cfi_read_query16(map, base + ofs_factor);
234 234
235 /* Get AMD/Spansion extended JEDEC ID */
236 if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
237 cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
238 cfi_read_query(map, base + 0xf * ofs_factor);
239
235 /* Put it back into Read Mode */ 240 /* Put it back into Read Mode */
236 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 241 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
237 /* ... even if it's an Intel chip */ 242 /* ... even if it's an Intel chip */
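
[Editor's note] Plain CFI carries only an 8-bit device ID; AMD/Spansion parts whose ID byte reads 0x7e use a continuation scheme, with the real identifier spread across two further query locations (offsets 0xe and 0xf, scaled by ofs_factor). The hunk assembles those into a 16-bit cfi->id — which is what lets the S29GL fixup keys above (0x0c01, 0x1301, 0x1a00, 0x1a01) match at all. Worked example:

#include <linux/types.h>

/* Extended-ID reads returning 0x0c and 0x01 yield cfi->id == 0x0c01,
 * the S29GL064N key used in the cfi_cmdset_0002 fixup table. */
static u16 amd_extended_id(u8 byte_e, u8 byte_f)
{
	return ((u16)byte_e << 8) | byte_f;
}
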
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 2e51496c248e..72e0022a47bf 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -65,7 +65,7 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
65 65
66#ifdef CONFIG_MTD_XIP 66#ifdef CONFIG_MTD_XIP
67 (void) map_read(map, base); 67 (void) map_read(map, base);
68 asm volatile (".rep 8; nop; .endr"); 68 xip_iprefetch();
69 local_irq_enable(); 69 local_irq_enable();
70#endif 70#endif
71 71
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 4be51a86a85c..aa07575eb288 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -132,6 +132,8 @@
132#define M29F800AB 0x0058 132#define M29F800AB 0x0058
133#define M29W800DT 0x00D7 133#define M29W800DT 0x00D7
134#define M29W800DB 0x005B 134#define M29W800DB 0x005B
135#define M29W400DT 0x00EE
136#define M29W400DB 0x00EF
135#define M29W160DT 0x22C4 137#define M29W160DT 0x22C4
136#define M29W160DB 0x2249 138#define M29W160DB 0x2249
137#define M29W040B 0x00E3 139#define M29W040B 0x00E3
@@ -160,6 +162,7 @@
160#define SST49LF030A 0x001C 162#define SST49LF030A 0x001C
161#define SST49LF040A 0x0051 163#define SST49LF040A 0x0051
162#define SST49LF080A 0x005B 164#define SST49LF080A 0x005B
165#define SST36VF3203 0x7354
163 166
164/* Toshiba */ 167/* Toshiba */
165#define TC58FVT160 0x00C2 168#define TC58FVT160 0x00C2
@@ -1113,7 +1116,7 @@ static const struct amd_flash_info jedec_table[] = {
1113 .regions = { 1116 .regions = {
1114 ERASEINFO(0x10000,8), 1117 ERASEINFO(0x10000,8),
1115 } 1118 }
1116 }, { 1119 }, {
1117 .mfr_id = MANUFACTURER_MACRONIX, 1120 .mfr_id = MANUFACTURER_MACRONIX,
1118 .dev_id = MX29F016, 1121 .dev_id = MX29F016,
1119 .name = "Macronix MX29F016", 1122 .name = "Macronix MX29F016",
@@ -1125,7 +1128,7 @@ static const struct amd_flash_info jedec_table[] = {
1125 .regions = { 1128 .regions = {
1126 ERASEINFO(0x10000,32), 1129 ERASEINFO(0x10000,32),
1127 } 1130 }
1128 }, { 1131 }, {
1129 .mfr_id = MANUFACTURER_MACRONIX, 1132 .mfr_id = MANUFACTURER_MACRONIX,
1130 .dev_id = MX29F004T, 1133 .dev_id = MX29F004T,
1131 .name = "Macronix MX29F004T", 1134 .name = "Macronix MX29F004T",
@@ -1140,7 +1143,7 @@ static const struct amd_flash_info jedec_table[] = {
1140 ERASEINFO(0x02000,2), 1143 ERASEINFO(0x02000,2),
1141 ERASEINFO(0x04000,1), 1144 ERASEINFO(0x04000,1),
1142 } 1145 }
1143 }, { 1146 }, {
1144 .mfr_id = MANUFACTURER_MACRONIX, 1147 .mfr_id = MANUFACTURER_MACRONIX,
1145 .dev_id = MX29F004B, 1148 .dev_id = MX29F004B,
1146 .name = "Macronix MX29F004B", 1149 .name = "Macronix MX29F004B",
@@ -1218,7 +1221,7 @@ static const struct amd_flash_info jedec_table[] = {
1218 .regions = { 1221 .regions = {
1219 ERASEINFO(0x40000,16), 1222 ERASEINFO(0x40000,16),
1220 } 1223 }
1221 }, { 1224 }, {
1222 .mfr_id = MANUFACTURER_SST, 1225 .mfr_id = MANUFACTURER_SST,
1223 .dev_id = SST39LF512, 1226 .dev_id = SST39LF512,
1224 .name = "SST 39LF512", 1227 .name = "SST 39LF512",
@@ -1230,7 +1233,7 @@ static const struct amd_flash_info jedec_table[] = {
1230 .regions = { 1233 .regions = {
1231 ERASEINFO(0x01000,16), 1234 ERASEINFO(0x01000,16),
1232 } 1235 }
1233 }, { 1236 }, {
1234 .mfr_id = MANUFACTURER_SST, 1237 .mfr_id = MANUFACTURER_SST,
1235 .dev_id = SST39LF010, 1238 .dev_id = SST39LF010,
1236 .name = "SST 39LF010", 1239 .name = "SST 39LF010",
@@ -1242,7 +1245,7 @@ static const struct amd_flash_info jedec_table[] = {
1242 .regions = { 1245 .regions = {
1243 ERASEINFO(0x01000,32), 1246 ERASEINFO(0x01000,32),
1244 } 1247 }
1245 }, { 1248 }, {
1246 .mfr_id = MANUFACTURER_SST, 1249 .mfr_id = MANUFACTURER_SST,
1247 .dev_id = SST29EE020, 1250 .dev_id = SST29EE020,
1248 .name = "SST 29EE020", 1251 .name = "SST 29EE020",
@@ -1276,7 +1279,7 @@ static const struct amd_flash_info jedec_table[] = {
1276 .regions = { 1279 .regions = {
1277 ERASEINFO(0x01000,64), 1280 ERASEINFO(0x01000,64),
1278 } 1281 }
1279 }, { 1282 }, {
1280 .mfr_id = MANUFACTURER_SST, 1283 .mfr_id = MANUFACTURER_SST,
1281 .dev_id = SST39LF040, 1284 .dev_id = SST39LF040,
1282 .name = "SST 39LF040", 1285 .name = "SST 39LF040",
@@ -1288,7 +1291,7 @@ static const struct amd_flash_info jedec_table[] = {
1288 .regions = { 1291 .regions = {
1289 ERASEINFO(0x01000,128), 1292 ERASEINFO(0x01000,128),
1290 } 1293 }
1291 }, { 1294 }, {
1292 .mfr_id = MANUFACTURER_SST, 1295 .mfr_id = MANUFACTURER_SST,
1293 .dev_id = SST39SF010A, 1296 .dev_id = SST39SF010A,
1294 .name = "SST 39SF010A", 1297 .name = "SST 39SF010A",
@@ -1300,7 +1303,7 @@ static const struct amd_flash_info jedec_table[] = {
1300 .regions = { 1303 .regions = {
1301 ERASEINFO(0x01000,32), 1304 ERASEINFO(0x01000,32),
1302 } 1305 }
1303 }, { 1306 }, {
1304 .mfr_id = MANUFACTURER_SST, 1307 .mfr_id = MANUFACTURER_SST,
1305 .dev_id = SST39SF020A, 1308 .dev_id = SST39SF020A,
1306 .name = "SST 39SF020A", 1309 .name = "SST 39SF020A",
@@ -1412,6 +1415,18 @@ static const struct amd_flash_info jedec_table[] = {
1412 ERASEINFO(0x1000,256) 1415 ERASEINFO(0x1000,256)
1413 } 1416 }
1414 }, { 1417 }, {
1418 .mfr_id = MANUFACTURER_SST,
1419 .dev_id = SST36VF3203,
1420 .name = "SST 36VF3203",
1421 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1422 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1423 .dev_size = SIZE_4MiB,
1424 .cmd_set = P_ID_AMD_STD,
1425 .nr_regions = 1,
1426 .regions = {
1427 ERASEINFO(0x10000,64),
1428 }
1429 }, {
1415 .mfr_id = MANUFACTURER_ST, 1430 .mfr_id = MANUFACTURER_ST,
1416 .dev_id = M29F800AB, 1431 .dev_id = M29F800AB,
1417 .name = "ST M29F800AB", 1432 .name = "ST M29F800AB",
@@ -1426,7 +1441,7 @@ static const struct amd_flash_info jedec_table[] = {
1426 ERASEINFO(0x08000,1), 1441 ERASEINFO(0x08000,1),
1427 ERASEINFO(0x10000,15), 1442 ERASEINFO(0x10000,15),
1428 } 1443 }
1429 }, { 1444 }, {
1430 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1445 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1431 .dev_id = M29W800DT, 1446 .dev_id = M29W800DT,
1432 .name = "ST M29W800DT", 1447 .name = "ST M29W800DT",
@@ -1456,6 +1471,36 @@ static const struct amd_flash_info jedec_table[] = {
1456 ERASEINFO(0x08000,1), 1471 ERASEINFO(0x08000,1),
1457 ERASEINFO(0x10000,15) 1472 ERASEINFO(0x10000,15)
1458 } 1473 }
1474 }, {
1475 .mfr_id = MANUFACTURER_ST,
1476 .dev_id = M29W400DT,
1477 .name = "ST M29W400DT",
1478 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1479 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1480 .dev_size = SIZE_512KiB,
1481 .cmd_set = P_ID_AMD_STD,
1482 .nr_regions = 4,
1483 .regions = {
1484 ERASEINFO(0x04000,7),
1485 ERASEINFO(0x02000,1),
1486 ERASEINFO(0x08000,2),
1487 ERASEINFO(0x10000,1)
1488 }
1489 }, {
1490 .mfr_id = MANUFACTURER_ST,
1491 .dev_id = M29W400DB,
1492 .name = "ST M29W400DB",
1493 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1494 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1495 .dev_size = SIZE_512KiB,
1496 .cmd_set = P_ID_AMD_STD,
1497 .nr_regions = 4,
1498 .regions = {
1499 ERASEINFO(0x04000,1),
1500 ERASEINFO(0x02000,2),
1501 ERASEINFO(0x08000,1),
1502 ERASEINFO(0x10000,7)
1503 }
1459 }, { 1504 }, {
1460 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1505 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1461 .dev_id = M29W160DT, 1506 .dev_id = M29W160DT,
@@ -1486,7 +1531,7 @@ static const struct amd_flash_info jedec_table[] = {
1486 ERASEINFO(0x08000,1), 1531 ERASEINFO(0x08000,1),
1487 ERASEINFO(0x10000,31) 1532 ERASEINFO(0x10000,31)
1488 } 1533 }
1489 }, { 1534 }, {
1490 .mfr_id = MANUFACTURER_ST, 1535 .mfr_id = MANUFACTURER_ST,
1491 .dev_id = M29W040B, 1536 .dev_id = M29W040B,
1492 .name = "ST M29W040B", 1537 .name = "ST M29W040B",
@@ -1498,7 +1543,7 @@ static const struct amd_flash_info jedec_table[] = {
1498 .regions = { 1543 .regions = {
1499 ERASEINFO(0x10000,8), 1544 ERASEINFO(0x10000,8),
1500 } 1545 }
1501 }, { 1546 }, {
1502 .mfr_id = MANUFACTURER_ST, 1547 .mfr_id = MANUFACTURER_ST,
1503 .dev_id = M50FW040, 1548 .dev_id = M50FW040,
1504 .name = "ST M50FW040", 1549 .name = "ST M50FW040",
@@ -1510,7 +1555,7 @@ static const struct amd_flash_info jedec_table[] = {
1510 .regions = { 1555 .regions = {
1511 ERASEINFO(0x10000,8), 1556 ERASEINFO(0x10000,8),
1512 } 1557 }
1513 }, { 1558 }, {
1514 .mfr_id = MANUFACTURER_ST, 1559 .mfr_id = MANUFACTURER_ST,
1515 .dev_id = M50FW080, 1560 .dev_id = M50FW080,
1516 .name = "ST M50FW080", 1561 .name = "ST M50FW080",
@@ -1522,7 +1567,7 @@ static const struct amd_flash_info jedec_table[] = {
1522 .regions = { 1567 .regions = {
1523 ERASEINFO(0x10000,16), 1568 ERASEINFO(0x10000,16),
1524 } 1569 }
1525 }, { 1570 }, {
1526 .mfr_id = MANUFACTURER_ST, 1571 .mfr_id = MANUFACTURER_ST,
1527 .dev_id = M50FW016, 1572 .dev_id = M50FW016,
1528 .name = "ST M50FW016", 1573 .name = "ST M50FW016",
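
[Editor's note] ERASEINFO(sz, n) declares n erase blocks of sz bytes, and a consistent table entry's regions sum to its dev_size. The new M29W400DT/DB entries are worth double-checking on that score; a hedged arithmetic check:

/* Regions of the M29W400DT entry added above: */
static unsigned long m29w400dt_region_bytes(void)
{
	return 7 * 0x4000 + 1 * 0x2000 + 2 * 0x8000 + 1 * 0x10000;
	/* = 0x3e000 (248 KiB), short of the declared SIZE_512KiB;
	 * the M29W160DT regions nearby, by contrast, sum to that
	 * chip's full 2 MiB, so compare these sizes against the
	 * datasheet before relying on the entry. */
}
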
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index b44292abd9f7..e472a0e9de9d 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -119,7 +119,8 @@ static struct mtd_partition * newpart(char *s,
119 char *p; 119 char *p;
120 120
121 name = ++s; 121 name = ++s;
122 if ((p = strchr(name, delim)) == 0) 122 p = strchr(name, delim);
123 if (!p)
123 { 124 {
124 printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim); 125 printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
125 return NULL; 126 return NULL;
@@ -159,9 +160,10 @@ static struct mtd_partition * newpart(char *s,
159 return NULL; 160 return NULL;
160 } 161 }
161 /* more partitions follow, parse them */ 162 /* more partitions follow, parse them */
162 if ((parts = newpart(s + 1, &s, num_parts, 163 parts = newpart(s + 1, &s, num_parts, this_part + 1,
163 this_part + 1, &extra_mem, extra_mem_size)) == 0) 164 &extra_mem, extra_mem_size);
164 return NULL; 165 if (!parts)
166 return NULL;
165 } 167 }
166 else 168 else
167 { /* this is the last partition: allocate space for all */ 169 { /* this is the last partition: allocate space for all */
@@ -308,9 +310,6 @@ static int parse_cmdline_partitions(struct mtd_info *master,
308 struct cmdline_mtd_partition *part; 310 struct cmdline_mtd_partition *part;
309 char *mtd_id = master->name; 311 char *mtd_id = master->name;
310 312
311 if(!cmdline)
312 return -EINVAL;
313
314 /* parse command line */ 313 /* parse command line */
315 if (!cmdline_parsed) 314 if (!cmdline_parsed)
316 mtdpart_setup_real(cmdline); 315 mtdpart_setup_real(cmdline);
@@ -341,7 +340,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
341 return part->num_parts; 340 return part->num_parts;
342 } 341 }
343 } 342 }
344 return -EINVAL; 343 return 0;
345} 344}
346 345
347 346
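
[Editor's note] Besides untangling the assignment-inside-if constructs, the behavioural change here is in parse_cmdline_partitions(): an absent or non-matching mtdparts= string now yields 0 ("no partitions found") instead of -EINVAL, letting callers fall through to other parsers rather than treat it as a hard error. For reference, the syntax this parser consumes looks like the following (the mtd-id "physmap-flash.0" is a hypothetical example):

/* mtdparts=<mtd-id>:<size>[@offset](<name>)[ro],...  with "-"
 * meaning "remainder of the device". */
static const char example_mtdparts[] =
	"mtdparts=physmap-flash.0:512k(bootloader)ro,64k(env),-(rootfs)";
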
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 811d56fd890f..35ed1103dbb2 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -77,6 +77,13 @@ config MTD_M25P80
77 if you want to specify device partitioning or to use a device which 77 if you want to specify device partitioning or to use a device which
78 doesn't support the JEDEC ID instruction. 78 doesn't support the JEDEC ID instruction.
79 79
80config M25PXX_USE_FAST_READ
81 bool "Use FAST_READ OPCode allowing SPI CLK <= 50MHz"
82 depends on MTD_M25P80
83 default y
84 help
85 This option enables FAST_READ access supported by ST M25Pxx.
86
80config MTD_SLRAM 87config MTD_SLRAM
81 tristate "Uncached system RAM" 88 tristate "Uncached system RAM"
82 help 89 help
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index ad1880c67518..519d942e7940 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -305,7 +305,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
305 } 305 }
306 list_add(&dev->list, &blkmtd_device_list); 306 list_add(&dev->list, &blkmtd_device_list);
307 INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index, 307 INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
308 dev->mtd.name + strlen("blkmtd: "), 308 dev->mtd.name + strlen("block2mtd: "),
309 dev->mtd.erasesize >> 10, dev->mtd.erasesize); 309 dev->mtd.erasesize >> 10, dev->mtd.erasesize);
310 return dev; 310 return dev;
311 311
@@ -366,9 +366,9 @@ static inline void kill_final_newline(char *str)
366} 366}
367 367
368 368
369#define parse_err(fmt, args...) do { \ 369#define parse_err(fmt, args...) do { \
370 ERROR("block2mtd: " fmt "\n", ## args); \ 370 ERROR(fmt, ## args); \
371 return 0; \ 371 return 0; \
372} while (0) 372} while (0)
373 373
374#ifndef MODULE 374#ifndef MODULE
@@ -473,7 +473,7 @@ static void __devexit block2mtd_exit(void)
473 block2mtd_sync(&dev->mtd); 473 block2mtd_sync(&dev->mtd);
474 del_mtd_device(&dev->mtd); 474 del_mtd_device(&dev->mtd);
475 INFO("mtd%d: [%s] removed", dev->mtd.index, 475 INFO("mtd%d: [%s] removed", dev->mtd.index,
476 dev->mtd.name + strlen("blkmtd: ")); 476 dev->mtd.name + strlen("block2mtd: "));
477 list_del(&dev->list); 477 list_del(&dev->list);
478 block2mtd_free_device(dev); 478 block2mtd_free_device(dev);
479 } 479 }
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 99fd210feaec..1d324e5c412d 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -275,7 +275,7 @@ static __u8 read8 (__u32 offset)
275{ 275{
276 volatile __u8 *data = (__u8 *) (FLASH_OFFSET + offset); 276 volatile __u8 *data = (__u8 *) (FLASH_OFFSET + offset);
277#ifdef LART_DEBUG 277#ifdef LART_DEBUG
278 printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.2x\n",__FUNCTION__,offset,*data); 278 printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.2x\n", __func__, offset, *data);
279#endif 279#endif
280 return (*data); 280 return (*data);
281} 281}
@@ -284,7 +284,7 @@ static __u32 read32 (__u32 offset)
284{ 284{
285 volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset); 285 volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
286#ifdef LART_DEBUG 286#ifdef LART_DEBUG
287 printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.8x\n",__FUNCTION__,offset,*data); 287 printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.8x\n", __func__, offset, *data);
288#endif 288#endif
289 return (*data); 289 return (*data);
290} 290}
@@ -294,7 +294,7 @@ static void write32 (__u32 x,__u32 offset)
294 volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset); 294 volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
295 *data = x; 295 *data = x;
296#ifdef LART_DEBUG 296#ifdef LART_DEBUG
297 printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n",__FUNCTION__,offset,*data); 297 printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n", __func__, offset, *data);
298#endif 298#endif
299} 299}
300 300
@@ -337,7 +337,7 @@ static inline int erase_block (__u32 offset)
337 __u32 status; 337 __u32 status;
338 338
339#ifdef LART_DEBUG 339#ifdef LART_DEBUG
340 printk (KERN_DEBUG "%s(): 0x%.8x\n",__FUNCTION__,offset); 340 printk (KERN_DEBUG "%s(): 0x%.8x\n", __func__, offset);
341#endif 341#endif
342 342
343 /* erase and confirm */ 343 /* erase and confirm */
@@ -371,7 +371,7 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
371 int i,first; 371 int i,first;
372 372
373#ifdef LART_DEBUG 373#ifdef LART_DEBUG
374 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n",__FUNCTION__,instr->addr,instr->len); 374 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
375#endif 375#endif
376 376
377 /* sanity checks */ 377 /* sanity checks */
@@ -442,7 +442,7 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
442static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retlen,u_char *buf) 442static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retlen,u_char *buf)
443{ 443{
444#ifdef LART_DEBUG 444#ifdef LART_DEBUG
445 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n",__FUNCTION__,(__u32) from,len); 445 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
446#endif 446#endif
447 447
448 /* sanity checks */ 448 /* sanity checks */
@@ -488,7 +488,7 @@ static inline int write_dword (__u32 offset,__u32 x)
488 __u32 status; 488 __u32 status;
489 489
490#ifdef LART_DEBUG 490#ifdef LART_DEBUG
491 printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n",__FUNCTION__,offset,x); 491 printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n", __func__, offset, x);
492#endif 492#endif
493 493
494 /* setup writing */ 494 /* setup writing */
@@ -524,7 +524,7 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
524 int i,n; 524 int i,n;
525 525
526#ifdef LART_DEBUG 526#ifdef LART_DEBUG
527 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n",__FUNCTION__,(__u32) to,len); 527 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
528#endif 528#endif
529 529
530 *retlen = 0; 530 *retlen = 0;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 98df5bcc02f3..25efd331ef28 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -33,7 +33,7 @@
33/* Flash opcodes. */ 33/* Flash opcodes. */
34#define OPCODE_WREN 0x06 /* Write enable */ 34#define OPCODE_WREN 0x06 /* Write enable */
35#define OPCODE_RDSR 0x05 /* Read status register */ 35#define OPCODE_RDSR 0x05 /* Read status register */
36#define OPCODE_READ 0x03 /* Read data bytes (low frequency) */ 36#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
37#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */ 37#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
38#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */ 38#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
39#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */ 39#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
@@ -52,7 +52,15 @@
52 52
53/* Define max times to check status register before we give up. */ 53/* Define max times to check status register before we give up. */
54#define MAX_READY_WAIT_COUNT 100000 54#define MAX_READY_WAIT_COUNT 100000
55#define CMD_SIZE 4
55 56
57#ifdef CONFIG_M25PXX_USE_FAST_READ
58#define OPCODE_READ OPCODE_FAST_READ
59#define FAST_READ_DUMMY_BYTE 1
60#else
61#define OPCODE_READ OPCODE_NORM_READ
62#define FAST_READ_DUMMY_BYTE 0
63#endif
56 64
57#ifdef CONFIG_MTD_PARTITIONS 65#ifdef CONFIG_MTD_PARTITIONS
58#define mtd_has_partitions() (1) 66#define mtd_has_partitions() (1)
@@ -68,7 +76,7 @@ struct m25p {
68 struct mtd_info mtd; 76 struct mtd_info mtd;
69 unsigned partitioned:1; 77 unsigned partitioned:1;
70 u8 erase_opcode; 78 u8 erase_opcode;
71 u8 command[4]; 79 u8 command[CMD_SIZE + FAST_READ_DUMMY_BYTE];
72}; 80};
73 81
74static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) 82static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -151,7 +159,7 @@ static int wait_till_ready(struct m25p *flash)
151static int erase_sector(struct m25p *flash, u32 offset) 159static int erase_sector(struct m25p *flash, u32 offset)
152{ 160{
153 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n", 161 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
154 flash->spi->dev.bus_id, __FUNCTION__, 162 flash->spi->dev.bus_id, __func__,
155 flash->mtd.erasesize / 1024, offset); 163 flash->mtd.erasesize / 1024, offset);
156 164
157 /* Wait until finished previous write command. */ 165 /* Wait until finished previous write command. */
@@ -167,7 +175,7 @@ static int erase_sector(struct m25p *flash, u32 offset)
167 flash->command[2] = offset >> 8; 175 flash->command[2] = offset >> 8;
168 flash->command[3] = offset; 176 flash->command[3] = offset;
169 177
170 spi_write(flash->spi, flash->command, sizeof(flash->command)); 178 spi_write(flash->spi, flash->command, CMD_SIZE);
171 179
172 return 0; 180 return 0;
173} 181}
@@ -188,7 +196,7 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
188 u32 addr,len; 196 u32 addr,len;
189 197
190 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n", 198 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
191 flash->spi->dev.bus_id, __FUNCTION__, "at", 199 flash->spi->dev.bus_id, __func__, "at",
192 (u32)instr->addr, instr->len); 200 (u32)instr->addr, instr->len);
193 201
194 /* sanity checks */ 202 /* sanity checks */
@@ -240,7 +248,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
240 struct spi_message m; 248 struct spi_message m;
241 249
242 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 250 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
243 flash->spi->dev.bus_id, __FUNCTION__, "from", 251 flash->spi->dev.bus_id, __func__, "from",
244 (u32)from, len); 252 (u32)from, len);
245 253
246 /* sanity checks */ 254 /* sanity checks */
@@ -253,8 +261,12 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
253 spi_message_init(&m); 261 spi_message_init(&m);
254 memset(t, 0, (sizeof t)); 262 memset(t, 0, (sizeof t));
255 263
264 /* NOTE:
265 * OPCODE_FAST_READ (if available) is faster.
266 * Should add 1 byte DUMMY_BYTE.
267 */
256 t[0].tx_buf = flash->command; 268 t[0].tx_buf = flash->command;
257 t[0].len = sizeof(flash->command); 269 t[0].len = CMD_SIZE + FAST_READ_DUMMY_BYTE;
258 spi_message_add_tail(&t[0], &m); 270 spi_message_add_tail(&t[0], &m);
259 271
260 t[1].rx_buf = buf; 272 t[1].rx_buf = buf;
@@ -287,7 +299,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
287 299
288 spi_sync(flash->spi, &m); 300 spi_sync(flash->spi, &m);
289 301
290 *retlen = m.actual_length - sizeof(flash->command); 302 *retlen = m.actual_length - CMD_SIZE - FAST_READ_DUMMY_BYTE;
291 303
292 mutex_unlock(&flash->lock); 304 mutex_unlock(&flash->lock);
293 305
@@ -308,7 +320,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
308 struct spi_message m; 320 struct spi_message m;
309 321
310 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 322 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
311 flash->spi->dev.bus_id, __FUNCTION__, "to", 323 flash->spi->dev.bus_id, __func__, "to",
312 (u32)to, len); 324 (u32)to, len);
313 325
314 if (retlen) 326 if (retlen)
@@ -325,7 +337,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
325 memset(t, 0, (sizeof t)); 337 memset(t, 0, (sizeof t));
326 338
327 t[0].tx_buf = flash->command; 339 t[0].tx_buf = flash->command;
328 t[0].len = sizeof(flash->command); 340 t[0].len = CMD_SIZE;
329 spi_message_add_tail(&t[0], &m); 341 spi_message_add_tail(&t[0], &m);
330 342
331 t[1].tx_buf = buf; 343 t[1].tx_buf = buf;
@@ -354,7 +366,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
354 366
355 spi_sync(flash->spi, &m); 367 spi_sync(flash->spi, &m);
356 368
357 *retlen = m.actual_length - sizeof(flash->command); 369 *retlen = m.actual_length - CMD_SIZE;
358 } else { 370 } else {
359 u32 i; 371 u32 i;
360 372
@@ -364,7 +376,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
364 t[1].len = page_size; 376 t[1].len = page_size;
365 spi_sync(flash->spi, &m); 377 spi_sync(flash->spi, &m);
366 378
367 *retlen = m.actual_length - sizeof(flash->command); 379 *retlen = m.actual_length - CMD_SIZE;
368 380
369 /* write everything in PAGESIZE chunks */ 381 /* write everything in PAGESIZE chunks */
370 for (i = page_size; i < len; i += page_size) { 382 for (i = page_size; i < len; i += page_size) {
@@ -387,8 +399,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
387 spi_sync(flash->spi, &m); 399 spi_sync(flash->spi, &m);
388 400
389 if (retlen) 401 if (retlen)
390 *retlen += m.actual_length 402 *retlen += m.actual_length - CMD_SIZE;
391 - sizeof(flash->command);
392 } 403 }
393 } 404 }
394 405
@@ -435,6 +446,7 @@ static struct flash_info __devinitdata m25p_data [] = {
435 { "at25fs040", 0x1f6604, 64 * 1024, 8, SECT_4K, }, 446 { "at25fs040", 0x1f6604, 64 * 1024, 8, SECT_4K, },
436 447
437 { "at25df041a", 0x1f4401, 64 * 1024, 8, SECT_4K, }, 448 { "at25df041a", 0x1f4401, 64 * 1024, 8, SECT_4K, },
449 { "at25df641", 0x1f4800, 64 * 1024, 128, SECT_4K, },
438 450
439 { "at26f004", 0x1f0400, 64 * 1024, 8, SECT_4K, }, 451 { "at26f004", 0x1f0400, 64 * 1024, 8, SECT_4K, },
440 { "at26df081a", 0x1f4501, 64 * 1024, 16, SECT_4K, }, 452 { "at26df081a", 0x1f4501, 64 * 1024, 16, SECT_4K, },
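
[Editor's note] The FAST_READ support hinges on one framing detail: opcode 0x0b needs a dummy byte between the 3-byte address and the returned data, while plain 0x03 does not. Hence CMD_SIZE (4: opcode plus 24-bit address) and the compile-time FAST_READ_DUMMY_BYTE, added to the command transfer for reads but never for writes or erases. Sketch of the command buffer the driver builds:

#include <linux/types.h>

/* Command framing implied by the CMD_SIZE/FAST_READ_DUMMY_BYTE
 * arithmetic above (sketch). */
static void build_read_cmd(u8 *cmd, u8 opcode, u32 addr)
{
	cmd[0] = opcode;	/* 0x03 normal read, 0x0b fast read */
	cmd[1] = addr >> 16;	/* 24-bit address, MSB first */
	cmd[2] = addr >> 8;
	cmd[3] = addr;
	/* cmd[4], sent only in the fast-read case, is a don't-care
	 * byte that gives the flash time to start driving data */
}
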
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index e427c82d5f4c..bf485ff49457 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/mtd/compatmac.h> 18#include <linux/mtd/compatmac.h>
19#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
20#include <linux/mtd/mtdram.h>
20 21
21static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE; 22static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
22static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE; 23static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 180298b92a7a..5f960182da95 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -282,7 +282,7 @@ static int phram_setup(const char *val, struct kernel_param *kp)
282} 282}
283 283
284module_param_call(phram, phram_setup, NULL, NULL, 000); 284module_param_call(phram, phram_setup, NULL, NULL, 000);
285MODULE_PARM_DESC(phram,"Memory region to map. \"map=<name>,<start>,<length>\""); 285MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
286 286
287 287
288static int __init init_phram(void) 288static int __init init_phram(void)
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index c815d0f38577..4a79b187b568 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -136,8 +136,6 @@ typedef struct partition_t {
136#endif 136#endif
137} partition_t; 137} partition_t;
138 138
139void ftl_freepart(partition_t *part);
140
141/* Partition state flags */ 139/* Partition state flags */
142#define FTL_FORMATTED 0x01 140#define FTL_FORMATTED 0x01
143 141
@@ -1014,7 +1012,7 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
1014 1012
1015/*====================================================================*/ 1013/*====================================================================*/
1016 1014
1017void ftl_freepart(partition_t *part) 1015static void ftl_freepart(partition_t *part)
1018{ 1016{
1019 vfree(part->VirtualBlockMap); 1017 vfree(part->VirtualBlockMap);
1020 part->VirtualBlockMap = NULL; 1018 part->VirtualBlockMap = NULL;
@@ -1069,7 +1067,7 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
1069 kfree(dev); 1067 kfree(dev);
1070} 1068}
1071 1069
1072struct mtd_blktrans_ops ftl_tr = { 1070static struct mtd_blktrans_ops ftl_tr = {
1073 .name = "ftl", 1071 .name = "ftl",
1074 .major = FTL_MAJOR, 1072 .major = FTL_MAJOR,
1075 .part_bits = PART_BITS, 1073 .part_bits = PART_BITS,
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index b8917beeb650..c551d2f0779c 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -41,11 +41,6 @@
41 41
42char inftlmountrev[]="$Revision: 1.18 $"; 42char inftlmountrev[]="$Revision: 1.18 $";
43 43
44extern int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
45 size_t *retlen, uint8_t *buf);
46extern int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
47 size_t *retlen, uint8_t *buf);
48
49/* 44/*
50 * find_boot_record: Find the INFTL Media Header and its Spare copy which 45 * find_boot_record: Find the INFTL Media Header and its Spare copy which
51 * contains the various device information of the INFTL partition and 46 * contains the various device information of the INFTL partition and
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 12c253664eb2..1bd69aa9e22a 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -21,6 +21,9 @@ config MTD_PHYSMAP
21 particular board as well as the bus width, either statically 21 particular board as well as the bus width, either statically
22 with config options or at run-time. 22 with config options or at run-time.
23 23
24 To compile this driver as a module, choose M here: the
25 module will be called physmap.
26
24config MTD_PHYSMAP_START 27config MTD_PHYSMAP_START
25 hex "Physical start address of flash mapping" 28 hex "Physical start address of flash mapping"
26 depends on MTD_PHYSMAP 29 depends on MTD_PHYSMAP
diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c
index fc3b2672d1e2..1f492062f8ca 100644
--- a/drivers/mtd/maps/bast-flash.c
+++ b/drivers/mtd/maps/bast-flash.c
@@ -137,7 +137,7 @@ static int bast_flash_probe(struct platform_device *pdev)
137 if (info->map.size > AREA_MAXSIZE) 137 if (info->map.size > AREA_MAXSIZE)
138 info->map.size = AREA_MAXSIZE; 138 info->map.size = AREA_MAXSIZE;
139 139
140 pr_debug("%s: area %08lx, size %ld\n", __FUNCTION__, 140 pr_debug("%s: area %08lx, size %ld\n", __func__,
141 info->map.phys, info->map.size); 141 info->map.phys, info->map.size);
142 142
143 info->area = request_mem_region(res->start, info->map.size, 143 info->area = request_mem_region(res->start, info->map.size,
@@ -149,7 +149,7 @@ static int bast_flash_probe(struct platform_device *pdev)
149 } 149 }
150 150
151 info->map.virt = ioremap(res->start, info->map.size); 151 info->map.virt = ioremap(res->start, info->map.size);
152 pr_debug("%s: virt at %08x\n", __FUNCTION__, (int)info->map.virt); 152 pr_debug("%s: virt at %08x\n", __func__, (int)info->map.virt);
153 153
154 if (info->map.virt == 0) { 154 if (info->map.virt == 0) {
155 printk(KERN_ERR PFX "failed to ioremap() region\n"); 155 printk(KERN_ERR PFX "failed to ioremap() region\n");
@@ -223,3 +223,4 @@ module_exit(bast_flash_exit);
223MODULE_LICENSE("GPL"); 223MODULE_LICENSE("GPL");
224MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 224MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
225MODULE_DESCRIPTION("BAST MTD Map driver"); 225MODULE_DESCRIPTION("BAST MTD Map driver");
226MODULE_ALIAS("platform:bast-nor");
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 688ef495888a..59d8fb49270a 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -28,6 +28,9 @@
28 28
29#define ROM_PROBE_STEP_SIZE (64*1024) 29#define ROM_PROBE_STEP_SIZE (64*1024)
30 30
31#define DEV_CK804 1
32#define DEV_MCP55 2
33
31struct ck804xrom_window { 34struct ck804xrom_window {
32 void __iomem *virt; 35 void __iomem *virt;
33 unsigned long phys; 36 unsigned long phys;
@@ -45,8 +48,9 @@ struct ck804xrom_map_info {
45 char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN]; 48 char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
46}; 49};
47 50
48 51/*
49/* The 2 bits controlling the window size are often set to allow reading 52 * The following applies to ck804 only:
53 * The 2 bits controlling the window size are often set to allow reading
50 * the BIOS, but too small to allow writing, since the lock registers are 54 * the BIOS, but too small to allow writing, since the lock registers are
51 * 4MiB lower in the address space than the data. 55 * 4MiB lower in the address space than the data.
52 * 56 *
@@ -58,10 +62,17 @@ struct ck804xrom_map_info {
58 * If only the 7 Bit is set, it is a 4MiB window. Otherwise, a 62 * If only the 7 Bit is set, it is a 4MiB window. Otherwise, a
59 * 64KiB window. 63 * 64KiB window.
60 * 64 *
65 * The following applies to mcp55 only:
66 * The 15 bits controlling the window size are distributed as follows:
67 * byte @0x88: bit 0..7
68 * byte @0x8c: bit 8..15
69 * word @0x90: bit 16..30
70 * If all bits are enabled, we have a 16? MiB window
71 * Please set win_size_bits to 0x7fffffff if you actually want to do something
61 */ 72 */
62static uint win_size_bits = 0; 73static uint win_size_bits = 0;
63module_param(win_size_bits, uint, 0); 74module_param(win_size_bits, uint, 0);
64MODULE_PARM_DESC(win_size_bits, "ROM window size bits override for 0x88 byte, normally set by BIOS."); 75MODULE_PARM_DESC(win_size_bits, "ROM window size bits override, normally set by BIOS.");
65 76
66static struct ck804xrom_window ck804xrom_window = { 77static struct ck804xrom_window ck804xrom_window = {
67 .maps = LIST_HEAD_INIT(ck804xrom_window.maps), 78 .maps = LIST_HEAD_INIT(ck804xrom_window.maps),
@@ -102,10 +113,11 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
102 113
103 114
104static int __devinit ck804xrom_init_one (struct pci_dev *pdev, 115static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
105 const struct pci_device_id *ent) 116 const struct pci_device_id *ent)
106{ 117{
107 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
108 u8 byte; 119 u8 byte;
120 u16 word;
109 struct ck804xrom_window *window = &ck804xrom_window; 121 struct ck804xrom_window *window = &ck804xrom_window;
110 struct ck804xrom_map_info *map = NULL; 122 struct ck804xrom_map_info *map = NULL;
111 unsigned long map_top; 123 unsigned long map_top;
@@ -113,26 +125,42 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
113 /* Remember the pci dev I find the window in */ 125 /* Remember the pci dev I find the window in */
114 window->pdev = pci_dev_get(pdev); 126 window->pdev = pci_dev_get(pdev);
115 127
116 /* Enable the selected rom window. This is often incorrectly 128 switch (ent->driver_data) {
117 * set up by the BIOS, and the 4MiB offset for the lock registers 129 case DEV_CK804:
118 * requires the full 5MiB of window space. 130 /* Enable the selected rom window. This is often incorrectly
119 * 131 * set up by the BIOS, and the 4MiB offset for the lock registers
120 * This 'write, then read' approach leaves the bits for 132 * requires the full 5MiB of window space.
121 * other uses of the hardware info. 133 *
122 */ 134 * This 'write, then read' approach leaves the bits for
123 pci_read_config_byte(pdev, 0x88, &byte); 135 * other uses of the hardware info.
124 pci_write_config_byte(pdev, 0x88, byte | win_size_bits ); 136 */
125 137 pci_read_config_byte(pdev, 0x88, &byte);
126 138 pci_write_config_byte(pdev, 0x88, byte | win_size_bits );
 127 /* Assume the rom window is properly set up, and find its size */ 139
 128 pci_read_config_byte(pdev, 0x88, &byte); 140 /* Assume the rom window is properly set up, and find its size */
129 141 pci_read_config_byte(pdev, 0x88, &byte);
130 if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6))) 142
131 window->phys = 0xffb00000; /* 5MiB */ 143 if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
132 else if ((byte & (1<<7)) == (1<<7)) 144 window->phys = 0xffb00000; /* 5MiB */
133 window->phys = 0xffc00000; /* 4MiB */ 145 else if ((byte & (1<<7)) == (1<<7))
134 else 146 window->phys = 0xffc00000; /* 4MiB */
135 window->phys = 0xffff0000; /* 64KiB */ 147 else
148 window->phys = 0xffff0000; /* 64KiB */
149 break;
150
151 case DEV_MCP55:
152 pci_read_config_byte(pdev, 0x88, &byte);
153 pci_write_config_byte(pdev, 0x88, byte | (win_size_bits & 0xff));
154
155 pci_read_config_byte(pdev, 0x8c, &byte);
156 pci_write_config_byte(pdev, 0x8c, byte | ((win_size_bits & 0xff00) >> 8));
157
158 pci_read_config_word(pdev, 0x90, &word);
159 pci_write_config_word(pdev, 0x90, word | ((win_size_bits & 0x7fff0000) >> 16));
160
161 window->phys = 0xff000000; /* 16MiB, hardcoded for now */
162 break;
163 }
136 164
137 window->size = 0xffffffffUL - window->phys + 1UL; 165 window->size = 0xffffffffUL - window->phys + 1UL;
138 166
@@ -303,8 +331,15 @@ static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
303} 331}
304 332
305static struct pci_device_id ck804xrom_pci_tbl[] = { 333static struct pci_device_id ck804xrom_pci_tbl[] = {
306 { PCI_VENDOR_ID_NVIDIA, 0x0051, 334 { PCI_VENDOR_ID_NVIDIA, 0x0051, PCI_ANY_ID, PCI_ANY_ID, DEV_CK804 },
307 PCI_ANY_ID, PCI_ANY_ID, }, /* nvidia ck804 */ 335 { PCI_VENDOR_ID_NVIDIA, 0x0360, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
336 { PCI_VENDOR_ID_NVIDIA, 0x0361, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
337 { PCI_VENDOR_ID_NVIDIA, 0x0362, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
338 { PCI_VENDOR_ID_NVIDIA, 0x0363, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
339 { PCI_VENDOR_ID_NVIDIA, 0x0364, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
340 { PCI_VENDOR_ID_NVIDIA, 0x0365, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
341 { PCI_VENDOR_ID_NVIDIA, 0x0366, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
342 { PCI_VENDOR_ID_NVIDIA, 0x0367, PCI_ANY_ID, PCI_ANY_ID, DEV_MCP55 },
308 { 0, } 343 { 0, }
309}; 344};
310 345
@@ -332,7 +367,7 @@ static int __init init_ck804xrom(void)
332 break; 367 break;
333 } 368 }
334 if (pdev) { 369 if (pdev) {
335 retVal = ck804xrom_init_one(pdev, &ck804xrom_pci_tbl[0]); 370 retVal = ck804xrom_init_one(pdev, id);
336 pci_dev_put(pdev); 371 pci_dev_put(pdev);
337 return retVal; 372 return retVal;
338 } 373 }
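
Note: the DEV_MCP55 branch above scatters win_size_bits across two byte registers and one word register. A minimal standalone sketch of the same bit split, handy for checking which bits land in which register (the 0x88/0x8c/0x90 offsets come from the hunk above; everything else is illustrative):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t win_size_bits = 0x7fffffff;	/* "enable the full window" value */

		uint8_t  reg88 = win_size_bits & 0xff;			/* bits 0..7   */
		uint8_t  reg8c = (win_size_bits & 0xff00) >> 8;		/* bits 8..15  */
		uint16_t reg90 = (win_size_bits & 0x7fff0000) >> 16;	/* bits 16..30 */

		printf("0x88=0x%02x 0x8c=0x%02x 0x90=0x%04x\n", reg88, reg8c, reg90);
		return 0;
	}

With the recommended 0x7fffffff this prints 0x88=0xff 0x8c=0xff 0x90=0x7fff, i.e. every window-size bit is ORed into the hardware, matching the three pci_write_config calls in the hunk.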
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index 6946d802e6f6..325c8880c437 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -190,6 +190,7 @@ static struct platform_driver armflash_driver = {
190 .remove = armflash_remove, 190 .remove = armflash_remove,
191 .driver = { 191 .driver = {
192 .name = "armflash", 192 .name = "armflash",
193 .owner = THIS_MODULE,
193 }, 194 },
194}; 195};
195 196
@@ -209,3 +210,4 @@ module_exit(armflash_exit);
209MODULE_AUTHOR("ARM Ltd"); 210MODULE_AUTHOR("ARM Ltd");
210MODULE_DESCRIPTION("ARM Integrator CFI map driver"); 211MODULE_DESCRIPTION("ARM Integrator CFI map driver");
211MODULE_LICENSE("GPL"); 212MODULE_LICENSE("GPL");
213MODULE_ALIAS("platform:armflash");
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index c26488a1793a..c8396b8574c4 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -253,6 +253,7 @@ static struct platform_driver ixp2000_flash_driver = {
253 .remove = ixp2000_flash_remove, 253 .remove = ixp2000_flash_remove,
254 .driver = { 254 .driver = {
255 .name = "IXP2000-Flash", 255 .name = "IXP2000-Flash",
256 .owner = THIS_MODULE,
256 }, 257 },
257}; 258};
258 259
@@ -270,4 +271,4 @@ module_init(ixp2000_flash_init);
270module_exit(ixp2000_flash_exit); 271module_exit(ixp2000_flash_exit);
271MODULE_LICENSE("GPL"); 272MODULE_LICENSE("GPL");
272MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>"); 273MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
273 274MODULE_ALIAS("platform:IXP2000-Flash");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7a828e3e6446..01f19a4714b5 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -275,6 +275,7 @@ static struct platform_driver ixp4xx_flash_driver = {
275 .remove = ixp4xx_flash_remove, 275 .remove = ixp4xx_flash_remove,
276 .driver = { 276 .driver = {
277 .name = "IXP4XX-Flash", 277 .name = "IXP4XX-Flash",
278 .owner = THIS_MODULE,
278 }, 279 },
279}; 280};
280 281
@@ -295,3 +296,4 @@ module_exit(ixp4xx_flash_exit);
295MODULE_LICENSE("GPL"); 296MODULE_LICENSE("GPL");
296MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems"); 297MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
297MODULE_AUTHOR("Deepak Saxena"); 298MODULE_AUTHOR("Deepak Saxena");
299MODULE_ALIAS("platform:IXP4XX-Flash");
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index e8d9ae535673..240b0e2d095d 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -70,7 +70,7 @@ static void omap_set_vpp(struct map_info *map, int enable)
70 } 70 }
71} 71}
72 72
73static int __devinit omapflash_probe(struct platform_device *pdev) 73static int __init omapflash_probe(struct platform_device *pdev)
74{ 74{
75 int err; 75 int err;
76 struct omapflash_info *info; 76 struct omapflash_info *info;
@@ -130,7 +130,7 @@ out_free_info:
130 return err; 130 return err;
131} 131}
132 132
133static int __devexit omapflash_remove(struct platform_device *pdev) 133static int __exit omapflash_remove(struct platform_device *pdev)
134{ 134{
135 struct omapflash_info *info = platform_get_drvdata(pdev); 135 struct omapflash_info *info = platform_get_drvdata(pdev);
136 136
@@ -152,16 +152,16 @@ static int __devexit omapflash_remove(struct platform_device *pdev)
152} 152}
153 153
154static struct platform_driver omapflash_driver = { 154static struct platform_driver omapflash_driver = {
155 .probe = omapflash_probe, 155 .remove = __exit_p(omapflash_remove),
156 .remove = __devexit_p(omapflash_remove),
157 .driver = { 156 .driver = {
158 .name = "omapflash", 157 .name = "omapflash",
158 .owner = THIS_MODULE,
159 }, 159 },
160}; 160};
161 161
162static int __init omapflash_init(void) 162static int __init omapflash_init(void)
163{ 163{
164 return platform_driver_register(&omapflash_driver); 164 return platform_driver_probe(&omapflash_driver, omapflash_probe);
165} 165}
166 166
167static void __exit omapflash_exit(void) 167static void __exit omapflash_exit(void)
@@ -174,4 +174,4 @@ module_exit(omapflash_exit);
174 174
175MODULE_LICENSE("GPL"); 175MODULE_LICENSE("GPL");
176MODULE_DESCRIPTION("MTD NOR map driver for TI OMAP boards"); 176MODULE_DESCRIPTION("MTD NOR map driver for TI OMAP boards");
177 177MODULE_ALIAS("platform:omapflash");
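
Note: the omap_nor conversion above is the standard platform_driver_probe() idiom: because the driver can only ever be bound once, at registration time, the probe routine may live in the __init section and the .probe member stays unset. A minimal sketch of the pattern, assuming hypothetical foo_* names:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int __init foo_probe(struct platform_device *pdev)
	{
		/* claim resources, map registers, register subsystem objects */
		return 0;
	}

	static int __exit foo_remove(struct platform_device *pdev)
	{
		/* undo everything foo_probe() did */
		return 0;
	}

	static struct platform_driver foo_driver = {
		/* no .probe here: it would keep __init code reachable */
		.remove	= __exit_p(foo_remove),
		.driver	= {
			.name	= "foo",
			.owner	= THIS_MODULE,
		},
	};

	static int __init foo_init(void)
	{
		/* one-shot registration; the probe code is discarded after boot */
		return platform_driver_probe(&foo_driver, foo_probe);
	}
	module_init(foo_init);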
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index eaeb56a4070a..1912d968718b 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
33#undef DEBUG 33#undef DEBUG
34#define DEBUG(n, format, arg...) \ 34#define DEBUG(n, format, arg...) \
35 if (n <= debug) { \ 35 if (n <= debug) { \
36 printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __FUNCTION__ , ## arg); \ 36 printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \
37 } 37 }
38 38
39#else 39#else
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index bc4649a17b9d..183255fcfdcb 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -242,6 +242,7 @@ static struct platform_driver physmap_flash_driver = {
242 .shutdown = physmap_flash_shutdown, 242 .shutdown = physmap_flash_shutdown,
243 .driver = { 243 .driver = {
244 .name = "physmap-flash", 244 .name = "physmap-flash",
245 .owner = THIS_MODULE,
245 }, 246 },
246}; 247};
247 248
@@ -319,3 +320,10 @@ module_exit(physmap_exit);
319MODULE_LICENSE("GPL"); 320MODULE_LICENSE("GPL");
320MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); 321MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
321MODULE_DESCRIPTION("Generic configurable MTD map driver"); 322MODULE_DESCRIPTION("Generic configurable MTD map driver");
323
324/* legacy platform drivers can't hotplug or coldplug */
325#ifndef PHYSMAP_COMPAT
326/* work with hotplug and coldplug */
327MODULE_ALIAS("platform:physmap-flash");
328#endif
329
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 894c0b271289..f0b10ca05029 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -47,6 +47,7 @@ struct platram_info {
47 struct mtd_info *mtd; 47 struct mtd_info *mtd;
48 struct map_info map; 48 struct map_info map;
49 struct mtd_partition *partitions; 49 struct mtd_partition *partitions;
50 bool free_partitions;
50 struct resource *area; 51 struct resource *area;
51 struct platdata_mtd_ram *pdata; 52 struct platdata_mtd_ram *pdata;
52}; 53};
@@ -98,7 +99,8 @@ static int platram_remove(struct platform_device *pdev)
98#ifdef CONFIG_MTD_PARTITIONS 99#ifdef CONFIG_MTD_PARTITIONS
99 if (info->partitions) { 100 if (info->partitions) {
100 del_mtd_partitions(info->mtd); 101 del_mtd_partitions(info->mtd);
101 kfree(info->partitions); 102 if (info->free_partitions)
103 kfree(info->partitions);
102 } 104 }
103#endif 105#endif
104 del_mtd_device(info->mtd); 106 del_mtd_device(info->mtd);
@@ -176,7 +178,8 @@ static int platram_probe(struct platform_device *pdev)
176 178
177 info->map.phys = res->start; 179 info->map.phys = res->start;
178 info->map.size = (res->end - res->start) + 1; 180 info->map.size = (res->end - res->start) + 1;
179 info->map.name = pdata->mapname != NULL ? pdata->mapname : (char *)pdev->name; 181 info->map.name = pdata->mapname != NULL ?
182 (char *)pdata->mapname : (char *)pdev->name;
180 info->map.bankwidth = pdata->bankwidth; 183 info->map.bankwidth = pdata->bankwidth;
181 184
182 /* register our usage of the memory area */ 185 /* register our usage of the memory area */
@@ -203,9 +206,19 @@ static int platram_probe(struct platform_device *pdev)
203 206
204 dev_dbg(&pdev->dev, "initialised map, probing for mtd\n"); 207 dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
205 208
206 /* probe for the right mtd map driver */ 209 /* probe for the right mtd map driver
210 * supplied by the platform_data struct */
211
212 if (pdata->map_probes != 0) {
213 const char **map_probes = pdata->map_probes;
214
215 for ( ; !info->mtd && *map_probes; map_probes++)
216 info->mtd = do_map_probe(*map_probes , &info->map);
217 }
218 /* fallback to map_ram */
219 else
220 info->mtd = do_map_probe("map_ram", &info->map);
207 221
208 info->mtd = do_map_probe("map_ram" , &info->map);
209 if (info->mtd == NULL) { 222 if (info->mtd == NULL) {
210 dev_err(&pdev->dev, "failed to probe for map_ram\n"); 223 dev_err(&pdev->dev, "failed to probe for map_ram\n");
211 err = -ENOMEM; 224 err = -ENOMEM;
@@ -220,19 +233,21 @@ static int platram_probe(struct platform_device *pdev)
220 * to add this device whole */ 233 * to add this device whole */
221 234
222#ifdef CONFIG_MTD_PARTITIONS 235#ifdef CONFIG_MTD_PARTITIONS
223 if (pdata->nr_partitions > 0) { 236 if (!pdata->nr_partitions) {
224 const char **probes = { NULL }; 237 /* try to probe using the supplied probe type */
225 238 if (pdata->probes) {
226 if (pdata->probes) 239 err = parse_mtd_partitions(info->mtd, pdata->probes,
227 probes = (const char **)pdata->probes;
228
229 err = parse_mtd_partitions(info->mtd, probes,
230 &info->partitions, 0); 240 &info->partitions, 0);
231 if (err > 0) { 241 info->free_partitions = 1;
232 err = add_mtd_partitions(info->mtd, info->partitions, 242 if (err > 0)
233 err); 243 err = add_mtd_partitions(info->mtd,
244 info->partitions, err);
234 } 245 }
235 } 246 }
247 /* use the static mapping */
248 else
249 err = add_mtd_partitions(info->mtd, pdata->partitions,
250 pdata->nr_partitions);
236#endif /* CONFIG_MTD_PARTITIONS */ 251#endif /* CONFIG_MTD_PARTITIONS */
237 252
238 if (add_mtd_device(info->mtd)) { 253 if (add_mtd_device(info->mtd)) {
@@ -240,7 +255,9 @@ static int platram_probe(struct platform_device *pdev)
240 err = -ENOMEM; 255 err = -ENOMEM;
241 } 256 }
242 257
243 dev_info(&pdev->dev, "registered mtd device\n"); 258 if (!err)
259 dev_info(&pdev->dev, "registered mtd device\n");
260
244 return err; 261 return err;
245 262
246 exit_free: 263 exit_free:
@@ -251,6 +268,9 @@ static int platram_probe(struct platform_device *pdev)
251 268
252/* device driver info */ 269/* device driver info */
253 270
271/* work with hotplug and coldplug */
272MODULE_ALIAS("platform:mtd-ram");
273
254static struct platform_driver platram_driver = { 274static struct platform_driver platram_driver = {
255 .probe = platram_probe, 275 .probe = platram_probe,
256 .remove = platram_remove, 276 .remove = platram_remove,
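
Note: with the plat-ram changes above, a board can hand the driver an ordered list of map probes through its platform data; only when the list is absent does the driver fall back to "map_ram". A sketch of what that might look like in a board file, assuming the matching plat-ram.h header gains the map_probes field consumed in the hunk (the board names and values are invented):

	#include <linux/mtd/plat-ram.h>

	/* try ROM probing first; NULL terminates the list */
	static const char *board_sram_probes[] = { "map_rom", NULL };

	static struct platdata_mtd_ram board_sram_pdata = {
		.mapname	= "board-sram",		/* illustrative */
		.bankwidth	= 2,			/* 16-bit bank */
		.map_probes	= board_sram_probes,
	};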
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 02bde8c982ec..f43ba2815cbb 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -46,7 +46,7 @@ static struct mtd_partition **msp_parts;
46static struct map_info *msp_maps; 46static struct map_info *msp_maps;
47static int fcnt; 47static int fcnt;
48 48
49#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n",__FUNCTION__,__LINE__) 49#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
50 50
51int __init init_msp_flash(void) 51int __init init_msp_flash(void)
52{ 52{
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index f904e6bd02e0..c7d5a52a2d55 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -456,6 +456,7 @@ static struct platform_driver sa1100_mtd_driver = {
456 .shutdown = sa1100_mtd_shutdown, 456 .shutdown = sa1100_mtd_shutdown,
457 .driver = { 457 .driver = {
458 .name = "flash", 458 .name = "flash",
459 .owner = THIS_MODULE,
459 }, 460 },
460}; 461};
461 462
@@ -475,3 +476,4 @@ module_exit(sa1100_mtd_exit);
475MODULE_AUTHOR("Nicolas Pitre"); 476MODULE_AUTHOR("Nicolas Pitre");
476MODULE_DESCRIPTION("SA1100 CFI map driver"); 477MODULE_DESCRIPTION("SA1100 CFI map driver");
477MODULE_LICENSE("GPL"); 478MODULE_LICENSE("GPL");
479MODULE_ALIAS("platform:flash");
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 12fe53c0d2fc..917dc778f24e 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -92,7 +92,7 @@ int __init init_sharpsl(void)
92 parts = sharpsl_partitions; 92 parts = sharpsl_partitions;
93 nb_parts = ARRAY_SIZE(sharpsl_partitions); 93 nb_parts = ARRAY_SIZE(sharpsl_partitions);
94 94
95 printk(KERN_NOTICE "Using %s partision definition\n", part_type); 95 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
96 add_mtd_partitions(mymtd, parts, nb_parts); 96 add_mtd_partitions(mymtd, parts, nb_parts);
97 97
98 return 0; 98 return 0;
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 37e4ded9b600..521734057314 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -124,7 +124,7 @@ int __init init_tqm_mtd(void)
124 //request maximum flash size address space 124 //request maximum flash size address space
125 start_scan_addr = ioremap(flash_addr, flash_size); 125 start_scan_addr = ioremap(flash_addr, flash_size);
126 if (!start_scan_addr) { 126 if (!start_scan_addr) {
127 printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __FUNCTION__, flash_addr); 127 printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __func__, flash_addr);
128 return -EIO; 128 return -EIO;
129 } 129 }
130 130
@@ -132,7 +132,7 @@ int __init init_tqm_mtd(void)
132 if(mtd_size >= flash_size) 132 if(mtd_size >= flash_size)
133 break; 133 break;
134 134
135 printk(KERN_INFO "%s: chip probing count %d\n", __FUNCTION__, idx); 135 printk(KERN_INFO "%s: chip probing count %d\n", __func__, idx);
136 136
137 map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL); 137 map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
138 if(map_banks[idx] == NULL) { 138 if(map_banks[idx] == NULL) {
@@ -178,7 +178,7 @@ int __init init_tqm_mtd(void)
178 mtd_size += mtd_banks[idx]->size; 178 mtd_size += mtd_banks[idx]->size;
179 num_banks++; 179 num_banks++;
180 180
181 printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __FUNCTION__, num_banks, 181 printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __func__, num_banks,
182 mtd_banks[idx]->name, mtd_banks[idx]->size); 182 mtd_banks[idx]->name, mtd_banks[idx]->size);
183 } 183 }
184 } 184 }
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index d3cf05012b46..5a680e1e61f1 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -35,7 +35,7 @@
35 35
36#define OOPS_PAGE_SIZE 4096 36#define OOPS_PAGE_SIZE 4096
37 37
38struct mtdoops_context { 38static struct mtdoops_context {
39 int mtd_index; 39 int mtd_index;
40 struct work_struct work_erase; 40 struct work_struct work_erase;
41 struct work_struct work_write; 41 struct work_struct work_write;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 959fb86cda01..5076faf9ca66 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -278,6 +278,54 @@ config MTD_NAND_AT91
278 help 278 help
279 Enables support for NAND Flash / Smart Media Card interface 279 Enables support for NAND Flash / Smart Media Card interface
280 on Atmel AT91 processors. 280 on Atmel AT91 processors.
281choice
282 prompt "ECC management for NAND Flash / SmartMedia on AT91"
283 depends on MTD_NAND_AT91
284
285config MTD_NAND_AT91_ECC_HW
286 bool "Hardware ECC"
287 depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260
288 help
289 Uses hardware ECC provided by the at91sam9260/at91sam9263 chip
290 instead of software ECC.
 291 The hardware ECC controller is capable of single-bit error
 292 correction and 2-bit random error detection per page.
293
294 NB : hardware and software ECC schemes are incompatible.
295 If you switch from one to another, you'll have to erase your
296 mtd partition.
297
298 If unsure, say Y
299
300config MTD_NAND_AT91_ECC_SOFT
301 bool "Software ECC"
302 help
303 Uses software ECC.
304
305 NB : hardware and software ECC schemes are incompatible.
306 If you switch from one to another, you'll have to erase your
307 mtd partition.
308
309config MTD_NAND_AT91_ECC_NONE
310 bool "No ECC (testing only, DANGEROUS)"
311 depends on DEBUG_KERNEL
312 help
313 No ECC will be used.
314 It's not a good idea and it should be reserved for testing
 315 purposes only.
316
317 If unsure, say N
318
320
321endchoice
322
323config MTD_NAND_PXA3xx
324 bool "Support for NAND flash devices on PXA3xx"
325 depends on MTD_NAND && PXA3xx
326 help
327 This enables the driver for the NAND flash device found on
328 PXA3xx processors
281 329
282config MTD_NAND_CM_X270 330config MTD_NAND_CM_X270
283 tristate "Support for NAND Flash on CM-X270 modules" 331 tristate "Support for NAND Flash on CM-X270 modules"
@@ -330,4 +378,12 @@ config MTD_NAND_FSL_ELBC
330 Enabling this option will enable you to use this to control 378 Enabling this option will enable you to use this to control
331 external NAND devices. 379 external NAND devices.
332 380
381config MTD_NAND_FSL_UPM
382 tristate "Support for NAND on Freescale UPM"
383 depends on MTD_NAND && OF_GPIO && (PPC_83xx || PPC_85xx)
384 select FSL_LBC
385 help
386 Enables support for NAND Flash chips wired onto Freescale PowerPC
387 processor localbus with User-Programmable Machine support.
388
333endif # MTD_NAND 389endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 80d575eeee96..a6e74a46992a 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -27,10 +27,12 @@ obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
27obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o 27obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
28obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 28obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
30obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
30obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 31obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
31obj-$(CONFIG_MTD_ALAUDA) += alauda.o 32obj-$(CONFIG_MTD_ALAUDA) += alauda.o
32obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o 33obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
33obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o 34obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
34obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o 35obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
36obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
35 37
36nand-objs := nand_base.o nand_bbt.o 38nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index c9fb2acf4056..414ceaecdb3a 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -9,6 +9,15 @@
9 * Derived from drivers/mtd/spia.c 9 * Derived from drivers/mtd/spia.c
10 * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com) 10 * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
11 * 11 *
12 *
13 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
14 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007
15 *
16 * Derived from Das U-Boot source code
17 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
18 * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
19 *
20 *
12 * This program is free software; you can redistribute it and/or modify 21 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as 22 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation. 23 * published by the Free Software Foundation.
@@ -29,11 +38,59 @@
29#include <asm/arch/board.h> 38#include <asm/arch/board.h>
30#include <asm/arch/gpio.h> 39#include <asm/arch/gpio.h>
31 40
41#ifdef CONFIG_MTD_NAND_AT91_ECC_HW
42#define hard_ecc 1
43#else
44#define hard_ecc 0
45#endif
46
47#ifdef CONFIG_MTD_NAND_AT91_ECC_NONE
48#define no_ecc 1
49#else
50#define no_ecc 0
51#endif
52
53/* Register access macros */
54#define ecc_readl(add, reg) \
55 __raw_readl(add + AT91_ECC_##reg)
56#define ecc_writel(add, reg, value) \
57 __raw_writel((value), add + AT91_ECC_##reg)
58
59#include <asm/arch/at91_ecc.h> /* AT91SAM9260/3 ECC registers */
60
61/* oob layout for large page size
62 * bad block info is on bytes 0 and 1
 63 * the bytes have to be consecutive to avoid
64 * several NAND_CMD_RNDOUT during read
65 */
66static struct nand_ecclayout at91_oobinfo_large = {
67 .eccbytes = 4,
68 .eccpos = {60, 61, 62, 63},
69 .oobfree = {
70 {2, 58}
71 },
72};
73
74/* oob layout for small page size
75 * bad block info is on bytes 4 and 5
 76 * the bytes have to be consecutive to avoid
77 * several NAND_CMD_RNDOUT during read
78 */
79static struct nand_ecclayout at91_oobinfo_small = {
80 .eccbytes = 4,
81 .eccpos = {0, 1, 2, 3},
82 .oobfree = {
83 {6, 10}
84 },
85};
86
32struct at91_nand_host { 87struct at91_nand_host {
33 struct nand_chip nand_chip; 88 struct nand_chip nand_chip;
34 struct mtd_info mtd; 89 struct mtd_info mtd;
35 void __iomem *io_base; 90 void __iomem *io_base;
36 struct at91_nand_data *board; 91 struct at91_nand_data *board;
92 struct device *dev;
93 void __iomem *ecc;
37}; 94};
38 95
39/* 96/*
@@ -44,6 +101,12 @@ static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
44 struct nand_chip *nand_chip = mtd->priv; 101 struct nand_chip *nand_chip = mtd->priv;
45 struct at91_nand_host *host = nand_chip->priv; 102 struct at91_nand_host *host = nand_chip->priv;
46 103
104 if (host->board->enable_pin && (ctrl & NAND_CTRL_CHANGE)) {
105 if (ctrl & NAND_NCE)
106 at91_set_gpio_value(host->board->enable_pin, 0);
107 else
108 at91_set_gpio_value(host->board->enable_pin, 1);
109 }
47 if (cmd == NAND_CMD_NONE) 110 if (cmd == NAND_CMD_NONE)
48 return; 111 return;
49 112
@@ -82,8 +145,217 @@ static void at91_nand_disable(struct at91_nand_host *host)
82 at91_set_gpio_value(host->board->enable_pin, 1); 145 at91_set_gpio_value(host->board->enable_pin, 1);
83} 146}
84 147
148/*
149 * write oob for small pages
150 */
151static int at91_nand_write_oob_512(struct mtd_info *mtd,
152 struct nand_chip *chip, int page)
153{
154 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
155 int eccsize = chip->ecc.size, length = mtd->oobsize;
156 int len, pos, status = 0;
157 const uint8_t *bufpoi = chip->oob_poi;
158
159 pos = eccsize + chunk;
160
161 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
162 len = min_t(int, length, chunk);
163 chip->write_buf(mtd, bufpoi, len);
164 bufpoi += len;
165 length -= len;
166 if (length > 0)
167 chip->write_buf(mtd, bufpoi, length);
168
169 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
170 status = chip->waitfunc(mtd, chip);
171
172 return status & NAND_STATUS_FAIL ? -EIO : 0;
173
174}
175
176/*
177 * read oob for small pages
178 */
179static int at91_nand_read_oob_512(struct mtd_info *mtd,
180 struct nand_chip *chip, int page, int sndcmd)
181{
182 if (sndcmd) {
183 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
184 sndcmd = 0;
185 }
186 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
187 return sndcmd;
188}
189
190/*
191 * Calculate HW ECC
192 *
193 * function called after a write
194 *
195 * mtd: MTD block structure
196 * dat: raw data (unused)
197 * ecc_code: buffer for ECC
198 */
199static int at91_nand_calculate(struct mtd_info *mtd,
200 const u_char *dat, unsigned char *ecc_code)
201{
202 struct nand_chip *nand_chip = mtd->priv;
203 struct at91_nand_host *host = nand_chip->priv;
204 uint32_t *eccpos = nand_chip->ecc.layout->eccpos;
205 unsigned int ecc_value;
206
207 /* get the first 2 ECC bytes */
208 ecc_value = ecc_readl(host->ecc, PR);
209
210 ecc_code[eccpos[0]] = ecc_value & 0xFF;
211 ecc_code[eccpos[1]] = (ecc_value >> 8) & 0xFF;
212
213 /* get the last 2 ECC bytes */
214 ecc_value = ecc_readl(host->ecc, NPR) & AT91_ECC_NPARITY;
215
216 ecc_code[eccpos[2]] = ecc_value & 0xFF;
217 ecc_code[eccpos[3]] = (ecc_value >> 8) & 0xFF;
218
219 return 0;
220}
221
222/*
223 * HW ECC read page function
224 *
225 * mtd: mtd info structure
226 * chip: nand chip info structure
227 * buf: buffer to store read data
228 */
229static int at91_nand_read_page(struct mtd_info *mtd,
230 struct nand_chip *chip, uint8_t *buf)
231{
232 int eccsize = chip->ecc.size;
233 int eccbytes = chip->ecc.bytes;
234 uint32_t *eccpos = chip->ecc.layout->eccpos;
235 uint8_t *p = buf;
236 uint8_t *oob = chip->oob_poi;
237 uint8_t *ecc_pos;
238 int stat;
239
240 /* read the page */
241 chip->read_buf(mtd, p, eccsize);
242
243 /* move to ECC position if needed */
244 if (eccpos[0] != 0) {
245 /* This only works on large pages
246 * because the ECC controller waits for
247 * NAND_CMD_RNDOUTSTART after the
248 * NAND_CMD_RNDOUT.
 249 * Anyway, for small pages, eccpos[0] == 0
250 */
251 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
252 mtd->writesize + eccpos[0], -1);
253 }
254
255 /* the ECC controller needs to read the ECC just after the data */
256 ecc_pos = oob + eccpos[0];
257 chip->read_buf(mtd, ecc_pos, eccbytes);
258
259 /* check if there's an error */
260 stat = chip->ecc.correct(mtd, p, oob, NULL);
261
262 if (stat < 0)
263 mtd->ecc_stats.failed++;
264 else
265 mtd->ecc_stats.corrected += stat;
266
267 /* get back to oob start (end of page) */
268 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
269
270 /* read the oob */
271 chip->read_buf(mtd, oob, mtd->oobsize);
272
273 return 0;
274}
275
276/*
277 * HW ECC Correction
278 *
279 * function called after a read
280 *
281 * mtd: MTD block structure
282 * dat: raw data read from the chip
283 * read_ecc: ECC from the chip (unused)
284 * isnull: unused
285 *
286 * Detect and correct a 1 bit error for a page
287 */
288static int at91_nand_correct(struct mtd_info *mtd, u_char *dat,
289 u_char *read_ecc, u_char *isnull)
290{
291 struct nand_chip *nand_chip = mtd->priv;
292 struct at91_nand_host *host = nand_chip->priv;
293 unsigned int ecc_status;
294 unsigned int ecc_word, ecc_bit;
295
296 /* get the status from the Status Register */
297 ecc_status = ecc_readl(host->ecc, SR);
298
299 /* if there's no error */
300 if (likely(!(ecc_status & AT91_ECC_RECERR)))
301 return 0;
302
303 /* get error bit offset (4 bits) */
304 ecc_bit = ecc_readl(host->ecc, PR) & AT91_ECC_BITADDR;
305 /* get word address (12 bits) */
306 ecc_word = ecc_readl(host->ecc, PR) & AT91_ECC_WORDADDR;
307 ecc_word >>= 4;
308
309 /* if there are multiple errors */
310 if (ecc_status & AT91_ECC_MULERR) {
311 /* check if it is a freshly erased block
312 * (filled with 0xff) */
313 if ((ecc_bit == AT91_ECC_BITADDR)
314 && (ecc_word == (AT91_ECC_WORDADDR >> 4))) {
315 /* the block has just been erased, return OK */
316 return 0;
317 }
 318 /* it doesn't seem to be a freshly
319 * erased block.
320 * We can't correct so many errors */
321 dev_dbg(host->dev, "at91_nand : multiple errors detected."
322 " Unable to correct.\n");
323 return -EIO;
324 }
325
326 /* if there's a single bit error : we can correct it */
327 if (ecc_status & AT91_ECC_ECCERR) {
328 /* there's nothing much to do here.
329 * the bit error is on the ECC itself.
330 */
331 dev_dbg(host->dev, "at91_nand : one bit error on ECC code."
332 " Nothing to correct\n");
333 return 0;
334 }
335
336 dev_dbg(host->dev, "at91_nand : one bit error on data."
337 " (word offset in the page :"
338 " 0x%x bit offset : 0x%x)\n",
339 ecc_word, ecc_bit);
340 /* correct the error */
341 if (nand_chip->options & NAND_BUSWIDTH_16) {
342 /* 16 bits words */
343 ((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
344 } else {
345 /* 8 bits words */
346 dat[ecc_word] ^= (1 << ecc_bit);
347 }
348 dev_dbg(host->dev, "at91_nand : error corrected\n");
349 return 1;
350}
351
352/*
 353 * Enable HW ECC : unused
354 */
355static void at91_nand_hwctl(struct mtd_info *mtd, int mode) { ; }
356
85#ifdef CONFIG_MTD_PARTITIONS 357#ifdef CONFIG_MTD_PARTITIONS
86const char *part_probes[] = { "cmdlinepart", NULL }; 358static const char *part_probes[] = { "cmdlinepart", NULL };
87#endif 359#endif
88 360
89/* 361/*
@@ -94,6 +366,8 @@ static int __init at91_nand_probe(struct platform_device *pdev)
94 struct at91_nand_host *host; 366 struct at91_nand_host *host;
95 struct mtd_info *mtd; 367 struct mtd_info *mtd;
96 struct nand_chip *nand_chip; 368 struct nand_chip *nand_chip;
369 struct resource *regs;
370 struct resource *mem;
97 int res; 371 int res;
98 372
99#ifdef CONFIG_MTD_PARTITIONS 373#ifdef CONFIG_MTD_PARTITIONS
@@ -108,8 +382,13 @@ static int __init at91_nand_probe(struct platform_device *pdev)
108 return -ENOMEM; 382 return -ENOMEM;
109 } 383 }
110 384
111 host->io_base = ioremap(pdev->resource[0].start, 385 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
112 pdev->resource[0].end - pdev->resource[0].start + 1); 386 if (!mem) {
387 printk(KERN_ERR "at91_nand: can't get I/O resource mem\n");
388 return -ENXIO;
389 }
390
391 host->io_base = ioremap(mem->start, mem->end - mem->start + 1);
113 if (host->io_base == NULL) { 392 if (host->io_base == NULL) {
114 printk(KERN_ERR "at91_nand: ioremap failed\n"); 393 printk(KERN_ERR "at91_nand: ioremap failed\n");
115 kfree(host); 394 kfree(host);
@@ -119,6 +398,7 @@ static int __init at91_nand_probe(struct platform_device *pdev)
119 mtd = &host->mtd; 398 mtd = &host->mtd;
120 nand_chip = &host->nand_chip; 399 nand_chip = &host->nand_chip;
121 host->board = pdev->dev.platform_data; 400 host->board = pdev->dev.platform_data;
401 host->dev = &pdev->dev;
122 402
123 nand_chip->priv = host; /* link the private data structures */ 403 nand_chip->priv = host; /* link the private data structures */
124 mtd->priv = nand_chip; 404 mtd->priv = nand_chip;
@@ -132,7 +412,32 @@ static int __init at91_nand_probe(struct platform_device *pdev)
132 if (host->board->rdy_pin) 412 if (host->board->rdy_pin)
133 nand_chip->dev_ready = at91_nand_device_ready; 413 nand_chip->dev_ready = at91_nand_device_ready;
134 414
415 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
416 if (!regs && hard_ecc) {
417 printk(KERN_ERR "at91_nand: can't get I/O resource "
418 "regs\nFalling back on software ECC\n");
419 }
420
135 nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ 421 nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
422 if (no_ecc)
423 nand_chip->ecc.mode = NAND_ECC_NONE;
424 if (hard_ecc && regs) {
425 host->ecc = ioremap(regs->start, regs->end - regs->start + 1);
426 if (host->ecc == NULL) {
427 printk(KERN_ERR "at91_nand: ioremap failed\n");
428 res = -EIO;
429 goto err_ecc_ioremap;
430 }
431 nand_chip->ecc.mode = NAND_ECC_HW_SYNDROME;
432 nand_chip->ecc.calculate = at91_nand_calculate;
433 nand_chip->ecc.correct = at91_nand_correct;
434 nand_chip->ecc.hwctl = at91_nand_hwctl;
435 nand_chip->ecc.read_page = at91_nand_read_page;
436 nand_chip->ecc.bytes = 4;
437 nand_chip->ecc.prepad = 0;
438 nand_chip->ecc.postpad = 0;
439 }
440
136 nand_chip->chip_delay = 20; /* 20us command delay time */ 441 nand_chip->chip_delay = 20; /* 20us command delay time */
137 442
138 if (host->board->bus_width_16) /* 16-bit bus width */ 443 if (host->board->bus_width_16) /* 16-bit bus width */
@@ -149,8 +454,53 @@ static int __init at91_nand_probe(struct platform_device *pdev)
149 } 454 }
150 } 455 }
151 456
152 /* Scan to find existance of the device */ 457 /* first scan to find the device and get the page size */
153 if (nand_scan(mtd, 1)) { 458 if (nand_scan_ident(mtd, 1)) {
459 res = -ENXIO;
460 goto out;
461 }
462
463 if (nand_chip->ecc.mode == NAND_ECC_HW_SYNDROME) {
464 /* ECC is calculated for the whole page (1 step) */
465 nand_chip->ecc.size = mtd->writesize;
466
467 /* set ECC page size and oob layout */
468 switch (mtd->writesize) {
469 case 512:
470 nand_chip->ecc.layout = &at91_oobinfo_small;
471 nand_chip->ecc.read_oob = at91_nand_read_oob_512;
472 nand_chip->ecc.write_oob = at91_nand_write_oob_512;
473 ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_528);
474 break;
475 case 1024:
476 nand_chip->ecc.layout = &at91_oobinfo_large;
477 ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_1056);
478 break;
479 case 2048:
480 nand_chip->ecc.layout = &at91_oobinfo_large;
481 ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_2112);
482 break;
483 case 4096:
484 nand_chip->ecc.layout = &at91_oobinfo_large;
485 ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_4224);
486 break;
487 default:
488 /* page size not handled by HW ECC */
489 /* switching back to soft ECC */
490 nand_chip->ecc.mode = NAND_ECC_SOFT;
491 nand_chip->ecc.calculate = NULL;
492 nand_chip->ecc.correct = NULL;
493 nand_chip->ecc.hwctl = NULL;
494 nand_chip->ecc.read_page = NULL;
495 nand_chip->ecc.postpad = 0;
496 nand_chip->ecc.prepad = 0;
497 nand_chip->ecc.bytes = 0;
498 break;
499 }
500 }
501
502 /* second phase scan */
503 if (nand_scan_tail(mtd)) {
154 res = -ENXIO; 504 res = -ENXIO;
155 goto out; 505 goto out;
156 } 506 }
@@ -179,9 +529,15 @@ static int __init at91_nand_probe(struct platform_device *pdev)
179 if (!res) 529 if (!res)
180 return res; 530 return res;
181 531
532#ifdef CONFIG_MTD_PARTITIONS
182release: 533release:
534#endif
183 nand_release(mtd); 535 nand_release(mtd);
536
184out: 537out:
538 iounmap(host->ecc);
539
540err_ecc_ioremap:
185 at91_nand_disable(host); 541 at91_nand_disable(host);
186 platform_set_drvdata(pdev, NULL); 542 platform_set_drvdata(pdev, NULL);
187 iounmap(host->io_base); 543 iounmap(host->io_base);
@@ -202,6 +558,7 @@ static int __devexit at91_nand_remove(struct platform_device *pdev)
202 at91_nand_disable(host); 558 at91_nand_disable(host);
203 559
204 iounmap(host->io_base); 560 iounmap(host->io_base);
561 iounmap(host->ecc);
205 kfree(host); 562 kfree(host);
206 563
207 return 0; 564 return 0;
@@ -233,4 +590,5 @@ module_exit(at91_nand_exit);
233 590
234MODULE_LICENSE("GPL"); 591MODULE_LICENSE("GPL");
235MODULE_AUTHOR("Rick Bronson"); 592MODULE_AUTHOR("Rick Bronson");
236MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91RM9200"); 593MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91RM9200 / AT91SAM9");
594MODULE_ALIAS("platform:at91_nand");
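
Note: a worked example of the single-bit fix in at91_nand_correct() above. The parity register packs a 4-bit bit offset and a 12-bit word offset, so correction reduces to one XOR; the masks below follow the "4 bits"/"12 bits" comments in the hunk, and the PR value is invented:

	#include <stdio.h>

	int main(void)
	{
		unsigned char dat[2048] = { 0 };	/* page buffer */
		unsigned int pr = 0x0153;		/* hypothetical AT91_ECC_PR read */

		unsigned int ecc_bit  = pr & 0xf;		/* error bit offset (4 bits)   */
		unsigned int ecc_word = (pr & 0xfff0) >> 4;	/* error word offset (12 bits) */

		dat[ecc_word] ^= (1 << ecc_bit);	/* 8-bit bus case: flip the bad bit */
		printf("flipped bit %u of byte 0x%x\n", ecc_bit, ecc_word);
		return 0;
	}

For pr == 0x0153 this flips bit 3 of byte 0x15, which is exactly what the driver does for an 8-bit chip.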
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 747042ab094a..e87a57297328 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -1,6 +1,6 @@
1/* linux/drivers/mtd/nand/bf5xx_nand.c 1/* linux/drivers/mtd/nand/bf5xx_nand.c
2 * 2 *
3 * Copyright 2006-2007 Analog Devices Inc. 3 * Copyright 2006-2008 Analog Devices Inc.
4 * http://blackfin.uclinux.org/ 4 * http://blackfin.uclinux.org/
5 * Bryan Wu <bryan.wu@analog.com> 5 * Bryan Wu <bryan.wu@analog.com>
6 * 6 *
@@ -74,7 +74,7 @@ static int hardware_ecc = 1;
74static int hardware_ecc; 74static int hardware_ecc;
75#endif 75#endif
76 76
77static unsigned short bfin_nfc_pin_req[] = 77static const unsigned short bfin_nfc_pin_req[] =
78 {P_NAND_CE, 78 {P_NAND_CE,
79 P_NAND_RB, 79 P_NAND_RB,
80 P_NAND_D0, 80 P_NAND_D0,
@@ -581,12 +581,6 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
581 bfin_write_NFC_IRQSTAT(val); 581 bfin_write_NFC_IRQSTAT(val);
582 SSYNC(); 582 SSYNC();
583 583
584 if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
585 printk(KERN_ERR DRV_NAME
586 ": Requesting Peripherals failed\n");
587 return -EFAULT;
588 }
589
590 /* DMA initialization */ 584 /* DMA initialization */
591 if (bf5xx_nand_dma_init(info)) 585 if (bf5xx_nand_dma_init(info))
592 err = -ENXIO; 586 err = -ENXIO;
@@ -654,6 +648,12 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
654 648
655 dev_dbg(&pdev->dev, "(%p)\n", pdev); 649 dev_dbg(&pdev->dev, "(%p)\n", pdev);
656 650
651 if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
652 printk(KERN_ERR DRV_NAME
653 ": Requesting Peripherals failed\n");
654 return -EFAULT;
655 }
656
657 if (!plat) { 657 if (!plat) {
658 dev_err(&pdev->dev, "no platform specific information\n"); 658 dev_err(&pdev->dev, "no platform specific information\n");
659 goto exit_error; 659 goto exit_error;
@@ -803,3 +803,4 @@ module_exit(bf5xx_nand_exit);
803MODULE_LICENSE("GPL"); 803MODULE_LICENSE("GPL");
804MODULE_AUTHOR(DRV_AUTHOR); 804MODULE_AUTHOR(DRV_AUTHOR);
805MODULE_DESCRIPTION(DRV_DESC); 805MODULE_DESCRIPTION(DRV_DESC);
806MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 8dab69657b19..3370a800fd36 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -279,7 +279,7 @@ static int is_geode(void)
279 279
280 280
281#ifdef CONFIG_MTD_PARTITIONS 281#ifdef CONFIG_MTD_PARTITIONS
282const char *part_probes[] = { "cmdlinepart", NULL }; 282static const char *part_probes[] = { "cmdlinepart", NULL };
283#endif 283#endif
284 284
285 285
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 378b7aa63812..4b69aacdf5ca 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -184,11 +184,11 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
184 in_be32(&lbc->fbar), in_be32(&lbc->fpar), 184 in_be32(&lbc->fbar), in_be32(&lbc->fpar),
185 in_be32(&lbc->fbcr), priv->bank); 185 in_be32(&lbc->fbcr), priv->bank);
186 186
187 ctrl->irq_status = 0;
187 /* execute special operation */ 188 /* execute special operation */
188 out_be32(&lbc->lsor, priv->bank); 189 out_be32(&lbc->lsor, priv->bank);
189 190
190 /* wait for FCM complete flag or timeout */ 191 /* wait for FCM complete flag or timeout */
191 ctrl->irq_status = 0;
192 wait_event_timeout(ctrl->irq_wait, ctrl->irq_status, 192 wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
193 FCM_TIMEOUT_MSECS * HZ/1000); 193 FCM_TIMEOUT_MSECS * HZ/1000);
194 ctrl->status = ctrl->irq_status; 194 ctrl->status = ctrl->irq_status;
@@ -346,19 +346,20 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
346 ctrl->column = column; 346 ctrl->column = column;
347 ctrl->oob = 0; 347 ctrl->oob = 0;
348 348
349 fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
350 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
351
352 if (priv->page_size) { 349 if (priv->page_size) {
350 fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) |
351 (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT);
352
353 out_be32(&lbc->fir, 353 out_be32(&lbc->fir,
354 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 354 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
355 (FIR_OP_CA << FIR_OP1_SHIFT) | 355 (FIR_OP_CA << FIR_OP1_SHIFT) |
356 (FIR_OP_PA << FIR_OP2_SHIFT) | 356 (FIR_OP_PA << FIR_OP2_SHIFT) |
357 (FIR_OP_WB << FIR_OP3_SHIFT) | 357 (FIR_OP_WB << FIR_OP3_SHIFT) |
358 (FIR_OP_CW1 << FIR_OP4_SHIFT)); 358 (FIR_OP_CW1 << FIR_OP4_SHIFT));
359
360 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
361 } else { 359 } else {
360 fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
361 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
362
362 out_be32(&lbc->fir, 363 out_be32(&lbc->fir,
363 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 364 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
364 (FIR_OP_CM2 << FIR_OP1_SHIFT) | 365 (FIR_OP_CM2 << FIR_OP1_SHIFT) |
@@ -480,7 +481,7 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
480 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 481 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
481 unsigned int bufsize = mtd->writesize + mtd->oobsize; 482 unsigned int bufsize = mtd->writesize + mtd->oobsize;
482 483
483 if (len < 0) { 484 if (len <= 0) {
484 dev_err(ctrl->dev, "write_buf of %d bytes", len); 485 dev_err(ctrl->dev, "write_buf of %d bytes", len);
485 ctrl->status = 0; 486 ctrl->status = 0;
486 return; 487 return;
@@ -495,6 +496,15 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
495 } 496 }
496 497
497 memcpy_toio(&ctrl->addr[ctrl->index], buf, len); 498 memcpy_toio(&ctrl->addr[ctrl->index], buf, len);
499 /*
 500 * This is a workaround for the weird elbc hangs during nand write;
501 * Scott Wood says: "...perhaps difference in how long it takes a
502 * write to make it through the localbus compared to a write to IMMR
503 * is causing problems, and sync isn't helping for some reason."
504 * Reading back the last byte helps though.
505 */
506 in_8(&ctrl->addr[ctrl->index] + len - 1);
507
498 ctrl->index += len; 508 ctrl->index += len;
499} 509}
500 510
@@ -666,7 +676,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
666 /* adjust Option Register and ECC to match Flash page size */ 676 /* adjust Option Register and ECC to match Flash page size */
667 if (mtd->writesize == 512) { 677 if (mtd->writesize == 512) {
668 priv->page_size = 0; 678 priv->page_size = 0;
669 clrbits32(&lbc->bank[priv->bank].or, ~OR_FCM_PGS); 679 clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
670 } else if (mtd->writesize == 2048) { 680 } else if (mtd->writesize == 2048) {
671 priv->page_size = 1; 681 priv->page_size = 1;
672 setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS); 682 setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
@@ -687,11 +697,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
687 return -1; 697 return -1;
688 } 698 }
689 699
690 /* The default u-boot configuration on MPC8313ERDB causes errors;
691 * more delay is needed. This should be safe for other boards
692 * as well.
693 */
694 setbits32(&lbc->bank[priv->bank].or, 0x70);
695 return 0; 700 return 0;
696} 701}
697 702
@@ -779,6 +784,8 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
779 784
780 nand_release(&priv->mtd); 785 nand_release(&priv->mtd);
781 786
787 kfree(priv->mtd.name);
788
782 if (priv->vbase) 789 if (priv->vbase)
783 iounmap(priv->vbase); 790 iounmap(priv->vbase);
784 791
@@ -839,6 +846,12 @@ static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
839 goto err; 846 goto err;
840 } 847 }
841 848
849 priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", res.start);
850 if (!priv->mtd.name) {
851 ret = -ENOMEM;
852 goto err;
853 }
854
842 ret = fsl_elbc_chip_init(priv); 855 ret = fsl_elbc_chip_init(priv);
843 if (ret) 856 if (ret)
844 goto err; 857 goto err;
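
Note: the read-back added to fsl_elbc_write_buf() above is the classic posted-write flush: a read on the same bus path cannot complete until the buffered writes have drained, so the controller is only kicked once the data has really landed. A generic sketch of the idiom with hypothetical foo_* names (PowerPC accessors assumed):

	#include <linux/types.h>
	#include <linux/io.h>

	static void foo_fill_buffer_and_start(u8 __iomem *buf, const u8 *src,
					      int len, u32 __iomem *start_reg)
	{
		memcpy_toio(buf, src, len);
		in_8(buf + len - 1);	/* flush writes posted to the localbus */
		out_be32(start_reg, 1);	/* now safe to trigger the operation */
	}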
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
new file mode 100644
index 000000000000..1ebfd87f00b4
--- /dev/null
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -0,0 +1,291 @@
1/*
2 * Freescale UPM NAND driver.
3 *
4 * Copyright © 2007-2008 MontaVista Software, Inc.
5 *
6 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/mtd/nand.h>
17#include <linux/mtd/nand_ecc.h>
18#include <linux/mtd/partitions.h>
19#include <linux/mtd/mtd.h>
20#include <linux/of_platform.h>
21#include <linux/of_gpio.h>
22#include <linux/io.h>
23#include <asm/fsl_lbc.h>
24
25struct fsl_upm_nand {
26 struct device *dev;
27 struct mtd_info mtd;
28 struct nand_chip chip;
29 int last_ctrl;
30#ifdef CONFIG_MTD_PARTITIONS
31 struct mtd_partition *parts;
32#endif
33
34 struct fsl_upm upm;
35 uint8_t upm_addr_offset;
36 uint8_t upm_cmd_offset;
37 void __iomem *io_base;
38 int rnb_gpio;
39 const uint32_t *wait_pattern;
40 const uint32_t *wait_write;
41 int chip_delay;
42};
43
44#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd)
45
46static int fun_chip_ready(struct mtd_info *mtd)
47{
48 struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
49
50 if (gpio_get_value(fun->rnb_gpio))
51 return 1;
52
53 dev_vdbg(fun->dev, "busy\n");
54 return 0;
55}
56
57static void fun_wait_rnb(struct fsl_upm_nand *fun)
58{
59 int cnt = 1000000;
60
61 if (fun->rnb_gpio >= 0) {
62 while (--cnt && !fun_chip_ready(&fun->mtd))
63 cpu_relax();
64 }
65
66 if (!cnt)
67 dev_err(fun->dev, "tired waiting for RNB\n");
68}
69
70static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
71{
72 struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
73
74 if (!(ctrl & fun->last_ctrl)) {
75 fsl_upm_end_pattern(&fun->upm);
76
77 if (cmd == NAND_CMD_NONE)
78 return;
79
80 fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
81 }
82
83 if (ctrl & NAND_CTRL_CHANGE) {
84 if (ctrl & NAND_ALE)
85 fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
86 else if (ctrl & NAND_CLE)
87 fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
88 }
89
90 fsl_upm_run_pattern(&fun->upm, fun->io_base, cmd);
91
92 if (fun->wait_pattern)
93 fun_wait_rnb(fun);
94}
95
96static uint8_t fun_read_byte(struct mtd_info *mtd)
97{
98 struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
99
100 return in_8(fun->chip.IO_ADDR_R);
101}
102
103static void fun_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
104{
105 struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
106 int i;
107
108 for (i = 0; i < len; i++)
109 buf[i] = in_8(fun->chip.IO_ADDR_R);
110}
111
112static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
113{
114 struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
115 int i;
116
117 for (i = 0; i < len; i++) {
118 out_8(fun->chip.IO_ADDR_W, buf[i]);
119 if (fun->wait_write)
120 fun_wait_rnb(fun);
121 }
122}
123
124static int __devinit fun_chip_init(struct fsl_upm_nand *fun)
125{
126 int ret;
127#ifdef CONFIG_MTD_PARTITIONS
128 static const char *part_types[] = { "cmdlinepart", NULL, };
129#endif
130
131 fun->chip.IO_ADDR_R = fun->io_base;
132 fun->chip.IO_ADDR_W = fun->io_base;
133 fun->chip.cmd_ctrl = fun_cmd_ctrl;
134 fun->chip.chip_delay = fun->chip_delay;
135 fun->chip.read_byte = fun_read_byte;
136 fun->chip.read_buf = fun_read_buf;
137 fun->chip.write_buf = fun_write_buf;
138 fun->chip.ecc.mode = NAND_ECC_SOFT;
139
140 if (fun->rnb_gpio >= 0)
141 fun->chip.dev_ready = fun_chip_ready;
142
143 fun->mtd.priv = &fun->chip;
144 fun->mtd.owner = THIS_MODULE;
145
146 ret = nand_scan(&fun->mtd, 1);
147 if (ret)
148 return ret;
149
150 fun->mtd.name = fun->dev->bus_id;
151
152#ifdef CONFIG_MTD_PARTITIONS
153 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
154 if (ret > 0)
155 return add_mtd_partitions(&fun->mtd, fun->parts, ret);
156#endif
157 return add_mtd_device(&fun->mtd);
158}
159
160static int __devinit fun_probe(struct of_device *ofdev,
161 const struct of_device_id *ofid)
162{
163 struct fsl_upm_nand *fun;
164 struct resource io_res;
165 const uint32_t *prop;
166 int ret;
167 int size;
168
169 fun = kzalloc(sizeof(*fun), GFP_KERNEL);
170 if (!fun)
171 return -ENOMEM;
172
173 ret = of_address_to_resource(ofdev->node, 0, &io_res);
174 if (ret) {
175 dev_err(&ofdev->dev, "can't get IO base\n");
176 goto err1;
177 }
178
179 ret = fsl_upm_find(io_res.start, &fun->upm);
180 if (ret) {
181 dev_err(&ofdev->dev, "can't find UPM\n");
182 goto err1;
183 }
184
185 prop = of_get_property(ofdev->node, "fsl,upm-addr-offset", &size);
186 if (!prop || size != sizeof(uint32_t)) {
187 dev_err(&ofdev->dev, "can't get UPM address offset\n");
188 ret = -EINVAL;
189 goto err2;
190 }
191 fun->upm_addr_offset = *prop;
192
193 prop = of_get_property(ofdev->node, "fsl,upm-cmd-offset", &size);
194 if (!prop || size != sizeof(uint32_t)) {
195 dev_err(&ofdev->dev, "can't get UPM command offset\n");
196 ret = -EINVAL;
197 goto err2;
198 }
199 fun->upm_cmd_offset = *prop;
200
201 fun->rnb_gpio = of_get_gpio(ofdev->node, 0);
202 if (fun->rnb_gpio >= 0) {
203 ret = gpio_request(fun->rnb_gpio, ofdev->dev.bus_id);
204 if (ret) {
205 dev_err(&ofdev->dev, "can't request RNB gpio\n");
206 goto err2;
207 }
208 gpio_direction_input(fun->rnb_gpio);
209 } else if (fun->rnb_gpio == -EINVAL) {
210 dev_err(&ofdev->dev, "specified RNB gpio is invalid\n");
211 goto err2;
212 }
213
214 fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
215 io_res.end - io_res.start + 1);
216 if (!fun->io_base) {
217 ret = -ENOMEM;
218 goto err2;
219 }
220
221 fun->dev = &ofdev->dev;
222 fun->last_ctrl = NAND_CLE;
223 fun->wait_pattern = of_get_property(ofdev->node, "fsl,wait-pattern",
224 NULL);
225 fun->wait_write = of_get_property(ofdev->node, "fsl,wait-write", NULL);
226
227 prop = of_get_property(ofdev->node, "chip-delay", NULL);
228 if (prop)
229 fun->chip_delay = *prop;
230 else
231 fun->chip_delay = 50;
232
233 ret = fun_chip_init(fun);
234 if (ret)
235 goto err2;
236
237 dev_set_drvdata(&ofdev->dev, fun);
238
239 return 0;
240err2:
241 if (fun->rnb_gpio >= 0)
242 gpio_free(fun->rnb_gpio);
243err1:
244 kfree(fun);
245
246 return ret;
247}
248
249static int __devexit fun_remove(struct of_device *ofdev)
250{
251 struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
252
253 nand_release(&fun->mtd);
254
255 if (fun->rnb_gpio >= 0)
256 gpio_free(fun->rnb_gpio);
257
258 kfree(fun);
259
260 return 0;
261}
262
263static struct of_device_id of_fun_match[] = {
264 { .compatible = "fsl,upm-nand" },
265 {},
266};
267MODULE_DEVICE_TABLE(of, of_fun_match);
268
269static struct of_platform_driver of_fun_driver = {
270 .name = "fsl,upm-nand",
271 .match_table = of_fun_match,
272 .probe = fun_probe,
273 .remove = __devexit_p(fun_remove),
274};
275
276static int __init fun_module_init(void)
277{
278 return of_register_platform_driver(&of_fun_driver);
279}
280module_init(fun_module_init);
281
282static void __exit fun_module_exit(void)
283{
284 of_unregister_platform_driver(&of_fun_driver);
285}
286module_exit(fun_module_exit);
287
288MODULE_LICENSE("GPL");
289MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
290MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
291 "LocalBus User-Programmable Machine");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 7acb1a0e7409..ba1bdf787323 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2229,6 +2229,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2229{ 2229{
2230 struct nand_flash_dev *type = NULL; 2230 struct nand_flash_dev *type = NULL;
2231 int i, dev_id, maf_idx; 2231 int i, dev_id, maf_idx;
2232 int tmp_id, tmp_manf;
2232 2233
2233 /* Select the device */ 2234 /* Select the device */
2234 chip->select_chip(mtd, 0); 2235 chip->select_chip(mtd, 0);
@@ -2240,6 +2241,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2240 *maf_id = chip->read_byte(mtd); 2241 *maf_id = chip->read_byte(mtd);
2241 dev_id = chip->read_byte(mtd); 2242 dev_id = chip->read_byte(mtd);
2242 2243
 2244 /* Try again to make sure, as on some systems the bus-hold or other
2245 * interface concerns can cause random data which looks like a
2246 * possibly credible NAND flash to appear. If the two results do
2247 * not match, ignore the device completely.
2248 */
2249
2250 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2251
2252 /* Read manufacturer and device IDs */
2253
2254 tmp_manf = chip->read_byte(mtd);
2255 tmp_id = chip->read_byte(mtd);
2256
2257 if (tmp_manf != *maf_id || tmp_id != dev_id) {
2258 printk(KERN_INFO "%s: second ID read did not match "
2259 "%02x,%02x against %02x,%02x\n", __func__,
2260 *maf_id, dev_id, tmp_manf, tmp_id);
2261 return ERR_PTR(-ENODEV);
2262 }
2263
2243 /* Lookup the flash id */ 2264 /* Lookup the flash id */
2244 for (i = 0; nand_flash_ids[i].name != NULL; i++) { 2265 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
2245 if (dev_id == nand_flash_ids[i].id) { 2266 if (dev_id == nand_flash_ids[i].id) {
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 1c0e89f00e8d..955959eb02d4 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -317,3 +317,5 @@ module_exit(ndfc_nand_exit);
317MODULE_LICENSE("GPL"); 317MODULE_LICENSE("GPL");
318MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 318MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
319MODULE_DESCRIPTION("Platform driver for NDFC"); 319MODULE_DESCRIPTION("Platform driver for NDFC");
320MODULE_ALIAS("platform:ndfc-chip");
321MODULE_ALIAS("platform:ndfc-nand");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index ec5ad28b237e..59e05a1c50cf 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -169,3 +169,4 @@ module_exit(orion_nand_exit);
169MODULE_LICENSE("GPL"); 169MODULE_LICENSE("GPL");
170MODULE_AUTHOR("Tzachi Perelstein"); 170MODULE_AUTHOR("Tzachi Perelstein");
171MODULE_DESCRIPTION("NAND glue for Orion platforms"); 171MODULE_DESCRIPTION("NAND glue for Orion platforms");
172MODULE_ALIAS("platform:orion_nand");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index f6d5c2adc4fd..f674c5427b17 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -54,6 +54,7 @@ static int __init plat_nand_probe(struct platform_device *pdev)
54 data->chip.priv = &data; 54 data->chip.priv = &data;
55 data->mtd.priv = &data->chip; 55 data->mtd.priv = &data->chip;
56 data->mtd.owner = THIS_MODULE; 56 data->mtd.owner = THIS_MODULE;
57 data->mtd.name = pdev->dev.bus_id;
57 58
58 data->chip.IO_ADDR_R = data->io_base; 59 data->chip.IO_ADDR_R = data->io_base;
59 data->chip.IO_ADDR_W = data->io_base; 60 data->chip.IO_ADDR_W = data->io_base;
@@ -150,3 +151,4 @@ module_exit(plat_nand_exit);
150MODULE_LICENSE("GPL"); 151MODULE_LICENSE("GPL");
151MODULE_AUTHOR("Vitaly Wool"); 152MODULE_AUTHOR("Vitaly Wool");
152MODULE_DESCRIPTION("Simple generic NAND driver"); 153MODULE_DESCRIPTION("Simple generic NAND driver");
154MODULE_ALIAS("platform:gen_nand");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
new file mode 100644
index 000000000000..fceb468ccdec
--- /dev/null
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -0,0 +1,1249 @@
1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/interrupt.h>
14#include <linux/platform_device.h>
15#include <linux/dma-mapping.h>
16#include <linux/delay.h>
17#include <linux/clk.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/nand.h>
20#include <linux/mtd/partitions.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <asm/dma.h>
24
25#include <asm/arch/pxa-regs.h>
26#include <asm/arch/pxa3xx_nand.h>
27
28#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
29
30/* registers and bit definitions */
31#define NDCR (0x00) /* Control register */
32#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
33#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
34#define NDSR (0x14) /* Status Register */
35#define NDPCR (0x18) /* Page Count Register */
36#define NDBDR0 (0x1C) /* Bad Block Register 0 */
37#define NDBDR1 (0x20) /* Bad Block Register 1 */
38#define NDDB (0x40) /* Data Buffer */
39#define NDCB0 (0x48) /* Command Buffer0 */
40#define NDCB1 (0x4C) /* Command Buffer1 */
41#define NDCB2 (0x50) /* Command Buffer2 */
42
43#define NDCR_SPARE_EN (0x1 << 31)
44#define NDCR_ECC_EN (0x1 << 30)
45#define NDCR_DMA_EN (0x1 << 29)
46#define NDCR_ND_RUN (0x1 << 28)
47#define NDCR_DWIDTH_C (0x1 << 27)
48#define NDCR_DWIDTH_M (0x1 << 26)
49#define NDCR_PAGE_SZ (0x1 << 24)
50#define NDCR_NCSX (0x1 << 23)
51#define NDCR_ND_MODE (0x3 << 21)
52#define NDCR_NAND_MODE (0x0)
53#define NDCR_CLR_PG_CNT (0x1 << 20)
54#define NDCR_CLR_ECC (0x1 << 19)
55#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
56#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
57
58#define NDCR_RA_START (0x1 << 15)
59#define NDCR_PG_PER_BLK (0x1 << 14)
60#define NDCR_ND_ARB_EN (0x1 << 12)
61
62#define NDSR_MASK (0xfff)
63#define NDSR_RDY (0x1 << 11)
64#define NDSR_CS0_PAGED (0x1 << 10)
65#define NDSR_CS1_PAGED (0x1 << 9)
66#define NDSR_CS0_CMDD (0x1 << 8)
67#define NDSR_CS1_CMDD (0x1 << 7)
68#define NDSR_CS0_BBD (0x1 << 6)
69#define NDSR_CS1_BBD (0x1 << 5)
70#define NDSR_DBERR (0x1 << 4)
71#define NDSR_SBERR (0x1 << 3)
72#define NDSR_WRDREQ (0x1 << 2)
73#define NDSR_RDDREQ (0x1 << 1)
74#define NDSR_WRCMDREQ (0x1)
75
76#define NDCB0_AUTO_RS (0x1 << 25)
77#define NDCB0_CSEL (0x1 << 24)
78#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
79#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
80#define NDCB0_NC (0x1 << 20)
81#define NDCB0_DBC (0x1 << 19)
82#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
83#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
84#define NDCB0_CMD2_MASK (0xff << 8)
85#define NDCB0_CMD1_MASK (0xff)
86#define NDCB0_ADDR_CYC_SHIFT (16)
87
88/* dma-able I/O address for the NAND data and commands */
89#define NDCB0_DMA_ADDR (0x43100048)
90#define NDDB_DMA_ADDR (0x43100040)
91
92/* macros for registers read/write */
93#define nand_writel(info, off, val) \
94 __raw_writel((val), (info)->mmio_base + (off))
95
96#define nand_readl(info, off) \
97 __raw_readl((info)->mmio_base + (off))
98
99/* error code and state */
100enum {
101 ERR_NONE = 0,
102 ERR_DMABUSERR = -1,
103 ERR_SENDCMD = -2,
104 ERR_DBERR = -3,
105 ERR_BBERR = -4,
106};
107
108enum {
109 STATE_READY = 0,
110 STATE_CMD_HANDLE,
111 STATE_DMA_READING,
112 STATE_DMA_WRITING,
113 STATE_DMA_DONE,
114 STATE_PIO_READING,
115 STATE_PIO_WRITING,
116};
117
118struct pxa3xx_nand_timing {
119 unsigned int tCH; /* Enable signal hold time */
120 unsigned int tCS; /* Enable signal setup time */
121 unsigned int tWH; /* ND_nWE high duration */
122 unsigned int tWP; /* ND_nWE pulse time */
123 unsigned int tRH; /* ND_nRE high duration */
124 unsigned int tRP; /* ND_nRE pulse width */
125 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
126 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
127 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
128};
129
130struct pxa3xx_nand_cmdset {
131 uint16_t read1;
132 uint16_t read2;
133 uint16_t program;
134 uint16_t read_status;
135 uint16_t read_id;
136 uint16_t erase;
137 uint16_t reset;
138 uint16_t lock;
139 uint16_t unlock;
140 uint16_t lock_status;
141};
142
143struct pxa3xx_nand_flash {
144 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
145 struct pxa3xx_nand_cmdset *cmdset;
146
147 uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */
148 uint32_t page_size; /* Page size in bytes (PAGE_SZ) */
149 uint32_t flash_width; /* Width of Flash memory (DWIDTH_M) */
150 uint32_t dfc_width; /* Width of flash controller(DWIDTH_C) */
151 uint32_t num_blocks; /* Number of physical blocks in Flash */
152 uint32_t chip_id;
153
154 /* NOTE: these are automatically calculated, do not define */
155 size_t oob_size;
156 size_t read_id_bytes;
157
158 unsigned int col_addr_cycles;
159 unsigned int row_addr_cycles;
160};
161
162struct pxa3xx_nand_info {
163 struct nand_chip nand_chip;
164
165 struct platform_device *pdev;
166 struct pxa3xx_nand_flash *flash_info;
167
168 struct clk *clk;
169 void __iomem *mmio_base;
170
171 unsigned int buf_start;
172 unsigned int buf_count;
173
174 /* DMA information */
175 int drcmr_dat;
176 int drcmr_cmd;
177
178 unsigned char *data_buff;
179 dma_addr_t data_buff_phys;
180 size_t data_buff_size;
181 int data_dma_ch;
182 struct pxa_dma_desc *data_desc;
183 dma_addr_t data_desc_addr;
184
185 uint32_t reg_ndcr;
186
187 /* saved column/page_addr during CMD_SEQIN */
188 int seqin_column;
189 int seqin_page_addr;
190
191 /* relate to the command */
192 unsigned int state;
193
194 int use_ecc; /* use HW ECC ? */
195 int use_dma; /* use DMA ? */
196
197 size_t data_size; /* data size in FIFO */
198 int retcode;
199 struct completion cmd_complete;
200
201 /* generated NDCBx register values */
202 uint32_t ndcb0;
203 uint32_t ndcb1;
204 uint32_t ndcb2;
205};
206
207static int use_dma = 1;
208module_param(use_dma, bool, 0444);
 209MODULE_PARM_DESC(use_dma, "enable DMA for data transfer to/from NAND HW");
210
211static struct pxa3xx_nand_cmdset smallpage_cmdset = {
212 .read1 = 0x0000,
213 .read2 = 0x0050,
214 .program = 0x1080,
215 .read_status = 0x0070,
216 .read_id = 0x0090,
217 .erase = 0xD060,
218 .reset = 0x00FF,
219 .lock = 0x002A,
220 .unlock = 0x2423,
221 .lock_status = 0x007A,
222};
223
224static struct pxa3xx_nand_cmdset largepage_cmdset = {
225 .read1 = 0x3000,
226 .read2 = 0x0050,
227 .program = 0x1080,
228 .read_status = 0x0070,
229 .read_id = 0x0090,
230 .erase = 0xD060,
231 .reset = 0x00FF,
232 .lock = 0x002A,
233 .unlock = 0x2423,
234 .lock_status = 0x007A,
235};
236
237static struct pxa3xx_nand_timing samsung512MbX16_timing = {
238 .tCH = 10,
239 .tCS = 0,
240 .tWH = 20,
241 .tWP = 40,
242 .tRH = 30,
243 .tRP = 40,
244 .tR = 11123,
245 .tWHR = 110,
246 .tAR = 10,
247};
248
249static struct pxa3xx_nand_flash samsung512MbX16 = {
250 .timing = &samsung512MbX16_timing,
251 .cmdset = &smallpage_cmdset,
252 .page_per_block = 32,
253 .page_size = 512,
254 .flash_width = 16,
255 .dfc_width = 16,
256 .num_blocks = 4096,
257 .chip_id = 0x46ec,
258};
259
260static struct pxa3xx_nand_timing micron_timing = {
261 .tCH = 10,
262 .tCS = 25,
263 .tWH = 15,
264 .tWP = 25,
265 .tRH = 15,
266 .tRP = 25,
267 .tR = 25000,
268 .tWHR = 60,
269 .tAR = 10,
270};
271
272static struct pxa3xx_nand_flash micron1GbX8 = {
273 .timing = &micron_timing,
274 .cmdset = &largepage_cmdset,
275 .page_per_block = 64,
276 .page_size = 2048,
277 .flash_width = 8,
278 .dfc_width = 8,
279 .num_blocks = 1024,
280 .chip_id = 0xa12c,
281};
282
283static struct pxa3xx_nand_flash micron1GbX16 = {
284 .timing = &micron_timing,
285 .cmdset = &largepage_cmdset,
286 .page_per_block = 64,
287 .page_size = 2048,
288 .flash_width = 16,
289 .dfc_width = 16,
290 .num_blocks = 1024,
291 .chip_id = 0xb12c,
292};
293
294static struct pxa3xx_nand_flash *builtin_flash_types[] = {
295 &samsung512MbX16,
296 &micron1GbX8,
297 &micron1GbX16,
298};
299
300#define NDTR0_tCH(c) (min((c), 7) << 19)
301#define NDTR0_tCS(c) (min((c), 7) << 16)
302#define NDTR0_tWH(c) (min((c), 7) << 11)
303#define NDTR0_tWP(c) (min((c), 7) << 8)
304#define NDTR0_tRH(c) (min((c), 7) << 3)
305#define NDTR0_tRP(c) (min((c), 7) << 0)
306
307#define NDTR1_tR(c) (min((c), 65535) << 16)
308#define NDTR1_tWHR(c) (min((c), 15) << 4)
309#define NDTR1_tAR(c) (min((c), 15) << 0)
310
 311/* convert nanoseconds to NAND flash controller clock cycles */
312#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1)
313
314static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
315 struct pxa3xx_nand_timing *t)
316{
317 unsigned long nand_clk = clk_get_rate(info->clk);
318 uint32_t ndtr0, ndtr1;
319
320 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
321 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
322 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
323 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
324 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
325 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
326
327 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
328 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
329 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
330
331 nand_writel(info, NDTR0CS0, ndtr0);
332 nand_writel(info, NDTR1CS0, ndtr1);
333}
334
335#define WAIT_EVENT_TIMEOUT 10
336
337static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
338{
339 int timeout = WAIT_EVENT_TIMEOUT;
340 uint32_t ndsr;
341
342 while (timeout--) {
343 ndsr = nand_readl(info, NDSR) & NDSR_MASK;
344 if (ndsr & event) {
345 nand_writel(info, NDSR, ndsr);
346 return 0;
347 }
348 udelay(10);
349 }
350
351 return -ETIMEDOUT;
352}
353
354static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
355 uint16_t cmd, int column, int page_addr)
356{
357 struct pxa3xx_nand_flash *f = info->flash_info;
358 struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
359
360 /* calculate data size */
361 switch (f->page_size) {
362 case 2048:
363 info->data_size = (info->use_ecc) ? 2088 : 2112;
364 break;
365 case 512:
366 info->data_size = (info->use_ecc) ? 520 : 528;
367 break;
368 default:
369 return -EINVAL;
370 }
371
372 /* generate values for NDCBx registers */
373 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
374 info->ndcb1 = 0;
375 info->ndcb2 = 0;
376 info->ndcb0 |= NDCB0_ADDR_CYC(f->row_addr_cycles + f->col_addr_cycles);
377
378 if (f->col_addr_cycles == 2) {
379 /* large block, 2 cycles for column address
380 * row address starts from 3rd cycle
381 */
382 info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
383 if (f->row_addr_cycles == 3)
384 info->ndcb2 = (page_addr >> 16) & 0xff;
385 } else
 386		/* small block, 1 cycle for column address
387 * row address starts from 2nd cycle
388 */
389 info->ndcb1 = (page_addr << 8) | (column & 0xff);
390
391 if (cmd == cmdset->program)
392 info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
393
394 return 0;
395}
396
397static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
398 uint16_t cmd, int page_addr)
399{
400 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
401 info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
402 info->ndcb1 = page_addr;
403 info->ndcb2 = 0;
404 return 0;
405}
406
407static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
408{
409 struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
410
411 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
412 info->ndcb1 = 0;
413 info->ndcb2 = 0;
414
415 if (cmd == cmdset->read_id) {
416 info->ndcb0 |= NDCB0_CMD_TYPE(3);
417 info->data_size = 8;
418 } else if (cmd == cmdset->read_status) {
419 info->ndcb0 |= NDCB0_CMD_TYPE(4);
420 info->data_size = 8;
421 } else if (cmd == cmdset->reset || cmd == cmdset->lock ||
422 cmd == cmdset->unlock) {
423 info->ndcb0 |= NDCB0_CMD_TYPE(5);
424 } else
425 return -EINVAL;
426
427 return 0;
428}
429
430static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
431{
432 uint32_t ndcr;
433
434 ndcr = nand_readl(info, NDCR);
435 nand_writel(info, NDCR, ndcr & ~int_mask);
436}
437
438static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
439{
440 uint32_t ndcr;
441
442 ndcr = nand_readl(info, NDCR);
443 nand_writel(info, NDCR, ndcr | int_mask);
444}
445
 446/* NOTE: ND_RUN must be set first and the command buffer written
 447 * afterwards; otherwise, it does not work
448 */
449static int write_cmd(struct pxa3xx_nand_info *info)
450{
451 uint32_t ndcr;
452
453 /* clear status bits and run */
454 nand_writel(info, NDSR, NDSR_MASK);
455
456 ndcr = info->reg_ndcr;
457
458 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
459 ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
460 ndcr |= NDCR_ND_RUN;
461
462 nand_writel(info, NDCR, ndcr);
463
464 if (wait_for_event(info, NDSR_WRCMDREQ)) {
465 printk(KERN_ERR "timed out writing command\n");
466 return -ETIMEDOUT;
467 }
468
469 nand_writel(info, NDCB0, info->ndcb0);
470 nand_writel(info, NDCB0, info->ndcb1);
471 nand_writel(info, NDCB0, info->ndcb2);
472 return 0;
473}
474
475static int handle_data_pio(struct pxa3xx_nand_info *info)
476{
477 int ret, timeout = CHIP_DELAY_TIMEOUT;
478
479 switch (info->state) {
480 case STATE_PIO_WRITING:
481 __raw_writesl(info->mmio_base + NDDB, info->data_buff,
 482				info->data_size >> 2);
483
484 enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
485
486 ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
487 if (!ret) {
488 printk(KERN_ERR "program command time out\n");
489 return -1;
490 }
491 break;
492 case STATE_PIO_READING:
493 __raw_readsl(info->mmio_base + NDDB, info->data_buff,
 494				info->data_size >> 2);
495 break;
496 default:
497 printk(KERN_ERR "%s: invalid state %d\n", __func__,
498 info->state);
499 return -EINVAL;
500 }
501
502 info->state = STATE_READY;
503 return 0;
504}
505
506static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
507{
508 struct pxa_dma_desc *desc = info->data_desc;
509 int dma_len = ALIGN(info->data_size, 32);
510
511 desc->ddadr = DDADR_STOP;
512 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
513
514 if (dir_out) {
515 desc->dsadr = info->data_buff_phys;
516 desc->dtadr = NDDB_DMA_ADDR;
517 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
518 } else {
519 desc->dtadr = info->data_buff_phys;
520 desc->dsadr = NDDB_DMA_ADDR;
521 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
522 }
523
524 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
525 DDADR(info->data_dma_ch) = info->data_desc_addr;
526 DCSR(info->data_dma_ch) |= DCSR_RUN;
527}
528
529static void pxa3xx_nand_data_dma_irq(int channel, void *data)
530{
531 struct pxa3xx_nand_info *info = data;
532 uint32_t dcsr;
533
534 dcsr = DCSR(channel);
535 DCSR(channel) = dcsr;
536
537 if (dcsr & DCSR_BUSERR) {
538 info->retcode = ERR_DMABUSERR;
539 complete(&info->cmd_complete);
540 }
541
542 if (info->state == STATE_DMA_WRITING) {
543 info->state = STATE_DMA_DONE;
544 enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
545 } else {
546 info->state = STATE_READY;
547 complete(&info->cmd_complete);
548 }
549}
550
551static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
552{
553 struct pxa3xx_nand_info *info = devid;
554 unsigned int status;
555
556 status = nand_readl(info, NDSR);
557
558 if (status & (NDSR_RDDREQ | NDSR_DBERR)) {
559 if (status & NDSR_DBERR)
560 info->retcode = ERR_DBERR;
561
562 disable_int(info, NDSR_RDDREQ | NDSR_DBERR);
563
564 if (info->use_dma) {
565 info->state = STATE_DMA_READING;
566 start_data_dma(info, 0);
567 } else {
568 info->state = STATE_PIO_READING;
569 complete(&info->cmd_complete);
570 }
571 } else if (status & NDSR_WRDREQ) {
572 disable_int(info, NDSR_WRDREQ);
573 if (info->use_dma) {
574 info->state = STATE_DMA_WRITING;
575 start_data_dma(info, 1);
576 } else {
577 info->state = STATE_PIO_WRITING;
578 complete(&info->cmd_complete);
579 }
580 } else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
581 if (status & NDSR_CS0_BBD)
582 info->retcode = ERR_BBERR;
583
584 disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
585 info->state = STATE_READY;
586 complete(&info->cmd_complete);
587 }
588 nand_writel(info, NDSR, status);
589 return IRQ_HANDLED;
590}
591
592static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
593{
594 uint32_t ndcr;
595 int ret, timeout = CHIP_DELAY_TIMEOUT;
596
597 if (write_cmd(info)) {
598 info->retcode = ERR_SENDCMD;
599 goto fail_stop;
600 }
601
602 info->state = STATE_CMD_HANDLE;
603
604 enable_int(info, event);
605
606 ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
607 if (!ret) {
608 printk(KERN_ERR "command execution timed out\n");
609 info->retcode = ERR_SENDCMD;
610 goto fail_stop;
611 }
612
613 if (info->use_dma == 0 && info->data_size > 0)
614 if (handle_data_pio(info))
615 goto fail_stop;
616
617 return 0;
618
619fail_stop:
620 ndcr = nand_readl(info, NDCR);
621 nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
622 udelay(10);
623 return -ETIMEDOUT;
624}
625
626static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
627{
628 struct pxa3xx_nand_info *info = mtd->priv;
629 return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
630}
631
632static inline int is_buf_blank(uint8_t *buf, size_t len)
633{
634 for (; len > 0; len--)
635 if (*buf++ != 0xff)
636 return 0;
637 return 1;
638}
639
640static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
641 int column, int page_addr)
642{
643 struct pxa3xx_nand_info *info = mtd->priv;
644 struct pxa3xx_nand_flash *flash_info = info->flash_info;
645 struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
646 int ret;
647
648 info->use_dma = (use_dma) ? 1 : 0;
649 info->use_ecc = 0;
650 info->data_size = 0;
651 info->state = STATE_READY;
652
653 init_completion(&info->cmd_complete);
654
655 switch (command) {
656 case NAND_CMD_READOOB:
657 /* disable HW ECC to get all the OOB data */
658 info->buf_count = mtd->writesize + mtd->oobsize;
659 info->buf_start = mtd->writesize + column;
660
661 if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
662 break;
663
664 pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
665
 666		/* We are only reading OOB, so data-area errors do not matter */
667 if (info->retcode == ERR_DBERR)
668 info->retcode = ERR_NONE;
669 break;
670
671 case NAND_CMD_READ0:
672 info->use_ecc = 1;
673 info->retcode = ERR_NONE;
674 info->buf_start = column;
675 info->buf_count = mtd->writesize + mtd->oobsize;
676 memset(info->data_buff, 0xFF, info->buf_count);
677
678 if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
679 break;
680
681 pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
682
683 if (info->retcode == ERR_DBERR) {
 684		/* for a blank page (all 0xff), the HW calculates its ECC
 685		 * as 0, which differs from the ECC information within the
 686		 * OOB; ignore such double-bit errors
687 */
688 if (is_buf_blank(info->data_buff, mtd->writesize))
689 info->retcode = ERR_NONE;
690 }
691 break;
692 case NAND_CMD_SEQIN:
693 info->buf_start = column;
694 info->buf_count = mtd->writesize + mtd->oobsize;
695 memset(info->data_buff, 0xff, info->buf_count);
696
697 /* save column/page_addr for next CMD_PAGEPROG */
698 info->seqin_column = column;
699 info->seqin_page_addr = page_addr;
700 break;
701 case NAND_CMD_PAGEPROG:
702 info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;
703
704 if (prepare_read_prog_cmd(info, cmdset->program,
705 info->seqin_column, info->seqin_page_addr))
706 break;
707
708 pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
709 break;
710 case NAND_CMD_ERASE1:
711 if (prepare_erase_cmd(info, cmdset->erase, page_addr))
712 break;
713
714 pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
715 break;
716 case NAND_CMD_ERASE2:
717 break;
718 case NAND_CMD_READID:
719 case NAND_CMD_STATUS:
720 info->use_dma = 0; /* force PIO read */
721 info->buf_start = 0;
722 info->buf_count = (command == NAND_CMD_READID) ?
723 flash_info->read_id_bytes : 1;
724
725 if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
726 cmdset->read_id : cmdset->read_status))
727 break;
728
729 pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
730 break;
731 case NAND_CMD_RESET:
732 if (prepare_other_cmd(info, cmdset->reset))
733 break;
734
735 ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
736 if (ret == 0) {
737 int timeout = 2;
738 uint32_t ndcr;
739
740 while (timeout--) {
741 if (nand_readl(info, NDSR) & NDSR_RDY)
742 break;
743 msleep(10);
744 }
745
746 ndcr = nand_readl(info, NDCR);
747 nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
748 }
749 break;
750 default:
 751		printk(KERN_ERR "unsupported command.\n");
752 break;
753 }
754
755 if (info->retcode == ERR_DBERR) {
756 printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
757 info->retcode = ERR_NONE;
758 }
759}
760
761static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
762{
763 struct pxa3xx_nand_info *info = mtd->priv;
764 char retval = 0xFF;
765
766 if (info->buf_start < info->buf_count)
 767		/* Has a new command just been sent? */
768 retval = info->data_buff[info->buf_start++];
769
770 return retval;
771}
772
773static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
774{
775 struct pxa3xx_nand_info *info = mtd->priv;
776 u16 retval = 0xFFFF;
777
778 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
779 retval = *((u16 *)(info->data_buff+info->buf_start));
780 info->buf_start += 2;
781 }
782 return retval;
783}
784
785static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
786{
787 struct pxa3xx_nand_info *info = mtd->priv;
788 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
789
790 memcpy(buf, info->data_buff + info->buf_start, real_len);
791 info->buf_start += real_len;
792}
793
794static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
795 const uint8_t *buf, int len)
796{
797 struct pxa3xx_nand_info *info = mtd->priv;
798 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
799
800 memcpy(info->data_buff + info->buf_start, buf, real_len);
801 info->buf_start += real_len;
802}
803
804static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
805 const uint8_t *buf, int len)
806{
807 return 0;
808}
809
810static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
811{
812 return;
813}
814
815static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
816{
817 struct pxa3xx_nand_info *info = mtd->priv;
818
 819	/* pxa3xx_nand_send_command has waited for the command to complete */
820 if (this->state == FL_WRITING || this->state == FL_ERASING) {
821 if (info->retcode == ERR_NONE)
822 return 0;
823 else {
824 /*
 825			 * any error makes it return 0x01, which tells
 826			 * the caller that the erase or write failed
827 */
828 return 0x01;
829 }
830 }
831
832 return 0;
833}
834
835static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
836{
837 return;
838}
839
840static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
841 const uint8_t *dat, uint8_t *ecc_code)
842{
843 return 0;
844}
845
846static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
847 uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
848{
849 struct pxa3xx_nand_info *info = mtd->priv;
850 /*
 851	 * Any error, including ERR_SEND_CMD, ERR_DBERR and ERR_BUSERR,
 852	 * is treated as an ECC error, which tells the caller that the
 853	 * read failed. We do distinguish all the errors internally, but
 854	 * nand_read_ecc only checks this function's return value
855 */
856 if (info->retcode != ERR_NONE)
857 return -1;
858
859 return 0;
860}
861
862static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
863{
864 struct pxa3xx_nand_flash *f = info->flash_info;
865 struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
866 uint32_t ndcr;
867 uint8_t id_buff[8];
868
869 if (prepare_other_cmd(info, cmdset->read_id)) {
870 printk(KERN_ERR "failed to prepare command\n");
871 return -EINVAL;
872 }
873
874 /* Send command */
875 if (write_cmd(info))
876 goto fail_timeout;
877
 878	/* Wait for NDSR_RDDREQ (read data request) */
879 if (wait_for_event(info, NDSR_RDDREQ))
880 goto fail_timeout;
881
882 __raw_readsl(info->mmio_base + NDDB, id_buff, 2);
883 *id = id_buff[0] | (id_buff[1] << 8);
884 return 0;
885
886fail_timeout:
887 ndcr = nand_readl(info, NDCR);
888 nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
889 udelay(10);
890 return -ETIMEDOUT;
891}
892
893static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
894 struct pxa3xx_nand_flash *f)
895{
896 struct platform_device *pdev = info->pdev;
897 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
898 uint32_t ndcr = 0x00000FFF; /* disable all interrupts */
899
900 if (f->page_size != 2048 && f->page_size != 512)
901 return -EINVAL;
902
903 if (f->flash_width != 16 && f->flash_width != 8)
904 return -EINVAL;
905
906 /* calculate flash information */
907 f->oob_size = (f->page_size == 2048) ? 64 : 16;
908 f->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
909
910 /* calculate addressing information */
911 f->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
912
913 if (f->num_blocks * f->page_per_block > 65536)
914 f->row_addr_cycles = 3;
915 else
916 f->row_addr_cycles = 2;
917
918 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
919 ndcr |= (f->col_addr_cycles == 2) ? NDCR_RA_START : 0;
920 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
921 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
922 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
923 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
924
925 ndcr |= NDCR_RD_ID_CNT(f->read_id_bytes);
926 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
927
928 info->reg_ndcr = ndcr;
929
930 pxa3xx_nand_set_timing(info, f->timing);
931 info->flash_info = f;
932 return 0;
933}
934
935static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info)
936{
937 struct pxa3xx_nand_flash *f;
938 uint32_t id;
939 int i;
940
941 for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
942
943 f = builtin_flash_types[i];
944
945 if (pxa3xx_nand_config_flash(info, f))
946 continue;
947
948 if (__readid(info, &id))
949 continue;
950
951 if (id == f->chip_id)
952 return 0;
953 }
954
955 return -ENODEV;
956}
957
958/* the maximum possible buffer size for large page with OOB data
959 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
960 * data buffer and the DMA descriptor
961 */
962#define MAX_BUFF_SIZE PAGE_SIZE
963
964static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
965{
966 struct platform_device *pdev = info->pdev;
967 int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
968
969 if (use_dma == 0) {
970 info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
971 if (info->data_buff == NULL)
972 return -ENOMEM;
973 return 0;
974 }
975
976 info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
977 &info->data_buff_phys, GFP_KERNEL);
978 if (info->data_buff == NULL) {
979 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
980 return -ENOMEM;
981 }
982
983 info->data_buff_size = MAX_BUFF_SIZE;
984 info->data_desc = (void *)info->data_buff + data_desc_offset;
985 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
986
987 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
988 pxa3xx_nand_data_dma_irq, info);
989 if (info->data_dma_ch < 0) {
990 dev_err(&pdev->dev, "failed to request data dma\n");
991 dma_free_coherent(&pdev->dev, info->data_buff_size,
992 info->data_buff, info->data_buff_phys);
993 return info->data_dma_ch;
994 }
995
996 return 0;
997}
998
999static struct nand_ecclayout hw_smallpage_ecclayout = {
1000 .eccbytes = 6,
1001 .eccpos = {8, 9, 10, 11, 12, 13 },
1002 .oobfree = { {2, 6} }
1003};
1004
1005static struct nand_ecclayout hw_largepage_ecclayout = {
1006 .eccbytes = 24,
1007 .eccpos = {
1008 40, 41, 42, 43, 44, 45, 46, 47,
1009 48, 49, 50, 51, 52, 53, 54, 55,
1010 56, 57, 58, 59, 60, 61, 62, 63},
1011 .oobfree = { {2, 38} }
1012};
1013
1014static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
1015 struct pxa3xx_nand_info *info)
1016{
1017 struct pxa3xx_nand_flash *f = info->flash_info;
1018 struct nand_chip *this = &info->nand_chip;
1019
1020	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
1021
1022 this->waitfunc = pxa3xx_nand_waitfunc;
1023 this->select_chip = pxa3xx_nand_select_chip;
1024 this->dev_ready = pxa3xx_nand_dev_ready;
1025 this->cmdfunc = pxa3xx_nand_cmdfunc;
1026 this->read_word = pxa3xx_nand_read_word;
1027 this->read_byte = pxa3xx_nand_read_byte;
1028 this->read_buf = pxa3xx_nand_read_buf;
1029 this->write_buf = pxa3xx_nand_write_buf;
1030 this->verify_buf = pxa3xx_nand_verify_buf;
1031
1032 this->ecc.mode = NAND_ECC_HW;
1033 this->ecc.hwctl = pxa3xx_nand_ecc_hwctl;
1034 this->ecc.calculate = pxa3xx_nand_ecc_calculate;
1035 this->ecc.correct = pxa3xx_nand_ecc_correct;
1036 this->ecc.size = f->page_size;
1037
1038 if (f->page_size == 2048)
1039 this->ecc.layout = &hw_largepage_ecclayout;
1040 else
1041 this->ecc.layout = &hw_smallpage_ecclayout;
1042
1043 this->chip_delay = 25;
1044}
1045
1046static int pxa3xx_nand_probe(struct platform_device *pdev)
1047{
1048 struct pxa3xx_nand_platform_data *pdata;
1049 struct pxa3xx_nand_info *info;
1050 struct nand_chip *this;
1051 struct mtd_info *mtd;
1052 struct resource *r;
1053 int ret = 0, irq;
1054
1055 pdata = pdev->dev.platform_data;
1056
1057 if (!pdata) {
1058 dev_err(&pdev->dev, "no platform data defined\n");
1059 return -ENODEV;
1060 }
1061
1062 mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
1063 GFP_KERNEL);
1064 if (!mtd) {
1065 dev_err(&pdev->dev, "failed to allocate memory\n");
1066 return -ENOMEM;
1067 }
1068
1069 info = (struct pxa3xx_nand_info *)(&mtd[1]);
1070 info->pdev = pdev;
1071
1072 this = &info->nand_chip;
1073 mtd->priv = info;
1074
1075 info->clk = clk_get(&pdev->dev, "NANDCLK");
1076 if (IS_ERR(info->clk)) {
1077 dev_err(&pdev->dev, "failed to get nand clock\n");
1078 ret = PTR_ERR(info->clk);
1079 goto fail_free_mtd;
1080 }
1081 clk_enable(info->clk);
1082
1083 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1084 if (r == NULL) {
1085 dev_err(&pdev->dev, "no resource defined for data DMA\n");
1086 ret = -ENXIO;
1087 goto fail_put_clk;
1088 }
1089 info->drcmr_dat = r->start;
1090
1091 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1092 if (r == NULL) {
1093 dev_err(&pdev->dev, "no resource defined for command DMA\n");
1094 ret = -ENXIO;
1095 goto fail_put_clk;
1096 }
1097 info->drcmr_cmd = r->start;
1098
1099 irq = platform_get_irq(pdev, 0);
1100 if (irq < 0) {
1101 dev_err(&pdev->dev, "no IRQ resource defined\n");
1102 ret = -ENXIO;
1103 goto fail_put_clk;
1104 }
1105
1106 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1107 if (r == NULL) {
1108 dev_err(&pdev->dev, "no IO memory resource defined\n");
1109 ret = -ENODEV;
1110 goto fail_put_clk;
1111 }
1112
1113 r = request_mem_region(r->start, r->end - r->start + 1, pdev->name);
1114 if (r == NULL) {
1115 dev_err(&pdev->dev, "failed to request memory resource\n");
1116 ret = -EBUSY;
1117 goto fail_put_clk;
1118 }
1119
1120 info->mmio_base = ioremap(r->start, r->end - r->start + 1);
1121 if (info->mmio_base == NULL) {
1122 dev_err(&pdev->dev, "ioremap() failed\n");
1123 ret = -ENODEV;
1124 goto fail_free_res;
1125 }
1126
1127 ret = pxa3xx_nand_init_buff(info);
1128 if (ret)
1129 goto fail_free_io;
1130
1131 ret = request_irq(IRQ_NAND, pxa3xx_nand_irq, IRQF_DISABLED,
1132 pdev->name, info);
1133 if (ret < 0) {
1134 dev_err(&pdev->dev, "failed to request IRQ\n");
1135 goto fail_free_buf;
1136 }
1137
1138 ret = pxa3xx_nand_detect_flash(info);
1139 if (ret) {
1140 dev_err(&pdev->dev, "failed to detect flash\n");
1141 ret = -ENODEV;
1142 goto fail_free_irq;
1143 }
1144
1145 pxa3xx_nand_init_mtd(mtd, info);
1146
1147 platform_set_drvdata(pdev, mtd);
1148
1149 if (nand_scan(mtd, 1)) {
1150 dev_err(&pdev->dev, "failed to scan nand\n");
1151 ret = -ENXIO;
1152 goto fail_free_irq;
1153 }
1154
1155 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1156
1157fail_free_irq:
1158 free_irq(IRQ_NAND, info);
1159fail_free_buf:
1160 if (use_dma) {
1161 pxa_free_dma(info->data_dma_ch);
1162 dma_free_coherent(&pdev->dev, info->data_buff_size,
1163 info->data_buff, info->data_buff_phys);
1164 } else
1165 kfree(info->data_buff);
1166fail_free_io:
1167 iounmap(info->mmio_base);
1168fail_free_res:
1169 release_mem_region(r->start, r->end - r->start + 1);
1170fail_put_clk:
1171 clk_disable(info->clk);
1172 clk_put(info->clk);
1173fail_free_mtd:
1174 kfree(mtd);
1175 return ret;
1176}
1177
1178static int pxa3xx_nand_remove(struct platform_device *pdev)
1179{
1180 struct mtd_info *mtd = platform_get_drvdata(pdev);
1181 struct pxa3xx_nand_info *info = mtd->priv;
1182
1183 platform_set_drvdata(pdev, NULL);
1184
1185 del_mtd_device(mtd);
1186 del_mtd_partitions(mtd);
1187 free_irq(IRQ_NAND, info);
1188 if (use_dma) {
1189 pxa_free_dma(info->data_dma_ch);
1190 dma_free_writecombine(&pdev->dev, info->data_buff_size,
1191 info->data_buff, info->data_buff_phys);
1192 } else
1193 kfree(info->data_buff);
1194 kfree(mtd);
1195 return 0;
1196}
1197
1198#ifdef CONFIG_PM
1199static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1200{
1201 struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1202 struct pxa3xx_nand_info *info = mtd->priv;
1203
1204 if (info->state != STATE_READY) {
1205 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1206 return -EAGAIN;
1207 }
1208
1209 return 0;
1210}
1211
1212static int pxa3xx_nand_resume(struct platform_device *pdev)
1213{
1214 struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1215 struct pxa3xx_nand_info *info = mtd->priv;
1216
1217 clk_enable(info->clk);
1218
1219	return pxa3xx_nand_config_flash(info, info->flash_info);
1220}
1221#else
1222#define pxa3xx_nand_suspend NULL
1223#define pxa3xx_nand_resume NULL
1224#endif
1225
1226static struct platform_driver pxa3xx_nand_driver = {
1227 .driver = {
1228 .name = "pxa3xx-nand",
1229 },
1230 .probe = pxa3xx_nand_probe,
1231 .remove = pxa3xx_nand_remove,
1232 .suspend = pxa3xx_nand_suspend,
1233 .resume = pxa3xx_nand_resume,
1234};
1235
1236static int __init pxa3xx_nand_init(void)
1237{
1238 return platform_driver_register(&pxa3xx_nand_driver);
1239}
1240module_init(pxa3xx_nand_init);
1241
1242static void __exit pxa3xx_nand_exit(void)
1243{
1244 platform_driver_unregister(&pxa3xx_nand_driver);
1245}
1246module_exit(pxa3xx_nand_exit);
1247
1248MODULE_LICENSE("GPL");
1249MODULE_DESCRIPTION("PXA3xx NAND controller driver");
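
pxa3xx_nand_set_timing() above converts each flash part's nanosecond
timings into controller clock cycles with ns2cycle() and clamps them into
the NDTR0/NDTR1 bit-fields. A user-space sketch of that arithmetic; the
156 MHz clock rate is an assumed example value, not taken from the driver:

#include <stdio.h>

/* same arithmetic as the driver macro: scale by MHz, round up a cycle */
static int ns2cycle(int ns, unsigned long clk)
{
	return (int)((ns * (clk / 1000000) / 1000) + 1);
}

int main(void)
{
	unsigned long nand_clk = 156000000;	/* assumed 156 MHz */

	/* samsung512MbX16_timing.tWP = 40 ns -> 7 cycles, which also
	 * happens to be the 3-bit ceiling imposed by min(c, 7) */
	printf("tWP = 40 ns -> %d cycles\n", ns2cycle(40, nand_clk));
	printf("tCH = 10 ns -> %d cycles\n", ns2cycle(10, nand_clk));
	return 0;
}
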
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 0f6ac250f434..26f88215bc47 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -478,6 +478,7 @@ static int __init rtc_from4_init(void)
478 struct nand_chip *this; 478 struct nand_chip *this;
479 unsigned short bcr1, bcr2, wcr2; 479 unsigned short bcr1, bcr2, wcr2;
480 int i; 480 int i;
481 int ret;
481 482
482 /* Allocate memory for MTD device structure and private data */ 483 /* Allocate memory for MTD device structure and private data */
483 rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 484 rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
@@ -537,6 +538,22 @@ static int __init rtc_from4_init(void)
537 this->ecc.hwctl = rtc_from4_enable_hwecc; 538 this->ecc.hwctl = rtc_from4_enable_hwecc;
538 this->ecc.calculate = rtc_from4_calculate_ecc; 539 this->ecc.calculate = rtc_from4_calculate_ecc;
539 this->ecc.correct = rtc_from4_correct_data; 540 this->ecc.correct = rtc_from4_correct_data;
541
542 /* We could create the decoder on demand, if memory is a concern.
543 * This way we have it handy, if an error happens
544 *
545 * Symbolsize is 10 (bits)
 546	 * Primitive polynomial is x^10+x^3+1
 547	 * first consecutive root is 0
 548	 * primitive element to generate roots = 1
 549	 * generator polynomial degree = 6
550 */
551 rs_decoder = init_rs(10, 0x409, 0, 1, 6);
552 if (!rs_decoder) {
553 printk(KERN_ERR "Could not create a RS decoder\n");
554 ret = -ENOMEM;
555 goto err_1;
556 }
540#else 557#else
541 printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n"); 558 printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
542 559
@@ -549,8 +566,8 @@ static int __init rtc_from4_init(void)
549 566
550 /* Scan to find existence of the device */ 567 /* Scan to find existence of the device */
551 if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) { 568 if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
552 kfree(rtc_from4_mtd); 569 ret = -ENXIO;
553 return -ENXIO; 570 goto err_2;
554 } 571 }
555 572
556 /* Perform 'device recovery' for each chip in case there was a power loss. */ 573 /* Perform 'device recovery' for each chip in case there was a power loss. */
@@ -566,28 +583,19 @@ static int __init rtc_from4_init(void)
566#endif 583#endif
567 584
568 /* Register the partitions */ 585 /* Register the partitions */
569 add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS); 586 ret = add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
587 if (ret)
588 goto err_3;
570 589
571#ifdef RTC_FROM4_HWECC
572 /* We could create the decoder on demand, if memory is a concern.
573 * This way we have it handy, if an error happens
574 *
575 * Symbolsize is 10 (bits)
 576	 * Primitive polynomial is x^10+x^3+1
 577	 * first consecutive root is 0
 578	 * primitive element to generate roots = 1
 579	 * generator polynomial degree = 6
580 */
581 rs_decoder = init_rs(10, 0x409, 0, 1, 6);
582 if (!rs_decoder) {
583 printk(KERN_ERR "Could not create a RS decoder\n");
584 nand_release(rtc_from4_mtd);
585 kfree(rtc_from4_mtd);
586 return -ENOMEM;
587 }
588#endif
589 /* Return happy */ 590 /* Return happy */
590 return 0; 591 return 0;
592err_3:
593 nand_release(rtc_from4_mtd);
594err_2:
595 free_rs(rs_decoder);
596err_1:
597 kfree(rtc_from4_mtd);
598 return ret;
591} 599}
592 600
593module_init(rtc_from4_init); 601module_init(rtc_from4_init);
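
The reordering above creates the Reed-Solomon decoder before nand_scan()
so the error path can unwind in strict reverse order (err_3/err_2/err_1).
A sketch of the allocate/release pairing using the <linux/rslib.h> API of
this era, with the parameters taken from the comment in the hunk:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rslib.h>

static struct rs_control *rs_decoder;

static int __init rs_example_init(void)
{
	/* symbol size 10 bits, GF polynomial x^10+x^3+1 (0x409), first
	 * consecutive root 0, primitive element 1, 6 roots */
	rs_decoder = init_rs(10, 0x409, 0, 1, 6);
	if (!rs_decoder)
		return -ENOMEM;
	return 0;
}

static void __exit rs_example_exit(void)
{
	free_rs(rs_decoder);	/* release the decoder on unload */
}

module_init(rs_example_init);
module_exit(rs_example_exit);
MODULE_LICENSE("GPL");
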
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 9260ad947524..b34a460ab679 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -119,8 +119,7 @@ struct s3c2410_nand_info {
119 void __iomem *sel_reg; 119 void __iomem *sel_reg;
120 int sel_bit; 120 int sel_bit;
121 int mtd_count; 121 int mtd_count;
122 122 unsigned long save_sel;
123 unsigned long save_nfconf;
124 123
125 enum s3c_cpu_type cpu_type; 124 enum s3c_cpu_type cpu_type;
126}; 125};
@@ -358,6 +357,14 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
358 if (diff0 == 0 && diff1 == 0 && diff2 == 0) 357 if (diff0 == 0 && diff1 == 0 && diff2 == 0)
359 return 0; /* ECC is ok */ 358 return 0; /* ECC is ok */
360 359
360 /* sometimes people do not think about using the ECC, so check
 361	 * to see if we have a 0xff,0xff,0xff read ECC and then ignore
 362	 * the error, on the assumption that this is an un-ECCed page.
363 */
364 if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
365 && info->platform->ignore_unset_ecc)
366 return 0;
367
361 /* Can we correct this ECC (ie, one row and column change). 368 /* Can we correct this ECC (ie, one row and column change).
362 * Note, this is similar to the 256 error code on smartmedia */ 369 * Note, this is similar to the 256 error code on smartmedia */
363 370
@@ -473,7 +480,7 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
473 ecc_code[1] = ecc >> 8; 480 ecc_code[1] = ecc >> 8;
474 ecc_code[2] = ecc >> 16; 481 ecc_code[2] = ecc >> 16;
475 482
476 pr_debug("%s: returning ecc %06lx\n", __func__, ecc); 483 pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
477 484
478 return 0; 485 return 0;
479} 486}
@@ -644,9 +651,6 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
644 chip->ecc.calculate = s3c2410_nand_calculate_ecc; 651 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
645 chip->ecc.correct = s3c2410_nand_correct_data; 652 chip->ecc.correct = s3c2410_nand_correct_data;
646 chip->ecc.mode = NAND_ECC_HW; 653 chip->ecc.mode = NAND_ECC_HW;
647 chip->ecc.size = 512;
648 chip->ecc.bytes = 3;
649 chip->ecc.layout = &nand_hw_eccoob;
650 654
651 switch (info->cpu_type) { 655 switch (info->cpu_type) {
652 case TYPE_S3C2410: 656 case TYPE_S3C2410:
@@ -668,6 +672,40 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
668 } else { 672 } else {
669 chip->ecc.mode = NAND_ECC_SOFT; 673 chip->ecc.mode = NAND_ECC_SOFT;
670 } 674 }
675
676 if (set->ecc_layout != NULL)
677 chip->ecc.layout = set->ecc_layout;
678
679 if (set->disable_ecc)
680 chip->ecc.mode = NAND_ECC_NONE;
681}
682
683/* s3c2410_nand_update_chip
684 *
685 * post-probe chip update, to change any items, such as the
686 * layout for large page nand
687 */
688
689static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
690 struct s3c2410_nand_mtd *nmtd)
691{
692 struct nand_chip *chip = &nmtd->chip;
693
694 printk("%s: chip %p: %d\n", __func__, chip, chip->page_shift);
695
696 if (hardware_ecc) {
 697		/* change the behaviour depending on whether we are using
698 * the large or small page nand device */
699
700 if (chip->page_shift > 10) {
701 chip->ecc.size = 256;
702 chip->ecc.bytes = 3;
703 } else {
704 chip->ecc.size = 512;
705 chip->ecc.bytes = 3;
706 chip->ecc.layout = &nand_hw_eccoob;
707 }
708 }
671} 709}
672 710
673/* s3c2410_nand_probe 711/* s3c2410_nand_probe
@@ -776,9 +814,12 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
776 814
777 s3c2410_nand_init_chip(info, nmtd, sets); 815 s3c2410_nand_init_chip(info, nmtd, sets);
778 816
779 nmtd->scan_res = nand_scan(&nmtd->mtd, (sets) ? sets->nr_chips : 1); 817 nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
818 (sets) ? sets->nr_chips : 1);
780 819
781 if (nmtd->scan_res == 0) { 820 if (nmtd->scan_res == 0) {
821 s3c2410_nand_update_chip(info, nmtd);
822 nand_scan_tail(&nmtd->mtd);
782 s3c2410_nand_add_partition(info, nmtd, sets); 823 s3c2410_nand_add_partition(info, nmtd, sets);
783 } 824 }
784 825
@@ -810,15 +851,14 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
810 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 851 struct s3c2410_nand_info *info = platform_get_drvdata(dev);
811 852
812 if (info) { 853 if (info) {
813 info->save_nfconf = readl(info->regs + S3C2410_NFCONF); 854 info->save_sel = readl(info->sel_reg);
814 855
815 /* For the moment, we must ensure nFCE is high during 856 /* For the moment, we must ensure nFCE is high during
816 * the time we are suspended. This really should be 857 * the time we are suspended. This really should be
817 * handled by suspending the MTDs we are using, but 858 * handled by suspending the MTDs we are using, but
818 * that is currently not the case. */ 859 * that is currently not the case. */
819 860
820 writel(info->save_nfconf | info->sel_bit, 861 writel(info->save_sel | info->sel_bit, info->sel_reg);
821 info->regs + S3C2410_NFCONF);
822 862
823 if (!allow_clk_stop(info)) 863 if (!allow_clk_stop(info))
824 clk_disable(info->clk); 864 clk_disable(info->clk);
@@ -830,7 +870,7 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
830static int s3c24xx_nand_resume(struct platform_device *dev) 870static int s3c24xx_nand_resume(struct platform_device *dev)
831{ 871{
832 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 872 struct s3c2410_nand_info *info = platform_get_drvdata(dev);
833 unsigned long nfconf; 873 unsigned long sel;
834 874
835 if (info) { 875 if (info) {
836 clk_enable(info->clk); 876 clk_enable(info->clk);
@@ -838,10 +878,10 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
838 878
839 /* Restore the state of the nFCE line. */ 879 /* Restore the state of the nFCE line. */
840 880
841 nfconf = readl(info->regs + S3C2410_NFCONF); 881 sel = readl(info->sel_reg);
842 nfconf &= ~info->sel_bit; 882 sel &= ~info->sel_bit;
843 nfconf |= info->save_nfconf & info->sel_bit; 883 sel |= info->save_sel & info->sel_bit;
844 writel(nfconf, info->regs + S3C2410_NFCONF); 884 writel(sel, info->sel_reg);
845 885
846 if (allow_clk_stop(info)) 886 if (allow_clk_stop(info))
847 clk_disable(info->clk); 887 clk_disable(info->clk);
@@ -927,3 +967,6 @@ module_exit(s3c2410_nand_exit);
927MODULE_LICENSE("GPL"); 967MODULE_LICENSE("GPL");
928MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 968MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
929MODULE_DESCRIPTION("S3C24XX MTD NAND driver"); 969MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
970MODULE_ALIAS("platform:s3c2410-nand");
971MODULE_ALIAS("platform:s3c2412-nand");
972MODULE_ALIAS("platform:s3c2440-nand");
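
The probe change above splits nand_scan() into its two phases so the ECC
geometry can be fixed up once the chip has been identified. A sketch of
that pattern against the nand API of this kernel generation (error
handling trimmed; the page_shift threshold mirrors the hunk above):

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int example_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;

	if (nand_scan_ident(mtd, 1))	/* phase 1: READID, chip geometry */
		return -ENODEV;

	/* chip->page_shift is valid from here on, so ECC parameters can
	 * depend on whether this is a large-page device */
	chip->ecc.size  = (chip->page_shift > 10) ? 256 : 512;
	chip->ecc.bytes = 3;

	if (nand_scan_tail(mtd))	/* phase 2: BBT scan, defaults */
		return -ENODEV;
	return 0;
}
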
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 0513cbc8834d..345e6eff89ce 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -33,11 +33,6 @@
33 33
34char nftlmountrev[]="$Revision: 1.41 $"; 34char nftlmountrev[]="$Revision: 1.41 $";
35 35
36extern int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
37 size_t *retlen, uint8_t *buf);
38extern int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
39 size_t *retlen, uint8_t *buf);
40
41/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the 36/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
42 * various device information of the NFTL partition and Bad Unit Table. Update 37 * various device information of the NFTL partition and Bad Unit Table. Update
 43 * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[] 38 * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[]
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index f86e06934cd8..4f80c2fd89af 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -72,3 +72,5 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
72 return nr_parts; 72 return nr_parts;
73} 73}
74EXPORT_SYMBOL(of_mtd_parse_partitions); 74EXPORT_SYMBOL(of_mtd_parse_partitions);
75
76MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 8d7d21be1541..5d7965f7e9ce 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -329,6 +329,21 @@ static int onenand_wait(struct mtd_info *mtd, int state)
329 printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", ctrl); 329 printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", ctrl);
330 if (ctrl & ONENAND_CTRL_LOCK) 330 if (ctrl & ONENAND_CTRL_LOCK)
331 printk(KERN_ERR "onenand_wait: it's locked error.\n"); 331 printk(KERN_ERR "onenand_wait: it's locked error.\n");
332 if (state == FL_READING) {
333 /*
334 * A power loss while writing can result in a page
335 * becoming unreadable. When the device is mounted
336 * again, reading that page gives controller errors.
 337			 * Upper level software like JFFS2 treats -EIO as fatal,
338 * refusing to mount at all. That means it is necessary
339 * to treat the error as an ECC error to allow recovery.
340 * Note that typically in this case, the eraseblock can
341 * still be erased and rewritten i.e. it has not become
342 * a bad block.
343 */
344 mtd->ecc_stats.failed++;
345 return -EBADMSG;
346 }
332 return -EIO; 347 return -EIO;
333 } 348 }
334 349
@@ -1336,7 +1351,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1336 } 1351 }
1337 1352
1338 /* Reject writes, which are not page aligned */ 1353 /* Reject writes, which are not page aligned */
1339 if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) { 1354 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1340 printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n"); 1355 printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n");
1341 return -EINVAL; 1356 return -EINVAL;
1342 } 1357 }
@@ -1466,7 +1481,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1466 } 1481 }
1467 1482
1468 /* Reject writes, which are not page aligned */ 1483 /* Reject writes, which are not page aligned */
1469 if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) { 1484 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1470 printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n"); 1485 printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n");
1471 return -EINVAL; 1486 return -EINVAL;
1472 } 1487 }
@@ -2052,7 +2067,7 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2052 * 2067 *
2053 * Check lock status 2068 * Check lock status
2054 */ 2069 */
2055static void onenand_check_lock_status(struct onenand_chip *this) 2070static int onenand_check_lock_status(struct onenand_chip *this)
2056{ 2071{
2057 unsigned int value, block, status; 2072 unsigned int value, block, status;
2058 unsigned int end; 2073 unsigned int end;
@@ -2070,9 +2085,13 @@ static void onenand_check_lock_status(struct onenand_chip *this)
2070 2085
2071 /* Check lock status */ 2086 /* Check lock status */
2072 status = this->read_word(this->base + ONENAND_REG_WP_STATUS); 2087 status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2073 if (!(status & ONENAND_WP_US)) 2088 if (!(status & ONENAND_WP_US)) {
2074 printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status); 2089 printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
2090 return 0;
2091 }
2075 } 2092 }
2093
2094 return 1;
2076} 2095}
2077 2096
2078/** 2097/**
@@ -2081,9 +2100,11 @@ static void onenand_check_lock_status(struct onenand_chip *this)
2081 * 2100 *
2082 * Unlock all blocks 2101 * Unlock all blocks
2083 */ 2102 */
2084static int onenand_unlock_all(struct mtd_info *mtd) 2103static void onenand_unlock_all(struct mtd_info *mtd)
2085{ 2104{
2086 struct onenand_chip *this = mtd->priv; 2105 struct onenand_chip *this = mtd->priv;
2106 loff_t ofs = 0;
2107 size_t len = this->chipsize;
2087 2108
2088 if (this->options & ONENAND_HAS_UNLOCK_ALL) { 2109 if (this->options & ONENAND_HAS_UNLOCK_ALL) {
2089 /* Set start block address */ 2110 /* Set start block address */
@@ -2099,23 +2120,19 @@ static int onenand_unlock_all(struct mtd_info *mtd)
2099 & ONENAND_CTRL_ONGO) 2120 & ONENAND_CTRL_ONGO)
2100 continue; 2121 continue;
2101 2122
2123 /* Check lock status */
2124 if (onenand_check_lock_status(this))
2125 return;
2126
2102 /* Workaround for all block unlock in DDP */ 2127 /* Workaround for all block unlock in DDP */
2103 if (ONENAND_IS_DDP(this)) { 2128 if (ONENAND_IS_DDP(this)) {
2104 /* 1st block on another chip */ 2129 /* All blocks on another chip */
2105 loff_t ofs = this->chipsize >> 1; 2130 ofs = this->chipsize >> 1;
2106 size_t len = mtd->erasesize; 2131 len = this->chipsize >> 1;
2107
2108 onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
2109 } 2132 }
2110
2111 onenand_check_lock_status(this);
2112
2113 return 0;
2114 } 2133 }
2115 2134
2116 onenand_do_lock_cmd(mtd, 0x0, this->chipsize, ONENAND_CMD_UNLOCK); 2135 onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
2117
2118 return 0;
2119} 2136}
2120 2137
2121#ifdef CONFIG_MTD_ONENAND_OTP 2138#ifdef CONFIG_MTD_ONENAND_OTP
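
The onenand_wait() change matters because of how callers classify errors:
-EBADMSG is accounted as an ECC failure and the data handed up as suspect,
while -EIO makes upper layers such as JFFS2 abort outright. A tiny
user-space sketch of that caller-side distinction; read_page() is a
made-up stand-in:

#include <errno.h>
#include <stdio.h>

/* stand-in for a flash read hitting a controller error after power loss */
static int read_page(void)
{
	return -EBADMSG;	/* report as ECC failure, not -EIO */
}

int main(void)
{
	int err = read_page();

	if (err == -EBADMSG)
		printf("ECC error: data suspect, block still usable\n");
	else if (err)
		printf("fatal I/O error: giving up\n");
	return 0;
}
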
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index aecdd50a1781..2f53b51c6805 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -17,9 +17,6 @@
17#include <linux/mtd/onenand.h> 17#include <linux/mtd/onenand.h>
18#include <linux/mtd/compatmac.h> 18#include <linux/mtd/compatmac.h>
19 19
20extern int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
21 struct mtd_oob_ops *ops);
22
23/** 20/**
24 * check_short_pattern - [GENERIC] check if a pattern is in the buffer 21 * check_short_pattern - [GENERIC] check if a pattern is in the buffer
25 * @param buf the buffer to search 22 * @param buf the buffer to search
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 823fba4e6d2f..c84e45465499 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -823,7 +823,7 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
823 kfree(part); 823 kfree(part);
824} 824}
825 825
826struct mtd_blktrans_ops rfd_ftl_tr = { 826static struct mtd_blktrans_ops rfd_ftl_tr = {
827 .name = "rfd", 827 .name = "rfd",
828 .major = RFD_FTL_MAJOR, 828 .major = RFD_FTL_MAJOR,
829 .part_bits = PART_BITS, 829 .part_bits = PART_BITS,
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index b9daf159a4a7..3f063108e95f 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -24,8 +24,13 @@ config MTD_UBI_WL_THRESHOLD
24 erase counter value and the lowest erase counter value of eraseblocks 24 erase counter value and the lowest erase counter value of eraseblocks
25 of UBI devices. When this threshold is exceeded, UBI starts performing 25 of UBI devices. When this threshold is exceeded, UBI starts performing
26 wear leveling by means of moving data from eraseblock with low erase 26 wear leveling by means of moving data from eraseblock with low erase
27 counter to eraseblocks with high erase counter. Leave the default 27 counter to eraseblocks with high erase counter.
28 value if unsure. 28
29 The default value should be OK for SLC NAND flashes, NOR flashes and
 30	  other flashes which have an eraseblock life-cycle of 100000 or more.
 31	  However, for MLC NAND flashes, which typically have an eraseblock
 32	  life-cycle of less than 10000, the threshold should be lowered
 33	  (e.g., to 128 or 256, although it need not be a power of 2).
29 34
30config MTD_UBI_BEB_RESERVE 35config MTD_UBI_BEB_RESERVE
31 int "Percentage of reserved eraseblocks for bad eraseblocks handling" 36 int "Percentage of reserved eraseblocks for bad eraseblocks handling"
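
The trigger the help text describes is simply the spread between the
highest and lowest erase counters across the device. A sketch with
illustrative numbers only:

#include <stdbool.h>

static bool needs_wear_leveling(unsigned int max_ec, unsigned int min_ec,
				unsigned int threshold)
{
	return max_ec - min_ec > threshold;
}

/* e.g. for MLC NAND with threshold 256: counters 300 vs 10 give a spread
 * of 290, so wear-leveling would start moving data */
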
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 275960462970..961416ac0616 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -606,8 +606,16 @@ static int io_init(struct ubi_device *ubi)
606 ubi->ro_mode = 1; 606 ubi->ro_mode = 1;
607 } 607 }
608 608
609 dbg_msg("leb_size %d", ubi->leb_size); 609 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
610 dbg_msg("ro_mode %d", ubi->ro_mode); 610 ubi->peb_size, ubi->peb_size >> 10);
611 ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
612 ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
613 if (ubi->hdrs_min_io_size != ubi->min_io_size)
614 ubi_msg("sub-page size: %d",
615 ubi->hdrs_min_io_size);
616 ubi_msg("VID header offset: %d (aligned %d)",
617 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
618 ubi_msg("data offset: %d", ubi->leb_start);
611 619
612 /* 620 /*
613 * Note, ideally, we have to initialize ubi->bad_peb_count here. But 621 * Note, ideally, we have to initialize ubi->bad_peb_count here. But
@@ -755,8 +763,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
755 mutex_init(&ubi->volumes_mutex); 763 mutex_init(&ubi->volumes_mutex);
756 spin_lock_init(&ubi->volumes_lock); 764 spin_lock_init(&ubi->volumes_lock);
757 765
758 dbg_msg("attaching mtd%d to ubi%d: VID header offset %d", 766 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
759 mtd->index, ubi_num, vid_hdr_offset);
760 767
761 err = io_init(ubi); 768 err = io_init(ubi);
762 if (err) 769 if (err)
@@ -804,15 +811,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
804 ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); 811 ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
805 ubi_msg("MTD device name: \"%s\"", mtd->name); 812 ubi_msg("MTD device name: \"%s\"", mtd->name);
806 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); 813 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
807 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
808 ubi->peb_size, ubi->peb_size >> 10);
809 ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
810 ubi_msg("number of good PEBs: %d", ubi->good_peb_count); 814 ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
811 ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); 815 ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
812 ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
813 ubi_msg("VID header offset: %d (aligned %d)",
814 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
815 ubi_msg("data offset: %d", ubi->leb_start);
816 ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); 816 ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
817 ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); 817 ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
818 ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); 818 ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
@@ -950,8 +950,7 @@ static int __init ubi_init(void)
950 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 950 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
951 951
952 if (mtd_devs > UBI_MAX_DEVICES) { 952 if (mtd_devs > UBI_MAX_DEVICES) {
953 printk(KERN_ERR "UBI error: too many MTD devices, " 953 ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
954 "maximum is %d\n", UBI_MAX_DEVICES);
955 return -EINVAL; 954 return -EINVAL;
956 } 955 }
957 956
@@ -959,25 +958,25 @@ static int __init ubi_init(void)
959 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 958 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
960 if (IS_ERR(ubi_class)) { 959 if (IS_ERR(ubi_class)) {
961 err = PTR_ERR(ubi_class); 960 err = PTR_ERR(ubi_class);
962 printk(KERN_ERR "UBI error: cannot create UBI class\n"); 961 ubi_err("cannot create UBI class");
963 goto out; 962 goto out;
964 } 963 }
965 964
966 err = class_create_file(ubi_class, &ubi_version); 965 err = class_create_file(ubi_class, &ubi_version);
967 if (err) { 966 if (err) {
968 printk(KERN_ERR "UBI error: cannot create sysfs file\n"); 967 ubi_err("cannot create sysfs file");
969 goto out_class; 968 goto out_class;
970 } 969 }
971 970
972 err = misc_register(&ubi_ctrl_cdev); 971 err = misc_register(&ubi_ctrl_cdev);
973 if (err) { 972 if (err) {
974 printk(KERN_ERR "UBI error: cannot register device\n"); 973 ubi_err("cannot register device");
975 goto out_version; 974 goto out_version;
976 } 975 }
977 976
978 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", 977 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
979 sizeof(struct ubi_wl_entry), 978 sizeof(struct ubi_wl_entry),
980 0, 0, NULL); 979 0, 0, NULL);
981 if (!ubi_wl_entry_slab) 980 if (!ubi_wl_entry_slab)
982 goto out_dev_unreg; 981 goto out_dev_unreg;
983 982
@@ -1000,8 +999,7 @@ static int __init ubi_init(void)
1000 mutex_unlock(&ubi_devices_mutex); 999 mutex_unlock(&ubi_devices_mutex);
1001 if (err < 0) { 1000 if (err < 0) {
1002 put_mtd_device(mtd); 1001 put_mtd_device(mtd);
1003 printk(KERN_ERR "UBI error: cannot attach mtd%d\n", 1002 ubi_err("cannot attach mtd%d", mtd->index);
1004 mtd->index);
1005 goto out_detach; 1003 goto out_detach;
1006 } 1004 }
1007 } 1005 }
@@ -1023,7 +1021,7 @@ out_version:
1023out_class: 1021out_class:
1024 class_destroy(ubi_class); 1022 class_destroy(ubi_class);
1025out: 1023out:
1026 printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err); 1024 ubi_err("UBI error: cannot initialize UBI, error %d", err);
1027 return err; 1025 return err;
1028} 1026}
1029module_init(ubi_init); 1027module_init(ubi_init);
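
The build.c hunks above replace open-coded printk() calls with UBI's own message helpers, which add the prefix, the function name and the trailing newline themselves (their printk-based definitions are visible in the ubi.h hunk further down). A userspace approximation of what the conversion buys:

#include <stdio.h>

/* Userspace stand-ins for the real ubi_msg()/ubi_err() macros. */
#define ubi_msg(fmt, ...) printf("UBI: " fmt "\n", ##__VA_ARGS__)
#define ubi_err(fmt, ...) \
	fprintf(stderr, "UBI error: %s: " fmt "\n", __func__, ##__VA_ARGS__)

int main(void)
{
	ubi_msg("attaching mtd%d to ubi%d", 0, 0);
	ubi_err("too many MTD devices, maximum is %d", 32);
	return 0;
}
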
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 51c40b17f1ec..8ea99d8c9e1f 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -41,7 +41,7 @@
41/* Generic debugging message */ 41/* Generic debugging message */
42#define dbg_msg(fmt, ...) \ 42#define dbg_msg(fmt, ...) \
43 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ 43 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
44 current->pid, __FUNCTION__, ##__VA_ARGS__) 44 current->pid, __func__, ##__VA_ARGS__)
45 45
46#define ubi_dbg_dump_stack() dump_stack() 46#define ubi_dbg_dump_stack() dump_stack()
47 47
@@ -99,8 +99,10 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
99#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD 99#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
100/* Initialization and build messages */ 100/* Initialization and build messages */
101#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 101#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
102#define UBI_IO_DEBUG 1
102#else 103#else
103#define dbg_bld(fmt, ...) ({}) 104#define dbg_bld(fmt, ...) ({})
105#define UBI_IO_DEBUG 0
104#endif 106#endif
105 107
106#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS 108#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index d397219238d3..e909b390069a 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -291,11 +291,12 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
291 /* 291 /*
292 * In case of dynamic volume, MTD device size is just volume size. In 292 * In case of dynamic volume, MTD device size is just volume size. In
293 * case of a static volume the size is equivalent to the amount of data 293 * case of a static volume the size is equivalent to the amount of data
294 * bytes, which is zero at this moment and will be changed after volume 294 * bytes.
295 * update.
296 */ 295 */
297 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 296 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
298 mtd->size = vol->usable_leb_size * vol->reserved_pebs; 297 mtd->size = vol->usable_leb_size * vol->reserved_pebs;
298 else
299 mtd->size = vol->used_bytes;
299 300
300 if (add_mtd_device(mtd)) { 301 if (add_mtd_device(mtd)) {
301 ubi_err("cannot not add MTD device\n"); 302 ubi_err("cannot not add MTD device\n");
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index db3efdef2433..4ac11df7b048 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -631,6 +631,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
631 631
632 dbg_io("read EC header from PEB %d", pnum); 632 dbg_io("read EC header from PEB %d", pnum);
633 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 633 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
634 if (UBI_IO_DEBUG)
635 verbose = 1;
634 636
635 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 637 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
636 if (err) { 638 if (err) {
@@ -904,6 +906,8 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
904 906
905 dbg_io("read VID header from PEB %d", pnum); 907 dbg_io("read VID header from PEB %d", pnum);
906 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 908 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
909 if (UBI_IO_DEBUG)
910 verbose = 1;
907 911
908 p = (char *)vid_hdr - ubi->vid_hdr_shift; 912 p = (char *)vid_hdr - ubi->vid_hdr_shift;
909 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 913 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 05aa3e7daba1..96d410e106ab 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -42,6 +42,7 @@
42 42
43#include <linux/err.h> 43#include <linux/err.h>
44#include <linux/crc32.h> 44#include <linux/crc32.h>
45#include <asm/div64.h>
45#include "ubi.h" 46#include "ubi.h"
46 47
47#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 48#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
@@ -92,27 +93,6 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
92} 93}
93 94
94/** 95/**
95 * commit_to_mean_value - commit intermediate results to the final mean erase
96 * counter value.
97 * @si: scanning information
98 *
99 * This is a helper function which calculates partial mean erase counter mean
100 * value and adds it to the resulting mean value. As we can work only in
101 * integer arithmetic and we want to calculate the mean value of erase counter
102 * accurately, we first sum erase counter values in @si->ec_sum variable and
103 * count these components in @si->ec_count. If this temporary @si->ec_sum is
104 * going to overflow, we calculate the partial mean value
105 * (@si->ec_sum/@si->ec_count) and add it to @si->mean_ec.
106 */
107static void commit_to_mean_value(struct ubi_scan_info *si)
108{
109 si->ec_sum /= si->ec_count;
110 if (si->ec_sum % si->ec_count >= si->ec_count / 2)
111 si->mean_ec += 1;
112 si->mean_ec += si->ec_sum;
113}
114
115/**
116 * validate_vid_hdr - check that volume identifier header is correct and 96 * validate_vid_hdr - check that volume identifier header is correct and
117 * consistent. 97 * consistent.
118 * @vid_hdr: the volume identifier header to check 98 * @vid_hdr: the volume identifier header to check
@@ -901,15 +881,8 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
901 881
902adjust_mean_ec: 882adjust_mean_ec:
903 if (!ec_corr) { 883 if (!ec_corr) {
904 if (si->ec_sum + ec < ec) { 884 si->ec_sum += ec;
905 commit_to_mean_value(si); 885 si->ec_count += 1;
906 si->ec_sum = 0;
907 si->ec_count = 0;
908 } else {
909 si->ec_sum += ec;
910 si->ec_count += 1;
911 }
912
913 if (ec > si->max_ec) 886 if (ec > si->max_ec)
914 si->max_ec = ec; 887 si->max_ec = ec;
915 if (ec < si->min_ec) 888 if (ec < si->min_ec)
@@ -965,9 +938,11 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
965 938
966 dbg_msg("scanning is finished"); 939 dbg_msg("scanning is finished");
967 940
968 /* Finish mean erase counter calculations */ 941 /* Calculate mean erase counter */
969 if (si->ec_count) 942 if (si->ec_count) {
970 commit_to_mean_value(si); 943 do_div(si->ec_sum, si->ec_count);
944 si->mean_ec = si->ec_sum;
945 }
971 946
972 if (si->is_empty) 947 if (si->is_empty)
973 ubi_msg("empty MTD device detected"); 948 ubi_msg("empty MTD device detected");
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 46d444af471a..966b9b682a42 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -124,7 +124,7 @@ struct ubi_scan_info {
124 int max_ec; 124 int max_ec;
125 unsigned long long max_sqnum; 125 unsigned long long max_sqnum;
126 int mean_ec; 126 int mean_ec;
127 int ec_sum; 127 uint64_t ec_sum;
128 int ec_count; 128 int ec_count;
129}; 129};
130 130
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
new file mode 100644
index 000000000000..c3185d9fd048
--- /dev/null
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -0,0 +1,372 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Authors: Artem Bityutskiy (Битюцкий Артём)
19 * Thomas Gleixner
20 * Frank Haverkamp
21 * Oliver Lohmann
22 * Andreas Arnez
23 */
24
25/*
26 * This file defines the layout of UBI headers and all the other UBI on-flash
27 * data structures.
28 */
29
30#ifndef __UBI_MEDIA_H__
31#define __UBI_MEDIA_H__
32
33#include <asm/byteorder.h>
34
35/* The version of UBI images supported by this implementation */
36#define UBI_VERSION 1
37
38/* The highest erase counter value supported by this implementation */
39#define UBI_MAX_ERASECOUNTER 0x7FFFFFFF
40
41/* The initial CRC32 value used when calculating CRC checksums */
42#define UBI_CRC32_INIT 0xFFFFFFFFU
43
44/* Erase counter header magic number (ASCII "UBI#") */
45#define UBI_EC_HDR_MAGIC 0x55424923
46/* Volume identifier header magic number (ASCII "UBI!") */
47#define UBI_VID_HDR_MAGIC 0x55424921
48
49/*
50 * Volume type constants used in the volume identifier header.
51 *
52 * @UBI_VID_DYNAMIC: dynamic volume
53 * @UBI_VID_STATIC: static volume
54 */
55enum {
56 UBI_VID_DYNAMIC = 1,
57 UBI_VID_STATIC = 2
58};
59
60/*
61 * Volume flags used in the volume table record.
62 *
63 * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
64 *
65 * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
66 * table. UBI automatically re-sizes the volume which has this flag and makes
67 * it as large as possible. This means that if, after
68 * initialization, UBI finds out that there are available physical eraseblocks
69 * present on the device, it automatically appends all of them to the volume
70 * (the physical eraseblocks reserved for bad eraseblock handling and other
71 * reserved physical eraseblocks are not taken). So, if there is a volume with
72 * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
73 * eraseblocks will be zero after UBI is loaded, because all of them will be
74 * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
75 * after the volume has been initialized.
76 *
77 * The auto-resize feature is useful for device production purposes. For
78 * example, different NAND flash chips may have different numbers of initial bad
79 * eraseblocks, depending on the particular chip instance. Manufacturers of NAND
80 * chips usually guarantee that the number of initial bad eraseblocks does not
81 * exceed a certain percentage, e.g. 2%. When one creates a UBI image which will be
82 * flashed to the end devices in production, one does not know the exact number
83 * of good physical eraseblocks the NAND chip on the device will have, but this
84 * number is required to calculate the volume sizes and put them into the volume
85 * table of the UBI image. In this case, one of the volumes (e.g., the one
86 * which will store the root file system) is marked as "auto-resizable", and
87 * UBI will adjust its size on the first boot if needed.
88 *
89 * Note, first UBI reserves some amount of physical eraseblocks for bad
90 * eraseblock handling, and then re-sizes the volume, not vice-versa. This
91 * means that the pool of reserved physical eraseblocks will always be present.
92 */
93enum {
94 UBI_VTBL_AUTORESIZE_FLG = 0x01,
95};
96
97/*
98 * Compatibility constants used by internal volumes.
99 *
100 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
101 * to the flash
102 * @UBI_COMPAT_RO: attach this device in read-only mode
103 * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
104 * physical eraseblocks, don't allow the wear-leveling unit to move them
105 * @UBI_COMPAT_REJECT: reject this UBI image
106 */
107enum {
108 UBI_COMPAT_DELETE = 1,
109 UBI_COMPAT_RO = 2,
110 UBI_COMPAT_PRESERVE = 4,
111 UBI_COMPAT_REJECT = 5
112};
113
114/* Sizes of UBI headers */
115#define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr)
116#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr)
117
118/* Sizes of UBI headers without the ending CRC */
119#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32))
120#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32))
121
122/**
123 * struct ubi_ec_hdr - UBI erase counter header.
124 * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
125 * @version: version of UBI implementation which is supposed to accept this
126 * UBI image
127 * @padding1: reserved for future, zeroes
128 * @ec: the erase counter
129 * @vid_hdr_offset: where the VID header starts
130 * @data_offset: where the user data start
131 * @padding2: reserved for future, zeroes
132 * @hdr_crc: erase counter header CRC checksum
133 *
134 * The erase counter header takes 64 bytes and has plenty of unused space for
135 * future usage. The unused fields are zeroed. The @version field is used to
136 * indicate the version of UBI implementation which is supposed to be able to
137 * work with this UBI image. If @version is greater than the current UBI
138 * version, the image is rejected. This may be useful in future if something
139 * is changed radically. This field is duplicated in the volume identifier
140 * header.
141 *
142 * The @vid_hdr_offset and @data_offset fields contain the offsets of the
143 * volume identifier header and user data, relative to the beginning of the
144 * physical eraseblock. These values have to be the same for all physical
145 * eraseblocks.
146 */
147struct ubi_ec_hdr {
148 __be32 magic;
149 __u8 version;
150 __u8 padding1[3];
151 __be64 ec; /* Warning: the current limit is 31-bit anyway! */
152 __be32 vid_hdr_offset;
153 __be32 data_offset;
154 __u8 padding2[36];
155 __be32 hdr_crc;
156} __attribute__ ((packed));
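
A minimal sketch of how this header is meant to be validated, assuming the kernel's crc32() from <linux/crc32.h> and the byte-order helpers from <asm/byteorder.h>: the checksum covers the first UBI_EC_HDR_SIZE_CRC bytes, i.e. everything up to but excluding @hdr_crc (error reporting and the @version check are omitted here):

static int ec_hdr_ok(const struct ubi_ec_hdr *hdr)
{
	uint32_t crc = crc32(UBI_CRC32_INIT, hdr, UBI_EC_HDR_SIZE_CRC);

	return hdr->magic == __cpu_to_be32(UBI_EC_HDR_MAGIC) &&
	       hdr->hdr_crc == __cpu_to_be32(crc);
}
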
157
158/**
159 * struct ubi_vid_hdr - on-flash UBI volume identifier header.
160 * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
161 * @version: UBI implementation version which is supposed to accept this UBI
162 * image (%UBI_VERSION)
163 * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
164 * @copy_flag: if this logical eraseblock was copied from another physical
165 * eraseblock (for wear-leveling reasons)
166 * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
167 * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
168 * @vol_id: ID of this volume
169 * @lnum: logical eraseblock number
170 * @leb_ver: version of this logical eraseblock (IMPORTANT: obsolete, to be
171 * removed, kept only for not breaking older UBI users)
172 * @data_size: how many bytes of data this logical eraseblock contains
173 * @used_ebs: total number of used logical eraseblocks in this volume
174 * @data_pad: how many bytes at the end of this physical eraseblock are not
175 * used
176 * @data_crc: CRC checksum of the data stored in this logical eraseblock
177 * @padding1: reserved for future, zeroes
178 * @sqnum: sequence number
179 * @padding2: reserved for future, zeroes
180 * @hdr_crc: volume identifier header CRC checksum
181 *
182 * The @sqnum is the value of the global sequence counter at the time when this
183 * VID header was created. The global sequence counter is incremented each time
184 * UBI writes a new VID header to the flash, i.e. when it maps a logical
185 * eraseblock to a new physical eraseblock. The global sequence counter is an
186 * unsigned 64-bit integer and we assume it never overflows. The @sqnum
187 * (sequence number) is used to distinguish between older and newer versions of
188 * logical eraseblocks.
189 *
190 * There are 2 situations when there may be more than one physical eraseblock
191 * corresponding to the same logical eraseblock, i.e., having the same @vol_id
192 * and @lnum values in the volume identifier header. Suppose we have a logical
193 * eraseblock L and it is mapped to the physical eraseblock P.
194 *
195 * 1. Because UBI may erase physical eraseblocks asynchronously, the following
196 * situation is possible: L is asynchronously erased, so P is scheduled for
197 * erasure, then L is written to,i.e. mapped to another physical eraseblock P1,
198 * so P1 is written to, then an unclean reboot happens. Result - there are 2
199 * physical eraseblocks P and P1 corresponding to the same logical eraseblock
200 * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the
201 * flash.
202 *
203 * 2. From time to time UBI moves logical eraseblocks to other physical
204 * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P
205 * to P1, and an unclean reboot happens before P is physically erased, there
206 * are two physical eraseblocks P and P1 corresponding to L and UBI has to
207 * select one of them when the flash is attached. The @sqnum field says which
208 * PEB is the original (obviously P will have the lower @sqnum) and which is the copy. But
209 * it is not enough to select the physical eraseblock with the higher sequence
210 * number, because the unclean reboot could have happened in the middle of the
211 * copying process, so the data in P is corrupted. It is also not enough to
212 * just select the physical eraseblock with the lower sequence number, because the
213 * data there may be old (consider the case where more data was added to P1 after
214 * the copying). Moreover, the unclean reboot may happen when the erasure of P
215 * was just started, so it results in an unstable P, which is "mostly" OK, but
216 * still has unstable bits.
217 *
218 * UBI uses the @copy_flag field to indicate that this logical eraseblock is a
219 * copy. UBI also calculates data CRC when the data is moved and stores it at
220 * the @data_crc field of the copy (P1). So when UBI needs to pick one physical
221 * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is
222 * examined. If it is cleared, the situation is simple and the newer one is
223 * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC
224 * checksum is correct, this physical eraseblock is selected (P1). Otherwise
225 * the older one (P) is selected.
226 *
227 * Note, there is an obsolete @leb_ver field which was used instead of @sqnum
228 * in the past. But it is not used anymore and we keep it in order to be able
229 * to deal with old UBI images. It will be removed at some point.
230 *
231 * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
232 * Internal volumes are not seen from outside and are used for various internal
233 * UBI purposes. In this implementation there is only one internal volume - the
234 * layout volume. Internal volumes are the main mechanism of UBI extensions.
235 * For example, in the future one may introduce a journal internal volume. Internal
236 * volumes have their own reserved range of IDs.
237 *
238 * The @compat field is only used for internal volumes and contains the "degree
239 * of their compatibility". It is always zero for user volumes. This field
240 * provides a mechanism to introduce UBI extensions and to be still compatible
241 * with older UBI binaries. For example, if someone introduced a journal in
242 * the future, one would probably use %UBI_COMPAT_DELETE compatibility for the
243 * journal volume. And in this case, older UBI binaries, which know nothing
244 * about the journal volume, would just delete this volume and work perfectly
245 * fine. This is similar to what Ext2fs does when it is fed by an Ext3fs image
246 * - it just ignores the Ext3fs journal.
247 *
248 * The @data_crc field contains the CRC checksum of the contents of the logical
249 * eraseblock if this is a static volume. In case of dynamic volumes, it does
250 * not contain the CRC checksum as a rule. The only exception is when the
251 * data of the physical eraseblock was moved by the wear-leveling unit, then
252 * the wear-leveling unit calculates the data CRC and stores it in the
253 * @data_crc field. And of course, the @copy_flag is %1 in this case.
254 *
255 * The @data_size field is used only for static volumes because UBI has to know
256 * how many bytes of data are stored in this eraseblock. For dynamic volumes,
257 * this field usually contains zero. The only exception is when the data of the
258 * physical eraseblock was moved to another physical eraseblock for
259 * wear-leveling reasons. In this case, UBI calculates the CRC checksum of the
260 * contents and uses both the @data_crc and @data_size fields, with
261 * @data_size containing the data size.
262 *
263 * The @used_ebs field is used only for static volumes and indicates how many
264 * eraseblocks the data of the volume takes. For dynamic volumes this field is
265 * not used and always contains zero.
266 *
267 * The @data_pad is calculated when volumes are created using the alignment
268 * parameter. So, effectively, the @data_pad field reduces the size of logical
269 * eraseblocks of this volume. This is very handy when one uses block-oriented
270 * software (say, cramfs) on top of the UBI volume.
271 */
272struct ubi_vid_hdr {
273 __be32 magic;
274 __u8 version;
275 __u8 vol_type;
276 __u8 copy_flag;
277 __u8 compat;
278 __be32 vol_id;
279 __be32 lnum;
280 __be32 leb_ver; /* obsolete, to be removed, don't use */
281 __be32 data_size;
282 __be32 used_ebs;
283 __be32 data_pad;
284 __be32 data_crc;
285 __u8 padding1[4];
286 __be64 sqnum;
287 __u8 padding2[12];
288 __be32 hdr_crc;
289} __attribute__ ((packed));
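
The copy-resolution rules described above reduce to a short decision. A sketch, where p1 is the header with the higher @sqnum and data_crc_ok() is a hypothetical helper that re-reads the LEB data and compares its CRC against @data_crc:

/* Returns 1 to pick P1 (the newer PEB), 0 to fall back to P. */
static int pick_p1(const struct ubi_vid_hdr *p1)
{
	if (!p1->copy_flag)		/* not a wear-leveling copy */
		return 1;
	if (data_crc_ok(p1))		/* the copy completed successfully */
		return 1;
	return 0;			/* interrupted copy: keep the original */
}
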
290
291/* Internal UBI volumes count */
292#define UBI_INT_VOL_COUNT 1
293
294/*
295 * Starting ID of internal volumes. There is reserved room for 4096 internal
296 * volumes.
297 */
298#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
299
300/* The layout volume contains the volume table */
301
302#define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
303#define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
304#define UBI_LAYOUT_VOLUME_ALIGN 1
305#define UBI_LAYOUT_VOLUME_EBS 2
306#define UBI_LAYOUT_VOLUME_NAME "layout volume"
307#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
308
309/* The maximum number of volumes per one UBI device */
310#define UBI_MAX_VOLUMES 128
311
312/* The maximum volume name length */
313#define UBI_VOL_NAME_MAX 127
314
315/* Size of the volume table record */
316#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record)
317
318/* Size of the volume table record without the ending CRC */
319#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32))
320
321/**
322 * struct ubi_vtbl_record - a record in the volume table.
323 * @reserved_pebs: how many physical eraseblocks are reserved for this volume
324 * @alignment: volume alignment
325 * @data_pad: how many bytes are unused at the end of the each physical
326 * eraseblock to satisfy the requested alignment
327 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
328 * @upd_marker: if volume update was started but not finished
329 * @name_len: volume name length
330 * @name: the volume name
331 * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
332 * @padding: reserved, zeroes
333 * @crc: a CRC32 checksum of the record
334 *
335 * The volume table records are stored in the volume table, which is stored in
336 * the layout volume. The layout volume consists of 2 logical eraseblocks, each
337 * of which contains a copy of the volume table (i.e., the volume table is
338 * duplicated). The volume table is an array of &struct ubi_vtbl_record
339 * objects indexed by the volume ID.
340 *
341 * If the size of the logical eraseblock is large enough to fit
342 * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES
343 * records. Otherwise, it contains as many records as it can fit (i.e., size of
344 * logical eraseblock divided by sizeof(struct ubi_vtbl_record)).
345 *
346 * The @upd_marker flag is used to implement volume update. It is set to %1
347 * before update and set to %0 after the update. So if the update operation was
348 * interrupted, UBI knows that the volume is corrupted.
349 *
350 * The @alignment field is specified when the volume is created and cannot be
351 * later changed. It may be useful, for example, when a block-oriented file
352 * system works on top of UBI. The @data_pad field is calculated using the
353 * logical eraseblock size and @alignment. The alignment must be a multiple of the
354 * minimal flash I/O unit. If @alignment is 1, all the available space of
355 * the physical eraseblocks is used.
356 *
357 * Empty records contain all zeroes and the CRC checksum of those zeroes.
358 */
359struct ubi_vtbl_record {
360 __be32 reserved_pebs;
361 __be32 alignment;
362 __be32 data_pad;
363 __u8 vol_type;
364 __u8 upd_marker;
365 __be16 name_len;
366 __u8 name[UBI_VOL_NAME_MAX+1];
367 __u8 flags;
368 __u8 padding[23];
369 __be32 crc;
370} __attribute__ ((packed));
371
372#endif /* !__UBI_MEDIA_H__ */
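
The record-count rule stated in the comment above struct ubi_vtbl_record is easy to make concrete; a sketch using only constants defined in this header:

/* Number of volume table slots for a given logical eraseblock size. */
static int vtbl_slots(int leb_size)
{
	int slots = leb_size / UBI_VTBL_RECORD_SIZE;

	return slots < UBI_MAX_VOLUMES ? slots : UBI_MAX_VOLUMES;
}
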
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index a548c1d28fa8..67dcbd11c15c 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -37,10 +37,9 @@
37#include <linux/string.h> 37#include <linux/string.h>
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/mtd/mtd.h> 39#include <linux/mtd/mtd.h>
40
41#include <mtd/ubi-header.h>
42#include <linux/mtd/ubi.h> 40#include <linux/mtd/ubi.h>
43 41
42#include "ubi-media.h"
44#include "scan.h" 43#include "scan.h"
45#include "debug.h" 44#include "debug.h"
46 45
@@ -54,10 +53,10 @@
54#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__) 53#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
55/* UBI warning messages */ 54/* UBI warning messages */
56#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \ 55#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
57 __FUNCTION__, ##__VA_ARGS__) 56 __func__, ##__VA_ARGS__)
58/* UBI error messages */ 57/* UBI error messages */
59#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \ 58#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
60 __FUNCTION__, ##__VA_ARGS__) 59 __func__, ##__VA_ARGS__)
61 60
62/* Lowest number of PEBs reserved for bad PEB handling */	 61/* Lowest number of PEBs reserved for bad PEB handling */
63#define MIN_RESEVED_PEBS 2 62#define MIN_RESEVED_PEBS 2
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 978e20a1791b..1e39e78f1778 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -1248,3 +1248,4 @@ module_exit(at91ether_exit)
1248MODULE_LICENSE("GPL"); 1248MODULE_LICENSE("GPL");
1249MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); 1249MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
1250MODULE_AUTHOR("Andrew Victor"); 1250MODULE_AUTHOR("Andrew Victor");
1251MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 91a6590d107b..ecd8fc6146e9 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -897,6 +897,7 @@ static struct platform_driver ep93xx_eth_driver = {
897 .remove = ep93xx_eth_remove, 897 .remove = ep93xx_eth_remove,
898 .driver = { 898 .driver = {
899 .name = "ep93xx-eth", 899 .name = "ep93xx-eth",
900 .owner = THIS_MODULE,
900 }, 901 },
901}; 902};
902 903
@@ -914,3 +915,4 @@ static void __exit ep93xx_eth_cleanup_module(void)
914module_init(ep93xx_eth_init_module); 915module_init(ep93xx_eth_init_module);
915module_exit(ep93xx_eth_cleanup_module); 916module_exit(ep93xx_eth_cleanup_module);
916MODULE_LICENSE("GPL"); 917MODULE_LICENSE("GPL");
918MODULE_ALIAS("platform:ep93xx-eth");
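
This hunk and the similar ones in the other network drivers below apply one recurring pattern: a MODULE_ALIAS("platform:<name>") lets udev and modprobe autoload the module from the platform device's MODALIAS string, and .owner = THIS_MODULE ties the driver's sysfs entries to a module reference. The skeleton (the probe/remove names and the device name are hypothetical) looks like:

static struct platform_driver example_eth_driver = {
	.probe	= example_eth_probe,
	.remove	= example_eth_remove,
	.driver	= {
		.name	= "example-eth",
		.owner	= THIS_MODULE,
	},
};
MODULE_ALIAS("platform:example-eth");	/* must match .driver.name */
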
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 5586fc624688..0afe522b8f7b 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -91,6 +91,144 @@
91#include "atlx.c" 91#include "atlx.c"
92 92
93/* 93/*
94 * This is the only thing that needs to be changed to adjust the
95 * maximum number of ports that the driver can manage.
96 */
97#define ATL1_MAX_NIC 4
98
99#define OPTION_UNSET -1
100#define OPTION_DISABLED 0
101#define OPTION_ENABLED 1
102
103#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
104
105/*
106 * Interrupt Moderate Timer in units of 2 us
107 *
108 * Valid Range: 10-65535
109 *
110 * Default Value: 100 (200us)
111 */
112static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
113static int num_int_mod_timer;
114module_param_array_named(int_mod_timer, int_mod_timer, int,
115 &num_int_mod_timer, 0);
116MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
117
118#define DEFAULT_INT_MOD_CNT 100 /* 200us */
119#define MAX_INT_MOD_CNT 65000
120#define MIN_INT_MOD_CNT 50
121
122struct atl1_option {
123 enum { enable_option, range_option, list_option } type;
124 char *name;
125 char *err;
126 int def;
127 union {
128 struct { /* range_option info */
129 int min;
130 int max;
131 } r;
132 struct { /* list_option info */
133 int nr;
134 struct atl1_opt_list {
135 int i;
136 char *str;
137 } *p;
138 } l;
139 } arg;
140};
141
142static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
143 struct pci_dev *pdev)
144{
145 if (*value == OPTION_UNSET) {
146 *value = opt->def;
147 return 0;
148 }
149
150 switch (opt->type) {
151 case enable_option:
152 switch (*value) {
153 case OPTION_ENABLED:
154 dev_info(&pdev->dev, "%s enabled\n", opt->name);
155 return 0;
156 case OPTION_DISABLED:
157 dev_info(&pdev->dev, "%s disabled\n", opt->name);
158 return 0;
159 }
160 break;
161 case range_option:
162 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
163 dev_info(&pdev->dev, "%s set to %i\n", opt->name,
164 *value);
165 return 0;
166 }
167 break;
168 case list_option:{
169 int i;
170 struct atl1_opt_list *ent;
171
172 for (i = 0; i < opt->arg.l.nr; i++) {
173 ent = &opt->arg.l.p[i];
174 if (*value == ent->i) {
175 if (ent->str[0] != '\0')
176 dev_info(&pdev->dev, "%s\n",
177 ent->str);
178 return 0;
179 }
180 }
181 }
182 break;
183
184 default:
185 break;
186 }
187
188 dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
189 opt->name, *value, opt->err);
190 *value = opt->def;
191 return -1;
192}
193
194/*
195 * atl1_check_options - Range Checking for Command Line Parameters
196 * @adapter: board private structure
197 *
198 * This routine checks all command line parameters for valid user
199 * input. If an invalid value is given, or if no user specified
200 * value exists, a default value is used. The final value is stored
201 * in a variable in the adapter structure.
202 */
203void __devinit atl1_check_options(struct atl1_adapter *adapter)
204{
205 struct pci_dev *pdev = adapter->pdev;
206 int bd = adapter->bd_number;
207 if (bd >= ATL1_MAX_NIC) {
208 dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
209 dev_notice(&pdev->dev, "using defaults for all values\n");
210 }
211 { /* Interrupt Moderate Timer */
212 struct atl1_option opt = {
213 .type = range_option,
214 .name = "Interrupt Moderator Timer",
215 .err = "using default of "
216 __MODULE_STRING(DEFAULT_INT_MOD_CNT),
217 .def = DEFAULT_INT_MOD_CNT,
218 .arg = {.r = {.min = MIN_INT_MOD_CNT,
219 .max = MAX_INT_MOD_CNT} }
220 };
221 int val;
222 if (num_int_mod_timer > bd) {
223 val = int_mod_timer[bd];
224 atl1_validate_option(&val, &opt, pdev);
225 adapter->imt = (u16) val;
226 } else
227 adapter->imt = (u16) (opt.def);
228 }
229}
230
231/*
94 * atl1_pci_tbl - PCI Device ID Table 232 * atl1_pci_tbl - PCI Device ID Table
95 */ 233 */
96static const struct pci_device_id atl1_pci_tbl[] = { 234static const struct pci_device_id atl1_pci_tbl[] = {
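
atl1_validate_option() above also handles enable_option and list_option, although atl1_check_options() only exercises range_option. For illustration, a hypothetical list_option use would look like this:

static struct atl1_opt_list duplex_list[] = {
	{ 0, "half duplex selected" },
	{ 1, "full duplex selected" },
};

static struct atl1_option duplex_opt = {
	.type	= list_option,
	.name	= "Duplex",
	.err	= "using default of 1",
	.def	= 1,
	.arg	= { .l = { .nr = 2, .p = duplex_list } },
};

/* int val = user_choice; atl1_validate_option(&val, &duplex_opt, pdev); */
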
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 4186326d1b94..f06b854e2501 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -253,181 +253,4 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter)
253 atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp); 253 atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp);
254} 254}
255 255
256/*
257 * This is the only thing that needs to be changed to adjust the
258 * maximum number of ports that the driver can manage.
259 */
260#define ATL1_MAX_NIC 4
261
262#define OPTION_UNSET -1
263#define OPTION_DISABLED 0
264#define OPTION_ENABLED 1
265
266#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
267
268/*
269 * Interrupt Moderate Timer in units of 2 us
270 *
271 * Valid Range: 10-65535
272 *
273 * Default Value: 100 (200us)
274 */
275static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
276static int num_int_mod_timer;
277module_param_array_named(int_mod_timer, int_mod_timer, int,
278 &num_int_mod_timer, 0);
279MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
280
281/*
282 * flash_vendor
283 *
284 * Valid Range: 0-2
285 *
286 * 0 - Atmel
287 * 1 - SST
288 * 2 - ST
289 *
290 * Default Value: 0
291 */
292static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
293static int num_flash_vendor;
294module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
295MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
296
297#define DEFAULT_INT_MOD_CNT 100 /* 200us */
298#define MAX_INT_MOD_CNT 65000
299#define MIN_INT_MOD_CNT 50
300
301#define FLASH_VENDOR_DEFAULT 0
302#define FLASH_VENDOR_MIN 0
303#define FLASH_VENDOR_MAX 2
304
305struct atl1_option {
306 enum { enable_option, range_option, list_option } type;
307 char *name;
308 char *err;
309 int def;
310 union {
311 struct { /* range_option info */
312 int min;
313 int max;
314 } r;
315 struct { /* list_option info */
316 int nr;
317 struct atl1_opt_list {
318 int i;
319 char *str;
320 } *p;
321 } l;
322 } arg;
323};
324
325static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
326 struct pci_dev *pdev)
327{
328 if (*value == OPTION_UNSET) {
329 *value = opt->def;
330 return 0;
331 }
332
333 switch (opt->type) {
334 case enable_option:
335 switch (*value) {
336 case OPTION_ENABLED:
337 dev_info(&pdev->dev, "%s enabled\n", opt->name);
338 return 0;
339 case OPTION_DISABLED:
340 dev_info(&pdev->dev, "%s disabled\n", opt->name);
341 return 0;
342 }
343 break;
344 case range_option:
345 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
346 dev_info(&pdev->dev, "%s set to %i\n", opt->name,
347 *value);
348 return 0;
349 }
350 break;
351 case list_option:{
352 int i;
353 struct atl1_opt_list *ent;
354
355 for (i = 0; i < opt->arg.l.nr; i++) {
356 ent = &opt->arg.l.p[i];
357 if (*value == ent->i) {
358 if (ent->str[0] != '\0')
359 dev_info(&pdev->dev, "%s\n",
360 ent->str);
361 return 0;
362 }
363 }
364 }
365 break;
366
367 default:
368 break;
369 }
370
371 dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
372 opt->name, *value, opt->err);
373 *value = opt->def;
374 return -1;
375}
376
377/*
378 * atl1_check_options - Range Checking for Command Line Parameters
379 * @adapter: board private structure
380 *
381 * This routine checks all command line parameters for valid user
382 * input. If an invalid value is given, or if no user specified
383 * value exists, a default value is used. The final value is stored
384 * in a variable in the adapter structure.
385 */
386void __devinit atl1_check_options(struct atl1_adapter *adapter)
387{
388 struct pci_dev *pdev = adapter->pdev;
389 int bd = adapter->bd_number;
390 if (bd >= ATL1_MAX_NIC) {
391 dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
392 dev_notice(&pdev->dev, "using defaults for all values\n");
393 }
394 { /* Interrupt Moderate Timer */
395 struct atl1_option opt = {
396 .type = range_option,
397 .name = "Interrupt Moderator Timer",
398 .err = "using default of "
399 __MODULE_STRING(DEFAULT_INT_MOD_CNT),
400 .def = DEFAULT_INT_MOD_CNT,
401 .arg = {.r = {.min = MIN_INT_MOD_CNT,
402 .max = MAX_INT_MOD_CNT} }
403 };
404 int val;
405 if (num_int_mod_timer > bd) {
406 val = int_mod_timer[bd];
407 atl1_validate_option(&val, &opt, pdev);
408 adapter->imt = (u16) val;
409 } else
410 adapter->imt = (u16) (opt.def);
411 }
412
413 { /* Flash Vendor */
414 struct atl1_option opt = {
415 .type = range_option,
416 .name = "SPI Flash Vendor",
417 .err = "using default of "
418 __MODULE_STRING(FLASH_VENDOR_DEFAULT),
419 .def = DEFAULT_INT_MOD_CNT,
420 .arg = {.r = {.min = FLASH_VENDOR_MIN,
421 .max = FLASH_VENDOR_MAX} }
422 };
423 int val;
424 if (num_flash_vendor > bd) {
425 val = flash_vendor[bd];
426 atl1_validate_option(&val, &opt, pdev);
427 adapter->hw.flash_vendor = (u8) val;
428 } else
429 adapter->hw.flash_vendor = (u8) (opt.def);
430 }
431}
432
433#endif /* ATLX_C */ 256#endif /* ATLX_C */
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 194949afacd0..0b4adf4a0f7d 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -1005,3 +1005,4 @@ module_exit(axdrv_exit);
1005MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver"); 1005MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
1006MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); 1006MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
1007MODULE_LICENSE("GPL v2"); 1007MODULE_LICENSE("GPL v2");
1008MODULE_ALIAS("platform:ax88796");
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 717dcc1aa1e9..4fec8581bfd7 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -47,6 +47,7 @@
47MODULE_AUTHOR(DRV_AUTHOR); 47MODULE_AUTHOR(DRV_AUTHOR);
48MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
49MODULE_DESCRIPTION(DRV_DESC); 49MODULE_DESCRIPTION(DRV_DESC);
50MODULE_ALIAS("platform:bfin_mac");
50 51
51#if defined(CONFIG_BFIN_MAC_USE_L1) 52#if defined(CONFIG_BFIN_MAC_USE_L1)
52# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size) 53# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size)
@@ -1089,8 +1090,9 @@ static struct platform_driver bfin_mac_driver = {
1089 .resume = bfin_mac_resume, 1090 .resume = bfin_mac_resume,
1090 .suspend = bfin_mac_suspend, 1091 .suspend = bfin_mac_suspend,
1091 .driver = { 1092 .driver = {
1092 .name = DRV_NAME, 1093 .name = DRV_NAME,
1093 }, 1094 .owner = THIS_MODULE,
1095 },
1094}; 1096};
1095 1097
1096static int __init bfin_mac_init(void) 1098static int __init bfin_mac_init(void)
@@ -1106,3 +1108,4 @@ static void __exit bfin_mac_cleanup(void)
1106} 1108}
1107 1109
1108module_exit(bfin_mac_cleanup); 1110module_exit(bfin_mac_cleanup);
1111
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 9da7ff437031..2b5740b3d182 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -42,6 +42,7 @@
42MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 42MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
43MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); 43MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
44MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
45MODULE_ALIAS("platform:cpmac");
45 46
46static int debug_level = 8; 47static int debug_level = 8;
47static int dumb_switch; 48static int dumb_switch;
@@ -1103,6 +1104,7 @@ static int __devexit cpmac_remove(struct platform_device *pdev)
1103 1104
1104static struct platform_driver cpmac_driver = { 1105static struct platform_driver cpmac_driver = {
1105 .driver.name = "cpmac", 1106 .driver.name = "cpmac",
1107 .driver.owner = THIS_MODULE,
1106 .probe = cpmac_probe, 1108 .probe = cpmac_probe,
1107 .remove = __devexit_p(cpmac_remove), 1109 .remove = __devexit_p(cpmac_remove),
1108}; 1110};
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index d63cc93f055d..e6fe2614ea6d 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1418,3 +1418,4 @@ module_exit(dm9000_cleanup);
1418MODULE_AUTHOR("Sascha Hauer, Ben Dooks"); 1418MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1419MODULE_DESCRIPTION("Davicom DM9000 network driver"); 1419MODULE_DESCRIPTION("Davicom DM9000 network driver");
1420MODULE_LICENSE("GPL"); 1420MODULE_LICENSE("GPL");
1421MODULE_ALIAS("platform:dm9000");
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 01c88664bad3..462351ca2c81 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1326,12 +1326,10 @@ struct e1000_info e1000_82571_info = {
1326 .mac = e1000_82571, 1326 .mac = e1000_82571,
1327 .flags = FLAG_HAS_HW_VLAN_FILTER 1327 .flags = FLAG_HAS_HW_VLAN_FILTER
1328 | FLAG_HAS_JUMBO_FRAMES 1328 | FLAG_HAS_JUMBO_FRAMES
1329 | FLAG_HAS_STATS_PTC_PRC
1330 | FLAG_HAS_WOL 1329 | FLAG_HAS_WOL
1331 | FLAG_APME_IN_CTRL3 1330 | FLAG_APME_IN_CTRL3
1332 | FLAG_RX_CSUM_ENABLED 1331 | FLAG_RX_CSUM_ENABLED
1333 | FLAG_HAS_CTRLEXT_ON_LOAD 1332 | FLAG_HAS_CTRLEXT_ON_LOAD
1334 | FLAG_HAS_STATS_ICR_ICT
1335 | FLAG_HAS_SMART_POWER_DOWN 1333 | FLAG_HAS_SMART_POWER_DOWN
1336 | FLAG_RESET_OVERWRITES_LAA /* errata */ 1334 | FLAG_RESET_OVERWRITES_LAA /* errata */
1337 | FLAG_TARC_SPEED_MODE_BIT /* errata */ 1335 | FLAG_TARC_SPEED_MODE_BIT /* errata */
@@ -1347,12 +1345,10 @@ struct e1000_info e1000_82572_info = {
1347 .mac = e1000_82572, 1345 .mac = e1000_82572,
1348 .flags = FLAG_HAS_HW_VLAN_FILTER 1346 .flags = FLAG_HAS_HW_VLAN_FILTER
1349 | FLAG_HAS_JUMBO_FRAMES 1347 | FLAG_HAS_JUMBO_FRAMES
1350 | FLAG_HAS_STATS_PTC_PRC
1351 | FLAG_HAS_WOL 1348 | FLAG_HAS_WOL
1352 | FLAG_APME_IN_CTRL3 1349 | FLAG_APME_IN_CTRL3
1353 | FLAG_RX_CSUM_ENABLED 1350 | FLAG_RX_CSUM_ENABLED
1354 | FLAG_HAS_CTRLEXT_ON_LOAD 1351 | FLAG_HAS_CTRLEXT_ON_LOAD
1355 | FLAG_HAS_STATS_ICR_ICT
1356 | FLAG_TARC_SPEED_MODE_BIT, /* errata */ 1352 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1357 .pba = 38, 1353 .pba = 38,
1358 .get_variants = e1000_get_variants_82571, 1354 .get_variants = e1000_get_variants_82571,
@@ -1365,11 +1361,9 @@ struct e1000_info e1000_82573_info = {
1365 .mac = e1000_82573, 1361 .mac = e1000_82573,
1366 .flags = FLAG_HAS_HW_VLAN_FILTER 1362 .flags = FLAG_HAS_HW_VLAN_FILTER
1367 | FLAG_HAS_JUMBO_FRAMES 1363 | FLAG_HAS_JUMBO_FRAMES
1368 | FLAG_HAS_STATS_PTC_PRC
1369 | FLAG_HAS_WOL 1364 | FLAG_HAS_WOL
1370 | FLAG_APME_IN_CTRL3 1365 | FLAG_APME_IN_CTRL3
1371 | FLAG_RX_CSUM_ENABLED 1366 | FLAG_RX_CSUM_ENABLED
1372 | FLAG_HAS_STATS_ICR_ICT
1373 | FLAG_HAS_SMART_POWER_DOWN 1367 | FLAG_HAS_SMART_POWER_DOWN
1374 | FLAG_HAS_AMT 1368 | FLAG_HAS_AMT
1375 | FLAG_HAS_ERT 1369 | FLAG_HAS_ERT
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 572cfd44397a..2a53875cddbf 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -184,6 +184,7 @@
184#define E1000_SWFW_EEP_SM 0x1 184#define E1000_SWFW_EEP_SM 0x1
185#define E1000_SWFW_PHY0_SM 0x2 185#define E1000_SWFW_PHY0_SM 0x2
186#define E1000_SWFW_PHY1_SM 0x4 186#define E1000_SWFW_PHY1_SM 0x4
187#define E1000_SWFW_CSR_SM 0x8
187 188
188/* Device Control */ 189/* Device Control */
189#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 190#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
@@ -527,8 +528,10 @@
527#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ 528#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
528#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ 529#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
529#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ 530#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
531#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
530#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ 532#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
531#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ 533#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
534#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
532 535
533/* NVM Control */ 536/* NVM Control */
534#define E1000_EECD_SK 0x00000001 /* NVM Clock */ 537#define E1000_EECD_SK 0x00000001 /* NVM Clock */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 5a89dff52264..38bfd0d261fe 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -64,11 +64,14 @@ struct e1000_info;
64/* Tx/Rx descriptor defines */ 64/* Tx/Rx descriptor defines */
65#define E1000_DEFAULT_TXD 256 65#define E1000_DEFAULT_TXD 256
66#define E1000_MAX_TXD 4096 66#define E1000_MAX_TXD 4096
67#define E1000_MIN_TXD 80 67#define E1000_MIN_TXD 64
68 68
69#define E1000_DEFAULT_RXD 256 69#define E1000_DEFAULT_RXD 256
70#define E1000_MAX_RXD 4096 70#define E1000_MAX_RXD 4096
71#define E1000_MIN_RXD 80 71#define E1000_MIN_RXD 64
72
73#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
74#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
72 75
73/* Early Receive defines */ 76/* Early Receive defines */
74#define E1000_ERT_2048 0x100 77#define E1000_ERT_2048 0x100
@@ -147,6 +150,18 @@ struct e1000_ring {
147 struct e1000_queue_stats stats; 150 struct e1000_queue_stats stats;
148}; 151};
149 152
153/* PHY register snapshot values */
154struct e1000_phy_regs {
155 u16 bmcr; /* basic mode control register */
156 u16 bmsr; /* basic mode status register */
157 u16 advertise; /* auto-negotiation advertisement */
158 u16 lpa; /* link partner ability register */
159 u16 expansion; /* auto-negotiation expansion reg */
160 u16 ctrl1000; /* 1000BASE-T control register */
161 u16 stat1000; /* 1000BASE-T status register */
162 u16 estatus; /* extended status register */
163};
164
150/* board specific private data structure */ 165/* board specific private data structure */
151struct e1000_adapter { 166struct e1000_adapter {
152 struct timer_list watchdog_timer; 167 struct timer_list watchdog_timer;
@@ -202,8 +217,8 @@ struct e1000_adapter {
202 /* Tx stats */ 217 /* Tx stats */
203 u64 tpt_old; 218 u64 tpt_old;
204 u64 colc_old; 219 u64 colc_old;
205 u64 gotcl_old; 220 u32 gotc;
206 u32 gotcl; 221 u64 gotc_old;
207 u32 tx_timeout_count; 222 u32 tx_timeout_count;
208 u32 tx_fifo_head; 223 u32 tx_fifo_head;
209 u32 tx_head_addr; 224 u32 tx_head_addr;
@@ -227,8 +242,8 @@ struct e1000_adapter {
227 u64 hw_csum_err; 242 u64 hw_csum_err;
228 u64 hw_csum_good; 243 u64 hw_csum_good;
229 u64 rx_hdr_split; 244 u64 rx_hdr_split;
230 u64 gorcl_old; 245 u32 gorc;
231 u32 gorcl; 246 u64 gorc_old;
232 u32 alloc_rx_buff_failed; 247 u32 alloc_rx_buff_failed;
233 u32 rx_dma_failed; 248 u32 rx_dma_failed;
234 249
@@ -250,6 +265,9 @@ struct e1000_adapter {
250 struct e1000_phy_info phy_info; 265 struct e1000_phy_info phy_info;
251 struct e1000_phy_stats phy_stats; 266 struct e1000_phy_stats phy_stats;
252 267
268 /* Snapshot of PHY registers */
269 struct e1000_phy_regs phy_regs;
270
253 struct e1000_ring test_tx_ring; 271 struct e1000_ring test_tx_ring;
254 struct e1000_ring test_rx_ring; 272 struct e1000_ring test_rx_ring;
255 u32 test_icr; 273 u32 test_icr;
@@ -286,8 +304,6 @@ struct e1000_info {
286#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) 304#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
287#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 305#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
288#define FLAG_HAS_JUMBO_FRAMES (1 << 7) 306#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
289#define FLAG_HAS_STATS_ICR_ICT (1 << 9)
290#define FLAG_HAS_STATS_PTC_PRC (1 << 10)
291#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 307#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
292#define FLAG_IS_QUAD_PORT_A (1 << 12) 308#define FLAG_IS_QUAD_PORT_A (1 << 12)
293#define FLAG_IS_QUAD_PORT (1 << 13) 309#define FLAG_IS_QUAD_PORT (1 << 13)
@@ -433,6 +449,8 @@ extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
433extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, 449extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
434 u32 usec_interval, bool *success); 450 u32 usec_interval, bool *success);
435extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); 451extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
452extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
453extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
436extern s32 e1000e_check_downshift(struct e1000_hw *hw); 454extern s32 e1000e_check_downshift(struct e1000_hw *hw);
437 455
438static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) 456static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
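
The two new ITR bounds encode an interrupt-rate range; the rates in the comments follow from rate = 1,000,000 / usecs, which this pair of one-liners verifies:

#include <stdio.h>

int main(void)
{
	printf("%d irq/sec\n", 1000000 / 10);	 /* E1000_MIN_ITR_USECS */
	printf("%d irq/sec\n", 1000000 / 10000); /* E1000_MAX_ITR_USECS */
	return 0;
}
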
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index d59a99ae44be..dc552d7d6fac 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -41,6 +41,7 @@
41#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 41#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
42#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 42#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
43#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 43#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
44#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
44 45
45#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 46#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
46#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 47#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
@@ -48,6 +49,7 @@
48 49
49#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 50#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
50#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 51#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
52#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
51 53
52#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ 54#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
53#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 55#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
@@ -85,6 +87,9 @@
85/* Kumeran Mode Control Register (Page 193, Register 16) */ 87/* Kumeran Mode Control Register (Page 193, Register 16) */
86#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 88#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
87 89
90/* Max number of times Kumeran read/write should be validated */
91#define GG82563_MAX_KMRN_RETRY 0x5
92
88/* Power Management Control Register (Page 193, Register 20) */ 93/* Power Management Control Register (Page 193, Register 20) */
89#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 94#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
90 /* 1=Enable SERDES Electrical Idle */ 95 /* 1=Enable SERDES Electrical Idle */
@@ -270,6 +275,7 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
270 u16 mask; 275 u16 mask;
271 276
272 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; 277 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
278 mask |= E1000_SWFW_CSR_SM;
273 279
274 return e1000_acquire_swfw_sync_80003es2lan(hw, mask); 280 return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
275} 281}
@@ -286,6 +292,8 @@ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
286 u16 mask; 292 u16 mask;
287 293
288 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; 294 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
295 mask |= E1000_SWFW_CSR_SM;
296
289 e1000_release_swfw_sync_80003es2lan(hw, mask); 297 e1000_release_swfw_sync_80003es2lan(hw, mask);
290} 298}
291 299
@@ -410,20 +418,27 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
410 u32 page_select; 418 u32 page_select;
411 u16 temp; 419 u16 temp;
412 420
421 ret_val = e1000_acquire_phy_80003es2lan(hw);
422 if (ret_val)
423 return ret_val;
424
413 /* Select Configuration Page */ 425 /* Select Configuration Page */
414 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) 426 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
415 page_select = GG82563_PHY_PAGE_SELECT; 427 page_select = GG82563_PHY_PAGE_SELECT;
416 else 428 } else {
417 /* 429 /*
418 * Use Alternative Page Select register to access 430 * Use Alternative Page Select register to access
419 * registers 30 and 31 431 * registers 30 and 31
420 */ 432 */
421 page_select = GG82563_PHY_PAGE_SELECT_ALT; 433 page_select = GG82563_PHY_PAGE_SELECT_ALT;
434 }
422 435
423 temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); 436 temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
424 ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp); 437 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
425 if (ret_val) 438 if (ret_val) {
439 e1000_release_phy_80003es2lan(hw);
426 return ret_val; 440 return ret_val;
441 }
427 442
428 /* 443 /*
429 * The "ready" bit in the MDIC register may be incorrectly set 444 * The "ready" bit in the MDIC register may be incorrectly set
@@ -433,20 +448,21 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
433 udelay(200); 448 udelay(200);
434 449
435 /* ...and verify the command was successful. */ 450 /* ...and verify the command was successful. */
436 ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp); 451 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
437 452
438 if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { 453 if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
439 ret_val = -E1000_ERR_PHY; 454 ret_val = -E1000_ERR_PHY;
455 e1000_release_phy_80003es2lan(hw);
440 return ret_val; 456 return ret_val;
441 } 457 }
442 458
443 udelay(200); 459 udelay(200);
444 460
445 ret_val = e1000e_read_phy_reg_m88(hw, 461 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
446 MAX_PHY_REG_ADDRESS & offset, 462 data);
447 data);
448 463
449 udelay(200); 464 udelay(200);
465 e1000_release_phy_80003es2lan(hw);
450 466
451 return ret_val; 467 return ret_val;
452} 468}
@@ -467,20 +483,27 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
467 u32 page_select; 483 u32 page_select;
468 u16 temp; 484 u16 temp;
469 485
486 ret_val = e1000_acquire_phy_80003es2lan(hw);
487 if (ret_val)
488 return ret_val;
489
470 /* Select Configuration Page */ 490 /* Select Configuration Page */
471 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) 491 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
472 page_select = GG82563_PHY_PAGE_SELECT; 492 page_select = GG82563_PHY_PAGE_SELECT;
473 else 493 } else {
474 /* 494 /*
475 * Use Alternative Page Select register to access 495 * Use Alternative Page Select register to access
476 * registers 30 and 31 496 * registers 30 and 31
477 */ 497 */
478 page_select = GG82563_PHY_PAGE_SELECT_ALT; 498 page_select = GG82563_PHY_PAGE_SELECT_ALT;
499 }
479 500
480 temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); 501 temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
481 ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp); 502 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
482 if (ret_val) 503 if (ret_val) {
504 e1000_release_phy_80003es2lan(hw);
483 return ret_val; 505 return ret_val;
506 }
484 507
485 508
486 /* 509 /*
@@ -491,18 +514,20 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
491 udelay(200); 514 udelay(200);
492 515
493 /* ...and verify the command was successful. */ 516 /* ...and verify the command was successful. */
494 ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp); 517 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
495 518
496 if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) 519 if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
520 e1000_release_phy_80003es2lan(hw);
497 return -E1000_ERR_PHY; 521 return -E1000_ERR_PHY;
522 }
498 523
499 udelay(200); 524 udelay(200);
500 525
501 ret_val = e1000e_write_phy_reg_m88(hw, 526 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
502 MAX_PHY_REG_ADDRESS & offset, 527 data);
503 data);
504 528
505 udelay(200); 529 udelay(200);
530 e1000_release_phy_80003es2lan(hw);
506 531
507 return ret_val; 532 return ret_val;
508} 533}
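
Both GG82563 register accessors above now take the PHY semaphore themselves: it is acquired once on entry and released on every exit, including the new early-out paths after a failed or mismatched page select. That is also why the calls switch from the e1000e_*_phy_reg_m88 helpers to the raw e1000e_*_phy_reg_mdic ones; as the phy.c hunks further down show, the m88 variants acquire and release the semaphore internally, which would clash with a caller already holding it. A condensed sketch of the pattern, using the goto-unwind idiom in place of the driver's release-before-each-return style (the two middle helpers are hypothetical placeholders):

	/* Sketch only: hold the PHY semaphore across a paged access and
	 * release it on every path. Acquire/release names are the driver's. */
	static s32 gg82563_paged_access(struct e1000_hw *hw)
	{
		s32 ret_val = e1000_acquire_phy_80003es2lan(hw);
		if (ret_val)
			return ret_val;			/* nothing held yet */

		ret_val = select_and_verify_page(hw);	/* hypothetical step */
		if (ret_val)
			goto out;

		ret_val = do_mdic_transfer(hw);		/* hypothetical step */
	out:
		e1000_release_phy_80003es2lan(hw);	/* single release point */
		return ret_val;
	}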
@@ -882,10 +907,10 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
882 struct e1000_phy_info *phy = &hw->phy; 907 struct e1000_phy_info *phy = &hw->phy;
883 s32 ret_val; 908 s32 ret_val;
884 u32 ctrl_ext; 909 u32 ctrl_ext;
885 u16 data; 910 u32 i = 0;
911 u16 data, data2;
886 912
887 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, 913 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
888 &data);
889 if (ret_val) 914 if (ret_val)
890 return ret_val; 915 return ret_val;
891 916
@@ -893,8 +918,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
893 /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ 918 /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
894 data |= GG82563_MSCR_TX_CLK_1000MBPS_25; 919 data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
895 920
896 ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, 921 ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
897 data);
898 if (ret_val) 922 if (ret_val)
899 return ret_val; 923 return ret_val;
900 924
@@ -954,6 +978,18 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
954 if (ret_val) 978 if (ret_val)
955 return ret_val; 979 return ret_val;
956 980
981 ret_val = e1000e_read_kmrn_reg(hw,
982 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
983 &data);
984 if (ret_val)
985 return ret_val;
986 data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
987 ret_val = e1000e_write_kmrn_reg(hw,
988 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
989 data);
990 if (ret_val)
991 return ret_val;
992
957 ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); 993 ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
958 if (ret_val) 994 if (ret_val)
959 return ret_val; 995 return ret_val;
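
The block added here is a plain read-modify-write of the Kumeran MAC-to-PHY opmode register to switch on electrical idle, with each step's error propagated. As a sketch, the shape factors out like this (e1000e_read_kmrn_reg and e1000e_write_kmrn_reg are the driver's own helpers; the wrapper is illustrative):

	/* Set bits in a Kumeran control/status register; sketch only. */
	static s32 kmrn_set_bits(struct e1000_hw *hw, u32 offset, u16 bits)
	{
		u16 data;
		s32 ret_val = e1000e_read_kmrn_reg(hw, offset, &data);
		if (ret_val)
			return ret_val;
		data |= bits;
		return e1000e_write_kmrn_reg(hw, offset, data);
	}

	/* The hunk above is effectively:
	 * kmrn_set_bits(hw, E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
	 *		 E1000_KMRNCTRLSTA_OPMODE_E_IDLE);
	 */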
@@ -983,9 +1019,18 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
983 if (ret_val) 1019 if (ret_val)
984 return ret_val; 1020 return ret_val;
985 1021
986 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data); 1022 do {
987 if (ret_val) 1023 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL,
988 return ret_val; 1024 &data);
1025 if (ret_val)
1026 return ret_val;
1027
1028 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL,
1029 &data2);
1030 if (ret_val)
1031 return ret_val;
1032 i++;
1033 } while ((data != data2) && (i < GG82563_MAX_KMRN_RETRY));
989 1034
990 data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; 1035 data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
991 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data); 1036 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
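
The single read of GG82563_PHY_KMRN_MODE_CTRL becomes a bounded loop that reads the register twice per pass and retries until both reads agree, guarding against sampling the register mid-update; the same loop is repeated in the 10/100 and 1000 Kumeran configuration hunks below. A standalone illustration of the read-until-stable idea, with a deliberately flaky reader standing in for e1e_rphy and an assumed retry bound:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in register that returns torn values for a while. */
	static int flaky_read(uint16_t *out)
	{
		static int n;
		*out = (++n < 3) ? (uint16_t)n : 0x1140;   /* settles */
		return 0;                                  /* 0 == success */
	}

	#define MAX_KMRN_RETRY 5   /* assumed stand-in for GG82563_MAX_KMRN_RETRY */

	int main(void)
	{
		uint16_t a = 0, b = 1;
		unsigned int i = 0;

		do {
			if (flaky_read(&a) || flaky_read(&b))
				return 1;           /* bail on read error */
			i++;
		} while ((a != b) && (i < MAX_KMRN_RETRY));

		printf("stable value 0x%04x after %u pass(es)\n", a, i);
		return 0;
	}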
@@ -1074,7 +1119,8 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1074{ 1119{
1075 s32 ret_val; 1120 s32 ret_val;
1076 u32 tipg; 1121 u32 tipg;
1077 u16 reg_data; 1122 u32 i = 0;
1123 u16 reg_data, reg_data2;
1078 1124
1079 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; 1125 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
1080 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1126 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
@@ -1088,9 +1134,16 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1088 tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; 1134 tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
1089 ew32(TIPG, tipg); 1135 ew32(TIPG, tipg);
1090 1136
1091 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); 1137 do {
1092 if (ret_val) 1138 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
1093 return ret_val; 1139 if (ret_val)
1140 return ret_val;
1141
1142 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
1143 if (ret_val)
1144 return ret_val;
1145 i++;
1146 } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
1094 1147
1095 if (duplex == HALF_DUPLEX) 1148 if (duplex == HALF_DUPLEX)
1096 reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; 1149 reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
@@ -1112,8 +1165,9 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1112static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) 1165static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
1113{ 1166{
1114 s32 ret_val; 1167 s32 ret_val;
1115 u16 reg_data; 1168 u16 reg_data, reg_data2;
1116 u32 tipg; 1169 u32 tipg;
1170 u32 i = 0;
1117 1171
1118 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; 1172 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
1119 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1173 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
@@ -1127,9 +1181,16 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
1127 tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; 1181 tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
1128 ew32(TIPG, tipg); 1182 ew32(TIPG, tipg);
1129 1183
1130 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); 1184 do {
1131 if (ret_val) 1185 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
1132 return ret_val; 1186 if (ret_val)
1187 return ret_val;
1188
1189 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
1190 if (ret_val)
1191 return ret_val;
1192 i++;
1193 } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
1133 1194
1134 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; 1195 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
1135 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); 1196 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
@@ -1231,12 +1292,10 @@ struct e1000_info e1000_es2_info = {
1231 .mac = e1000_80003es2lan, 1292 .mac = e1000_80003es2lan,
1232 .flags = FLAG_HAS_HW_VLAN_FILTER 1293 .flags = FLAG_HAS_HW_VLAN_FILTER
1233 | FLAG_HAS_JUMBO_FRAMES 1294 | FLAG_HAS_JUMBO_FRAMES
1234 | FLAG_HAS_STATS_PTC_PRC
1235 | FLAG_HAS_WOL 1295 | FLAG_HAS_WOL
1236 | FLAG_APME_IN_CTRL3 1296 | FLAG_APME_IN_CTRL3
1237 | FLAG_RX_CSUM_ENABLED 1297 | FLAG_RX_CSUM_ENABLED
1238 | FLAG_HAS_CTRLEXT_ON_LOAD 1298 | FLAG_HAS_CTRLEXT_ON_LOAD
1239 | FLAG_HAS_STATS_ICR_ICT
1240 | FLAG_RX_NEEDS_RESTART /* errata */ 1299 | FLAG_RX_NEEDS_RESTART /* errata */
1241 | FLAG_TARC_SET_BIT_ZERO /* errata */ 1300 | FLAG_TARC_SET_BIT_ZERO /* errata */
1242 | FLAG_APME_CHECK_PORT_B 1301 | FLAG_APME_CHECK_PORT_B
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 6d1b257bbda6..ce045acce63e 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,8 +46,8 @@ struct e1000_stats {
46static const struct e1000_stats e1000_gstrings_stats[] = { 46static const struct e1000_stats e1000_gstrings_stats[] = {
47 { "rx_packets", E1000_STAT(stats.gprc) }, 47 { "rx_packets", E1000_STAT(stats.gprc) },
48 { "tx_packets", E1000_STAT(stats.gptc) }, 48 { "tx_packets", E1000_STAT(stats.gptc) },
49 { "rx_bytes", E1000_STAT(stats.gorcl) }, 49 { "rx_bytes", E1000_STAT(stats.gorc) },
50 { "tx_bytes", E1000_STAT(stats.gotcl) }, 50 { "tx_bytes", E1000_STAT(stats.gotc) },
51 { "rx_broadcast", E1000_STAT(stats.bprc) }, 51 { "rx_broadcast", E1000_STAT(stats.bprc) },
52 { "tx_broadcast", E1000_STAT(stats.bptc) }, 52 { "tx_broadcast", E1000_STAT(stats.bptc) },
53 { "rx_multicast", E1000_STAT(stats.mprc) }, 53 { "rx_multicast", E1000_STAT(stats.mprc) },
@@ -83,7 +83,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
83 { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, 83 { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
84 { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, 84 { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
85 { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, 85 { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
86 { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, 86 { "rx_long_byte_count", E1000_STAT(stats.gorc) },
87 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, 87 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
88 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, 88 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
89 { "rx_header_split", E1000_STAT(rx_hdr_split) }, 89 { "rx_header_split", E1000_STAT(rx_hdr_split) },
@@ -1770,6 +1770,47 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1770 return 0; 1770 return 0;
1771} 1771}
1772 1772
1773static int e1000_get_coalesce(struct net_device *netdev,
1774 struct ethtool_coalesce *ec)
1775{
1776 struct e1000_adapter *adapter = netdev_priv(netdev);
1777
1778 if (adapter->itr_setting <= 3)
1779 ec->rx_coalesce_usecs = adapter->itr_setting;
1780 else
1781 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
1782
1783 return 0;
1784}
1785
1786static int e1000_set_coalesce(struct net_device *netdev,
1787 struct ethtool_coalesce *ec)
1788{
1789 struct e1000_adapter *adapter = netdev_priv(netdev);
1790 struct e1000_hw *hw = &adapter->hw;
1791
1792 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
1793 ((ec->rx_coalesce_usecs > 3) &&
1794 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
1795 (ec->rx_coalesce_usecs == 2))
1796 return -EINVAL;
1797
1798 if (ec->rx_coalesce_usecs <= 3) {
1799 adapter->itr = 20000;
1800 adapter->itr_setting = ec->rx_coalesce_usecs;
1801 } else {
1802 adapter->itr = (1000000 / ec->rx_coalesce_usecs);
1803 adapter->itr_setting = adapter->itr & ~3;
1804 }
1805
1806 if (adapter->itr_setting != 0)
1807 ew32(ITR, 1000000000 / (adapter->itr * 256));
1808 else
1809 ew32(ITR, 0);
1810
1811 return 0;
1812}
1813
1773static int e1000_nway_reset(struct net_device *netdev) 1814static int e1000_nway_reset(struct net_device *netdev)
1774{ 1815{
1775 struct e1000_adapter *adapter = netdev_priv(netdev); 1816 struct e1000_adapter *adapter = netdev_priv(netdev);
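
The new coalescing hooks map ethtool's rx_coalesce_usecs onto the driver's itr_setting convention: values 0 through 3 are kept verbatim as mode selectors (2 is rejected, along with anything between 3 and E1000_MIN_ITR_USECS or above E1000_MAX_ITR_USECS), and larger values are converted to an interrupt rate with 1000000/usecs. The ITR register itself counts 256 ns intervals, hence the 1000000000/(itr * 256) write. The arithmetic, standalone:

	#include <stdio.h>

	/* usecs between interrupts -> e1000e ITR register value. */
	static unsigned int itr_reg_from_usecs(unsigned int usecs)
	{
		unsigned int itr = 1000000 / usecs;   /* interrupts per second */
		return 1000000000 / (itr * 256);      /* interval in 256 ns units */
	}

	int main(void)
	{
		/* 50 us spacing -> 20000 ints/s -> 195 (x 256 ns ~= 50 us) */
		printf("usecs=50 -> ITR=%u\n", itr_reg_from_usecs(50));
		return 0;
	}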
@@ -1845,6 +1886,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1845 .phys_id = e1000_phys_id, 1886 .phys_id = e1000_phys_id,
1846 .get_ethtool_stats = e1000_get_ethtool_stats, 1887 .get_ethtool_stats = e1000_get_ethtool_stats,
1847 .get_sset_count = e1000e_get_sset_count, 1888 .get_sset_count = e1000e_get_sset_count,
1889 .get_coalesce = e1000_get_coalesce,
1890 .set_coalesce = e1000_set_coalesce,
1848}; 1891};
1849 1892
1850void e1000e_set_ethtool_ops(struct net_device *netdev) 1893void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 53f1ac6327fa..a930e6d9cf02 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -592,10 +592,8 @@ struct e1000_hw_stats {
592 u64 bprc; 592 u64 bprc;
593 u64 mprc; 593 u64 mprc;
594 u64 gptc; 594 u64 gptc;
595 u64 gorcl; 595 u64 gorc;
596 u64 gorch; 596 u64 gotc;
597 u64 gotcl;
598 u64 gotch;
599 u64 rnbc; 597 u64 rnbc;
600 u64 ruc; 598 u64 ruc;
601 u64 rfc; 599 u64 rfc;
@@ -604,10 +602,8 @@ struct e1000_hw_stats {
604 u64 mgprc; 602 u64 mgprc;
605 u64 mgpdc; 603 u64 mgpdc;
606 u64 mgptc; 604 u64 mgptc;
607 u64 torl; 605 u64 tor;
608 u64 torh; 606 u64 tot;
609 u64 totl;
610 u64 toth;
611 u64 tpr; 607 u64 tpr;
612 u64 tpt; 608 u64 tpt;
613 u64 ptc64; 609 u64 ptc64;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c8dc47fd132a..8991ab8911e2 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -46,7 +46,7 @@
46 46
47#include "e1000.h" 47#include "e1000.h"
48 48
49#define DRV_VERSION "0.2.0" 49#define DRV_VERSION "0.2.1"
50char e1000e_driver_name[] = "e1000e"; 50char e1000e_driver_name[] = "e1000e";
51const char e1000e_driver_version[] = DRV_VERSION; 51const char e1000e_driver_version[] = DRV_VERSION;
52 52
@@ -466,10 +466,10 @@ next_desc:
466 if (cleaned_count) 466 if (cleaned_count)
467 adapter->alloc_rx_buf(adapter, cleaned_count); 467 adapter->alloc_rx_buf(adapter, cleaned_count);
468 468
469 adapter->total_rx_packets += total_rx_packets;
470 adapter->total_rx_bytes += total_rx_bytes; 469 adapter->total_rx_bytes += total_rx_bytes;
471 adapter->net_stats.rx_packets += total_rx_packets; 470 adapter->total_rx_packets += total_rx_packets;
472 adapter->net_stats.rx_bytes += total_rx_bytes; 471 adapter->net_stats.rx_bytes += total_rx_bytes;
472 adapter->net_stats.rx_packets += total_rx_packets;
473 return cleaned; 473 return cleaned;
474} 474}
475 475
@@ -606,8 +606,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
606 } 606 }
607 adapter->total_tx_bytes += total_tx_bytes; 607 adapter->total_tx_bytes += total_tx_bytes;
608 adapter->total_tx_packets += total_tx_packets; 608 adapter->total_tx_packets += total_tx_packets;
609 adapter->net_stats.tx_packets += total_tx_packets;
610 adapter->net_stats.tx_bytes += total_tx_bytes; 609 adapter->net_stats.tx_bytes += total_tx_bytes;
610 adapter->net_stats.tx_packets += total_tx_packets;
611 return cleaned; 611 return cleaned;
612} 612}
613 613
@@ -775,10 +775,10 @@ next_desc:
775 if (cleaned_count) 775 if (cleaned_count)
776 adapter->alloc_rx_buf(adapter, cleaned_count); 776 adapter->alloc_rx_buf(adapter, cleaned_count);
777 777
778 adapter->total_rx_packets += total_rx_packets;
779 adapter->total_rx_bytes += total_rx_bytes; 778 adapter->total_rx_bytes += total_rx_bytes;
780 adapter->net_stats.rx_packets += total_rx_packets; 779 adapter->total_rx_packets += total_rx_packets;
781 adapter->net_stats.rx_bytes += total_rx_bytes; 780 adapter->net_stats.rx_bytes += total_rx_bytes;
781 adapter->net_stats.rx_packets += total_rx_packets;
782 return cleaned; 782 return cleaned;
783} 783}
784 784
@@ -2506,56 +2506,27 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2506 2506
2507 adapter->stats.crcerrs += er32(CRCERRS); 2507 adapter->stats.crcerrs += er32(CRCERRS);
2508 adapter->stats.gprc += er32(GPRC); 2508 adapter->stats.gprc += er32(GPRC);
2509 adapter->stats.gorcl += er32(GORCL); 2509 adapter->stats.gorc += er32(GORCL);
2510 adapter->stats.gorch += er32(GORCH); 2510 er32(GORCH); /* Clear gorc */
2511 adapter->stats.bprc += er32(BPRC); 2511 adapter->stats.bprc += er32(BPRC);
2512 adapter->stats.mprc += er32(MPRC); 2512 adapter->stats.mprc += er32(MPRC);
2513 adapter->stats.roc += er32(ROC); 2513 adapter->stats.roc += er32(ROC);
2514 2514
2515 if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
2516 adapter->stats.prc64 += er32(PRC64);
2517 adapter->stats.prc127 += er32(PRC127);
2518 adapter->stats.prc255 += er32(PRC255);
2519 adapter->stats.prc511 += er32(PRC511);
2520 adapter->stats.prc1023 += er32(PRC1023);
2521 adapter->stats.prc1522 += er32(PRC1522);
2522 adapter->stats.symerrs += er32(SYMERRS);
2523 adapter->stats.sec += er32(SEC);
2524 }
2525
2526 adapter->stats.mpc += er32(MPC); 2515 adapter->stats.mpc += er32(MPC);
2527 adapter->stats.scc += er32(SCC); 2516 adapter->stats.scc += er32(SCC);
2528 adapter->stats.ecol += er32(ECOL); 2517 adapter->stats.ecol += er32(ECOL);
2529 adapter->stats.mcc += er32(MCC); 2518 adapter->stats.mcc += er32(MCC);
2530 adapter->stats.latecol += er32(LATECOL); 2519 adapter->stats.latecol += er32(LATECOL);
2531 adapter->stats.dc += er32(DC); 2520 adapter->stats.dc += er32(DC);
2532 adapter->stats.rlec += er32(RLEC);
2533 adapter->stats.xonrxc += er32(XONRXC); 2521 adapter->stats.xonrxc += er32(XONRXC);
2534 adapter->stats.xontxc += er32(XONTXC); 2522 adapter->stats.xontxc += er32(XONTXC);
2535 adapter->stats.xoffrxc += er32(XOFFRXC); 2523 adapter->stats.xoffrxc += er32(XOFFRXC);
2536 adapter->stats.xofftxc += er32(XOFFTXC); 2524 adapter->stats.xofftxc += er32(XOFFTXC);
2537 adapter->stats.fcruc += er32(FCRUC);
2538 adapter->stats.gptc += er32(GPTC); 2525 adapter->stats.gptc += er32(GPTC);
2539 adapter->stats.gotcl += er32(GOTCL); 2526 adapter->stats.gotc += er32(GOTCL);
2540 adapter->stats.gotch += er32(GOTCH); 2527 er32(GOTCH); /* Clear gotc */
2541 adapter->stats.rnbc += er32(RNBC); 2528 adapter->stats.rnbc += er32(RNBC);
2542 adapter->stats.ruc += er32(RUC); 2529 adapter->stats.ruc += er32(RUC);
2543 adapter->stats.rfc += er32(RFC);
2544 adapter->stats.rjc += er32(RJC);
2545 adapter->stats.torl += er32(TORL);
2546 adapter->stats.torh += er32(TORH);
2547 adapter->stats.totl += er32(TOTL);
2548 adapter->stats.toth += er32(TOTH);
2549 adapter->stats.tpr += er32(TPR);
2550
2551 if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
2552 adapter->stats.ptc64 += er32(PTC64);
2553 adapter->stats.ptc127 += er32(PTC127);
2554 adapter->stats.ptc255 += er32(PTC255);
2555 adapter->stats.ptc511 += er32(PTC511);
2556 adapter->stats.ptc1023 += er32(PTC1023);
2557 adapter->stats.ptc1522 += er32(PTC1522);
2558 }
2559 2530
2560 adapter->stats.mptc += er32(MPTC); 2531 adapter->stats.mptc += er32(MPTC);
2561 adapter->stats.bptc += er32(BPTC); 2532 adapter->stats.bptc += er32(BPTC);
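
GORCL/GORCH and GOTCL/GOTCH are the clear-on-read low and high halves of 64-bit octet counters; the old code accumulated each half into its own field. The rewrite keeps one 64-bit software total fed from the low half only, reading the high half just so the hardware counter fully clears, as the new comments note. Dropping the high half presumably assumes fewer than 4 GB of traffic between stats updates, comfortable at these link speeds. The accumulation pattern, simulated:

	#include <stdint.h>
	#include <stdio.h>

	/* Simulated clear-on-read hardware counter (low 32 bits). */
	static uint32_t hw_low;
	static uint32_t read_clear(void)
	{
		uint32_t v = hw_low;
		hw_low = 0;
		return v;
	}

	int main(void)
	{
		uint64_t gorc = 0;          /* software running total */

		hw_low = 4000000000u;       /* bytes since the last poll */
		gorc += read_clear();       /* like stats.gorc += er32(GORCL) */
		hw_low = 123456;            /* next polling interval */
		gorc += read_clear();

		printf("total rx bytes: %llu\n", (unsigned long long)gorc);
		return 0;
	}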
@@ -2574,19 +2545,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2574 adapter->stats.tsctc += er32(TSCTC); 2545 adapter->stats.tsctc += er32(TSCTC);
2575 adapter->stats.tsctfc += er32(TSCTFC); 2546 adapter->stats.tsctfc += er32(TSCTFC);
2576 2547
2577 adapter->stats.iac += er32(IAC);
2578
2579 if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) {
2580 adapter->stats.icrxoc += er32(ICRXOC);
2581 adapter->stats.icrxptc += er32(ICRXPTC);
2582 adapter->stats.icrxatc += er32(ICRXATC);
2583 adapter->stats.ictxptc += er32(ICTXPTC);
2584 adapter->stats.ictxatc += er32(ICTXATC);
2585 adapter->stats.ictxqec += er32(ICTXQEC);
2586 adapter->stats.ictxqmtc += er32(ICTXQMTC);
2587 adapter->stats.icrxdmtc += er32(ICRXDMTC);
2588 }
2589
2590 /* Fill out the OS statistics structure */ 2548 /* Fill out the OS statistics structure */
2591 adapter->net_stats.multicast = adapter->stats.mprc; 2549 adapter->net_stats.multicast = adapter->stats.mprc;
2592 adapter->net_stats.collisions = adapter->stats.colc; 2550 adapter->net_stats.collisions = adapter->stats.colc;
@@ -2633,6 +2591,54 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2633 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); 2591 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
2634} 2592}
2635 2593
2594/**
2595 * e1000_phy_read_status - Update the PHY register status snapshot
2596 * @adapter: board private structure
2597 **/
2598static void e1000_phy_read_status(struct e1000_adapter *adapter)
2599{
2600 struct e1000_hw *hw = &adapter->hw;
2601 struct e1000_phy_regs *phy = &adapter->phy_regs;
2602 int ret_val;
2603 unsigned long irq_flags;
2604
2605
2606 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
2607
2608 if ((er32(STATUS) & E1000_STATUS_LU) &&
2609 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
2610 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
2611 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
2612 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
2613 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
2614 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
2615 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
2616 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
2617 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
2618 if (ret_val)
2619 ndev_warn(adapter->netdev,
2620 "Error reading PHY register\n");
2621 } else {
2622 /*
2623 * Do not read PHY registers if link is not up
2624 * Set values to typical power-on defaults
2625 */
2626 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
2627 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
2628 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
2629 BMSR_ERCAP);
2630 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
2631 ADVERTISE_ALL | ADVERTISE_CSMA);
2632 phy->lpa = 0;
2633 phy->expansion = EXPANSION_ENABLENPAGE;
2634 phy->ctrl1000 = ADVERTISE_1000FULL;
2635 phy->stat1000 = 0;
2636 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
2637 }
2638
2639 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
2640}
2641
2636static void e1000_print_link_info(struct e1000_adapter *adapter) 2642static void e1000_print_link_info(struct e1000_adapter *adapter)
2637{ 2643{
2638 struct e1000_hw *hw = &adapter->hw; 2644 struct e1000_hw *hw = &adapter->hw;
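
e1000_phy_read_status caches a full MII snapshot under stats_lock whenever link is up and the media is copper, and falls back to typical power-on defaults otherwise, presumably so the MII ioctl path no longer has to touch the hardware on every query; the SIOCGMIIREG handler rewritten further down serves from this cache. A sketch of the consumer side, trimmed to a few of the fields read above:

	typedef unsigned short u16;

	/* Sketch: answer MII register queries from a cached snapshot. */
	struct phy_snapshot { u16 bmcr, bmsr, advertise, lpa; };

	static int mii_from_cache(const struct phy_snapshot *c,
				  unsigned int reg, u16 *val_out)
	{
		switch (reg & 0x1F) {
		case 0x00: *val_out = c->bmcr;      break;  /* MII_BMCR */
		case 0x01: *val_out = c->bmsr;      break;  /* MII_BMSR */
		case 0x04: *val_out = c->advertise; break;  /* MII_ADVERTISE */
		case 0x05: *val_out = c->lpa;       break;  /* MII_LPA */
		default:   return -5;               /* -EIO, as in the hunk */
		}
		return 0;
	}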
@@ -2745,6 +2751,7 @@ static void e1000_watchdog_task(struct work_struct *work)
2745 if (!netif_carrier_ok(netdev)) { 2751 if (!netif_carrier_ok(netdev)) {
2746 bool txb2b = 1; 2752 bool txb2b = 1;
2747 /* update snapshot of PHY registers on LSC */ 2753 /* update snapshot of PHY registers on LSC */
2754 e1000_phy_read_status(adapter);
2748 mac->ops.get_link_up_info(&adapter->hw, 2755 mac->ops.get_link_up_info(&adapter->hw,
2749 &adapter->link_speed, 2756 &adapter->link_speed,
2750 &adapter->link_duplex); 2757 &adapter->link_duplex);
@@ -2842,10 +2849,10 @@ link_up:
2842 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 2849 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
2843 adapter->colc_old = adapter->stats.colc; 2850 adapter->colc_old = adapter->stats.colc;
2844 2851
2845 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2852 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2846 adapter->gorcl_old = adapter->stats.gorcl; 2853 adapter->gorc_old = adapter->stats.gorc;
2847 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2854 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2848 adapter->gotcl_old = adapter->stats.gotcl; 2855 adapter->gotc_old = adapter->stats.gotc;
2849 2856
2850 e1000e_update_adaptive(&adapter->hw); 2857 e1000e_update_adaptive(&adapter->hw);
2851 2858
@@ -3500,7 +3507,6 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
3500{ 3507{
3501 struct e1000_adapter *adapter = netdev_priv(netdev); 3508 struct e1000_adapter *adapter = netdev_priv(netdev);
3502 struct mii_ioctl_data *data = if_mii(ifr); 3509 struct mii_ioctl_data *data = if_mii(ifr);
3503 unsigned long irq_flags;
3504 3510
3505 if (adapter->hw.phy.media_type != e1000_media_type_copper) 3511 if (adapter->hw.phy.media_type != e1000_media_type_copper)
3506 return -EOPNOTSUPP; 3512 return -EOPNOTSUPP;
@@ -3512,13 +3518,40 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
3512 case SIOCGMIIREG: 3518 case SIOCGMIIREG:
3513 if (!capable(CAP_NET_ADMIN)) 3519 if (!capable(CAP_NET_ADMIN))
3514 return -EPERM; 3520 return -EPERM;
3515 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 3521 switch (data->reg_num & 0x1F) {
3516 if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F, 3522 case MII_BMCR:
3517 &data->val_out)) { 3523 data->val_out = adapter->phy_regs.bmcr;
3518 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); 3524 break;
3525 case MII_BMSR:
3526 data->val_out = adapter->phy_regs.bmsr;
3527 break;
3528 case MII_PHYSID1:
3529 data->val_out = (adapter->hw.phy.id >> 16);
3530 break;
3531 case MII_PHYSID2:
3532 data->val_out = (adapter->hw.phy.id & 0xFFFF);
3533 break;
3534 case MII_ADVERTISE:
3535 data->val_out = adapter->phy_regs.advertise;
3536 break;
3537 case MII_LPA:
3538 data->val_out = adapter->phy_regs.lpa;
3539 break;
3540 case MII_EXPANSION:
3541 data->val_out = adapter->phy_regs.expansion;
3542 break;
3543 case MII_CTRL1000:
3544 data->val_out = adapter->phy_regs.ctrl1000;
3545 break;
3546 case MII_STAT1000:
3547 data->val_out = adapter->phy_regs.stat1000;
3548 break;
3549 case MII_ESTATUS:
3550 data->val_out = adapter->phy_regs.estatus;
3551 break;
3552 default:
3519 return -EIO; 3553 return -EIO;
3520 } 3554 }
3521 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
3522 break; 3555 break;
3523 case SIOCSMIIREG: 3556 case SIOCSMIIREG:
3524 default: 3557 default:
@@ -3774,6 +3807,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
3774 return PCI_ERS_RESULT_DISCONNECT; 3807 return PCI_ERS_RESULT_DISCONNECT;
3775 } 3808 }
3776 pci_set_master(pdev); 3809 pci_set_master(pdev);
3810 pci_restore_state(pdev);
3777 3811
3778 pci_enable_wake(pdev, PCI_D3hot, 0); 3812 pci_enable_wake(pdev, PCI_D3hot, 0);
3779 pci_enable_wake(pdev, PCI_D3cold, 0); 3813 pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -3900,6 +3934,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3900 goto err_pci_reg; 3934 goto err_pci_reg;
3901 3935
3902 pci_set_master(pdev); 3936 pci_set_master(pdev);
3937 pci_save_state(pdev);
3903 3938
3904 err = -ENOMEM; 3939 err = -ENOMEM;
3905 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 3940 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
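
These two one-line additions pair up: pci_save_state() at probe snapshots the device's config space, and pci_restore_state() in the io_slot_reset handler replays it after a bus reset has wiped BARs and control registers; without the restore, the re-enabled function would come back with default config space. The canonical shape of the pair, with error handling trimmed:

	/* probe path */
	pci_set_master(pdev);
	pci_save_state(pdev);       /* snapshot config space for later resets */

	/* AER slot-reset path */
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	pci_restore_state(pdev);    /* replay BARs, command register, etc. */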
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 3a4574caa75b..e102332a6bee 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -116,7 +116,7 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
116} 116}
117 117
118/** 118/**
119 * e1000_read_phy_reg_mdic - Read MDI control register 119 * e1000e_read_phy_reg_mdic - Read MDI control register
120 * @hw: pointer to the HW structure 120 * @hw: pointer to the HW structure
121 * @offset: register offset to be read 121 * @offset: register offset to be read
122 * @data: pointer to the read data 122 * @data: pointer to the read data
@@ -124,7 +124,7 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
124 * Reads the MDI control register in the PHY at offset and stores the 124 * Reads the MDI control register in the PHY at offset and stores the
125 * information read to data. 125 * information read to data.
126 **/ 126 **/
127static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) 127s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
128{ 128{
129 struct e1000_phy_info *phy = &hw->phy; 129 struct e1000_phy_info *phy = &hw->phy;
130 u32 i, mdic = 0; 130 u32 i, mdic = 0;
@@ -150,7 +150,7 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
150 * Increasing the time out as testing showed failures with 150 * Increasing the time out as testing showed failures with
151 * the lower time out 151 * the lower time out
152 */ 152 */
153 for (i = 0; i < 64; i++) { 153 for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
154 udelay(50); 154 udelay(50);
155 mdic = er32(MDIC); 155 mdic = er32(MDIC);
156 if (mdic & E1000_MDIC_READY) 156 if (mdic & E1000_MDIC_READY)
@@ -170,14 +170,14 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
170} 170}
171 171
172/** 172/**
173 * e1000_write_phy_reg_mdic - Write MDI control register 173 * e1000e_write_phy_reg_mdic - Write MDI control register
174 * @hw: pointer to the HW structure 174 * @hw: pointer to the HW structure
175 * @offset: register offset to write to 175 * @offset: register offset to write to
176 * @data: data to write to register at offset 176 * @data: data to write to register at offset
177 * 177 *
178 * Writes data to MDI control register in the PHY at offset. 178 * Writes data to MDI control register in the PHY at offset.
179 **/ 179 **/
180static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) 180s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
181{ 181{
182 struct e1000_phy_info *phy = &hw->phy; 182 struct e1000_phy_info *phy = &hw->phy;
183 u32 i, mdic = 0; 183 u32 i, mdic = 0;
@@ -199,9 +199,13 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
199 199
200 ew32(MDIC, mdic); 200 ew32(MDIC, mdic);
201 201
202 /* Poll the ready bit to see if the MDI read completed */ 202 /*
203 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { 203 * Poll the ready bit to see if the MDI write completed
204 udelay(5); 204 * Increasing the time out as testing showed failures with
205 * the lower time out
206 */
207 for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
208 udelay(50);
205 mdic = er32(MDIC); 209 mdic = er32(MDIC);
206 if (mdic & E1000_MDIC_READY) 210 if (mdic & E1000_MDIC_READY)
207 break; 211 break;
@@ -210,6 +214,10 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
210 hw_dbg(hw, "MDI Write did not complete\n"); 214 hw_dbg(hw, "MDI Write did not complete\n");
211 return -E1000_ERR_PHY; 215 return -E1000_ERR_PHY;
212 } 216 }
217 if (mdic & E1000_MDIC_ERROR) {
218 hw_dbg(hw, "MDI Error\n");
219 return -E1000_ERR_PHY;
220 }
213 221
214 return 0; 222 return 0;
215} 223}
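
Besides gaining the e1000e_ prefix so the es2lan code can call them directly, the MDIC write path gets the same widened poll budget as the read path (E1000_GEN_POLL_TIMEOUT * 3 passes at 50 us each) and a new check of the error bit after ready is seen, since a transaction can complete and still have failed. The completion poll, consolidated from the hunks above into one fragment:

	/* Sketch of the MDIC completion poll with both failure modes. */
	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
		udelay(50);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY))
		return -E1000_ERR_PHY;    /* timed out */
	if (mdic & E1000_MDIC_ERROR)
		return -E1000_ERR_PHY;    /* completed with an error */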
@@ -232,9 +240,8 @@ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
232 if (ret_val) 240 if (ret_val)
233 return ret_val; 241 return ret_val;
234 242
235 ret_val = e1000_read_phy_reg_mdic(hw, 243 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
236 MAX_PHY_REG_ADDRESS & offset, 244 data);
237 data);
238 245
239 hw->phy.ops.release_phy(hw); 246 hw->phy.ops.release_phy(hw);
240 247
@@ -258,9 +265,8 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
258 if (ret_val) 265 if (ret_val)
259 return ret_val; 266 return ret_val;
260 267
261 ret_val = e1000_write_phy_reg_mdic(hw, 268 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
262 MAX_PHY_REG_ADDRESS & offset, 269 data);
263 data);
264 270
265 hw->phy.ops.release_phy(hw); 271 hw->phy.ops.release_phy(hw);
266 272
@@ -286,18 +292,17 @@ s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
286 return ret_val; 292 return ret_val;
287 293
288 if (offset > MAX_PHY_MULTI_PAGE_REG) { 294 if (offset > MAX_PHY_MULTI_PAGE_REG) {
289 ret_val = e1000_write_phy_reg_mdic(hw, 295 ret_val = e1000e_write_phy_reg_mdic(hw,
290 IGP01E1000_PHY_PAGE_SELECT, 296 IGP01E1000_PHY_PAGE_SELECT,
291 (u16)offset); 297 (u16)offset);
292 if (ret_val) { 298 if (ret_val) {
293 hw->phy.ops.release_phy(hw); 299 hw->phy.ops.release_phy(hw);
294 return ret_val; 300 return ret_val;
295 } 301 }
296 } 302 }
297 303
298 ret_val = e1000_read_phy_reg_mdic(hw, 304 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
299 MAX_PHY_REG_ADDRESS & offset, 305 data);
300 data);
301 306
302 hw->phy.ops.release_phy(hw); 307 hw->phy.ops.release_phy(hw);
303 308
@@ -322,18 +327,17 @@ s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
322 return ret_val; 327 return ret_val;
323 328
324 if (offset > MAX_PHY_MULTI_PAGE_REG) { 329 if (offset > MAX_PHY_MULTI_PAGE_REG) {
325 ret_val = e1000_write_phy_reg_mdic(hw, 330 ret_val = e1000e_write_phy_reg_mdic(hw,
326 IGP01E1000_PHY_PAGE_SELECT, 331 IGP01E1000_PHY_PAGE_SELECT,
327 (u16)offset); 332 (u16)offset);
328 if (ret_val) { 333 if (ret_val) {
329 hw->phy.ops.release_phy(hw); 334 hw->phy.ops.release_phy(hw);
330 return ret_val; 335 return ret_val;
331 } 336 }
332 } 337 }
333 338
334 ret_val = e1000_write_phy_reg_mdic(hw, 339 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
335 MAX_PHY_REG_ADDRESS & offset, 340 data);
336 data);
337 341
338 hw->phy.ops.release_phy(hw); 342 hw->phy.ops.release_phy(hw);
339 343
@@ -420,7 +424,9 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
420 if (ret_val) 424 if (ret_val)
421 return ret_val; 425 return ret_val;
422 426
423 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 427 /* For newer PHYs this bit is downshift enable */
428 if (phy->type == e1000_phy_m88)
429 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
424 430
425 /* 431 /*
426 * Options: 432 * Options:
@@ -463,7 +469,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
463 if (ret_val) 469 if (ret_val)
464 return ret_val; 470 return ret_val;
465 471
466 if (phy->revision < 4) { 472 if ((phy->type == e1000_phy_m88) && (phy->revision < 4)) {
467 /* 473 /*
468 * Force TX_CLK in the Extended PHY Specific Control Register 474 * Force TX_CLK in the Extended PHY Specific Control Register
469 * to 25MHz clock. 475 * to 25MHz clock.
@@ -518,8 +524,11 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
518 return ret_val; 524 return ret_val;
519 } 525 }
520 526
521 /* Wait 15ms for MAC to configure PHY from NVM settings. */ 527 /*
522 msleep(15); 528 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
529 * timeout issues when LFS is enabled.
530 */
531 msleep(100);
523 532
524 /* disable lplu d0 during driver init */ 533 /* disable lplu d0 during driver init */
525 ret_val = e1000_set_d0_lplu_state(hw, 0); 534 ret_val = e1000_set_d0_lplu_state(hw, 0);
@@ -1152,9 +1161,7 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1152 1161
1153 if (!active) { 1162 if (!active) {
1154 data &= ~IGP02E1000_PM_D3_LPLU; 1163 data &= ~IGP02E1000_PM_D3_LPLU;
1155 ret_val = e1e_wphy(hw, 1164 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
1156 IGP02E1000_PHY_POWER_MGMT,
1157 data);
1158 if (ret_val) 1165 if (ret_val)
1159 return ret_val; 1166 return ret_val;
1160 /* 1167 /*
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 9ff7538b7595..f9bc21c74b59 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2611,7 +2611,7 @@ static int ehea_stop(struct net_device *dev)
2611 return ret; 2611 return ret;
2612} 2612}
2613 2613
2614void ehea_purge_sq(struct ehea_qp *orig_qp) 2614static void ehea_purge_sq(struct ehea_qp *orig_qp)
2615{ 2615{
2616 struct ehea_qp qp = *orig_qp; 2616 struct ehea_qp qp = *orig_qp;
2617 struct ehea_qp_init_attr *init_attr = &qp.init_attr; 2617 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2625,7 +2625,7 @@ void ehea_purge_sq(struct ehea_qp *orig_qp)
2625 } 2625 }
2626} 2626}
2627 2627
2628void ehea_flush_sq(struct ehea_port *port) 2628static void ehea_flush_sq(struct ehea_port *port)
2629{ 2629{
2630 int i; 2630 int i;
2631 2631
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 8c4214b0ee1f..35f66d4a4595 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -96,6 +96,7 @@
96#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ 96#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */
97#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ 97#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */
98#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */ 98#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */
99#define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */
99 100
100enum { 101enum {
101 NvRegIrqStatus = 0x000, 102 NvRegIrqStatus = 0x000,
@@ -174,11 +175,13 @@ enum {
174 NvRegReceiverStatus = 0x98, 175 NvRegReceiverStatus = 0x98,
175#define NVREG_RCVSTAT_BUSY 0x01 176#define NVREG_RCVSTAT_BUSY 0x01
176 177
177 NvRegRandomSeed = 0x9c, 178 NvRegSlotTime = 0x9c,
178#define NVREG_RNDSEED_MASK 0x00ff 179#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
179#define NVREG_RNDSEED_FORCE 0x7f00 180#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
180#define NVREG_RNDSEED_FORCE2 0x2d00 181#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
181#define NVREG_RNDSEED_FORCE3 0x7400 182#define NVREG_SLOTTIME_HALF 0x0000ff00
183#define NVREG_SLOTTIME_DEFAULT 0x00007f00
184#define NVREG_SLOTTIME_MASK 0x000000ff
182 185
183 NvRegTxDeferral = 0xA0, 186 NvRegTxDeferral = 0xA0,
184#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f 187#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
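
Register 0x9c is reinterpreted here: the old NVREG_RNDSEED_* view (a forced seed with a byte-wide mask) becomes NvRegSlotTime, where bits 7:0 carry a random slot-time seed, the bits above carry the speed-dependent slot-time value, and bit 31 enables legacy backoff. Composing the value the way nv_open does later in this patch, standalone:

	#include <stdint.h>
	#include <stdio.h>

	#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000u
	#define NVREG_SLOTTIME_10_100_FULL   0x00007f00u
	#define NVREG_SLOTTIME_MASK          0x000000ffu

	int main(void)
	{
		uint32_t low = 0x5a;   /* stand-in for a get_random_bytes() byte */
		uint32_t reg = NVREG_SLOTTIME_LEGBF_ENABLED |
			       NVREG_SLOTTIME_10_100_FULL |
			       (low & NVREG_SLOTTIME_MASK);

		printf("NvRegSlotTime = 0x%08x\n", reg);   /* 0x80007f5a */
		return 0;
	}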
@@ -201,6 +204,11 @@ enum {
201 204
202 NvRegPhyInterface = 0xC0, 205 NvRegPhyInterface = 0xC0,
203#define PHY_RGMII 0x10000000 206#define PHY_RGMII 0x10000000
207 NvRegBackOffControl = 0xC4,
208#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
209#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
210#define NVREG_BKOFFCTRL_SELECT 24
211#define NVREG_BKOFFCTRL_GEAR 12
204 212
205 NvRegTxRingPhysAddr = 0x100, 213 NvRegTxRingPhysAddr = 0x100,
206 NvRegRxRingPhysAddr = 0x104, 214 NvRegRxRingPhysAddr = 0x104,
@@ -352,6 +360,7 @@ union ring_type {
352 360
353#define NV_TX_LASTPACKET (1<<16) 361#define NV_TX_LASTPACKET (1<<16)
354#define NV_TX_RETRYERROR (1<<19) 362#define NV_TX_RETRYERROR (1<<19)
363#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
355#define NV_TX_FORCED_INTERRUPT (1<<24) 364#define NV_TX_FORCED_INTERRUPT (1<<24)
356#define NV_TX_DEFERRED (1<<26) 365#define NV_TX_DEFERRED (1<<26)
357#define NV_TX_CARRIERLOST (1<<27) 366#define NV_TX_CARRIERLOST (1<<27)
@@ -362,6 +371,7 @@ union ring_type {
362 371
363#define NV_TX2_LASTPACKET (1<<29) 372#define NV_TX2_LASTPACKET (1<<29)
364#define NV_TX2_RETRYERROR (1<<18) 373#define NV_TX2_RETRYERROR (1<<18)
374#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
365#define NV_TX2_FORCED_INTERRUPT (1<<30) 375#define NV_TX2_FORCED_INTERRUPT (1<<30)
366#define NV_TX2_DEFERRED (1<<25) 376#define NV_TX2_DEFERRED (1<<25)
367#define NV_TX2_CARRIERLOST (1<<26) 377#define NV_TX2_CARRIERLOST (1<<26)
@@ -473,16 +483,22 @@ union ring_type {
473#define DESC_VER_3 3 483#define DESC_VER_3 3
474 484
475/* PHY defines */ 485/* PHY defines */
476#define PHY_OUI_MARVELL 0x5043 486#define PHY_OUI_MARVELL 0x5043
477#define PHY_OUI_CICADA 0x03f1 487#define PHY_OUI_CICADA 0x03f1
478#define PHY_OUI_VITESSE 0x01c1 488#define PHY_OUI_VITESSE 0x01c1
479#define PHY_OUI_REALTEK 0x0732 489#define PHY_OUI_REALTEK 0x0732
490#define PHY_OUI_REALTEK2 0x0020
480#define PHYID1_OUI_MASK 0x03ff 491#define PHYID1_OUI_MASK 0x03ff
481#define PHYID1_OUI_SHFT 6 492#define PHYID1_OUI_SHFT 6
482#define PHYID2_OUI_MASK 0xfc00 493#define PHYID2_OUI_MASK 0xfc00
483#define PHYID2_OUI_SHFT 10 494#define PHYID2_OUI_SHFT 10
484#define PHYID2_MODEL_MASK 0x03f0 495#define PHYID2_MODEL_MASK 0x03f0
485#define PHY_MODEL_MARVELL_E3016 0x220 496#define PHY_MODEL_REALTEK_8211 0x0110
497#define PHY_REV_MASK 0x0001
498#define PHY_REV_REALTEK_8211B 0x0000
499#define PHY_REV_REALTEK_8211C 0x0001
500#define PHY_MODEL_REALTEK_8201 0x0200
501#define PHY_MODEL_MARVELL_E3016 0x0220
486#define PHY_MARVELL_E3016_INITMASK 0x0300 502#define PHY_MARVELL_E3016_INITMASK 0x0300
487#define PHY_CICADA_INIT1 0x0f000 503#define PHY_CICADA_INIT1 0x0f000
488#define PHY_CICADA_INIT2 0x0e00 504#define PHY_CICADA_INIT2 0x0e00
@@ -509,10 +525,18 @@ union ring_type {
509#define PHY_REALTEK_INIT_REG1 0x1f 525#define PHY_REALTEK_INIT_REG1 0x1f
510#define PHY_REALTEK_INIT_REG2 0x19 526#define PHY_REALTEK_INIT_REG2 0x19
511#define PHY_REALTEK_INIT_REG3 0x13 527#define PHY_REALTEK_INIT_REG3 0x13
528#define PHY_REALTEK_INIT_REG4 0x14
529#define PHY_REALTEK_INIT_REG5 0x18
530#define PHY_REALTEK_INIT_REG6 0x11
512#define PHY_REALTEK_INIT1 0x0000 531#define PHY_REALTEK_INIT1 0x0000
513#define PHY_REALTEK_INIT2 0x8e00 532#define PHY_REALTEK_INIT2 0x8e00
514#define PHY_REALTEK_INIT3 0x0001 533#define PHY_REALTEK_INIT3 0x0001
515#define PHY_REALTEK_INIT4 0xad17 534#define PHY_REALTEK_INIT4 0xad17
535#define PHY_REALTEK_INIT5 0xfb54
536#define PHY_REALTEK_INIT6 0xf5c7
537#define PHY_REALTEK_INIT7 0x1000
538#define PHY_REALTEK_INIT8 0x0003
539#define PHY_REALTEK_INIT_MSK1 0x0003
516 540
517#define PHY_GIGABIT 0x0100 541#define PHY_GIGABIT 0x0100
518 542
@@ -691,6 +715,7 @@ struct fe_priv {
691 int wolenabled; 715 int wolenabled;
692 unsigned int phy_oui; 716 unsigned int phy_oui;
693 unsigned int phy_model; 717 unsigned int phy_model;
718 unsigned int phy_rev;
694 u16 gigabit; 719 u16 gigabit;
695 int intr_test; 720 int intr_test;
696 int recover_error; 721 int recover_error;
@@ -704,6 +729,7 @@ struct fe_priv {
704 u32 txrxctl_bits; 729 u32 txrxctl_bits;
705 u32 vlanctl_bits; 730 u32 vlanctl_bits;
706 u32 driver_data; 731 u32 driver_data;
732 u32 device_id;
707 u32 register_size; 733 u32 register_size;
708 int rx_csum; 734 int rx_csum;
709 u32 mac_in_use; 735 u32 mac_in_use;
@@ -814,6 +840,16 @@ enum {
814}; 840};
815static int dma_64bit = NV_DMA_64BIT_ENABLED; 841static int dma_64bit = NV_DMA_64BIT_ENABLED;
816 842
843/*
844 * Crossover Detection
845 * Realtek 8201 phy + some OEM boards do not work properly.
846 */
847enum {
848 NV_CROSSOVER_DETECTION_DISABLED,
849 NV_CROSSOVER_DETECTION_ENABLED
850};
851static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
852
817static inline struct fe_priv *get_nvpriv(struct net_device *dev) 853static inline struct fe_priv *get_nvpriv(struct net_device *dev)
818{ 854{
819 return netdev_priv(dev); 855 return netdev_priv(dev);
@@ -1078,25 +1114,53 @@ static int phy_init(struct net_device *dev)
1078 } 1114 }
1079 } 1115 }
1080 if (np->phy_oui == PHY_OUI_REALTEK) { 1116 if (np->phy_oui == PHY_OUI_REALTEK) {
1081 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1117 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1082 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1118 np->phy_rev == PHY_REV_REALTEK_8211B) {
1083 return PHY_ERROR; 1119 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1084 } 1120 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1085 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1121 return PHY_ERROR;
1086 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1122 }
1087 return PHY_ERROR; 1123 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1088 } 1124 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1089 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1125 return PHY_ERROR;
1090 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1126 }
1091 return PHY_ERROR; 1127 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1092 } 1128 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1093 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1129 return PHY_ERROR;
1094 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1130 }
1095 return PHY_ERROR; 1131 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1132 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1133 return PHY_ERROR;
1134 }
1135 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
1136 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1137 return PHY_ERROR;
1138 }
1139 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1140 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1141 return PHY_ERROR;
1142 }
1143 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1144 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1145 return PHY_ERROR;
1146 }
1096 } 1147 }
1097 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1148 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1098 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1149 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
1099 return PHY_ERROR; 1150 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
1151 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
1152 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
1153 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
1154 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
1155 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
1156 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
1157 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1158 phy_reserved |= PHY_REALTEK_INIT7;
1159 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1160 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1161 return PHY_ERROR;
1162 }
1163 }
1100 } 1164 }
1101 } 1165 }
1102 1166
@@ -1236,26 +1300,71 @@ static int phy_init(struct net_device *dev)
1236 } 1300 }
1237 } 1301 }
1238 if (np->phy_oui == PHY_OUI_REALTEK) { 1302 if (np->phy_oui == PHY_OUI_REALTEK) {
1239 /* reset could have cleared these out, set them back */ 1303 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1240 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1304 np->phy_rev == PHY_REV_REALTEK_8211B) {
1241 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1305 /* reset could have cleared these out, set them back */
1242 return PHY_ERROR; 1306 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1243 } 1307 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1244 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1308 return PHY_ERROR;
1245 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1309 }
1246 return PHY_ERROR; 1310 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1247 } 1311 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1248 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1312 return PHY_ERROR;
1249 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1313 }
1250 return PHY_ERROR; 1314 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1251 } 1315 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1252 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1316 return PHY_ERROR;
1253 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1317 }
1254 return PHY_ERROR; 1318 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1319 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1320 return PHY_ERROR;
1321 }
1322 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
1323 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1324 return PHY_ERROR;
1325 }
1326 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1327 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1328 return PHY_ERROR;
1329 }
1330 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1331 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1332 return PHY_ERROR;
1333 }
1255 } 1334 }
1256 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1335 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1257 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1336 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
1258 return PHY_ERROR; 1337 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
1338 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
1339 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
1340 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
1341 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
1342 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
1343 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
1344 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1345 phy_reserved |= PHY_REALTEK_INIT7;
1346 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1347 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1348 return PHY_ERROR;
1349 }
1350 }
1351 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1352 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1353 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1354 return PHY_ERROR;
1355 }
1356 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
1357 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1358 phy_reserved |= PHY_REALTEK_INIT3;
1359 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
1360 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1361 return PHY_ERROR;
1362 }
1363 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1364 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1365 return PHY_ERROR;
1366 }
1367 }
1259 } 1368 }
1260 } 1369 }
1261 1370
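
Both phy_init passes are now gated on PHY model and, for the 8211, on silicon revision: the 8211B keeps the original four writes plus two new ones, while the 8201 gets an OEM-board quirk (OR PHY_REALTEK_INIT7 into register PHY_REALTEK_INIT_REG6 on the listed NVENET device IDs) and, after reset, an optional crossover-detection disable when phy_cross is NV_CROSSOVER_DETECTION_DISABLED. Since each sequence is just "write these (reg, value) pairs, abort on the first failure", it condenses to a table walk; the helper and table below are illustrative, while mii_rw, PHY_ERROR and the constants are the driver's:

	struct phy_write { unsigned int reg; unsigned int val; };

	/* Illustrative table-driven form of the repeated init writes. */
	static int run_phy_writes(struct net_device *dev, int phyaddr,
				  const struct phy_write *w, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (mii_rw(dev, phyaddr, w[i].reg, w[i].val))
				return PHY_ERROR;   /* abort on first failure */
		return 0;
	}

	static const struct phy_write rtl8211b_init[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};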
@@ -1769,6 +1878,115 @@ static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1769 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 1878 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1770} 1879}
1771 1880
1881static void nv_legacybackoff_reseed(struct net_device *dev)
1882{
1883 u8 __iomem *base = get_hwbase(dev);
1884 u32 reg;
1885 u32 low;
1886 int tx_status = 0;
1887
1888 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
1889 get_random_bytes(&low, sizeof(low));
1890 reg |= low & NVREG_SLOTTIME_MASK;
1891
1892 /* Need to stop tx before change takes effect.
1893 * Caller has already gained np->lock.
1894 */
1895 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
1896 if (tx_status)
1897 nv_stop_tx(dev);
1898 nv_stop_rx(dev);
1899 writel(reg, base + NvRegSlotTime);
1900 if (tx_status)
1901 nv_start_tx(dev);
1902 nv_start_rx(dev);
1903}
1904
1905/* Gear Backoff Seeds */
1906#define BACKOFF_SEEDSET_ROWS 8
1907#define BACKOFF_SEEDSET_LFSRS 15
1908
1909/* Known Good seed sets */
1910static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
1911 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
1912 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
1913 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
1914 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
1915 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
1916 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
1917 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
1918 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
1919
1920static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
1921 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
1922 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
1923 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
1924 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
1925 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
1926 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
1927 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
1928 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
1929
1930static void nv_gear_backoff_reseed(struct net_device *dev)
1931{
1932 u8 __iomem *base = get_hwbase(dev);
1933 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
1934 u32 temp, seedset, combinedSeed;
1935 int i;
1936
1937 /* Setup seed for free running LFSR */
1938	/* We pull three random 12-bit miniseeds
1939	   and swizzle bits around to increase randomness */
1940 get_random_bytes(&miniseed1, sizeof(miniseed1));
1941 miniseed1 &= 0x0fff;
1942 if (miniseed1 == 0)
1943 miniseed1 = 0xabc;
1944
1945 get_random_bytes(&miniseed2, sizeof(miniseed2));
1946 miniseed2 &= 0x0fff;
1947 if (miniseed2 == 0)
1948 miniseed2 = 0xabc;
1949 miniseed2_reversed =
1950 ((miniseed2 & 0xF00) >> 8) |
1951 (miniseed2 & 0x0F0) |
1952 ((miniseed2 & 0x00F) << 8);
1953
1954 get_random_bytes(&miniseed3, sizeof(miniseed3));
1955 miniseed3 &= 0x0fff;
1956 if (miniseed3 == 0)
1957 miniseed3 = 0xabc;
1958 miniseed3_reversed =
1959 ((miniseed3 & 0xF00) >> 8) |
1960 (miniseed3 & 0x0F0) |
1961 ((miniseed3 & 0x00F) << 8);
1962
1963 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
1964 (miniseed2 ^ miniseed3_reversed);
1965
1966	/* Seeds cannot be zero */
1967 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
1968 combinedSeed |= 0x08;
1969 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
1970 combinedSeed |= 0x8000;
1971
1972 /* No need to disable tx here */
1973 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
1974 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
1975 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
1976	writel(temp, base + NvRegBackOffControl);
1977
1978 /* Setup seeds for all gear LFSRs. */
1979 get_random_bytes(&seedset, sizeof(seedset));
1980 seedset = seedset % BACKOFF_SEEDSET_ROWS;
1981 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
1982 {
1983 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
1984 temp |= main_seedset[seedset][i-1] & 0x3ff;
1985 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
1986 writel(temp, base + NvRegBackOffControl);
1987 }
1988}
1989
1772/* 1990/*
1773 * nv_start_xmit: dev->hard_start_xmit function 1991 * nv_start_xmit: dev->hard_start_xmit function
1774 * Called with netif_tx_lock held. 1992 * Called with netif_tx_lock held.
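
The two reseed routines above break the lockstep that can develop when several NICs keep colliding with identical backoff state; the tx-done paths below trigger them whenever a retry error is reported with a zero retry count. nv_legacybackoff_reseed just rewrites the random byte in NvRegSlotTime around a brief tx/rx stop, while nv_gear_backoff_reseed builds a seed for the free-running LFSR from three random 12-bit miniseeds, two of them nibble-reversed before mixing, and then loads the 15 gear LFSRs from one of eight known-good seed rows. The mixing arithmetic, standalone:

	#include <stdint.h>
	#include <stdio.h>

	/* Swap the outer nibbles of a 12-bit miniseed (0xF00 <-> 0x00F). */
	static uint32_t nibble_reverse(uint32_t s)
	{
		return ((s & 0xF00) >> 8) | (s & 0x0F0) | ((s & 0x00F) << 8);
	}

	int main(void)
	{
		/* Fixed stand-ins for the get_random_bytes() miniseeds; the
		 * driver also remaps 0 to 0xabc so no miniseed is zero. */
		uint32_t m1 = 0x123, m2 = 0x456, m3 = 0x789;

		uint32_t combined = ((m1 ^ nibble_reverse(m2)) << 12) |
				    (m2 ^ nibble_reverse(m3));

		printf("combinedSeed = 0x%06x\n", combined);
		/* the low 10 bits seed one register field, the bits above
		 * NVREG_BKOFFCTRL_GEAR the other; the driver forces both
		 * fields nonzero before writing NvRegBackOffControl */
		return 0;
	}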
@@ -2088,6 +2306,8 @@ static void nv_tx_done(struct net_device *dev)
2088 dev->stats.tx_fifo_errors++; 2306 dev->stats.tx_fifo_errors++;
2089 if (flags & NV_TX_CARRIERLOST) 2307 if (flags & NV_TX_CARRIERLOST)
2090 dev->stats.tx_carrier_errors++; 2308 dev->stats.tx_carrier_errors++;
2309 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2310 nv_legacybackoff_reseed(dev);
2091 dev->stats.tx_errors++; 2311 dev->stats.tx_errors++;
2092 } else { 2312 } else {
2093 dev->stats.tx_packets++; 2313 dev->stats.tx_packets++;
@@ -2103,6 +2323,8 @@ static void nv_tx_done(struct net_device *dev)
2103 dev->stats.tx_fifo_errors++; 2323 dev->stats.tx_fifo_errors++;
2104 if (flags & NV_TX2_CARRIERLOST) 2324 if (flags & NV_TX2_CARRIERLOST)
2105 dev->stats.tx_carrier_errors++; 2325 dev->stats.tx_carrier_errors++;
2326 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2327 nv_legacybackoff_reseed(dev);
2106 dev->stats.tx_errors++; 2328 dev->stats.tx_errors++;
2107 } else { 2329 } else {
2108 dev->stats.tx_packets++; 2330 dev->stats.tx_packets++;
@@ -2144,6 +2366,15 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit)
2144 if (flags & NV_TX2_LASTPACKET) { 2366 if (flags & NV_TX2_LASTPACKET) {
2145 if (!(flags & NV_TX2_ERROR)) 2367 if (!(flags & NV_TX2_ERROR))
2146 dev->stats.tx_packets++; 2368 dev->stats.tx_packets++;
2369 else {
2370 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2371 if (np->driver_data & DEV_HAS_GEAR_MODE)
2372 nv_gear_backoff_reseed(dev);
2373 else
2374 nv_legacybackoff_reseed(dev);
2375 }
2376 }
2377
2147 dev_kfree_skb_any(np->get_tx_ctx->skb); 2378 dev_kfree_skb_any(np->get_tx_ctx->skb);
2148 np->get_tx_ctx->skb = NULL; 2379 np->get_tx_ctx->skb = NULL;
2149 2380
@@ -2905,15 +3136,14 @@ set_speed:
2905 } 3136 }
2906 3137
2907 if (np->gigabit == PHY_GIGABIT) { 3138 if (np->gigabit == PHY_GIGABIT) {
2908 phyreg = readl(base + NvRegRandomSeed); 3139 phyreg = readl(base + NvRegSlotTime);
2909 phyreg &= ~(0x3FF00); 3140 phyreg &= ~(0x3FF00);
2910 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 3141 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
2911 phyreg |= NVREG_RNDSEED_FORCE3; 3142 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
2912 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 3143 phyreg |= NVREG_SLOTTIME_10_100_FULL;
2913 phyreg |= NVREG_RNDSEED_FORCE2;
2914 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3144 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
2915 phyreg |= NVREG_RNDSEED_FORCE; 3145 phyreg |= NVREG_SLOTTIME_1000_FULL;
2916 writel(phyreg, base + NvRegRandomSeed); 3146 writel(phyreg, base + NvRegSlotTime);
2917 } 3147 }
2918 3148
2919 phyreg = readl(base + NvRegPhyInterface); 3149 phyreg = readl(base + NvRegPhyInterface);
@@ -4843,6 +5073,7 @@ static int nv_open(struct net_device *dev)
4843 u8 __iomem *base = get_hwbase(dev); 5073 u8 __iomem *base = get_hwbase(dev);
4844 int ret = 1; 5074 int ret = 1;
4845 int oom, i; 5075 int oom, i;
5076 u32 low;
4846 5077
4847 dprintk(KERN_DEBUG "nv_open: begin\n"); 5078 dprintk(KERN_DEBUG "nv_open: begin\n");
4848 5079
@@ -4902,8 +5133,20 @@ static int nv_open(struct net_device *dev)
4902 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5133 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4903 5134
4904 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 5135 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
4905 get_random_bytes(&i, sizeof(i)); 5136
4906 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); 5137 get_random_bytes(&low, sizeof(low));
5138 low &= NVREG_SLOTTIME_MASK;
5139 if (np->desc_ver == DESC_VER_1) {
5140 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5141 } else {
5142 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5143 /* setup legacy backoff */
5144 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5145 } else {
5146 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5147 nv_gear_backoff_reseed(dev);
5148 }
5149 }
4907 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 5150 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
4908 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 5151 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
4909 if (poll_interval == -1) { 5152 if (poll_interval == -1) {
@@ -5110,6 +5353,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5110 5353
5111 /* copy of driver data */ 5354 /* copy of driver data */
5112 np->driver_data = id->driver_data; 5355 np->driver_data = id->driver_data;
5356 /* copy of device id */
5357 np->device_id = id->device;
5113 5358
5114 /* handle different descriptor versions */ 5359 /* handle different descriptor versions */
5115 if (id->driver_data & DEV_HAS_HIGH_DMA) { 5360 if (id->driver_data & DEV_HAS_HIGH_DMA) {
@@ -5399,6 +5644,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5399 pci_name(pci_dev), id1, id2, phyaddr); 5644 pci_name(pci_dev), id1, id2, phyaddr);
5400 np->phyaddr = phyaddr; 5645 np->phyaddr = phyaddr;
5401 np->phy_oui = id1 | id2; 5646 np->phy_oui = id1 | id2;
5647
 5648 /* Realtek hardcoded phy id1 to all zeros on certain phys */
5649 if (np->phy_oui == PHY_OUI_REALTEK2)
5650 np->phy_oui = PHY_OUI_REALTEK;
5651 /* Setup phy revision for Realtek */
5652 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5653 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5654
5402 break; 5655 break;
5403 } 5656 }
5404 if (i == 33) { 5657 if (i == 33) {
@@ -5477,6 +5730,28 @@ out:
5477 return err; 5730 return err;
5478} 5731}
5479 5732
5733static void nv_restore_phy(struct net_device *dev)
5734{
5735 struct fe_priv *np = netdev_priv(dev);
5736 u16 phy_reserved, mii_control;
5737
5738 if (np->phy_oui == PHY_OUI_REALTEK &&
5739 np->phy_model == PHY_MODEL_REALTEK_8201 &&
5740 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
5741 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
5742 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
5743 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
5744 phy_reserved |= PHY_REALTEK_INIT8;
5745 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
5746 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
5747
5748 /* restart auto negotiation */
5749 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5750 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
5751 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
5752 }
5753}
5754
5480static void __devexit nv_remove(struct pci_dev *pci_dev) 5755static void __devexit nv_remove(struct pci_dev *pci_dev)
5481{ 5756{
5482 struct net_device *dev = pci_get_drvdata(pci_dev); 5757 struct net_device *dev = pci_get_drvdata(pci_dev);
@@ -5493,6 +5768,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
5493 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV, 5768 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5494 base + NvRegTransmitPoll); 5769 base + NvRegTransmitPoll);
5495 5770
5771 /* restore any phy related changes */
5772 nv_restore_phy(dev);
5773
5496 /* free all structures */ 5774 /* free all structures */
5497 free_rings(dev); 5775 free_rings(dev);
5498 iounmap(get_hwbase(dev)); 5776 iounmap(get_hwbase(dev));
@@ -5632,83 +5910,83 @@ static struct pci_device_id pci_tbl[] = {
5632 }, 5910 },
5633 { /* MCP65 Ethernet Controller */ 5911 { /* MCP65 Ethernet Controller */
5634 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5912 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
 5635 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT, 5913 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5636 }, 5914 },
5637 { /* MCP65 Ethernet Controller */ 5915 { /* MCP65 Ethernet Controller */
5638 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5916 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
5639 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT, 5917 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5640 }, 5918 },
5641 { /* MCP65 Ethernet Controller */ 5919 { /* MCP65 Ethernet Controller */
5642 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5920 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
5643 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT, 5921 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5644 }, 5922 },
5645 { /* MCP65 Ethernet Controller */ 5923 { /* MCP65 Ethernet Controller */
5646 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5924 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
5647 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT, 5925 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5648 }, 5926 },
5649 { /* MCP67 Ethernet Controller */ 5927 { /* MCP67 Ethernet Controller */
5650 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5928 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
5651 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5929 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
5652 }, 5930 },
5653 { /* MCP67 Ethernet Controller */ 5931 { /* MCP67 Ethernet Controller */
5654 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 5932 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
5655 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5933 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
5656 }, 5934 },
5657 { /* MCP67 Ethernet Controller */ 5935 { /* MCP67 Ethernet Controller */
5658 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 5936 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
5659 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5937 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
5660 }, 5938 },
5661 { /* MCP67 Ethernet Controller */ 5939 { /* MCP67 Ethernet Controller */
5662 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 5940 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
5663 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5941 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
5664 }, 5942 },
5665 { /* MCP73 Ethernet Controller */ 5943 { /* MCP73 Ethernet Controller */
5666 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), 5944 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
5667 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5945 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
5668 }, 5946 },
5669 { /* MCP73 Ethernet Controller */ 5947 { /* MCP73 Ethernet Controller */
5670 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), 5948 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
5671 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5949 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
5672 }, 5950 },
5673 { /* MCP73 Ethernet Controller */ 5951 { /* MCP73 Ethernet Controller */
5674 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), 5952 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
5675 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5953 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
5676 }, 5954 },
5677 { /* MCP73 Ethernet Controller */ 5955 { /* MCP73 Ethernet Controller */
5678 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), 5956 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
5679 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5957 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
5680 }, 5958 },
5681 { /* MCP77 Ethernet Controller */ 5959 { /* MCP77 Ethernet Controller */
5682 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 5960 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
5683 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT, 5961 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5684 }, 5962 },
5685 { /* MCP77 Ethernet Controller */ 5963 { /* MCP77 Ethernet Controller */
5686 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 5964 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
5687 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT, 5965 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5688 }, 5966 },
5689 { /* MCP77 Ethernet Controller */ 5967 { /* MCP77 Ethernet Controller */
5690 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 5968 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
5691 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT, 5969 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5692 }, 5970 },
5693 { /* MCP77 Ethernet Controller */ 5971 { /* MCP77 Ethernet Controller */
5694 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 5972 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
5695 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT, 5973 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5696 }, 5974 },
5697 { /* MCP79 Ethernet Controller */ 5975 { /* MCP79 Ethernet Controller */
5698 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 5976 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
5699 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT, 5977 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5700 }, 5978 },
5701 { /* MCP79 Ethernet Controller */ 5979 { /* MCP79 Ethernet Controller */
5702 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 5980 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
5703 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT, 5981 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5704 }, 5982 },
5705 { /* MCP79 Ethernet Controller */ 5983 { /* MCP79 Ethernet Controller */
5706 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 5984 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
5707 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT, 5985 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5708 }, 5986 },
5709 { /* MCP79 Ethernet Controller */ 5987 { /* MCP79 Ethernet Controller */
5710 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 5988 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
5711 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT, 5989 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5712 }, 5990 },
5713 {0,}, 5991 {0,},
5714}; 5992};
@@ -5744,6 +6022,8 @@ module_param(msix, int, 0);
5744MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0."); 6022MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
5745module_param(dma_64bit, int, 0); 6023module_param(dma_64bit, int, 0);
5746MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); 6024MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6025module_param(phy_cross, int, 0);
6026MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
5747 6027
5748MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 6028MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
5749MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 6029MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c8c3df737d73..99a4b990939f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -98,7 +98,6 @@
98#include "gianfar_mii.h" 98#include "gianfar_mii.h"
99 99
100#define TX_TIMEOUT (1*HZ) 100#define TX_TIMEOUT (1*HZ)
101#define SKB_ALLOC_TIMEOUT 1000000
102#undef BRIEF_GFAR_ERRORS 101#undef BRIEF_GFAR_ERRORS
103#undef VERBOSE_GFAR_ERRORS 102#undef VERBOSE_GFAR_ERRORS
104 103
@@ -115,7 +114,9 @@ static int gfar_enet_open(struct net_device *dev);
115static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 114static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
116static void gfar_timeout(struct net_device *dev); 115static void gfar_timeout(struct net_device *dev);
117static int gfar_close(struct net_device *dev); 116static int gfar_close(struct net_device *dev);
118struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp); 117struct sk_buff *gfar_new_skb(struct net_device *dev);
118static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
119 struct sk_buff *skb);
119static int gfar_set_mac_address(struct net_device *dev); 120static int gfar_set_mac_address(struct net_device *dev);
120static int gfar_change_mtu(struct net_device *dev, int new_mtu); 121static int gfar_change_mtu(struct net_device *dev, int new_mtu);
121static irqreturn_t gfar_error(int irq, void *dev_id); 122static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -783,14 +784,21 @@ int startup_gfar(struct net_device *dev)
783 784
784 rxbdp = priv->rx_bd_base; 785 rxbdp = priv->rx_bd_base;
785 for (i = 0; i < priv->rx_ring_size; i++) { 786 for (i = 0; i < priv->rx_ring_size; i++) {
786 struct sk_buff *skb = NULL; 787 struct sk_buff *skb;
787 788
788 rxbdp->status = 0; 789 skb = gfar_new_skb(dev);
789 790
790 skb = gfar_new_skb(dev, rxbdp); 791 if (!skb) {
792 printk(KERN_ERR "%s: Can't allocate RX buffers\n",
793 dev->name);
794
795 goto err_rxalloc_fail;
796 }
791 797
792 priv->rx_skbuff[i] = skb; 798 priv->rx_skbuff[i] = skb;
793 799
800 gfar_new_rxbdp(dev, rxbdp, skb);
801
794 rxbdp++; 802 rxbdp++;
795 } 803 }
796 804
@@ -916,6 +924,7 @@ rx_irq_fail:
916tx_irq_fail: 924tx_irq_fail:
917 free_irq(priv->interruptError, dev); 925 free_irq(priv->interruptError, dev);
918err_irq_fail: 926err_irq_fail:
927err_rxalloc_fail:
919rx_skb_fail: 928rx_skb_fail:
920 free_skb_resources(priv); 929 free_skb_resources(priv);
921tx_skb_fail: 930tx_skb_fail:
@@ -1328,18 +1337,37 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
1328 return IRQ_HANDLED; 1337 return IRQ_HANDLED;
1329} 1338}
1330 1339
1331struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) 1340static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
1341 struct sk_buff *skb)
1342{
1343 struct gfar_private *priv = netdev_priv(dev);
1344 u32 * status_len = (u32 *)bdp;
1345 u16 flags;
1346
1347 bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1348 priv->rx_buffer_size, DMA_FROM_DEVICE);
1349
1350 flags = RXBD_EMPTY | RXBD_INTERRUPT;
1351
1352 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
1353 flags |= RXBD_WRAP;
1354
1355 eieio();
1356
1357 *status_len = (u32)flags << 16;
1358}
1359
1360
1361struct sk_buff * gfar_new_skb(struct net_device *dev)
1332{ 1362{
1333 unsigned int alignamount; 1363 unsigned int alignamount;
1334 struct gfar_private *priv = netdev_priv(dev); 1364 struct gfar_private *priv = netdev_priv(dev);
1335 struct sk_buff *skb = NULL; 1365 struct sk_buff *skb = NULL;
1336 unsigned int timeout = SKB_ALLOC_TIMEOUT;
1337 1366
1338 /* We have to allocate the skb, so keep trying till we succeed */ 1367 /* We have to allocate the skb, so keep trying till we succeed */
1339 while ((!skb) && timeout--) 1368 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
1340 skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
1341 1369
1342 if (NULL == skb) 1370 if (!skb)
1343 return NULL; 1371 return NULL;
1344 1372
1345 alignamount = RXBUF_ALIGNMENT - 1373 alignamount = RXBUF_ALIGNMENT -
@@ -1350,15 +1378,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1350 */ 1378 */
1351 skb_reserve(skb, alignamount); 1379 skb_reserve(skb, alignamount);
1352 1380
1353 bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1354 priv->rx_buffer_size, DMA_FROM_DEVICE);
1355
1356 bdp->length = 0;
1357
1358 /* Mark the buffer empty */
1359 eieio();
1360 bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
1361
1362 return skb; 1381 return skb;
1363} 1382}
1364 1383
@@ -1544,10 +1563,31 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1544 bdp = priv->cur_rx; 1563 bdp = priv->cur_rx;
1545 1564
1546 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 1565 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
1566 struct sk_buff *newskb;
1547 rmb(); 1567 rmb();
1568
1569 /* Add another skb for the future */
1570 newskb = gfar_new_skb(dev);
1571
1548 skb = priv->rx_skbuff[priv->skb_currx]; 1572 skb = priv->rx_skbuff[priv->skb_currx];
1549 1573
1550 if ((bdp->status & RXBD_LAST) && !(bdp->status & RXBD_ERR)) { 1574 /* We drop the frame if we failed to allocate a new buffer */
1575 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
1576 bdp->status & RXBD_ERR)) {
1577 count_errors(bdp->status, dev);
1578
1579 if (unlikely(!newskb))
1580 newskb = skb;
1581
 1582 else if (skb) {
1583 dma_unmap_single(&priv->dev->dev,
1584 bdp->bufPtr,
1585 priv->rx_buffer_size,
1586 DMA_FROM_DEVICE);
1587
1588 dev_kfree_skb_any(skb);
1589 }
1590 } else {
1551 /* Increment the number of packets */ 1591 /* Increment the number of packets */
1552 dev->stats.rx_packets++; 1592 dev->stats.rx_packets++;
1553 howmany++; 1593 howmany++;
@@ -1558,23 +1598,14 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1558 gfar_process_frame(dev, skb, pkt_len); 1598 gfar_process_frame(dev, skb, pkt_len);
1559 1599
1560 dev->stats.rx_bytes += pkt_len; 1600 dev->stats.rx_bytes += pkt_len;
1561 } else {
1562 count_errors(bdp->status, dev);
1563
1564 if (skb)
1565 dev_kfree_skb_any(skb);
1566
1567 priv->rx_skbuff[priv->skb_currx] = NULL;
1568 } 1601 }
1569 1602
1570 dev->last_rx = jiffies; 1603 dev->last_rx = jiffies;
1571 1604
1572 /* Clear the status flags for this buffer */ 1605 priv->rx_skbuff[priv->skb_currx] = newskb;
1573 bdp->status &= ~RXBD_STATS;
1574 1606
1575 /* Add another skb for the future */ 1607 /* Setup the new bdp */
1576 skb = gfar_new_skb(dev, bdp); 1608 gfar_new_rxbdp(dev, bdp, newskb);
1577 priv->rx_skbuff[priv->skb_currx] = skb;
1578 1609
1579 /* Update to the next pointer */ 1610 /* Update to the next pointer */
1580 if (bdp->status & RXBD_WRAP) 1611 if (bdp->status & RXBD_WRAP)
@@ -1584,9 +1615,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1584 1615
1585 /* update to point at the next skb */ 1616 /* update to point at the next skb */
1586 priv->skb_currx = 1617 priv->skb_currx =
1587 (priv->skb_currx + 1618 (priv->skb_currx + 1) &
1588 1) & RX_RING_MOD_MASK(priv->rx_ring_size); 1619 RX_RING_MOD_MASK(priv->rx_ring_size);
1589
1590 } 1620 }
1591 1621
1592 /* Update the current rxbd pointer to be the next one */ 1622 /* Update the current rxbd pointer to be the next one */
@@ -2001,12 +2031,16 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2001 return IRQ_HANDLED; 2031 return IRQ_HANDLED;
2002} 2032}
2003 2033
2034/* work with hotplug and coldplug */
2035MODULE_ALIAS("platform:fsl-gianfar");
2036
2004/* Structure for a device driver */ 2037/* Structure for a device driver */
2005static struct platform_driver gfar_driver = { 2038static struct platform_driver gfar_driver = {
2006 .probe = gfar_probe, 2039 .probe = gfar_probe,
2007 .remove = gfar_remove, 2040 .remove = gfar_remove,
2008 .driver = { 2041 .driver = {
2009 .name = "fsl-gianfar", 2042 .name = "fsl-gianfar",
2043 .owner = THIS_MODULE,
2010 }, 2044 },
2011}; 2045};
2012 2046
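
The gianfar rework above drops the busy-wait allocator (SKB_ALLOC_TIMEOUT) in favor of an allocate-before-consume refill, and splits buffer allocation (gfar_new_skb) from descriptor arming (gfar_new_rxbdp). A condensed sketch of the refill policy, with identifiers taken from the diff (this is not the verbatim loop body):

/* Allocate the replacement skb before handing the received one to the
 * stack; if allocation fails, recycle the received buffer back into
 * the ring and count the frame as dropped, so the ring never shrinks. */
struct sk_buff *newskb = gfar_new_skb(dev);
struct sk_buff *skb = priv->rx_skbuff[priv->skb_currx];

if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
	     (bdp->status & RXBD_ERR))) {
	count_errors(bdp->status, dev);
	if (!newskb)
		newskb = skb;			/* recycle; the frame is dropped */
	else if (skb)
		dev_kfree_skb_any(skb);		/* bad frame, fresh buffer ready */
} else {
	dev->stats.rx_packets++;
	gfar_process_frame(dev, skb, pkt_len);
}

priv->rx_skbuff[priv->skb_currx] = newskb;
gfar_new_rxbdp(dev, bdp, newskb);		/* remap and re-arm the BD */

Note the recycled skb must never be freed, which is why the free is guarded by "else if" rather than a plain "if".
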
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 378a23963495..5d2108c5ac7c 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -43,6 +43,8 @@
43#include <asm/io.h> 43#include <asm/io.h>
44#include <asm/dma.h> 44#include <asm/dma.h>
45#include <asm/uaccess.h> 45#include <asm/uaccess.h>
46#include <asm/dcr.h>
47#include <asm/dcr-regs.h>
46 48
47#include "core.h" 49#include "core.h"
48 50
@@ -127,10 +129,35 @@ static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
127static inline void emac_report_timeout_error(struct emac_instance *dev, 129static inline void emac_report_timeout_error(struct emac_instance *dev,
128 const char *error) 130 const char *error)
129{ 131{
130 if (net_ratelimit()) 132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
131 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error); 136 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
132} 137}
133 138
139/* EMAC PHY clock workaround:
 140 * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX,
 141 * which allows controlling each EMAC's clock individually
142 */
143static inline void emac_rx_clk_tx(struct emac_instance *dev)
144{
145#ifdef CONFIG_PPC_DCR_NATIVE
146 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
147 dcri_clrset(SDR0, SDR0_MFR,
148 0, SDR0_MFR_ECS >> dev->cell_index);
149#endif
150}
151
152static inline void emac_rx_clk_default(struct emac_instance *dev)
153{
154#ifdef CONFIG_PPC_DCR_NATIVE
155 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
156 dcri_clrset(SDR0, SDR0_MFR,
157 SDR0_MFR_ECS >> dev->cell_index, 0);
158#endif
159}
160
134/* PHY polling intervals */ 161/* PHY polling intervals */
135#define PHY_POLL_LINK_ON HZ 162#define PHY_POLL_LINK_ON HZ
136#define PHY_POLL_LINK_OFF (HZ / 5) 163#define PHY_POLL_LINK_OFF (HZ / 5)
@@ -524,7 +551,10 @@ static int emac_configure(struct emac_instance *dev)
524 rx_size = dev->rx_fifo_size_gige; 551 rx_size = dev->rx_fifo_size_gige;
525 552
526 if (dev->ndev->mtu > ETH_DATA_LEN) { 553 if (dev->ndev->mtu > ETH_DATA_LEN) {
527 mr1 |= EMAC_MR1_JPSM; 554 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
555 mr1 |= EMAC4_MR1_JPSM;
556 else
557 mr1 |= EMAC_MR1_JPSM;
528 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO; 558 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
529 } else 559 } else
530 dev->stop_timeout = STOP_TIMEOUT_1000; 560 dev->stop_timeout = STOP_TIMEOUT_1000;
@@ -708,7 +738,7 @@ static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
708 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); 738 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
709 739
710 /* Wait for management interface to become idle */ 740 /* Wait for management interface to become idle */
711 n = 10; 741 n = 20;
712 while (!emac_phy_done(dev, in_be32(&p->stacr))) { 742 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
713 udelay(1); 743 udelay(1);
714 if (!--n) { 744 if (!--n) {
@@ -733,7 +763,7 @@ static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
733 out_be32(&p->stacr, r); 763 out_be32(&p->stacr, r);
734 764
735 /* Wait for read to complete */ 765 /* Wait for read to complete */
736 n = 100; 766 n = 200;
737 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) { 767 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
738 udelay(1); 768 udelay(1);
739 if (!--n) { 769 if (!--n) {
@@ -780,7 +810,7 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
780 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); 810 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
781 811
782 /* Wait for management interface to be idle */ 812 /* Wait for management interface to be idle */
783 n = 10; 813 n = 20;
784 while (!emac_phy_done(dev, in_be32(&p->stacr))) { 814 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
785 udelay(1); 815 udelay(1);
786 if (!--n) { 816 if (!--n) {
@@ -806,7 +836,7 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
806 out_be32(&p->stacr, r); 836 out_be32(&p->stacr, r);
807 837
808 /* Wait for write to complete */ 838 /* Wait for write to complete */
809 n = 100; 839 n = 200;
810 while (!emac_phy_done(dev, in_be32(&p->stacr))) { 840 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
811 udelay(1); 841 udelay(1);
812 if (!--n) { 842 if (!--n) {
@@ -1094,9 +1124,11 @@ static int emac_open(struct net_device *ndev)
1094 int link_poll_interval; 1124 int link_poll_interval;
1095 if (dev->phy.def->ops->poll_link(&dev->phy)) { 1125 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1096 dev->phy.def->ops->read_link(&dev->phy); 1126 dev->phy.def->ops->read_link(&dev->phy);
1127 emac_rx_clk_default(dev);
1097 netif_carrier_on(dev->ndev); 1128 netif_carrier_on(dev->ndev);
1098 link_poll_interval = PHY_POLL_LINK_ON; 1129 link_poll_interval = PHY_POLL_LINK_ON;
1099 } else { 1130 } else {
1131 emac_rx_clk_tx(dev);
1100 netif_carrier_off(dev->ndev); 1132 netif_carrier_off(dev->ndev);
1101 link_poll_interval = PHY_POLL_LINK_OFF; 1133 link_poll_interval = PHY_POLL_LINK_OFF;
1102 } 1134 }
@@ -1174,6 +1206,7 @@ static void emac_link_timer(struct work_struct *work)
1174 1206
1175 if (dev->phy.def->ops->poll_link(&dev->phy)) { 1207 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1176 if (!netif_carrier_ok(dev->ndev)) { 1208 if (!netif_carrier_ok(dev->ndev)) {
1209 emac_rx_clk_default(dev);
1177 /* Get new link parameters */ 1210 /* Get new link parameters */
1178 dev->phy.def->ops->read_link(&dev->phy); 1211 dev->phy.def->ops->read_link(&dev->phy);
1179 1212
@@ -1186,6 +1219,7 @@ static void emac_link_timer(struct work_struct *work)
1186 link_poll_interval = PHY_POLL_LINK_ON; 1219 link_poll_interval = PHY_POLL_LINK_ON;
1187 } else { 1220 } else {
1188 if (netif_carrier_ok(dev->ndev)) { 1221 if (netif_carrier_ok(dev->ndev)) {
1222 emac_rx_clk_tx(dev);
1189 netif_carrier_off(dev->ndev); 1223 netif_carrier_off(dev->ndev);
1190 netif_tx_disable(dev->ndev); 1224 netif_tx_disable(dev->ndev);
1191 emac_reinitialize(dev); 1225 emac_reinitialize(dev);
@@ -2237,7 +2271,7 @@ static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2237 return 0; 2271 return 0;
2238} 2272}
2239 2273
2240static struct notifier_block emac_of_bus_notifier = { 2274static struct notifier_block emac_of_bus_notifier __devinitdata = {
2241 .notifier_call = emac_of_bus_notify 2275 .notifier_call = emac_of_bus_notify
2242}; 2276};
2243 2277
@@ -2330,6 +2364,19 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
2330 dev->phy.mdio_read = emac_mdio_read; 2364 dev->phy.mdio_read = emac_mdio_read;
2331 dev->phy.mdio_write = emac_mdio_write; 2365 dev->phy.mdio_write = emac_mdio_write;
2332 2366
2367 /* Enable internal clock source */
2368#ifdef CONFIG_PPC_DCR_NATIVE
2369 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2370 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2371#endif
2372 /* PHY clock workaround */
2373 emac_rx_clk_tx(dev);
2374
2333 /* Configure EMAC with defaults so we can at least use MDIO 2380 /* Configure EMAC with defaults so we can at least use MDIO
2334 * This is needed mostly for 440GX 2381 * This is needed mostly for 440GX
2335 */ 2382 */
@@ -2362,6 +2409,12 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
2362 if (!emac_mii_phy_probe(&dev->phy, i)) 2409 if (!emac_mii_phy_probe(&dev->phy, i))
2363 break; 2410 break;
2364 } 2411 }
2412
2413 /* Enable external clock source */
2414#ifdef CONFIG_PPC_DCR_NATIVE
2415 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2416 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2417#endif
2365 mutex_unlock(&emac_phy_map_lock); 2418 mutex_unlock(&emac_phy_map_lock);
2366 if (i == 0x20) { 2419 if (i == 0x20) {
2367 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name); 2420 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
@@ -2487,8 +2540,15 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2487 } 2540 }
2488 2541
2489 /* Check EMAC version */ 2542 /* Check EMAC version */
2490 if (of_device_is_compatible(np, "ibm,emac4")) 2543 if (of_device_is_compatible(np, "ibm,emac4")) {
2491 dev->features |= EMAC_FTR_EMAC4; 2544 dev->features |= EMAC_FTR_EMAC4;
2545 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2546 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2547 } else {
2548 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2549 of_device_is_compatible(np, "ibm,emac-440gr"))
2550 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2551 }
2492 2552
2493 /* Fixup some feature bits based on the device tree */ 2553 /* Fixup some feature bits based on the device tree */
2494 if (of_get_property(np, "has-inverted-stacr-oc", NULL)) 2554 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
@@ -2559,8 +2619,11 @@ static int __devinit emac_probe(struct of_device *ofdev,
2559 struct device_node **blist = NULL; 2619 struct device_node **blist = NULL;
2560 int err, i; 2620 int err, i;
2561 2621
 2562 /* Skip unused/unwired EMACS */ 2622 /* Skip unused/unwired EMACs. We leave the check for an unused
2563 if (of_get_property(np, "unused", NULL)) 2623 * property here for now, but new flat device trees should set a
2624 * status property to "disabled" instead.
2625 */
2626 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2564 return -ENODEV; 2627 return -ENODEV;
2565 2628
2566 /* Find ourselves in the bootlist if we are there */ 2629 /* Find ourselves in the bootlist if we are there */
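
The PHY clock workaround above leans on dcri_clrset(), the indirect-DCR read-modify-write helper. Roughly, and assuming the usual asm/dcr-native.h accessors, switching EMAC cell 1 of a 440EP to the internal receive clock (what emac_rx_clk_tx() does) expands to something like:

/* dcri_clrset(base, reg, clr, set): read the indirectly addressed DCR,
 * clear the 'clr' bits, set the 'set' bits, and write it back. */
u32 mfr;

mfr = mfdcri(SDR0, SDR0_MFR);		/* read via the SDR0 address/data pair */
mfr |= SDR0_MFR_ECS >> 1;		/* set the enable bit for cell_index 1 */
mtdcri(SDR0, SDR0_MFR, mfr);		/* write back */

emac_rx_clk_default() performs the inverse (clearing the per-cell bit) once the link is up, and the 440GX variant, which lacks per-cell bits, toggles the whole SDR0_MFR_ECS field around MDIO probing instead.
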
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index 4e74d8287c65..1683db9870a4 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -301,6 +301,14 @@ struct emac_instance {
301 * Set if we have new type STACR with STAOPC 301 * Set if we have new type STACR with STAOPC
302 */ 302 */
303#define EMAC_FTR_HAS_NEW_STACR 0x00000040 303#define EMAC_FTR_HAS_NEW_STACR 0x00000040
304/*
305 * Set if we need phy clock workaround for 440gx
306 */
307#define EMAC_FTR_440GX_PHY_CLK_FIX 0x00000080
308/*
309 * Set if we need phy clock workaround for 440ep or 440gr
310 */
311#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100
304 312
305 313
306/* Right now, we don't quite handle the always/possible masks on the 314/* Right now, we don't quite handle the always/possible masks on the
@@ -312,8 +320,8 @@ enum {
312 320
313 EMAC_FTRS_POSSIBLE = 321 EMAC_FTRS_POSSIBLE =
314#ifdef CONFIG_IBM_NEW_EMAC_EMAC4 322#ifdef CONFIG_IBM_NEW_EMAC_EMAC4
315 EMAC_FTR_EMAC4 | EMAC_FTR_HAS_NEW_STACR | 323 EMAC_FTR_EMAC4 | EMAC_FTR_HAS_NEW_STACR |
316 EMAC_FTR_STACR_OC_INVERT | 324 EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX |
317#endif 325#endif
318#ifdef CONFIG_IBM_NEW_EMAC_TAH 326#ifdef CONFIG_IBM_NEW_EMAC_TAH
319 EMAC_FTR_HAS_TAH | 327 EMAC_FTR_HAS_TAH |
@@ -324,7 +332,7 @@ enum {
324#ifdef CONFIG_IBM_NEW_EMAC_RGMII 332#ifdef CONFIG_IBM_NEW_EMAC_RGMII
325 EMAC_FTR_HAS_RGMII | 333 EMAC_FTR_HAS_RGMII |
326#endif 334#endif
327 0, 335 EMAC_FTR_440EP_PHY_CLK_FIX,
328}; 336};
329 337
330static inline int emac_has_feature(struct emac_instance *dev, 338static inline int emac_has_feature(struct emac_instance *dev,
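
Placing EMAC_FTR_440GX_PHY_CLK_FIX inside the EMAC4-guarded part of EMAC_FTRS_POSSIBLE, and making EMAC_FTR_440EP_PHY_CLK_FIX the unconditional tail of the mask, keeps the workarounds free on kernels that cannot need them: emac_has_feature() folds to a compile-time constant when a bit is outside the possible mask, so the guarded block is discarded entirely. An illustrative sketch (not driver code):

/* With CONFIG_IBM_NEW_EMAC_EMAC4 unset, EMAC_FTR_440GX_PHY_CLK_FIX is
 * absent from EMAC_FTRS_POSSIBLE, emac_has_feature() evaluates to a
 * constant 0, and the compiler eliminates this whole block: */
if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
	dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);	/* dead code */
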
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 6869f08c9dcb..10c267b2b961 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -61,8 +61,8 @@ int __devinit mal_register_commac(struct mal_instance *mal,
61 return 0; 61 return 0;
62} 62}
63 63
64void __devexit mal_unregister_commac(struct mal_instance *mal, 64void mal_unregister_commac(struct mal_instance *mal,
65 struct mal_commac *commac) 65 struct mal_commac *commac)
66{ 66{
67 unsigned long flags; 67 unsigned long flags;
68 68
@@ -136,6 +136,14 @@ void mal_enable_rx_channel(struct mal_instance *mal, int channel)
136{ 136{
137 unsigned long flags; 137 unsigned long flags;
138 138
139 /*
 140 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
 141 * of 8, but MAL_RXCASR indexes its enable bitmask by the channel
 142 * number divided by 8
143 */
144 if (!(channel % 8))
145 channel >>= 3;
146
139 spin_lock_irqsave(&mal->lock, flags); 147 spin_lock_irqsave(&mal->lock, flags);
140 148
141 MAL_DBG(mal, "enable_rx(%d)" NL, channel); 149 MAL_DBG(mal, "enable_rx(%d)" NL, channel);
@@ -148,6 +156,14 @@ void mal_enable_rx_channel(struct mal_instance *mal, int channel)
148 156
149void mal_disable_rx_channel(struct mal_instance *mal, int channel) 157void mal_disable_rx_channel(struct mal_instance *mal, int channel)
150{ 158{
159 /*
 160 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
 161 * of 8, but MAL_RXCARR likewise indexes its bitmask by the channel
 162 * number divided by 8
163 */
164 if (!(channel % 8))
165 channel >>= 3;
166
151 set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel)); 167 set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
152 168
153 MAL_DBG(mal, "disable_rx(%d)" NL, channel); 169 MAL_DBG(mal, "disable_rx(%d)" NL, channel);
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 5757788227be..e32da3de2695 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -179,7 +179,7 @@ void rgmii_put_mdio(struct of_device *ofdev, int input)
179 mutex_unlock(&dev->lock); 179 mutex_unlock(&dev->lock);
180} 180}
181 181
182void __devexit rgmii_detach(struct of_device *ofdev, int input) 182void rgmii_detach(struct of_device *ofdev, int input)
183{ 183{
184 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 184 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
185 struct rgmii_regs __iomem *p = dev->base; 185 struct rgmii_regs __iomem *p = dev->base;
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index b023d10d7e1c..30173a9fb557 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -35,7 +35,7 @@ int __devinit tah_attach(struct of_device *ofdev, int channel)
35 return 0; 35 return 0;
36} 36}
37 37
38void __devexit tah_detach(struct of_device *ofdev, int channel) 38void tah_detach(struct of_device *ofdev, int channel)
39{ 39{
40 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); 40 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
41 41
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 2ea472aeab06..17b154124943 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -189,7 +189,7 @@ void zmii_set_speed(struct of_device *ofdev, int input, int speed)
189 mutex_unlock(&dev->lock); 189 mutex_unlock(&dev->lock);
190} 190}
191 191
192void __devexit zmii_detach(struct of_device *ofdev, int input) 192void zmii_detach(struct of_device *ofdev, int input)
193{ 193{
194 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 194 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
195 195
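
The __devexit annotations are dropped from mal_unregister_commac(), rgmii_detach(), tah_detach() and zmii_detach() above because these helpers are also reachable from probe-time error paths; code placed in .devexit.text can be discarded when hotplug removal support is compiled out, so calling it anywhere but remove() is unsafe. A generic sketch of the hazard (the example_* names are hypothetical):

static int __devinit example_probe(struct of_device *ofdev,
				   const struct of_device_id *match)
{
	int err;

	err = example_attach(ofdev, 0);		/* hypothetical setup step */
	if (err)
		return err;

	err = example_configure(ofdev);		/* hypothetical */
	if (err) {
		/* This unwind runs at probe time, so the detach helper
		 * must live in normal .text -- hence no __devexit. */
		example_detach(ofdev, 0);
		return err;
	}
	return 0;
}
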
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index aaee02e9e3f0..ae398f04c7b4 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -871,6 +871,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
871 goto err_pci_reg; 871 goto err_pci_reg;
872 872
873 pci_set_master(pdev); 873 pci_set_master(pdev);
874 pci_save_state(pdev);
874 875
875 err = -ENOMEM; 876 err = -ENOMEM;
876 netdev = alloc_etherdev(sizeof(struct igb_adapter)); 877 netdev = alloc_etherdev(sizeof(struct igb_adapter));
@@ -4079,6 +4080,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4079 return PCI_ERS_RESULT_DISCONNECT; 4080 return PCI_ERS_RESULT_DISCONNECT;
4080 } 4081 }
4081 pci_set_master(pdev); 4082 pci_set_master(pdev);
4083 pci_restore_state(pdev);
4082 4084
4083 pci_enable_wake(pdev, PCI_D3hot, 0); 4085 pci_enable_wake(pdev, PCI_D3hot, 0);
4084 pci_enable_wake(pdev, PCI_D3cold, 0); 4086 pci_enable_wake(pdev, PCI_D3cold, 0);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 9f584521304a..083b0dd70fef 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -60,6 +60,7 @@ static struct platform_driver ali_ircc_driver = {
60 .resume = ali_ircc_resume, 60 .resume = ali_ircc_resume,
61 .driver = { 61 .driver = {
62 .name = ALI_IRCC_DRIVER_NAME, 62 .name = ALI_IRCC_DRIVER_NAME,
63 .owner = THIS_MODULE,
63 }, 64 },
64}; 65};
65 66
@@ -2256,6 +2257,7 @@ static void FIR2SIR(int iobase)
2256MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>"); 2257MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
2257MODULE_DESCRIPTION("ALi FIR Controller Driver"); 2258MODULE_DESCRIPTION("ALi FIR Controller Driver");
2258MODULE_LICENSE("GPL"); 2259MODULE_LICENSE("GPL");
2260MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME);
2259 2261
2260 2262
2261module_param_array(io, int, NULL, 0); 2263module_param_array(io, int, NULL, 0);
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 8db71ab20456..d5c2d27f3ea4 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -908,6 +908,7 @@ static int pxa_irda_remove(struct platform_device *_dev)
908static struct platform_driver pxa_ir_driver = { 908static struct platform_driver pxa_ir_driver = {
909 .driver = { 909 .driver = {
910 .name = "pxa2xx-ir", 910 .name = "pxa2xx-ir",
911 .owner = THIS_MODULE,
911 }, 912 },
912 .probe = pxa_irda_probe, 913 .probe = pxa_irda_probe,
913 .remove = pxa_irda_remove, 914 .remove = pxa_irda_remove,
@@ -929,3 +930,4 @@ module_init(pxa_irda_init);
929module_exit(pxa_irda_exit); 930module_exit(pxa_irda_exit);
930 931
931MODULE_LICENSE("GPL"); 932MODULE_LICENSE("GPL");
933MODULE_ALIAS("platform:pxa2xx-ir");
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 056639f72bec..1bc8518f9197 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -1008,6 +1008,7 @@ static struct platform_driver sa1100ir_driver = {
1008 .resume = sa1100_irda_resume, 1008 .resume = sa1100_irda_resume,
1009 .driver = { 1009 .driver = {
1010 .name = "sa11x0-ir", 1010 .name = "sa11x0-ir",
1011 .owner = THIS_MODULE,
1011 }, 1012 },
1012}; 1013};
1013 1014
@@ -1041,3 +1042,4 @@ MODULE_LICENSE("GPL");
1041MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)"); 1042MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
1042MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode"); 1043MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
1043MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)"); 1044MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
1045MODULE_ALIAS("platform:sa11x0-ir");
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index cb371a8c24a7..7b859220c255 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3431,6 +3431,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3431 } 3431 }
3432 3432
3433 pci_set_master(pdev); 3433 pci_set_master(pdev);
3434 pci_save_state(pdev);
3434 3435
3435#ifdef CONFIG_NETDEVICES_MULTIQUEUE 3436#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3436 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES); 3437 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
@@ -3721,6 +3722,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
3721 return PCI_ERS_RESULT_DISCONNECT; 3722 return PCI_ERS_RESULT_DISCONNECT;
3722 } 3723 }
3723 pci_set_master(pdev); 3724 pci_set_master(pdev);
3725 pci_restore_state(pdev);
3724 3726
3725 pci_enable_wake(pdev, PCI_D3hot, 0); 3727 pci_enable_wake(pdev, PCI_D3hot, 0);
3726 pci_enable_wake(pdev, PCI_D3cold, 0); 3728 pci_enable_wake(pdev, PCI_D3cold, 0);
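
The igb and ixgbe hunks apply the same AER recovery fix: snapshot PCI config space once probe has it in a sane state, then replay it after a slot reset, since the reset clears BARs and command bits that pci_enable_device() alone does not restore. A generic sketch of the pairing (not the literal driver code):

static int __devinit example_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	pci_set_master(pdev);
	pci_save_state(pdev);	/* snapshot config space for later restore */
	/* ... */
	return 0;
}

static pci_ers_result_t example_io_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);	/* replay BARs, command register, etc. */

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);
	return PCI_ERS_RESULT_RECOVERED;
}
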
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 5c154fe13859..07944820f745 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -249,6 +249,7 @@ out:
249MODULE_DESCRIPTION("Jazz SONIC ethernet driver"); 249MODULE_DESCRIPTION("Jazz SONIC ethernet driver");
250module_param(sonic_debug, int, 0); 250module_param(sonic_debug, int, 0);
251MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)"); 251MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)");
252MODULE_ALIAS("platform:jazzsonic");
252 253
253#include "sonic.c" 254#include "sonic.c"
254 255
@@ -271,6 +272,7 @@ static struct platform_driver jazz_sonic_driver = {
271 .remove = __devexit_p(jazz_sonic_device_remove), 272 .remove = __devexit_p(jazz_sonic_device_remove),
272 .driver = { 273 .driver = {
273 .name = jazz_sonic_string, 274 .name = jazz_sonic_string,
275 .owner = THIS_MODULE,
274 }, 276 },
275}; 277};
276 278
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 1d24a73a0e1a..e18576316bda 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -883,7 +883,7 @@ static int korina_init(struct net_device *dev)
883static int korina_restart(struct net_device *dev) 883static int korina_restart(struct net_device *dev)
884{ 884{
885 struct korina_private *lp = netdev_priv(dev); 885 struct korina_private *lp = netdev_priv(dev);
886 int ret = 0; 886 int ret;
887 887
888 /* 888 /*
889 * Disable interrupts 889 * Disable interrupts
@@ -987,7 +987,7 @@ static void korina_poll_controller(struct net_device *dev)
987static int korina_open(struct net_device *dev) 987static int korina_open(struct net_device *dev)
988{ 988{
989 struct korina_private *lp = netdev_priv(dev); 989 struct korina_private *lp = netdev_priv(dev);
990 int ret = 0; 990 int ret;
991 991
992 /* Initialize */ 992 /* Initialize */
993 ret = korina_init(dev); 993 ret = korina_init(dev);
@@ -1031,6 +1031,8 @@ static int korina_open(struct net_device *dev)
1031 dev->name, lp->und_irq); 1031 dev->name, lp->und_irq);
1032 goto err_free_ovr_irq; 1032 goto err_free_ovr_irq;
1033 } 1033 }
1034out:
1035 return ret;
1034 1036
1035err_free_ovr_irq: 1037err_free_ovr_irq:
1036 free_irq(lp->ovr_irq, dev); 1038 free_irq(lp->ovr_irq, dev);
@@ -1041,8 +1043,6 @@ err_free_rx_irq:
1041err_release: 1043err_release:
1042 korina_free_ring(dev); 1044 korina_free_ring(dev);
1043 goto out; 1045 goto out;
1044out:
1045 return ret;
1046} 1046}
1047 1047
1048static int korina_close(struct net_device *dev) 1048static int korina_close(struct net_device *dev)
@@ -1082,7 +1082,7 @@ static int korina_probe(struct platform_device *pdev)
1082 struct korina_private *lp; 1082 struct korina_private *lp;
1083 struct net_device *dev; 1083 struct net_device *dev;
1084 struct resource *r; 1084 struct resource *r;
1085 int retval, err; 1085 int rc;
1086 1086
1087 dev = alloc_etherdev(sizeof(struct korina_private)); 1087 dev = alloc_etherdev(sizeof(struct korina_private));
1088 if (!dev) { 1088 if (!dev) {
@@ -1106,7 +1106,7 @@ static int korina_probe(struct platform_device *pdev)
1106 lp->eth_regs = ioremap_nocache(r->start, r->end - r->start); 1106 lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
1107 if (!lp->eth_regs) { 1107 if (!lp->eth_regs) {
1108 printk(KERN_ERR DRV_NAME "cannot remap registers\n"); 1108 printk(KERN_ERR DRV_NAME "cannot remap registers\n");
1109 retval = -ENXIO; 1109 rc = -ENXIO;
1110 goto probe_err_out; 1110 goto probe_err_out;
1111 } 1111 }
1112 1112
@@ -1114,7 +1114,7 @@ static int korina_probe(struct platform_device *pdev)
1114 lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start); 1114 lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
1115 if (!lp->rx_dma_regs) { 1115 if (!lp->rx_dma_regs) {
1116 printk(KERN_ERR DRV_NAME "cannot remap Rx DMA registers\n"); 1116 printk(KERN_ERR DRV_NAME "cannot remap Rx DMA registers\n");
1117 retval = -ENXIO; 1117 rc = -ENXIO;
1118 goto probe_err_dma_rx; 1118 goto probe_err_dma_rx;
1119 } 1119 }
1120 1120
@@ -1122,14 +1122,14 @@ static int korina_probe(struct platform_device *pdev)
1122 lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start); 1122 lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
1123 if (!lp->tx_dma_regs) { 1123 if (!lp->tx_dma_regs) {
1124 printk(KERN_ERR DRV_NAME "cannot remap Tx DMA registers\n"); 1124 printk(KERN_ERR DRV_NAME "cannot remap Tx DMA registers\n");
1125 retval = -ENXIO; 1125 rc = -ENXIO;
1126 goto probe_err_dma_tx; 1126 goto probe_err_dma_tx;
1127 } 1127 }
1128 1128
1129 lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL); 1129 lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
1130 if (!lp->td_ring) { 1130 if (!lp->td_ring) {
1131 printk(KERN_ERR DRV_NAME "cannot allocate descriptors\n"); 1131 printk(KERN_ERR DRV_NAME "cannot allocate descriptors\n");
 1132 retval = -ENOMEM; 1132 rc = -ENOMEM;
1133 goto probe_err_td_ring; 1133 goto probe_err_td_ring;
1134 } 1134 }
1135 1135
@@ -1166,14 +1166,14 @@ static int korina_probe(struct platform_device *pdev)
1166 lp->mii_if.phy_id_mask = 0x1f; 1166 lp->mii_if.phy_id_mask = 0x1f;
1167 lp->mii_if.reg_num_mask = 0x1f; 1167 lp->mii_if.reg_num_mask = 0x1f;
1168 1168
1169 err = register_netdev(dev); 1169 rc = register_netdev(dev);
1170 if (err) { 1170 if (rc < 0) {
1171 printk(KERN_ERR DRV_NAME 1171 printk(KERN_ERR DRV_NAME
1172 ": cannot register net device %d\n", err); 1172 ": cannot register net device %d\n", rc);
1173 retval = -EINVAL;
1174 goto probe_err_register; 1173 goto probe_err_register;
1175 } 1174 }
1176 return 0; 1175out:
1176 return rc;
1177 1177
1178probe_err_register: 1178probe_err_register:
1179 kfree(lp->td_ring); 1179 kfree(lp->td_ring);
@@ -1185,7 +1185,7 @@ probe_err_dma_rx:
1185 iounmap(lp->eth_regs); 1185 iounmap(lp->eth_regs);
1186probe_err_out: 1186probe_err_out:
1187 free_netdev(dev); 1187 free_netdev(dev);
1188 return retval; 1188 goto out;
1189} 1189}
1190 1190
1191static int korina_remove(struct platform_device *pdev) 1191static int korina_remove(struct platform_device *pdev)
@@ -1193,12 +1193,9 @@ static int korina_remove(struct platform_device *pdev)
1193 struct korina_device *bif = platform_get_drvdata(pdev); 1193 struct korina_device *bif = platform_get_drvdata(pdev);
1194 struct korina_private *lp = netdev_priv(bif->dev); 1194 struct korina_private *lp = netdev_priv(bif->dev);
1195 1195
1196 if (lp->eth_regs) 1196 iounmap(lp->eth_regs);
1197 iounmap(lp->eth_regs); 1197 iounmap(lp->rx_dma_regs);
1198 if (lp->rx_dma_regs) 1198 iounmap(lp->tx_dma_regs);
1199 iounmap(lp->rx_dma_regs);
1200 if (lp->tx_dma_regs)
1201 iounmap(lp->tx_dma_regs);
1202 1199
1203 platform_set_drvdata(pdev, NULL); 1200 platform_set_drvdata(pdev, NULL);
1204 unregister_netdev(bif->dev); 1201 unregister_netdev(bif->dev);
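
The korina cleanup above converges on a single return variable and a single exit label, with error labels that unwind and jump back to it. A condensed sketch of the style (step/undo names hypothetical):

static int example_probe(void)
{
	int rc;

	rc = step_a();			/* hypothetical */
	if (rc < 0)
		goto probe_err_a;

	rc = step_b();			/* hypothetical */
	if (rc < 0)
		goto probe_err_b;
out:
	return rc;			/* rc == 0 on the success path */

probe_err_b:
	undo_a();
probe_err_a:
	goto out;
}

The remove path is also simplified: iounmap() tolerates the pointers the probe path guarantees are valid by that point, so the NULL checks are dropped.
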
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index d513bb8a4902..92dccd43bdca 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1281,6 +1281,7 @@ static struct platform_driver macb_driver = {
1281 .remove = __exit_p(macb_remove), 1281 .remove = __exit_p(macb_remove),
1282 .driver = { 1282 .driver = {
1283 .name = "macb", 1283 .name = "macb",
1284 .owner = THIS_MODULE,
1284 }, 1285 },
1285}; 1286};
1286 1287
@@ -1300,3 +1301,4 @@ module_exit(macb_exit);
1300MODULE_LICENSE("GPL"); 1301MODULE_LICENSE("GPL");
1301MODULE_DESCRIPTION("Atmel MACB Ethernet driver"); 1302MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
1302MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); 1303MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
1304MODULE_ALIAS("platform:macb");
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index cdaa8fc21809..0b32648a2136 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -830,6 +830,7 @@ static struct platform_driver meth_driver = {
830 .remove = __devexit_p(meth_remove), 830 .remove = __devexit_p(meth_remove),
831 .driver = { 831 .driver = {
832 .name = "meth", 832 .name = "meth",
833 .owner = THIS_MODULE,
833 } 834 }
834}; 835};
835 836
@@ -855,3 +856,4 @@ module_exit(meth_exit_module);
855MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>"); 856MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
856MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver"); 857MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
857MODULE_LICENSE("GPL"); 858MODULE_LICENSE("GPL");
859MODULE_ALIAS("platform:meth");
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 601ffd69ebc8..381b36e5f64c 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2030,6 +2030,7 @@ static struct platform_driver mv643xx_eth_driver = {
2030 .shutdown = mv643xx_eth_shutdown, 2030 .shutdown = mv643xx_eth_shutdown,
2031 .driver = { 2031 .driver = {
2032 .name = MV643XX_ETH_NAME, 2032 .name = MV643XX_ETH_NAME,
2033 .owner = THIS_MODULE,
2033 }, 2034 },
2034}; 2035};
2035 2036
@@ -2038,6 +2039,7 @@ static struct platform_driver mv643xx_eth_shared_driver = {
2038 .remove = mv643xx_eth_shared_remove, 2039 .remove = mv643xx_eth_shared_remove,
2039 .driver = { 2040 .driver = {
2040 .name = MV643XX_ETH_SHARED_NAME, 2041 .name = MV643XX_ETH_SHARED_NAME,
2042 .owner = THIS_MODULE,
2041 }, 2043 },
2042}; 2044};
2043 2045
@@ -2085,7 +2087,8 @@ MODULE_LICENSE("GPL");
2085MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani" 2087MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
2086 " and Dale Farnsworth"); 2088 " and Dale Farnsworth");
2087MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 2089MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
2088MODULE_ALIAS("platform:mv643xx_eth"); 2090MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
2091MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
2089 2092
2090/* 2093/*
2091 * The second part is the low level driver of the gigE ethernet ports. 2094 * The second part is the low level driver of the gigE ethernet ports.
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 78d34af13a1c..dc442e370850 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -502,4 +502,4 @@ module_exit(netx_eth_cleanup);
502 502
503MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 503MODULE_AUTHOR("Sascha Hauer, Pengutronix");
504MODULE_LICENSE("GPL"); 504MODULE_LICENSE("GPL");
505 505MODULE_ALIAS("platform:" CARDNAME);
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 05748ca6f216..af7356468251 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -1132,8 +1132,8 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
1132 u32 fw_minor = 0; 1132 u32 fw_minor = 0;
1133 u32 fw_build = 0; 1133 u32 fw_build = 0;
1134 char brd_name[NETXEN_MAX_SHORT_NAME]; 1134 char brd_name[NETXEN_MAX_SHORT_NAME];
1135 struct netxen_new_user_info user_info; 1135 char serial_num[32];
1136 int i, addr = NETXEN_USER_START; 1136 int i, addr;
1137 __le32 *ptr32; 1137 __le32 *ptr32;
1138 1138
1139 struct netxen_board_info *board_info = &(adapter->ahw.boardcfg); 1139 struct netxen_board_info *board_info = &(adapter->ahw.boardcfg);
@@ -1150,10 +1150,10 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
1150 valid = 0; 1150 valid = 0;
1151 } 1151 }
1152 if (valid) { 1152 if (valid) {
1153 ptr32 = (u32 *) & user_info; 1153 ptr32 = (u32 *)&serial_num;
1154 for (i = 0; 1154 addr = NETXEN_USER_START +
1155 i < sizeof(struct netxen_new_user_info) / sizeof(u32); 1155 offsetof(struct netxen_new_user_info, serial_num);
1156 i++) { 1156 for (i = 0; i < 8; i++) {
1157 if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) { 1157 if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) {
1158 printk("%s: ERROR reading %s board userarea.\n", 1158 printk("%s: ERROR reading %s board userarea.\n",
1159 netxen_nic_driver_name, 1159 netxen_nic_driver_name,
@@ -1163,10 +1163,11 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
1163 ptr32++; 1163 ptr32++;
1164 addr += sizeof(u32); 1164 addr += sizeof(u32);
1165 } 1165 }
1166
1166 get_brd_name_by_type(board_info->board_type, brd_name); 1167 get_brd_name_by_type(board_info->board_type, brd_name);
1167 1168
1168 printk("NetXen %s Board S/N %s Chip id 0x%x\n", 1169 printk("NetXen %s Board S/N %s Chip id 0x%x\n",
1169 brd_name, user_info.serial_num, board_info->chip_id); 1170 brd_name, serial_num, board_info->chip_id);
1170 1171
1171 printk("NetXen %s Board #%d, Chip id 0x%x\n", 1172 printk("NetXen %s Board #%d, Chip id 0x%x\n",
1172 board_info->board_type == 0x0b ? "XGB" : "GBE", 1173 board_info->board_type == 0x0b ? "XGB" : "GBE",
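The netxen hunk stops copying the whole user area onto the stack: it reads only the 32-byte serial number, locating it in flash with offsetof(). A userspace sketch of the same technique; struct flash_user_area, USER_AREA_BASE and the flash_read32 callback are stand-ins for netxen_new_user_info, NETXEN_USER_START and netxen_rom_fast_read():

#include <stddef.h>
#include <stdint.h>

struct flash_user_area {
	uint32_t flags;
	char     serial_num[32];
};

#define USER_AREA_BASE 0x3e8000u	/* assumed flash offset */

static int read_serial(char out[32],
		       int (*flash_read32)(uint32_t addr, uint32_t *val))
{
	uint32_t *p = (uint32_t *)out;
	uint32_t addr = USER_AREA_BASE +
			offsetof(struct flash_user_area, serial_num);
	int i;

	for (i = 0; i < 32 / 4; i++) {	/* 8 word reads, as in the hunk */
		if (flash_read32(addr, p) != 0)
			return -1;
		p++;
		addr += sizeof(uint32_t);
	}
	return 0;
}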
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 7565c2d7f30e..4009c4ce96b4 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -33,8 +33,8 @@
33 33
34#define DRV_MODULE_NAME "niu" 34#define DRV_MODULE_NAME "niu"
35#define PFX DRV_MODULE_NAME ": " 35#define PFX DRV_MODULE_NAME ": "
36#define DRV_MODULE_VERSION "0.7" 36#define DRV_MODULE_VERSION "0.8"
37#define DRV_MODULE_RELDATE "February 18, 2008" 37#define DRV_MODULE_RELDATE "April 24, 2008"
38 38
39static char version[] __devinitdata = 39static char version[] __devinitdata =
40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -673,11 +673,16 @@ static int serdes_init_10g(struct niu *np)
673 } 673 }
674 674
675 if ((sig & mask) != val) { 675 if ((sig & mask) != val) {
676 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
677 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
678 return 0;
679 }
676 dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 680 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
677 "[%08x]\n", np->port, (int) (sig & mask), (int) val); 681 "[%08x]\n", np->port, (int) (sig & mask), (int) val);
678 return -ENODEV; 682 return -ENODEV;
679 } 683 }
680 684 if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
685 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
681 return 0; 686 return 0;
682} 687}
683 688
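The serdes hunk above treats a failed signal-bit check on a hot-pluggable PHY as "module absent" rather than a probe-killing error, so the link poller can re-detect the module later. A condensed kernel-side sketch of the flag protocol; the flag values come from the niu.h hunk further down, and 'trained' stands for the (sig & mask) == val outcome:

#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000
#define NIU_FLAGS_HOTPLUG_PHY         0x01000000

static int serdes_result(int trained, unsigned int *flags)
{
	if (!trained) {
		if (*flags & NIU_FLAGS_HOTPLUG_PHY) {
			*flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;	/* absent, not broken: the poller retries */
		}
		return -ENODEV;		/* fixed PHY: hard failure */
	}
	if (*flags & NIU_FLAGS_HOTPLUG_PHY)
		*flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}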
@@ -998,6 +1003,28 @@ static int bcm8704_user_dev3_readback(struct niu *np, int reg)
998 return 0; 1003 return 0;
999} 1004}
1000 1005
1006static int bcm8706_init_user_dev3(struct niu *np)
1007{
1008 int err;
1009
1010
1011 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1012 BCM8704_USER_OPT_DIGITAL_CTRL);
1013 if (err < 0)
1014 return err;
1015 err &= ~USER_ODIG_CTRL_GPIOS;
1016 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1017 err |= USER_ODIG_CTRL_RESV2;
1018 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1019 BCM8704_USER_OPT_DIGITAL_CTRL, err);
1020 if (err)
1021 return err;
1022
1023 mdelay(1000);
1024
1025 return 0;
1026}
1027
1001static int bcm8704_init_user_dev3(struct niu *np) 1028static int bcm8704_init_user_dev3(struct niu *np)
1002{ 1029{
1003 int err; 1030 int err;
@@ -1127,33 +1154,11 @@ static int xcvr_init_10g_mrvl88x2011(struct niu *np)
1127 MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX); 1154 MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
1128} 1155}
1129 1156
1130static int xcvr_init_10g_bcm8704(struct niu *np) 1157
1158static int xcvr_diag_bcm870x(struct niu *np)
1131{ 1159{
1132 struct niu_link_config *lp = &np->link_config;
1133 u16 analog_stat0, tx_alarm_status; 1160 u16 analog_stat0, tx_alarm_status;
1134 int err; 1161 int err = 0;
1135
1136 err = bcm8704_reset(np);
1137 if (err)
1138 return err;
1139
1140 err = bcm8704_init_user_dev3(np);
1141 if (err)
1142 return err;
1143
1144 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1145 MII_BMCR);
1146 if (err < 0)
1147 return err;
1148 err &= ~BMCR_LOOPBACK;
1149
1150 if (lp->loopback_mode == LOOPBACK_MAC)
1151 err |= BMCR_LOOPBACK;
1152
1153 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1154 MII_BMCR, err);
1155 if (err)
1156 return err;
1157 1162
1158#if 1 1163#if 1
1159 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 1164 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
@@ -1211,6 +1216,89 @@ static int xcvr_init_10g_bcm8704(struct niu *np)
1211 return 0; 1216 return 0;
1212} 1217}
1213 1218
1219static int xcvr_10g_set_lb_bcm870x(struct niu *np)
1220{
1221 struct niu_link_config *lp = &np->link_config;
1222 int err;
1223
1224 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1225 MII_BMCR);
1226 if (err < 0)
1227 return err;
1228
1229 err &= ~BMCR_LOOPBACK;
1230
1231 if (lp->loopback_mode == LOOPBACK_MAC)
1232 err |= BMCR_LOOPBACK;
1233
1234 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1235 MII_BMCR, err);
1236 if (err)
1237 return err;
1238
1239 return 0;
1240}
1241
1242static int xcvr_init_10g_bcm8706(struct niu *np)
1243{
1244 int err = 0;
1245 u64 val;
1246
1247 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
1248 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
1249 return err;
1250
1251 val = nr64_mac(XMAC_CONFIG);
1252 val &= ~XMAC_CONFIG_LED_POLARITY;
1253 val |= XMAC_CONFIG_FORCE_LED_ON;
1254 nw64_mac(XMAC_CONFIG, val);
1255
1256 val = nr64(MIF_CONFIG);
1257 val |= MIF_CONFIG_INDIRECT_MODE;
1258 nw64(MIF_CONFIG, val);
1259
1260 err = bcm8704_reset(np);
1261 if (err)
1262 return err;
1263
1264 err = xcvr_10g_set_lb_bcm870x(np);
1265 if (err)
1266 return err;
1267
1268 err = bcm8706_init_user_dev3(np);
1269 if (err)
1270 return err;
1271
1272 err = xcvr_diag_bcm870x(np);
1273 if (err)
1274 return err;
1275
1276 return 0;
1277}
1278
1279static int xcvr_init_10g_bcm8704(struct niu *np)
1280{
1281 int err;
1282
1283 err = bcm8704_reset(np);
1284 if (err)
1285 return err;
1286
1287 err = bcm8704_init_user_dev3(np);
1288 if (err)
1289 return err;
1290
1291 err = xcvr_10g_set_lb_bcm870x(np);
1292 if (err)
1293 return err;
1294
1295 err = xcvr_diag_bcm870x(np);
1296 if (err)
1297 return err;
1298
1299 return 0;
1300}
1301
1214static int xcvr_init_10g(struct niu *np) 1302static int xcvr_init_10g(struct niu *np)
1215{ 1303{
1216 int phy_id, err; 1304 int phy_id, err;
@@ -1548,6 +1636,59 @@ out:
1548 return err; 1636 return err;
1549} 1637}
1550 1638
1639static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
1640{
1641 int err, link_up;
1642 link_up = 0;
1643
1644 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1645 BCM8704_PMD_RCV_SIGDET);
1646 if (err < 0)
1647 goto out;
1648 if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
1649 err = 0;
1650 goto out;
1651 }
1652
1653 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1654 BCM8704_PCS_10G_R_STATUS);
1655 if (err < 0)
1656 goto out;
1657
1658 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
1659 err = 0;
1660 goto out;
1661 }
1662
1663 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1664 BCM8704_PHYXS_XGXS_LANE_STAT);
1665 if (err < 0)
1666 goto out;
1667 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
1668 PHYXS_XGXS_LANE_STAT_MAGIC |
1669 PHYXS_XGXS_LANE_STAT_PATTEST |
1670 PHYXS_XGXS_LANE_STAT_LANE3 |
1671 PHYXS_XGXS_LANE_STAT_LANE2 |
1672 PHYXS_XGXS_LANE_STAT_LANE1 |
1673 PHYXS_XGXS_LANE_STAT_LANE0)) {
1674 err = 0;
1675 np->link_config.active_speed = SPEED_INVALID;
1676 np->link_config.active_duplex = DUPLEX_INVALID;
1677 goto out;
1678 }
1679
1680 link_up = 1;
1681 np->link_config.active_speed = SPEED_10000;
1682 np->link_config.active_duplex = DUPLEX_FULL;
1683 err = 0;
1684
1685out:
1686 *link_up_p = link_up;
1687 if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
1688 err = 0;
1689 return err;
1690}
1691
1551static int link_status_10g_bcom(struct niu *np, int *link_up_p) 1692static int link_status_10g_bcom(struct niu *np, int *link_up_p)
1552{ 1693{
1553 int err, link_up; 1694 int err, link_up;
@@ -1627,6 +1768,82 @@ static int link_status_10g(struct niu *np, int *link_up_p)
1627 return err; 1768 return err;
1628} 1769}
1629 1770
1771static int niu_10g_phy_present(struct niu *np)
1772{
1773 u64 sig, mask, val;
1774
1775 sig = nr64(ESR_INT_SIGNALS);
1776 switch (np->port) {
1777 case 0:
1778 mask = ESR_INT_SIGNALS_P0_BITS;
1779 val = (ESR_INT_SRDY0_P0 |
1780 ESR_INT_DET0_P0 |
1781 ESR_INT_XSRDY_P0 |
1782 ESR_INT_XDP_P0_CH3 |
1783 ESR_INT_XDP_P0_CH2 |
1784 ESR_INT_XDP_P0_CH1 |
1785 ESR_INT_XDP_P0_CH0);
1786 break;
1787
1788 case 1:
1789 mask = ESR_INT_SIGNALS_P1_BITS;
1790 val = (ESR_INT_SRDY0_P1 |
1791 ESR_INT_DET0_P1 |
1792 ESR_INT_XSRDY_P1 |
1793 ESR_INT_XDP_P1_CH3 |
1794 ESR_INT_XDP_P1_CH2 |
1795 ESR_INT_XDP_P1_CH1 |
1796 ESR_INT_XDP_P1_CH0);
1797 break;
1798
1799 default:
1800 return 0;
1801 }
1802
1803 if ((sig & mask) != val)
1804 return 0;
1805 return 1;
1806}
1807
1808static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
1809{
1810 unsigned long flags;
1811 int err = 0;
1812 int phy_present;
1813 int phy_present_prev;
1814
1815 spin_lock_irqsave(&np->lock, flags);
1816
1817 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
1818 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
1819 1 : 0;
1820 phy_present = niu_10g_phy_present(np);
1821 if (phy_present != phy_present_prev) {
1822 /* state change */
1823 if (phy_present) {
1824 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
1825 if (np->phy_ops->xcvr_init)
1826 err = np->phy_ops->xcvr_init(np);
1827 if (err) {
1828 /* debounce */
1829 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
1830 }
1831 } else {
1832 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
1833 *link_up_p = 0;
1834 niuwarn(LINK, "%s: Hotplug PHY Removed\n",
1835 np->dev->name);
1836 }
1837 }
1838 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
1839 err = link_status_10g_bcm8706(np, link_up_p);
1840 }
1841
1842 spin_unlock_irqrestore(&np->lock, flags);
1843
1844 return err;
1845}
1846
1630static int link_status_1g(struct niu *np, int *link_up_p) 1847static int link_status_1g(struct niu *np, int *link_up_p)
1631{ 1848{
1632 struct niu_link_config *lp = &np->link_config; 1849 struct niu_link_config *lp = &np->link_config;
@@ -1761,6 +1978,12 @@ static const struct niu_phy_ops phy_ops_10g_fiber = {
1761 .link_status = link_status_10g, 1978 .link_status = link_status_10g,
1762}; 1979};
1763 1980
1981static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
1982 .serdes_init = serdes_init_10g,
1983 .xcvr_init = xcvr_init_10g_bcm8706,
1984 .link_status = link_status_10g_hotplug,
1985};
1986
1764static const struct niu_phy_ops phy_ops_10g_copper = { 1987static const struct niu_phy_ops phy_ops_10g_copper = {
1765 .serdes_init = serdes_init_10g, 1988 .serdes_init = serdes_init_10g,
1766 .link_status = link_status_10g, /* XXX */ 1989 .link_status = link_status_10g, /* XXX */
@@ -1792,6 +2015,11 @@ static const struct niu_phy_template phy_template_10g_fiber = {
1792 .phy_addr_base = 8, 2015 .phy_addr_base = 8,
1793}; 2016};
1794 2017
2018static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2019 .ops = &phy_ops_10g_fiber_hotplug,
2020 .phy_addr_base = 8,
2021};
2022
1795static const struct niu_phy_template phy_template_10g_copper = { 2023static const struct niu_phy_template phy_template_10g_copper = {
1796 .ops = &phy_ops_10g_copper, 2024 .ops = &phy_ops_10g_copper,
1797 .phy_addr_base = 10, 2025 .phy_addr_base = 10,
@@ -1996,6 +2224,13 @@ static int niu_determine_phy_disposition(struct niu *np)
1996 plat_type == PLAT_TYPE_VF_P1) 2224 plat_type == PLAT_TYPE_VF_P1)
1997 phy_addr_off = 8; 2225 phy_addr_off = 8;
1998 phy_addr_off += np->port; 2226 phy_addr_off += np->port;
2227 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2228 tp = &phy_template_10g_fiber_hotplug;
2229 if (np->port == 0)
2230 phy_addr_off = 8;
2231 if (np->port == 1)
2232 phy_addr_off = 12;
2233 }
1999 break; 2234 break;
2000 2235
2001 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2236 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
@@ -6773,6 +7008,37 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np,
6773 return 0; 7008 return 0;
6774} 7009}
6775 7010
7011/* niu board models have a trailing dash version incremented
7012 * with HW rev change. Need to ignore the dash version while
7013 * checking for a match
7014 *
7015 * for example, for the 10G card the current vpd.board_model
7016 * is 501-5283-04, of which -04 is the dash version and has
7017 * to be ignored
7018 */
7019static int niu_board_model_match(struct niu *np, const char *model)
7020{
7021 return !strncmp(np->vpd.board_model, model, strlen(model));
7022}
7023
7024static int niu_pci_vpd_get_nports(struct niu *np)
7025{
7026 int ports = 0;
7027
7028 if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) ||
7029 (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) ||
7030 (niu_board_model_match(np, NIU_ALONSO_BM_STR))) {
7031 ports = 4;
7032 } else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) ||
7033 (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) ||
7034 (niu_board_model_match(np, NIU_FOXXY_BM_STR)) ||
7035 (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) {
7036 ports = 2;
7037 }
7038
7039 return ports;
7040}
7041
6776static void __devinit niu_pci_vpd_validate(struct niu *np) 7042static void __devinit niu_pci_vpd_validate(struct niu *np)
6777{ 7043{
6778 struct net_device *dev = np->dev; 7044 struct net_device *dev = np->dev;
@@ -6799,6 +7065,9 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
6799 } 7065 }
6800 if (np->flags & NIU_FLAGS_10G) 7066 if (np->flags & NIU_FLAGS_10G)
6801 np->mac_xcvr = MAC_XCVR_XPCS; 7067 np->mac_xcvr = MAC_XCVR_XPCS;
7068 } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) {
7069 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
7070 NIU_FLAGS_HOTPLUG_PHY);
6802 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 7071 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
6803 dev_err(np->device, PFX "Illegal phy string [%s].\n", 7072 dev_err(np->device, PFX "Illegal phy string [%s].\n",
6804 np->vpd.phy_type); 7073 np->vpd.phy_type);
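niu_board_model_match() deliberately compares only strlen(model) leading characters, so the VPD string's trailing dash revision never defeats the table lookup. A small, runnable demonstration of the same prefix match:

#include <stdio.h>
#include <string.h>

static int board_model_match(const char *vpd_model, const char *table_entry)
{
	/* prefix compare: the trailing "-NN" HW rev is ignored */
	return !strncmp(vpd_model, table_entry, strlen(table_entry));
}

int main(void)
{
	printf("%d\n", board_model_match("501-7961-04", "501-7961")); /* 1 */
	printf("%d\n", board_model_match("501-7606-01", "501-7961")); /* 0 */
	return 0;
}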
@@ -6987,11 +7256,17 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
6987 if (parent->plat_type == PLAT_TYPE_NIU) { 7256 if (parent->plat_type == PLAT_TYPE_NIU) {
6988 parent->num_ports = 2; 7257 parent->num_ports = 2;
6989 } else { 7258 } else {
6990 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 7259 parent->num_ports = niu_pci_vpd_get_nports(np);
6991 ESPC_NUM_PORTS_MACS_VAL; 7260 if (!parent->num_ports) {
6992 7261 /* Fall back to SPROM as last resort.
6993 if (!parent->num_ports) 7262 * This will fail on most cards.
6994 parent->num_ports = 4; 7263 */
7264 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
7265 ESPC_NUM_PORTS_MACS_VAL;
7266
7267 if (!parent->num_ports)
7268 return -ENODEV;
7269 }
6995 } 7270 }
6996 } 7271 }
6997 7272
@@ -7015,7 +7290,8 @@ static int __devinit phy_record(struct niu_parent *parent,
7015 return 0; 7290 return 0;
7016 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { 7291 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
7017 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && 7292 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
7018 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011)) 7293 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
7294 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
7019 return 0; 7295 return 0;
7020 } else { 7296 } else {
7021 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) 7297 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
@@ -7262,7 +7538,6 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7262 u32 val; 7538 u32 val;
7263 int err; 7539 int err;
7264 7540
7265
7266 if (!strcmp(np->vpd.model, "SUNW,CP3220") || 7541 if (!strcmp(np->vpd.model, "SUNW,CP3220") ||
7267 !strcmp(np->vpd.model, "SUNW,CP3260")) { 7542 !strcmp(np->vpd.model, "SUNW,CP3260")) {
7268 num_10g = 0; 7543 num_10g = 0;
@@ -7273,6 +7548,12 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7273 phy_encode(PORT_TYPE_1G, 1) | 7548 phy_encode(PORT_TYPE_1G, 1) |
7274 phy_encode(PORT_TYPE_1G, 2) | 7549 phy_encode(PORT_TYPE_1G, 2) |
7275 phy_encode(PORT_TYPE_1G, 3)); 7550 phy_encode(PORT_TYPE_1G, 3));
7551 } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) {
7552 num_10g = 2;
7553 num_1g = 0;
7554 parent->num_ports = 2;
7555 val = (phy_encode(PORT_TYPE_10G, 0) |
7556 phy_encode(PORT_TYPE_10G, 1));
7276 } else { 7557 } else {
7277 err = fill_phy_probe_info(np, parent, info); 7558 err = fill_phy_probe_info(np, parent, info);
7278 if (err) 7559 if (err)
@@ -7733,15 +8014,16 @@ static int __devinit niu_get_invariants(struct niu *np)
7733 8014
7734 have_props = !err; 8015 have_props = !err;
7735 8016
7736 err = niu_get_and_validate_port(np);
7737 if (err)
7738 return err;
7739
7740 err = niu_init_mac_ipp_pcs_base(np); 8017 err = niu_init_mac_ipp_pcs_base(np);
7741 if (err) 8018 if (err)
7742 return err; 8019 return err;
7743 8020
7744 if (!have_props) { 8021 if (have_props) {
8022 err = niu_get_and_validate_port(np);
8023 if (err)
8024 return err;
8025
8026 } else {
7745 if (np->parent->plat_type == PLAT_TYPE_NIU) 8027 if (np->parent->plat_type == PLAT_TYPE_NIU)
7746 return -EINVAL; 8028 return -EINVAL;
7747 8029
@@ -7753,10 +8035,17 @@ static int __devinit niu_get_invariants(struct niu *np)
7753 niu_pci_vpd_fetch(np, offset); 8035 niu_pci_vpd_fetch(np, offset);
7754 nw64(ESPC_PIO_EN, 0); 8036 nw64(ESPC_PIO_EN, 0);
7755 8037
7756 if (np->flags & NIU_FLAGS_VPD_VALID) 8038 if (np->flags & NIU_FLAGS_VPD_VALID) {
7757 niu_pci_vpd_validate(np); 8039 niu_pci_vpd_validate(np);
8040 err = niu_get_and_validate_port(np);
8041 if (err)
8042 return err;
8043 }
7758 8044
7759 if (!(np->flags & NIU_FLAGS_VPD_VALID)) { 8045 if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
8046 err = niu_get_and_validate_port(np);
8047 if (err)
8048 return err;
7760 err = niu_pci_probe_sprom(np); 8049 err = niu_pci_probe_sprom(np);
7761 if (err) 8050 if (err)
7762 return err; 8051 return err;
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 336aed08b275..97ffbe137bcb 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2537,6 +2537,7 @@ struct fcram_hash_ipv6 {
2537 2537
2538#define NIU_PHY_ID_MASK 0xfffff0f0 2538#define NIU_PHY_ID_MASK 0xfffff0f0
2539#define NIU_PHY_ID_BCM8704 0x00206030 2539#define NIU_PHY_ID_BCM8704 0x00206030
2540#define NIU_PHY_ID_BCM8706 0x00206035
2540#define NIU_PHY_ID_BCM5464R 0x002060b0 2541#define NIU_PHY_ID_BCM5464R 0x002060b0
2541#define NIU_PHY_ID_MRVL88X2011 0x01410020 2542#define NIU_PHY_ID_MRVL88X2011 0x01410020
2542 2543
@@ -2937,6 +2938,15 @@ struct rx_ring_info {
2937 2938
2938#define NIU_MAX_MTU 9216 2939#define NIU_MAX_MTU 9216
2939 2940
2941/* VPD strings */
2942#define NIU_QGC_LP_BM_STR "501-7606"
2943#define NIU_2XGF_LP_BM_STR "501-7283"
2944#define NIU_QGC_PEM_BM_STR "501-7765"
2945#define NIU_2XGF_PEM_BM_STR "501-7626"
2946#define NIU_ALONSO_BM_STR "373-0202"
2947#define NIU_FOXXY_BM_STR "501-7961"
2948#define NIU_2XGF_MRVL_BM_STR "SK-6E82"
2949
2940#define NIU_VPD_MIN_MAJOR 3 2950#define NIU_VPD_MIN_MAJOR 3
2941#define NIU_VPD_MIN_MINOR 4 2951#define NIU_VPD_MIN_MINOR 4
2942 2952
@@ -3199,6 +3209,8 @@ struct niu {
3199 struct niu_parent *parent; 3209 struct niu_parent *parent;
3200 3210
3201 u32 flags; 3211 u32 flags;
3212#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removable PHY detected */
3213#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removable PHY */
3202#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */ 3214#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
3203#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ 3215#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
3204#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ 3216#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 963630c65ca9..94e0b7ed76f1 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -89,6 +89,9 @@ int mdiobus_register(struct mii_bus *bus)
89 89
90 phydev->bus = bus; 90 phydev->bus = bus;
91 91
92 /* Run all of the fixups for this PHY */
93 phy_scan_fixups(phydev);
94
92 err = device_register(&phydev->dev); 95 err = device_register(&phydev->dev);
93 96
94 if (err) { 97 if (err) {
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 12fccb1c76dc..3c18bb594957 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -406,8 +406,10 @@ int phy_mii_ioctl(struct phy_device *phydev,
406 406
407 if (mii_data->reg_num == MII_BMCR 407 if (mii_data->reg_num == MII_BMCR
408 && val & BMCR_RESET 408 && val & BMCR_RESET
409 && phydev->drv->config_init) 409 && phydev->drv->config_init) {
410 phy_scan_fixups(phydev);
410 phydev->drv->config_init(phydev); 411 phydev->drv->config_init(phydev);
412 }
411 break; 413 break;
412 414
413 default: 415 default:
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8b1121b02f98..ddf8d51832a6 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -53,6 +53,96 @@ static void phy_device_release(struct device *dev)
53 phy_device_free(to_phy_device(dev)); 53 phy_device_free(to_phy_device(dev));
54} 54}
55 55
56static LIST_HEAD(phy_fixup_list);
57static DEFINE_MUTEX(phy_fixup_lock);
58
59/*
60 * Creates a new phy_fixup and adds it to the list
61 * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
62 * @phy_uid: Used to match against phydev->phy_id (the UID of the PHY)
63 * It can also be PHY_ANY_UID
64 * @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before
65 * comparison
66 * @run: The actual code to be run when a matching PHY is found
67 */
68int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
69 int (*run)(struct phy_device *))
70{
71 struct phy_fixup *fixup;
72
73 fixup = kzalloc(sizeof(struct phy_fixup), GFP_KERNEL);
74 if (!fixup)
75 return -ENOMEM;
76
77 strncpy(fixup->bus_id, bus_id, BUS_ID_SIZE);
78 fixup->phy_uid = phy_uid;
79 fixup->phy_uid_mask = phy_uid_mask;
80 fixup->run = run;
81
82 mutex_lock(&phy_fixup_lock);
83 list_add_tail(&fixup->list, &phy_fixup_list);
84 mutex_unlock(&phy_fixup_lock);
85
86 return 0;
87}
88EXPORT_SYMBOL(phy_register_fixup);
89
90/* Registers a fixup to be run on any PHY with the UID in phy_uid */
91int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
92 int (*run)(struct phy_device *))
93{
94 return phy_register_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask, run);
95}
96EXPORT_SYMBOL(phy_register_fixup_for_uid);
97
98/* Registers a fixup to be run on the PHY with id string bus_id */
99int phy_register_fixup_for_id(const char *bus_id,
100 int (*run)(struct phy_device *))
101{
102 return phy_register_fixup(bus_id, PHY_ANY_UID, 0xffffffff, run);
103}
104EXPORT_SYMBOL(phy_register_fixup_for_id);
105
106/*
107 * Returns 1 if fixup matches phydev in bus_id and phy_uid.
108 * Fixups can be set to match any in one or more fields.
109 */
110static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
111{
112 if (strcmp(fixup->bus_id, phydev->dev.bus_id) != 0)
113 if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0)
114 return 0;
115
116 if ((fixup->phy_uid & fixup->phy_uid_mask) !=
117 (phydev->phy_id & fixup->phy_uid_mask))
118 if (fixup->phy_uid != PHY_ANY_UID)
119 return 0;
120
121 return 1;
122}
123
124/* Runs any matching fixups for this phydev */
125int phy_scan_fixups(struct phy_device *phydev)
126{
127 struct phy_fixup *fixup;
128
129 mutex_lock(&phy_fixup_lock);
130 list_for_each_entry(fixup, &phy_fixup_list, list) {
 131 if (phy_needs_fixup(phydev, fixup)) {
 132 int err = fixup->run(phydev);
 133
 134 if (err < 0) {
 135 mutex_unlock(&phy_fixup_lock);
 136 return err;
 137 }
138 }
139 }
140 mutex_unlock(&phy_fixup_lock);
141
142 return 0;
143}
144EXPORT_SYMBOL(phy_scan_fixups);
145
56struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) 146struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
57{ 147{
58 struct phy_device *dev; 148 struct phy_device *dev;
@@ -179,13 +269,13 @@ void phy_prepare_link(struct phy_device *phydev,
179 * choose to call only the subset of functions which provide 269 * choose to call only the subset of functions which provide
180 * the desired functionality. 270 * the desired functionality.
181 */ 271 */
182struct phy_device * phy_connect(struct net_device *dev, const char *phy_id, 272struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
183 void (*handler)(struct net_device *), u32 flags, 273 void (*handler)(struct net_device *), u32 flags,
184 phy_interface_t interface) 274 phy_interface_t interface)
185{ 275{
186 struct phy_device *phydev; 276 struct phy_device *phydev;
187 277
188 phydev = phy_attach(dev, phy_id, flags, interface); 278 phydev = phy_attach(dev, bus_id, flags, interface);
189 279
190 if (IS_ERR(phydev)) 280 if (IS_ERR(phydev))
191 return phydev; 281 return phydev;
@@ -226,7 +316,7 @@ static int phy_compare_id(struct device *dev, void *data)
226/** 316/**
227 * phy_attach - attach a network device to a particular PHY device 317 * phy_attach - attach a network device to a particular PHY device
228 * @dev: network device to attach 318 * @dev: network device to attach
229 * @phy_id: PHY device to attach 319 * @bus_id: PHY device to attach
230 * @flags: PHY device's dev_flags 320 * @flags: PHY device's dev_flags
231 * @interface: PHY device's interface 321 * @interface: PHY device's interface
232 * 322 *
@@ -238,7 +328,7 @@ static int phy_compare_id(struct device *dev, void *data)
238 * change. The phy_device is returned to the attaching driver. 328 * change. The phy_device is returned to the attaching driver.
239 */ 329 */
240struct phy_device *phy_attach(struct net_device *dev, 330struct phy_device *phy_attach(struct net_device *dev,
241 const char *phy_id, u32 flags, phy_interface_t interface) 331 const char *bus_id, u32 flags, phy_interface_t interface)
242{ 332{
243 struct bus_type *bus = &mdio_bus_type; 333 struct bus_type *bus = &mdio_bus_type;
244 struct phy_device *phydev; 334 struct phy_device *phydev;
@@ -246,12 +336,12 @@ struct phy_device *phy_attach(struct net_device *dev,
246 336
247 /* Search the list of PHY devices on the mdio bus for the 337 /* Search the list of PHY devices on the mdio bus for the
248 * PHY with the requested name */ 338 * PHY with the requested name */
249 d = bus_find_device(bus, NULL, (void *)phy_id, phy_compare_id); 339 d = bus_find_device(bus, NULL, (void *)bus_id, phy_compare_id);
250 340
251 if (d) { 341 if (d) {
252 phydev = to_phy_device(d); 342 phydev = to_phy_device(d);
253 } else { 343 } else {
254 printk(KERN_ERR "%s not found\n", phy_id); 344 printk(KERN_ERR "%s not found\n", bus_id);
255 return ERR_PTR(-ENODEV); 345 return ERR_PTR(-ENODEV);
256 } 346 }
257 347
@@ -271,7 +361,7 @@ struct phy_device *phy_attach(struct net_device *dev,
271 361
272 if (phydev->attached_dev) { 362 if (phydev->attached_dev) {
273 printk(KERN_ERR "%s: %s already attached\n", 363 printk(KERN_ERR "%s: %s already attached\n",
274 dev->name, phy_id); 364 dev->name, bus_id);
275 return ERR_PTR(-EBUSY); 365 return ERR_PTR(-EBUSY);
276 } 366 }
277 367
@@ -287,6 +377,11 @@ struct phy_device *phy_attach(struct net_device *dev,
287 if (phydev->drv->config_init) { 377 if (phydev->drv->config_init) {
288 int err; 378 int err;
289 379
380 err = phy_scan_fixups(phydev);
381
382 if (err < 0)
383 return ERR_PTR(err);
384
290 err = phydev->drv->config_init(phydev); 385 err = phydev->drv->config_init(phydev);
291 386
292 if (err < 0) 387 if (err < 0)
@@ -395,6 +490,7 @@ EXPORT_SYMBOL(genphy_config_advert);
395 */ 490 */
396int genphy_setup_forced(struct phy_device *phydev) 491int genphy_setup_forced(struct phy_device *phydev)
397{ 492{
493 int err;
398 int ctl = 0; 494 int ctl = 0;
399 495
400 phydev->pause = phydev->asym_pause = 0; 496 phydev->pause = phydev->asym_pause = 0;
@@ -407,17 +503,26 @@ int genphy_setup_forced(struct phy_device *phydev)
407 if (DUPLEX_FULL == phydev->duplex) 503 if (DUPLEX_FULL == phydev->duplex)
408 ctl |= BMCR_FULLDPLX; 504 ctl |= BMCR_FULLDPLX;
409 505
410 ctl = phy_write(phydev, MII_BMCR, ctl); 506 err = phy_write(phydev, MII_BMCR, ctl);
411 507
412 if (ctl < 0) 508 if (err < 0)
413 return ctl; 509 return err;
510
511 /*
512 * Run the fixups on this PHY, just in case the
513 * board code needs to change something after a reset
514 */
515 err = phy_scan_fixups(phydev);
516
517 if (err < 0)
518 return err;
414 519
415 /* We just reset the device, so we'd better configure any 520 /* We just reset the device, so we'd better configure any
416 * settings the PHY requires to operate */ 521 * settings the PHY requires to operate */
417 if (phydev->drv->config_init) 522 if (phydev->drv->config_init)
418 ctl = phydev->drv->config_init(phydev); 523 err = phydev->drv->config_init(phydev);
419 524
420 return ctl; 525 return err;
421} 526}
422 527
423 528
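Taken together, the mdio_bus/phy/phy_device hunks run registered fixups at bus scan, at attach, and after any reset path that re-enters config_init. A hedged sketch of how board code might use the new API; the PHY id, mask and register write are invented for illustration:

#include <linux/init.h>
#include <linux/phy.h>

#define EXAMPLE_PHY_ID   0x00206030	/* masked OUI/model to match (illustrative) */
#define EXAMPLE_PHY_MASK 0xfffffff0

static int example_phy_fixup(struct phy_device *phydev)
{
	/* runs at bus registration, attach, and after BMCR_RESET */
	return phy_write(phydev, 0x18, 0x0c00);	/* fictional errata poke */
}

static int __init example_board_init(void)
{
	return phy_register_fixup_for_uid(EXAMPLE_PHY_ID, EXAMPLE_PHY_MASK,
					  example_phy_fixup);
}
/* hooked from board setup, e.g. via arch_initcall(example_board_init) */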
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index dcbe01b0ca0d..157fd932e951 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -86,7 +86,7 @@
86#include "s2io.h" 86#include "s2io.h"
87#include "s2io-regs.h" 87#include "s2io-regs.h"
88 88
89#define DRV_VERSION "2.0.26.20" 89#define DRV_VERSION "2.0.26.22"
90 90
91/* S2io Driver name & version. */ 91/* S2io Driver name & version. */
92static char s2io_driver_name[] = "Neterion"; 92static char s2io_driver_name[] = "Neterion";
@@ -117,20 +117,6 @@ static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
117 117
118#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \ 118#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119 ADAPTER_STATUS_RMAC_LOCAL_FAULT))) 119 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
121#define PANIC 1
122#define LOW 2
123static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
124{
125 struct mac_info *mac_control;
126
127 mac_control = &sp->mac_control;
128 if (rxb_size <= rxd_count[sp->rxd_mode])
129 return PANIC;
130 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
131 return LOW;
132 return 0;
133}
134 120
135static inline int is_s2io_card_up(const struct s2io_nic * sp) 121static inline int is_s2io_card_up(const struct s2io_nic * sp)
136{ 122{
@@ -2458,7 +2444,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
2458 for (i = 0; i < config->tx_fifo_num; i++) { 2444 for (i = 0; i < config->tx_fifo_num; i++) {
2459 unsigned long flags; 2445 unsigned long flags;
2460 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags); 2446 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2461 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 2447 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2462 txdp = (struct TxD *) \ 2448 txdp = (struct TxD *) \
2463 mac_control->fifos[i].list_info[j].list_virt_addr; 2449 mac_control->fifos[i].list_info[j].list_virt_addr;
2464 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); 2450 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
@@ -2544,7 +2530,6 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2544 struct config_param *config; 2530 struct config_param *config;
2545 u64 tmp; 2531 u64 tmp;
2546 struct buffAdd *ba; 2532 struct buffAdd *ba;
2547 unsigned long flags;
2548 struct RxD_t *first_rxdp = NULL; 2533 struct RxD_t *first_rxdp = NULL;
2549 u64 Buffer0_ptr = 0, Buffer1_ptr = 0; 2534 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2550 struct RxD1 *rxdp1; 2535 struct RxD1 *rxdp1;
@@ -2592,15 +2577,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2592 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2577 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2593 dev->name, rxdp); 2578 dev->name, rxdp);
2594 } 2579 }
2595 if(!napi) { 2580
2596 spin_lock_irqsave(&nic->put_lock, flags);
2597 mac_control->rings[ring_no].put_pos =
2598 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2599 spin_unlock_irqrestore(&nic->put_lock, flags);
2600 } else {
2601 mac_control->rings[ring_no].put_pos =
2602 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2603 }
2604 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2581 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2605 ((nic->rxd_mode == RXD_MODE_3B) && 2582 ((nic->rxd_mode == RXD_MODE_3B) &&
2606 (rxdp->Control_2 & s2BIT(0)))) { 2583 (rxdp->Control_2 & s2BIT(0)))) {
@@ -2978,7 +2955,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
2978{ 2955{
2979 struct s2io_nic *nic = ring_data->nic; 2956 struct s2io_nic *nic = ring_data->nic;
2980 struct net_device *dev = (struct net_device *) nic->dev; 2957 struct net_device *dev = (struct net_device *) nic->dev;
2981 int get_block, put_block, put_offset; 2958 int get_block, put_block;
2982 struct rx_curr_get_info get_info, put_info; 2959 struct rx_curr_get_info get_info, put_info;
2983 struct RxD_t *rxdp; 2960 struct RxD_t *rxdp;
2984 struct sk_buff *skb; 2961 struct sk_buff *skb;
@@ -2987,19 +2964,11 @@ static void rx_intr_handler(struct ring_info *ring_data)
2987 struct RxD1* rxdp1; 2964 struct RxD1* rxdp1;
2988 struct RxD3* rxdp3; 2965 struct RxD3* rxdp3;
2989 2966
2990 spin_lock(&nic->rx_lock);
2991
2992 get_info = ring_data->rx_curr_get_info; 2967 get_info = ring_data->rx_curr_get_info;
2993 get_block = get_info.block_index; 2968 get_block = get_info.block_index;
2994 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info)); 2969 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2995 put_block = put_info.block_index; 2970 put_block = put_info.block_index;
2996 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; 2971 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2997 if (!napi) {
2998 spin_lock(&nic->put_lock);
2999 put_offset = ring_data->put_pos;
3000 spin_unlock(&nic->put_lock);
3001 } else
3002 put_offset = ring_data->put_pos;
3003 2972
3004 while (RXD_IS_UP2DT(rxdp)) { 2973 while (RXD_IS_UP2DT(rxdp)) {
3005 /* 2974 /*
@@ -3016,7 +2985,6 @@ static void rx_intr_handler(struct ring_info *ring_data)
3016 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2985 DBG_PRINT(ERR_DBG, "%s: The skb is ",
3017 dev->name); 2986 dev->name);
3018 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 2987 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
3019 spin_unlock(&nic->rx_lock);
3020 return; 2988 return;
3021 } 2989 }
3022 if (nic->rxd_mode == RXD_MODE_1) { 2990 if (nic->rxd_mode == RXD_MODE_1) {
@@ -3072,8 +3040,6 @@ static void rx_intr_handler(struct ring_info *ring_data)
3072 } 3040 }
3073 } 3041 }
3074 } 3042 }
3075
3076 spin_unlock(&nic->rx_lock);
3077} 3043}
3078 3044
3079/** 3045/**
@@ -4105,7 +4071,6 @@ static int s2io_close(struct net_device *dev)
4105 do_s2io_delete_unicast_mc(sp, tmp64); 4071 do_s2io_delete_unicast_mc(sp, tmp64);
4106 } 4072 }
4107 4073
4108 /* Reset card, kill tasklet and free Tx and Rx buffers. */
4109 s2io_card_down(sp); 4074 s2io_card_down(sp);
4110 4075
4111 return 0; 4076 return 0;
@@ -4370,29 +4335,9 @@ s2io_alarm_handle(unsigned long data)
4370 4335
4371static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n) 4336static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4372{ 4337{
4373 int rxb_size, level; 4338 if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4374 4339 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4375 if (!sp->lro) { 4340 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4376 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4377 level = rx_buffer_level(sp, rxb_size, rng_n);
4378
4379 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4380 int ret;
4381 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4382 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4383 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4384 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4385 __FUNCTION__);
4386 clear_bit(0, (&sp->tasklet_status));
4387 return -1;
4388 }
4389 clear_bit(0, (&sp->tasklet_status));
4390 } else if (level == LOW)
4391 tasklet_schedule(&sp->task);
4392
4393 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4394 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4395 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4396 } 4341 }
4397 return 0; 4342 return 0;
4398} 4343}
@@ -6770,49 +6715,6 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6770} 6715}
6771 6716
6772/** 6717/**
6773 * s2io_tasklet - Bottom half of the ISR.
6774 * @dev_adr : address of the device structure in dma_addr_t format.
6775 * Description:
6776 * This is the tasklet or the bottom half of the ISR. This is
6777 * an extension of the ISR which is scheduled by the scheduler to be run
6778 * when the load on the CPU is low. All low priority tasks of the ISR can
6779 * be pushed into the tasklet. For now the tasklet is used only to
6780 * replenish the Rx buffers in the Rx buffer descriptors.
6781 * Return value:
6782 * void.
6783 */
6784
6785static void s2io_tasklet(unsigned long dev_addr)
6786{
6787 struct net_device *dev = (struct net_device *) dev_addr;
6788 struct s2io_nic *sp = dev->priv;
6789 int i, ret;
6790 struct mac_info *mac_control;
6791 struct config_param *config;
6792
6793 mac_control = &sp->mac_control;
6794 config = &sp->config;
6795
6796 if (!TASKLET_IN_USE) {
6797 for (i = 0; i < config->rx_ring_num; i++) {
6798 ret = fill_rx_buffers(sp, i);
6799 if (ret == -ENOMEM) {
6800 DBG_PRINT(INFO_DBG, "%s: Out of ",
6801 dev->name);
6802 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6803 break;
6804 } else if (ret == -EFILL) {
6805 DBG_PRINT(INFO_DBG,
6806 "%s: Rx Ring %d is full\n",
6807 dev->name, i);
6808 break;
6809 }
6810 }
6811 clear_bit(0, (&sp->tasklet_status));
6812 }
6813}
6814
6815/**
6816 * s2io_set_link - Set the Link status 6718 * s2io_set_link - Set the Link status
6817 * @data: long pointer to device private structure 6719 * @data: long pointer to device private structure
6818 * Description: Sets the link status for the adapter 6720 * Description: Sets the link status for the adapter
@@ -7161,7 +7063,6 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7161{ 7063{
7162 int cnt = 0; 7064 int cnt = 0;
7163 struct XENA_dev_config __iomem *bar0 = sp->bar0; 7065 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7164 unsigned long flags;
7165 register u64 val64 = 0; 7066 register u64 val64 = 0;
7166 struct config_param *config; 7067 struct config_param *config;
7167 config = &sp->config; 7068 config = &sp->config;
@@ -7186,9 +7087,6 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7186 7087
7187 s2io_rem_isr(sp); 7088 s2io_rem_isr(sp);
7188 7089
7189 /* Kill tasklet. */
7190 tasklet_kill(&sp->task);
7191
7192 /* Check if the device is Quiescent and then Reset the NIC */ 7090 /* Check if the device is Quiescent and then Reset the NIC */
7193 while(do_io) { 7091 while(do_io) {
7194 /* As per the HW requirement we need to replenish the 7092 /* As per the HW requirement we need to replenish the
@@ -7223,9 +7121,7 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7223 free_tx_buffers(sp); 7121 free_tx_buffers(sp);
7224 7122
7225 /* Free all Rx buffers */ 7123 /* Free all Rx buffers */
7226 spin_lock_irqsave(&sp->rx_lock, flags);
7227 free_rx_buffers(sp); 7124 free_rx_buffers(sp);
7228 spin_unlock_irqrestore(&sp->rx_lock, flags);
7229 7125
7230 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state)); 7126 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7231} 7127}
@@ -7314,9 +7210,6 @@ static int s2io_card_up(struct s2io_nic * sp)
7314 7210
7315 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); 7211 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7316 7212
7317 /* Enable tasklet for the device */
7318 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
7319
7320 /* Enable select interrupts */ 7213 /* Enable select interrupts */
7321 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); 7214 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7322 if (sp->config.intr_type != INTA) 7215 if (sp->config.intr_type != INTA)
@@ -8119,20 +8012,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8119 s2io_reset(sp); 8012 s2io_reset(sp);
8120 8013
8121 /* 8014 /*
8122 * Initialize the tasklet status and link state flags 8015 * Initialize link state flags
8123 * and the card state parameter 8016 * and the card state parameter
8124 */ 8017 */
8125 sp->tasklet_status = 0;
8126 sp->state = 0; 8018 sp->state = 0;
8127 8019
8128 /* Initialize spinlocks */ 8020 /* Initialize spinlocks */
8129 for (i = 0; i < sp->config.tx_fifo_num; i++) 8021 for (i = 0; i < sp->config.tx_fifo_num; i++)
8130 spin_lock_init(&mac_control->fifos[i].tx_lock); 8022 spin_lock_init(&mac_control->fifos[i].tx_lock);
8131 8023
8132 if (!napi)
8133 spin_lock_init(&sp->put_lock);
8134 spin_lock_init(&sp->rx_lock);
8135
8136 /* 8024 /*
8137 * SXE-002: Configure link and activity LED to init state 8025 * SXE-002: Configure link and activity LED to init state
8138 * on driver load. 8026 * on driver load.
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index e68fdf7e4260..ce53a02105f2 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -703,9 +703,6 @@ struct ring_info {
703 */ 703 */
704 struct rx_curr_get_info rx_curr_get_info; 704 struct rx_curr_get_info rx_curr_get_info;
705 705
706 /* Index to the absolute position of the put pointer of Rx ring */
707 int put_pos;
708
709 /* Buffer Address store. */ 706 /* Buffer Address store. */
710 struct buffAdd **ba; 707 struct buffAdd **ba;
711 struct s2io_nic *nic; 708 struct s2io_nic *nic;
@@ -868,8 +865,6 @@ struct s2io_nic {
868 int device_enabled_once; 865 int device_enabled_once;
869 866
870 char name[60]; 867 char name[60];
871 struct tasklet_struct task;
872 volatile unsigned long tasklet_status;
873 868
874 /* Timer that handles I/O errors/exceptions */ 869 /* Timer that handles I/O errors/exceptions */
875 struct timer_list alarm_timer; 870 struct timer_list alarm_timer;
@@ -879,8 +874,6 @@ struct s2io_nic {
879 874
880 atomic_t rx_bufs_left[MAX_RX_RINGS]; 875 atomic_t rx_bufs_left[MAX_RX_RINGS];
881 876
882 spinlock_t put_lock;
883
884#define PROMISC 1 877#define PROMISC 1
885#define ALL_MULTI 2 878#define ALL_MULTI 2
886 879
@@ -964,7 +957,6 @@ struct s2io_nic {
964 u8 lro; 957 u8 lro;
965 u16 lro_max_aggr_per_sess; 958 u16 lro_max_aggr_per_sess;
966 volatile unsigned long state; 959 volatile unsigned long state;
967 spinlock_t rx_lock;
968 u64 general_int_mask; 960 u64 general_int_mask;
969#define VPD_STRING_LEN 80 961#define VPD_STRING_LEN 80
970 u8 product_name[VPD_STRING_LEN]; 962 u8 product_name[VPD_STRING_LEN];
@@ -1094,7 +1086,6 @@ static void s2io_handle_errors(void * dev_id);
1094static int s2io_starter(void); 1086static int s2io_starter(void);
1095static void s2io_closer(void); 1087static void s2io_closer(void);
1096static void s2io_tx_watchdog(struct net_device *dev); 1088static void s2io_tx_watchdog(struct net_device *dev);
1097static void s2io_tasklet(unsigned long dev_addr);
1098static void s2io_set_multicast(struct net_device *dev); 1089static void s2io_set_multicast(struct net_device *dev);
1099static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); 1090static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
1100static void s2io_link(struct s2io_nic * sp, int link); 1091static void s2io_link(struct s2io_nic * sp, int link);
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 78994ede0cb0..6261201403cd 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -825,7 +825,8 @@ static struct platform_driver sgiseeq_driver = {
825 .probe = sgiseeq_probe, 825 .probe = sgiseeq_probe,
826 .remove = __devexit_p(sgiseeq_remove), 826 .remove = __devexit_p(sgiseeq_remove),
827 .driver = { 827 .driver = {
828 .name = "sgiseeq" 828 .name = "sgiseeq",
829 .owner = THIS_MODULE,
829 } 830 }
830}; 831};
831 832
@@ -850,3 +851,4 @@ module_exit(sgiseeq_module_exit);
850MODULE_DESCRIPTION("SGI Seeq 8003 driver"); 851MODULE_DESCRIPTION("SGI Seeq 8003 driver");
851MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>"); 852MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
852MODULE_LICENSE("GPL"); 853MODULE_LICENSE("GPL");
854MODULE_ALIAS("platform:sgiseeq");
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 76cc1d3adf71..4e2800205189 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -92,6 +92,7 @@ module_param(tx_fifo_kb, int, 0400);
92MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)"); 92MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");
93 93
94MODULE_LICENSE("GPL"); 94MODULE_LICENSE("GPL");
95MODULE_ALIAS("platform:smc911x");
95 96
96/* 97/*
97 * The internal workings of the driver. If you are changing anything 98 * The internal workings of the driver. If you are changing anything
@@ -243,7 +244,7 @@ static void smc911x_reset(struct net_device *dev)
243 do { 244 do {
244 udelay(10); 245 udelay(10);
245 reg = SMC_GET_PMT_CTRL() & PMT_CTRL_READY_; 246 reg = SMC_GET_PMT_CTRL() & PMT_CTRL_READY_;
246 } while ( timeout-- && !reg); 247 } while (--timeout && !reg);
247 if (timeout == 0) { 248 if (timeout == 0) {
248 PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name); 249 PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
249 return; 250 return;
@@ -267,7 +268,7 @@ static void smc911x_reset(struct net_device *dev)
267 resets++; 268 resets++;
268 break; 269 break;
269 } 270 }
270 } while ( timeout-- && (reg & HW_CFG_SRST_)); 271 } while (--timeout && (reg & HW_CFG_SRST_));
271 } 272 }
272 if (timeout == 0) { 273 if (timeout == 0) {
273 PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name); 274 PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
@@ -413,7 +414,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
413 do { 414 do {
414 udelay(10); 415 udelay(10);
415 reg = SMC_GET_RX_DP_CTRL() & RX_DP_CTRL_FFWD_BUSY_; 416 reg = SMC_GET_RX_DP_CTRL() & RX_DP_CTRL_FFWD_BUSY_;
416 } while ( timeout-- && reg); 417 } while (--timeout && reg);
417 if (timeout == 0) { 418 if (timeout == 0) {
418 PRINTK("%s: timeout waiting for RX fast forward\n", dev->name); 419 PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
419 } 420 }
@@ -2262,6 +2263,7 @@ static struct platform_driver smc911x_driver = {
2262 .resume = smc911x_drv_resume, 2263 .resume = smc911x_drv_resume,
2263 .driver = { 2264 .driver = {
2264 .name = CARDNAME, 2265 .name = CARDNAME,
2266 .owner = THIS_MODULE,
2265 }, 2267 },
2266}; 2268};
2267 2269
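The three smc911x loop changes fix the classic post-decrement timeout bug: do { ... } while (timeout-- && busy) leaves timeout at -1 when it expires, so the following "if (timeout == 0)" error report never triggers; pre-decrement leaves it at exactly 0. A runnable demonstration:

#include <stdio.h>

int main(void)
{
	int timeout, busy = 1;	/* pretend the hardware never goes ready */

	timeout = 3;
	do { /* poll */ } while (timeout-- && busy);
	printf("post-decrement: timeout = %d\n", timeout);	/* -1: check missed */

	timeout = 3;
	do { /* poll */ } while (--timeout && busy);
	printf("pre-decrement:  timeout = %d\n", timeout);	/* 0: check fires */
	return 0;
}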
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 600b92af3334..a188e33484e6 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -132,6 +132,7 @@ module_param(watchdog, int, 0400);
132MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); 132MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
133 133
134MODULE_LICENSE("GPL"); 134MODULE_LICENSE("GPL");
135MODULE_ALIAS("platform:smc91x");
135 136
136/* 137/*
137 * The internal workings of the driver. If you are changing anything 138 * The internal workings of the driver. If you are changing anything
@@ -2308,6 +2309,7 @@ static struct platform_driver smc_driver = {
2308 .resume = smc_drv_resume, 2309 .resume = smc_drv_resume,
2309 .driver = { 2310 .driver = {
2310 .name = CARDNAME, 2311 .name = CARDNAME,
2312 .owner = THIS_MODULE,
2311 }, 2313 },
2312}; 2314};
2313 2315
diff --git a/drivers/net/sni_82596.c b/drivers/net/sni_82596.c
index 2cf6794acb4f..854ccf2b4105 100644
--- a/drivers/net/sni_82596.c
+++ b/drivers/net/sni_82596.c
@@ -44,6 +44,7 @@ static const char sni_82596_string[] = "snirm_82596";
44MODULE_AUTHOR("Thomas Bogendoerfer"); 44MODULE_AUTHOR("Thomas Bogendoerfer");
45MODULE_DESCRIPTION("i82596 driver"); 45MODULE_DESCRIPTION("i82596 driver");
46MODULE_LICENSE("GPL"); 46MODULE_LICENSE("GPL");
47MODULE_ALIAS("platform:snirm_82596");
47module_param(i596_debug, int, 0); 48module_param(i596_debug, int, 0);
48MODULE_PARM_DESC(i596_debug, "82596 debug mask"); 49MODULE_PARM_DESC(i596_debug, "82596 debug mask");
49 50
@@ -166,6 +167,7 @@ static struct platform_driver sni_82596_driver = {
166 .remove = __devexit_p(sni_82596_driver_remove), 167 .remove = __devexit_p(sni_82596_driver_remove),
167 .driver = { 168 .driver = {
168 .name = sni_82596_string, 169 .name = sni_82596_string,
170 .owner = THIS_MODULE,
169 }, 171 },
170}; 172};
171 173
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 17585e5eed53..e83b166aa6b9 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -625,6 +625,12 @@ static void __init bdx_firmware_endianess(void)
625 s_firmLoad[i] = CPU_CHIP_SWAP32(s_firmLoad[i]); 625 s_firmLoad[i] = CPU_CHIP_SWAP32(s_firmLoad[i]);
626} 626}
627 627
628static int bdx_range_check(struct bdx_priv *priv, u32 offset)
629{
630 return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
631 -EINVAL : 0;
632}
633
628static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) 634static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
629{ 635{
630 struct bdx_priv *priv = ndev->priv; 636 struct bdx_priv *priv = ndev->priv;
@@ -643,9 +649,15 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
643 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); 649 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
644 } 650 }
645 651
652 if (!capable(CAP_NET_ADMIN))
653 return -EPERM;
654
646 switch (data[0]) { 655 switch (data[0]) {
647 656
648 case BDX_OP_READ: 657 case BDX_OP_READ:
658 error = bdx_range_check(priv, data[1]);
659 if (error < 0)
660 return error;
649 data[2] = READ_REG(priv, data[1]); 661 data[2] = READ_REG(priv, data[1]);
650 DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2], 662 DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
651 data[2]); 663 data[2]);
@@ -655,6 +667,9 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
655 break; 667 break;
656 668
657 case BDX_OP_WRITE: 669 case BDX_OP_WRITE:
670 error = bdx_range_check(priv, data[1]);
671 if (error < 0)
672 return error;
658 WRITE_REG(priv, data[1], data[2]); 673 WRITE_REG(priv, data[1], data[2]);
659 DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]); 674 DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
660 break; 675 break;
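The tehuti hunks close two holes in the private ioctl: any user could previously hit READ_REG/WRITE_REG, and the register offset came straight from userspace. The pattern, sketched with illustrative names (REGS_SIZE, OP_READ, do_mmio_*) in place of the driver's BDX_* macros:

#include <linux/capability.h>
#include <linux/types.h>

static int priv_reg_ioctl(u32 op, u32 offset, u32 *val)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;		/* gate before parsing the request */

	if (offset > REGS_SIZE)
		return -EINVAL;		/* untrusted offset must stay in-window */

	if (op == OP_READ)
		*val = do_mmio_read(offset);
	else
		do_mmio_write(offset, *val);
	return 0;
}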
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index bc4c62b8e81a..e3f74c9f78bd 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4017,6 +4017,8 @@ static int tg3_halt(struct tg3 *, int, int);
4017 * Invoked with tp->lock held. 4017 * Invoked with tp->lock held.
4018 */ 4018 */
4019static int tg3_restart_hw(struct tg3 *tp, int reset_phy) 4019static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4020 __releases(tp->lock)
4021 __acquires(tp->lock)
4020{ 4022{
4021 int err; 4023 int err;
4022 4024
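__releases()/__acquires() are sparse annotations: they record that tg3_restart_hw() is entered with tp->lock held, drops it internally, and retakes it before returning, so "make C=1" does not flag a context imbalance. A minimal sketch on an invented foo_reinit():

#include <linux/spinlock.h>

struct foo { spinlock_t lock; };

static int foo_reinit(struct foo *fp)
	__releases(fp->lock)
	__acquires(fp->lock)
{
	spin_unlock_bh(&fp->lock);	/* lock is held by the caller on entry */
	/* ... sleepable reinit work happens here ... */
	spin_lock_bh(&fp->lock);	/* retaken before returning */
	return 0;
}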
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 6f33f84d37b0..6017d5267d08 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -162,6 +162,7 @@ static struct platform_driver tsi_eth_driver = {
162 .remove = tsi108_ether_remove, 162 .remove = tsi108_ether_remove,
163 .driver = { 163 .driver = {
164 .name = "tsi-ethernet", 164 .name = "tsi-ethernet",
165 .owner = THIS_MODULE,
165 }, 166 },
166}; 167};
167 168
@@ -1729,3 +1730,4 @@ module_exit(tsi108_ether_exit);
1729MODULE_AUTHOR("Tundra Semiconductor Corporation"); 1730MODULE_AUTHOR("Tundra Semiconductor Corporation");
1730MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver"); 1731MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
1731MODULE_LICENSE("GPL"); 1732MODULE_LICENSE("GPL");
1733MODULE_ALIAS("platform:tsi-ethernet");
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 333961bb7873..c0dd25ba7a18 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2183,7 +2183,6 @@ typhoon_resume(struct pci_dev *pdev)
2183 } 2183 }
2184 2184
2185 netif_device_attach(dev); 2185 netif_device_attach(dev);
2186 netif_start_queue(dev);
2187 return 0; 2186 return 0;
2188 2187
2189reset: 2188reset:
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 2f11254bcc07..281ce3d39532 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3932,7 +3932,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3932 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 3932 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3933 fixed_link = of_get_property(np, "fixed-link", NULL); 3933 fixed_link = of_get_property(np, "fixed-link", NULL);
3934 if (fixed_link) { 3934 if (fixed_link) {
3935 ug_info->mdio_bus = 0; 3935 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "0");
3936 ug_info->phy_address = fixed_link[0]; 3936 ug_info->phy_address = fixed_link[0];
3937 phy = NULL; 3937 phy = NULL;
3938 } else { 3938 } else {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index ed1afaf683a4..6b8d882d197b 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -605,7 +605,6 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
605static void velocity_init_cam_filter(struct velocity_info *vptr) 605static void velocity_init_cam_filter(struct velocity_info *vptr)
606{ 606{
607 struct mac_regs __iomem * regs = vptr->mac_regs; 607 struct mac_regs __iomem * regs = vptr->mac_regs;
608 unsigned short vid;
609 608
610 /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */ 609 /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
611 WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG); 610 WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
@@ -617,29 +616,33 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 	mac_set_cam_mask(regs, vptr->mCAMmask);
 
-	/* Enable first VCAM */
+	/* Enable VCAMs */
 	if (vptr->vlgrp) {
-		for (vid = 0; vid < VLAN_VID_MASK; vid++) {
-			if (vlan_group_get_device(vptr->vlgrp, vid)) {
-				/* If Tagging option is enabled and
-				   VLAN ID is not zero, then
-				   turn on MCFG_RTGOPT also */
-				if (vid != 0)
-					WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
+		unsigned int vid, i = 0;
+
+		if (!vlan_group_get_device(vptr->vlgrp, 0))
+			WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
 
-				mac_set_vlan_cam(regs, 0, (u8 *) &vid);
+		for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
+			if (vlan_group_get_device(vptr->vlgrp, vid)) {
+				mac_set_vlan_cam(regs, i, (u8 *) &vid);
+				vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
+				if (++i >= VCAM_SIZE)
+					break;
 			}
 		}
-		vptr->vCAMmask[0] |= 1;
 		mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
-	} else {
-		u16 temp = 0;
-		mac_set_vlan_cam(regs, 0, (u8 *) &temp);
-		temp = 1;
-		mac_set_vlan_cam_mask(regs, (u8 *) &temp);
 	}
 }
 
+static void velocity_vlan_rx_register(struct net_device *dev,
+				      struct vlan_group *grp)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+
+	vptr->vlgrp = grp;
+}
+
 static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
@@ -959,11 +962,13 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 
 	dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid;
 	dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid;
+	dev->vlan_rx_register = velocity_vlan_rx_register;
 
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	dev->features |= NETIF_F_SG;
 #endif
-	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER;
+	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+		NETIF_F_HW_VLAN_RX;
 
 	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
 		dev->features |= NETIF_F_IP_CSUM;
@@ -1597,8 +1602,13 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	skb_put(skb, pkt_len - 4);
 	skb->protocol = eth_type_trans(skb, vptr->dev);
 
+	if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
+		vlan_hwaccel_rx(skb, vptr->vlgrp,
+				swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
+	} else
+		netif_rx(skb);
+
 	stats->rx_bytes += pkt_len;
-	netif_rx(skb);
 
 	return 0;
 }
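
The rewritten loop above enables one VCAM slot per configured VLAN and
records each enabled slot in vptr->vCAMmask, one bit per slot: byte
i / 8, bit i % 8. A standalone sketch of that bitmap bookkeeping
(illustrative names only, not driver code):

	#include <stdio.h>

	/* Mark CAM slot i as enabled: eight slots per mask byte. */
	static void cam_mask_set(unsigned char *mask, unsigned int i)
	{
		mask[i / 8] |= 0x1 << (i % 8);
	}

	int main(void)
	{
		unsigned char mask[8] = { 0 };

		cam_mask_set(mask, 0);	/* slot 0  -> mask[0], bit 0 */
		cam_mask_set(mask, 10);	/* slot 10 -> mask[1], bit 2 */
		printf("%02x %02x\n", mask[0], mask[1]);	/* prints: 01 04 */
		return 0;
	}
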
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index c4c8eab8574f..c2cc42f723d5 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -402,7 +402,7 @@ static int __init c101_init(void)
 #ifdef MODULE
 		printk(KERN_INFO "c101: no card initialized\n");
 #endif
-		return -ENOSYS;	/* no parameters specified, abort */
+		return -EINVAL;	/* no parameters specified, abort */
 	}
 
 	printk(KERN_INFO "%s\n", version);
@@ -420,11 +420,11 @@ static int __init c101_init(void)
 		c101_run(irq, ram);
 
 		if (*hw == '\x0')
-			return first_card ? 0 : -ENOSYS;
+			return first_card ? 0 : -EINVAL;
 	}while(*hw++ == ':');
 
 	printk(KERN_ERR "c101: invalid hardware parameters\n");
-	return first_card ? 0 : -ENOSYS;
+	return first_card ? 0 : -EINVAL;
 }
 
 
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index c4ab0326f911..520bb0b1a9a2 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1090,10 +1090,6 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
 	pvc_device *pvc = NULL;
 	struct net_device *dev;
 	int result, used;
-	char * prefix = "pvc%d";
-
-	if (type == ARPHRD_ETHER)
-		prefix = "pvceth%d";
 
 	if ((pvc = add_pvc(frad, dlci)) == NULL) {
 		printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7483d45bc5bc..e62018a36133 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1809,3 +1809,5 @@ module_exit(netif_exit);
 
 MODULE_DESCRIPTION("Xen virtual network device frontend");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vif");
+MODULE_ALIAS("xennet");
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1bd5fb30237d..e3dc8f8d0c3e 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1930,6 +1930,20 @@ config FB_VIRTUAL
 
 	  If unsure, say N.
 
+config XEN_FBDEV_FRONTEND
+	tristate "Xen virtual frame buffer support"
+	depends on FB && XEN
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	select FB_DEFERRED_IO
+	default y
+	help
+	  This driver implements the front-end of the Xen virtual
+	  frame buffer driver. It communicates with a back-end
+	  in another domain.
+
 source "drivers/video/omap/Kconfig"
 
 source "drivers/video/backlight/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 11c0e5e05f21..f172b9b73314 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -114,6 +114,7 @@ obj-$(CONFIG_FB_PS3) += ps3fb.o
 obj-$(CONFIG_FB_SM501)            += sm501fb.o
 obj-$(CONFIG_FB_XILINX)           += xilinxfb.o
 obj-$(CONFIG_FB_OMAP)             += omap/
+obj-$(CONFIG_XEN_FBDEV_FRONTEND)  += xen-fbfront.o
 
 # Platform or fallback drivers go here
 obj-$(CONFIG_FB_UVESA)            += uvesafb.o
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
new file mode 100644
index 000000000000..619a6f8d65a2
--- /dev/null
+++ b/drivers/video/xen-fbfront.c
@@ -0,0 +1,550 @@
1/*
2 * Xen para-virtual frame buffer device
3 *
4 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
5 * Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
6 *
7 * Based on linux/drivers/video/q40fb.c
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of this archive for
11 * more details.
12 */
13
14/*
15 * TODO:
16 *
17 * Switch to grant tables when they become capable of dealing with the
18 * frame buffer.
19 */
20
21#include <linux/kernel.h>
22#include <linux/errno.h>
23#include <linux/fb.h>
24#include <linux/module.h>
25#include <linux/vmalloc.h>
26#include <linux/mm.h>
27#include <asm/xen/hypervisor.h>
28#include <xen/events.h>
29#include <xen/page.h>
30#include <xen/interface/io/fbif.h>
31#include <xen/interface/io/protocols.h>
32#include <xen/xenbus.h>
33
34struct xenfb_info {
35 unsigned char *fb;
36 struct fb_info *fb_info;
37 int x1, y1, x2, y2; /* dirty rectangle,
38 protected by dirty_lock */
39 spinlock_t dirty_lock;
40 int nr_pages;
41 int irq;
42 struct xenfb_page *page;
43 unsigned long *mfns;
44 int update_wanted; /* XENFB_TYPE_UPDATE wanted */
45
46 struct xenbus_device *xbdev;
47};
48
49static u32 xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
50
51static int xenfb_remove(struct xenbus_device *);
52static void xenfb_init_shared_page(struct xenfb_info *);
53static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
54static void xenfb_disconnect_backend(struct xenfb_info *);
55
56static void xenfb_do_update(struct xenfb_info *info,
57 int x, int y, int w, int h)
58{
59 union xenfb_out_event event;
60 u32 prod;
61
62 event.type = XENFB_TYPE_UPDATE;
63 event.update.x = x;
64 event.update.y = y;
65 event.update.width = w;
66 event.update.height = h;
67
68 prod = info->page->out_prod;
69 /* caller ensures !xenfb_queue_full() */
70 mb(); /* ensure ring space available */
71 XENFB_OUT_RING_REF(info->page, prod) = event;
72 wmb(); /* ensure ring contents visible */
73 info->page->out_prod = prod + 1;
74
75 notify_remote_via_irq(info->irq);
76}
77
78static int xenfb_queue_full(struct xenfb_info *info)
79{
80 u32 cons, prod;
81
82 prod = info->page->out_prod;
83 cons = info->page->out_cons;
84 return prod - cons == XENFB_OUT_RING_LEN;
85}
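
/*
 * xenfb_do_update() and xenfb_queue_full() above are the producer half
 * of a single-producer/single-consumer ring: the indexes run freely,
 * the ring is full when the producer is exactly one ring length ahead
 * of the consumer, and an event is published by filling the slot first
 * and bumping the index only after a write barrier.  A minimal
 * standalone sketch of the same pattern, with assumed names and a
 * power-of-two length so the index arithmetic survives wraparound:
 */
#define SKETCH_RING_LEN 64		/* power of two; sketch only */

struct sketch_ring {
	unsigned int slot[SKETCH_RING_LEN];
	volatile unsigned int prod;	/* written by producer only */
	volatile unsigned int cons;	/* written by consumer only */
};

static int sketch_ring_full(struct sketch_ring *r)
{
	return r->prod - r->cons == SKETCH_RING_LEN;
}

static void sketch_ring_push(struct sketch_ring *r, unsigned int v)
{
	unsigned int prod = r->prod;	/* caller ensured !full */

	r->slot[prod % SKETCH_RING_LEN] = v;	/* fill the slot first */
	__sync_synchronize();			/* make it visible */
	r->prod = prod + 1;			/* then publish */
}
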
86
87static void xenfb_refresh(struct xenfb_info *info,
88 int x1, int y1, int w, int h)
89{
90 unsigned long flags;
91 int y2 = y1 + h - 1;
92 int x2 = x1 + w - 1;
93
94 if (!info->update_wanted)
95 return;
96
97 spin_lock_irqsave(&info->dirty_lock, flags);
98
99 /* Combine with dirty rectangle: */
100 if (info->y1 < y1)
101 y1 = info->y1;
102 if (info->y2 > y2)
103 y2 = info->y2;
104 if (info->x1 < x1)
105 x1 = info->x1;
106 if (info->x2 > x2)
107 x2 = info->x2;
108
109 if (xenfb_queue_full(info)) {
110 /* Can't send right now, stash it in the dirty rectangle */
111 info->x1 = x1;
112 info->x2 = x2;
113 info->y1 = y1;
114 info->y2 = y2;
115 spin_unlock_irqrestore(&info->dirty_lock, flags);
116 return;
117 }
118
119 /* Clear dirty rectangle: */
120 info->x1 = info->y1 = INT_MAX;
121 info->x2 = info->y2 = 0;
122
123 spin_unlock_irqrestore(&info->dirty_lock, flags);
124
125 if (x1 <= x2 && y1 <= y2)
126 xenfb_do_update(info, x1, y1, x2 - x1 + 1, y2 - y1 + 1);
127}
128
129static void xenfb_deferred_io(struct fb_info *fb_info,
130 struct list_head *pagelist)
131{
132 struct xenfb_info *info = fb_info->par;
133 struct page *page;
134 unsigned long beg, end;
135 int y1, y2, miny, maxy;
136
137 miny = INT_MAX;
138 maxy = 0;
139 list_for_each_entry(page, pagelist, lru) {
140 beg = page->index << PAGE_SHIFT;
141 end = beg + PAGE_SIZE - 1;
142 y1 = beg / fb_info->fix.line_length;
143 y2 = end / fb_info->fix.line_length;
144 if (y2 >= fb_info->var.yres)
145 y2 = fb_info->var.yres - 1;
146 if (miny > y1)
147 miny = y1;
148 if (maxy < y2)
149 maxy = y2;
150 }
151 xenfb_refresh(info, 0, miny, fb_info->var.xres, maxy - miny + 1);
152}
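
/*
 * The deferred-io handler above only needs the vertical extent of each
 * dirty page: a byte offset into the framebuffer divided by
 * line_length gives a scanline.  Worked example with assumed numbers
 * (not taken from this driver): at 800 pixels and 32 bpp a line is
 * 3200 bytes, so the page at offset 8192 spans bytes 8192..12287,
 * i.e. rows 8192 / 3200 = 2 through 12287 / 3200 = 3.
 */
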
153
154static struct fb_deferred_io xenfb_defio = {
155 .delay = HZ / 20,
156 .deferred_io = xenfb_deferred_io,
157};
158
159static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
160 unsigned blue, unsigned transp,
161 struct fb_info *info)
162{
163 u32 v;
164
165 if (regno > info->cmap.len)
166 return 1;
167
168#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
169 red = CNVT_TOHW(red, info->var.red.length);
170 green = CNVT_TOHW(green, info->var.green.length);
171 blue = CNVT_TOHW(blue, info->var.blue.length);
172 transp = CNVT_TOHW(transp, info->var.transp.length);
173#undef CNVT_TOHW
174
175 v = (red << info->var.red.offset) |
176 (green << info->var.green.offset) |
177 (blue << info->var.blue.offset);
178
179 switch (info->var.bits_per_pixel) {
180 case 16:
181 case 24:
182 case 32:
183 ((u32 *)info->pseudo_palette)[regno] = v;
184 break;
185 }
186
187 return 0;
188}
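
/*
 * CNVT_TOHW above rescales a 16-bit colour component to the field
 * width the hardware uses, rounding to nearest.  For width 8:
 * CNVT_TOHW(0xffff, 8) = ((0xffff << 8) + 0x7fff - 0xffff) >> 16 = 0xff
 * CNVT_TOHW(0x0000, 8) = 0x7fff >> 16 = 0x00
 * so zero and full scale map exactly to the ends of the target range.
 */
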
189
190static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
191{
192 struct xenfb_info *info = p->par;
193
194 sys_fillrect(p, rect);
195 xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
196}
197
198static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
199{
200 struct xenfb_info *info = p->par;
201
202 sys_imageblit(p, image);
203 xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
204}
205
206static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
207{
208 struct xenfb_info *info = p->par;
209
210 sys_copyarea(p, area);
211 xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
212}
213
214static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
215 size_t count, loff_t *ppos)
216{
217 struct xenfb_info *info = p->par;
218 ssize_t res;
219
220 res = fb_sys_write(p, buf, count, ppos);
221 xenfb_refresh(info, 0, 0, info->page->width, info->page->height);
222 return res;
223}
224
225static struct fb_ops xenfb_fb_ops = {
226 .owner = THIS_MODULE,
227 .fb_read = fb_sys_read,
228 .fb_write = xenfb_write,
229 .fb_setcolreg = xenfb_setcolreg,
230 .fb_fillrect = xenfb_fillrect,
231 .fb_copyarea = xenfb_copyarea,
232 .fb_imageblit = xenfb_imageblit,
233};
234
235static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
236{
237 /*
238 * No in events recognized, simply ignore them all.
239 * If you need to recognize some, see xen-kbdfront's
240 * input_handler() for how to do that.
241 */
242 struct xenfb_info *info = dev_id;
243 struct xenfb_page *page = info->page;
244
245 if (page->in_cons != page->in_prod) {
246 info->page->in_cons = info->page->in_prod;
247 notify_remote_via_irq(info->irq);
248 }
249
250 /* Flush dirty rectangle: */
251 xenfb_refresh(info, INT_MAX, INT_MAX, -INT_MAX, -INT_MAX);
252
253 return IRQ_HANDLED;
254}
255
256static int __devinit xenfb_probe(struct xenbus_device *dev,
257 const struct xenbus_device_id *id)
258{
259 struct xenfb_info *info;
260 struct fb_info *fb_info;
261 int ret;
262
263 info = kzalloc(sizeof(*info), GFP_KERNEL);
264 if (info == NULL) {
265 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
266 return -ENOMEM;
267 }
268 dev->dev.driver_data = info;
269 info->xbdev = dev;
270 info->irq = -1;
271 info->x1 = info->y1 = INT_MAX;
272 spin_lock_init(&info->dirty_lock);
273
274 info->fb = vmalloc(xenfb_mem_len);
275 if (info->fb == NULL)
276 goto error_nomem;
277 memset(info->fb, 0, xenfb_mem_len);
278
279 info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
280
281 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
282 if (!info->mfns)
283 goto error_nomem;
284
285 /* set up shared page */
286 info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
287 if (!info->page)
288 goto error_nomem;
289
290 xenfb_init_shared_page(info);
291
292 /* abusing framebuffer_alloc() to allocate pseudo_palette */
293 fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
294 if (fb_info == NULL)
295 goto error_nomem;
296
297 /* complete the abuse: */
298 fb_info->pseudo_palette = fb_info->par;
299 fb_info->par = info;
300
301 fb_info->screen_base = info->fb;
302
303 fb_info->fbops = &xenfb_fb_ops;
304 fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
305 fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
306 fb_info->var.bits_per_pixel = info->page->depth;
307
308 fb_info->var.red = (struct fb_bitfield){16, 8, 0};
309 fb_info->var.green = (struct fb_bitfield){8, 8, 0};
310 fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
311
312 fb_info->var.activate = FB_ACTIVATE_NOW;
313 fb_info->var.height = -1;
314 fb_info->var.width = -1;
315 fb_info->var.vmode = FB_VMODE_NONINTERLACED;
316
317 fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
318 fb_info->fix.line_length = info->page->line_length;
319 fb_info->fix.smem_start = 0;
320 fb_info->fix.smem_len = xenfb_mem_len;
321 strcpy(fb_info->fix.id, "xen");
322 fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
323 fb_info->fix.accel = FB_ACCEL_NONE;
324
325 fb_info->flags = FBINFO_FLAG_DEFAULT;
326
327 ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
328 if (ret < 0) {
329 framebuffer_release(fb_info);
330 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
331 goto error;
332 }
333
334 fb_info->fbdefio = &xenfb_defio;
335 fb_deferred_io_init(fb_info);
336
337 ret = register_framebuffer(fb_info);
338 if (ret) {
339 fb_deferred_io_cleanup(fb_info);
340 fb_dealloc_cmap(&fb_info->cmap);
341 framebuffer_release(fb_info);
342 xenbus_dev_fatal(dev, ret, "register_framebuffer");
343 goto error;
344 }
345 info->fb_info = fb_info;
346
347 ret = xenfb_connect_backend(dev, info);
348 if (ret < 0)
349 goto error;
350
351 return 0;
352
353 error_nomem:
354 ret = -ENOMEM;
355 xenbus_dev_fatal(dev, ret, "allocating device memory");
356 error:
357 xenfb_remove(dev);
358 return ret;
359}
360
361static int xenfb_resume(struct xenbus_device *dev)
362{
363 struct xenfb_info *info = dev->dev.driver_data;
364
365 xenfb_disconnect_backend(info);
366 xenfb_init_shared_page(info);
367 return xenfb_connect_backend(dev, info);
368}
369
370static int xenfb_remove(struct xenbus_device *dev)
371{
372 struct xenfb_info *info = dev->dev.driver_data;
373
374 xenfb_disconnect_backend(info);
375 if (info->fb_info) {
376 fb_deferred_io_cleanup(info->fb_info);
377 unregister_framebuffer(info->fb_info);
378 fb_dealloc_cmap(&info->fb_info->cmap);
379 framebuffer_release(info->fb_info);
380 }
381 free_page((unsigned long)info->page);
382 vfree(info->mfns);
383 vfree(info->fb);
384 kfree(info);
385
386 return 0;
387}
388
389static unsigned long vmalloc_to_mfn(void *address)
390{
391 return pfn_to_mfn(vmalloc_to_pfn(address));
392}
393
394static void xenfb_init_shared_page(struct xenfb_info *info)
395{
396 int i;
397
398 for (i = 0; i < info->nr_pages; i++)
399 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
400
401 info->page->pd[0] = vmalloc_to_mfn(info->mfns);
402 info->page->pd[1] = 0;
403 info->page->width = XENFB_WIDTH;
404 info->page->height = XENFB_HEIGHT;
405 info->page->depth = XENFB_DEPTH;
406 info->page->line_length = (info->page->depth / 8) * info->page->width;
407 info->page->mem_length = xenfb_mem_len;
408 info->page->in_cons = info->page->in_prod = 0;
409 info->page->out_cons = info->page->out_prod = 0;
410}
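
/*
 * Layout note: the shared page carries a tiny page directory.  pd[0]
 * holds the machine frame of the page containing mfns[], which in
 * turn lists one machine frame per framebuffer page.  Assuming the
 * interface defaults of 800x600 at 32 bpp, xenfb_mem_len is 1,920,000
 * bytes = 469 pages of 4 KiB, so mfns[] occupies only
 * 469 * sizeof(unsigned long) bytes -- well under one page, which is
 * why pd[1] stays zero here.
 */
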
411
412static int xenfb_connect_backend(struct xenbus_device *dev,
413 struct xenfb_info *info)
414{
415 int ret, evtchn;
416 struct xenbus_transaction xbt;
417
418 ret = xenbus_alloc_evtchn(dev, &evtchn);
419 if (ret)
420 return ret;
421 ret = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
422 0, dev->devicetype, info);
423 if (ret < 0) {
424 xenbus_free_evtchn(dev, evtchn);
425 xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
426 return ret;
427 }
428 info->irq = ret;
429
430 again:
431 ret = xenbus_transaction_start(&xbt);
432 if (ret) {
433 xenbus_dev_fatal(dev, ret, "starting transaction");
434 return ret;
435 }
436 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
437 virt_to_mfn(info->page));
438 if (ret)
439 goto error_xenbus;
440 ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
441 evtchn);
442 if (ret)
443 goto error_xenbus;
444 ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
445 XEN_IO_PROTO_ABI_NATIVE);
446 if (ret)
447 goto error_xenbus;
448 ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
449 if (ret)
450 goto error_xenbus;
451 ret = xenbus_transaction_end(xbt, 0);
452 if (ret) {
453 if (ret == -EAGAIN)
454 goto again;
455 xenbus_dev_fatal(dev, ret, "completing transaction");
456 return ret;
457 }
458
459 xenbus_switch_state(dev, XenbusStateInitialised);
460 return 0;
461
462 error_xenbus:
463 xenbus_transaction_end(xbt, 1);
464 xenbus_dev_fatal(dev, ret, "writing xenstore");
465 return ret;
466}
467
468static void xenfb_disconnect_backend(struct xenfb_info *info)
469{
470 if (info->irq >= 0)
471 unbind_from_irqhandler(info->irq, info);
472 info->irq = -1;
473}
474
475static void xenfb_backend_changed(struct xenbus_device *dev,
476 enum xenbus_state backend_state)
477{
478 struct xenfb_info *info = dev->dev.driver_data;
479 int val;
480
481 switch (backend_state) {
482 case XenbusStateInitialising:
483 case XenbusStateInitialised:
484 case XenbusStateUnknown:
485 case XenbusStateClosed:
486 break;
487
488 case XenbusStateInitWait:
489InitWait:
490 xenbus_switch_state(dev, XenbusStateConnected);
491 break;
492
493 case XenbusStateConnected:
494 /*
495 * Work around xenbus race condition: If backend goes
496 * through InitWait to Connected fast enough, we can
497 * get Connected twice here.
498 */
499 if (dev->state != XenbusStateConnected)
500 goto InitWait; /* no InitWait seen yet, fudge it */
501
502 if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
503 "request-update", "%d", &val) < 0)
504 val = 0;
505 if (val)
506 info->update_wanted = 1;
507 break;
508
509 case XenbusStateClosing:
510 xenbus_frontend_closed(dev);
511 break;
512 }
513}
514
515static struct xenbus_device_id xenfb_ids[] = {
516 { "vfb" },
517 { "" }
518};
519
520static struct xenbus_driver xenfb = {
521 .name = "vfb",
522 .owner = THIS_MODULE,
523 .ids = xenfb_ids,
524 .probe = xenfb_probe,
525 .remove = xenfb_remove,
526 .resume = xenfb_resume,
527 .otherend_changed = xenfb_backend_changed,
528};
529
530static int __init xenfb_init(void)
531{
532 if (!is_running_on_xen())
533 return -ENODEV;
534
535 /* Nothing to do if running in dom0. */
536 if (is_initial_xendomain())
537 return -ENODEV;
538
539 return xenbus_register_frontend(&xenfb);
540}
541
542static void __exit xenfb_cleanup(void)
543{
544 xenbus_unregister_driver(&xenfb);
545}
546
547module_init(xenfb_init);
548module_exit(xenfb_cleanup);
549
550MODULE_LICENSE("GPL");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
new file mode 100644
index 000000000000..4b75a16de009
--- /dev/null
+++ b/drivers/xen/Kconfig
@@ -0,0 +1,19 @@
1config XEN_BALLOON
2 bool "Xen memory balloon driver"
3 depends on XEN
4 default y
5 help
6 The balloon driver allows the Xen domain to request more memory from
7 the system to expand the domain's memory allocation, or alternatively
8 return unneeded memory to the system.
9
10config XEN_SCRUB_PAGES
11 bool "Scrub pages before returning them to system"
12 depends on XEN_BALLOON
13 default y
14 help
15 Scrub pages before returning them to the system for reuse by
16 other domains. This makes sure that any confidential data
17	  is not accidentally visible to other domains. It is more
18 secure, but slightly less efficient.
19 If in doubt, say yes.
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 56592f0d6cef..37af04f1ffd9 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,2 +1,4 @@
-obj-y	+= grant-table.o
+obj-y	+= grant-table.o features.o events.o
 obj-y	+= xenbus/
+obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
+obj-$(CONFIG_XEN_BALLOON)	+= balloon.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
new file mode 100644
index 000000000000..ab25ba6cbbb9
--- /dev/null
+++ b/drivers/xen/balloon.c
@@ -0,0 +1,712 @@
1/******************************************************************************
2 * balloon.c
3 *
4 * Xen balloon driver - enables returning/claiming memory to/from Xen.
5 *
6 * Copyright (c) 2003, B Dragovic
7 * Copyright (c) 2003-2004, M Williamson, K Fraser
8 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/sched.h>
38#include <linux/errno.h>
39#include <linux/mm.h>
40#include <linux/bootmem.h>
41#include <linux/pagemap.h>
42#include <linux/highmem.h>
43#include <linux/mutex.h>
45#include <linux/list.h>
46#include <linux/sysdev.h>
47
48#include <asm/xen/hypervisor.h>
49#include <asm/page.h>
50#include <asm/pgalloc.h>
51#include <asm/pgtable.h>
52#include <asm/uaccess.h>
53#include <asm/tlb.h>
54
55#include <xen/interface/memory.h>
56#include <xen/balloon.h>
57#include <xen/xenbus.h>
58#include <xen/features.h>
59#include <xen/page.h>
60
61#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
62
63#define BALLOON_CLASS_NAME "memory"
64
65struct balloon_stats {
66 /* We aim for 'current allocation' == 'target allocation'. */
67 unsigned long current_pages;
68 unsigned long target_pages;
69 /* We may hit the hard limit in Xen. If we do then we remember it. */
70 unsigned long hard_limit;
71 /*
72 * Drivers may alter the memory reservation independently, but they
73 * must inform the balloon driver so we avoid hitting the hard limit.
74 */
75 unsigned long driver_pages;
76 /* Number of pages in high- and low-memory balloons. */
77 unsigned long balloon_low;
78 unsigned long balloon_high;
79};
80
81static DEFINE_MUTEX(balloon_mutex);
82
83static struct sys_device balloon_sysdev;
84
85static int register_balloon(struct sys_device *sysdev);
86
87/*
88 * Protects atomic reservation decrease/increase against concurrent increases.
89 * Also protects non-atomic updates of current_pages and driver_pages, and
90 * balloon lists.
91 */
92static DEFINE_SPINLOCK(balloon_lock);
93
94static struct balloon_stats balloon_stats;
95
96/* We increase/decrease in batches which fit in a page */
97static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
98
99/* VM /proc information for memory */
100extern unsigned long totalram_pages;
101
102#ifdef CONFIG_HIGHMEM
103extern unsigned long totalhigh_pages;
104#define inc_totalhigh_pages() (totalhigh_pages++)
105#define dec_totalhigh_pages() (totalhigh_pages--)
106#else
107#define inc_totalhigh_pages() do {} while(0)
108#define dec_totalhigh_pages() do {} while(0)
109#endif
110
111/* List of ballooned pages, threaded through the mem_map array. */
112static LIST_HEAD(ballooned_pages);
113
114/* Main work function, always executed in process context. */
115static void balloon_process(struct work_struct *work);
116static DECLARE_WORK(balloon_worker, balloon_process);
117static struct timer_list balloon_timer;
118
119/* When ballooning out (allocating memory to return to Xen) we don't really
120 want the kernel to try too hard since that can trigger the oom killer. */
121#define GFP_BALLOON \
122 (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
123
124static void scrub_page(struct page *page)
125{
126#ifdef CONFIG_XEN_SCRUB_PAGES
127 if (PageHighMem(page)) {
128 void *v = kmap(page);
129 clear_page(v);
130		kunmap(page);
131 } else {
132 void *v = page_address(page);
133 clear_page(v);
134 }
135#endif
136}
137
138/* balloon_append: add the given page to the balloon. */
139static void balloon_append(struct page *page)
140{
141 /* Lowmem is re-populated first, so highmem pages go at list tail. */
142 if (PageHighMem(page)) {
143 list_add_tail(&page->lru, &ballooned_pages);
144 balloon_stats.balloon_high++;
145 dec_totalhigh_pages();
146 } else {
147 list_add(&page->lru, &ballooned_pages);
148 balloon_stats.balloon_low++;
149 }
150}
151
152/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
153static struct page *balloon_retrieve(void)
154{
155 struct page *page;
156
157 if (list_empty(&ballooned_pages))
158 return NULL;
159
160 page = list_entry(ballooned_pages.next, struct page, lru);
161 list_del(&page->lru);
162
163 if (PageHighMem(page)) {
164 balloon_stats.balloon_high--;
165 inc_totalhigh_pages();
166 }
167 else
168 balloon_stats.balloon_low--;
169
170 return page;
171}
172
173static struct page *balloon_first_page(void)
174{
175 if (list_empty(&ballooned_pages))
176 return NULL;
177 return list_entry(ballooned_pages.next, struct page, lru);
178}
179
180static struct page *balloon_next_page(struct page *page)
181{
182 struct list_head *next = page->lru.next;
183 if (next == &ballooned_pages)
184 return NULL;
185 return list_entry(next, struct page, lru);
186}
187
188static void balloon_alarm(unsigned long unused)
189{
190 schedule_work(&balloon_worker);
191}
192
193static unsigned long current_target(void)
194{
195 unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit);
196
197 target = min(target,
198 balloon_stats.current_pages +
199 balloon_stats.balloon_low +
200 balloon_stats.balloon_high);
201
202 return target;
203}
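
/*
 * Worked example with illustrative numbers: current_pages = 100000,
 * balloon_low + balloon_high = 20000, target_pages = 150000, no hard
 * limit.  The first min() leaves 150000; the second clamps it to
 * 120000, since the balloon can only grow by reclaiming pages it
 * previously handed back to Xen.
 */
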
204
205static int increase_reservation(unsigned long nr_pages)
206{
207 unsigned long pfn, i, flags;
208 struct page *page;
209 long rc;
210 struct xen_memory_reservation reservation = {
211 .address_bits = 0,
212 .extent_order = 0,
213 .domid = DOMID_SELF
214 };
215
216 if (nr_pages > ARRAY_SIZE(frame_list))
217 nr_pages = ARRAY_SIZE(frame_list);
218
219 spin_lock_irqsave(&balloon_lock, flags);
220
221 page = balloon_first_page();
222 for (i = 0; i < nr_pages; i++) {
223 BUG_ON(page == NULL);
224		frame_list[i] = page_to_pfn(page);
225 page = balloon_next_page(page);
226 }
227
228 reservation.extent_start = (unsigned long)frame_list;
229 reservation.nr_extents = nr_pages;
230 rc = HYPERVISOR_memory_op(
231 XENMEM_populate_physmap, &reservation);
232 if (rc < nr_pages) {
233 if (rc > 0) {
234 int ret;
235
236 /* We hit the Xen hard limit: reprobe. */
237 reservation.nr_extents = rc;
238 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
239 &reservation);
240 BUG_ON(ret != rc);
241 }
242 if (rc >= 0)
243 balloon_stats.hard_limit = (balloon_stats.current_pages + rc -
244 balloon_stats.driver_pages);
245 goto out;
246 }
247
248 for (i = 0; i < nr_pages; i++) {
249 page = balloon_retrieve();
250 BUG_ON(page == NULL);
251
252 pfn = page_to_pfn(page);
253 BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
254 phys_to_machine_mapping_valid(pfn));
255
256 set_phys_to_machine(pfn, frame_list[i]);
257
258 /* Link back into the page tables if not highmem. */
259 if (pfn < max_low_pfn) {
260 int ret;
261 ret = HYPERVISOR_update_va_mapping(
262 (unsigned long)__va(pfn << PAGE_SHIFT),
263 mfn_pte(frame_list[i], PAGE_KERNEL),
264 0);
265 BUG_ON(ret);
266 }
267
268 /* Relinquish the page back to the allocator. */
269 ClearPageReserved(page);
270 init_page_count(page);
271 __free_page(page);
272 }
273
274 balloon_stats.current_pages += nr_pages;
275 totalram_pages = balloon_stats.current_pages;
276
277 out:
278 spin_unlock_irqrestore(&balloon_lock, flags);
279
280 return 0;
281}
282
283static int decrease_reservation(unsigned long nr_pages)
284{
285 unsigned long pfn, i, flags;
286 struct page *page;
287 int need_sleep = 0;
288 int ret;
289 struct xen_memory_reservation reservation = {
290 .address_bits = 0,
291 .extent_order = 0,
292 .domid = DOMID_SELF
293 };
294
295 if (nr_pages > ARRAY_SIZE(frame_list))
296 nr_pages = ARRAY_SIZE(frame_list);
297
298 for (i = 0; i < nr_pages; i++) {
299 if ((page = alloc_page(GFP_BALLOON)) == NULL) {
300 nr_pages = i;
301 need_sleep = 1;
302 break;
303 }
304
305 pfn = page_to_pfn(page);
306 frame_list[i] = pfn_to_mfn(pfn);
307
308 scrub_page(page);
309 }
310
311 /* Ensure that ballooned highmem pages don't have kmaps. */
312 kmap_flush_unused();
313 flush_tlb_all();
314
315 spin_lock_irqsave(&balloon_lock, flags);
316
317 /* No more mappings: invalidate P2M and add to balloon. */
318 for (i = 0; i < nr_pages; i++) {
319 pfn = mfn_to_pfn(frame_list[i]);
320 set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
321 balloon_append(pfn_to_page(pfn));
322 }
323
324 reservation.extent_start = (unsigned long)frame_list;
325 reservation.nr_extents = nr_pages;
326 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
327 BUG_ON(ret != nr_pages);
328
329 balloon_stats.current_pages -= nr_pages;
330 totalram_pages = balloon_stats.current_pages;
331
332 spin_unlock_irqrestore(&balloon_lock, flags);
333
334 return need_sleep;
335}
336
337/*
338 * We avoid multiple worker processes conflicting via the balloon mutex.
339 * We may of course race updates of the target counts (which are protected
340 * by the balloon lock), or with changes to the Xen hard limit, but we will
341 * recover from these in time.
342 */
343static void balloon_process(struct work_struct *work)
344{
345 int need_sleep = 0;
346 long credit;
347
348 mutex_lock(&balloon_mutex);
349
350 do {
351 credit = current_target() - balloon_stats.current_pages;
352 if (credit > 0)
353 need_sleep = (increase_reservation(credit) != 0);
354 if (credit < 0)
355 need_sleep = (decrease_reservation(-credit) != 0);
356
357#ifndef CONFIG_PREEMPT
358 if (need_resched())
359 schedule();
360#endif
361 } while ((credit != 0) && !need_sleep);
362
363 /* Schedule more work if there is some still to be done. */
364 if (current_target() != balloon_stats.current_pages)
365 mod_timer(&balloon_timer, jiffies + HZ);
366
367 mutex_unlock(&balloon_mutex);
368}
369
370/* Resets the Xen limit, sets new target, and kicks off processing. */
371void balloon_set_new_target(unsigned long target)
372{
373 /* No need for lock. Not read-modify-write updates. */
374 balloon_stats.hard_limit = ~0UL;
375 balloon_stats.target_pages = target;
376 schedule_work(&balloon_worker);
377}
378
379static struct xenbus_watch target_watch =
380{
381 .node = "memory/target"
382};
383
384/* React to a change in the target key */
385static void watch_target(struct xenbus_watch *watch,
386 const char **vec, unsigned int len)
387{
388 unsigned long long new_target;
389 int err;
390
391 err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
392 if (err != 1) {
393 /* This is ok (for domain0 at least) - so just return */
394 return;
395 }
396
397 /* The given memory/target value is in KiB, so it needs converting to
398 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
399 */
400 balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
401}
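
/*
 * Worked example: with 4 KiB pages PAGE_SHIFT is 12, so the KiB ->
 * pages conversion is a shift by 12 - 10 = 2.  A memory/target of
 * 524288 (KiB, i.e. 512 MiB) becomes 524288 >> 2 = 131072 pages.
 */
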
402
403static int balloon_init_watcher(struct notifier_block *notifier,
404 unsigned long event,
405 void *data)
406{
407 int err;
408
409 err = register_xenbus_watch(&target_watch);
410 if (err)
411 printk(KERN_ERR "Failed to set balloon watcher\n");
412
413 return NOTIFY_DONE;
414}
415
416static struct notifier_block xenstore_notifier;
417
418static int __init balloon_init(void)
419{
420 unsigned long pfn;
421 struct page *page;
422
423 if (!is_running_on_xen())
424 return -ENODEV;
425
426 pr_info("xen_balloon: Initialising balloon driver.\n");
427
428 balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
429 totalram_pages = balloon_stats.current_pages;
430 balloon_stats.target_pages = balloon_stats.current_pages;
431 balloon_stats.balloon_low = 0;
432 balloon_stats.balloon_high = 0;
433 balloon_stats.driver_pages = 0UL;
434 balloon_stats.hard_limit = ~0UL;
435
436 init_timer(&balloon_timer);
437 balloon_timer.data = 0;
438 balloon_timer.function = balloon_alarm;
439
440 register_balloon(&balloon_sysdev);
441
442 /* Initialise the balloon with excess memory space. */
443 for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
444 page = pfn_to_page(pfn);
445 if (!PageReserved(page))
446 balloon_append(page);
447 }
448
449 target_watch.callback = watch_target;
450 xenstore_notifier.notifier_call = balloon_init_watcher;
451
452 register_xenstore_notifier(&xenstore_notifier);
453
454 return 0;
455}
456
457subsys_initcall(balloon_init);
458
459static void balloon_exit(void)
460{
461 /* XXX - release balloon here */
462 return;
463}
464
465module_exit(balloon_exit);
466
467static void balloon_update_driver_allowance(long delta)
468{
469 unsigned long flags;
470
471 spin_lock_irqsave(&balloon_lock, flags);
472 balloon_stats.driver_pages += delta;
473 spin_unlock_irqrestore(&balloon_lock, flags);
474}
475
476static int dealloc_pte_fn(
477 pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
478{
479 unsigned long mfn = pte_mfn(*pte);
480 int ret;
481 struct xen_memory_reservation reservation = {
482 .nr_extents = 1,
483 .extent_order = 0,
484 .domid = DOMID_SELF
485 };
486 reservation.extent_start = (unsigned long)&mfn;
487 set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
488 set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
489 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
490 BUG_ON(ret != 1);
491 return 0;
492}
493
494static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
495{
496 unsigned long vaddr, flags;
497 struct page *page, **pagevec;
498 int i, ret;
499
500 pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
501 if (pagevec == NULL)
502 return NULL;
503
504 for (i = 0; i < nr_pages; i++) {
505 page = pagevec[i] = alloc_page(GFP_KERNEL);
506 if (page == NULL)
507 goto err;
508
509 vaddr = (unsigned long)page_address(page);
510
511 scrub_page(page);
512
513 spin_lock_irqsave(&balloon_lock, flags);
514
515 if (xen_feature(XENFEAT_auto_translated_physmap)) {
516 unsigned long gmfn = page_to_pfn(page);
517 struct xen_memory_reservation reservation = {
518 .nr_extents = 1,
519 .extent_order = 0,
520 .domid = DOMID_SELF
521 };
522 reservation.extent_start = (unsigned long)&gmfn;
523 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
524 &reservation);
525 if (ret == 1)
526 ret = 0; /* success */
527 } else {
528 ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
529 dealloc_pte_fn, NULL);
530 }
531
532 if (ret != 0) {
533 spin_unlock_irqrestore(&balloon_lock, flags);
534 __free_page(page);
535 goto err;
536 }
537
538 totalram_pages = --balloon_stats.current_pages;
539
540 spin_unlock_irqrestore(&balloon_lock, flags);
541 }
542
543 out:
544 schedule_work(&balloon_worker);
545 flush_tlb_all();
546 return pagevec;
547
548 err:
549 spin_lock_irqsave(&balloon_lock, flags);
550 while (--i >= 0)
551 balloon_append(pagevec[i]);
552 spin_unlock_irqrestore(&balloon_lock, flags);
553 kfree(pagevec);
554 pagevec = NULL;
555 goto out;
556}
557
558static void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
559{
560 unsigned long flags;
561 int i;
562
563 if (pagevec == NULL)
564 return;
565
566 spin_lock_irqsave(&balloon_lock, flags);
567 for (i = 0; i < nr_pages; i++) {
568 BUG_ON(page_count(pagevec[i]) != 1);
569 balloon_append(pagevec[i]);
570 }
571 spin_unlock_irqrestore(&balloon_lock, flags);
572
573 kfree(pagevec);
574
575 schedule_work(&balloon_worker);
576}
577
578static void balloon_release_driver_page(struct page *page)
579{
580 unsigned long flags;
581
582 spin_lock_irqsave(&balloon_lock, flags);
583 balloon_append(page);
584 balloon_stats.driver_pages--;
585 spin_unlock_irqrestore(&balloon_lock, flags);
586
587 schedule_work(&balloon_worker);
588}
589
590
591#define BALLOON_SHOW(name, format, args...) \
592 static ssize_t show_##name(struct sys_device *dev, \
593 char *buf) \
594 { \
595 return sprintf(buf, format, ##args); \
596 } \
597 static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
598
599BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
600BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
601BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
602BALLOON_SHOW(hard_limit_kb,
603 (balloon_stats.hard_limit!=~0UL) ? "%lu\n" : "???\n",
604 (balloon_stats.hard_limit!=~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0);
605BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
606
607static ssize_t show_target_kb(struct sys_device *dev, char *buf)
608{
609 return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
610}
611
612static ssize_t store_target_kb(struct sys_device *dev,
613 const char *buf,
614 size_t count)
615{
616 char memstring[64], *endchar;
617 unsigned long long target_bytes;
618
619 if (!capable(CAP_SYS_ADMIN))
620 return -EPERM;
621
622 if (count <= 1)
623 return -EBADMSG; /* runt */
624 if (count > sizeof(memstring))
625 return -EFBIG; /* too long */
626 strcpy(memstring, buf);
627
628 target_bytes = memparse(memstring, &endchar);
629 balloon_set_new_target(target_bytes >> PAGE_SHIFT);
630
631 return count;
632}
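
/*
 * memparse() accepts a size with an optional K/M/G suffix and returns
 * bytes; the store hook above then shifts bytes down to pages.  A
 * simplified standalone sketch of that parsing (assumed code, not the
 * kernel's implementation):
 */
#include <stdlib.h>

static unsigned long long sketch_parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10;
	}
	return v;
}
/* e.g. sketch_parse_size("512M") == 536870912; >> 12 gives 131072 pages */
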
633
634static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
635 show_target_kb, store_target_kb);
636
637static struct sysdev_attribute *balloon_attrs[] = {
638 &attr_target_kb,
639};
640
641static struct attribute *balloon_info_attrs[] = {
642 &attr_current_kb.attr,
643 &attr_low_kb.attr,
644 &attr_high_kb.attr,
645 &attr_hard_limit_kb.attr,
646 &attr_driver_kb.attr,
647 NULL
648};
649
650static struct attribute_group balloon_info_group = {
651 .name = "info",
652 .attrs = balloon_info_attrs,
653};
654
655static struct sysdev_class balloon_sysdev_class = {
656 .name = BALLOON_CLASS_NAME,
657};
658
659static int register_balloon(struct sys_device *sysdev)
660{
661 int i, error;
662
663 error = sysdev_class_register(&balloon_sysdev_class);
664 if (error)
665 return error;
666
667 sysdev->id = 0;
668 sysdev->cls = &balloon_sysdev_class;
669
670 error = sysdev_register(sysdev);
671 if (error) {
672 sysdev_class_unregister(&balloon_sysdev_class);
673 return error;
674 }
675
676 for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
677 error = sysdev_create_file(sysdev, balloon_attrs[i]);
678 if (error)
679 goto fail;
680 }
681
682 error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
683 if (error)
684 goto fail;
685
686 return 0;
687
688 fail:
689 while (--i >= 0)
690 sysdev_remove_file(sysdev, balloon_attrs[i]);
691 sysdev_unregister(sysdev);
692 sysdev_class_unregister(&balloon_sysdev_class);
693 return error;
694}
695
696static void unregister_balloon(struct sys_device *sysdev)
697{
698 int i;
699
700 sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
701 for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
702 sysdev_remove_file(sysdev, balloon_attrs[i]);
703 sysdev_unregister(sysdev);
704 sysdev_class_unregister(&balloon_sysdev_class);
705}
706
707static void balloon_sysfs_exit(void)
708{
709 unregister_balloon(&balloon_sysdev);
710}
711
712MODULE_LICENSE("GPL");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
new file mode 100644
index 000000000000..4f0f22b020ea
--- /dev/null
+++ b/drivers/xen/events.c
@@ -0,0 +1,674 @@
1/*
2 * Xen event channels
3 *
4 * Xen models interrupts with abstract event channels. Because each
5 * domain gets 1024 event channels, but NR_IRQS is not that large, we
6 * must dynamically map irqs<->event channels. The event channels
7 * interface with the rest of the kernel by defining a xen interrupt
8 * chip. When an event is received, it is mapped to an irq and sent
9 * through the normal interrupt processing path.
10 *
11 * There are four kinds of events which can be mapped to an event
12 * channel:
13 *
14 * 1. Inter-domain notifications. This includes all the virtual
15 * device events, since they're driven by front-ends in another domain
16 * (typically dom0).
17 * 2. VIRQs, typically used for timers. These are per-cpu events.
18 * 3. IPIs.
19 * 4. Hardware interrupts. Not supported at present.
20 *
21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
22 */
23
24#include <linux/linkage.h>
25#include <linux/interrupt.h>
26#include <linux/irq.h>
27#include <linux/module.h>
28#include <linux/string.h>
29
30#include <asm/ptrace.h>
31#include <asm/irq.h>
32#include <asm/sync_bitops.h>
33#include <asm/xen/hypercall.h>
34#include <asm/xen/hypervisor.h>
35
36#include <xen/xen-ops.h>
37#include <xen/events.h>
38#include <xen/interface/xen.h>
39#include <xen/interface/event_channel.h>
40
41/*
42 * This lock protects updates to the following mapping and reference-count
43 * arrays. The lock does not need to be acquired to read the mapping tables.
44 */
45static DEFINE_SPINLOCK(irq_mapping_update_lock);
46
47/* IRQ <-> VIRQ mapping. */
48static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
49
50/* IRQ <-> IPI mapping */
51static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
52
53/* Packed IRQ information: binding type, sub-type index, and event channel. */
54struct packed_irq
55{
56 unsigned short evtchn;
57 unsigned char index;
58 unsigned char type;
59};
60
61static struct packed_irq irq_info[NR_IRQS];
62
63/* Binding types. */
64enum {
65 IRQT_UNBOUND,
66 IRQT_PIRQ,
67 IRQT_VIRQ,
68 IRQT_IPI,
69 IRQT_EVTCHN
70};
71
72/* Convenient shorthand for packed representation of an unbound IRQ. */
73#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
74
75static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
76 [0 ... NR_EVENT_CHANNELS-1] = -1
77};
78static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
79static u8 cpu_evtchn[NR_EVENT_CHANNELS];
80
81/* Reference counts for bindings to IRQs. */
82static int irq_bindcount[NR_IRQS];
83
84/* Xen will never allocate port zero for any purpose. */
85#define VALID_EVTCHN(chn) ((chn) != 0)
86
87/*
88 * Force a proper event-channel callback from Xen after clearing the
89 * callback mask. We do this in a very simple manner, by making a call
90 * down into Xen. The pending flag will be checked by Xen on return.
91 */
92void force_evtchn_callback(void)
93{
94 (void)HYPERVISOR_xen_version(0, NULL);
95}
96EXPORT_SYMBOL_GPL(force_evtchn_callback);
97
98static struct irq_chip xen_dynamic_chip;
99
100/* Constructor for packed IRQ information. */
101static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
102{
103 return (struct packed_irq) { evtchn, index, type };
104}
105
106/*
107 * Accessors for packed IRQ information.
108 */
109static inline unsigned int evtchn_from_irq(int irq)
110{
111 return irq_info[irq].evtchn;
112}
113
114static inline unsigned int index_from_irq(int irq)
115{
116 return irq_info[irq].index;
117}
118
119static inline unsigned int type_from_irq(int irq)
120{
121 return irq_info[irq].type;
122}
123
124static inline unsigned long active_evtchns(unsigned int cpu,
125 struct shared_info *sh,
126 unsigned int idx)
127{
128 return (sh->evtchn_pending[idx] &
129 cpu_evtchn_mask[cpu][idx] &
130 ~sh->evtchn_mask[idx]);
131}
132
133static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
134{
135 int irq = evtchn_to_irq[chn];
136
137 BUG_ON(irq == -1);
138#ifdef CONFIG_SMP
139 irq_desc[irq].affinity = cpumask_of_cpu(cpu);
140#endif
141
142 __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
143 __set_bit(chn, cpu_evtchn_mask[cpu]);
144
145 cpu_evtchn[chn] = cpu;
146}
147
148static void init_evtchn_cpu_bindings(void)
149{
150#ifdef CONFIG_SMP
151 int i;
152 /* By default all event channels notify CPU#0. */
153 for (i = 0; i < NR_IRQS; i++)
154 irq_desc[i].affinity = cpumask_of_cpu(0);
155#endif
156
157 memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
158 memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
159}
160
161static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
162{
163 return cpu_evtchn[evtchn];
164}
165
166static inline void clear_evtchn(int port)
167{
168 struct shared_info *s = HYPERVISOR_shared_info;
169 sync_clear_bit(port, &s->evtchn_pending[0]);
170}
171
172static inline void set_evtchn(int port)
173{
174 struct shared_info *s = HYPERVISOR_shared_info;
175 sync_set_bit(port, &s->evtchn_pending[0]);
176}
177
178
179/**
180 * notify_remote_via_irq - send event to remote end of event channel via irq
181 * @irq: irq of event channel to send event to
182 *
183 * Unlike notify_remote_via_evtchn(), this is safe to use across
184 * save/restore. Notifications on a broken connection are silently
185 * dropped.
186 */
187void notify_remote_via_irq(int irq)
188{
189 int evtchn = evtchn_from_irq(irq);
190
191 if (VALID_EVTCHN(evtchn))
192 notify_remote_via_evtchn(evtchn);
193}
194EXPORT_SYMBOL_GPL(notify_remote_via_irq);
195
196static void mask_evtchn(int port)
197{
198 struct shared_info *s = HYPERVISOR_shared_info;
199 sync_set_bit(port, &s->evtchn_mask[0]);
200}
201
202static void unmask_evtchn(int port)
203{
204 struct shared_info *s = HYPERVISOR_shared_info;
205 unsigned int cpu = get_cpu();
206
207 BUG_ON(!irqs_disabled());
208
209 /* Slow path (hypercall) if this is a non-local port. */
210 if (unlikely(cpu != cpu_from_evtchn(port))) {
211 struct evtchn_unmask unmask = { .port = port };
212 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
213 } else {
214 struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
215
216 sync_clear_bit(port, &s->evtchn_mask[0]);
217
218 /*
219 * The following is basically the equivalent of
220 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
221 * the interrupt edge' if the channel is masked.
222 */
223 if (sync_test_bit(port, &s->evtchn_pending[0]) &&
224 !sync_test_and_set_bit(port / BITS_PER_LONG,
225 &vcpu_info->evtchn_pending_sel))
226 vcpu_info->evtchn_upcall_pending = 1;
227 }
228
229 put_cpu();
230}
231
232static int find_unbound_irq(void)
233{
234 int irq;
235
236 /* Only allocate from dynirq range */
237 for (irq = 0; irq < NR_IRQS; irq++)
238 if (irq_bindcount[irq] == 0)
239 break;
240
241 if (irq == NR_IRQS)
242 panic("No available IRQ to bind to: increase NR_IRQS!\n");
243
244 return irq;
245}
246
247int bind_evtchn_to_irq(unsigned int evtchn)
248{
249 int irq;
250
251 spin_lock(&irq_mapping_update_lock);
252
253 irq = evtchn_to_irq[evtchn];
254
255 if (irq == -1) {
256 irq = find_unbound_irq();
257
258 dynamic_irq_init(irq);
259 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
260 handle_level_irq, "event");
261
262 evtchn_to_irq[evtchn] = irq;
263 irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
264 }
265
266 irq_bindcount[irq]++;
267
268 spin_unlock(&irq_mapping_update_lock);
269
270 return irq;
271}
272EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
273
274static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
275{
276 struct evtchn_bind_ipi bind_ipi;
277 int evtchn, irq;
278
279 spin_lock(&irq_mapping_update_lock);
280
281 irq = per_cpu(ipi_to_irq, cpu)[ipi];
282 if (irq == -1) {
283 irq = find_unbound_irq();
284 if (irq < 0)
285 goto out;
286
287 dynamic_irq_init(irq);
288 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
289 handle_level_irq, "ipi");
290
291 bind_ipi.vcpu = cpu;
292 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
293 &bind_ipi) != 0)
294 BUG();
295 evtchn = bind_ipi.port;
296
297 evtchn_to_irq[evtchn] = irq;
298 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
299
300 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
301
302 bind_evtchn_to_cpu(evtchn, cpu);
303 }
304
305 irq_bindcount[irq]++;
306
307 out:
308 spin_unlock(&irq_mapping_update_lock);
309 return irq;
310}
311
312
313static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
314{
315 struct evtchn_bind_virq bind_virq;
316 int evtchn, irq;
317
318 spin_lock(&irq_mapping_update_lock);
319
320 irq = per_cpu(virq_to_irq, cpu)[virq];
321
322 if (irq == -1) {
323 bind_virq.virq = virq;
324 bind_virq.vcpu = cpu;
325 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
326 &bind_virq) != 0)
327 BUG();
328 evtchn = bind_virq.port;
329
330 irq = find_unbound_irq();
331
332 dynamic_irq_init(irq);
333 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
334 handle_level_irq, "virq");
335
336 evtchn_to_irq[evtchn] = irq;
337 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
338
339 per_cpu(virq_to_irq, cpu)[virq] = irq;
340
341 bind_evtchn_to_cpu(evtchn, cpu);
342 }
343
344 irq_bindcount[irq]++;
345
346 spin_unlock(&irq_mapping_update_lock);
347
348 return irq;
349}
350
351static void unbind_from_irq(unsigned int irq)
352{
353 struct evtchn_close close;
354 int evtchn = evtchn_from_irq(irq);
355
356 spin_lock(&irq_mapping_update_lock);
357
358 if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
359 close.port = evtchn;
360 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
361 BUG();
362
363 switch (type_from_irq(irq)) {
364 case IRQT_VIRQ:
365 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
366 [index_from_irq(irq)] = -1;
367 break;
368 default:
369 break;
370 }
371
372 /* Closed ports are implicitly re-bound to VCPU0. */
373 bind_evtchn_to_cpu(evtchn, 0);
374
375 evtchn_to_irq[evtchn] = -1;
376 irq_info[irq] = IRQ_UNBOUND;
377
378 dynamic_irq_init(irq);
379 }
380
381 spin_unlock(&irq_mapping_update_lock);
382}
383
384int bind_evtchn_to_irqhandler(unsigned int evtchn,
385 irq_handler_t handler,
386 unsigned long irqflags,
387 const char *devname, void *dev_id)
388{
389 unsigned int irq;
390 int retval;
391
392 irq = bind_evtchn_to_irq(evtchn);
393 retval = request_irq(irq, handler, irqflags, devname, dev_id);
394 if (retval != 0) {
395 unbind_from_irq(irq);
396 return retval;
397 }
398
399 return irq;
400}
401EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
402
403int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
404 irq_handler_t handler,
405 unsigned long irqflags, const char *devname, void *dev_id)
406{
407 unsigned int irq;
408 int retval;
409
410 irq = bind_virq_to_irq(virq, cpu);
411 retval = request_irq(irq, handler, irqflags, devname, dev_id);
412 if (retval != 0) {
413 unbind_from_irq(irq);
414 return retval;
415 }
416
417 return irq;
418}
419EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
420
421int bind_ipi_to_irqhandler(enum ipi_vector ipi,
422 unsigned int cpu,
423 irq_handler_t handler,
424 unsigned long irqflags,
425 const char *devname,
426 void *dev_id)
427{
428 int irq, retval;
429
430 irq = bind_ipi_to_irq(ipi, cpu);
431 if (irq < 0)
432 return irq;
433
434 retval = request_irq(irq, handler, irqflags, devname, dev_id);
435 if (retval != 0) {
436 unbind_from_irq(irq);
437 return retval;
438 }
439
440 return irq;
441}
442
443void unbind_from_irqhandler(unsigned int irq, void *dev_id)
444{
445 free_irq(irq, dev_id);
446 unbind_from_irq(irq);
447}
448EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
449
450void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
451{
452 int irq = per_cpu(ipi_to_irq, cpu)[vector];
453 BUG_ON(irq < 0);
454 notify_remote_via_irq(irq);
455}
456
457irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
458{
459 struct shared_info *sh = HYPERVISOR_shared_info;
460 int cpu = smp_processor_id();
461 int i;
462 unsigned long flags;
463 static DEFINE_SPINLOCK(debug_lock);
464
465 spin_lock_irqsave(&debug_lock, flags);
466
467 printk("vcpu %d\n ", cpu);
468
469 for_each_online_cpu(i) {
470 struct vcpu_info *v = per_cpu(xen_vcpu, i);
471 printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
472 (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
473 v->evtchn_upcall_pending,
474 v->evtchn_pending_sel);
475 }
476 printk("pending:\n ");
477 for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
478 printk("%08lx%s", sh->evtchn_pending[i],
479 i % 8 == 0 ? "\n " : " ");
480 printk("\nmasks:\n ");
481 for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
482 printk("%08lx%s", sh->evtchn_mask[i],
483 i % 8 == 0 ? "\n " : " ");
484
485 printk("\nunmasked:\n ");
486 for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
487 printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
488 i % 8 == 0 ? "\n " : " ");
489
490 printk("\npending list:\n");
491 for(i = 0; i < NR_EVENT_CHANNELS; i++) {
492 if (sync_test_bit(i, sh->evtchn_pending)) {
493 printk(" %d: event %d -> irq %d\n",
494 cpu_evtchn[i], i,
495 evtchn_to_irq[i]);
496 }
497 }
498
499 spin_unlock_irqrestore(&debug_lock, flags);
500
501 return IRQ_HANDLED;
502}
503
504
505/*
506 * Search the CPUs pending events bitmasks. For each one found, map
507 * the event number to an irq, and feed it into do_IRQ() for
508 * handling.
509 *
510 * Xen uses a two-level bitmap to speed searching. The first level is
511 * a bitset of words which contain pending event bits. The second
512 * level is a bitset of pending events themselves.
513 */
514void xen_evtchn_do_upcall(struct pt_regs *regs)
515{
516 int cpu = get_cpu();
517 struct shared_info *s = HYPERVISOR_shared_info;
518 struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
519 static DEFINE_PER_CPU(unsigned, nesting_count);
520 unsigned count;
521
522 do {
523 unsigned long pending_words;
524
525 vcpu_info->evtchn_upcall_pending = 0;
526
527 if (__get_cpu_var(nesting_count)++)
528 goto out;
529
530#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
531 /* Clear master flag /before/ clearing selector flag. */
532 rmb();
533#endif
534 pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
535 while (pending_words != 0) {
536 unsigned long pending_bits;
537 int word_idx = __ffs(pending_words);
538 pending_words &= ~(1UL << word_idx);
539
540 while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
541 int bit_idx = __ffs(pending_bits);
542 int port = (word_idx * BITS_PER_LONG) + bit_idx;
543 int irq = evtchn_to_irq[port];
544
545 if (irq != -1)
546 xen_do_IRQ(irq, regs);
547 }
548 }
549
550 BUG_ON(!irqs_disabled());
551
552 count = __get_cpu_var(nesting_count);
553 __get_cpu_var(nesting_count) = 0;
554 } while (count != 1);
555
556out:
557 put_cpu();
558}
559
560/* Rebind an evtchn so that it gets delivered to a specific cpu */
561static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
562{
563 struct evtchn_bind_vcpu bind_vcpu;
564 int evtchn = evtchn_from_irq(irq);
565
566 if (!VALID_EVTCHN(evtchn))
567 return;
568
569 /* Send future instances of this interrupt to the target vcpu. */
570 bind_vcpu.port = evtchn;
571 bind_vcpu.vcpu = tcpu;
572
573 /*
574 * If this fails, it usually just indicates that we're dealing with a
575 * virq or IPI channel, which don't actually need to be rebound. Ignore
576 * it, but don't do the xenlinux-level rebind in that case.
577 */
578 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
579 bind_evtchn_to_cpu(evtchn, tcpu);
580}
581
582
583static void set_affinity_irq(unsigned irq, cpumask_t dest)
584{
585 unsigned tcpu = first_cpu(dest);
586 rebind_irq_to_cpu(irq, tcpu);
587}
588
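/*
 * Re-inject an event: mark the channel pending while it is (temporarily)
 * masked, then unmask it only if it was not already masked, letting the
 * normal unmask path deliver the upcall.
 */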
589int resend_irq_on_evtchn(unsigned int irq)
590{
591 int masked, evtchn = evtchn_from_irq(irq);
592 struct shared_info *s = HYPERVISOR_shared_info;
593
594 if (!VALID_EVTCHN(evtchn))
595 return 1;
596
597 masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
598 sync_set_bit(evtchn, s->evtchn_pending);
599 if (!masked)
600 unmask_evtchn(evtchn);
601
602 return 1;
603}
604
605static void enable_dynirq(unsigned int irq)
606{
607 int evtchn = evtchn_from_irq(irq);
608
609 if (VALID_EVTCHN(evtchn))
610 unmask_evtchn(evtchn);
611}
612
613static void disable_dynirq(unsigned int irq)
614{
615 int evtchn = evtchn_from_irq(irq);
616
617 if (VALID_EVTCHN(evtchn))
618 mask_evtchn(evtchn);
619}
620
621static void ack_dynirq(unsigned int irq)
622{
623 int evtchn = evtchn_from_irq(irq);
624
625 move_native_irq(irq);
626
627 if (VALID_EVTCHN(evtchn))
628 clear_evtchn(evtchn);
629}
630
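/* Same mask/set-pending/unmask sequence as resend_irq_on_evtchn() above. */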
631static int retrigger_dynirq(unsigned int irq)
632{
633 int evtchn = evtchn_from_irq(irq);
634 struct shared_info *sh = HYPERVISOR_shared_info;
635 int ret = 0;
636
637 if (VALID_EVTCHN(evtchn)) {
638 int masked;
639
640 masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
641 sync_set_bit(evtchn, sh->evtchn_pending);
642 if (!masked)
643 unmask_evtchn(evtchn);
644 ret = 1;
645 }
646
647 return ret;
648}
649
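/*
 * The dynamic irq_chip maps generic-IRQ operations onto event-channel
 * primitives: mask/unmask toggle the evtchn mask bits, and ack clears
 * the pending bit once any pending irq migration has been handled.
 */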
650static struct irq_chip xen_dynamic_chip __read_mostly = {
651 .name = "xen-dyn",
652 .mask = disable_dynirq,
653 .unmask = enable_dynirq,
654 .ack = ack_dynirq,
655 .set_affinity = set_affinity_irq,
656 .retrigger = retrigger_dynirq,
657};
658
659void __init xen_init_IRQ(void)
660{
661 int i;
662
663 init_evtchn_cpu_bindings();
664
665 /* No event channels are 'live' right now. */
666 for (i = 0; i < NR_EVENT_CHANNELS; i++)
667 mask_evtchn(i);
668
669 /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
670 for (i = 0; i < NR_IRQS; i++)
671 irq_bindcount[i] = 0;
672
673 irq_ctx_init(smp_processor_id());
674}
diff --git a/drivers/xen/features.c b/drivers/xen/features.c
new file mode 100644
index 000000000000..0707714e40d6
--- /dev/null
+++ b/drivers/xen/features.c
@@ -0,0 +1,29 @@
1/******************************************************************************
2 * features.c
3 *
4 * Xen feature flags.
5 *
6 * Copyright (c) 2006, Ian Campbell, XenSource Inc.
7 */
8#include <linux/types.h>
9#include <linux/cache.h>
10#include <linux/module.h>
11#include <asm/xen/hypervisor.h>
12#include <xen/features.h>
13
14u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
15EXPORT_SYMBOL_GPL(xen_features);
16
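/*
 * Query each feature submap from the hypervisor and expand it to one
 * byte per feature, so a flag can be tested as a plain array lookup,
 * e.g. xen_features[XENFEAT_writable_page_tables].
 */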
17void xen_setup_features(void)
18{
19 struct xen_feature_info fi;
20 int i, j;
21
22 for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
23 fi.submap_idx = i;
24 if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
25 break;
26 for (j = 0; j < 32; j++)
27 xen_features[i * 32 + j] = !!(fi.submap & (1 << j));
28 }
29}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index d85dc6d41c2a..52b6b41b909d 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -439,24 +439,6 @@ static inline unsigned int max_nr_grant_frames(void)
 	return xen_max;
 }
 
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-		      unsigned long addr, void *data)
-{
-	unsigned long **frames = (unsigned long **)data;
-
-	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-	(*frames)++;
-	return 0;
-}
-
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-			unsigned long addr, void *data)
-{
-
-	set_pte_at(&init_mm, addr, pte, __pte(0));
-	return 0;
-}
-
 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 {
 	struct gnttab_setup_table setup;
@@ -470,7 +452,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 
 	setup.dom = DOMID_SELF;
 	setup.nr_frames = nr_gframes;
-	setup.frame_list = frames;
+	set_xen_guest_handle(setup.frame_list, frames);
 
 	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
 	if (rc == -ENOSYS) {
@@ -480,17 +462,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 
 	BUG_ON(rc || setup.status);
 
-	if (shared == NULL) {
-		struct vm_struct *area;
-		area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
-		BUG_ON(area == NULL);
-		shared = area->addr;
-	}
-	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-				 PAGE_SIZE * nr_gframes,
-				 map_pte_fn, &frames);
+	rc = arch_gnttab_map_shared(frames, nr_gframes, max_nr_grant_frames(),
+				    &shared);
 	BUG_ON(rc);
-	frames -= nr_gframes; /* adjust after map_pte_fn() */
 
 	kfree(frames);
 
@@ -506,10 +480,7 @@ static int gnttab_resume(void)
 
 static int gnttab_suspend(void)
 {
-	apply_to_page_range(&init_mm, (unsigned long)shared,
-			    PAGE_SIZE * nr_grant_frames,
-			    unmap_pte_fn, NULL);
-
+	arch_gnttab_unmap_shared(shared, nr_grant_frames);
 	return 0;
 }
 
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9fd2f70ab46d..0f86b0ff7879 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -399,7 +399,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 
 	*vaddr = NULL;
 
-	area = alloc_vm_area(PAGE_SIZE);
+	area = xen_alloc_vm_area(PAGE_SIZE);
 	if (!area)
 		return -ENOMEM;
 
@@ -409,7 +409,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 		BUG();
 
 	if (op.status != GNTST_okay) {
-		free_vm_area(area);
+		xen_free_vm_area(area);
 		xenbus_dev_fatal(dev, op.status,
 				 "mapping in shared page %d from domain %d",
 				 gnt_ref, dev->otherend_id);
@@ -508,7 +508,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 		BUG();
 
 	if (op.status == GNTST_okay)
-		free_vm_area(area);
+		xen_free_vm_area(area);
 	else
 		xenbus_dev_error(dev, op.status,
 				 "unmapping page at handle %d error %d",
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 4750de316ad3..57ceb5346b74 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -88,6 +88,16 @@ int xenbus_match(struct device *_dev, struct device_driver *_drv)
 	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
 }
 
+static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
+{
+	struct xenbus_device *dev = to_xenbus_device(_dev);
+
+	if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
+		return -ENOMEM;
+
+	return 0;
+}
+
 /* device/<type>/<id> => <type>-<id> */
 static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
 {
@@ -166,6 +176,7 @@ static struct xen_bus_type xenbus_frontend = {
 	.bus = {
 		.name     = "xen",
 		.match    = xenbus_match,
+		.uevent   = xenbus_uevent,
 		.probe    = xenbus_dev_probe,
 		.remove   = xenbus_dev_remove,
 		.shutdown = xenbus_dev_shutdown,
@@ -438,6 +449,12 @@ static ssize_t xendev_show_devtype(struct device *dev,
 }
 DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
 
+static ssize_t xendev_show_modalias(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
+}
+DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
 
 int xenbus_probe_node(struct xen_bus_type *bus,
 		      const char *type,
@@ -492,10 +509,16 @@ int xenbus_probe_node(struct xen_bus_type *bus,
 
 	err = device_create_file(&xendev->dev, &dev_attr_devtype);
 	if (err)
-		goto fail_remove_file;
+		goto fail_remove_nodename;
+
+	err = device_create_file(&xendev->dev, &dev_attr_modalias);
+	if (err)
+		goto fail_remove_devtype;
 
 	return 0;
-fail_remove_file:
+fail_remove_devtype:
+	device_remove_file(&xendev->dev, &dev_attr_devtype);
+fail_remove_nodename:
 	device_remove_file(&xendev->dev, &dev_attr_nodename);
 fail_unregister:
 	device_unregister(&xendev->dev);
@@ -846,6 +869,7 @@ static int is_disconnected_device(struct device *dev, void *data)
 {
 	struct xenbus_device *xendev = to_xenbus_device(dev);
 	struct device_driver *drv = data;
+	struct xenbus_driver *xendrv;
 
 	/*
 	 * A device with no driver will never connect. We care only about
@@ -858,7 +882,9 @@ static int is_disconnected_device(struct device *dev, void *data)
 	if (drv && (dev->driver != drv))
 		return 0;
 
-	return (xendev->state != XenbusStateConnected);
+	xendrv = to_xenbus_driver(dev->driver);
+	return (xendev->state != XenbusStateConnected ||
+		(xendrv->is_ready && !xendrv->is_ready(xendev)));
 }
 
 static int exists_disconnected_device(struct device_driver *drv)
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
new file mode 100644
index 000000000000..797cb4e31f07
--- /dev/null
+++ b/drivers/xen/xencomm.c
@@ -0,0 +1,232 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 *
16 * Copyright (C) IBM Corp. 2006
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
20
21#include <linux/gfp.h>
22#include <linux/mm.h>
23#include <asm/page.h>
24#include <xen/xencomm.h>
25#include <xen/interface/xen.h>
26#ifdef __ia64__
27#include <asm/xen/xencomm.h> /* for is_kern_addr() */
28#endif
29
30#ifdef HAVE_XEN_PLATFORM_COMPAT_H
31#include <xen/platform-compat.h>
32#endif
33
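/*
 * Record the physical address of every page backing [buffer, buffer +
 * bytes) in @desc, taking care of a partial first and last page.
 * Fails with -EINVAL if an address cannot be translated and -ENOSPC if
 * @desc has too few address slots.
 */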
34static int xencomm_init(struct xencomm_desc *desc,
35 void *buffer, unsigned long bytes)
36{
37 unsigned long recorded = 0;
38 int i = 0;
39
40 while ((recorded < bytes) && (i < desc->nr_addrs)) {
41 unsigned long vaddr = (unsigned long)buffer + recorded;
42 unsigned long paddr;
43 int offset;
44 int chunksz;
45
46 offset = vaddr % PAGE_SIZE; /* handle partial pages */
47 chunksz = min(PAGE_SIZE - offset, bytes - recorded);
48
49 paddr = xencomm_vtop(vaddr);
50 if (paddr == ~0UL) {
51 printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
52 __func__, vaddr);
53 return -EINVAL;
54 }
55
56 desc->address[i++] = paddr;
57 recorded += chunksz;
58 }
59
60 if (recorded < bytes) {
61 printk(KERN_DEBUG
62 "%s: could only translate %ld of %ld bytes\n",
63 __func__, recorded, bytes);
64 return -ENOSPC;
65 }
66
67 /* mark remaining addresses invalid (just for safety) */
68 while (i < desc->nr_addrs)
69 desc->address[i++] = XENCOMM_INVALID;
70
71 desc->magic = XENCOMM_MAGIC;
72
73 return 0;
74}
75
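/*
 * Size the descriptor for the worst case of one address slot per page
 * touched by the buffer.  If the descriptor itself could straddle a page
 * boundary, it is allocated from whole pages rather than the slab.
 */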
76static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
77 void *buffer, unsigned long bytes)
78{
79 struct xencomm_desc *desc;
80 unsigned long buffer_ulong = (unsigned long)buffer;
81 unsigned long start = buffer_ulong & PAGE_MASK;
82 unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
83 unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
84 unsigned long size = sizeof(*desc) +
85 sizeof(desc->address[0]) * nr_addrs;
86
87 /*
88 * The slab allocator returns at least a sizeof(void *)-aligned pointer.
89 * When sizeof(*desc) > sizeof(void *), struct xencomm_desc might cross
90 * a page boundary.
91 */
92 if (sizeof(*desc) > sizeof(void *)) {
93 unsigned long order = get_order(size);
94 desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
95 order);
96 if (desc == NULL)
97 return NULL;
98
99 desc->nr_addrs =
100 ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
101 sizeof(*desc->address);
102 } else {
103 desc = kmalloc(size, gfp_mask);
104 if (desc == NULL)
105 return NULL;
106
107 desc->nr_addrs = nr_addrs;
108 }
109 return desc;
110}
111
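/*
 * @desc is a physical-address handle; inline handles (flag bit set) were
 * never allocated, so only real descriptors are converted back with
 * __va() and freed.
 */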
112void xencomm_free(struct xencomm_handle *desc)
113{
114 if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
115 struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
116 if (sizeof(*desc__) > sizeof(void *)) {
117 unsigned long size = sizeof(*desc__) +
118 sizeof(desc__->address[0]) * desc__->nr_addrs;
119 unsigned long order = get_order(size);
120 free_pages((unsigned long)__va(desc), order);
121 } else
122 kfree(__va(desc));
123 }
124}
125
126static int xencomm_create(void *buffer, unsigned long bytes,
127 struct xencomm_desc **ret, gfp_t gfp_mask)
128{
129 struct xencomm_desc *desc;
130 int rc;
131
132 pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
133
134 if (bytes == 0) {
135 /* don't create a descriptor; Xen recognizes NULL. */
136 BUG_ON(buffer != NULL);
137 *ret = NULL;
138 return 0;
139 }
140
141 BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
142
143 desc = xencomm_alloc(gfp_mask, buffer, bytes);
144 if (!desc) {
145 printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
146 return -ENOMEM;
147 }
148
149 rc = xencomm_init(desc, buffer, bytes);
150 if (rc) {
151 printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
152 xencomm_free((struct xencomm_handle *)__pa(desc));
153 return rc;
154 }
155
156 *ret = desc;
157 return 0;
158}
159
160/* check that the address lies outside the VMALLOC region, i.e. is physically contiguous */
161static int is_phys_contiguous(unsigned long addr)
162{
163 if (!is_kernel_addr(addr))
164 return 0;
165
166 return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
167}
168
169static struct xencomm_handle *xencomm_create_inline(void *ptr)
170{
171 unsigned long paddr;
172
173 BUG_ON(!is_phys_contiguous((unsigned long)ptr));
174
175 paddr = (unsigned long)xencomm_pa(ptr);
176 BUG_ON(paddr & XENCOMM_INLINE_FLAG);
177 return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
178}
179
180/* "mini" routine, for stack-based communications: */
181static int xencomm_create_mini(void *buffer,
182 unsigned long bytes, struct xencomm_mini *xc_desc,
183 struct xencomm_desc **ret)
184{
185 int rc = 0;
186 struct xencomm_desc *desc;
187 BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
188
189 desc = (void *)xc_desc;
190
191 desc->nr_addrs = XENCOMM_MINI_ADDRS;
192
193 rc = xencomm_init(desc, buffer, bytes);
194 if (!rc)
195 *ret = desc;
196
197 return rc;
198}
199
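/*
 * Map a kernel buffer for use in a hypercall: physically contiguous
 * buffers get a cheap inline handle (their physical address with
 * XENCOMM_INLINE_FLAG set); everything else gets a page-list descriptor
 * allocated by xencomm_create().
 */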
200struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
201{
202 int rc;
203 struct xencomm_desc *desc;
204
205 if (is_phys_contiguous((unsigned long)ptr))
206 return xencomm_create_inline(ptr);
207
208 rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
209
210 if (rc || desc == NULL)
211 return NULL;
212
213 return xencomm_pa(desc);
214}
215
216struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
217 struct xencomm_mini *xc_desc)
218{
219 int rc;
220 struct xencomm_desc *desc = NULL;
221
222 if (is_phys_contiguous((unsigned long)ptr))
223 return xencomm_create_inline(ptr);
224
225 rc = xencomm_create_mini(ptr, bytes, xc_desc,
226 &desc);
227
228 if (rc)
229 return NULL;
230
231 return xencomm_pa(desc);
232}