Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/ambassador.c | 19
-rw-r--r--  drivers/char/hvc_vio.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 2
-rw-r--r--  drivers/dma/Kconfig | 2
-rw-r--r--  drivers/dma/mpc512x_dma.c | 187
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c | 56
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 32
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_cq.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c | 155
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 10
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 373
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 6
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 80
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 45
-rw-r--r--  drivers/infiniband/hw/qib/qib_mr.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 32
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 24
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 57
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 11
-rw-r--r--  drivers/infiniband/ulp/ipoib/Kconfig | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 51
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 62
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 392
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 46
-rw-r--r--  drivers/macintosh/macio_asic.c | 7
-rw-r--r--  drivers/macintosh/therm_pm72.c | 30
-rw-r--r--  drivers/mfd/sh_mobile_sdhi.c | 6
-rw-r--r--  drivers/mmc/card/Kconfig | 1
-rw-r--r--  drivers/mmc/core/Kconfig | 11
-rw-r--r--  drivers/mmc/core/bus.c | 8
-rw-r--r--  drivers/mmc/core/core.c | 206
-rw-r--r--  drivers/mmc/core/core.h | 9
-rw-r--r--  drivers/mmc/core/debugfs.c | 5
-rw-r--r--  drivers/mmc/core/host.c | 206
-rw-r--r--  drivers/mmc/core/host.h | 21
-rw-r--r--  drivers/mmc/core/mmc.c | 91
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 101
-rw-r--r--  drivers/mmc/core/mmc_ops.h | 1
-rw-r--r--  drivers/mmc/core/sd.c | 16
-rw-r--r--  drivers/mmc/core/sdio.c | 36
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 32
-rw-r--r--  drivers/mmc/host/Kconfig | 37
-rw-r--r--  drivers/mmc/host/Makefile | 3
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 80
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 1796
-rw-r--r--  drivers/mmc/host/dw_mmc.h | 168
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 53
-rw-r--r--  drivers/mmc/host/sdhci-dove.c | 70
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 161
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.h | 2
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 66
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 257
-rw-r--r--  drivers/mmc/host/sdhci.c | 45
-rw-r--r--  drivers/mmc/host/sdhci.h | 3
-rw-r--r--  drivers/mmc/host/tmio_mmc.c | 561
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 228
-rw-r--r--  drivers/net/Kconfig | 9
-rw-r--r--  drivers/net/bfin_mac.c | 74
-rw-r--r--  drivers/net/bfin_mac.h | 11
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 1
-rw-r--r--  drivers/net/bnx2x/bnx2x_dump.h | 988
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 22
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h | 220
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 70
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h | 74
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 5
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 15
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 11
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 328
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 59
-rw-r--r--  drivers/net/e1000/e1000_main.c | 35
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 19
-rw-r--r--  drivers/net/e1000e/82571.c | 77
-rw-r--r--  drivers/net/e1000e/e1000.h | 3
-rw-r--r--  drivers/net/e1000e/es2lan.c | 4
-rw-r--r--  drivers/net/e1000e/ethtool.c | 54
-rw-r--r--  drivers/net/e1000e/hw.h | 1
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 77
-rw-r--r--  drivers/net/e1000e/lib.c | 3
-rw-r--r--  drivers/net/e1000e/netdev.c | 53
-rw-r--r--  drivers/net/e1000e/phy.c | 40
-rw-r--r--  drivers/net/ehea/ehea.h | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 6
-rw-r--r--  drivers/net/fec.c | 248
-rw-r--r--  drivers/net/fec.h | 5
-rw-r--r--  drivers/net/forcedeth.c | 34
-rw-r--r--  drivers/net/hamradio/yam.c | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 21
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c | 749
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 142
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 169
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 91
-rw-r--r--  drivers/net/mlx4/alloc.c | 3
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 3
-rw-r--r--  drivers/net/mlx4/fw.c | 4
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/ppp_async.c | 10
-rw-r--r--  drivers/net/ppp_deflate.c | 9
-rw-r--r--  drivers/net/ppp_generic.c | 9
-rw-r--r--  drivers/net/ppp_mppe.c | 7
-rw-r--r--  drivers/net/ppp_synctty.c | 3
-rw-r--r--  drivers/net/qlcnic/qlcnic.h | 24
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c | 2
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c | 63
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 10
-rw-r--r--  drivers/net/r8169.c | 143
-rw-r--r--  drivers/net/sky2.c | 143
-rw-r--r--  drivers/net/sky2.h | 6
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/ps3/Makefile | 2
-rw-r--r--  drivers/rtc/class.c | 13
-rw-r--r--  drivers/rtc/interface.c | 574
-rw-r--r--  drivers/rtc/rtc-cmos.c | 3
-rw-r--r--  drivers/rtc/rtc-dev.c | 104
-rw-r--r--  drivers/rtc/rtc-lib.c | 28
-rw-r--r--  drivers/watchdog/hpwdt.c | 2
133 files changed, 7831 insertions(+), 3100 deletions(-)
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ffe9b655292e..9f47e8625266 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1926,8 +1926,9 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
   const struct firmware *fw;
   unsigned long start_address;
   const struct ihex_binrec *rec;
+  const char *errmsg = 0;
   int res;
 
   res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
   if (res) {
     PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
   /* First record contains just the start address */
   rec = (const struct ihex_binrec *)fw->data;
   if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
-    PRINTK (KERN_ERR, "Bad microcode data (no start record)");
-    return -EINVAL;
+    errmsg = "no start record";
+    goto fail;
   }
   start_address = be32_to_cpup((__be32 *)rec->data);
 
@@ -1950,12 +1951,12 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
     PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
             be16_to_cpu(rec->len));
     if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
-      PRINTK (KERN_ERR, "Bad microcode data (record too long)");
-      return -EINVAL;
+      errmsg = "record too long";
+      goto fail;
     }
     if (be16_to_cpu(rec->len) & 3) {
-      PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)");
-      return -EINVAL;
+      errmsg = "odd number of bytes";
+      goto fail;
     }
     res = loader_write(lb, dev, rec);
     if (res)
@@ -1970,6 +1971,10 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
   res = loader_start(lb, dev, start_address);
 
   return res;
+fail:
+  release_firmware(fw);
+  PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
+  return -EINVAL;
 }
 
 /********** give adapter parameters **********/
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 27370e99c66f..5e2f52b33327 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -39,7 +39,7 @@
 
 #include "hvc_console.h"
 
-char hvc_driver_name[] = "hvc_console";
+static const char hvc_driver_name[] = "hvc_console";
 
 static struct vio_device_id hvc_driver_table[] __devinitdata = {
 	{"serial", "hvterm1"},
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f4d334f2536e..320668f4c3aa 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1081,7 +1081,7 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
 {
 	struct die_args *args = data;
 
-	if (val != DIE_NMI)
+	if (val != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	/* Hack, if it's a memory or I/O error, ignore it. */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ee23592700a..ef138731c0ea 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -109,7 +109,7 @@ config FSL_DMA
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
-	depends on PPC_MPC512x
+	depends on PPC_MPC512x || PPC_MPC831x
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4e9cbf300594..59c270192ccc 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
  * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -70,6 +71,8 @@
 #define MPC_DMA_DMAES_SBE	(1 << 1)
 #define MPC_DMA_DMAES_DBE	(1 << 0)
 
+#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
+
 #define MPC_DMA_TSIZE_1	0x00
 #define MPC_DMA_TSIZE_2	0x01
 #define MPC_DMA_TSIZE_4	0x02
@@ -104,7 +107,10 @@ struct __attribute__ ((__packed__)) mpc_dma_regs {
 	/* 0x30 */
 	u32 dmahrsh;	/* DMA hw request status high(ch63~32) */
 	u32 dmahrsl;	/* DMA hardware request status low(ch31~0) */
-	u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
+	union {
+		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
+		u32 dmagpor;	/* (General purpose register on MPC8308) */
+	};
 	u32 dmailsa;	/* DMA interrupt low select AXE(ch31~0) */
 	/* 0x40 ~ 0xff */
 	u32 reserve0[48];	/* Reserved */
@@ -195,7 +201,9 @@ struct mpc_dma {
 	struct mpc_dma_regs __iomem *regs;
 	struct mpc_dma_tcd __iomem *tcd;
 	int irq;
+	int irq2;
 	uint error_status;
+	int is_mpc8308;
 
 	/* Lock for error_status field in this structure */
 	spinlock_t error_status_lock;
@@ -252,11 +260,13 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 		prev = mdesc;
 	}
 
-	prev->tcd->start = 0;
 	prev->tcd->int_maj = 1;
 
 	/* Send first descriptor in chain into hardware */
 	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+
+	if (first != prev)
+		mdma->tcd[cid].e_sg = 1;
 	out_8(&mdma->regs->dmassrt, cid);
 }
 
@@ -274,6 +284,9 @@ static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
 
 	spin_lock(&mchan->lock);
 
+	out_8(&mdma->regs->dmacint, ch + off);
+	out_8(&mdma->regs->dmacerr, ch + off);
+
 	/* Check error status */
 	if (es & (1 << ch))
 		list_for_each_entry(mdesc, &mchan->active, node)
@@ -302,36 +315,68 @@ static irqreturn_t mpc_dma_irq(int irq, void *data)
 	spin_unlock(&mdma->error_status_lock);
 
 	/* Handle interrupt on each channel */
-	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+	if (mdma->dma.chancnt > 32) {
+		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
 					in_be32(&mdma->regs->dmaerrh), 32);
+	}
 	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
 					in_be32(&mdma->regs->dmaerrl), 0);
 
-	/* Ack interrupt on all channels */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
 	/* Schedule tasklet */
 	tasklet_schedule(&mdma->tasklet);
 
 	return IRQ_HANDLED;
 }
 
-/* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+/* proccess completed descriptors */
+static void mpc_dma_process_completed(struct mpc_dma *mdma)
 {
-	struct mpc_dma *mdma = (void *)data;
 	dma_cookie_t last_cookie = 0;
 	struct mpc_dma_chan *mchan;
 	struct mpc_dma_desc *mdesc;
 	struct dma_async_tx_descriptor *desc;
 	unsigned long flags;
 	LIST_HEAD(list);
-	uint es;
 	int i;
 
+	for (i = 0; i < mdma->dma.chancnt; i++) {
+		mchan = &mdma->channels[i];
+
+		/* Get all completed descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		if (!list_empty(&mchan->completed))
+			list_splice_tail_init(&mchan->completed, &list);
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		if (list_empty(&list))
+			continue;
+
+		/* Execute callbacks and run dependencies */
+		list_for_each_entry(mdesc, &list, node) {
+			desc = &mdesc->desc;
+
+			if (desc->callback)
+				desc->callback(desc->callback_param);
+
+			last_cookie = desc->cookie;
+			dma_run_dependencies(desc);
+		}
+
+		/* Free descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		list_splice_tail_init(&list, &mchan->free);
+		mchan->completed_cookie = last_cookie;
+		spin_unlock_irqrestore(&mchan->lock, flags);
+	}
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+	struct mpc_dma *mdma = (void *)data;
+	unsigned long flags;
+	uint es;
+
 	spin_lock_irqsave(&mdma->error_status_lock, flags);
 	es = mdma->error_status;
 	mdma->error_status = 0;
@@ -370,35 +415,7 @@ static void mpc_dma_tasklet(unsigned long data)
 		dev_err(mdma->dma.dev, "- Destination Bus Error\n");
 	}
 
-	for (i = 0; i < mdma->dma.chancnt; i++) {
-		mchan = &mdma->channels[i];
-
-		/* Get all completed descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		if (!list_empty(&mchan->completed))
-			list_splice_tail_init(&mchan->completed, &list);
-		spin_unlock_irqrestore(&mchan->lock, flags);
-
-		if (list_empty(&list))
-			continue;
-
-		/* Execute callbacks and run dependencies */
-		list_for_each_entry(mdesc, &list, node) {
-			desc = &mdesc->desc;
-
-			if (desc->callback)
-				desc->callback(desc->callback_param);
-
-			last_cookie = desc->cookie;
-			dma_run_dependencies(desc);
-		}
-
-		/* Free descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		list_splice_tail_init(&list, &mchan->free);
-		mchan->completed_cookie = last_cookie;
-		spin_unlock_irqrestore(&mchan->lock, flags);
-	}
+	mpc_dma_process_completed(mdma);
 }
 
 /* Submit descriptor to hardware */
@@ -563,6 +580,7 @@ static struct dma_async_tx_descriptor *
 mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 					size_t len, unsigned long flags)
 {
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
 	struct mpc_dma_desc *mdesc = NULL;
 	struct mpc_dma_tcd *tcd;
@@ -577,8 +595,11 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	}
 	spin_unlock_irqrestore(&mchan->lock, iflags);
 
-	if (!mdesc)
+	if (!mdesc) {
+		/* try to free completed descriptors */
+		mpc_dma_process_completed(mdma);
 		return NULL;
+	}
 
 	mdesc->error = 0;
 	tcd = mdesc->tcd;
@@ -591,7 +612,8 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 		tcd->dsize = MPC_DMA_TSIZE_32;
 		tcd->soff = 32;
 		tcd->doff = 32;
-	} else if (IS_ALIGNED(src | dst | len, 16)) {
+	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
+		/* MPC8308 doesn't support 16 byte transfers */
 		tcd->ssize = MPC_DMA_TSIZE_16;
 		tcd->dsize = MPC_DMA_TSIZE_16;
 		tcd->soff = 16;
@@ -651,6 +673,15 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
 		return -EINVAL;
 	}
 
+	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
+		mdma->is_mpc8308 = 1;
+		mdma->irq2 = irq_of_parse_and_map(dn, 1);
+		if (mdma->irq2 == NO_IRQ) {
+			dev_err(dev, "Error mapping IRQ!\n");
+			return -EINVAL;
+		}
+	}
+
 	retval = of_address_to_resource(dn, 0, &res);
 	if (retval) {
 		dev_err(dev, "Error parsing memory region!\n");
@@ -681,11 +712,23 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
 		return -EINVAL;
 	}
 
+	if (mdma->is_mpc8308) {
+		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
+				DRV_NAME, mdma);
+		if (retval) {
+			dev_err(dev, "Error requesting IRQ2!\n");
+			return -EINVAL;
+		}
+	}
+
 	spin_lock_init(&mdma->error_status_lock);
 
 	dma = &mdma->dma;
 	dma->dev = dev;
-	dma->chancnt = MPC_DMA_CHANNELS;
+	if (!mdma->is_mpc8308)
+		dma->chancnt = MPC_DMA_CHANNELS;
+	else
+		dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
 	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
 	dma->device_issue_pending = mpc_dma_issue_pending;
@@ -721,26 +764,40 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
 	 * - Round-robin group arbitration,
 	 * - Round-robin channel arbitration.
 	 */
-	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
-				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
-
-	/* Disable hardware DMA requests */
-	out_be32(&mdma->regs->dmaerqh, 0);
-	out_be32(&mdma->regs->dmaerql, 0);
-
-	/* Disable error interrupts */
-	out_be32(&mdma->regs->dmaeeih, 0);
-	out_be32(&mdma->regs->dmaeeil, 0);
-
-	/* Clear interrupts status */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
-	/* Route interrupts to IPIC */
-	out_be32(&mdma->regs->dmaihsa, 0);
-	out_be32(&mdma->regs->dmailsa, 0);
+	if (!mdma->is_mpc8308) {
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+
+		/* Disable hardware DMA requests */
+		out_be32(&mdma->regs->dmaerqh, 0);
+		out_be32(&mdma->regs->dmaerql, 0);
+
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeih, 0);
+		out_be32(&mdma->regs->dmaeeil, 0);
+
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+		/* Route interrupts to IPIC */
+		out_be32(&mdma->regs->dmaihsa, 0);
+		out_be32(&mdma->regs->dmailsa, 0);
+	} else {
+		/* MPC8308 has 16 channels and lacks some registers */
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+		/* enable snooping */
+		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeil, 0);
+
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmaintl, 0xFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+	}
 
 	/* Register DMA engine */
 	dev_set_drvdata(dev, mdma);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b8740e..c3f5aca4ef00 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
+#ifdef notyet
 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
 	struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	setup.ovfl_mode = 1;
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
+#endif
 
 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49bdcc9..c5406da3f4cd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
 int iwch_post_zb_read(struct iwch_qp *qhp);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 					struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137181d7..1b4cd09f74dc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@ out:
 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_quiesce_tid(qhp->ep);
-	qhp->flags |= QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_resume_tid(qhp->ep);
-	qhp->flags &= ~QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
-			quiesce_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
-			quiesce_qp(qhp);
-	}
-	return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
-			resume_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
-			resume_qp(qhp);
-	}
-	return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cdb4337..cc600c2dd0b3 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -760,7 +760,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb2505ea1..20800900ef3f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
 	}
 }
 
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
-	union t4_wr *wqe;
-	struct sk_buff *skb;
-	u8 len16;
-
-	PDBG("%s enter\n", __func__);
-	skb = alloc_skb(40, GFP_KERNEL);
-	if (!skb) {
-		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
-		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
-	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
-	memset(wqe, 0, sizeof wqe->read);
-	wqe->read.r2 = cpu_to_be64(0);
-	wqe->read.stag_sink = cpu_to_be32(1);
-	wqe->read.to_sink_hi = cpu_to_be32(0);
-	wqe->read.to_sink_lo = cpu_to_be32(1);
-	wqe->read.stag_src = cpu_to_be32(1);
-	wqe->read.plen = cpu_to_be32(0);
-	wqe->read.to_src_hi = cpu_to_be32(0);
-	wqe->read.to_src_lo = cpu_to_be32(1);
-	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
-	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 			   gfp_t gfp)
 {
@@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
-	c4iw_init_wr_wait(&ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
@@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	if (qhp->attr.mpa_attr.initiator)
 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-	c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc1da76..b33f0457a1ff 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -530,9 +530,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	for (j = 0; j < 6; j++) {
 		if (!pdev->resource[j].start)
 			continue;
-		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
-			   j, (unsigned long long)pdev->resource[j].start,
-			   (unsigned long long)pdev->resource[j].end,
+		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+			   j, &pdev->resource[j],
 			   (unsigned long long)pci_resource_len(pdev, j));
 	}
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2fdf16..e8df155bc3b0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		cq->resize_buf = NULL;
 		cq->resize_umem = NULL;
 	} else {
+		struct mlx4_ib_cq_buf tmp_buf;
+		int tmp_cqe = 0;
+
 		spin_lock_irq(&cq->lock);
 		if (cq->resize_buf) {
 			mlx4_ib_cq_resize_copy_cqes(cq);
-			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			tmp_buf = cq->buf;
+			tmp_cqe = cq->ibcq.cqe;
 			cq->buf = cq->resize_buf->buf;
 			cq->ibcq.cqe = cq->resize_buf->cqe;
 
@@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 			cq->resize_buf = NULL;
 		}
 		spin_unlock_irq(&cq->lock);
+
+		if (tmp_cqe)
+			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
 	}
 
 	goto out;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd63b9e2..57ffa50f509e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659ff0b0..03a59534f59e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev,
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c0e95a..5a4c36484722 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -908,8 +908,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
 			    nesvnic->nic_index &&
 			    mc_index < max_pft_entries_avaiable) {
 				nes_debug(NES_DBG_NIC_RX,
-					  "mc_index=%d skipping nic_index=%d,\
-					  used for=%d \n", mc_index,
+					  "mc_index=%d skipping nic_index=%d, "
+					  "used for=%d \n", mc_index,
 					  nesvnic->nic_index,
 					  nesadapter->pft_mcast_map[mc_index]);
 				mc_index++;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 64c9e7d02d4a..73225eee3cc6 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@ struct qib_devdata {
 	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
 	void (*f_sdma_init_early)(struct qib_pportdata *);
 	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
-	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
 	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
 	u64 (*f_portcntr)(struct qib_pportdata *, u32);
 	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf880f98..5246aa486bbe 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
 		wc->head = next;
 
 		if (cq->notify == IB_CQ_NEXT_COMP ||
-		    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+		    (cq->notify == IB_CQ_SOLICITED &&
+		     (solicited || entry->status != IB_WC_SUCCESS))) {
 			cq->notify = IB_CQ_NONE;
 			cq->triggered++;
 			/*
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd193603fb1..23e584f4c36c 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
  */
 #define QIB_PIO_MAXIBHDR 128
 
+/*
+ * QIB_MAX_PKT_RCV is the max # if packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
 struct qlogic_ib_stats qib_stats;
 
 const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
  * Returns 1 if error was a CRC, else 0.
  * Needed for some chip's synthesized error counters.
  */
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
-			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
-			  struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+			  u32 ctxt, u32 eflags, u32 l, u32 etail,
+			  __le32 *rhf_addr, struct qib_message_header *rhdr)
 {
 	u32 ret = 0;
 
 	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
 		ret = 1;
+	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+		/* For TIDERR and RC QPs premptively schedule a NAK */
+		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+		struct qib_other_headers *ohdr = NULL;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		struct qib_qp *qp = NULL;
+		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		u16 lid = be16_to_cpu(hdr->lrh[1]);
+		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+		u32 qp_num;
+		u32 opcode;
+		u32 psn;
+		int diff;
+		unsigned long flags;
+
+		/* Sanity check packet */
+		if (tlen < 24)
+			goto drop;
+
+		if (lid < QIB_MULTICAST_LID_BASE) {
+			lid &= ~((1 << ppd->lmc) - 1);
+			if (unlikely(lid != ppd->lid))
+				goto drop;
+		}
+
+		/* Check for GRH */
+		if (lnh == QIB_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else if (lnh == QIB_LRH_GRH) {
+			u32 vtf;
+
+			ohdr = &hdr->u.l.oth;
+			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+				goto drop;
+			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+				goto drop;
+		} else
+			goto drop;
+
+		/* Get opcode and PSN from packet */
+		opcode = be32_to_cpu(ohdr->bth[0]);
+		opcode >>= 24;
+		psn = be32_to_cpu(ohdr->bth[2]);
+
+		/* Get the destination QP number. */
+		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+		if (qp_num != QIB_MULTICAST_QPN) {
+			int ruc_res;
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+
+			/*
+			 * Handle only RC QPs - for other QP types drop error
+			 * packet.
+			 */
+			spin_lock(&qp->r_lock);
+
+			/* Check for valid receive state. */
+			if (!(ib_qib_state_ops[qp->state] &
+			      QIB_PROCESS_RECV_OK)) {
+				ibp->n_pkt_drops++;
+				goto unlock;
+			}
+
+			switch (qp->ibqp.qp_type) {
+			case IB_QPT_RC:
+				spin_lock_irqsave(&qp->s_lock, flags);
+				ruc_res =
+					qib_ruc_check_hdr(
+						ibp, hdr,
+						lnh == QIB_LRH_GRH,
+						qp,
+						be32_to_cpu(ohdr->bth[0]));
+				if (ruc_res) {
+					spin_unlock_irqrestore(&qp->s_lock,
+							flags);
+					goto unlock;
+				}
+				spin_unlock_irqrestore(&qp->s_lock, flags);
+
+				/* Only deal with RDMA Writes for now */
+				if (opcode <
+				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+					diff = qib_cmp24(psn, qp->r_psn);
+					if (!qp->r_nak_state && diff >= 0) {
+						ibp->n_rc_seqnak++;
+						qp->r_nak_state =
+							IB_NAK_PSN_ERROR;
+						/* Use the expected PSN. */
+						qp->r_ack_psn = qp->r_psn;
+						/*
+						 * Wait to send the sequence
+						 * NAK until all packets
+						 * in the receive queue have
+						 * been processed.
+						 * Otherwise, we end up
+						 * propagating congestion.
+						 */
+						if (list_empty(&qp->rspwait)) {
+							qp->r_flags |=
+								QIB_R_RSP_NAK;
+							atomic_inc(
+								&qp->refcount);
+							list_add_tail(
+							 &qp->rspwait,
+							 &rcd->qp_wait_list);
+						}
+					} /* Out of sequence NAK */
+				} /* QP Request NAKs */
+				break;
+			case IB_QPT_SMI:
+			case IB_QPT_GSI:
+			case IB_QPT_UD:
+			case IB_QPT_UC:
+			default:
+				/* For now don't handle any other QP types */
+				break;
+			}
+
+unlock:
+			spin_unlock(&qp->r_lock);
+			/*
+			 * Notify qib_destroy_qp() if it is waiting
+			 * for us to finish.
+			 */
+			if (atomic_dec_and_test(&qp->refcount))
+				wake_up(&qp->wait);
+		} /* Unicast QP */
+	} /* Valid packet with TIDErr */
+
+drop:
 	return ret;
 }
 
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last && i <= 64; i += !last) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		 * packets; only qibhdrerr should be set.
 		 */
 		if (unlikely(eflags))
-			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
 					       etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ move_along:
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (i == QIB_MAX_PKT_RECV)
+			last = 1;
+
 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 		if (dd->flags & QIB_NODMA_RTAIL) {
 			u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@ move_along:
 		 */
 		lval = l;
 		if (!last && !(i & 0xf)) {
-			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 			updegr = 0;
 		}
 	}
@@ -444,7 +585,7 @@ bail:
 	 * if no packets were processed.
 	 */
 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
-	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 	return crcs;
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 79d9971aff1f..75bfad16c114 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 	/* find device (with ACTIVE ports) with fewest ctxts in use */
 	for (ndev = 0; ndev < devmax; ndev++) {
 		struct qib_devdata *dd = qib_lookup(ndev);
-		unsigned cused = 0, cfree = 0;
+		unsigned cused = 0, cfree = 0, pusable = 0;
 		if (!dd)
 			continue;
 		if (port && port <= dd->num_pports &&
 		    usable(dd->pport + port - 1))
-			dusable = 1;
+			pusable = 1;
 		else
 			for (i = 0; i < dd->num_pports; i++)
 				if (usable(dd->pport + i))
-					dusable++;
-		if (!dusable)
+					pusable++;
+		if (!pusable)
 			continue;
 		for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
 		     ctxt++)
@@ -1397,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 				cused++;
 			else
 				cfree++;
-		if (cfree && cused < inuse) {
+		if (pusable && cfree && cused < inuse) {
 			udd = dd;
 			inuse = cused;
 		}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29dbb9537..774dea897e9c 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
 }
 
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74e7392..127a0d5069f0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2297,7 +2297,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
 	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1) {
-		dd->qpn_mask = 0x3f;
+		dd->qpn_mask = 0x3e;
 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
 			dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2703,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
 }
 
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443b5335..dbbb0e85afe4 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
 
 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+static void serdes_7322_los_enable(struct qib_pportdata *, int);
+static int serdes_7322_init_old(struct qib_pportdata *);
+static int serdes_7322_init_new(struct qib_pportdata *);
 
 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
 
@@ -111,6 +114,21 @@ static ushort qib_singleport;
 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 
+/*
+ * Receive header queue sizes
+ */
+static unsigned qib_rcvhdrcnt;
+module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
+
+static unsigned qib_rcvhdrsize;
+module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
+
+static unsigned qib_rcvhdrentsize;
+module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
+
 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
 /* for read back, default index is ~5m copper cable */
 static char txselect_list[MAX_ATTEN_LEN] = "10";
@@ -544,6 +562,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
 #define H1_FORCE_VAL 8
@@ -604,6 +623,7 @@ struct qib_chippport_specific {
 	u8 ibmalfusesnap;
 	struct qib_qsfp_data qsfp_data;
 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
+	u8 bounced;
 };
 
 static struct {
@@ -1677,6 +1697,8 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
 		force_h1(ppd);
 		ppd->cpspec->qdr_reforce = 1;
+		if (!ppd->dd->cspec->r1)
+			serdes_7322_los_enable(ppd, 0);
 	} else if (ppd->cpspec->qdr_reforce &&
 		   (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
 		   (ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1714,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
 		adj_tx_serdes(ppd);
 
-	if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
-	    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
-		ppd->cpspec->qdr_dfe_on = 1;
-		ppd->cpspec->qdr_dfe_time = 0;
-		/* On link down, reenable QDR adaptation */
-		qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
-				    ppd->dd->cspec->r1 ?
-				    QDR_STATIC_ADAPT_DOWN_R1 :
-				    QDR_STATIC_ADAPT_DOWN);
+	if (ibclt != IB_7322_LT_STATE_LINKUP) {
+		u8 ltstate = qib_7322_phys_portstate(ibcst);
+		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
+					  LinkTrainingState);
+		if (!ppd->dd->cspec->r1 &&
+		    pibclt == IB_7322_LT_STATE_LINKUP &&
+		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+			/* If the link went down (but no into recovery,
+			 * turn LOS back on */
+			serdes_7322_los_enable(ppd, 1);
+		if (!ppd->cpspec->qdr_dfe_on &&
+		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+			ppd->cpspec->qdr_dfe_on = 1;
+			ppd->cpspec->qdr_dfe_time = 0;
+			/* On link down, reenable QDR adaptation */
+			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+					    ppd->dd->cspec->r1 ?
+					    QDR_STATIC_ADAPT_DOWN_R1 :
+					    QDR_STATIC_ADAPT_DOWN);
+			printk(KERN_INFO QIB_DRV_NAME
+				" IB%u:%u re-enabled QDR adaptation "
+				"ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+		}
 	}
 }
 
+static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
+
 /*
  * This is per-pport error handling.
  * will likely get it's own MSIx interrupt (one for each port,
@@ -1840,7 +1881,23 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
 			    IB_PHYSPORTSTATE_DISABLED)
 				qib_set_ib_7322_lstate(ppd, 0,
 				      QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-			else
+			else {
+				u32 lstate;
+				/*
+				 * We need the current logical link state before
+				 * lflags are set in handle_e_ibstatuschanged.
+				 */
+				lstate = qib_7322_iblink_state(ibcs);
+
+				if (IS_QMH(dd) && !ppd->cpspec->bounced &&
+				    ltstate == IB_PHYSPORTSTATE_LINKUP &&
+				    (lstate >= IB_PORT_INIT &&
+					lstate <= IB_PORT_ACTIVE)) {
+					ppd->cpspec->bounced = 1;
+					qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+						IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+				}
+
 				/*
 				 * Since going into a recovery state causes the link
 				 * state to go down and since recovery is transitory,
@@ -1854,6 +1911,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
 				    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
 				    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
 					qib_handle_e_ibstatuschanged(ppd, ibcs);
+			}
 		}
 	if (*msg && iserr)
 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2785,7 +2843,6 @@ static irqreturn_t qib_7322intr(int irq, void *data)
 			ctxtrbits &= ~rmask;
 			if (dd->rcd[i]) {
 				qib_kreceive(dd->rcd[i], NULL, &npkts);
-				adjust_rcv_timeout(dd->rcd[i], npkts);
 			}
 		}
 		rmask <<= 1;
@@ -2835,7 +2892,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
 
 	qib_kreceive(rcd, NULL, &npkts);
-	adjust_rcv_timeout(rcd, npkts);
 
 	return IRQ_HANDLED;
 }
@@ -3157,6 +3213,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
3157 case BOARD_QME7342: 3213 case BOARD_QME7342:
3158 n = "InfiniPath_QME7342"; 3214 n = "InfiniPath_QME7342";
3159 break; 3215 break;
3216 case 8:
3217 n = "InfiniPath_QME7362";
3218 dd->flags |= QIB_HAS_QSFP;
3219 break;
3160 case 15: 3220 case 15:
3161 n = "InfiniPath_QLE7342_TEST"; 3221 n = "InfiniPath_QLE7342_TEST";
3162 dd->flags |= QIB_HAS_QSFP; 3222 dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3535,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
3475 nchipctxts = qib_read_kreg32(dd, kr_contextcnt); 3535 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3476 dd->cspec->numctxts = nchipctxts; 3536 dd->cspec->numctxts = nchipctxts;
3477 if (qib_n_krcv_queues > 1 && dd->num_pports) { 3537 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3478 /*
3479 * Set the mask for which bits from the QPN are used
3480 * to select a context number.
3481 */
3482 dd->qpn_mask = 0x3f;
3483 dd->first_user_ctxt = NUM_IB_PORTS + 3538 dd->first_user_ctxt = NUM_IB_PORTS +
3484 (qib_n_krcv_queues - 1) * dd->num_pports; 3539 (qib_n_krcv_queues - 1) * dd->num_pports;
3485 if (dd->first_user_ctxt > nchipctxts) 3540 if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3585,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
3530 3585
3531 /* kr_rcvegrcnt changes based on the number of contexts enabled */ 3586 /* kr_rcvegrcnt changes based on the number of contexts enabled */
3532 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); 3587 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3533 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, 3588 if (qib_rcvhdrcnt)
3534 dd->num_pports > 1 ? 1024U : 2048U); 3589 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3590 else
3591 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3592 dd->num_pports > 1 ? 1024U : 2048U);
3535} 3593}
3536 3594
3537static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) 3595static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4060,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4002} 4060}
4003 4061
4004static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, 4062static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4005 u32 updegr, u32 egrhd) 4063 u32 updegr, u32 egrhd, u32 npkts)
4006{ 4064{
4065 /*
4066 * Need to write timeout register before updating rcvhdrhead to ensure
4067 * that the timer is enabled on reception of a packet.
4068 */
4069 if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4070 adjust_rcv_timeout(rcd, npkts);
4007 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); 4071 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4008 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); 4072 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4009 if (updegr) 4073 if (updegr)
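
The hunk above moves the receive-timeout update into qib_update_7322_usrhead() so the timeout register is written before the head-pointer write that re-arms the packet interrupt. A minimal sketch of that ordering constraint, assuming the driver helpers behave as named here (the register touched by adjust_rcv_timeout is an assumption):

    /* Sketch: program the interrupt timeout before rcvhdrhead re-arms it. */
    if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)     /* interrupt-count field set? */
            adjust_rcv_timeout(rcd, npkts);     /* scale timeout from npkts   */
    qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);

Done the other way around, a packet arriving between the two writes could fire with a stale timeout value.
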
@@ -5522,7 +5586,7 @@ static void qsfp_7322_event(struct work_struct *work)
5522 u64 now = get_jiffies_64(); 5586 u64 now = get_jiffies_64();
5523 if (time_after64(now, pwrup)) 5587 if (time_after64(now, pwrup))
5524 break; 5588 break;
5525 msleep(1); 5589 msleep(20);
5526 } 5590 }
5527 ret = qib_refresh_qsfp_cache(ppd, &qd->cache); 5591 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5528 /* 5592 /*
@@ -5579,6 +5643,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5579 u32 pidx, unit, port, deflt, h1; 5643 u32 pidx, unit, port, deflt, h1;
5580 unsigned long val; 5644 unsigned long val;
5581 int any = 0, seth1; 5645 int any = 0, seth1;
5646 int txdds_size;
5582 5647
5583 str = txselect_list; 5648 str = txselect_list;
5584 5649
@@ -5587,6 +5652,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5587 for (pidx = 0; pidx < dd->num_pports; ++pidx) 5652 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5588 dd->pport[pidx].cpspec->no_eep = deflt; 5653 dd->pport[pidx].cpspec->no_eep = deflt;
5589 5654
5655 txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
5656 if (IS_QME(dd) || IS_QMH(dd))
5657 txdds_size += TXDDS_MFG_SZ;
5658
5590 while (*nxt && nxt[1]) { 5659 while (*nxt && nxt[1]) {
5591 str = ++nxt; 5660 str = ++nxt;
5592 unit = simple_strtoul(str, &nxt, 0); 5661 unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5678,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5609 ; 5678 ;
5610 continue; 5679 continue;
5611 } 5680 }
5612 if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ) 5681 if (val >= txdds_size)
5613 continue; 5682 continue;
5614 seth1 = 0; 5683 seth1 = 0;
5615 h1 = 0; /* gcc thinks it might be used uninitted */ 5684 h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5661,10 +5730,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
5661 return -ENOSPC; 5730 return -ENOSPC;
5662 } 5731 }
5663 val = simple_strtoul(str, &n, 0); 5732 val = simple_strtoul(str, &n, 0);
5664 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { 5733 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
5734 TXDDS_MFG_SZ)) {
5665 printk(KERN_INFO QIB_DRV_NAME 5735 printk(KERN_INFO QIB_DRV_NAME
5666 "txselect_values must start with a number < %d\n", 5736 "txselect_values must start with a number < %d\n",
5667 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); 5737 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
5668 return -EINVAL; 5738 return -EINVAL;
5669 } 5739 }
5670 strcpy(txselect_list, str); 5740 strcpy(txselect_list, str);
@@ -5810,7 +5880,8 @@ static void write_7322_initregs(struct qib_devdata *dd)
5810 unsigned n, regno; 5880 unsigned n, regno;
5811 unsigned long flags; 5881 unsigned long flags;
5812 5882
5813 if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported) 5883 if (dd->n_krcv_queues < 2 ||
5884 !dd->pport[pidx].link_speed_supported)
5814 continue; 5885 continue;
5815 5886
5816 ppd = &dd->pport[pidx]; 5887 ppd = &dd->pport[pidx];
@@ -6097,8 +6168,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6097 ppd++; 6168 ppd++;
6098 } 6169 }
6099 6170
6100 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; 6171 dd->rcvhdrentsize = qib_rcvhdrentsize ?
6101 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; 6172 qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6173 dd->rcvhdrsize = qib_rcvhdrsize ?
6174 qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6102 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); 6175 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6103 6176
6104 /* we always allocate at least 2048 bytes for eager buffers */ 6177 /* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6568,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6495 /* make sure we see an updated copy next time around */ 6568 /* make sure we see an updated copy next time around */
6496 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); 6569 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6497 sleeps++; 6570 sleeps++;
6498 msleep(1); 6571 msleep(20);
6499 } 6572 }
6500 6573
6501 switch (which) { 6574 switch (which) {
@@ -6993,6 +7066,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
6993 { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ 7066 { 0, 1, 0, 12 }, /* QMH7342 backplane settings */
6994}; 7067};
6995 7068
7069static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7070 /* amp, pre, main, post */
7071 { 0, 0, 0, 0 }, /* QME7342 mfg settings */
7072 { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
7073};
7074
6996static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, 7075static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
6997 unsigned atten) 7076 unsigned atten)
6998{ 7077{
@@ -7066,6 +7145,16 @@ static void find_best_ent(struct qib_pportdata *ppd,
7066 *sdr_dds = &txdds_extra_sdr[idx]; 7145 *sdr_dds = &txdds_extra_sdr[idx];
7067 *ddr_dds = &txdds_extra_ddr[idx]; 7146 *ddr_dds = &txdds_extra_ddr[idx];
7068 *qdr_dds = &txdds_extra_qdr[idx]; 7147 *qdr_dds = &txdds_extra_qdr[idx];
7148 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7149 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7150 TXDDS_MFG_SZ)) {
7151 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7152 printk(KERN_INFO QIB_DRV_NAME
7153 " IB%u:%u use idx %u into txdds_mfg\n",
7154 ppd->dd->unit, ppd->port, idx);
7155 *sdr_dds = &txdds_extra_mfg[idx];
7156 *ddr_dds = &txdds_extra_mfg[idx];
7157 *qdr_dds = &txdds_extra_mfg[idx];
7069 } else { 7158 } else {
7070 /* this shouldn't happen, it's range checked */ 7159 /* this shouldn't happen, it's range checked */
7071 *sdr_dds = txdds_sdr + qib_long_atten; 7160 *sdr_dds = txdds_sdr + qib_long_atten;
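
find_best_ent() now recognizes a third tier of user indices for the manufacturing table. A hedged sketch of the tiered lookup; txdds_ddr stands in for the main-table base (a hypothetical name here, since only the extra/mfg tables appear in this hunk):

    static const struct txdds_ent *pick_txdds(unsigned idx)
    {
            if (idx < TXDDS_TABLE_SZ)
                    return &txdds_ddr[idx];             /* main table  */
            idx -= TXDDS_TABLE_SZ;
            if (idx < TXDDS_EXTRA_SZ)
                    return &txdds_extra_ddr[idx];       /* extra table */
            idx -= TXDDS_EXTRA_SZ;
            if (idx < TXDDS_MFG_SZ)
                    return &txdds_extra_mfg[idx];       /* mfg table   */
            return NULL;    /* callers range-check against txdds_size */
    }

This is why set_no_qsfp_atten() and setup_txselect() above widen their bounds checks to include TXDDS_MFG_SZ on QME/QMH boards.
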
@@ -7210,9 +7299,30 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7210 } 7299 }
7211} 7300}
7212 7301
7302static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7303{
7304 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7305 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
7306 ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
7307 if (enable)
7308 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7309 else
7310 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7311 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7312}
7313
7213static int serdes_7322_init(struct qib_pportdata *ppd) 7314static int serdes_7322_init(struct qib_pportdata *ppd)
7214{ 7315{
7215 u64 data; 7316 int ret = 0;
7317 if (ppd->dd->cspec->r1)
7318 ret = serdes_7322_init_old(ppd);
7319 else
7320 ret = serdes_7322_init_new(ppd);
7321 return ret;
7322}
7323
7324static int serdes_7322_init_old(struct qib_pportdata *ppd)
7325{
7216 u32 le_val; 7326 u32 le_val;
7217 7327
7218 /* 7328 /*
@@ -7270,11 +7380,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
7270 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ 7380 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7271 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ 7381 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7272 7382
7273 data = qib_read_kreg_port(ppd, krp_serdesctrl); 7383 serdes_7322_los_enable(ppd, 1);
7274 /* Turn off IB latency mode */
7275 data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
7276 qib_write_kreg_port(ppd, krp_serdesctrl, data |
7277 SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
7278 7384
7279 /* rxbistena; set 0 to avoid effects of it switch later */ 7385 /* rxbistena; set 0 to avoid effects of it switch later */
7280 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); 7386 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7420,205 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
7314 return 0; 7420 return 0;
7315} 7421}
7316 7422
7423static int serdes_7322_init_new(struct qib_pportdata *ppd)
7424{
7425 u64 tstart;
7426 u32 le_val, rxcaldone;
7427 int chan, chan_done = (1 << SERDES_CHANS) - 1;
7428
7429 /*
7430 * Initialize the Tx DDS tables. Also done on every QSFP event,
7431 * for adapters with QSFP
7432 */
7433 init_txdds_table(ppd, 0);
7434
7435 /* Clear cmode-override, may be set from older driver */
7436 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7437
7438 /* ensure no tx overrides from earlier driver loads */
7439 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7440 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7441 reset_tx_deemphasis_override));
7442
7443 /* START OF LSI SUGGESTED SERDES BRINGUP */
7444 /* Reset - Calibration Setup */
7445 /* Stop DFE adaptation */
7446 ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7447 /* Disable LE1 */
7448 ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7449 /* Disable autoadapt for LE1 */
7450 ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7451 /* Disable LE2 */
7452 ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7453 /* Disable VGA */
7454 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7455 /* Disable AFE Offset Cancel */
7456 ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7457 /* Disable Timing Loop */
7458 ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7459 /* Disable Frequency Loop */
7460 ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7461 /* Disable Baseline Wander Correction */
7462 ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7463 /* Disable RX Calibration */
7464 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7465 /* Disable RX Offset Calibration */
7466 ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7467 /* Select BB CDR */
7468 ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7469 /* CDR Step Size */
7470 ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7471 /* Enable phase Calibration */
7472 ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7473 /* DFE Bandwidth [2:14-12] */
7474 ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7475 /* DFE Config (4 taps only) */
7476 ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7477 /* Gain Loop Bandwidth */
7478 if (!ppd->dd->cspec->r1) {
7479 ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7480 ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7481 } else {
7482 ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7483 }
7484 /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
7485 /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
7486 /* Data Rate Select [5:7-6] (leave as default) */
7487 /* RX Parallel Word Width [3:10-8] (leave as default) */
7488
7489 /* RX RESET */
7490 /* Single- or Multi-channel reset */
7491 /* RX Analog reset */
7492 /* RX Digital reset */
7493 ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7494 msleep(20);
7495 /* RX Analog reset */
7496 ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7497 msleep(20);
7498 /* RX Digital reset */
7499 ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7500 msleep(20);
7501
7502 /* setup LoS params; these are subsystem, so chan == 5 */
7503 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7504 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7505 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7506 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7507 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7508
7509 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7510 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7511 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7512 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7513 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7514
7515 /* LoS filter select enabled */
7516 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7517
7518 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7519 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7520 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7521 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7522
7523 /* Turn on LOS on initial SERDES init */
7524 serdes_7322_los_enable(ppd, 1);
7525 /* FLoop LOS gate: PPM filter enabled */
7526 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7527
7528 /* RX LATCH CALIBRATION */
7529 /* Enable Eyefinder Phase Calibration latch */
7530 ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
7531 /* Enable RX Offset Calibration latch */
7532 ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
7533 msleep(20);
7534 /* Start Calibration */
7535 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
7536 tstart = get_jiffies_64();
7537 while (chan_done &&
7538 !time_after64(get_jiffies_64(), tstart + msecs_to_jiffies(500))) {
7539 msleep(20);
7540 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7541 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7542 (chan + (chan >> 1)),
7543 25, 0, 0);
7544 if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
7545 (~chan_done & (1 << chan)) == 0)
7546 chan_done &= ~(1 << chan);
7547 }
7548 }
7549 if (chan_done) {
7550 printk(KERN_INFO QIB_DRV_NAME
7551 " Serdes %d calibration not done after .5 sec: 0x%x\n",
7552 IBSD(ppd->hw_pidx), chan_done);
7553 } else {
7554 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7555 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7556 (chan + (chan >> 1)),
7557 25, 0, 0);
7558 if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
7559 printk(KERN_INFO QIB_DRV_NAME
7560 " Serdes %d chan %d calibration "
7561 "failed\n", IBSD(ppd->hw_pidx), chan);
7562 }
7563 }
7564
7565 /* Turn off Calibration */
7566 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7567 msleep(20);
7568
7569 /* BRING RX UP */
7570 /* Set LE2 value (May be overridden in qsfp_7322_event) */
7571 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7572 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7573 /* Set LE2 Loop bandwidth */
7574 ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
7575 /* Enable LE2 */
7576 ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
7577 msleep(20);
7578 /* Enable H0 only */
7579 ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
7580 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7581 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7582 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7583 /* Enable VGA */
7584 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7585 msleep(20);
7586 /* Set Frequency Loop Bandwidth */
7587 ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
7588 /* Enable Frequency Loop */
7589 ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
7590 /* Set Timing Loop Bandwidth */
7591 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7592 /* Enable Timing Loop */
7593 ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
7594 msleep(50);
7595 /* Enable DFE
7596 * Set receive adaptation mode. SDR and DDR adaptation are
7597 * always on, and QDR is initially enabled; later disabled.
7598 */
7599 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7600 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7601 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7602 ppd->dd->cspec->r1 ?
7603 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7604 ppd->cpspec->qdr_dfe_on = 1;
7605 /* Disable LE1 */
7606 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
7607 /* Disable auto adapt for LE1 */
7608 ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
7609 msleep(20);
7610 /* Enable AFE Offset Cancel */
7611 ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
7612 /* Enable Baseline Wander Correction */
7613 ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
7614 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7615 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7616 /* VGA output common mode */
7617 ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
7618
7619 return 0;
7620}
7621
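
The calibration wait in serdes_7322_init_new() is a bounded poll: the clock must be re-sampled on every pass so the 500 ms budget can actually expire (comparing tstart against tstart plus a delta never times out, hence the get_jiffies_64() in the loop condition above). A generic sketch of the idiom, with sample_status() a hypothetical hardware read:

    u64 deadline = get_jiffies_64() + msecs_to_jiffies(500);
    int done = 0;

    while (!done && !time_after64(get_jiffies_64(), deadline)) {
            msleep(20);                 /* let the hardware make progress */
            done = sample_status();     /* hypothetical per-channel read  */
    }
    if (!done)
            pr_info("calibration not done after 500 ms\n");
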
7317/* start adjust QMH serdes parameters */ 7622/* start adjust QMH serdes parameters */
7318 7623
7319static void set_man_code(struct qib_pportdata *ppd, int chan, int code) 7624static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f3b503936043..7896afbb9ce8 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -92,9 +92,11 @@ unsigned long *qib_cpulist;
92/* set number of contexts we'll actually use */ 92/* set number of contexts we'll actually use */
93void qib_set_ctxtcnt(struct qib_devdata *dd) 93void qib_set_ctxtcnt(struct qib_devdata *dd)
94{ 94{
95 if (!qib_cfgctxts) 95 if (!qib_cfgctxts) {
96 dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); 96 dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
97 else if (qib_cfgctxts < dd->num_pports) 97 if (dd->cfgctxts > dd->ctxtcnt)
98 dd->cfgctxts = dd->ctxtcnt;
99 } else if (qib_cfgctxts < dd->num_pports)
98 dd->cfgctxts = dd->ctxtcnt; 100 dd->cfgctxts = dd->ctxtcnt;
99 else if (qib_cfgctxts <= dd->ctxtcnt) 101 else if (qib_cfgctxts <= dd->ctxtcnt)
100 dd->cfgctxts = qib_cfgctxts; 102 dd->cfgctxts = qib_cfgctxts;
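
The qib_set_ctxtcnt() change clamps the auto-sized context count (first user context plus online CPUs) to what the chip actually implements; previously only explicit module-parameter values were range-checked. The resulting policy, sketched with hypothetical local names:

    if (!requested) {                       /* auto-size from CPU count    */
            cfg = first_user_ctxt + num_online_cpus();
            if (cfg > chip_ctxts)
                    cfg = chip_ctxts;       /* never exceed hardware       */
    } else if (requested < num_ports) {
            cfg = chip_ctxts;               /* too few to cover the ports  */
    } else if (requested <= chip_ctxts) {
            cfg = requested;
    }
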
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 54a40828a106..a693c56ec8a6 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -131,7 +131,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
131 /* start a 75msec timer to clear symbol errors */ 131 /* start a 75msec timer to clear symbol errors */
132 mod_timer(&ppd->symerr_clear_timer, 132 mod_timer(&ppd->symerr_clear_timer,
133 msecs_to_jiffies(75)); 133 msecs_to_jiffies(75));
134 } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) { 134 } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
135 !(ppd->lflags & QIBL_LINKACTIVE)) {
135 /* active, but not active deferred */ 136 /* active, but not active deferred */
136 qib_hol_up(ppd); /* useful only for 6120 now */ 137 qib_hol_up(ppd); /* useful only for 6120 now */
137 *ppd->statusp |= 138 *ppd->statusp |=
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb153d57..8fd19a47df0c 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
136 struct qib_mregion *mr; 136 struct qib_mregion *mr;
137 unsigned n, m; 137 unsigned n, m;
138 size_t off; 138 size_t off;
139 int ret = 0;
140 unsigned long flags; 139 unsigned long flags;
141 140
142 /* 141 /*
@@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
152 if (!dev->dma_mr) 151 if (!dev->dma_mr)
153 goto bail; 152 goto bail;
154 atomic_inc(&dev->dma_mr->refcount); 153 atomic_inc(&dev->dma_mr->refcount);
154 spin_unlock_irqrestore(&rkt->lock, flags);
155
155 isge->mr = dev->dma_mr; 156 isge->mr = dev->dma_mr;
156 isge->vaddr = (void *) sge->addr; 157 isge->vaddr = (void *) sge->addr;
157 isge->length = sge->length; 158 isge->length = sge->length;
@@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
170 off + sge->length > mr->length || 171 off + sge->length > mr->length ||
171 (mr->access_flags & acc) != acc)) 172 (mr->access_flags & acc) != acc))
172 goto bail; 173 goto bail;
174 atomic_inc(&mr->refcount);
175 spin_unlock_irqrestore(&rkt->lock, flags);
173 176
174 off += mr->offset; 177 off += mr->offset;
175 m = 0; 178 if (mr->page_shift) {
176 n = 0; 179 /*
177 while (off >= mr->map[m]->segs[n].length) { 180 * page sizes are a uniform power of 2, so no loop is necessary;
178 off -= mr->map[m]->segs[n].length; 181 * entries_spanned_by_off is the number of times the loop below
179 n++; 182 * would have executed.
180 if (n >= QIB_SEGSZ) { 183 */
181 m++; 184 size_t entries_spanned_by_off;
182 n = 0; 185
186 entries_spanned_by_off = off >> mr->page_shift;
187 off -= (entries_spanned_by_off << mr->page_shift);
188 m = entries_spanned_by_off/QIB_SEGSZ;
189 n = entries_spanned_by_off%QIB_SEGSZ;
190 } else {
191 m = 0;
192 n = 0;
193 while (off >= mr->map[m]->segs[n].length) {
194 off -= mr->map[m]->segs[n].length;
195 n++;
196 if (n >= QIB_SEGSZ) {
197 m++;
198 n = 0;
199 }
183 } 200 }
184 } 201 }
185 atomic_inc(&mr->refcount);
186 isge->mr = mr; 202 isge->mr = mr;
187 isge->vaddr = mr->map[m]->segs[n].vaddr + off; 203 isge->vaddr = mr->map[m]->segs[n].vaddr + off;
188 isge->length = mr->map[m]->segs[n].length - off; 204 isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
190 isge->m = m; 206 isge->m = m;
191 isge->n = n; 207 isge->n = n;
192ok: 208ok:
193 ret = 1; 209 return 1;
194bail: 210bail:
195 spin_unlock_irqrestore(&rkt->lock, flags); 211 spin_unlock_irqrestore(&rkt->lock, flags);
196 return ret; 212 return 0;
197} 213}
198 214
199/** 215/**
@@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
214 struct qib_mregion *mr; 230 struct qib_mregion *mr;
215 unsigned n, m; 231 unsigned n, m;
216 size_t off; 232 size_t off;
217 int ret = 0;
218 unsigned long flags; 233 unsigned long flags;
219 234
220 /* 235 /*
@@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
231 if (!dev->dma_mr) 246 if (!dev->dma_mr)
232 goto bail; 247 goto bail;
233 atomic_inc(&dev->dma_mr->refcount); 248 atomic_inc(&dev->dma_mr->refcount);
249 spin_unlock_irqrestore(&rkt->lock, flags);
250
234 sge->mr = dev->dma_mr; 251 sge->mr = dev->dma_mr;
235 sge->vaddr = (void *) vaddr; 252 sge->vaddr = (void *) vaddr;
236 sge->length = len; 253 sge->length = len;
@@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
248 if (unlikely(vaddr < mr->iova || off + len > mr->length || 265 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
249 (mr->access_flags & acc) == 0)) 266 (mr->access_flags & acc) == 0))
250 goto bail; 267 goto bail;
268 atomic_inc(&mr->refcount);
269 spin_unlock_irqrestore(&rkt->lock, flags);
251 270
252 off += mr->offset; 271 off += mr->offset;
253 m = 0; 272 if (mr->page_shift) {
254 n = 0; 273 /*
255 while (off >= mr->map[m]->segs[n].length) { 274 * page sizes are a uniform power of 2, so no loop is necessary;
256 off -= mr->map[m]->segs[n].length; 275 * entries_spanned_by_off is the number of times the loop below
257 n++; 276 * would have executed.
258 if (n >= QIB_SEGSZ) { 277 */
259 m++; 278 size_t entries_spanned_by_off;
260 n = 0; 279
280 entries_spanned_by_off = off >> mr->page_shift;
281 off -= (entries_spanned_by_off << mr->page_shift);
282 m = entries_spanned_by_off/QIB_SEGSZ;
283 n = entries_spanned_by_off%QIB_SEGSZ;
284 } else {
285 m = 0;
286 n = 0;
287 while (off >= mr->map[m]->segs[n].length) {
288 off -= mr->map[m]->segs[n].length;
289 n++;
290 if (n >= QIB_SEGSZ) {
291 m++;
292 n = 0;
293 }
261 } 294 }
262 } 295 }
263 atomic_inc(&mr->refcount);
264 sge->mr = mr; 296 sge->mr = mr;
265 sge->vaddr = mr->map[m]->segs[n].vaddr + off; 297 sge->vaddr = mr->map[m]->segs[n].vaddr + off;
266 sge->length = mr->map[m]->segs[n].length - off; 298 sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
268 sge->m = m; 300 sge->m = m;
269 sge->n = n; 301 sge->n = n;
270ok: 302ok:
271 ret = 1; 303 return 1;
272bail: 304bail:
273 spin_unlock_irqrestore(&rkt->lock, flags); 305 spin_unlock_irqrestore(&rkt->lock, flags);
274 return ret; 306 return 0;
275} 307}
276 308
277/* 309/*
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f3a8f0..5ad224e4a38b 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -668,8 +668,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
668 lid = be16_to_cpu(pip->lid); 668 lid = be16_to_cpu(pip->lid);
669 /* Must be a valid unicast LID address. */ 669 /* Must be a valid unicast LID address. */
670 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) 670 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
671 goto err; 671 smp->status |= IB_SMP_INVALID_FIELD;
672 if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { 672 else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
673 if (ppd->lid != lid) 673 if (ppd->lid != lid)
674 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); 674 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
675 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) 675 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
683 msl = pip->neighbormtu_mastersmsl & 0xF; 683 msl = pip->neighbormtu_mastersmsl & 0xF;
684 /* Must be a valid unicast LID address. */ 684 /* Must be a valid unicast LID address. */
685 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) 685 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
686 goto err; 686 smp->status |= IB_SMP_INVALID_FIELD;
687 if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { 687 else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
688 spin_lock_irqsave(&ibp->lock, flags); 688 spin_lock_irqsave(&ibp->lock, flags);
689 if (ibp->sm_ah) { 689 if (ibp->sm_ah) {
690 if (smlid != ibp->sm_lid) 690 if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
707 if (lwe == 0xFF) 707 if (lwe == 0xFF)
708 lwe = ppd->link_width_supported; 708 lwe = ppd->link_width_supported;
709 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) 709 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
710 goto err; 710 smp->status |= IB_SMP_INVALID_FIELD;
711 set_link_width_enabled(ppd, lwe); 711 else if (lwe != ppd->link_width_enabled)
712 set_link_width_enabled(ppd, lwe);
712 } 713 }
713 714
714 lse = pip->linkspeedactive_enabled & 0xF; 715 lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
721 if (lse == 15) 722 if (lse == 15)
722 lse = ppd->link_speed_supported; 723 lse = ppd->link_speed_supported;
723 else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) 724 else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
724 goto err; 725 smp->status |= IB_SMP_INVALID_FIELD;
725 set_link_speed_enabled(ppd, lse); 726 else if (lse != ppd->link_speed_enabled)
727 set_link_speed_enabled(ppd, lse);
726 } 728 }
727 729
728 /* Set link down default state. */ 730 /* Set link down default state. */
@@ -738,7 +740,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
738 IB_LINKINITCMD_POLL); 740 IB_LINKINITCMD_POLL);
739 break; 741 break;
740 default: 742 default:
741 goto err; 743 smp->status |= IB_SMP_INVALID_FIELD;
742 } 744 }
743 745
744 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; 746 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
748 750
749 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); 751 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
750 if (mtu == -1) 752 if (mtu == -1)
751 goto err; 753 smp->status |= IB_SMP_INVALID_FIELD;
752 qib_set_mtu(ppd, mtu); 754 else
755 qib_set_mtu(ppd, mtu);
753 756
754 /* Set operational VLs */ 757 /* Set operational VLs */
755 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; 758 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
756 if (vls) { 759 if (vls) {
757 if (vls > ppd->vls_supported) 760 if (vls > ppd->vls_supported)
758 goto err; 761 smp->status |= IB_SMP_INVALID_FIELD;
759 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); 762 else
763 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
760 } 764 }
761 765
762 if (pip->mkey_violations == 0) 766 if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
770 774
771 ore = pip->localphyerrors_overrunerrors; 775 ore = pip->localphyerrors_overrunerrors;
772 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) 776 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
773 goto err; 777 smp->status |= IB_SMP_INVALID_FIELD;
774 778
775 if (set_overrunthreshold(ppd, (ore & 0xF))) 779 if (set_overrunthreshold(ppd, (ore & 0xF)))
776 goto err; 780 smp->status |= IB_SMP_INVALID_FIELD;
777 781
778 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; 782 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
779 783
@@ -792,7 +796,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
792 state = pip->linkspeed_portstate & 0xF; 796 state = pip->linkspeed_portstate & 0xF;
793 lstate = (pip->portphysstate_linkdown >> 4) & 0xF; 797 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
794 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) 798 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
795 goto err; 799 smp->status |= IB_SMP_INVALID_FIELD;
796 800
797 /* 801 /*
798 * Only state changes of DOWN, ARM, and ACTIVE are valid 802 * Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
812 lstate = QIB_IB_LINKDOWN; 816 lstate = QIB_IB_LINKDOWN;
813 else if (lstate == 3) 817 else if (lstate == 3)
814 lstate = QIB_IB_LINKDOWN_DISABLE; 818 lstate = QIB_IB_LINKDOWN_DISABLE;
815 else 819 else {
816 goto err; 820 smp->status |= IB_SMP_INVALID_FIELD;
821 break;
822 }
817 spin_lock_irqsave(&ppd->lflags_lock, flags); 823 spin_lock_irqsave(&ppd->lflags_lock, flags);
818 ppd->lflags &= ~QIBL_LINKV; 824 ppd->lflags &= ~QIBL_LINKV;
819 spin_unlock_irqrestore(&ppd->lflags_lock, flags); 825 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
835 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); 841 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
836 break; 842 break;
837 default: 843 default:
838 /* XXX We have already partially updated our state! */ 844 smp->status |= IB_SMP_INVALID_FIELD;
839 goto err;
840 } 845 }
841 846
842 ret = subn_get_portinfo(smp, ibdev, port); 847 ret = subn_get_portinfo(smp, ibdev, port);
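
subn_set_portinfo() now records IB_SMP_INVALID_FIELD per bad field and keeps applying the rest, instead of `goto err` after a partial update (the old XXX comment flagged exactly that hazard). The shape of the pattern, with hypothetical validate/apply helpers standing in for the real field handlers:

    /* Validate-then-apply, accumulating status instead of aborting. */
    if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
            smp->status |= IB_SMP_INVALID_FIELD;
    else
            apply_lid(ppd, lid);                    /* hypothetical */

    if (mtu == -1)
            smp->status |= IB_SMP_INVALID_FIELD;
    else
            qib_set_mtu(ppd, mtu);
    /* ...every field handled the same way; the GetResp carries status. */
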
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 5f95f0f6385d..08944e2ee334 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -39,7 +39,6 @@
39/* Fast memory region */ 39/* Fast memory region */
40struct qib_fmr { 40struct qib_fmr {
41 struct ib_fmr ibfmr; 41 struct ib_fmr ibfmr;
42 u8 page_shift;
43 struct qib_mregion mr; /* must be last */ 42 struct qib_mregion mr; /* must be last */
44}; 43};
45 44
@@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
107 goto bail; 106 goto bail;
108 } 107 }
109 mr->mr.mapsz = m; 108 mr->mr.mapsz = m;
109 mr->mr.page_shift = 0;
110 mr->mr.max_segs = count; 110 mr->mr.max_segs = count;
111 111
112 /* 112 /*
@@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
231 mr->mr.access_flags = mr_access_flags; 231 mr->mr.access_flags = mr_access_flags;
232 mr->umem = umem; 232 mr->umem = umem;
233 233
234 if (is_power_of_2(umem->page_size))
235 mr->mr.page_shift = ilog2(umem->page_size);
234 m = 0; 236 m = 0;
235 n = 0; 237 n = 0;
236 list_for_each_entry(chunk, &umem->chunk_list, list) { 238 list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
390 fmr->mr.offset = 0; 392 fmr->mr.offset = 0;
391 fmr->mr.access_flags = mr_access_flags; 393 fmr->mr.access_flags = mr_access_flags;
392 fmr->mr.max_segs = fmr_attr->max_pages; 394 fmr->mr.max_segs = fmr_attr->max_pages;
393 fmr->page_shift = fmr_attr->page_shift; 395 fmr->mr.page_shift = fmr_attr->page_shift;
394 396
395 atomic_set(&fmr->mr.refcount, 0); 397 atomic_set(&fmr->mr.refcount, 0);
396 ret = &fmr->ibfmr; 398 ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
437 spin_lock_irqsave(&rkt->lock, flags); 439 spin_lock_irqsave(&rkt->lock, flags);
438 fmr->mr.user_base = iova; 440 fmr->mr.user_base = iova;
439 fmr->mr.iova = iova; 441 fmr->mr.iova = iova;
440 ps = 1 << fmr->page_shift; 442 ps = 1 << fmr->mr.page_shift;
441 fmr->mr.length = list_len * ps; 443 fmr->mr.length = list_len * ps;
442 m = 0; 444 m = 0;
443 n = 0; 445 n = 0;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6c39851d2ded..e16751f8639e 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
48 48
49static inline unsigned find_next_offset(struct qib_qpn_table *qpt, 49static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
50 struct qpn_map *map, unsigned off, 50 struct qpn_map *map, unsigned off,
51 unsigned r) 51 unsigned n)
52{ 52{
53 if (qpt->mask) { 53 if (qpt->mask) {
54 off++; 54 off++;
55 if ((off & qpt->mask) >> 1 != r) 55 if (((off & qpt->mask) >> 1) >= n)
56 off = ((off & qpt->mask) ? 56 off = (off | qpt->mask) + 2;
57 (off | qpt->mask) + 1 : off) | (r << 1);
58 } else 57 } else
59 off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); 58 off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
60 return off; 59 return off;
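
With per-processor QPN striping gone, find_next_offset() only has to keep the context-selector bits of the QPN below the number of kernel receive queues. qpt->mask covers the low selector bits and bit 0 stays reserved, so the skip logic reduces to:

    /* Queue index lives in QPN bits [mask:1]; must be < n_krcv_queues. */
    off++;
    if (mask && ((off & mask) >> 1) >= n_krcv_queues)
            off = (off | mask) + 2;     /* jump to queue 0 of next block */

    /* e.g. mask = 0x3e, off = 0x17, n_krcv_queues = 4: selector is
     * (0x16 >> 1) = 11 >= 4, so off becomes (0x17 | 0x3e) + 2 = 0x41,
     * whose selector is 0 -- the first usable QPN in the next block. */
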
@@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
123 u32 i, offset, max_scan, qpn; 122 u32 i, offset, max_scan, qpn;
124 struct qpn_map *map; 123 struct qpn_map *map;
125 u32 ret; 124 u32 ret;
126 int r;
127 125
128 if (type == IB_QPT_SMI || type == IB_QPT_GSI) { 126 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
129 unsigned n; 127 unsigned n;
@@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
139 goto bail; 137 goto bail;
140 } 138 }
141 139
142 r = smp_processor_id(); 140 qpn = qpt->last + 2;
143 if (r >= dd->n_krcv_queues)
144 r %= dd->n_krcv_queues;
145 qpn = qpt->last + 1;
146 if (qpn >= QPN_MAX) 141 if (qpn >= QPN_MAX)
147 qpn = 2; 142 qpn = 2;
148 if (qpt->mask && ((qpn & qpt->mask) >> 1) != r) 143 if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
149 qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) | 144 qpn = (qpn | qpt->mask) + 2;
150 (r << 1);
151 offset = qpn & BITS_PER_PAGE_MASK; 145 offset = qpn & BITS_PER_PAGE_MASK;
152 map = &qpt->map[qpn / BITS_PER_PAGE]; 146 map = &qpt->map[qpn / BITS_PER_PAGE];
153 max_scan = qpt->nmaps - !offset; 147 max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
163 ret = qpn; 157 ret = qpn;
164 goto bail; 158 goto bail;
165 } 159 }
166 offset = find_next_offset(qpt, map, offset, r); 160 offset = find_next_offset(qpt, map, offset,
161 dd->n_krcv_queues);
167 qpn = mk_qpn(qpt, map, offset); 162 qpn = mk_qpn(qpt, map, offset);
168 /* 163 /*
169 * This test differs from alloc_pidmap(). 164 * This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
183 if (qpt->nmaps == QPNMAP_ENTRIES) 178 if (qpt->nmaps == QPNMAP_ENTRIES)
184 break; 179 break;
185 map = &qpt->map[qpt->nmaps++]; 180 map = &qpt->map[qpt->nmaps++];
186 offset = qpt->mask ? (r << 1) : 0; 181 offset = 0;
187 } else if (map < &qpt->map[qpt->nmaps]) { 182 } else if (map < &qpt->map[qpt->nmaps]) {
188 ++map; 183 ++map;
189 offset = qpt->mask ? (r << 1) : 0; 184 offset = 0;
190 } else { 185 } else {
191 map = &qpt->map[0]; 186 map = &qpt->map[0];
192 offset = qpt->mask ? (r << 1) : 2; 187 offset = 2;
193 } 188 }
194 qpn = mk_qpn(qpt, map, offset); 189 qpn = mk_qpn(qpt, map, offset);
195 } 190 }
@@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
468 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); 463 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
469 del_timer(&qp->s_timer); 464 del_timer(&qp->s_timer);
470 } 465 }
466
467 if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
468 qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
469
471 spin_lock(&dev->pending_lock); 470 spin_lock(&dev->pending_lock);
472 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { 471 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
473 qp->s_flags &= ~QIB_S_ANY_WAIT_IO; 472 qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
1061 } 1060 }
1062 qp->ibqp.qp_num = err; 1061 qp->ibqp.qp_num = err;
1063 qp->port_num = init_attr->port_num; 1062 qp->port_num = init_attr->port_num;
1064 qp->processor_id = smp_processor_id();
1065 qib_reset_qp(qp, init_attr->qp_type); 1063 qib_reset_qp(qp, init_attr->qp_type);
1066 break; 1064 break;
1067 1065
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb7157793..8245237b67ce 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1407,6 +1407,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1407 struct qib_ctxtdata *rcd) 1407 struct qib_ctxtdata *rcd)
1408{ 1408{
1409 struct qib_swqe *wqe; 1409 struct qib_swqe *wqe;
1410 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1410 enum ib_wc_status status; 1411 enum ib_wc_status status;
1411 unsigned long flags; 1412 unsigned long flags;
1412 int diff; 1413 int diff;
@@ -1414,6 +1415,29 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1414 u32 aeth; 1415 u32 aeth;
1415 u64 val; 1416 u64 val;
1416 1417
1418 if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1419 /*
1420 * If ACK'd PSN on SDMA busy list try to make progress to
1421 * reclaim SDMA credits.
1422 */
1423 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1424 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1425
1426 /*
1427 * If send tasklet not running attempt to progress
1428 * SDMA queue.
1429 */
1430 if (!(qp->s_flags & QIB_S_BUSY)) {
1431 /* Acquire SDMA Lock */
1432 spin_lock_irqsave(&ppd->sdma_lock, flags);
1433 /* Invoke sdma make progress */
1434 qib_sdma_make_progress(ppd);
1435 /* Release SDMA Lock */
1436 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1437 }
1438 }
1439 }
1440
1417 spin_lock_irqsave(&qp->s_lock, flags); 1441 spin_lock_irqsave(&qp->s_lock, flags);
1418 1442
1419 /* Ignore invalid responses. */ 1443 /* Ignore invalid responses. */
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index e1b3da2a1f85..4a51fd1e9cb7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
445 qkey = be32_to_cpu(ohdr->u.ud.deth[0]); 445 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
446 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; 446 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
447 447
448 /* Get the number of bytes the message was padded by. */ 448 /*
449 * Get the number of bytes the message was padded by
450 * and drop incomplete packets.
451 */
449 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; 452 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
450 if (unlikely(tlen < (hdrsize + pad + 4))) { 453 if (unlikely(tlen < (hdrsize + pad + 4)))
451 /* Drop incomplete packets. */ 454 goto drop;
452 ibp->n_pkt_drops++; 455
453 goto bail;
454 }
455 tlen -= hdrsize + pad + 4; 456 tlen -= hdrsize + pad + 4;
456 457
457 /* 458 /*
@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
460 */ 461 */
461 if (qp->ibqp.qp_num) { 462 if (qp->ibqp.qp_num) {
462 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || 463 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
463 hdr->lrh[3] == IB_LID_PERMISSIVE)) { 464 hdr->lrh[3] == IB_LID_PERMISSIVE))
464 ibp->n_pkt_drops++; 465 goto drop;
465 goto bail;
466 }
467 if (qp->ibqp.qp_num > 1) { 466 if (qp->ibqp.qp_num > 1) {
468 u16 pkey1, pkey2; 467 u16 pkey1, pkey2;
469 468
@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
476 0xF, 475 0xF,
477 src_qp, qp->ibqp.qp_num, 476 src_qp, qp->ibqp.qp_num,
478 hdr->lrh[3], hdr->lrh[1]); 477 hdr->lrh[3], hdr->lrh[1]);
479 goto bail; 478 return;
480 } 479 }
481 } 480 }
482 if (unlikely(qkey != qp->qkey)) { 481 if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
484 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, 483 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
485 src_qp, qp->ibqp.qp_num, 484 src_qp, qp->ibqp.qp_num,
486 hdr->lrh[3], hdr->lrh[1]); 485 hdr->lrh[3], hdr->lrh[1]);
487 goto bail; 486 return;
488 } 487 }
489 /* Drop invalid MAD packets (see 13.5.3.1). */ 488 /* Drop invalid MAD packets (see 13.5.3.1). */
490 if (unlikely(qp->ibqp.qp_num == 1 && 489 if (unlikely(qp->ibqp.qp_num == 1 &&
491 (tlen != 256 || 490 (tlen != 256 ||
492 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { 491 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
493 ibp->n_pkt_drops++; 492 goto drop;
494 goto bail;
495 }
496 } else { 493 } else {
497 struct ib_smp *smp; 494 struct ib_smp *smp;
498 495
499 /* Drop invalid MAD packets (see 13.5.3.1). */ 496 /* Drop invalid MAD packets (see 13.5.3.1). */
500 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { 497 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
501 ibp->n_pkt_drops++; 498 goto drop;
502 goto bail;
503 }
504 smp = (struct ib_smp *) data; 499 smp = (struct ib_smp *) data;
505 if ((hdr->lrh[1] == IB_LID_PERMISSIVE || 500 if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
506 hdr->lrh[3] == IB_LID_PERMISSIVE) && 501 hdr->lrh[3] == IB_LID_PERMISSIVE) &&
507 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 502 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
508 ibp->n_pkt_drops++; 503 goto drop;
509 goto bail;
510 }
511 } 504 }
512 505
513 /* 506 /*
@@ -519,14 +512,12 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
519 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 512 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
520 wc.ex.imm_data = ohdr->u.ud.imm_data; 513 wc.ex.imm_data = ohdr->u.ud.imm_data;
521 wc.wc_flags = IB_WC_WITH_IMM; 514 wc.wc_flags = IB_WC_WITH_IMM;
522 hdrsize += sizeof(u32); 515 tlen -= sizeof(u32);
523 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 516 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
524 wc.ex.imm_data = 0; 517 wc.ex.imm_data = 0;
525 wc.wc_flags = 0; 518 wc.wc_flags = 0;
526 } else { 519 } else
527 ibp->n_pkt_drops++; 520 goto drop;
528 goto bail;
529 }
530 521
531 /* 522 /*
532 * A GRH is expected to precede the data even if not 523 * A GRH is expected to precede the data even if not
@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
556 /* Silently drop packets which are too big. */ 547 /* Silently drop packets which are too big. */
557 if (unlikely(wc.byte_len > qp->r_len)) { 548 if (unlikely(wc.byte_len > qp->r_len)) {
558 qp->r_flags |= QIB_R_REUSE_SGE; 549 qp->r_flags |= QIB_R_REUSE_SGE;
559 ibp->n_pkt_drops++; 550 goto drop;
560 return;
561 } 551 }
562 if (has_grh) { 552 if (has_grh) {
563 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, 553 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
594 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 584 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
595 (ohdr->bth[0] & 585 (ohdr->bth[0] &
596 cpu_to_be32(IB_BTH_SOLICITED)) != 0); 586 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
597bail:; 587 return;
588
589drop:
590 ibp->n_pkt_drops++;
598} 591}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 4c19e06b5e85..66208bcd7c13 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
382 382
383 kmem_cache_free(pq->pkt_slab, pkt); 383 kmem_cache_free(pq->pkt_slab, pkt);
384 } 384 }
385 INIT_LIST_HEAD(list);
385} 386}
386 387
387/* 388/*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bd57c1273225..63b22a9a7feb 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -301,6 +301,7 @@ struct qib_mregion {
301 int access_flags; 301 int access_flags;
302 u32 max_segs; /* number of qib_segs in all the arrays */ 302 u32 max_segs; /* number of qib_segs in all the arrays */
303 u32 mapsz; /* size of the map array */ 303 u32 mapsz; /* size of the map array */
304 u8 page_shift; /* 0 - non-uniform or non power-of-2 page sizes */ 305 atomic_t refcount;
304 atomic_t refcount; 305 atomic_t refcount;
305 struct qib_segarray *map[0]; /* the segments */ 306 struct qib_segarray *map[0]; /* the segments */
306}; 307};
@@ -435,7 +436,6 @@ struct qib_qp {
435 spinlock_t r_lock; /* used for APM */ 436 spinlock_t r_lock; /* used for APM */
436 spinlock_t s_lock; 437 spinlock_t s_lock;
437 atomic_t s_dma_busy; 438 atomic_t s_dma_busy;
438 unsigned processor_id; /* Processor ID QP is bound to */
439 u32 s_flags; 439 u32 s_flags;
440 u32 s_cur_size; /* size of send packet in bytes */ 440 u32 s_cur_size; /* size of send packet in bytes */
441 u32 s_len; /* total length of s_sge */ 441 u32 s_len; /* total length of s_sge */
@@ -813,13 +813,8 @@ extern struct workqueue_struct *qib_cq_wq;
813 */ 813 */
814static inline void qib_schedule_send(struct qib_qp *qp) 814static inline void qib_schedule_send(struct qib_qp *qp)
815{ 815{
816 if (qib_send_ok(qp)) { 816 if (qib_send_ok(qp))
817 if (qp->processor_id == smp_processor_id()) 817 queue_work(qib_wq, &qp->s_work);
818 queue_work(qib_wq, &qp->s_work);
819 else
820 queue_work_on(qp->processor_id,
821 qib_wq, &qp->s_work);
822 }
823} 818}
824 819
825static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) 820static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc51f18..55855eeabae7 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
1config INFINIBAND_IPOIB 1config INFINIBAND_IPOIB
2 tristate "IP-over-InfiniBand" 2 tristate "IP-over-InfiniBand"
3 depends on NETDEVICES && INET && (IPV6 || IPV6=n) 3 depends on NETDEVICES && INET && (IPV6 || IPV6=n)
4 select INET_LRO
5 ---help--- 4 ---help---
6 Support for the IP-over-InfiniBand protocol (IPoIB). This 5 Support for the IP-over-InfiniBand protocol (IPoIB). This
7 transports IP packets over InfiniBand so you can use your IB 6 transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983a5fdc..ab97f92fc257 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
50#include <rdma/ib_verbs.h> 50#include <rdma/ib_verbs.h>
51#include <rdma/ib_pack.h> 51#include <rdma/ib_pack.h>
52#include <rdma/ib_sa.h> 52#include <rdma/ib_sa.h>
53#include <linux/inet_lro.h> 53#include <linux/sched.h>
54 54
55/* constants */ 55/* constants */
56 56
@@ -100,9 +100,6 @@ enum {
100 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ 100 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
101 IPOIB_MCAST_FLAG_ATTACHED = 3, 101 IPOIB_MCAST_FLAG_ATTACHED = 3,
102 102
103 IPOIB_MAX_LRO_DESCRIPTORS = 8,
104 IPOIB_LRO_MAX_AGGR = 64,
105
106 MAX_SEND_CQE = 16, 103 MAX_SEND_CQE = 16,
107 IPOIB_CM_COPYBREAK = 256, 104 IPOIB_CM_COPYBREAK = 256,
108}; 105};
@@ -262,11 +259,6 @@ struct ipoib_ethtool_st {
262 u16 max_coalesced_frames; 259 u16 max_coalesced_frames;
263}; 260};
264 261
265struct ipoib_lro {
266 struct net_lro_mgr lro_mgr;
267 struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
268};
269
270/* 262/*
271 * Device private locking: network stack tx_lock protects members used 263 * Device private locking: network stack tx_lock protects members used
272 * in TX fast path, lock protects everything else. lock nests inside 264 * in TX fast path, lock protects everything else. lock nests inside
@@ -352,8 +344,6 @@ struct ipoib_dev_priv {
352 int hca_caps; 344 int hca_caps;
353 struct ipoib_ethtool_st ethtool; 345 struct ipoib_ethtool_st ethtool;
354 struct timer_list poll_timer; 346 struct timer_list poll_timer;
355
356 struct ipoib_lro lro;
357}; 347};
358 348
359struct ipoib_ah { 349struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb1004114dec..c1c49f2d35b5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1480,6 +1480,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1480 1480
1481 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) { 1481 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
1482 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 1482 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1483 priv->dev->features |= NETIF_F_GRO;
1483 if (priv->hca_caps & IB_DEVICE_UD_TSO) 1484 if (priv->hca_caps & IB_DEVICE_UD_TSO)
1484 dev->features |= NETIF_F_TSO; 1485 dev->features |= NETIF_F_TSO;
1485 } 1486 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c82edd..19f7f5206f78 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@ static int ipoib_set_coalesce(struct net_device *dev,
106 return 0; 106 return 0;
107} 107}
108 108
109static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
110 "LRO aggregated", "LRO flushed",
111 "LRO avg aggr", "LRO no desc"
112};
113
114static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
115{
116 switch (stringset) {
117 case ETH_SS_STATS:
118 memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys));
119 break;
120 }
121}
122
123static int ipoib_get_sset_count(struct net_device *dev, int sset)
124{
125 switch (sset) {
126 case ETH_SS_STATS:
127 return ARRAY_SIZE(ipoib_stats_keys);
128 default:
129 return -EOPNOTSUPP;
130 }
131}
132
133static void ipoib_get_ethtool_stats(struct net_device *dev,
134 struct ethtool_stats *stats, uint64_t *data)
135{
136 struct ipoib_dev_priv *priv = netdev_priv(dev);
137 int index = 0;
138
139 /* Get LRO statistics */
140 data[index++] = priv->lro.lro_mgr.stats.aggregated;
141 data[index++] = priv->lro.lro_mgr.stats.flushed;
142 if (priv->lro.lro_mgr.stats.flushed)
143 data[index++] = priv->lro.lro_mgr.stats.aggregated /
144 priv->lro.lro_mgr.stats.flushed;
145 else
146 data[index++] = 0;
147 data[index++] = priv->lro.lro_mgr.stats.no_desc;
148}
149
150static int ipoib_set_flags(struct net_device *dev, u32 flags)
151{
152 return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
153}
154
155static const struct ethtool_ops ipoib_ethtool_ops = { 109static const struct ethtool_ops ipoib_ethtool_ops = {
156 .get_drvinfo = ipoib_get_drvinfo, 110 .get_drvinfo = ipoib_get_drvinfo,
157 .get_rx_csum = ipoib_get_rx_csum, 111 .get_rx_csum = ipoib_get_rx_csum,
158 .set_tso = ipoib_set_tso, 112 .set_tso = ipoib_set_tso,
159 .get_coalesce = ipoib_get_coalesce, 113 .get_coalesce = ipoib_get_coalesce,
160 .set_coalesce = ipoib_set_coalesce, 114 .set_coalesce = ipoib_set_coalesce,
161 .get_flags = ethtool_op_get_flags,
162 .set_flags = ipoib_set_flags,
163 .get_strings = ipoib_get_strings,
164 .get_sset_count = ipoib_get_sset_count,
165 .get_ethtool_stats = ipoib_get_ethtool_stats,
166}; 115};
167 116
168void ipoib_set_ethtool_ops(struct net_device *dev) 117void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa71903d6e4..806d0292dc39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
295 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok)) 295 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
296 skb->ip_summed = CHECKSUM_UNNECESSARY; 296 skb->ip_summed = CHECKSUM_UNNECESSARY;
297 297
298 if (dev->features & NETIF_F_LRO) 298 napi_gro_receive(&priv->napi, skb);
299 lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
300 else
301 netif_receive_skb(skb);
302 299
303repost: 300repost:
304 if (unlikely(ipoib_ib_post_receive(dev, wr_id))) 301 if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@ poll_more:
450 } 447 }
451 448
452 if (done < budget) { 449 if (done < budget) {
453 if (dev->features & NETIF_F_LRO)
454 lro_flush_all(&priv->lro.lro_mgr);
455
456 napi_complete(napi); 450 napi_complete(napi);
457 if (unlikely(ib_req_notify_cq(priv->recv_cq, 451 if (unlikely(ib_req_notify_cq(priv->recv_cq,
458 IB_CQ_NEXT_COMP | 452 IB_CQ_NEXT_COMP |
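
With LRO removed, received skbs feed napi_gro_receive() inside the poll loop; held GRO segments are flushed implicitly by napi_complete(), which is why the explicit lro_flush_all() call disappears. A minimal sketch of the resulting poll shape, with next_rx_skb() a hypothetical stand-in for the CQ drain:

    static int ipoib_poll_sketch(struct napi_struct *napi, int budget)
    {
            int done = 0;
            struct sk_buff *skb;

            while (done < budget && (skb = next_rx_skb()) != NULL) {
                    napi_gro_receive(napi, skb);    /* coalesce TCP segments */
                    done++;
            }
            if (done < budget)
                    napi_complete(napi);            /* flushes held GRO skbs */
            return done;
    }

GRO gives the same receive-coalescing benefit as LRO without the per-device descriptor bookkeeping, which is why the ethtool stats, module parameters, and ipoib_lro state all go away in the hunks above.
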
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc73ed95..7a07a728fe0d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
 
-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
-		"(default = 64)");
-
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -976,54 +967,6 @@ static const struct header_ops ipoib_header_ops = {
 	.create = ipoib_hard_header,
 };
 
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
-		       void **tcph, u64 *hdr_flags, void *priv)
-{
-	unsigned int ip_len;
-	struct iphdr *iph;
-
-	if (unlikely(skb->protocol != htons(ETH_P_IP)))
-		return -1;
-
-	/*
-	 * In the future we may add an else clause that verifies the
-	 * checksum and allows devices which do not calculate checksum
-	 * to use LRO.
-	 */
-	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
-		return -1;
-
-	/* Check for non-TCP packet */
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-	if (iph->protocol != IPPROTO_TCP)
-		return -1;
-
-	ip_len = ip_hdrlen(skb);
-	skb_set_transport_header(skb, ip_len);
-	*tcph = tcp_hdr(skb);
-
-	/* check if IP header and TCP header are complete */
-	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-		return -1;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*iphdr = iph;
-
-	return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
-	priv->lro.lro_mgr.max_aggr = lro_max_aggr;
-	priv->lro.lro_mgr.max_desc = IPOIB_MAX_LRO_DESCRIPTORS;
-	priv->lro.lro_mgr.lro_arr = priv->lro.lro_desc;
-	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
-	priv->lro.lro_mgr.features = LRO_F_NAPI;
-	priv->lro.lro_mgr.dev = priv->dev;
-	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
 static const struct net_device_ops ipoib_netdev_ops = {
 	.ndo_open = ipoib_open,
 	.ndo_stop = ipoib_stop,
@@ -1067,8 +1010,6 @@ static void ipoib_setup(struct net_device *dev)
 
 	priv->dev = dev;
 
-	ipoib_lro_setup(priv);
-
 	spin_lock_init(&priv->lock);
 
 	mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1159,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
 		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 	}
 
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
+	priv->dev->features |= NETIF_F_GRO;
 
 	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
 		priv->dev->features |= NETIF_F_TSO;
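
Note: NETIF_F_GRO is now set unconditionally, so nothing replaces the old "lro" and "lro_max_aggr" module parameters inside the driver: GRO lives entirely in the core stack, with no get_skb_header callback or descriptor pool for the driver to manage. Since it is a generic netdev feature, aggregation can still be turned off per interface at runtime with "ethtool -K ib0 gro off" if needed.
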
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347a7715..4b62105ed1e8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@ static void srp_disconnect_target(struct srp_target_port *target)
 	wait_for_completion(&target->done);
 }
 
+static bool srp_change_state(struct srp_target_port *target,
+			     enum srp_target_state old,
+			     enum srp_target_state new)
+{
+	bool changed = false;
+
+	spin_lock_irq(&target->lock);
+	if (target->state == old) {
+		target->state = new;
+		changed = true;
+	}
+	spin_unlock_irq(&target->lock);
+	return changed;
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
 	struct srp_target_port *target =
 		container_of(work, struct srp_target_port, work);
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_DEAD) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
 		return;
-	}
-	target->state = SRP_TARGET_REMOVED;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	spin_lock(&target->srp_host->target_lock);
 	list_del(&target->list);
@@ -539,33 +549,34 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			scsi_sg_count(scmnd), scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target,
+			   struct srp_request *req, s32 req_lim_delta)
 {
+	unsigned long flags;
+
 	srp_unmap_data(req->scmnd, target, req);
-	list_move_tail(&req->list, &target->free_reqs);
+	spin_lock_irqsave(&target->lock, flags);
+	target->req_lim += req_lim_delta;
+	req->scmnd = NULL;
+	list_add_tail(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
 	req->scmnd->result = DID_RESET << 16;
 	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req);
+	srp_remove_req(target, req, 0);
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_qp_attr qp_attr;
-	struct srp_request *req, *tmp;
 	struct ib_wc wc;
-	int ret;
+	int i, ret;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_LIVE) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 		return -EAGAIN;
-	}
-	target->state = SRP_TARGET_CONNECTING;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	srp_disconnect_target(target);
 	/*
@@ -590,27 +601,23 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 		; /* nothing */
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		srp_reset_req(target, req);
-	spin_unlock_irq(target->scsi_host->host_lock);
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd)
+			srp_reset_req(target, req);
+	}
 
-	target->rx_head = 0;
-	target->tx_head = 0;
-	target->tx_tail = 0;
+	INIT_LIST_HEAD(&target->free_tx);
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
 	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
 	if (ret)
 		goto err;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state == SRP_TARGET_CONNECTING) {
-		ret = 0;
-		target->state = SRP_TARGET_LIVE;
-	} else
+	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
 		ret = -EAGAIN;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	return ret;
 
@@ -620,17 +627,20 @@ err:
 
 	/*
 	 * We couldn't reconnect, so kill our target port off.
-	 * However, we have to defer the real removal because we might
-	 * be in the context of the SCSI error handler now, which
-	 * would deadlock if we call scsi_remove_host().
+	 * However, we have to defer the real removal because we
+	 * are in the context of the SCSI error handler now, which
+	 * will deadlock if we call scsi_remove_host().
+	 *
+	 * Schedule our work inside the lock to avoid a race with
+	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	return ret;
 }
@@ -758,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
 		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
-		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
@@ -783,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			buf->desc_list[i].va =
 				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
 			buf->desc_list[i].key =
-				cpu_to_be32(dev->mr->rkey);
+				cpu_to_be32(target->rkey);
 			buf->desc_list[i].len = cpu_to_be32(dma_len);
 			datalen += dma_len;
 		}
@@ -796,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		buf->table_desc.va =
 			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
 		buf->table_desc.key =
-			cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+			cpu_to_be32(target->rkey);
 		buf->table_desc.len =
 			cpu_to_be32(count * sizeof (struct srp_direct_buf));
 
@@ -812,9 +822,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 }
 
 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * Return an IU and possible credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+			  enum srp_iu_type iu_type)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&iu->list, &target->free_tx);
+	if (iu_type != SRP_IU_RSP)
+		++target->req_lim;
+	spin_unlock_irqrestore(&target->lock, flags);
+}
+
+/*
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
 
 	srp_send_completion(target->send_cq, target);
 
-	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+	if (list_empty(&target->free_tx))
 		return NULL;
 
 	/* Initiator responses to target requests do not consume credits */
-	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-		++target->zero_req_lim;
-		return NULL;
+	if (iu_type != SRP_IU_RSP) {
+		if (target->req_lim <= rsv) {
+			++target->zero_req_lim;
+			return NULL;
+		}
+
+		--target->req_lim;
 	}
 
-	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-	iu->type = iu_type;
+	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
+	list_del(&iu->list);
 	return iu;
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+			 struct srp_iu *iu, int len)
 {
 	struct ib_sge list;
 	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;
 
 	list.addr = iu->dma;
 	list.length = len;
-	list.lkey = target->srp_host->srp_dev->mr->lkey;
+	list.lkey = target->lkey;
 
 	wr.next = NULL;
-	wr.wr_id = target->tx_head & SRP_SQ_MASK;
+	wr.wr_id = (uintptr_t) iu;
 	wr.sg_list = &list;
 	wr.num_sge = 1;
 	wr.opcode = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 
-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		++target->tx_head;
-		if (iu->type != SRP_IU_RSP)
-			--target->req_lim;
-	}
-
-	return ret;
+	return ib_post_send(target->qp, &wr, &bad_wr);
 }
 
-static int srp_post_recv(struct srp_target_port *target)
+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
 {
-	unsigned long flags;
-	struct srp_iu *iu;
-	struct ib_sge list;
 	struct ib_recv_wr wr, *bad_wr;
-	unsigned int next;
-	int ret;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	next = target->rx_head & SRP_RQ_MASK;
-	wr.wr_id = next;
-	iu = target->rx_ring[next];
+	struct ib_sge list;
 
 	list.addr = iu->dma;
 	list.length = iu->size;
-	list.lkey = target->srp_host->srp_dev->mr->lkey;
+	list.lkey = target->lkey;
 
 	wr.next = NULL;
+	wr.wr_id = (uintptr_t) iu;
 	wr.sg_list = &list;
 	wr.num_sge = 1;
 
-	ret = ib_post_recv(target->qp, &wr, &bad_wr);
-	if (!ret)
-		++target->rx_head;
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
-	return ret;
+	return ib_post_recv(target->qp, &wr, &bad_wr);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -917,23 +917,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	struct srp_request *req;
 	struct scsi_cmnd *scmnd;
 	unsigned long flags;
-	s32 delta;
-
-	delta = (s32) be32_to_cpu(rsp->req_lim_delta);
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	target->req_lim += delta;
-
-	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
 
 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		if (be32_to_cpu(rsp->resp_data_len) < 4)
-			req->tsk_status = -1;
-		else
-			req->tsk_status = rsp->data[3];
-		complete(&req->done);
+		spin_lock_irqsave(&target->lock, flags);
+		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+		spin_unlock_irqrestore(&target->lock, flags);
+
+		target->tsk_mgmt_status = -1;
+		if (be32_to_cpu(rsp->resp_data_len) >= 4)
+			target->tsk_mgmt_status = rsp->data[3];
+		complete(&target->tsk_mgmt_done);
 	} else {
+		req = &target->req_ring[rsp->tag];
 		scmnd = req->scmnd;
 		if (!scmnd)
 			shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		if (!req->tsk_mgmt) {
-			scmnd->host_scribble = (void *) -1L;
-			scmnd->scsi_done(scmnd);
-
-			srp_remove_req(target, req);
-		} else
-			req->cmd_done = 1;
+		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		scmnd->host_scribble = NULL;
+		scmnd->scsi_done(scmnd);
 	}
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 			       void *rsp, int len)
 {
-	struct ib_device *dev;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	unsigned long flags;
 	struct srp_iu *iu;
-	int err = 1;
-
-	dev = target->srp_host->srp_dev->dev;
+	int err;
 
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_delta;
-
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "no IU available to send response\n");
-		goto out;
+		return 1;
 	}
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
 
-	err = __srp_post_send(target, iu, len);
-	if (err)
+	err = srp_post_send(target, iu, len);
+	if (err) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "unable to post response: %d\n", err);
+		srp_put_tx_iu(target, iu, SRP_IU_RSP);
+	}
 
-out:
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 	return err;
 }
@@ -1032,14 +1020,11 @@ static void srp_process_aer_req(struct srp_target_port *target,
 
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
-	struct ib_device *dev;
-	struct srp_iu *iu;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
+	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
 	int res;
 	u8 opcode;
 
-	iu = target->rx_ring[wc->wr_id];
-
-	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
 				   DMA_FROM_DEVICE);
 
@@ -1080,7 +1065,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
 				      DMA_FROM_DEVICE);
 
-	res = srp_post_recv(target);
+	res = srp_post_recv(target, iu);
 	if (res != 0)
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
+	struct srp_iu *iu;
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
@@ -1119,18 +1105,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 			break;
 		}
 
-		++target->tx_tail;
+		iu = (struct srp_iu *) wc.wr_id;
+		list_add(&iu->list, &target->free_tx);
 	}
 }
 
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-				void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_target_port *target = host_to_target(shost);
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
+	unsigned long flags;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,11 +1126,19 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
 		scmnd->result = DID_BAD_TARGET << 16;
-		done(scmnd);
+		scmnd->scsi_done(scmnd);
 		return 0;
 	}
 
+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+	if (iu) {
+		req = list_first_entry(&target->free_reqs, struct srp_request,
+				       list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu)
 		goto err;
 
@@ -1151,11 +1146,8 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);
 
-	req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-	scmnd->scsi_done = done;
 	scmnd->result = 0;
-	scmnd->host_scribble = (void *) (long) req->index;
+	scmnd->host_scribble = (void *) req;
 
 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1159,38 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 
 	req->scmnd = scmnd;
 	req->cmd = iu;
-	req->cmd_done = 0;
-	req->tsk_mgmt = NULL;
 
 	len = srp_map_data(scmnd, target, req);
 	if (len < 0) {
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Failed to map data\n");
-		goto err;
+		goto err_iu;
 	}
 
 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);
 
-	if (__srp_post_send(target, iu, len)) {
+	if (srp_post_send(target, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
 		goto err_unmap;
 	}
 
-	list_move_tail(&req->list, &target->req_queue);
-
 	return 0;
 
 err_unmap:
 	srp_unmap_data(scmnd, target, req);
 
+err_iu:
+	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
+static DEF_SCSI_QCMD(srp_queuecommand)
+
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
@@ -1216,6 +1209,8 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 					       GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
+
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 	}
 
 	return 0;
@@ -1377,7 +1372,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 			break;
 
 		for (i = 0; i < SRP_RQ_SIZE; i++) {
-			target->status = srp_post_recv(target);
+			struct srp_iu *iu = target->rx_ring[i];
+			target->status = srp_post_recv(target, iu);
 			if (target->status)
 				break;
 		}
@@ -1442,25 +1438,24 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 }
 
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
-			     struct srp_request *req, u8 func)
+			     u64 req_tag, unsigned int lun, u8 func)
 {
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
 	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED) {
-		req->scmnd->result = DID_BAD_TARGET << 16;
-		goto out;
-	}
+	    target->state == SRP_TARGET_REMOVED)
+		return -1;
 
-	init_completion(&req->done);
+	init_completion(&target->tsk_mgmt_done);
 
+	spin_lock_irq(&target->lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+	spin_unlock_irq(&target->lock);
+
 	if (!iu)
-		goto out;
+		return -1;
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
@@ -1468,70 +1463,46 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
 	tsk_mgmt->opcode = SRP_TSK_MGMT;
-	tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
-	tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
+	tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag = req->index;
+	tsk_mgmt->task_tag = req_tag;
 
 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
 				      DMA_TO_DEVICE);
-	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
-		goto out;
-
-	req->tsk_mgmt = iu;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
-
-	if (!wait_for_completion_timeout(&req->done,
-					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
+	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
 		return -1;
+	}
 
-	return 0;
-
-out:
-	spin_unlock_irq(target->scsi_host->host_lock);
-	return -1;
-}
-
-static int srp_find_req(struct srp_target_port *target,
-			struct scsi_cmnd *scmnd,
-			struct srp_request **req)
-{
-	if (scmnd->host_scribble == (void *) -1L)
+	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
+					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
 		return -1;
 
-	*req = &target->req_ring[(long) scmnd->host_scribble];
-
 	return 0;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
+	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
 	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (target->qp_in_error)
+	if (!req || target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			      SRP_TSK_ABORT_TASK))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
-		return FAILED;
-
-	spin_lock_irq(target->scsi_host->host_lock);
 
-	if (req->cmd_done) {
-		srp_remove_req(target, req);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req);
-		scmnd->result = DID_ABORT << 16;
-	} else
-		ret = FAILED;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	if (req->scmnd) {
+		if (!target->tsk_mgmt_status) {
+			srp_remove_req(target, req, 0);
+			scmnd->result = DID_ABORT << 16;
+		} else
+			ret = FAILED;
+	}
 
 	return ret;
 }
@@ -1539,26 +1510,23 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req, *tmp;
+	int i;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
 
 	if (target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+			      SRP_TSK_LUN_RESET))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+	if (target->tsk_mgmt_status)
 		return FAILED;
-	if (req->tsk_status)
-		return FAILED;
-
-	spin_lock_irq(target->scsi_host->host_lock);
 
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		if (req->scmnd->device == scmnd->device)
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd && req->scmnd->device == scmnd->device)
 			srp_reset_req(target, req);
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	}
 
 	return SUCCESS;
 }
@@ -1987,9 +1955,12 @@ static ssize_t srp_create_target(struct device *dev,
 	target->io_class = SRP_REV16A_IB_IO_CLASS;
 	target->scsi_host = target_host;
 	target->srp_host = host;
+	target->lkey = host->srp_dev->mr->lkey;
+	target->rkey = host->srp_dev->mr->rkey;
 
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
-	INIT_LIST_HEAD(&target->req_queue);
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		target->req_ring[i].index = i;
 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2188,9 @@ static void srp_remove_one(struct ib_device *device)
 		 */
 		spin_lock(&host->target_lock);
 		list_for_each_entry(target, &host->target_list, list) {
-			spin_lock_irq(target->scsi_host->host_lock);
+			spin_lock_irq(&target->lock);
 			target->state = SRP_TARGET_REMOVED;
-			spin_unlock_irq(target->scsi_host->host_lock);
+			spin_unlock_irq(&target->lock);
 		}
 		spin_unlock(&host->target_lock);
 
@@ -2258,8 +2229,7 @@ static int __init srp_init_module(void)
 {
 	int ret;
 
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
 	if (srp_sg_tablesize > 255) {
 		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
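
Note: a central trick in this series is that, with the rx_head/tx_head ring indices gone, in-flight buffers are identified by storing the srp_iu pointer itself in the 64-bit wr_id of the work request; the BUILD_BUG_ON() added to srp_init_module() guards exactly that assumption. The send-completion handler can then return the IU it reads back from wc.wr_id straight to the free_tx list instead of advancing a tail counter. A generic sketch of the pattern under invented names (my_iu, my_post_recv and my_iu_from_wc are illustrative, not part of the SRP driver):

	#include <rdma/ib_verbs.h>

	struct my_iu {
		struct list_head list;
		u64 dma;
		size_t size;
	};

	static int my_post_recv(struct ib_qp *qp, struct my_iu *iu, u32 lkey)
	{
		struct ib_sge sge = {
			.addr   = iu->dma,
			.length = iu->size,
			.lkey   = lkey,
		};
		struct ib_recv_wr wr = {
			.wr_id   = (uintptr_t) iu, /* pointer rides in the WR */
			.sg_list = &sge,
			.num_sge = 1,
		}, *bad_wr;

		return ib_post_recv(qp, &wr, &bad_wr);
	}

	/* The completion handler recovers the buffer without any lookup: */
	static struct my_iu *my_iu_from_wc(const struct ib_wc *wc)
	{
		return (struct my_iu *) (uintptr_t) wc->wr_id;
	}
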
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9e479f..9dc6fc3fd894 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@ enum {
 
 	SRP_RQ_SHIFT		= 6,
 	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
-	SRP_RQ_MASK		= SRP_RQ_SIZE - 1,
 
 	SRP_SQ_SIZE		= SRP_RQ_SIZE,
-	SRP_SQ_MASK		= SRP_SQ_SIZE - 1,
 	SRP_RSP_SQ_SIZE		= 1,
 	SRP_REQ_SQ_SIZE		= SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
 	SRP_TSK_MGMT_SQ_SIZE	= 1,
 	SRP_CMD_SQ_SIZE		= SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
 
-	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1),
+	SRP_TAG_NO_REQ		= ~0U,
+	SRP_TAG_TSK_MGMT	= 1U << 31,
 
 	SRP_FMR_SIZE		= 256,
 	SRP_FMR_POOL_SIZE	= 1024,
@@ -113,15 +112,29 @@ struct srp_request {
 	struct list_head	list;
 	struct scsi_cmnd	*scmnd;
 	struct srp_iu		*cmd;
-	struct srp_iu		*tsk_mgmt;
 	struct ib_pool_fmr	*fmr;
-	struct completion	done;
 	short			index;
-	u8			cmd_done;
-	u8			tsk_status;
 };
 
 struct srp_target_port {
+	/* These are RW in the hot path, and commonly used together */
+	struct list_head	free_tx;
+	struct list_head	free_reqs;
+	spinlock_t		lock;
+	s32			req_lim;
+
+	/* These are read-only in the hot path */
+	struct ib_cq		*send_cq ____cacheline_aligned_in_smp;
+	struct ib_cq		*recv_cq;
+	struct ib_qp		*qp;
+	u32			lkey;
+	u32			rkey;
+	enum srp_target_state	state;
+
+	/* Everything above this point is used in the hot path of
+	 * command processing. Try to keep them packed into cachelines.
+	 */
+
 	__be64			id_ext;
 	__be64			ioc_guid;
 	__be64			service_id;
@@ -138,24 +151,13 @@ struct srp_target_port {
 	int			path_query_id;
 
 	struct ib_cm_id		*cm_id;
-	struct ib_cq		*recv_cq;
-	struct ib_cq		*send_cq;
-	struct ib_qp		*qp;
 
 	int			max_ti_iu_len;
-	s32			req_lim;
 
 	int			zero_req_lim;
 
-	unsigned		rx_head;
-	struct srp_iu		*rx_ring[SRP_RQ_SIZE];
-
-	unsigned		tx_head;
-	unsigned		tx_tail;
 	struct srp_iu		*tx_ring[SRP_SQ_SIZE];
-
-	struct list_head	free_reqs;
-	struct list_head	req_queue;
+	struct srp_iu		*rx_ring[SRP_RQ_SIZE];
 	struct srp_request	req_ring[SRP_CMD_SQ_SIZE];
 
 	struct work_struct	work;
@@ -163,16 +165,18 @@ struct srp_target_port {
 	struct list_head	list;
 	struct completion	done;
 	int			status;
-	enum srp_target_state	state;
 	int			qp_in_error;
+
+	struct completion	tsk_mgmt_done;
+	u8			tsk_mgmt_status;
 };
 
 struct srp_iu {
+	struct list_head	list;
 	u64			dma;
 	void			*buf;
 	size_t			size;
 	enum dma_data_direction	direction;
-	enum srp_iu_type	type;
 };
 
 #endif /* IB_SRP_H */
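
Note: the struct reshuffle above is a cache-layout optimization, not a functional change. The fields written on every command (free_tx, free_reqs, lock, req_lim) are packed together at the front, and the hot read-mostly fields are pushed onto a fresh cacheline with ____cacheline_aligned_in_smp so that stores to the first group do not keep invalidating them on other CPUs. The general shape, as an illustrative sketch (hot_cold_example is an invented struct, not kernel code):

	#include <linux/cache.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct hot_cold_example {
		/* hot and read-write: used together on every request */
		spinlock_t lock;
		struct list_head free_list;
		s32 credits;

		/* hot but read-only: start a new cacheline so the writes
		 * above do not bounce these fields between CPUs */
		void *qp ____cacheline_aligned_in_smp;
		u32 lkey;

		/* cold setup/teardown fields can follow in any order */
		char name[32];
	};
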
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6e7ddc09d76..4daf9e5a7736 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,11 +387,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	/* Set the DMA ops to the ones from the PCI device, this could be
 	 * fishy if we didn't know that on PowerMac it's always direct ops
 	 * or iommu ops that will work fine
+	 *
+	 * To get all the fields, copy all archdata
 	 */
-	dev->ofdev.dev.archdata.dma_ops =
-		chip->lbus.pdev->dev.archdata.dma_ops;
-	dev->ofdev.dev.archdata.dma_data =
-		chip->lbus.pdev->dev.archdata.dma_data;
+	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
 #endif /* CONFIG_PCI */
 
 #ifdef DEBUG
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 44549272333c..2e041fd0a00c 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2213,6 +2213,9 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
 static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
 {
 	state = state_detached;
+	of_dev = dev;
+
+	dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
 
 	/* Lookup the fans in the device tree */
 	fcu_lookup_fans(dev->dev.of_node);
@@ -2235,6 +2238,7 @@ static const struct of_device_id fcu_match[] =
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, fcu_match);
 
 static struct of_platform_driver fcu_of_platform_driver =
 {
@@ -2252,8 +2256,6 @@ static struct of_platform_driver fcu_of_platform_driver =
  */
 static int __init therm_pm72_init(void)
 {
-	struct device_node *np;
-
 	rackmac = of_machine_is_compatible("RackMac3,1");
 
 	if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -2261,34 +2263,12 @@ static int __init therm_pm72_init(void)
 	    !rackmac)
 		return -ENODEV;
 
-	printk(KERN_INFO "PowerMac G5 Thermal control driver %s\n", VERSION);
-
-	np = of_find_node_by_type(NULL, "fcu");
-	if (np == NULL) {
-		/* Some machines have strangely broken device-tree */
-		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
-		if (np == NULL) {
-			printk(KERN_ERR "Can't find FCU in device-tree !\n");
-			return -ENODEV;
-		}
-	}
-	of_dev = of_platform_device_create(np, "temperature", NULL);
-	if (of_dev == NULL) {
-		printk(KERN_ERR "Can't register FCU platform device !\n");
-		return -ENODEV;
-	}
-
-	of_register_platform_driver(&fcu_of_platform_driver);
-
-	return 0;
+	return of_register_platform_driver(&fcu_of_platform_driver);
 }
 
 static void __exit therm_pm72_exit(void)
 {
 	of_unregister_platform_driver(&fcu_of_platform_driver);
-
-	if (of_dev)
-		of_device_unregister(of_dev);
 }
 
 module_init(therm_pm72_init);
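
Note: therm_pm72 now binds like any other of_platform driver: fcu_of_probe() records the device, and the fcu_match table performs the device-tree matching that the hand-rolled of_find_node_by_type()/of_platform_device_create() code used to do. The added MODULE_DEVICE_TABLE(of, fcu_match) exports the match data as module aliases so udev can autoload the driver when a matching node exists. As a hedged sketch of the idiom (foo_match is a stand-in name):

	static const struct of_device_id foo_match[] = {
		{ .type = "fcu" },
		{},
	};
	MODULE_DEVICE_TABLE(of, foo_match); /* emits of:* modaliases for udev */
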
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index f1714f93af9d..0a7df44a93c0 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -131,11 +131,17 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
 	 */
 	mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
 
+	/*
+	 * All SDHI blocks support SDIO IRQ signalling.
+	 */
+	mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+
 	if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
 		priv->param_tx.slave_id = p->dma_slave_tx;
 		priv->param_rx.slave_id = p->dma_slave_rx;
 		priv->dma_priv.chan_priv_tx = &priv->param_tx;
 		priv->dma_priv.chan_priv_rx = &priv->param_rx;
+		priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
 		mmc_data->dma = &priv->dma_priv;
 	}
 
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 57e4416b9ef0..2a876c4099cd 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -16,6 +16,7 @@ config MMC_BLOCK
 
 config MMC_BLOCK_MINORS
 	int "Number of minors per block device"
+	depends on MMC_BLOCK
 	range 4 256
 	default 8
 	help
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd76ef8..ef103871517f 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
 
 	  This option sets a default which can be overridden by the
 	  module parameter "removable=0" or "removable=1".
+
+config MMC_CLKGATE
+	bool "MMC host clock gating (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  This will attempt to aggressively gate the clock to the MMC card.
+	  This is done to save power due to gating off the logic and bus
+	  noise when the MMC card is not in use. Your host driver has to
+	  support handling this in order for it to be of any use.
+
+	  If unsure, say N.
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index af8dc6a2a317..63667a8f140c 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,14 +303,14 @@ int mmc_add_card(struct mmc_card *card)
 			type, card->rca);
 	}
 
-	ret = device_add(&card->dev);
-	if (ret)
-		return ret;
-
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_card_debugfs(card);
 #endif
 
+	ret = device_add(&card->dev);
+	if (ret)
+		return ret;
+
 	mmc_card_set_present(card);
 
 	return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a3a780faf85a..6625c057be05 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
 #include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -130,6 +131,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 
 		if (mrq->done)
 			mrq->done(mrq);
+
+		mmc_host_clk_gate(host);
 	}
 }
 
@@ -190,6 +193,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 			mrq->stop->mrq = mrq;
 		}
 	}
+	mmc_host_clk_ungate(host);
 	host->ops->request(host, mrq);
 }
 
@@ -295,8 +299,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 		unsigned int timeout_us, limit_us;
 
 		timeout_us = data->timeout_ns / 1000;
-		timeout_us += data->timeout_clks * 1000 /
-			(card->host->ios.clock / 1000);
+		if (mmc_host_clk_rate(card->host))
+			timeout_us += data->timeout_clks * 1000 /
+				(mmc_host_clk_rate(card->host) / 1000);
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
@@ -614,6 +619,8 @@ static inline void mmc_set_ios(struct mmc_host *host)
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 ios->bus_width, ios->timing);
 
+	if (ios->clock > 0)
+		mmc_set_ungated(host);
 	host->ops->set_ios(host, ios);
 }
 
@@ -641,6 +648,61 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
 	mmc_set_ios(host);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_old = host->ios.clock;
+	host->ios.clock = 0;
+	host->clk_gated = true;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+	/*
+	 * We should previously have gated the clock, so the clock shall
+	 * be 0 here! The clock may however be 0 during initialization,
+	 * when some request operations are performed before setting
+	 * the frequency. When ungate is requested in that situation
+	 * we just ignore the call.
+	 */
+	if (host->clk_old) {
+		BUG_ON(host->ios.clock);
+		/* This call will also set host->clk_gated to false */
+		mmc_set_clock(host, host->clk_old);
+	}
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/*
+	 * We've been given a new frequency while the clock is gated,
+	 * so make sure we regard this as ungating it.
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_gated = false;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
 /*
  * Change the bus mode (open drain/push-pull) of a host.
  */
@@ -1424,35 +1486,57 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
 }
 EXPORT_SYMBOL(mmc_set_blocklen);
 
+static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+{
+	host->f_init = freq;
+
+#ifdef CONFIG_MMC_DEBUG
+	pr_info("%s: %s: trying to init card at %u Hz\n",
+		mmc_hostname(host), __func__, host->f_init);
+#endif
+	mmc_power_up(host);
+	sdio_reset(host);
+	mmc_go_idle(host);
+
+	mmc_send_if_cond(host, host->ocr_avail);
+
+	/* Order's important: probe SDIO, then SD, then MMC */
+	if (!mmc_attach_sdio(host))
+		return 0;
+	if (!mmc_attach_sd(host))
+		return 0;
+	if (!mmc_attach_mmc(host))
+		return 0;
+
+	mmc_power_off(host);
+	return -EIO;
+}
+
 void mmc_rescan(struct work_struct *work)
 {
+	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
-	u32 ocr;
-	int err;
-	unsigned long flags;
 	int i;
-	const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
-
-	spin_lock_irqsave(&host->lock, flags);
 
-	if (host->rescan_disable) {
-		spin_unlock_irqrestore(&host->lock, flags);
+	if (host->rescan_disable)
 		return;
-	}
-
-	spin_unlock_irqrestore(&host->lock, flags);
-
 
 	mmc_bus_get(host);
 
-	/* if there is a card registered, check whether it is still present */
-	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
+	/*
+	 * if there is a _removable_ card registered, check whether it is
+	 * still present
+	 */
+	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
+	    && mmc_card_is_removable(host))
 		host->bus_ops->detect(host);
 
+	/*
+	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
+	 * the card is no longer present.
+	 */
 	mmc_bus_put(host);
-
-
 	mmc_bus_get(host);
 
 	/* if there still is a card present, stop here */
@@ -1461,8 +1545,6 @@ void mmc_rescan(struct work_struct *work)
 		goto out;
 	}
 
-	/* detect a newly inserted card */
-
 	/*
 	 * Only we can add a new handler, so it's safe to
 	 * release the lock here.
@@ -1472,72 +1554,16 @@ void mmc_rescan(struct work_struct *work)
 	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
 		goto out;
 
+	mmc_claim_host(host);
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-		mmc_claim_host(host);
-
-		if (freqs[i] >= host->f_min)
-			host->f_init = freqs[i];
-		else if (!i || freqs[i-1] > host->f_min)
-			host->f_init = host->f_min;
-		else {
-			mmc_release_host(host);
-			goto out;
-		}
-#ifdef CONFIG_MMC_DEBUG
-		pr_info("%s: %s: trying to init card at %u Hz\n",
-			mmc_hostname(host), __func__, host->f_init);
-#endif
-		mmc_power_up(host);
-		sdio_reset(host);
-		mmc_go_idle(host);
-
-		mmc_send_if_cond(host, host->ocr_avail);
-
-		/*
-		 * First we search for SDIO...
-		 */
-		err = mmc_send_io_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_sdio(host, ocr)) {
-				mmc_claim_host(host);
-				/*
-				 * Try SDMEM (but not MMC) even if SDIO
-				 * is broken.
-				 */
-				if (mmc_send_app_op_cond(host, 0, &ocr))
-					goto out_fail;
-
-				if (mmc_attach_sd(host, ocr))
-					mmc_power_off(host);
-			}
-			goto out;
-		}
-
-		/*
-		 * ...then normal SD...
-		 */
-		err = mmc_send_app_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_sd(host, ocr))
-				mmc_power_off(host);
-			goto out;
-		}
-
-		/*
-		 * ...and finally MMC.
-		 */
-		err = mmc_send_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_mmc(host, ocr))
-				mmc_power_off(host);
-			goto out;
-		}
-
-out_fail:
-		mmc_release_host(host);
-		mmc_power_off(host);
+		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+			break;
+		if (freqs[i] < host->f_min)
+			break;
 	}
-out:
+	mmc_release_host(host);
+
+ out:
 	if (host->caps & MMC_CAP_NEEDS_POLL)
 		mmc_schedule_delayed_work(&host->detect, HZ);
 }
@@ -1721,6 +1747,18 @@ int mmc_resume_host(struct mmc_host *host)
 		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
 			mmc_power_up(host);
 			mmc_select_voltage(host, host->ocr);
+			/*
+			 * Tell runtime PM core we just powered up the card,
+			 * since it still believes the card is powered off.
+			 * Note that currently runtime PM is only enabled
+			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+			 */
+			if (mmc_card_sdio(host->card) &&
+			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
+				pm_runtime_disable(&host->card->dev);
+				pm_runtime_set_active(&host->card->dev);
+				pm_runtime_enable(&host->card->dev);
+			}
 		}
 		BUG_ON(!host->bus_ops->resume);
 		err = host->bus_ops->resume(host);
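
Note: clock gating is deliberately invisible at the host-driver API level. The core brackets each request with mmc_host_clk_ungate()/mmc_host_clk_gate(), and the driver only ever sees set_ios() with ios->clock == 0 (gate) or the previous frequency (ungate), so supporting MMC_CLKGATE mostly means treating a zero clock as "switch the bus clock off". An illustrative sketch under invented names (foo_host and foo_set_ios are not a real driver interface):

	#include <linux/clk.h>
	#include <linux/mmc/host.h>

	struct foo_host {
		struct clk *busclk; /* hypothetical controller clock */
	};

	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	{
		struct foo_host *host = mmc_priv(mmc);

		if (ios->clock == 0) {
			/* the core gates us by requesting 0 Hz */
			clk_disable(host->busclk);
			return;
		}
		clk_enable(host->busclk);
		/* reprogram the card-clock divider for ios->clock here */
	}

This is also why the mmc_set_data_timeout() hunk above switches to mmc_host_clk_rate() and guards against a zero rate: while the clock is gated, ios.clock reads as 0 and must not be used as a divisor.
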
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 77240cd11bcf..ca1fdde29df6 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -33,6 +33,9 @@ void mmc_init_erase(struct mmc_card *card);
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,9 @@ void mmc_rescan(struct work_struct *work);
 void mmc_start_host(struct mmc_host *host);
 void mmc_stop_host(struct mmc_host *host);
 
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
-int mmc_attach_sd(struct mmc_host *host, u32 ocr);
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
 
 /* Module parameters */
 extern int use_spi_crc;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index eed1405fd742..998797ed67a6 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -183,6 +183,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
 			&mmc_clock_fops))
 		goto err_node;
 
+#ifdef CONFIG_MMC_CLKGATE
+	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+				root, &host->clk_delay))
+		goto err_node;
+#endif
 	return;
 
 err_node:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 10b8af27e03a..b3ac6c5bc5c6 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved. 4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 * Copyright (C) 2007-2008 Pierre Ossman 5 * Copyright (C) 2007-2008 Pierre Ossman
6 * Copyright (C) 2010 Linus Walleij
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
20#include <linux/suspend.h> 21#include <linux/suspend.h>
21 22
22#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <linux/mmc/card.h>
23 25
24#include "core.h" 26#include "core.h"
25#include "host.h" 27#include "host.h"
@@ -50,6 +52,205 @@ void mmc_unregister_host_class(void)
50static DEFINE_IDR(mmc_host_idr); 52static DEFINE_IDR(mmc_host_idr);
51static DEFINE_SPINLOCK(mmc_host_lock); 53static DEFINE_SPINLOCK(mmc_host_lock);
52 54
55#ifdef CONFIG_MMC_CLKGATE
56
57/*
 58 * Enabling clock gating makes the core call out to the host once to
 59 * gate and once to ungate the clock around each request or card
 60 * operation, however these may be interleaved. The driver sees this as
 61 * set_ios() calls with the ios.clock field set to 0 to gate (disable)
 62 * the block clock, and to the old frequency to enable it again.
63 */
64static void mmc_host_clk_gate_delayed(struct mmc_host *host)
65{
66 unsigned long tick_ns;
67 unsigned long freq = host->ios.clock;
68 unsigned long flags;
69
70 if (!freq) {
71 pr_debug("%s: frequency set to 0 in disable function, "
72 "this means the clock is already disabled.\n",
73 mmc_hostname(host));
74 return;
75 }
76 /*
77 * New requests may have appeared while we were scheduling,
78 * then there is no reason to delay the check before
79 * clk_disable().
80 */
81 spin_lock_irqsave(&host->clk_lock, flags);
82
83 /*
84 * Delay n bus cycles (at least 8 from MMC spec) before attempting
85 * to disable the MCI block clock. The reference count may have
86 * gone up again after this delay due to rescheduling!
87 */
88 if (!host->clk_requests) {
89 spin_unlock_irqrestore(&host->clk_lock, flags);
90 tick_ns = DIV_ROUND_UP(1000000000, freq);
91 ndelay(host->clk_delay * tick_ns);
92 } else {
93 /* New users appeared while waiting for this work */
94 spin_unlock_irqrestore(&host->clk_lock, flags);
95 return;
96 }
97 mutex_lock(&host->clk_gate_mutex);
98 spin_lock_irqsave(&host->clk_lock, flags);
99 if (!host->clk_requests) {
100 spin_unlock_irqrestore(&host->clk_lock, flags);
101 /* This will set host->ios.clock to 0 */
102 mmc_gate_clock(host);
103 spin_lock_irqsave(&host->clk_lock, flags);
104 pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
105 }
106 spin_unlock_irqrestore(&host->clk_lock, flags);
107 mutex_unlock(&host->clk_gate_mutex);
108}
109
110/*
111 * Internal work. Work to disable the clock at some later point.
112 */
113static void mmc_host_clk_gate_work(struct work_struct *work)
114{
115 struct mmc_host *host = container_of(work, struct mmc_host,
116 clk_gate_work);
117
118 mmc_host_clk_gate_delayed(host);
119}
120
121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks
123 * @host: host to ungate.
124 *
125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user.
128 */
129void mmc_host_clk_ungate(struct mmc_host *host)
130{
131 unsigned long flags;
132
133 mutex_lock(&host->clk_gate_mutex);
134 spin_lock_irqsave(&host->clk_lock, flags);
135 if (host->clk_gated) {
136 spin_unlock_irqrestore(&host->clk_lock, flags);
137 mmc_ungate_clock(host);
138 spin_lock_irqsave(&host->clk_lock, flags);
139 pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
140 }
141 host->clk_requests++;
142 spin_unlock_irqrestore(&host->clk_lock, flags);
143 mutex_unlock(&host->clk_gate_mutex);
144}
145
146/**
147 * mmc_host_may_gate_card - check if this card may be gated
148 * @card: card to check.
149 */
150static bool mmc_host_may_gate_card(struct mmc_card *card)
151{
 152 /* If there is no card we may gate the clock */
153 if (!card)
154 return true;
155 /*
156 * Don't gate SDIO cards! These need to be clocked at all times
157 * since they may be independent systems generating interrupts
158 * and other events. The clock requests counter from the core will
159 * go down to zero since the core does not need it, but we will not
160 * gate the clock, because there is somebody out there that may still
161 * be using it.
162 */
163 if (mmc_card_sdio(card))
164 return false;
165
166 return true;
167}
168
169/**
170 * mmc_host_clk_gate - gate off hardware MCI clocks
171 * @host: host to gate.
172 *
173 * Calls the host driver with ios.clock set to zero as often as possible
174 * in order to gate off hardware MCI clocks. Decrease clock reference
175 * count and schedule disabling of clock.
176 */
177void mmc_host_clk_gate(struct mmc_host *host)
178{
179 unsigned long flags;
180
181 spin_lock_irqsave(&host->clk_lock, flags);
182 host->clk_requests--;
183 if (mmc_host_may_gate_card(host->card) &&
184 !host->clk_requests)
185 schedule_work(&host->clk_gate_work);
186 spin_unlock_irqrestore(&host->clk_lock, flags);
187}
188
189/**
190 * mmc_host_clk_rate - get current clock frequency setting
191 * @host: host to get the clock frequency for.
192 *
193 * Returns current clock frequency regardless of gating.
194 */
195unsigned int mmc_host_clk_rate(struct mmc_host *host)
196{
197 unsigned long freq;
198 unsigned long flags;
199
200 spin_lock_irqsave(&host->clk_lock, flags);
201 if (host->clk_gated)
202 freq = host->clk_old;
203 else
204 freq = host->ios.clock;
205 spin_unlock_irqrestore(&host->clk_lock, flags);
206 return freq;
207}
208
209/**
210 * mmc_host_clk_init - set up clock gating code
211 * @host: host with potential clock to control
212 */
213static inline void mmc_host_clk_init(struct mmc_host *host)
214{
215 host->clk_requests = 0;
216 /* Hold MCI clock for 8 cycles by default */
217 host->clk_delay = 8;
218 host->clk_gated = false;
219 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
220 spin_lock_init(&host->clk_lock);
221 mutex_init(&host->clk_gate_mutex);
222}
223
224/**
225 * mmc_host_clk_exit - shut down clock gating code
226 * @host: host with potential clock to control
227 */
228static inline void mmc_host_clk_exit(struct mmc_host *host)
229{
230 /*
231 * Wait for any outstanding gate and then make sure we're
232 * ungated before exiting.
233 */
234 if (cancel_work_sync(&host->clk_gate_work))
235 mmc_host_clk_gate_delayed(host);
236 if (host->clk_gated)
237 mmc_host_clk_ungate(host);
238 /* There should be only one user now */
239 WARN_ON(host->clk_requests > 1);
240}
241
242#else
243
244static inline void mmc_host_clk_init(struct mmc_host *host)
245{
246}
247
248static inline void mmc_host_clk_exit(struct mmc_host *host)
249{
250}
251
252#endif
253
53/** 254/**
54 * mmc_alloc_host - initialise the per-host structure. 255 * mmc_alloc_host - initialise the per-host structure.
55 * @extra: sizeof private data structure 256 * @extra: sizeof private data structure
@@ -82,6 +283,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
82 host->class_dev.class = &mmc_host_class; 283 host->class_dev.class = &mmc_host_class;
83 device_initialize(&host->class_dev); 284 device_initialize(&host->class_dev);
84 285
286 mmc_host_clk_init(host);
287
85 spin_lock_init(&host->lock); 288 spin_lock_init(&host->lock);
86 init_waitqueue_head(&host->wq); 289 init_waitqueue_head(&host->wq);
87 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 290 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +366,8 @@ void mmc_remove_host(struct mmc_host *host)
163 device_del(&host->class_dev); 366 device_del(&host->class_dev);
164 367
165 led_trigger_unregister_simple(host->led); 368 led_trigger_unregister_simple(host->led);
369
370 mmc_host_clk_exit(host);
166} 371}
167 372
168EXPORT_SYMBOL(mmc_remove_host); 373EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +388,3 @@ void mmc_free_host(struct mmc_host *host)
183} 388}
184 389
185EXPORT_SYMBOL(mmc_free_host); 390EXPORT_SYMBOL(mmc_free_host);
186
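From a host driver's point of view the gating machinery above is visible
only through set_ios(): the core passes ios->clock == 0 to gate the block
clock and the old frequency to ungate it. A hypothetical set_ios()
fragment reacting to that contract (the foo_* names are illustrative and
other ios fields are ignored for brevity; this is a sketch, not an
in-tree driver):

#include <linux/clk.h>
#include <linux/mmc/host.h>

struct foo_host {
        struct clk *clk;
        bool clk_enabled;
};

/* Stub: program the controller divider for the requested rate. */
static void foo_set_divider(struct foo_host *host, unsigned int hz)
{
}

static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct foo_host *host = mmc_priv(mmc);

        if (ios->clock == 0) {
                /* core found the bus idle: stop the MCI block clock */
                if (host->clk_enabled) {
                        clk_disable(host->clk);
                        host->clk_enabled = false;
                }
        } else {
                if (!host->clk_enabled) {
                        clk_enable(host->clk);
                        host->clk_enabled = true;
                }
                foo_set_divider(host, ios->clock);
        }
}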
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e1109a34..de199f911928 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
10 */ 10 */
11#ifndef _MMC_CORE_HOST_H 11#ifndef _MMC_CORE_HOST_H
12#define _MMC_CORE_HOST_H 12#define _MMC_CORE_HOST_H
13#include <linux/mmc/host.h>
13 14
14int mmc_register_host_class(void); 15int mmc_register_host_class(void);
15void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
16 17
18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22
23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host)
25{
26}
27
28static inline void mmc_host_clk_gate(struct mmc_host *host)
29{
30}
31
32static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
33{
34 return host->ios.clock;
35}
36#endif
37
17void mmc_host_deeper_disable(struct work_struct *work); 38void mmc_host_deeper_disable(struct work_struct *work);
18 39
19#endif 40#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 77f93c3b8808..16006ef153fe 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -534,39 +534,57 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
534 */ 534 */
535 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 535 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
536 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 536 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
537 unsigned ext_csd_bit, bus_width; 537 static unsigned ext_csd_bits[][2] = {
538 538 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
539 if (host->caps & MMC_CAP_8_BIT_DATA) { 539 { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
540 if (ddr) 540 { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
541 ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8; 541 };
542 else 542 static unsigned bus_widths[] = {
543 ext_csd_bit = EXT_CSD_BUS_WIDTH_8; 543 MMC_BUS_WIDTH_8,
544 bus_width = MMC_BUS_WIDTH_8; 544 MMC_BUS_WIDTH_4,
545 } else { 545 MMC_BUS_WIDTH_1
546 if (ddr) 546 };
547 ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4; 547 unsigned idx, bus_width = 0;
548 else 548
549 ext_csd_bit = EXT_CSD_BUS_WIDTH_4; 549 if (host->caps & MMC_CAP_8_BIT_DATA)
550 bus_width = MMC_BUS_WIDTH_4; 550 idx = 0;
551 else
552 idx = 1;
553 for (; idx < ARRAY_SIZE(bus_widths); idx++) {
554 bus_width = bus_widths[idx];
555 if (bus_width == MMC_BUS_WIDTH_1)
556 ddr = 0; /* no DDR for 1-bit width */
557 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
558 EXT_CSD_BUS_WIDTH,
559 ext_csd_bits[idx][0]);
560 if (!err) {
561 mmc_set_bus_width_ddr(card->host,
562 bus_width, MMC_SDR_MODE);
563 /*
564 * If controller can't handle bus width test,
565 * use the highest bus width to maintain
566 * compatibility with previous MMC behavior.
567 */
568 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
569 break;
570 err = mmc_bus_test(card, bus_width);
571 if (!err)
572 break;
573 }
551 } 574 }
552 575
553 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 576 if (!err && ddr) {
554 EXT_CSD_BUS_WIDTH, ext_csd_bit); 577 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
555 578 EXT_CSD_BUS_WIDTH,
556 if (err && err != -EBADMSG) 579 ext_csd_bits[idx][1]);
557 goto free_card; 580 }
558
559 if (err) { 581 if (err) {
560 printk(KERN_WARNING "%s: switch to bus width %d ddr %d " 582 printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
561 "failed\n", mmc_hostname(card->host), 583 "failed\n", mmc_hostname(card->host),
562 1 << bus_width, ddr); 584 1 << bus_width, ddr);
563 err = 0; 585 goto free_card;
564 } else { 586 } else if (ddr) {
565 if (ddr) 587 mmc_card_set_ddr_mode(card);
566 mmc_card_set_ddr_mode(card);
567 else
568 ddr = MMC_SDR_MODE;
569
570 mmc_set_bus_width_ddr(card->host, bus_width, ddr); 588 mmc_set_bus_width_ddr(card->host, bus_width, ddr);
571 } 589 }
572 } 590 }
@@ -737,14 +755,21 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
737/* 755/*
738 * Starting point for MMC card init. 756 * Starting point for MMC card init.
739 */ 757 */
740int mmc_attach_mmc(struct mmc_host *host, u32 ocr) 758int mmc_attach_mmc(struct mmc_host *host)
741{ 759{
742 int err; 760 int err;
761 u32 ocr;
743 762
744 BUG_ON(!host); 763 BUG_ON(!host);
745 WARN_ON(!host->claimed); 764 WARN_ON(!host->claimed);
746 765
766 err = mmc_send_op_cond(host, 0, &ocr);
767 if (err)
768 return err;
769
747 mmc_attach_bus_ops(host); 770 mmc_attach_bus_ops(host);
771 if (host->ocr_avail_mmc)
772 host->ocr_avail = host->ocr_avail_mmc;
748 773
749 /* 774 /*
750 * We need to get OCR a different way for SPI. 775 * We need to get OCR a different way for SPI.
@@ -784,20 +809,20 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
784 goto err; 809 goto err;
785 810
786 mmc_release_host(host); 811 mmc_release_host(host);
787
788 err = mmc_add_card(host->card); 812 err = mmc_add_card(host->card);
813 mmc_claim_host(host);
789 if (err) 814 if (err)
790 goto remove_card; 815 goto remove_card;
791 816
792 return 0; 817 return 0;
793 818
794remove_card: 819remove_card:
820 mmc_release_host(host);
795 mmc_remove_card(host->card); 821 mmc_remove_card(host->card);
796 host->card = NULL;
797 mmc_claim_host(host); 822 mmc_claim_host(host);
823 host->card = NULL;
798err: 824err:
799 mmc_detach_bus(host); 825 mmc_detach_bus(host);
800 mmc_release_host(host);
801 826
802 printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", 827 printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
803 mmc_hostname(host), err); 828 mmc_hostname(host), err);
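The rewritten width setup tries the widest mode the host supports first
and walks down until a switch sticks; when the host sets
MMC_CAP_BUS_WIDTH_TEST, each candidate is additionally verified with
CMD19/CMD14 through mmc_bus_test(). A condensed sketch of that control
flow (DDR selection, EXT_CSD error handling and the free_card path are
omitted; example_pick_bus_width is illustrative, not a drop-in for
mmc_init_card()):

static int example_pick_bus_width(struct mmc_card *card)
{
        static const unsigned int widths[] = {
                MMC_BUS_WIDTH_8, MMC_BUS_WIDTH_4, MMC_BUS_WIDTH_1,
        };
        static const u8 ext_csd_vals[] = {
                EXT_CSD_BUS_WIDTH_8, EXT_CSD_BUS_WIDTH_4, EXT_CSD_BUS_WIDTH_1,
        };
        struct mmc_host *host = card->host;
        unsigned int i = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;

        for (; i < ARRAY_SIZE(widths); i++) {
                if (mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                               EXT_CSD_BUS_WIDTH, ext_csd_vals[i]))
                        continue;       /* card rejected this width */
                mmc_set_bus_width(host, widths[i]);
                if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
                        return 0;       /* untestable: trust the switch */
                if (!mmc_bus_test(card, widths[i]))
                        return 0;       /* test pattern came back intact */
        }
        return -EIO;
}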
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c9ede8..60842f878ded 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -462,3 +462,104 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
462 return 0; 462 return 0;
463} 463}
464 464
465static int
466mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
467 u8 len)
468{
469 struct mmc_request mrq;
470 struct mmc_command cmd;
471 struct mmc_data data;
472 struct scatterlist sg;
473 u8 *data_buf;
474 u8 *test_buf;
475 int i, err;
476 static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
477 static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
478
 479 /* DMA onto the stack is unsafe/nonportable, so use a kmalloc'd
 480 * bounce buffer for the test pattern instead of a stack buffer.
481 */
482 data_buf = kmalloc(len, GFP_KERNEL);
483 if (!data_buf)
484 return -ENOMEM;
485
486 if (len == 8)
487 test_buf = testdata_8bit;
488 else if (len == 4)
489 test_buf = testdata_4bit;
490 else {
491 printk(KERN_ERR "%s: Invalid bus_width %d\n",
492 mmc_hostname(host), len);
493 kfree(data_buf);
494 return -EINVAL;
495 }
496
497 if (opcode == MMC_BUS_TEST_W)
498 memcpy(data_buf, test_buf, len);
499
500 memset(&mrq, 0, sizeof(struct mmc_request));
501 memset(&cmd, 0, sizeof(struct mmc_command));
502 memset(&data, 0, sizeof(struct mmc_data));
503
504 mrq.cmd = &cmd;
505 mrq.data = &data;
506 cmd.opcode = opcode;
507 cmd.arg = 0;
508
509 /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
510 * rely on callers to never use this with "native" calls for reading
511 * CSD or CID. Native versions of those commands use the R2 type,
512 * not R1 plus a data block.
513 */
514 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
515
516 data.blksz = len;
517 data.blocks = 1;
518 if (opcode == MMC_BUS_TEST_R)
519 data.flags = MMC_DATA_READ;
520 else
521 data.flags = MMC_DATA_WRITE;
522
523 data.sg = &sg;
524 data.sg_len = 1;
525 sg_init_one(&sg, data_buf, len);
526 mmc_wait_for_req(host, &mrq);
527 err = 0;
528 if (opcode == MMC_BUS_TEST_R) {
529 for (i = 0; i < len / 4; i++)
530 if ((test_buf[i] ^ data_buf[i]) != 0xff) {
531 err = -EIO;
532 break;
533 }
534 }
535 kfree(data_buf);
536
537 if (cmd.error)
538 return cmd.error;
539 if (data.error)
540 return data.error;
541
542 return err;
543}
544
545int mmc_bus_test(struct mmc_card *card, u8 bus_width)
546{
547 int err, width;
548
549 if (bus_width == MMC_BUS_WIDTH_8)
550 width = 8;
551 else if (bus_width == MMC_BUS_WIDTH_4)
552 width = 4;
553 else if (bus_width == MMC_BUS_WIDTH_1)
554 return 0; /* no need for test */
555 else
556 return -EINVAL;
557
558 /*
559 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
560 * is a problem. This improves chances that the test will work.
561 */
562 mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
563 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
564 return err;
565}
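A healthy bus returns the bitwise complement of the BUS_TEST_W pattern on
BUS_TEST_R, which is why each byte is validated as sent ^ received == 0xff.
A tiny standalone demonstration of that check (userspace C; the read-back
buffers are hypothetical, not real card data):

#include <stdio.h>

int main(void)
{
        const unsigned char sent[2] = { 0x55, 0xaa }; /* BUS_TEST_W data */
        const unsigned char good[2] = { 0xaa, 0x55 }; /* healthy bus */
        const unsigned char bad[2]  = { 0xaa, 0x75 }; /* one line stuck */
        int i;

        for (i = 0; i < 2; i++)
                printf("byte %d: good=%s bad=%s\n", i,
                       (sent[i] ^ good[i]) == 0xff ? "pass" : "FAIL",
                       (sent[i] ^ bad[i]) == 0xff ? "pass" : "FAIL");
        return 0;
}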
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e84178..e6d44b8a18db 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
27int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 27int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
28int mmc_card_sleepawake(struct mmc_host *host, int sleep); 28int mmc_card_sleepawake(struct mmc_host *host, int sleep);
29int mmc_bus_test(struct mmc_card *card, u8 bus_width);
29 30
30#endif 31#endif
31 32
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 49da4dffd28e..d18c32bca99b 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -764,14 +764,21 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
764/* 764/*
765 * Starting point for SD card init. 765 * Starting point for SD card init.
766 */ 766 */
767int mmc_attach_sd(struct mmc_host *host, u32 ocr) 767int mmc_attach_sd(struct mmc_host *host)
768{ 768{
769 int err; 769 int err;
770 u32 ocr;
770 771
771 BUG_ON(!host); 772 BUG_ON(!host);
772 WARN_ON(!host->claimed); 773 WARN_ON(!host->claimed);
773 774
775 err = mmc_send_app_op_cond(host, 0, &ocr);
776 if (err)
777 return err;
778
774 mmc_sd_attach_bus_ops(host); 779 mmc_sd_attach_bus_ops(host);
780 if (host->ocr_avail_sd)
781 host->ocr_avail = host->ocr_avail_sd;
775 782
776 /* 783 /*
777 * We need to get OCR a different way for SPI. 784 * We need to get OCR a different way for SPI.
@@ -795,7 +802,8 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
795 ocr &= ~0x7F; 802 ocr &= ~0x7F;
796 } 803 }
797 804
798 if (ocr & MMC_VDD_165_195) { 805 if ((ocr & MMC_VDD_165_195) &&
806 !(host->ocr_avail_sd & MMC_VDD_165_195)) {
799 printk(KERN_WARNING "%s: SD card claims to support the " 807 printk(KERN_WARNING "%s: SD card claims to support the "
800 "incompletely defined 'low voltage range'. This " 808 "incompletely defined 'low voltage range'. This "
801 "will be ignored.\n", mmc_hostname(host)); 809 "will be ignored.\n", mmc_hostname(host));
@@ -820,20 +828,20 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
820 goto err; 828 goto err;
821 829
822 mmc_release_host(host); 830 mmc_release_host(host);
823
824 err = mmc_add_card(host->card); 831 err = mmc_add_card(host->card);
832 mmc_claim_host(host);
825 if (err) 833 if (err)
826 goto remove_card; 834 goto remove_card;
827 835
828 return 0; 836 return 0;
829 837
830remove_card: 838remove_card:
839 mmc_release_host(host);
831 mmc_remove_card(host->card); 840 mmc_remove_card(host->card);
832 host->card = NULL; 841 host->card = NULL;
833 mmc_claim_host(host); 842 mmc_claim_host(host);
834err: 843err:
835 mmc_detach_bus(host); 844 mmc_detach_bus(host);
836 mmc_release_host(host);
837 845
838 printk(KERN_ERR "%s: error %d whilst initialising SD card\n", 846 printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
839 mmc_hostname(host), err); 847 mmc_hostname(host), err);
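The new per-bus masks (host->ocr_avail_sd here, with ocr_avail_mmc and
ocr_avail_sdio as siblings) let a host advertise different voltage windows
per card type, so the low-voltage warning above now fires only when the
host itself cannot supply the 1.65-1.95 V range. A hedged probe-time
sketch for a hypothetical controller that supports the low range for SD
cards only (foo_fill_ocr_masks is an illustrative name):

#include <linux/mmc/host.h>

static void foo_fill_ocr_masks(struct mmc_host *mmc)
{
        /* 3.2-3.4 V for every card type by default */
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
        /* SD cards may additionally use the 1.65-1.95 V window */
        mmc->ocr_avail_sd = mmc->ocr_avail | MMC_VDD_165_195;
        mmc->ocr_avail_mmc = mmc->ocr_avail;
}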
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index efef5f94ac42..5c4a54d9b6a4 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -627,15 +627,27 @@ static int mmc_sdio_suspend(struct mmc_host *host)
627 627
628static int mmc_sdio_resume(struct mmc_host *host) 628static int mmc_sdio_resume(struct mmc_host *host)
629{ 629{
630 int i, err; 630 int i, err = 0;
631 631
632 BUG_ON(!host); 632 BUG_ON(!host);
633 BUG_ON(!host->card); 633 BUG_ON(!host->card);
634 634
635 /* Basic card reinitialization. */ 635 /* Basic card reinitialization. */
636 mmc_claim_host(host); 636 mmc_claim_host(host);
637 err = mmc_sdio_init_card(host, host->ocr, host->card, 637
638 /* No need to reinitialize powered-resumed nonremovable cards */
639 if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
640 err = mmc_sdio_init_card(host, host->ocr, host->card,
638 (host->pm_flags & MMC_PM_KEEP_POWER)); 641 (host->pm_flags & MMC_PM_KEEP_POWER));
642 else if (mmc_card_is_powered_resumed(host)) {
643 /* We may have switched to 1-bit mode during suspend */
644 err = sdio_enable_4bit_bus(host->card);
645 if (err > 0) {
646 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
647 err = 0;
648 }
649 }
650
639 if (!err && host->sdio_irqs) 651 if (!err && host->sdio_irqs)
640 mmc_signal_sdio_irq(host); 652 mmc_signal_sdio_irq(host);
641 mmc_release_host(host); 653 mmc_release_host(host);
@@ -690,16 +702,22 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
690/* 702/*
691 * Starting point for SDIO card init. 703 * Starting point for SDIO card init.
692 */ 704 */
693int mmc_attach_sdio(struct mmc_host *host, u32 ocr) 705int mmc_attach_sdio(struct mmc_host *host)
694{ 706{
695 int err; 707 int err, i, funcs;
696 int i, funcs; 708 u32 ocr;
697 struct mmc_card *card; 709 struct mmc_card *card;
698 710
699 BUG_ON(!host); 711 BUG_ON(!host);
700 WARN_ON(!host->claimed); 712 WARN_ON(!host->claimed);
701 713
714 err = mmc_send_io_op_cond(host, 0, &ocr);
715 if (err)
716 return err;
717
702 mmc_attach_bus(host, &mmc_sdio_ops); 718 mmc_attach_bus(host, &mmc_sdio_ops);
719 if (host->ocr_avail_sdio)
720 host->ocr_avail = host->ocr_avail_sdio;
703 721
704 /* 722 /*
705 * Sanity check the voltages that the card claims to 723 * Sanity check the voltages that the card claims to
@@ -769,12 +787,12 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
769 pm_runtime_enable(&card->sdio_func[i]->dev); 787 pm_runtime_enable(&card->sdio_func[i]->dev);
770 } 788 }
771 789
772 mmc_release_host(host);
773
774 /* 790 /*
775 * First add the card to the driver model... 791 * First add the card to the driver model...
776 */ 792 */
793 mmc_release_host(host);
777 err = mmc_add_card(host->card); 794 err = mmc_add_card(host->card);
795 mmc_claim_host(host);
778 if (err) 796 if (err)
779 goto remove_added; 797 goto remove_added;
780 798
@@ -792,15 +810,17 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
792 810
793remove_added: 811remove_added:
794 /* Remove without lock if the device has been added. */ 812 /* Remove without lock if the device has been added. */
813 mmc_release_host(host);
795 mmc_sdio_remove(host); 814 mmc_sdio_remove(host);
796 mmc_claim_host(host); 815 mmc_claim_host(host);
797remove: 816remove:
798 /* And with lock if it hasn't been added. */ 817 /* And with lock if it hasn't been added. */
818 mmc_release_host(host);
799 if (host->card) 819 if (host->card)
800 mmc_sdio_remove(host); 820 mmc_sdio_remove(host);
821 mmc_claim_host(host);
801err: 822err:
802 mmc_detach_bus(host); 823 mmc_detach_bus(host);
803 mmc_release_host(host);
804 824
805 printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n", 825 printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
806 mmc_hostname(host), err); 826 mmc_hostname(host), err);
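The powered-resume shortcut above applies only when an SDIO function
driver asked for the card to stay powered across suspend. A hedged sketch
of how a function driver opts in from its suspend handler (foo_* names
are illustrative):

#include <linux/mmc/sdio_func.h>

static int foo_sdio_suspend(struct device *dev)
{
        struct sdio_func *func = dev_to_sdio_func(dev);

        if (!(sdio_get_host_pm_caps(func) & MMC_PM_KEEP_POWER))
                return -ENOSYS; /* host cannot keep the card powered */

        /* card keeps power; mmc_sdio_resume() then skips full reinit */
        return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}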
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 203da443e339..d29b9c36919a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,44 +197,12 @@ out:
197 197
198#ifdef CONFIG_PM_RUNTIME 198#ifdef CONFIG_PM_RUNTIME
199 199
200static int sdio_bus_pm_prepare(struct device *dev)
201{
202 struct sdio_func *func = dev_to_sdio_func(dev);
203
204 /*
205 * Resume an SDIO device which was suspended at run time at this
206 * point, in order to allow standard SDIO suspend/resume paths
207 * to keep working as usual.
208 *
209 * Ultimately, the SDIO driver itself will decide (in its
210 * suspend handler, or lack thereof) whether the card should be
211 * removed or kept, and if kept, at what power state.
212 *
213 * At this point, PM core have increased our use count, so it's
214 * safe to directly resume the device. After system is resumed
215 * again, PM core will drop back its runtime PM use count, and if
216 * needed device will be suspended again.
217 *
218 * The end result is guaranteed to be a power state that is
219 * coherent with the device's runtime PM use count.
220 *
221 * The return value of pm_runtime_resume is deliberately unchecked
222 * since there is little point in failing system suspend if a
223 * device can't be resumed.
224 */
225 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
226 pm_runtime_resume(dev);
227
228 return 0;
229}
230
231static const struct dev_pm_ops sdio_bus_pm_ops = { 200static const struct dev_pm_ops sdio_bus_pm_ops = {
232 SET_RUNTIME_PM_OPS( 201 SET_RUNTIME_PM_OPS(
233 pm_generic_runtime_suspend, 202 pm_generic_runtime_suspend,
234 pm_generic_runtime_resume, 203 pm_generic_runtime_resume,
235 pm_generic_runtime_idle 204 pm_generic_runtime_idle
236 ) 205 )
237 .prepare = sdio_bus_pm_prepare,
238}; 206};
239 207
240#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops) 208#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e960a9300eb2..c22a4c039988 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -142,6 +142,27 @@ config MMC_SDHCI_ESDHC_IMX
142 142
143 If unsure, say N. 143 If unsure, say N.
144 144
145config MMC_SDHCI_DOVE
146 bool "SDHCI support on Marvell's Dove SoC"
147 depends on ARCH_DOVE
148 depends on MMC_SDHCI_PLTFM
149 select MMC_SDHCI_IO_ACCESSORS
150 help
151 This selects the Secure Digital Host Controller Interface in
152 Marvell's Dove SoC.
153
154 If unsure, say N.
155
156config MMC_SDHCI_TEGRA
157 tristate "SDHCI platform support for the Tegra SD/MMC Controller"
158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
159 select MMC_SDHCI_IO_ACCESSORS
160 help
161 This selects the Tegra SD/MMC controller. If you have a Tegra
162 platform with SD or MMC devices, say Y or M here.
163
164 If unsure, say N.
165
145config MMC_SDHCI_S3C 166config MMC_SDHCI_S3C
146 tristate "SDHCI support on Samsung S3C SoC" 167 tristate "SDHCI support on Samsung S3C SoC"
147 depends on MMC_SDHCI && PLAT_SAMSUNG 168 depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -460,6 +481,22 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
460 help 481 help
461 If you say yes here SD-Cards may work on the EZkit. 482 If you say yes here SD-Cards may work on the EZkit.
462 483
484config MMC_DW
485 tristate "Synopsys DesignWare Memory Card Interface"
486 depends on ARM
487 help
488 This selects support for the Synopsys DesignWare Mobile Storage IP
 489 block; it provides host support for SD and MMC interfaces, in both
490 PIO and external DMA modes.
491
492config MMC_DW_IDMAC
493 bool "Internal DMAC interface"
494 depends on MMC_DW
495 help
496 This selects support for the internal DMAC block within the Synopsys
497 Designware Mobile Storage IP block. This disables the external DMA
498 interface.
499
463config MMC_SH_MMCIF 500config MMC_SH_MMCIF
464 tristate "SuperH Internal MMCIF support" 501 tristate "SuperH Internal MMCIF support"
465 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) 502 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7b645ff43b30..e834fb223e9a 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
31obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 31obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
32obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 32obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
33obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 33obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
34obj-$(CONFIG_MMC_DW) += dw_mmc.o
34obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 35obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
35obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 36obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
36obj-$(CONFIG_MMC_USHC) += ushc.o 37obj-$(CONFIG_MMC_USHC) += ushc.o
@@ -39,6 +40,8 @@ obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
39sdhci-platform-y := sdhci-pltfm.o 40sdhci-platform-y := sdhci-pltfm.o
40sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o 41sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
41sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o 42sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
43sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
44sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
42 45
43obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 46obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
44sdhci-of-y := sdhci-of-core.o 47sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e15547cf701f..0076c7448fe6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
66#define DAVINCI_MMCBLNC 0x60 66#define DAVINCI_MMCBLNC 0x60
67#define DAVINCI_SDIOCTL 0x64 67#define DAVINCI_SDIOCTL 0x64
68#define DAVINCI_SDIOST0 0x68 68#define DAVINCI_SDIOST0 0x68
69#define DAVINCI_SDIOEN 0x6C 69#define DAVINCI_SDIOIEN 0x6C
70#define DAVINCI_SDIOST 0x70 70#define DAVINCI_SDIOIST 0x70
71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
72 72
73/* DAVINCI_MMCCTL definitions */ 73/* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
131#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 131#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
132#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 132#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
133 133
134/* DAVINCI_SDIOST0 definitions */
135#define SDIOST0_DAT1_HI BIT(0)
136
137/* DAVINCI_SDIOIEN definitions */
138#define SDIOIEN_IOINTEN BIT(0)
139
140/* DAVINCI_SDIOIST definitions */
141#define SDIOIST_IOINT BIT(0)
134 142
135/* MMCSD Init clock in Hz in opendrain mode */ 143/* MMCSD Init clock in Hz in opendrain mode */
136#define MMCSD_INIT_CLOCK 200000 144#define MMCSD_INIT_CLOCK 200000
@@ -164,7 +172,7 @@ struct mmc_davinci_host {
164 unsigned int mmc_input_clk; 172 unsigned int mmc_input_clk;
165 void __iomem *base; 173 void __iomem *base;
166 struct resource *mem_res; 174 struct resource *mem_res;
167 int irq; 175 int mmc_irq, sdio_irq;
168 unsigned char bus_mode; 176 unsigned char bus_mode;
169 177
170#define DAVINCI_MMC_DATADIR_NONE 0 178#define DAVINCI_MMC_DATADIR_NONE 0
@@ -184,6 +192,7 @@ struct mmc_davinci_host {
184 u32 rxdma, txdma; 192 u32 rxdma, txdma;
185 bool use_dma; 193 bool use_dma;
186 bool do_dma; 194 bool do_dma;
195 bool sdio_int;
187 196
188 /* Scatterlist DMA uses one or more parameter RAM entries: 197 /* Scatterlist DMA uses one or more parameter RAM entries:
189 * the main one (associated with rxdma or txdma) plus zero or 198 * the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
480 struct scatterlist *sg; 489 struct scatterlist *sg;
481 unsigned sg_len; 490 unsigned sg_len;
482 unsigned bytes_left = host->bytes_left; 491 unsigned bytes_left = host->bytes_left;
483 const unsigned shift = ffs(rw_threshold) - 1;; 492 const unsigned shift = ffs(rw_threshold) - 1;
484 493
485 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 494 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
486 template = &host->tx_template; 495 template = &host->tx_template;
@@ -866,6 +875,19 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
866{ 875{
867 host->data = NULL; 876 host->data = NULL;
868 877
878 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
879 /*
880 * SDIO Interrupt Detection work-around as suggested by
881 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
882 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
883 */
884 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
885 SDIOST0_DAT1_HI)) {
886 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
887 mmc_signal_sdio_irq(host->mmc);
888 }
889 }
890
869 if (host->do_dma) { 891 if (host->do_dma) {
870 davinci_abort_dma(host); 892 davinci_abort_dma(host);
871 893
@@ -932,6 +954,21 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
932 mmc_davinci_reset_ctrl(host, 0); 954 mmc_davinci_reset_ctrl(host, 0);
933} 955}
934 956
957static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
958{
959 struct mmc_davinci_host *host = dev_id;
960 unsigned int status;
961
962 status = readl(host->base + DAVINCI_SDIOIST);
963 if (status & SDIOIST_IOINT) {
964 dev_dbg(mmc_dev(host->mmc),
965 "SDIO interrupt status %x\n", status);
966 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
967 mmc_signal_sdio_irq(host->mmc);
968 }
969 return IRQ_HANDLED;
970}
971
935static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 972static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
936{ 973{
937 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 974 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
1076 return config->get_ro(pdev->id); 1113 return config->get_ro(pdev->id);
1077} 1114}
1078 1115
1116static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1117{
1118 struct mmc_davinci_host *host = mmc_priv(mmc);
1119
1120 if (enable) {
1121 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
1122 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
1123 mmc_signal_sdio_irq(host->mmc);
1124 } else {
1125 host->sdio_int = true;
1126 writel(readl(host->base + DAVINCI_SDIOIEN) |
1127 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
1128 }
1129 } else {
1130 host->sdio_int = false;
1131 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
1132 host->base + DAVINCI_SDIOIEN);
1133 }
1134}
1135
1079static struct mmc_host_ops mmc_davinci_ops = { 1136static struct mmc_host_ops mmc_davinci_ops = {
1080 .request = mmc_davinci_request, 1137 .request = mmc_davinci_request,
1081 .set_ios = mmc_davinci_set_ios, 1138 .set_ios = mmc_davinci_set_ios,
1082 .get_cd = mmc_davinci_get_cd, 1139 .get_cd = mmc_davinci_get_cd,
1083 .get_ro = mmc_davinci_get_ro, 1140 .get_ro = mmc_davinci_get_ro,
1141 .enable_sdio_irq = mmc_davinci_enable_sdio_irq,
1084}; 1142};
1085 1143
1086/*----------------------------------------------------------------------*/ 1144/*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1209 host->nr_sg = MAX_NR_SG; 1267 host->nr_sg = MAX_NR_SG;
1210 1268
1211 host->use_dma = use_dma; 1269 host->use_dma = use_dma;
1212 host->irq = irq; 1270 host->mmc_irq = irq;
1271 host->sdio_irq = platform_get_irq(pdev, 1);
1213 1272
1214 if (host->use_dma && davinci_acquire_dma_channels(host) != 0) 1273 if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
1215 host->use_dma = 0; 1274 host->use_dma = 0;
@@ -1270,6 +1329,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1270 if (ret) 1329 if (ret)
1271 goto out; 1330 goto out;
1272 1331
1332 if (host->sdio_irq >= 0) {
1333 ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
1334 mmc_hostname(mmc), host);
1335 if (!ret)
1336 mmc->caps |= MMC_CAP_SDIO_IRQ;
1337 }
1338
1273 rename_region(mem, mmc_hostname(mmc)); 1339 rename_region(mem, mmc_hostname(mmc));
1274 1340
1275 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1341 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1313 mmc_davinci_cpufreq_deregister(host); 1379 mmc_davinci_cpufreq_deregister(host);
1314 1380
1315 mmc_remove_host(host->mmc); 1381 mmc_remove_host(host->mmc);
1316 free_irq(host->irq, host); 1382 free_irq(host->mmc_irq, host);
1383 if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
1384 free_irq(host->sdio_irq, host);
1317 1385
1318 davinci_release_dma_channels(host); 1386 davinci_release_dma_channels(host);
1319 1387
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 000000000000..2fcc82577c1b
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1796 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/scatterlist.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/stat.h>
29#include <linux/delay.h>
30#include <linux/irq.h>
31#include <linux/mmc/host.h>
32#include <linux/mmc/mmc.h>
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
35
36#include "dw_mmc.h"
37
38/* Common flag combinations */
39#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
40 SDMMC_INT_HTO | SDMMC_INT_SBE | \
41 SDMMC_INT_EBE)
42#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
43 SDMMC_INT_RESP_ERR)
44#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
45 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
46#define DW_MCI_SEND_STATUS 1
47#define DW_MCI_RECV_STATUS 2
48#define DW_MCI_DMA_THRESHOLD 16
49
50#ifdef CONFIG_MMC_DW_IDMAC
51struct idmac_desc {
52 u32 des0; /* Control Descriptor */
53#define IDMAC_DES0_DIC BIT(1)
54#define IDMAC_DES0_LD BIT(2)
55#define IDMAC_DES0_FD BIT(3)
56#define IDMAC_DES0_CH BIT(4)
57#define IDMAC_DES0_ER BIT(5)
58#define IDMAC_DES0_CES BIT(30)
59#define IDMAC_DES0_OWN BIT(31)
60
61 u32 des1; /* Buffer sizes */
62#define IDMAC_SET_BUFFER1_SIZE(d, s) \
63 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
64
65 u32 des2; /* buffer 1 physical address */
66
67 u32 des3; /* buffer 2 physical address */
68};
69#endif /* CONFIG_MMC_DW_IDMAC */
70
71/**
72 * struct dw_mci_slot - MMC slot state
73 * @mmc: The mmc_host representing this slot.
74 * @host: The MMC controller this slot is using.
75 * @ctype: Card type for this slot.
76 * @mrq: mmc_request currently being processed or waiting to be
77 * processed, or NULL when the slot is idle.
78 * @queue_node: List node for placing this node in the @queue list of
79 * &struct dw_mci.
80 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
81 * @flags: Random state bits associated with the slot.
82 * @id: Number of this slot.
83 * @last_detect_state: Most recently observed card detect state.
84 */
85struct dw_mci_slot {
86 struct mmc_host *mmc;
87 struct dw_mci *host;
88
89 u32 ctype;
90
91 struct mmc_request *mrq;
92 struct list_head queue_node;
93
94 unsigned int clock;
95 unsigned long flags;
96#define DW_MMC_CARD_PRESENT 0
97#define DW_MMC_CARD_NEED_INIT 1
98 int id;
99 int last_detect_state;
100};
101
102#if defined(CONFIG_DEBUG_FS)
103static int dw_mci_req_show(struct seq_file *s, void *v)
104{
105 struct dw_mci_slot *slot = s->private;
106 struct mmc_request *mrq;
107 struct mmc_command *cmd;
108 struct mmc_command *stop;
109 struct mmc_data *data;
110
111 /* Make sure we get a consistent snapshot */
112 spin_lock_bh(&slot->host->lock);
113 mrq = slot->mrq;
114
115 if (mrq) {
116 cmd = mrq->cmd;
117 data = mrq->data;
118 stop = mrq->stop;
119
120 if (cmd)
121 seq_printf(s,
122 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
123 cmd->opcode, cmd->arg, cmd->flags,
124 cmd->resp[0], cmd->resp[1], cmd->resp[2],
 125 cmd->resp[3], cmd->error);
126 if (data)
127 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
128 data->bytes_xfered, data->blocks,
129 data->blksz, data->flags, data->error);
130 if (stop)
131 seq_printf(s,
132 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
133 stop->opcode, stop->arg, stop->flags,
134 stop->resp[0], stop->resp[1], stop->resp[2],
 135 stop->resp[3], stop->error);
136 }
137
138 spin_unlock_bh(&slot->host->lock);
139
140 return 0;
141}
142
143static int dw_mci_req_open(struct inode *inode, struct file *file)
144{
145 return single_open(file, dw_mci_req_show, inode->i_private);
146}
147
148static const struct file_operations dw_mci_req_fops = {
149 .owner = THIS_MODULE,
150 .open = dw_mci_req_open,
151 .read = seq_read,
152 .llseek = seq_lseek,
153 .release = single_release,
154};
155
156static int dw_mci_regs_show(struct seq_file *s, void *v)
157{
158 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
159 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
160 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
161 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
162 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
163 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
164
165 return 0;
166}
167
168static int dw_mci_regs_open(struct inode *inode, struct file *file)
169{
170 return single_open(file, dw_mci_regs_show, inode->i_private);
171}
172
173static const struct file_operations dw_mci_regs_fops = {
174 .owner = THIS_MODULE,
175 .open = dw_mci_regs_open,
176 .read = seq_read,
177 .llseek = seq_lseek,
178 .release = single_release,
179};
180
181static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
182{
183 struct mmc_host *mmc = slot->mmc;
184 struct dw_mci *host = slot->host;
185 struct dentry *root;
186 struct dentry *node;
187
188 root = mmc->debugfs_root;
189 if (!root)
190 return;
191
192 node = debugfs_create_file("regs", S_IRUSR, root, host,
193 &dw_mci_regs_fops);
194 if (!node)
195 goto err;
196
197 node = debugfs_create_file("req", S_IRUSR, root, slot,
198 &dw_mci_req_fops);
199 if (!node)
200 goto err;
201
202 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
203 if (!node)
204 goto err;
205
206 node = debugfs_create_x32("pending_events", S_IRUSR, root,
207 (u32 *)&host->pending_events);
208 if (!node)
209 goto err;
210
211 node = debugfs_create_x32("completed_events", S_IRUSR, root,
212 (u32 *)&host->completed_events);
213 if (!node)
214 goto err;
215
216 return;
217
218err:
219 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
220}
221#endif /* defined(CONFIG_DEBUG_FS) */
222
223static void dw_mci_set_timeout(struct dw_mci *host)
224{
225 /* timeout (maximum) */
226 mci_writel(host, TMOUT, 0xffffffff);
227}
228
229static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
230{
231 struct mmc_data *data;
232 u32 cmdr;
233 cmd->error = -EINPROGRESS;
234
235 cmdr = cmd->opcode;
236
237 if (cmdr == MMC_STOP_TRANSMISSION)
238 cmdr |= SDMMC_CMD_STOP;
239 else
240 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
241
242 if (cmd->flags & MMC_RSP_PRESENT) {
243 /* We expect a response, so set this bit */
244 cmdr |= SDMMC_CMD_RESP_EXP;
245 if (cmd->flags & MMC_RSP_136)
246 cmdr |= SDMMC_CMD_RESP_LONG;
247 }
248
249 if (cmd->flags & MMC_RSP_CRC)
250 cmdr |= SDMMC_CMD_RESP_CRC;
251
252 data = cmd->data;
253 if (data) {
254 cmdr |= SDMMC_CMD_DAT_EXP;
255 if (data->flags & MMC_DATA_STREAM)
256 cmdr |= SDMMC_CMD_STRM_MODE;
257 if (data->flags & MMC_DATA_WRITE)
258 cmdr |= SDMMC_CMD_DAT_WR;
259 }
260
261 return cmdr;
262}
263
264static void dw_mci_start_command(struct dw_mci *host,
265 struct mmc_command *cmd, u32 cmd_flags)
266{
267 host->cmd = cmd;
268 dev_vdbg(&host->pdev->dev,
269 "start command: ARGR=0x%08x CMDR=0x%08x\n",
270 cmd->arg, cmd_flags);
271
272 mci_writel(host, CMDARG, cmd->arg);
273 wmb();
274
275 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
276}
277
278static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
279{
280 dw_mci_start_command(host, data->stop, host->stop_cmdr);
281}
282
283/* DMA interface functions */
284static void dw_mci_stop_dma(struct dw_mci *host)
285{
286 if (host->use_dma) {
287 host->dma_ops->stop(host);
288 host->dma_ops->cleanup(host);
289 } else {
290 /* Data transfer was stopped by the interrupt handler */
291 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
292 }
293}
294
295#ifdef CONFIG_MMC_DW_IDMAC
296static void dw_mci_dma_cleanup(struct dw_mci *host)
297{
298 struct mmc_data *data = host->data;
299
300 if (data)
301 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
302 ((data->flags & MMC_DATA_WRITE)
303 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
304}
305
306static void dw_mci_idmac_stop_dma(struct dw_mci *host)
307{
308 u32 temp;
309
310 /* Disable and reset the IDMAC interface */
311 temp = mci_readl(host, CTRL);
312 temp &= ~SDMMC_CTRL_USE_IDMAC;
313 temp |= SDMMC_CTRL_DMA_RESET;
314 mci_writel(host, CTRL, temp);
315
316 /* Stop the IDMAC running */
317 temp = mci_readl(host, BMOD);
318 temp &= ~SDMMC_IDMAC_ENABLE;
319 mci_writel(host, BMOD, temp);
320}
321
322static void dw_mci_idmac_complete_dma(struct dw_mci *host)
323{
324 struct mmc_data *data = host->data;
325
326 dev_vdbg(&host->pdev->dev, "DMA complete\n");
327
328 host->dma_ops->cleanup(host);
329
330 /*
331 * If the card was removed, data will be NULL. No point in trying to
332 * send the stop command or waiting for NBUSY in this case.
333 */
334 if (data) {
335 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
336 tasklet_schedule(&host->tasklet);
337 }
338}
339
340static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
341 unsigned int sg_len)
342{
343 int i;
344 struct idmac_desc *desc = host->sg_cpu;
345
346 for (i = 0; i < sg_len; i++, desc++) {
347 unsigned int length = sg_dma_len(&data->sg[i]);
348 u32 mem_addr = sg_dma_address(&data->sg[i]);
349
350 /* Set the OWN bit and disable interrupts for this descriptor */
351 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
352
353 /* Buffer length */
354 IDMAC_SET_BUFFER1_SIZE(desc, length);
355
356 /* Physical address to DMA to/from */
357 desc->des2 = mem_addr;
358 }
359
360 /* Set first descriptor */
361 desc = host->sg_cpu;
362 desc->des0 |= IDMAC_DES0_FD;
363
364 /* Set last descriptor */
365 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
366 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
367 desc->des0 |= IDMAC_DES0_LD;
368
369 wmb();
370}
371
372static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
373{
374 u32 temp;
375
376 dw_mci_translate_sglist(host, host->data, sg_len);
377
378 /* Select IDMAC interface */
379 temp = mci_readl(host, CTRL);
380 temp |= SDMMC_CTRL_USE_IDMAC;
381 mci_writel(host, CTRL, temp);
382
383 wmb();
384
385 /* Enable the IDMAC */
386 temp = mci_readl(host, BMOD);
387 temp |= SDMMC_IDMAC_ENABLE;
388 mci_writel(host, BMOD, temp);
389
390 /* Start it running */
391 mci_writel(host, PLDMND, 1);
392}
393
394static int dw_mci_idmac_init(struct dw_mci *host)
395{
396 struct idmac_desc *p;
397 int i;
398
399 /* Number of descriptors in the ring buffer */
400 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
401
402 /* Forward link the descriptor list */
403 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
404 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
405
406 /* Set the last descriptor as the end-of-ring descriptor */
407 p->des3 = host->sg_dma;
408 p->des0 = IDMAC_DES0_ER;
409
410 /* Mask out interrupts - get Tx & Rx complete only */
411 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
412 SDMMC_IDMAC_INT_TI);
413
414 /* Set the descriptor base address */
415 mci_writel(host, DBADDR, host->sg_dma);
416 return 0;
417}
418
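/*
 * Illustrative note on the ring built above (hypothetical annotation,
 * not from the driver): with 4 KiB pages and 16-byte descriptors (four
 * u32s), host->ring_size comes to 256. des3 of each entry holds the DMA
 * address of the next descriptor, and the final entry gets IDMAC_DES0_ER
 * with a des3 pointing back at host->sg_dma, closing the ring.
 */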
419static struct dw_mci_dma_ops dw_mci_idmac_ops = {
420 .init = dw_mci_idmac_init,
421 .start = dw_mci_idmac_start_dma,
422 .stop = dw_mci_idmac_stop_dma,
423 .complete = dw_mci_idmac_complete_dma,
424 .cleanup = dw_mci_dma_cleanup,
425};
426#endif /* CONFIG_MMC_DW_IDMAC */
427
428static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
429{
430 struct scatterlist *sg;
431 unsigned int i, direction, sg_len;
432 u32 temp;
433
434 /* If we don't have a channel, we can't do DMA */
435 if (!host->use_dma)
436 return -ENODEV;
437
438 /*
439 * We don't do DMA on "complex" transfers, i.e. with
440 * non-word-aligned buffers or lengths. Also, we don't bother
441 * with all the DMA setup overhead for short transfers.
442 */
443 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
444 return -EINVAL;
445 if (data->blksz & 3)
446 return -EINVAL;
447
448 for_each_sg(data->sg, sg, data->sg_len, i) {
449 if (sg->offset & 3 || sg->length & 3)
450 return -EINVAL;
451 }
452
453 if (data->flags & MMC_DATA_READ)
454 direction = DMA_FROM_DEVICE;
455 else
456 direction = DMA_TO_DEVICE;
457
458 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
459 direction);
460
461 dev_vdbg(&host->pdev->dev,
462 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
463 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
464 sg_len);
465
466 /* Enable the DMA interface */
467 temp = mci_readl(host, CTRL);
468 temp |= SDMMC_CTRL_DMA_ENABLE;
469 mci_writel(host, CTRL, temp);
470
471 /* Disable RX/TX IRQs, let DMA handle it */
472 temp = mci_readl(host, INTMASK);
473 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
474 mci_writel(host, INTMASK, temp);
475
476 host->dma_ops->start(host, sg_len);
477
478 return 0;
479}
480
481static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
482{
483 u32 temp;
484
485 data->error = -EINPROGRESS;
486
487 WARN_ON(host->data);
488 host->sg = NULL;
489 host->data = data;
490
491 if (dw_mci_submit_data_dma(host, data)) {
492 host->sg = data->sg;
493 host->pio_offset = 0;
494 if (data->flags & MMC_DATA_READ)
495 host->dir_status = DW_MCI_RECV_STATUS;
496 else
497 host->dir_status = DW_MCI_SEND_STATUS;
498
499 temp = mci_readl(host, INTMASK);
500 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
501 mci_writel(host, INTMASK, temp);
502
503 temp = mci_readl(host, CTRL);
504 temp &= ~SDMMC_CTRL_DMA_ENABLE;
505 mci_writel(host, CTRL, temp);
506 }
507}
508
509static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
510{
511 struct dw_mci *host = slot->host;
512 unsigned long timeout = jiffies + msecs_to_jiffies(500);
513 unsigned int cmd_status = 0;
514
515 mci_writel(host, CMDARG, arg);
516 wmb();
517 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
518
519 while (time_before(jiffies, timeout)) {
520 cmd_status = mci_readl(host, CMD);
521 if (!(cmd_status & SDMMC_CMD_START))
522 return;
523 }
524 dev_err(&slot->mmc->class_dev,
525 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
526 cmd, arg, cmd_status);
527}
528
529static void dw_mci_setup_bus(struct dw_mci_slot *slot)
530{
531 struct dw_mci *host = slot->host;
532 u32 div;
533
534 if (slot->clock != host->current_speed) {
535 if (host->bus_hz % slot->clock)
536 /*
537 * move the + 1 after the divide to prevent
538 * over-clocking the card.
539 */
540 div = ((host->bus_hz / slot->clock) >> 1) + 1;
541 else
542 div = (host->bus_hz / slot->clock) >> 1;
543
544 dev_info(&slot->mmc->class_dev,
545 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
546 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
547 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
548
549 /* disable clock */
550 mci_writel(host, CLKENA, 0);
551 mci_writel(host, CLKSRC, 0);
552
553 /* inform CIU */
554 mci_send_cmd(slot,
555 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
556
557 /* set clock to desired speed */
558 mci_writel(host, CLKDIV, div);
559
560 /* inform CIU */
561 mci_send_cmd(slot,
562 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
563
564 /* enable clock */
565 mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
566
567 /* inform CIU */
568 mci_send_cmd(slot,
569 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
570
571 host->current_speed = slot->clock;
572 }
573
574 /* Set the current slot bus width */
575 mci_writel(host, CTYPE, slot->ctype);
576}
577
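/*
 * Illustrative note on the divider maths above (hypothetical annotation,
 * not from the driver): CLKDIV yields card_clk = bus_hz / (2 * div) for
 * div > 0 and bus_hz for div == 0. Example: bus_hz = 100 MHz with a slot
 * request of 60 MHz does not divide evenly, so
 * div = ((100 / 60) >> 1) + 1 = 1 and the card runs at 50 MHz; adding 1
 * after the shift means the result can only undershoot the request,
 * never over-clock the card.
 */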
578static void dw_mci_start_request(struct dw_mci *host,
579 struct dw_mci_slot *slot)
580{
581 struct mmc_request *mrq;
582 struct mmc_command *cmd;
583 struct mmc_data *data;
584 u32 cmdflags;
585
586 mrq = slot->mrq;
587 if (host->pdata->select_slot)
588 host->pdata->select_slot(slot->id);
589
590 /* Slot specific timing and width adjustment */
591 dw_mci_setup_bus(slot);
592
593 host->cur_slot = slot;
594 host->mrq = mrq;
595
596 host->pending_events = 0;
597 host->completed_events = 0;
598 host->data_status = 0;
599
600 data = mrq->data;
601 if (data) {
602 dw_mci_set_timeout(host);
603 mci_writel(host, BYTCNT, data->blksz*data->blocks);
604 mci_writel(host, BLKSIZ, data->blksz);
605 }
606
607 cmd = mrq->cmd;
608 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
609
610 /* this is the first command, send the initialization clock */
611 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
612 cmdflags |= SDMMC_CMD_INIT;
613
614 if (data) {
615 dw_mci_submit_data(host, data);
616 wmb();
617 }
618
619 dw_mci_start_command(host, cmd, cmdflags);
620
621 if (mrq->stop)
622 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
623}
624
625static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
626 struct mmc_request *mrq)
627{
628 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
629 host->state);
630
631 spin_lock_bh(&host->lock);
632 slot->mrq = mrq;
633
634 if (host->state == STATE_IDLE) {
635 host->state = STATE_SENDING_CMD;
636 dw_mci_start_request(host, slot);
637 } else {
638 list_add_tail(&slot->queue_node, &host->queue);
639 }
640
641 spin_unlock_bh(&host->lock);
642}
643
644static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
645{
646 struct dw_mci_slot *slot = mmc_priv(mmc);
647 struct dw_mci *host = slot->host;
648
649 WARN_ON(slot->mrq);
650
651 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
652 mrq->cmd->error = -ENOMEDIUM;
653 mmc_request_done(mmc, mrq);
654 return;
655 }
656
657 /* We don't support multiple blocks of weird lengths. */
658 dw_mci_queue_request(host, slot, mrq);
659}
660
661static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
662{
663 struct dw_mci_slot *slot = mmc_priv(mmc);
664
665 /* set default 1 bit mode */
666 slot->ctype = SDMMC_CTYPE_1BIT;
667
668 switch (ios->bus_width) {
669 case MMC_BUS_WIDTH_1:
670 slot->ctype = SDMMC_CTYPE_1BIT;
671 break;
672 case MMC_BUS_WIDTH_4:
673 slot->ctype = SDMMC_CTYPE_4BIT;
674 break;
675 }
676
677 if (ios->clock) {
678 /*
679 * Use mirror of ios->clock to prevent race with mmc
680 * core ios update when finding the minimum.
681 */
682 slot->clock = ios->clock;
683 }
684
685 switch (ios->power_mode) {
686 case MMC_POWER_UP:
687 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
688 break;
689 default:
690 break;
691 }
692}
693
694static int dw_mci_get_ro(struct mmc_host *mmc)
695{
696 int read_only;
697 struct dw_mci_slot *slot = mmc_priv(mmc);
698 struct dw_mci_board *brd = slot->host->pdata;
699
700 /* Use platform get_ro function, else try on board write protect */
701 if (brd->get_ro)
702 read_only = brd->get_ro(slot->id);
703 else
704 read_only =
705 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
706
707 dev_dbg(&mmc->class_dev, "card is %s\n",
708 read_only ? "read-only" : "read-write");
709
710 return read_only;
711}
712
713static int dw_mci_get_cd(struct mmc_host *mmc)
714{
715 int present;
716 struct dw_mci_slot *slot = mmc_priv(mmc);
717 struct dw_mci_board *brd = slot->host->pdata;
718
719 /* Use platform get_cd function, else try onboard card detect */
720 if (brd->get_cd)
721 present = !brd->get_cd(slot->id);
722 else
723 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
724 == 0 ? 1 : 0;
725
726 if (present)
727 dev_dbg(&mmc->class_dev, "card is present\n");
728 else
729 dev_dbg(&mmc->class_dev, "card is not present\n");
730
731 return present;
732}
733
734static const struct mmc_host_ops dw_mci_ops = {
735 .request = dw_mci_request,
736 .set_ios = dw_mci_set_ios,
737 .get_ro = dw_mci_get_ro,
738 .get_cd = dw_mci_get_cd,
739};
740
741static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
742 __releases(&host->lock)
743 __acquires(&host->lock)
744{
745 struct dw_mci_slot *slot;
746 struct mmc_host *prev_mmc = host->cur_slot->mmc;
747
748 WARN_ON(host->cmd || host->data);
749
750 host->cur_slot->mrq = NULL;
751 host->mrq = NULL;
752 if (!list_empty(&host->queue)) {
753 slot = list_entry(host->queue.next,
754 struct dw_mci_slot, queue_node);
755 list_del(&slot->queue_node);
756 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
757 mmc_hostname(slot->mmc));
758 host->state = STATE_SENDING_CMD;
759 dw_mci_start_request(host, slot);
760 } else {
761 dev_vdbg(&host->pdev->dev, "list empty\n");
762 host->state = STATE_IDLE;
763 }
764
765 spin_unlock(&host->lock);
766 mmc_request_done(prev_mmc, mrq);
767 spin_lock(&host->lock);
768}
769
770static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
771{
772 u32 status = host->cmd_status;
773
774 host->cmd_status = 0;
775
776 /* Read the response from the card (up to 16 bytes) */
777 if (cmd->flags & MMC_RSP_PRESENT) {
778 if (cmd->flags & MMC_RSP_136) {
779 cmd->resp[3] = mci_readl(host, RESP0);
780 cmd->resp[2] = mci_readl(host, RESP1);
781 cmd->resp[1] = mci_readl(host, RESP2);
782 cmd->resp[0] = mci_readl(host, RESP3);
783 } else {
784 cmd->resp[0] = mci_readl(host, RESP0);
785 cmd->resp[1] = 0;
786 cmd->resp[2] = 0;
787 cmd->resp[3] = 0;
788 }
789 }
790
791 if (status & SDMMC_INT_RTO)
792 cmd->error = -ETIMEDOUT;
793 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
794 cmd->error = -EILSEQ;
795 else if (status & SDMMC_INT_RESP_ERR)
796 cmd->error = -EIO;
797 else
798 cmd->error = 0;
799
800 if (cmd->error) {
801 /* newer IP versions need a delay between retries */
802 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
803 mdelay(20);
804
805 if (cmd->data) {
806 host->data = NULL;
807 dw_mci_stop_dma(host);
808 }
809 }
810}
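
/*
 * The response copy above is deliberately reversed for long responses:
 * RESP0 holds the least significant word, while the mmc core expects
 * resp[0] to be the most significant.  For an R2 (136-bit CID/CSD)
 * response this means resp[0] = bits 127:96 = RESP3, down to
 * resp[3] = bits 31:0 = RESP0.
 */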
811
812static void dw_mci_tasklet_func(unsigned long priv)
813{
814 struct dw_mci *host = (struct dw_mci *)priv;
815 struct mmc_data *data;
816 struct mmc_command *cmd;
817 enum dw_mci_state state;
818 enum dw_mci_state prev_state;
819 u32 status;
820
821 spin_lock(&host->lock);
822
823 state = host->state;
824 data = host->data;
825
826 do {
827 prev_state = state;
828
829 switch (state) {
830 case STATE_IDLE:
831 break;
832
833 case STATE_SENDING_CMD:
834 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
835 &host->pending_events))
836 break;
837
838 cmd = host->cmd;
839 host->cmd = NULL;
840 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
841 dw_mci_command_complete(host, host->mrq->cmd);
842 if (!host->mrq->data || cmd->error) {
843 dw_mci_request_end(host, host->mrq);
844 goto unlock;
845 }
846
847 prev_state = state = STATE_SENDING_DATA;
848 /* fall through */
849
850 case STATE_SENDING_DATA:
851 if (test_and_clear_bit(EVENT_DATA_ERROR,
852 &host->pending_events)) {
853 dw_mci_stop_dma(host);
854 if (data->stop)
855 send_stop_cmd(host, data);
856 state = STATE_DATA_ERROR;
857 break;
858 }
859
860 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
861 &host->pending_events))
862 break;
863
864 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
865 prev_state = state = STATE_DATA_BUSY;
866 /* fall through */
867
868 case STATE_DATA_BUSY:
869 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
870 &host->pending_events))
871 break;
872
873 host->data = NULL;
874 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
875 status = host->data_status;
876
877 if (status & DW_MCI_DATA_ERROR_FLAGS) {
878 if (status & SDMMC_INT_DTO) {
879 dev_err(&host->pdev->dev,
880 "data timeout error\n");
881 data->error = -ETIMEDOUT;
882 } else if (status & SDMMC_INT_DCRC) {
883 dev_err(&host->pdev->dev,
884 "data CRC error\n");
885 data->error = -EILSEQ;
886 } else {
887 dev_err(&host->pdev->dev,
888 "data FIFO error "
889 "(status=%08x)\n",
890 status);
891 data->error = -EIO;
892 }
893 } else {
894 data->bytes_xfered = data->blocks * data->blksz;
895 data->error = 0;
896 }
897
898 if (!data->stop) {
899 dw_mci_request_end(host, host->mrq);
900 goto unlock;
901 }
902
903 prev_state = state = STATE_SENDING_STOP;
904 if (!data->error)
905 send_stop_cmd(host, data);
906 /* fall through */
907
908 case STATE_SENDING_STOP:
909 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
910 &host->pending_events))
911 break;
912
913 host->cmd = NULL;
914 dw_mci_command_complete(host, host->mrq->stop);
915 dw_mci_request_end(host, host->mrq);
916 goto unlock;
917
918 case STATE_DATA_ERROR:
919 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
920 &host->pending_events))
921 break;
922
923 state = STATE_DATA_BUSY;
924 break;
925 }
926 } while (state != prev_state);
927
928 host->state = state;
929unlock:
930 spin_unlock(&host->lock);
931
932}
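
/*
 * State machine driven above, as reconstructed from the transitions
 * (error exits via dw_mci_request_end() omitted):
 *
 *	STATE_IDLE
 *	    -> STATE_SENDING_CMD    (dw_mci_queue_request)
 *	    -> STATE_SENDING_DATA   (command done, data follows)
 *	    -> STATE_DATA_BUSY      (transfer complete)
 *	    -> STATE_SENDING_STOP   (data done, stop command pending)
 *	    -> STATE_IDLE, or straight into the next queued request
 *
 *	STATE_SENDING_DATA -> STATE_DATA_ERROR -> STATE_DATA_BUSY
 *	    (on EVENT_DATA_ERROR, once the transfer finally completes)
 */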
933
934static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
935{
936 u16 *pdata = (u16 *)buf;
937
938 WARN_ON(cnt % 2 != 0);
939
940 cnt = cnt >> 1;
941 while (cnt > 0) {
942 mci_writew(host, DATA, *pdata++);
943 cnt--;
944 }
945}
946
947static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
948{
949 u16 *pdata = (u16 *)buf;
950
951 WARN_ON(cnt % 2 != 0);
952
953 cnt = cnt >> 1;
954 while (cnt > 0) {
955 *pdata++ = mci_readw(host, DATA);
956 cnt--;
957 }
958}
959
960static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
961{
962 u32 *pdata = (u32 *)buf;
963
964 WARN_ON(cnt % 4 != 0);
965 WARN_ON((unsigned long)pdata & 0x3);
966
967 cnt = cnt >> 2;
968 while (cnt > 0) {
969 mci_writel(host, DATA, *pdata++);
970 cnt--;
971 }
972}
973
974static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
975{
976 u32 *pdata = (u32 *)buf;
977
978 WARN_ON(cnt % 4 != 0);
979 WARN_ON((unsigned long)pdata & 0x3);
980
981 cnt = cnt >> 2;
982 while (cnt > 0) {
983 *pdata++ = mci_readl(host, DATA);
984 cnt--;
985 }
986}
987
988static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
989{
990 u64 *pdata = (u64 *)buf;
991
992 WARN_ON(cnt % 8 != 0);
993
994 cnt = cnt >> 3;
995 while (cnt > 0) {
996 mci_writeq(host, DATA, *pdata++);
997 cnt--;
998 }
999}
1000
1001static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1002{
1003 u64 *pdata = (u64 *)buf;
1004
1005 WARN_ON(cnt % 8 != 0);
1006
1007 cnt = cnt >> 3;
1008 while (cnt > 0) {
1009 *pdata++ = mci_readq(host, DATA);
1010 cnt--;
1011 }
1012}
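
/*
 * Which push/pull pair is used is decided once at probe time from the
 * HCON data-width field; host->data_shift converts FIFO entry counts
 * to bytes (and back) throughout the PIO paths:
 *
 *	FIFO width	helpers			data_shift
 *	16 bit		mci_readw/mci_writew	1  (bytes = entries << 1)
 *	32 bit		mci_readl/mci_writel	2
 *	64 bit		mci_readq/mci_writeq	3
 */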
1013
1014static void dw_mci_read_data_pio(struct dw_mci *host)
1015{
1016 struct scatterlist *sg = host->sg;
1017 void *buf = sg_virt(sg);
1018 unsigned int offset = host->pio_offset;
1019 struct mmc_data *data = host->data;
1020 int shift = host->data_shift;
1021 u32 status;
1022 unsigned int nbytes = 0, len, old_len, count = 0;
1023
1024 do {
1025 len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
1026 if (count == 0)
1027 old_len = len;
1028
1029 if (offset + len <= sg->length) {
1030 host->pull_data(host, (void *)(buf + offset), len);
1031
1032 offset += len;
1033 nbytes += len;
1034
1035 if (offset == sg->length) {
1036 flush_dcache_page(sg_page(sg));
1037 host->sg = sg = sg_next(sg);
1038 if (!sg)
1039 goto done;
1040
1041 offset = 0;
1042 buf = sg_virt(sg);
1043 }
1044 } else {
1045 unsigned int remaining = sg->length - offset;
1046 host->pull_data(host, (void *)(buf + offset),
1047 remaining);
1048 nbytes += remaining;
1049
1050 flush_dcache_page(sg_page(sg));
1051 host->sg = sg = sg_next(sg);
1052 if (!sg)
1053 goto done;
1054
1055 offset = len - remaining;
1056 buf = sg_virt(sg);
1057 host->pull_data(host, buf, offset);
1058 nbytes += offset;
1059 }
1060
1061 status = mci_readl(host, MINTSTS);
1062 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1063 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1064 host->data_status = status;
1065 data->bytes_xfered += nbytes;
1066 smp_wmb();
1067
1068 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1069
1070 tasklet_schedule(&host->tasklet);
1071 return;
1072 }
1073 count++;
1074 } while (status & SDMMC_INT_RXDR); /* if RXDR is set, read again */
1075 len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
1076 host->pio_offset = offset;
1077 data->bytes_xfered += nbytes;
1078 return;
1079
1080done:
1081 data->bytes_xfered += nbytes;
1082 smp_wmb();
1083 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1084}
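
/*
 * Note the else branch above: a single FIFO burst may straddle a
 * scatterlist boundary, so the remainder of the current entry is
 * drained first and the leftover (len - remaining) bytes land at the
 * start of the next entry.  dw_mci_write_data_pio() below mirrors
 * this, filling the FIFO instead of draining it.
 */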
1085
1086static void dw_mci_write_data_pio(struct dw_mci *host)
1087{
1088 struct scatterlist *sg = host->sg;
1089 void *buf = sg_virt(sg);
1090 unsigned int offset = host->pio_offset;
1091 struct mmc_data *data = host->data;
1092 int shift = host->data_shift;
1093 u32 status;
1094 unsigned int nbytes = 0, len;
1095
1096 do {
1097 len = SDMMC_FIFO_SZ -
1098 (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
1099 if (offset + len <= sg->length) {
1100 host->push_data(host, (void *)(buf + offset), len);
1101
1102 offset += len;
1103 nbytes += len;
1104 if (offset == sg->length) {
1105 host->sg = sg = sg_next(sg);
1106 if (!sg)
1107 goto done;
1108
1109 offset = 0;
1110 buf = sg_virt(sg);
1111 }
1112 } else {
1113 unsigned int remaining = sg->length - offset;
1114
1115 host->push_data(host, (void *)(buf + offset),
1116 remaining);
1117 nbytes += remaining;
1118
1119 host->sg = sg = sg_next(sg);
1120 if (!sg)
1121 goto done;
1122
1123 offset = len - remaining;
1124 buf = sg_virt(sg);
1125 host->push_data(host, (void *)buf, offset);
1126 nbytes += offset;
1127 }
1128
1129 status = mci_readl(host, MINTSTS);
1130 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1131 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1132 host->data_status = status;
1133 data->bytes_xfered += nbytes;
1134
1135 smp_wmb();
1136
1137 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1138
1139 tasklet_schedule(&host->tasklet);
1140 return;
1141 }
1142 } while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
1143
1144 host->pio_offset = offset;
1145 data->bytes_xfered += nbytes;
1146
1147 return;
1148
1149done:
1150 data->bytes_xfered += nbytes;
1151 smp_wmb();
1152 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1153}
1154
1155static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1156{
1157 if (!host->cmd_status)
1158 host->cmd_status = status;
1159
1160 smp_wmb();
1161
1162 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1163 tasklet_schedule(&host->tasklet);
1164}
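
/*
 * The smp_wmb() above pairs with the tasklet: host->cmd_status must be
 * globally visible before EVENT_CMD_COMPLETE is, because
 * dw_mci_tasklet_func() only reads the saved status after seeing the
 * pending bit.  The data-status paths follow the same
 * store/barrier/set_bit pattern.
 */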
1165
1166static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1167{
1168 struct dw_mci *host = dev_id;
1169 u32 status, pending;
1170 unsigned int pass_count = 0;
1171
1172 do {
1173 status = mci_readl(host, RINTSTS);
1174 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1175
1176 /*
1177 * DTO fix - version 2.10a and below, and only if internal DMA
1178 * is configured.
1179 */
1180 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1181 if (!pending &&
1182 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1183 pending |= SDMMC_INT_DATA_OVER;
1184 }
1185
1186 if (!pending)
1187 break;
1188
1189 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1190 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1191 host->cmd_status = status;
1192 smp_wmb();
1193 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1194 tasklet_schedule(&host->tasklet);
1195 }
1196
1197 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1198 /* if there is an error, report DATA_ERROR */
1199 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1200 host->data_status = status;
1201 smp_wmb();
1202 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1203 tasklet_schedule(&host->tasklet);
1204 }
1205
1206 if (pending & SDMMC_INT_DATA_OVER) {
1207 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1208 if (!host->data_status)
1209 host->data_status = status;
1210 smp_wmb();
1211 if (host->dir_status == DW_MCI_RECV_STATUS) {
1212 if (host->sg != NULL)
1213 dw_mci_read_data_pio(host);
1214 }
1215 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1216 tasklet_schedule(&host->tasklet);
1217 }
1218
1219 if (pending & SDMMC_INT_RXDR) {
1220 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1221 if (host->sg)
1222 dw_mci_read_data_pio(host);
1223 }
1224
1225 if (pending & SDMMC_INT_TXDR) {
1226 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1227 if (host->sg)
1228 dw_mci_write_data_pio(host);
1229 }
1230
1231 if (pending & SDMMC_INT_CMD_DONE) {
1232 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1233 dw_mci_cmd_interrupt(host, status);
1234 }
1235
1236 if (pending & SDMMC_INT_CD) {
1237 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1238 tasklet_schedule(&host->card_tasklet);
1239 }
1240
1241 } while (pass_count++ < 5);
1242
1243#ifdef CONFIG_MMC_DW_IDMAC
1244 /* Handle DMA interrupts */
1245 pending = mci_readl(host, IDSTS);
1246 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1247 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1248 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1249 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1250 host->dma_ops->complete(host);
1251 }
1252#endif
1253
1254 return IRQ_HANDLED;
1255}
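
/*
 * The handler above re-reads and acks MINTSTS for at most five passes,
 * picking up events that arrive while earlier ones are being serviced
 * without looping unboundedly in interrupt context; anything still
 * pending when the loop exits should simply re-raise the interrupt.
 */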
1256
1257static void dw_mci_tasklet_card(unsigned long data)
1258{
1259 struct dw_mci *host = (struct dw_mci *)data;
1260 int i;
1261
1262 for (i = 0; i < host->num_slots; i++) {
1263 struct dw_mci_slot *slot = host->slot[i];
1264 struct mmc_host *mmc = slot->mmc;
1265 struct mmc_request *mrq;
1266 int present;
1267 u32 ctrl;
1268
1269 present = dw_mci_get_cd(mmc);
1270 while (present != slot->last_detect_state) {
1271 spin_lock(&host->lock);
1272
1273 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1274 present ? "inserted" : "removed");
1275
1276 /* Card change detected */
1277 slot->last_detect_state = present;
1278
1279 /* Power up slot */
1280 if (present != 0) {
1281 if (host->pdata->setpower)
1282 host->pdata->setpower(slot->id,
1283 mmc->ocr_avail);
1284
1285 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1286 }
1287
1288 /* Clean up queue if present */
1289 mrq = slot->mrq;
1290 if (mrq) {
1291 if (mrq == host->mrq) {
1292 host->data = NULL;
1293 host->cmd = NULL;
1294
1295 switch (host->state) {
1296 case STATE_IDLE:
1297 break;
1298 case STATE_SENDING_CMD:
1299 mrq->cmd->error = -ENOMEDIUM;
1300 if (!mrq->data)
1301 break;
1302 /* fall through */
1303 case STATE_SENDING_DATA:
1304 mrq->data->error = -ENOMEDIUM;
1305 dw_mci_stop_dma(host);
1306 break;
1307 case STATE_DATA_BUSY:
1308 case STATE_DATA_ERROR:
1309 if (mrq->data->error == -EINPROGRESS)
1310 mrq->data->error = -ENOMEDIUM;
1311 if (!mrq->stop)
1312 break;
1313 /* fall through */
1314 case STATE_SENDING_STOP:
1315 mrq->stop->error = -ENOMEDIUM;
1316 break;
1317 }
1318
1319 dw_mci_request_end(host, mrq);
1320 } else {
1321 list_del(&slot->queue_node);
1322 mrq->cmd->error = -ENOMEDIUM;
1323 if (mrq->data)
1324 mrq->data->error = -ENOMEDIUM;
1325 if (mrq->stop)
1326 mrq->stop->error = -ENOMEDIUM;
1327
1328 spin_unlock(&host->lock);
1329 mmc_request_done(slot->mmc, mrq);
1330 spin_lock(&host->lock);
1331 }
1332 }
1333
1334 /* Power down slot */
1335 if (present == 0) {
1336 if (host->pdata->setpower)
1337 host->pdata->setpower(slot->id, 0);
1338 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1339
1340 /*
1341 * Clear down the FIFO.  Doing so generates a
1342 * block interrupt, hence the scatter-gather
1343 * pointer is cleared first.
1344 */
1345 host->sg = NULL;
1346
1347 ctrl = mci_readl(host, CTRL);
1348 ctrl |= SDMMC_CTRL_FIFO_RESET;
1349 mci_writel(host, CTRL, ctrl);
1350
1351#ifdef CONFIG_MMC_DW_IDMAC
1352 ctrl = mci_readl(host, BMOD);
1353 ctrl |= SDMMC_IDMAC_SWRESET; /* Software reset of DMA */
1354 mci_writel(host, BMOD, ctrl);
1355#endif
1356
1357 }
1358
1359 spin_unlock(&host->lock);
1360 present = dw_mci_get_cd(mmc);
1361 }
1362
1363 mmc_detect_change(slot->mmc,
1364 msecs_to_jiffies(host->pdata->detect_delay_ms));
1365 }
1366}
1367
1368static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1369{
1370 struct mmc_host *mmc;
1371 struct dw_mci_slot *slot;
1372
1373 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
1374 if (!mmc)
1375 return -ENOMEM;
1376
1377 slot = mmc_priv(mmc);
1378 slot->id = id;
1379 slot->mmc = mmc;
1380 slot->host = host;
1381
1382 mmc->ops = &dw_mci_ops;
1383 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
1384 mmc->f_max = host->bus_hz;
1385
1386 if (host->pdata->get_ocr)
1387 mmc->ocr_avail = host->pdata->get_ocr(id);
1388 else
1389 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1390
1391 /*
1392 * Start with slot power disabled, it will be enabled when a card
1393 * is detected.
1394 */
1395 if (host->pdata->setpower)
1396 host->pdata->setpower(id, 0);
1397
1398 mmc->caps = 0;
1399 if (host->pdata->get_bus_wd)
1400 if (host->pdata->get_bus_wd(slot->id) >= 4)
1401 mmc->caps |= MMC_CAP_4_BIT_DATA;
1402
1403 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1404 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1405
1406#ifdef CONFIG_MMC_DW_IDMAC
1407 mmc->max_segs = host->ring_size;
1408 mmc->max_blk_size = 65536;
1409 mmc->max_blk_count = host->ring_size;
1410 mmc->max_seg_size = 0x1000;
1411 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
1412#else
1413 if (host->pdata->blk_settings) {
1414 mmc->max_segs = host->pdata->blk_settings->max_segs;
1415 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
1416 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
1417 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
1418 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
1419 } else {
1420 /* Useful defaults if platform data is unset. */
1421 mmc->max_segs = 64;
1422 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
1423 mmc->max_blk_count = 512;
1424 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1425 mmc->max_seg_size = mmc->max_req_size;
1426 }
1427#endif /* CONFIG_MMC_DW_IDMAC */
1428
1429 if (dw_mci_get_cd(mmc))
1430 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1431 else
1432 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1433
1434 host->slot[id] = slot;
1435 mmc_add_host(mmc);
1436
1437#if defined(CONFIG_DEBUG_FS)
1438 dw_mci_init_debugfs(slot);
1439#endif
1440
1441 /* Card initially undetected */
1442 slot->last_detect_state = 0;
1443
1444 return 0;
1445}
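
/*
 * For reference, a minimal board file might supply the platform data
 * consumed above roughly as follows.  This is only a sketch: the
 * example_* names are made up, and the init() callback signature is an
 * assumption taken from the dw_mmc platform header, which is not shown
 * in this file.  Only the fields dereferenced here (num_slots, bus_hz,
 * detect_delay_ms, quirks, init, get_cd, ...) are known to exist.
 *
 *	static int example_mci_init(u32 slot_id, irq_handler_t handler,
 *				    void *data)
 *	{
 *		return 0;	// claim card-detect GPIO/IRQ here
 *	}
 *
 *	static int example_mci_get_cd(u32 slot_id)
 *	{
 *		return 0;	// 0 = card present (note the ! above)
 *	}
 *
 *	static struct dw_mci_board example_mci_pdata = {
 *		.num_slots		= 1,
 *		.bus_hz			= 50 * 1000 * 1000,
 *		.detect_delay_ms	= 200,
 *		.quirks			= DW_MCI_QUIRK_HIGHSPEED,
 *		.init			= example_mci_init,
 *		.get_cd			= example_mci_get_cd,
 *	};
 */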
1446
1447static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1448{
1449 /* Shutdown detect IRQ */
1450 if (slot->host->pdata->exit)
1451 slot->host->pdata->exit(id);
1452
1453 /* Debugfs stuff is cleaned up by mmc core */
1454 mmc_remove_host(slot->mmc);
1455 slot->host->slot[id] = NULL;
1456 mmc_free_host(slot->mmc);
1457}
1458
1459static void dw_mci_init_dma(struct dw_mci *host)
1460{
1461 /* Alloc memory for sg translation */
1462 host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
1463 &host->sg_dma, GFP_KERNEL);
1464 if (!host->sg_cpu) {
1465 dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
1466 __func__);
1467 goto no_dma;
1468 }
1469
1470 /* Determine which DMA interface to use */
1471#ifdef CONFIG_MMC_DW_IDMAC
1472 host->dma_ops = &dw_mci_idmac_ops;
1473 dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
1474#endif
1475
1476 if (!host->dma_ops)
1477 goto no_dma;
1478
1479 if (host->dma_ops->init) {
1480 if (host->dma_ops->init(host)) {
1481 dev_err(&host->pdev->dev, "%s: Unable to initialize "
1482 "DMA Controller.\n", __func__);
1483 goto no_dma;
1484 }
1485 } else {
1486 dev_err(&host->pdev->dev, "DMA initialization not found.\n");
1487 goto no_dma;
1488 }
1489
1490 host->use_dma = 1;
1491 return;
1492
1493no_dma:
1494 dev_info(&host->pdev->dev, "Using PIO mode.\n");
1495 host->use_dma = 0;
1496 return;
1497}
1498
1499static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
1500{
1501 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1502 unsigned int ctrl;
1503
1504 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1505 SDMMC_CTRL_DMA_RESET));
1506
1507 /* wait till resets clear */
1508 do {
1509 ctrl = mci_readl(host, CTRL);
1510 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1511 SDMMC_CTRL_DMA_RESET)))
1512 return true;
1513 } while (time_before(jiffies, timeout));
1514
1515 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
1516
1517 return false;
1518}
1519
1520static int dw_mci_probe(struct platform_device *pdev)
1521{
1522 struct dw_mci *host;
1523 struct resource *regs;
1524 struct dw_mci_board *pdata;
1525 int irq, ret, i, width;
1526 u32 fifo_size;
1527
1528 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1529 if (!regs)
1530 return -ENXIO;
1531
1532 irq = platform_get_irq(pdev, 0);
1533 if (irq < 0)
1534 return irq;
1535
1536 host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
1537 if (!host)
1538 return -ENOMEM;
1539
1540 host->pdev = pdev;
1541 host->pdata = pdata = pdev->dev.platform_data;
1542 if (!pdata || !pdata->init) {
1543 dev_err(&pdev->dev,
1544 "Platform data must supply init function\n");
1545 ret = -ENODEV;
1546 goto err_freehost;
1547 }
1548
1549 if (!pdata->select_slot && pdata->num_slots > 1) {
1550 dev_err(&pdev->dev,
1551 "Platform data must supply select_slot function\n");
1552 ret = -ENODEV;
1553 goto err_freehost;
1554 }
1555
1556 if (!pdata->bus_hz) {
1557 dev_err(&pdev->dev,
1558 "Platform data must supply bus speed\n");
1559 ret = -ENODEV;
1560 goto err_freehost;
1561 }
1562
1563 host->bus_hz = pdata->bus_hz;
1564 host->quirks = pdata->quirks;
1565
1566 spin_lock_init(&host->lock);
1567 INIT_LIST_HEAD(&host->queue);
1568
1569 ret = -ENOMEM;
1570 host->regs = ioremap(regs->start, resource_size(regs));
1571 if (!host->regs)
1572 goto err_freehost;
1573
1574 host->dma_ops = pdata->dma_ops;
1575 dw_mci_init_dma(host);
1576
1577 /*
1578 * Get the host data width - this assumes that HCON has been set with
1579 * the correct values.
1580 */
1581 i = (mci_readl(host, HCON) >> 7) & 0x7;
1582 if (!i) {
1583 host->push_data = dw_mci_push_data16;
1584 host->pull_data = dw_mci_pull_data16;
1585 width = 16;
1586 host->data_shift = 1;
1587 } else if (i == 2) {
1588 host->push_data = dw_mci_push_data64;
1589 host->pull_data = dw_mci_pull_data64;
1590 width = 64;
1591 host->data_shift = 3;
1592 } else {
1593 /* Check for a reserved value, and warn if it is */
1594 WARN((i != 1),
1595 "HCON reports a reserved host data width!\n"
1596 "Defaulting to 32-bit access.\n");
1597 host->push_data = dw_mci_push_data32;
1598 host->pull_data = dw_mci_pull_data32;
1599 width = 32;
1600 host->data_shift = 2;
1601 }
1602
1603 /* Reset all blocks */
1604 if (!mci_wait_reset(&pdev->dev, host)) {
1605 ret = -ENODEV;
1606 goto err_dmaunmap;
1607 }
1608
1609 /* Clear the interrupts for the host controller */
1610 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1611 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1612
1613 /* Put in max timeout */
1614 mci_writel(host, TMOUT, 0xFFFFFFFF);
1615
1616 /*
1617 * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
1618 * TX mark = fifo_size / 2, DMA size = 8 transfers.
1619 */
1620 fifo_size = mci_readl(host, FIFOTH);
1621 fifo_size = (fifo_size >> 16) & 0x7ff;
1622 mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
1623 ((fifo_size/2) << 0)));
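
	/*
	 * Layout implied by the shifts above: bits 30:28 hold the DMA
	 * multiple-transaction size (0x2 = 8 transfers), bits starting at
	 * 16 the RX watermark, and bits starting at 0 the TX watermark;
	 * the power-on RX value is reused above to infer the FIFO depth.
	 */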
1624
1625 /* disable clock to CIU */
1626 mci_writel(host, CLKENA, 0);
1627 mci_writel(host, CLKSRC, 0);
1628
1629 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
1630 tasklet_init(&host->card_tasklet,
1631 dw_mci_tasklet_card, (unsigned long)host);
1632
1633 ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
1634 if (ret)
1635 goto err_dmaunmap;
1636
1637 platform_set_drvdata(pdev, host);
1638
1639 if (host->pdata->num_slots)
1640 host->num_slots = host->pdata->num_slots;
1641 else
1642 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
1643
1644 /* All slots must initialize successfully for probe to succeed */
1645 for (i = 0; i < host->num_slots; i++) {
1646 ret = dw_mci_init_slot(host, i);
1647 if (ret) {
1648 ret = -ENODEV;
1649 goto err_init_slot;
1650 }
1651 }
1652
1653 /*
1654 * Enable interrupts for command done, data over, data empty, card det,
1655 * receive ready and error such as transmit, receive timeout, crc error
1656 */
1657 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1658 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
1659 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
1660 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
1661 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
1662
1663 dev_info(&pdev->dev, "DW MMC controller at irq %d, "
1664 "%d bit host data width\n", irq, width);
1665 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
1666 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
1667
1668 return 0;
1669
1670err_init_slot:
1671 /* De-init any initialized slots */
1672 while (i > 0) {
1673 i--;
1674 if (host->slot[i])
1675 dw_mci_cleanup_slot(host->slot[i], i);
1676 }
1677 free_irq(irq, host);
1678
1679err_dmaunmap:
1680 if (host->use_dma && host->dma_ops->exit)
1681 host->dma_ops->exit(host);
1682 dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
1683 host->sg_cpu, host->sg_dma);
1684 iounmap(host->regs);
1685
1686err_freehost:
1687 kfree(host);
1688 return ret;
1689}
1690
1691static int __exit dw_mci_remove(struct platform_device *pdev)
1692{
1693 struct dw_mci *host = platform_get_drvdata(pdev);
1694 int i;
1695
1696 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1697 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1698
1699 platform_set_drvdata(pdev, NULL);
1700
1701 for (i = 0; i < host->num_slots; i++) {
1702 dev_dbg(&pdev->dev, "remove slot %d\n", i);
1703 if (host->slot[i])
1704 dw_mci_cleanup_slot(host->slot[i], i);
1705 }
1706
1707 /* disable clock to CIU */
1708 mci_writel(host, CLKENA, 0);
1709 mci_writel(host, CLKSRC, 0);
1710
1711 free_irq(platform_get_irq(pdev, 0), host);
1712 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
1713
1714 if (host->use_dma && host->dma_ops->exit)
1715 host->dma_ops->exit(host);
1716
1717 iounmap(host->regs);
1718
1719 kfree(host);
1720 return 0;
1721}
1722
1723#ifdef CONFIG_PM
1724/*
1725 * TODO: we should probably disable the clock to the card in the suspend path.
1726 */
1727static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
1728{
1729 int i, ret;
1730 struct dw_mci *host = platform_get_drvdata(pdev);
1731
1732 for (i = 0; i < host->num_slots; i++) {
1733 struct dw_mci_slot *slot = host->slot[i];
1734 if (!slot)
1735 continue;
1736 ret = mmc_suspend_host(slot->mmc);
1737 if (ret < 0) {
1738 while (--i >= 0) {
1739 slot = host->slot[i];
1740 if (slot)
1741 mmc_resume_host(host->slot[i]->mmc);
1742 }
1743 return ret;
1744 }
1745 }
1746
1747 return 0;
1748}
1749
1750static int dw_mci_resume(struct platform_device *pdev)
1751{
1752 int i, ret;
1753 struct dw_mci *host = platform_get_drvdata(pdev);
1754
1755 for (i = 0; i < host->num_slots; i++) {
1756 struct dw_mci_slot *slot = host->slot[i];
1757 if (!slot)
1758 continue;
1759 ret = mmc_resume_host(host->slot[i]->mmc);
1760 if (ret < 0)
1761 return ret;
1762 }
1763
1764 return 0;
1765}
1766#else
1767#define dw_mci_suspend NULL
1768#define dw_mci_resume NULL
1769#endif /* CONFIG_PM */
1770
1771static struct platform_driver dw_mci_driver = {
1772 .remove = __exit_p(dw_mci_remove),
1773 .suspend = dw_mci_suspend,
1774 .resume = dw_mci_resume,
1775 .driver = {
1776 .name = "dw_mmc",
1777 },
1778};
1779
1780static int __init dw_mci_init(void)
1781{
1782 return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
1783}
1784
1785static void __exit dw_mci_exit(void)
1786{
1787 platform_driver_unregister(&dw_mci_driver);
1788}
1789
1790module_init(dw_mci_init);
1791module_exit(dw_mci_exit);
1792
1793MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
1794MODULE_AUTHOR("NXP Semiconductor VietNam");
1795MODULE_AUTHOR("Imagination Technologies Ltd");
1796MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 000000000000..5dd55a75233d
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef _DW_MMC_H_
15#define _DW_MMC_H_
16
17#define SDMMC_CTRL 0x000
18#define SDMMC_PWREN 0x004
19#define SDMMC_CLKDIV 0x008
20#define SDMMC_CLKSRC 0x00c
21#define SDMMC_CLKENA 0x010
22#define SDMMC_TMOUT 0x014
23#define SDMMC_CTYPE 0x018
24#define SDMMC_BLKSIZ 0x01c
25#define SDMMC_BYTCNT 0x020
26#define SDMMC_INTMASK 0x024
27#define SDMMC_CMDARG 0x028
28#define SDMMC_CMD 0x02c
29#define SDMMC_RESP0 0x030
30#define SDMMC_RESP1 0x034
31#define SDMMC_RESP2 0x038
32#define SDMMC_RESP3 0x03c
33#define SDMMC_MINTSTS 0x040
34#define SDMMC_RINTSTS 0x044
35#define SDMMC_STATUS 0x048
36#define SDMMC_FIFOTH 0x04c
37#define SDMMC_CDETECT 0x050
38#define SDMMC_WRTPRT 0x054
39#define SDMMC_GPIO 0x058
40#define SDMMC_TCBCNT 0x05c
41#define SDMMC_TBBCNT 0x060
42#define SDMMC_DEBNCE 0x064
43#define SDMMC_USRID 0x068
44#define SDMMC_VERID 0x06c
45#define SDMMC_HCON 0x070
46#define SDMMC_BMOD 0x080
47#define SDMMC_PLDMND 0x084
48#define SDMMC_DBADDR 0x088
49#define SDMMC_IDSTS 0x08c
50#define SDMMC_IDINTEN 0x090
51#define SDMMC_DSCADDR 0x094
52#define SDMMC_BUFADDR 0x098
53#define SDMMC_DATA 0x100
54#define SDMMC_DATA_ADR 0x100
55
56/* shift bit field */
57#define _SBF(f, v) ((v) << (f))
58
59/* Control register defines */
60#define SDMMC_CTRL_USE_IDMAC BIT(25)
61#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
62#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
63#define SDMMC_CTRL_SEND_CCSD BIT(9)
64#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
65#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
66#define SDMMC_CTRL_READ_WAIT BIT(6)
67#define SDMMC_CTRL_DMA_ENABLE BIT(5)
68#define SDMMC_CTRL_INT_ENABLE BIT(4)
69#define SDMMC_CTRL_DMA_RESET BIT(2)
70#define SDMMC_CTRL_FIFO_RESET BIT(1)
71#define SDMMC_CTRL_RESET BIT(0)
72/* Clock Enable register defines */
73#define SDMMC_CLKEN_LOW_PWR BIT(16)
74#define SDMMC_CLKEN_ENABLE BIT(0)
75/* time-out register defines */
76#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
77#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
78#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
79#define SDMMC_TMOUT_RESP_MSK 0xFF
80/* card-type register defines */
81#define SDMMC_CTYPE_8BIT BIT(16)
82#define SDMMC_CTYPE_4BIT BIT(0)
83#define SDMMC_CTYPE_1BIT 0
84/* Interrupt status & mask register defines */
85#define SDMMC_INT_SDIO BIT(16)
86#define SDMMC_INT_EBE BIT(15)
87#define SDMMC_INT_ACD BIT(14)
88#define SDMMC_INT_SBE BIT(13)
89#define SDMMC_INT_HLE BIT(12)
90#define SDMMC_INT_FRUN BIT(11)
91#define SDMMC_INT_HTO BIT(10)
92#define SDMMC_INT_DTO BIT(9)
93#define SDMMC_INT_RTO BIT(8)
94#define SDMMC_INT_DCRC BIT(7)
95#define SDMMC_INT_RCRC BIT(6)
96#define SDMMC_INT_RXDR BIT(5)
97#define SDMMC_INT_TXDR BIT(4)
98#define SDMMC_INT_DATA_OVER BIT(3)
99#define SDMMC_INT_CMD_DONE BIT(2)
100#define SDMMC_INT_RESP_ERR BIT(1)
101#define SDMMC_INT_CD BIT(0)
102#define SDMMC_INT_ERROR 0xbfc2
103/* Command register defines */
104#define SDMMC_CMD_START BIT(31)
105#define SDMMC_CMD_CCS_EXP BIT(23)
106#define SDMMC_CMD_CEATA_RD BIT(22)
107#define SDMMC_CMD_UPD_CLK BIT(21)
108#define SDMMC_CMD_INIT BIT(15)
109#define SDMMC_CMD_STOP BIT(14)
110#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
111#define SDMMC_CMD_SEND_STOP BIT(12)
112#define SDMMC_CMD_STRM_MODE BIT(11)
113#define SDMMC_CMD_DAT_WR BIT(10)
114#define SDMMC_CMD_DAT_EXP BIT(9)
115#define SDMMC_CMD_RESP_CRC BIT(8)
116#define SDMMC_CMD_RESP_LONG BIT(7)
117#define SDMMC_CMD_RESP_EXP BIT(6)
118#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
119/* Status register defines */
120#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
121#define SDMMC_FIFO_SZ 32
122/* Internal DMAC interrupt defines */
123#define SDMMC_IDMAC_INT_AI BIT(9)
124#define SDMMC_IDMAC_INT_NI BIT(8)
125#define SDMMC_IDMAC_INT_CES BIT(5)
126#define SDMMC_IDMAC_INT_DU BIT(4)
127#define SDMMC_IDMAC_INT_FBE BIT(2)
128#define SDMMC_IDMAC_INT_RI BIT(1)
129#define SDMMC_IDMAC_INT_TI BIT(0)
130/* Internal DMAC bus mode bits */
131#define SDMMC_IDMAC_ENABLE BIT(7)
132#define SDMMC_IDMAC_FB BIT(1)
133#define SDMMC_IDMAC_SWRESET BIT(0)
134
135/* Register access macros */
136#define mci_readl(dev, reg) \
137 __raw_readl(dev->regs + SDMMC_##reg)
138#define mci_writel(dev, reg, value) \
139 __raw_writel((value), dev->regs + SDMMC_##reg)
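
/*
 * These token-paste against the SDMMC_* offsets above, so for example
 * mci_readl(host, CTRL) expands to __raw_readl(host->regs + SDMMC_CTRL);
 * the 16- and 64-bit variants below exist only to match the possible
 * FIFO data widths.
 */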
140
141/* 16-bit FIFO access macros */
142#define mci_readw(dev, reg) \
143 __raw_readw(dev->regs + SDMMC_##reg)
144#define mci_writew(dev, reg, value) \
145 __raw_writew((value), dev->regs + SDMMC_##reg)
146
147/* 64-bit FIFO access macros */
148#ifdef readq
149#define mci_readq(dev, reg) \
150 __raw_readq(dev->regs + SDMMC_##reg)
151#define mci_writeq(dev, reg, value) \
152 __raw_writeq((value), dev->regs + SDMMC_##reg)
153#else
154/*
155 * Dummy readq implementation for architectures that don't define it.
156 *
157 * We assume that none of these architectures configures the IP
158 * block with a 64-bit FIFO width, so this code should never be
159 * executed on those machines. Defining these macros here keeps the
160 * rest of the code free from ifdefs.
161 */
162#define mci_readq(dev, reg) \
163 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
164#define mci_writeq(dev, reg, value) \
165 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
166#endif
167
168#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bdd2cbb87cba..4428594261c5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/dma.h>
 #include <asm/irq.h>
@@ -141,10 +142,49 @@ struct mxcmci_host {
 
 	struct work_struct datawork;
 	spinlock_t lock;
+
+	struct regulator *vcc;
 };
 
 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 
+static inline void mxcmci_init_ocr(struct mxcmci_host *host)
+{
+	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+
+	if (IS_ERR(host->vcc)) {
+		host->vcc = NULL;
+	} else {
+		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+		if (host->pdata && host->pdata->ocr_avail)
+			dev_warn(mmc_dev(host->mmc),
+				"pdata->ocr_avail will not be used\n");
+	}
+
+	if (host->vcc == NULL) {
+		/* fall-back to platform data */
+		if (host->pdata && host->pdata->ocr_avail)
+			host->mmc->ocr_avail = host->pdata->ocr_avail;
+		else
+			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	}
+}
+
+static inline void mxcmci_set_power(struct mxcmci_host *host,
+				    unsigned char power_mode,
+				    unsigned int vdd)
+{
+	if (host->vcc) {
+		if (power_mode == MMC_POWER_UP)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+		else if (power_mode == MMC_POWER_OFF)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+	}
+
+	if (host->pdata && host->pdata->setpower)
+		host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
 static inline int mxcmci_use_dma(struct mxcmci_host *host)
 {
 	return host->do_dma;
@@ -680,9 +720,9 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 
 	if (host->power_mode != ios->power_mode) {
-		if (host->pdata && host->pdata->setpower)
-			host->pdata->setpower(mmc_dev(mmc), ios->vdd);
+		mxcmci_set_power(host, ios->power_mode, ios->vdd);
 		host->power_mode = ios->power_mode;
+
 		if (ios->power_mode == MMC_POWER_ON)
 			host->cmdat |= CMD_DAT_CONT_INIT;
 	}
@@ -807,10 +847,7 @@ static int mxcmci_probe(struct platform_device *pdev)
 	host->pdata = pdev->dev.platform_data;
 	spin_lock_init(&host->lock);
 
-	if (host->pdata && host->pdata->ocr_avail)
-		mmc->ocr_avail = host->pdata->ocr_avail;
-	else
-		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	mxcmci_init_ocr(host);
 
 	if (host->pdata && host->pdata->dat3_card_detect)
 		host->default_irq_mask =
@@ -915,6 +952,9 @@ static int mxcmci_remove(struct platform_device *pdev)
 
 	mmc_remove_host(mmc);
 
+	if (host->vcc)
+		regulator_put(host->vcc);
+
 	if (host->pdata && host->pdata->exit)
 		host->pdata->exit(&pdev->dev, mmc);
 
@@ -927,7 +967,6 @@ static int mxcmci_remove(struct platform_device *pdev)
 	clk_put(host->clk);
 
 	release_mem_region(host->res->start, resource_size(host->res));
-	release_resource(host->res);
 
 	mmc_free_host(mmc);
 
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 000000000000..2aeef4ffed8c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
1/*
2 * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
3 *
4 * Author: Saeed Bishara <saeed@marvell.com>
5 * Mike Rapoport <mike@compulab.co.il>
6 * Based on sdhci-cns3xxx.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/io.h>
23#include <linux/mmc/host.h>
24
25#include "sdhci.h"
26#include "sdhci-pltfm.h"
27
28static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
29{
30 u16 ret;
31
32 switch (reg) {
33 case SDHCI_HOST_VERSION:
34 case SDHCI_SLOT_INT_STATUS:
35 /* those registers don't exist */
36 return 0;
37 default:
38 ret = readw(host->ioaddr + reg);
39 }
40 return ret;
41}
42
43static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
44{
45 u32 ret;
46
47 switch (reg) {
48 case SDHCI_CAPABILITIES:
49 ret = readl(host->ioaddr + reg);
50 /* Mask the support for 3.0V */
51 ret &= ~SDHCI_CAN_VDD_300;
52 break;
53 default:
54 ret = readl(host->ioaddr + reg);
55 }
56 return ret;
57}
58
59static struct sdhci_ops sdhci_dove_ops = {
60 .read_w = sdhci_dove_readw,
61 .read_l = sdhci_dove_readl,
62};
63
64struct sdhci_pltfm_data sdhci_dove_pdata = {
65 .ops = &sdhci_dove_ops,
66 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
67 SDHCI_QUIRK_NO_BUSY_IRQ |
68 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
69 SDHCI_QUIRK_FORCE_DMA,
70};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 3d9c2460d437..0dc905b20eee 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -176,6 +176,74 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 };
 
+/* O2Micro extra registers */
+#define O2_SD_LOCK_WP		0xD3
+#define O2_SD_MULTI_VCC3V	0xEE
+#define O2_SD_CLKREQ		0xEC
+#define O2_SD_CAPS		0xE0
+#define O2_SD_ADMA1		0xE2
+#define O2_SD_ADMA2		0xE7
+#define O2_SD_INF_MOD		0xF1
+
+static int o2_probe(struct sdhci_pci_chip *chip)
+{
+	int ret;
+	u8 scratch;
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_O2_8220:
+	case PCI_DEVICE_ID_O2_8221:
+	case PCI_DEVICE_ID_O2_8320:
+	case PCI_DEVICE_ID_O2_8321:
+		/* This extra setup is required due to broken ADMA. */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* Set Multi 3 to VCC3V# */
+		pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+		/* Disable CLK_REQ# support after media DET */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x20;
+		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+		/* Choose capabilities, enable SDMA.  We have to write 0x01
+		 * to the capabilities register first to unlock it.
+		 */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x01;
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+		/* Disable ADMA1/2 */
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+		/* Disable the infinite transfer mode */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x08;
+		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+	}
+
+	return 0;
+}
+
 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
 {
 	u8 scratch;
@@ -204,6 +272,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
 static int jmicron_probe(struct sdhci_pci_chip *chip)
 {
 	int ret;
+	u16 mmcdev = 0;
 
 	if (chip->pdev->revision == 0) {
 		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -225,12 +294,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
 	 * 2. The MMC interface has a lower subfunction number
 	 *    than the SD interface.
 	 */
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
+	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
+
+	if (mmcdev) {
 		struct pci_dev *sd_dev;
 
 		sd_dev = NULL;
 		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
-			PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
+			mmcdev, sd_dev)) != NULL) {
 			if ((PCI_SLOT(chip->pdev->devfn) ==
 				PCI_SLOT(sd_dev->devfn)) &&
 				(chip->pdev->bus == sd_dev->bus))
@@ -290,13 +364,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
 		slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 	}
 
+	/* JM388 MMC doesn't support 1.8V while SD supports it */
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31 |
+			MMC_VDD_165_195; /* allow 1.8V */
+		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
+	}
+
 	/*
 	 * The secondary interface requires a bit set to get the
 	 * interrupts.
 	 */
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 1);
 
+	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+
 	return 0;
 }
 
@@ -305,7 +391,8 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
 	if (dead)
 		return;
 
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 0);
 }
 
@@ -313,7 +400,8 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
 {
 	int i;
 
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
 		for (i = 0;i < chip->num_slots;i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 0);
 	}
@@ -325,7 +413,8 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
 {
 	int ret, i;
 
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
 		for (i = 0;i < chip->num_slots;i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 1);
 	}
@@ -339,6 +428,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
 	return 0;
 }
 
+static const struct sdhci_pci_fixes sdhci_o2 = {
+	.probe = o2_probe,
+};
+
 static const struct sdhci_pci_fixes sdhci_jmicron = {
 	.probe = jmicron_probe,
 
@@ -510,6 +603,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
 	},
 
 	{
+		.vendor		= PCI_VENDOR_ID_JMICRON,
+		.device		= PCI_DEVICE_ID_JMICRON_JMB388_SD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_JMICRON,
+		.device		= PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
+	},
+
+	{
 		.vendor		= PCI_VENDOR_ID_SYSKONNECT,
 		.device		= 0x8000,
 		.subvendor	= PCI_ANY_ID,
@@ -589,6 +698,46 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
 		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
 	},
 
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8120,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8220,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8221,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8320,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8321,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
 	{ /* Generic SD host controller */
 		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
 	},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 0502f89f662b..dbab0407f4b6 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -170,6 +170,12 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
 #ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
 	{ "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
 #endif
+#ifdef CONFIG_MMC_SDHCI_DOVE
+	{ "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
+#endif
+#ifdef CONFIG_MMC_SDHCI_TEGRA
+	{ "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
+#endif
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index c1bfe48af56a..ea2e44d9be5e 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -22,5 +22,7 @@ struct sdhci_pltfm_host {
 
 extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
 extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
+extern struct sdhci_pltfm_data sdhci_dove_pdata;
+extern struct sdhci_pltfm_data sdhci_tegra_pdata;
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862ecc8a..17203586305c 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
 	if (!clksrc)
 		return UINT_MAX;
 
+	/*
+	 * The external clock divider steps in increments of 1, unlike the
+	 * host controller's own power-of-two divider, so when 'clk_type'
+	 * is S3C_SDHCI_CLK_DIV_EXTERNAL ask the clock framework directly.
+	 */
+	if (ourhost->pdata->clk_type) {
+		rate = clk_round_rate(clksrc, wanted);
+		return wanted - rate;
+	}
+
 	rate = clk_get_rate(clksrc);
 
 	for (div = 1; div < 256; div *= 2) {
@@ -232,6 +241,42 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
 	return min;
 }
 
+/* sdhci_cmu_get_max_clock - callback to get maximum clock frequency. */
+static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
+}
+
+/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
+static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/*
+	 * The initialization clock must lie in the 100 kHz - 400 kHz
+	 * range, so use 400 kHz as the upper bound.
+	 */
+	return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
+}
+
+/* sdhci_cmu_set_clock - callback on clock change. */
+static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/* don't bother if the clock is going off */
+	if (clock == 0)
+		return;
+
+	sdhci_s3c_set_clock(host, clock);
+
+	clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+
+	host->clock = clock;
+}
+
 static struct sdhci_ops sdhci_s3c_ops = {
 	.get_max_clock	= sdhci_s3c_get_max_clk,
 	.set_clock	= sdhci_s3c_set_clock,
@@ -361,6 +406,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
 
 		clks++;
 		sc->clk_bus[ptr] = clk;
+
+		/*
+		 * Save the current clock index so the overriding
+		 * sdhci_cmu_* callbacks know which clock bus is in use.
+		 */
+		sc->cur_clk = ptr;
+
 		clk_enable(clk);
 
 		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -427,6 +479,20 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
 	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
 	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
 
+	/*
+	 * If the controller has no internal clock divider, override the
+	 * default clock callbacks with the external-divider variants.
+	 */
+	if (pdata->clk_type) {
+		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+	}
+
+	/* Apply any additional host capabilities the platform supplies */
+	if (pdata->host_caps)
+		host->mmc->caps |= pdata->host_caps;
+
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_err(dev, "sdhci_add_host() failed\n");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 000000000000..4823ee94a63f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/io.h>
20#include <linux/gpio.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/host.h>
23
24#include <mach/gpio.h>
25#include <mach/sdhci.h>
26
27#include "sdhci.h"
28#include "sdhci-pltfm.h"
29
30static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
31{
32 u32 val;
33
34 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
35 /* Use wp_gpio here instead? */
36 val = readl(host->ioaddr + reg);
37 return val | SDHCI_WRITE_PROTECT;
38 }
39
40 return readl(host->ioaddr + reg);
41}
42
43static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
44{
45 if (unlikely(reg == SDHCI_HOST_VERSION)) {
46 /* Erratum: Version register is invalid in HW. */
47 return SDHCI_SPEC_200;
48 }
49
50 return readw(host->ioaddr + reg);
51}
52
53static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
54{
55 /* Seems like we're getting spurious timeout and crc errors, so
56 * disable signalling of them. In case of real errors software
57 * timers should take care of eventually detecting them.
58 */
59 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
60 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
61
62 writel(val, host->ioaddr + reg);
63
64 if (unlikely(reg == SDHCI_INT_ENABLE)) {
65 /* Erratum: Must enable block gap interrupt detection */
66 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
67 if (val & SDHCI_INT_CARD_INT)
68 gap_ctrl |= 0x8;
69 else
70 gap_ctrl &= ~0x8;
71 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
72 }
73}
74
75static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
76{
77 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
78 struct tegra_sdhci_platform_data *plat;
79
80 plat = pdev->dev.platform_data;
81
82 if (!gpio_is_valid(plat->wp_gpio))
83 return -1;
84
85 return gpio_get_value(plat->wp_gpio);
86}
87
88static irqreturn_t carddetect_irq(int irq, void *data)
89{
90 struct sdhci_host *sdhost = (struct sdhci_host *)data;
91
92 tasklet_schedule(&sdhost->card_tasklet);
93 return IRQ_HANDLED;
94};
95
96static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
97{
98 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
99 struct tegra_sdhci_platform_data *plat;
100 u32 ctrl;
101
102 plat = pdev->dev.platform_data;
103
104 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
105 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
106 ctrl &= ~SDHCI_CTRL_4BITBUS;
107 ctrl |= SDHCI_CTRL_8BITBUS;
108 } else {
109 ctrl &= ~SDHCI_CTRL_8BITBUS;
110 if (bus_width == MMC_BUS_WIDTH_4)
111 ctrl |= SDHCI_CTRL_4BITBUS;
112 else
113 ctrl &= ~SDHCI_CTRL_4BITBUS;
114 }
115 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
116 return 0;
117}
118
119
120static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
121 struct sdhci_pltfm_data *pdata)
122{
123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
124 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
125 struct tegra_sdhci_platform_data *plat;
126 struct clk *clk;
127 int rc;
128
129 plat = pdev->dev.platform_data;
130 if (plat == NULL) {
131 dev_err(mmc_dev(host->mmc), "missing platform data\n");
132 return -ENXIO;
133 }
134
135 if (gpio_is_valid(plat->power_gpio)) {
136 rc = gpio_request(plat->power_gpio, "sdhci_power");
137 if (rc) {
138 dev_err(mmc_dev(host->mmc),
139 "failed to allocate power gpio\n");
140 goto out;
141 }
142 tegra_gpio_enable(plat->power_gpio);
143 gpio_direction_output(plat->power_gpio, 1);
144 }
145
146 if (gpio_is_valid(plat->cd_gpio)) {
147 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
148 if (rc) {
149 dev_err(mmc_dev(host->mmc),
150 "failed to allocate cd gpio\n");
151 goto out_power;
152 }
153 tegra_gpio_enable(plat->cd_gpio);
154 gpio_direction_input(plat->cd_gpio);
155
156 rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
157 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
158 mmc_hostname(host->mmc), host);
159
160 if (rc) {
161 dev_err(mmc_dev(host->mmc), "request irq error\n");
162 goto out_cd;
163 }
164
165 }
166
167 if (gpio_is_valid(plat->wp_gpio)) {
168 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
169 if (rc) {
170 dev_err(mmc_dev(host->mmc),
171 "failed to allocate wp gpio\n");
172 goto out_cd;
173 }
174 tegra_gpio_enable(plat->wp_gpio);
175 gpio_direction_input(plat->wp_gpio);
176 }
177
178 clk = clk_get(mmc_dev(host->mmc), NULL);
179 if (IS_ERR(clk)) {
180 dev_err(mmc_dev(host->mmc), "clk err\n");
181 rc = PTR_ERR(clk);
182 goto out_wp;
183 }
184 clk_enable(clk);
185 pltfm_host->clk = clk;
186
187 if (plat->is_8bit)
188 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
189
190 return 0;
191
192out_wp:
193 if (gpio_is_valid(plat->wp_gpio)) {
194 tegra_gpio_disable(plat->wp_gpio);
195 gpio_free(plat->wp_gpio);
196 }
197
198out_cd:
199 if (gpio_is_valid(plat->cd_gpio)) {
200 tegra_gpio_disable(plat->cd_gpio);
201 gpio_free(plat->cd_gpio);
202 }
203
204out_power:
205 if (gpio_is_valid(plat->power_gpio)) {
206 tegra_gpio_disable(plat->power_gpio);
207 gpio_free(plat->power_gpio);
208 }
209
210out:
211 return rc;
212}
213
214static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
215{
216 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
217 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
218 struct tegra_sdhci_platform_data *plat;
219
220 plat = pdev->dev.platform_data;
221
222 if (gpio_is_valid(plat->wp_gpio)) {
223 tegra_gpio_disable(plat->wp_gpio);
224 gpio_free(plat->wp_gpio);
225 }
226
227 if (gpio_is_valid(plat->cd_gpio)) {
228 tegra_gpio_disable(plat->cd_gpio);
229 gpio_free(plat->cd_gpio);
230 }
231
232 if (gpio_is_valid(plat->power_gpio)) {
233 tegra_gpio_disable(plat->power_gpio);
234 gpio_free(plat->power_gpio);
235 }
236
237 clk_disable(pltfm_host->clk);
238 clk_put(pltfm_host->clk);
239}
240
241static struct sdhci_ops tegra_sdhci_ops = {
242 .get_ro = tegra_sdhci_get_ro,
243 .read_l = tegra_sdhci_readl,
244 .read_w = tegra_sdhci_readw,
245 .write_l = tegra_sdhci_writel,
246 .platform_8bit_width = tegra_sdhci_8bit,
247};
248
249struct sdhci_pltfm_data sdhci_tegra_pdata = {
250 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
251 SDHCI_QUIRK_SINGLE_POWER_WRITE |
252 SDHCI_QUIRK_NO_HISPD_BIT |
253 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
254 .ops = &tegra_sdhci_ops,
255 .init = tegra_sdhci_pltfm_init,
256 .exit = tegra_sdhci_pltfm_exit,
257};
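For reference, tegra_sdhci_pltfm_init() above expects the board code to hand it a struct tegra_sdhci_platform_data through pdev->dev.platform_data. A minimal sketch of such a definition follows; the GPIO numbers are invented for illustration, and only the field names are taken from the code above.

	static struct tegra_sdhci_platform_data tegra_sdhci_pdata_example = {
		.power_gpio	= 31,	/* hypothetical pin numbers */
		.cd_gpio	= 58,
		.wp_gpio	= 59,
		.is_8bit	= 0,	/* 1 only on slots wired for 8-bit data */
	};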
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a25db426c910..9e15f41f87be 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/leds.h> 24#include <linux/leds.h>
25 25
26#include <linux/mmc/mmc.h>
26#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
27 28
28#include "sdhci.h" 29#include "sdhci.h"
@@ -77,8 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host)
77 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 78 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
78 sdhci_readw(host, SDHCI_ACMD12_ERR), 79 sdhci_readw(host, SDHCI_ACMD12_ERR),
79 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 80 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
80 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n", 81 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
81 sdhci_readl(host, SDHCI_CAPABILITIES), 82 sdhci_readl(host, SDHCI_CAPABILITIES),
83 sdhci_readl(host, SDHCI_CAPABILITIES_1));
84 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
85 sdhci_readw(host, SDHCI_COMMAND),
82 sdhci_readl(host, SDHCI_MAX_CURRENT)); 86 sdhci_readl(host, SDHCI_MAX_CURRENT));
83 87
84 if (host->flags & SDHCI_USE_ADMA) 88 if (host->flags & SDHCI_USE_ADMA)
@@ -1518,7 +1522,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1518 1522
1519 if (intmask & SDHCI_INT_DATA_TIMEOUT) 1523 if (intmask & SDHCI_INT_DATA_TIMEOUT)
1520 host->data->error = -ETIMEDOUT; 1524 host->data->error = -ETIMEDOUT;
1521 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 1525 else if (intmask & SDHCI_INT_DATA_END_BIT)
1526 host->data->error = -EILSEQ;
1527 else if ((intmask & SDHCI_INT_DATA_CRC) &&
1528 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
1529 != MMC_BUS_TEST_R)
1522 host->data->error = -EILSEQ; 1530 host->data->error = -EILSEQ;
1523 else if (intmask & SDHCI_INT_ADMA_ERROR) { 1531 else if (intmask & SDHCI_INT_ADMA_ERROR) {
1524 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); 1532 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
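A note on the MMC_BUS_TEST_R special case: the CMD14 bus-test pattern is not expected to pass the controller's CRC check even on a healthy bus, so treating a data CRC interrupt during that command as a real error would make every bus test fail; that appears to be the rationale for the extra SDHCI_GET_CMD() comparison. Summarised:

	/*
	 * Resulting policy of this hunk:
	 *   DATA_TIMEOUT             -> -ETIMEDOUT
	 *   DATA_END_BIT             -> -EILSEQ
	 *   DATA_CRC, CMD != CMD14   -> -EILSEQ
	 *   DATA_CRC, CMD == CMD14   -> ignored (expected during bus test)
	 */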
@@ -1736,7 +1744,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1736int sdhci_add_host(struct sdhci_host *host) 1744int sdhci_add_host(struct sdhci_host *host)
1737{ 1745{
1738 struct mmc_host *mmc; 1746 struct mmc_host *mmc;
1739 unsigned int caps; 1747 unsigned int caps, ocr_avail;
1740 int ret; 1748 int ret;
1741 1749
1742 WARN_ON(host == NULL); 1750 WARN_ON(host == NULL);
@@ -1890,13 +1898,26 @@ int sdhci_add_host(struct sdhci_host *host)
1890 mmc_card_is_removable(mmc)) 1898 mmc_card_is_removable(mmc))
1891 mmc->caps |= MMC_CAP_NEEDS_POLL; 1899 mmc->caps |= MMC_CAP_NEEDS_POLL;
1892 1900
1893 mmc->ocr_avail = 0; 1901 ocr_avail = 0;
1894 if (caps & SDHCI_CAN_VDD_330) 1902 if (caps & SDHCI_CAN_VDD_330)
1895 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 1903 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
1896 if (caps & SDHCI_CAN_VDD_300) 1904 if (caps & SDHCI_CAN_VDD_300)
1897 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 1905 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
1898 if (caps & SDHCI_CAN_VDD_180) 1906 if (caps & SDHCI_CAN_VDD_180)
1899 mmc->ocr_avail |= MMC_VDD_165_195; 1907 ocr_avail |= MMC_VDD_165_195;
1908
1909 mmc->ocr_avail = ocr_avail;
1910 mmc->ocr_avail_sdio = ocr_avail;
1911 if (host->ocr_avail_sdio)
1912 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
1913 mmc->ocr_avail_sd = ocr_avail;
1914 if (host->ocr_avail_sd)
1915 mmc->ocr_avail_sd &= host->ocr_avail_sd;
1916 else /* normal SD controllers don't support 1.8V */
1917 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
1918 mmc->ocr_avail_mmc = ocr_avail;
1919 if (host->ocr_avail_mmc)
1920 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
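A worked example of the new per-bus OCR masks, assuming the capability register reports 3.3 V and 1.8 V support and the host driver sets none of the ocr_avail_* overrides (values follow directly from the code above):

	/*
	 * caps           = SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_180
	 * ocr_avail      = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195
	 * ocr_avail_sdio = ocr_avail                     (no override)
	 * ocr_avail_mmc  = ocr_avail                     (no override)
	 * ocr_avail_sd   = ocr_avail & ~MMC_VDD_165_195  (1.8 V stripped)
	 */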
1900 1921
1901 if (mmc->ocr_avail == 0) { 1922 if (mmc->ocr_avail == 0) {
1902 printk(KERN_ERR "%s: Hardware doesn't report any " 1923 printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1928,10 +1949,14 @@ int sdhci_add_host(struct sdhci_host *host)
1928 * of bytes. When doing hardware scatter/gather, each entry cannot 1949 * of bytes. When doing hardware scatter/gather, each entry cannot
1929 * be larger than 64 KiB though. 1950 * be larger than 64 KiB though.
1930 */ 1951 */
1931 if (host->flags & SDHCI_USE_ADMA) 1952 if (host->flags & SDHCI_USE_ADMA) {
1932 mmc->max_seg_size = 65536; 1953 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
1933 else 1954 mmc->max_seg_size = 65535;
1955 else
1956 mmc->max_seg_size = 65536;
1957 } else {
1934 mmc->max_seg_size = mmc->max_req_size; 1958 mmc->max_seg_size = mmc->max_req_size;
1959 }
1935 1960
1936 /* 1961 /*
1937 * Maximum block size. This varies from controller to controller and 1962 * Maximum block size. This varies from controller to controller and
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e42d7f00c060..6e0969e40650 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -52,6 +52,7 @@
52#define SDHCI_CMD_RESP_SHORT_BUSY 0x03 52#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
53 53
54#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) 54#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
 55#define SDHCI_GET_CMD(c) (((c) >> 8) & 0x3f)
55 56
56#define SDHCI_RESPONSE 0x10 57#define SDHCI_RESPONSE 0x10
57 58
@@ -165,7 +166,7 @@
165#define SDHCI_CAN_VDD_180 0x04000000 166#define SDHCI_CAN_VDD_180 0x04000000
166#define SDHCI_CAN_64BIT 0x10000000 167#define SDHCI_CAN_64BIT 0x10000000
167 168
168/* 44-47 reserved for more caps */ 169#define SDHCI_CAPABILITIES_1 0x44
169 170
170#define SDHCI_MAX_CURRENT 0x48 171#define SDHCI_MAX_CURRENT 0x48
171 172
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e7765a89593e..e3c6ef208391 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -25,16 +25,261 @@
25 * double buffer support 25 * double buffer support
26 * 26 *
27 */ 27 */
28#include <linux/module.h> 28
29#include <linux/irq.h>
30#include <linux/device.h>
31#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/device.h>
32#include <linux/dmaengine.h> 31#include <linux/dmaengine.h>
33#include <linux/mmc/host.h> 32#include <linux/highmem.h>
33#include <linux/interrupt.h>
34#include <linux/io.h>
35#include <linux/irq.h>
34#include <linux/mfd/core.h> 36#include <linux/mfd/core.h>
35#include <linux/mfd/tmio.h> 37#include <linux/mfd/tmio.h>
38#include <linux/mmc/host.h>
39#include <linux/module.h>
40#include <linux/pagemap.h>
41#include <linux/scatterlist.h>
42#include <linux/workqueue.h>
43#include <linux/spinlock.h>
44
45#define CTL_SD_CMD 0x00
46#define CTL_ARG_REG 0x04
47#define CTL_STOP_INTERNAL_ACTION 0x08
48#define CTL_XFER_BLK_COUNT 0xa
49#define CTL_RESPONSE 0x0c
50#define CTL_STATUS 0x1c
51#define CTL_IRQ_MASK 0x20
52#define CTL_SD_CARD_CLK_CTL 0x24
53#define CTL_SD_XFER_LEN 0x26
54#define CTL_SD_MEM_CARD_OPT 0x28
55#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
56#define CTL_SD_DATA_PORT 0x30
57#define CTL_TRANSACTION_CTL 0x34
58#define CTL_SDIO_STATUS 0x36
59#define CTL_SDIO_IRQ_MASK 0x38
60#define CTL_RESET_SD 0xe0
61#define CTL_SDIO_REGS 0x100
62#define CTL_CLK_AND_WAIT_CTL 0x138
63#define CTL_RESET_SDIO 0x1e0
64
 65/* Definitions for values the CTL_STATUS register can take. */
66#define TMIO_STAT_CMDRESPEND 0x00000001
67#define TMIO_STAT_DATAEND 0x00000004
68#define TMIO_STAT_CARD_REMOVE 0x00000008
69#define TMIO_STAT_CARD_INSERT 0x00000010
70#define TMIO_STAT_SIGSTATE 0x00000020
71#define TMIO_STAT_WRPROTECT 0x00000080
72#define TMIO_STAT_CARD_REMOVE_A 0x00000100
73#define TMIO_STAT_CARD_INSERT_A 0x00000200
74#define TMIO_STAT_SIGSTATE_A 0x00000400
75#define TMIO_STAT_CMD_IDX_ERR 0x00010000
76#define TMIO_STAT_CRCFAIL 0x00020000
77#define TMIO_STAT_STOPBIT_ERR 0x00040000
78#define TMIO_STAT_DATATIMEOUT 0x00080000
79#define TMIO_STAT_RXOVERFLOW 0x00100000
80#define TMIO_STAT_TXUNDERRUN 0x00200000
81#define TMIO_STAT_CMDTIMEOUT 0x00400000
82#define TMIO_STAT_RXRDY 0x01000000
83#define TMIO_STAT_TXRQ 0x02000000
84#define TMIO_STAT_ILL_FUNC 0x20000000
85#define TMIO_STAT_CMD_BUSY 0x40000000
86#define TMIO_STAT_ILL_ACCESS 0x80000000
87
 88/* Definitions for values the CTL_SDIO_STATUS register can take. */
89#define TMIO_SDIO_STAT_IOIRQ 0x0001
90#define TMIO_SDIO_STAT_EXPUB52 0x4000
91#define TMIO_SDIO_STAT_EXWT 0x8000
92#define TMIO_SDIO_MASK_ALL 0xc007
93
94/* Define some IRQ masks */
95/* This is the mask used at reset by the chip */
96#define TMIO_MASK_ALL 0x837f031d
97#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
98#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
99#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
100 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
101#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
102
103#define enable_mmc_irqs(host, i) \
104 do { \
105 u32 mask;\
106 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
107 mask &= ~((i) & TMIO_MASK_IRQ); \
108 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
109 } while (0)
110
111#define disable_mmc_irqs(host, i) \
112 do { \
113 u32 mask;\
114 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
115 mask |= ((i) & TMIO_MASK_IRQ); \
116 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
117 } while (0)
118
119#define ack_mmc_irqs(host, i) \
120 do { \
121 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
122 } while (0)
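Note the inverted sense of CTL_IRQ_MASK: a set bit masks an interrupt, so enable_mmc_irqs() clears bits, disable_mmc_irqs() sets them, and ack_mmc_irqs() clears status bits by writing their complement to CTL_STATUS. An illustrative command-phase sequence, mirroring how the driver below uses these helpers:

	enable_mmc_irqs(host, TMIO_MASK_CMD);	/* unmask before issuing CMD */
	/* ... IRQ handler observes CMDRESPEND or CMDTIMEOUT ... */
	ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT);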
123
 124/* This is arbitrary; no one has needed any higher alignment yet */
125#define MAX_ALIGN 4
126
127struct tmio_mmc_host {
128 void __iomem *ctl;
129 unsigned long bus_shift;
130 struct mmc_command *cmd;
131 struct mmc_request *mrq;
132 struct mmc_data *data;
133 struct mmc_host *mmc;
134 int irq;
135 unsigned int sdio_irq_enabled;
136
137 /* Callbacks for clock / power control */
138 void (*set_pwr)(struct platform_device *host, int state);
139 void (*set_clk_div)(struct platform_device *host, int state);
140
141 /* pio related stuff */
142 struct scatterlist *sg_ptr;
143 struct scatterlist *sg_orig;
144 unsigned int sg_len;
145 unsigned int sg_off;
146
147 struct platform_device *pdev;
148
149 /* DMA support */
150 struct dma_chan *chan_rx;
151 struct dma_chan *chan_tx;
152 struct tasklet_struct dma_complete;
153 struct tasklet_struct dma_issue;
154#ifdef CONFIG_TMIO_MMC_DMA
155 unsigned int dma_sglen;
156 u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
157 struct scatterlist bounce_sg;
158#endif
159
160 /* Track lost interrupts */
161 struct delayed_work delayed_reset_work;
162 spinlock_t lock;
163 unsigned long last_req_ts;
164};
165
166static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
167
168static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
169{
170 return readw(host->ctl + (addr << host->bus_shift));
171}
172
173static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
174 u16 *buf, int count)
175{
176 readsw(host->ctl + (addr << host->bus_shift), buf, count);
177}
36 178
37#include "tmio_mmc.h" 179static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
180{
181 return readw(host->ctl + (addr << host->bus_shift)) |
182 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
183}
184
185static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
186{
187 writew(val, host->ctl + (addr << host->bus_shift));
188}
189
190static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
191 u16 *buf, int count)
192{
193 writesw(host->ctl + (addr << host->bus_shift), buf, count);
194}
195
196static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
197{
198 writew(val, host->ctl + (addr << host->bus_shift));
199 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
200}
201
202static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
203{
204 host->sg_len = data->sg_len;
205 host->sg_ptr = data->sg;
206 host->sg_orig = data->sg;
207 host->sg_off = 0;
208}
209
210static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
211{
212 host->sg_ptr = sg_next(host->sg_ptr);
213 host->sg_off = 0;
214 return --host->sg_len;
215}
216
217static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
218{
219 local_irq_save(*flags);
220 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
221}
222
223static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
224{
225 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
226 local_irq_restore(*flags);
227}
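These two helpers bracket an atomic highmem mapping with local interrupt disabling, which is what lets the bounce-buffer code further down copy a scatterlist element safely. The write-side pattern, as tmio_mmc_start_dma_tx() below uses it:

	unsigned long flags;
	void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

	memcpy(host->bounce_buf, sg_vaddr, sg->length);
	tmio_mmc_kunmap_atomic(sg_vaddr, &flags);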
228
229#ifdef CONFIG_MMC_DEBUG
230
231#define STATUS_TO_TEXT(a) \
232 do { \
233 if (status & TMIO_STAT_##a) \
234 printk(#a); \
235 } while (0)
236
237void pr_debug_status(u32 status)
238{
239 printk(KERN_DEBUG "status: %08x = ", status);
240 STATUS_TO_TEXT(CARD_REMOVE);
241 STATUS_TO_TEXT(CARD_INSERT);
242 STATUS_TO_TEXT(SIGSTATE);
243 STATUS_TO_TEXT(WRPROTECT);
244 STATUS_TO_TEXT(CARD_REMOVE_A);
245 STATUS_TO_TEXT(CARD_INSERT_A);
246 STATUS_TO_TEXT(SIGSTATE_A);
247 STATUS_TO_TEXT(CMD_IDX_ERR);
248 STATUS_TO_TEXT(STOPBIT_ERR);
249 STATUS_TO_TEXT(ILL_FUNC);
250 STATUS_TO_TEXT(CMD_BUSY);
251 STATUS_TO_TEXT(CMDRESPEND);
252 STATUS_TO_TEXT(DATAEND);
253 STATUS_TO_TEXT(CRCFAIL);
254 STATUS_TO_TEXT(DATATIMEOUT);
255 STATUS_TO_TEXT(CMDTIMEOUT);
256 STATUS_TO_TEXT(RXOVERFLOW);
257 STATUS_TO_TEXT(TXUNDERRUN);
258 STATUS_TO_TEXT(RXRDY);
259 STATUS_TO_TEXT(TXRQ);
260 STATUS_TO_TEXT(ILL_ACCESS);
261 printk("\n");
262}
263
264#else
265#define pr_debug_status(s) do { } while (0)
266#endif
267
268static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
269{
270 struct tmio_mmc_host *host = mmc_priv(mmc);
271
272 if (enable) {
273 host->sdio_irq_enabled = 1;
274 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
275 sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
276 (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
277 } else {
278 sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
279 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
280 host->sdio_irq_enabled = 0;
281 }
282}
38 283
39static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) 284static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
40{ 285{
@@ -55,8 +300,23 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
55 300
56static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) 301static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
57{ 302{
303 struct mfd_cell *cell = host->pdev->dev.platform_data;
304 struct tmio_mmc_data *pdata = cell->driver_data;
305
306 /*
307 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
308 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
309 * device IRQ here and restore the SDIO IRQ mask before
310 * re-enabling the device IRQ.
311 */
312 if (pdata->flags & TMIO_MMC_SDIO_IRQ)
313 disable_irq(host->irq);
58 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); 314 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
59 msleep(10); 315 msleep(10);
316 if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
317 tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
318 enable_irq(host->irq);
319 }
60 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & 320 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
61 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 321 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
62 msleep(10); 322 msleep(10);
@@ -64,11 +324,21 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
64 324
65static void tmio_mmc_clk_start(struct tmio_mmc_host *host) 325static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
66{ 326{
327 struct mfd_cell *cell = host->pdev->dev.platform_data;
328 struct tmio_mmc_data *pdata = cell->driver_data;
329
67 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | 330 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
68 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 331 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
69 msleep(10); 332 msleep(10);
333 /* see comment in tmio_mmc_clk_stop above */
334 if (pdata->flags & TMIO_MMC_SDIO_IRQ)
335 disable_irq(host->irq);
70 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); 336 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
71 msleep(10); 337 msleep(10);
338 if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
339 tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
340 enable_irq(host->irq);
341 }
72} 342}
73 343
74static void reset(struct tmio_mmc_host *host) 344static void reset(struct tmio_mmc_host *host)
@@ -82,15 +352,60 @@ static void reset(struct tmio_mmc_host *host)
82 msleep(10); 352 msleep(10);
83} 353}
84 354
355static void tmio_mmc_reset_work(struct work_struct *work)
356{
357 struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
358 delayed_reset_work.work);
359 struct mmc_request *mrq;
360 unsigned long flags;
361
362 spin_lock_irqsave(&host->lock, flags);
363 mrq = host->mrq;
364
 365	/* request already finished, or the 2 s deadline has not passed yet */
366 if (!mrq
367 || time_is_after_jiffies(host->last_req_ts +
368 msecs_to_jiffies(2000))) {
369 spin_unlock_irqrestore(&host->lock, flags);
370 return;
371 }
372
373 dev_warn(&host->pdev->dev,
374 "timeout waiting for hardware interrupt (CMD%u)\n",
375 mrq->cmd->opcode);
376
377 if (host->data)
378 host->data->error = -ETIMEDOUT;
379 else if (host->cmd)
380 host->cmd->error = -ETIMEDOUT;
381 else
382 mrq->cmd->error = -ETIMEDOUT;
383
384 host->cmd = NULL;
385 host->data = NULL;
386 host->mrq = NULL;
387
388 spin_unlock_irqrestore(&host->lock, flags);
389
390 reset(host);
391
392 mmc_request_done(host->mmc, mrq);
393}
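This handler is one half of a two-second software watchdog against lost interrupts; the request and completion paths further down arm and disarm it. In outline (both calls appear verbatim in later hunks):

	/* tmio_mmc_request(): arm the watchdog alongside the command */
	host->last_req_ts = jiffies;
	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(2000));

	/* tmio_mmc_finish_request(): disarm it on normal completion */
	cancel_delayed_work(&host->delayed_reset_work);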
394
85static void 395static void
86tmio_mmc_finish_request(struct tmio_mmc_host *host) 396tmio_mmc_finish_request(struct tmio_mmc_host *host)
87{ 397{
88 struct mmc_request *mrq = host->mrq; 398 struct mmc_request *mrq = host->mrq;
89 399
400 if (!mrq)
401 return;
402
90 host->mrq = NULL; 403 host->mrq = NULL;
91 host->cmd = NULL; 404 host->cmd = NULL;
92 host->data = NULL; 405 host->data = NULL;
93 406
407 cancel_delayed_work(&host->delayed_reset_work);
408
94 mmc_request_done(host->mmc, mrq); 409 mmc_request_done(host->mmc, mrq);
95} 410}
96 411
@@ -200,6 +515,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
200 return; 515 return;
201} 516}
202 517
518/* needs to be called with host->lock held */
203static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) 519static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
204{ 520{
205 struct mmc_data *data = host->data; 521 struct mmc_data *data = host->data;
@@ -233,6 +549,8 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
233 if (data->flags & MMC_DATA_READ) { 549 if (data->flags & MMC_DATA_READ) {
234 if (!host->chan_rx) 550 if (!host->chan_rx)
235 disable_mmc_irqs(host, TMIO_MASK_READOP); 551 disable_mmc_irqs(host, TMIO_MASK_READOP);
552 else
553 tmio_check_bounce_buffer(host);
236 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", 554 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
237 host->mrq); 555 host->mrq);
238 } else { 556 } else {
@@ -254,10 +572,12 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
254 572
255static void tmio_mmc_data_irq(struct tmio_mmc_host *host) 573static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
256{ 574{
257 struct mmc_data *data = host->data; 575 struct mmc_data *data;
576 spin_lock(&host->lock);
577 data = host->data;
258 578
259 if (!data) 579 if (!data)
260 return; 580 goto out;
261 581
262 if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { 582 if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
263 /* 583 /*
@@ -278,6 +598,8 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
278 } else { 598 } else {
279 tmio_mmc_do_data_irq(host); 599 tmio_mmc_do_data_irq(host);
280 } 600 }
601out:
602 spin_unlock(&host->lock);
281} 603}
282 604
283static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, 605static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -286,9 +608,11 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
286 struct mmc_command *cmd = host->cmd; 608 struct mmc_command *cmd = host->cmd;
287 int i, addr; 609 int i, addr;
288 610
611 spin_lock(&host->lock);
612
289 if (!host->cmd) { 613 if (!host->cmd) {
290 pr_debug("Spurious CMD irq\n"); 614 pr_debug("Spurious CMD irq\n");
291 return; 615 goto out;
292 } 616 }
293 617
294 host->cmd = NULL; 618 host->cmd = NULL;
@@ -324,8 +648,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
324 if (!host->chan_rx) 648 if (!host->chan_rx)
325 enable_mmc_irqs(host, TMIO_MASK_READOP); 649 enable_mmc_irqs(host, TMIO_MASK_READOP);
326 } else { 650 } else {
327 struct dma_chan *chan = host->chan_tx; 651 if (!host->chan_tx)
328 if (!chan)
329 enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 652 enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
330 else 653 else
331 tasklet_schedule(&host->dma_issue); 654 tasklet_schedule(&host->dma_issue);
@@ -334,13 +657,19 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
334 tmio_mmc_finish_request(host); 657 tmio_mmc_finish_request(host);
335 } 658 }
336 659
660out:
661 spin_unlock(&host->lock);
662
337 return; 663 return;
338} 664}
339 665
340static irqreturn_t tmio_mmc_irq(int irq, void *devid) 666static irqreturn_t tmio_mmc_irq(int irq, void *devid)
341{ 667{
342 struct tmio_mmc_host *host = devid; 668 struct tmio_mmc_host *host = devid;
669 struct mfd_cell *cell = host->pdev->dev.platform_data;
670 struct tmio_mmc_data *pdata = cell->driver_data;
343 unsigned int ireg, irq_mask, status; 671 unsigned int ireg, irq_mask, status;
672 unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
344 673
345 pr_debug("MMC IRQ begin\n"); 674 pr_debug("MMC IRQ begin\n");
346 675
@@ -348,6 +677,29 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
348 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 677 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
349 ireg = status & TMIO_MASK_IRQ & ~irq_mask; 678 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
350 679
680 sdio_ireg = 0;
681 if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
682 sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
683 sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
684 sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
685
686 sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
687
688 if (sdio_ireg && !host->sdio_irq_enabled) {
689 pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
690 sdio_status, sdio_irq_mask, sdio_ireg);
691 tmio_mmc_enable_sdio_irq(host->mmc, 0);
692 goto out;
693 }
694
695 if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
696 sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
697 mmc_signal_sdio_irq(host->mmc);
698
699 if (sdio_ireg)
700 goto out;
701 }
702
351 pr_debug_status(status); 703 pr_debug_status(status);
352 pr_debug_status(ireg); 704 pr_debug_status(ireg);
353 705
@@ -375,8 +727,10 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
375 */ 727 */
376 728
377 /* Command completion */ 729 /* Command completion */
378 if (ireg & TMIO_MASK_CMD) { 730 if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
379 ack_mmc_irqs(host, TMIO_MASK_CMD); 731 ack_mmc_irqs(host,
732 TMIO_STAT_CMDRESPEND |
733 TMIO_STAT_CMDTIMEOUT);
380 tmio_mmc_cmd_irq(host, status); 734 tmio_mmc_cmd_irq(host, status);
381 } 735 }
382 736
@@ -407,6 +761,16 @@ out:
407} 761}
408 762
409#ifdef CONFIG_TMIO_MMC_DMA 763#ifdef CONFIG_TMIO_MMC_DMA
764static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
765{
766 if (host->sg_ptr == &host->bounce_sg) {
767 unsigned long flags;
768 void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
769 memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
770 tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
771 }
772}
773
410static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) 774static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
411{ 775{
412#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) 776#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
@@ -427,12 +791,39 @@ static void tmio_dma_complete(void *arg)
427 enable_mmc_irqs(host, TMIO_STAT_DATAEND); 791 enable_mmc_irqs(host, TMIO_STAT_DATAEND);
428} 792}
429 793
430static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) 794static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
431{ 795{
432 struct scatterlist *sg = host->sg_ptr; 796 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
433 struct dma_async_tx_descriptor *desc = NULL; 797 struct dma_async_tx_descriptor *desc = NULL;
434 struct dma_chan *chan = host->chan_rx; 798 struct dma_chan *chan = host->chan_rx;
435 int ret; 799 struct mfd_cell *cell = host->pdev->dev.platform_data;
800 struct tmio_mmc_data *pdata = cell->driver_data;
801 dma_cookie_t cookie;
802 int ret, i;
803 bool aligned = true, multiple = true;
804 unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
805
806 for_each_sg(sg, sg_tmp, host->sg_len, i) {
807 if (sg_tmp->offset & align)
808 aligned = false;
809 if (sg_tmp->length & align) {
810 multiple = false;
811 break;
812 }
813 }
814
815 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
816 align >= MAX_ALIGN)) || !multiple) {
817 ret = -EINVAL;
818 goto pio;
819 }
820
 821	/* Only a single sg element may be unaligned; stage it via our bounce buffer */
822 if (!aligned) {
823 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
824 host->sg_ptr = &host->bounce_sg;
825 sg = host->sg_ptr;
826 }
436 827
437 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE); 828 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
438 if (ret > 0) { 829 if (ret > 0) {
@@ -442,21 +833,21 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
442 } 833 }
443 834
444 if (desc) { 835 if (desc) {
445 host->desc = desc;
446 desc->callback = tmio_dma_complete; 836 desc->callback = tmio_dma_complete;
447 desc->callback_param = host; 837 desc->callback_param = host;
448 host->cookie = desc->tx_submit(desc); 838 cookie = desc->tx_submit(desc);
449 if (host->cookie < 0) { 839 if (cookie < 0) {
450 host->desc = NULL; 840 desc = NULL;
451 ret = host->cookie; 841 ret = cookie;
452 } else { 842 } else {
453 chan->device->device_issue_pending(chan); 843 chan->device->device_issue_pending(chan);
454 } 844 }
455 } 845 }
456 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 846 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
457 __func__, host->sg_len, ret, host->cookie, host->mrq); 847 __func__, host->sg_len, ret, cookie, host->mrq);
458 848
459 if (!host->desc) { 849pio:
850 if (!desc) {
460 /* DMA failed, fall back to PIO */ 851 /* DMA failed, fall back to PIO */
461 if (ret >= 0) 852 if (ret >= 0)
462 ret = -EIO; 853 ret = -EIO;
@@ -471,24 +862,49 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
471 dev_warn(&host->pdev->dev, 862 dev_warn(&host->pdev->dev,
472 "DMA failed: %d, falling back to PIO\n", ret); 863 "DMA failed: %d, falling back to PIO\n", ret);
473 tmio_mmc_enable_dma(host, false); 864 tmio_mmc_enable_dma(host, false);
474 reset(host);
475 /* Fail this request, let above layers recover */
476 host->mrq->cmd->error = ret;
477 tmio_mmc_finish_request(host);
478 } 865 }
479 866
480 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, 867 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
481 desc, host->cookie, host->sg_len); 868 desc, cookie, host->sg_len);
482
483 return ret > 0 ? 0 : ret;
484} 869}
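The alignment gate at the top of this function is easier to follow with numbers. Assuming pdata->dma->alignment_shift == 1 (so align == 1), a few representative scatterlists fare as follows:

	/*
	 * one entry, offset 0, length 512: aligned, even length -> DMA directly
	 * one entry, offset 3, length 512: unaligned, but single and short
	 *                                  -> staged through host->bounce_buf
	 * one entry, offset 0, length 511: odd length           -> PIO fallback
	 * two entries, one unaligned:                           -> PIO fallback
	 */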
485 870
486static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) 871static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
487{ 872{
488 struct scatterlist *sg = host->sg_ptr; 873 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
489 struct dma_async_tx_descriptor *desc = NULL; 874 struct dma_async_tx_descriptor *desc = NULL;
490 struct dma_chan *chan = host->chan_tx; 875 struct dma_chan *chan = host->chan_tx;
491 int ret; 876 struct mfd_cell *cell = host->pdev->dev.platform_data;
877 struct tmio_mmc_data *pdata = cell->driver_data;
878 dma_cookie_t cookie;
879 int ret, i;
880 bool aligned = true, multiple = true;
881 unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
882
883 for_each_sg(sg, sg_tmp, host->sg_len, i) {
884 if (sg_tmp->offset & align)
885 aligned = false;
886 if (sg_tmp->length & align) {
887 multiple = false;
888 break;
889 }
890 }
891
892 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
893 align >= MAX_ALIGN)) || !multiple) {
894 ret = -EINVAL;
895 goto pio;
896 }
897
 898	/* Only a single sg element may be unaligned; stage it via our bounce buffer */
899 if (!aligned) {
900 unsigned long flags;
901 void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
902 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
903 memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
904 tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
905 host->sg_ptr = &host->bounce_sg;
906 sg = host->sg_ptr;
907 }
492 908
493 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE); 909 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
494 if (ret > 0) { 910 if (ret > 0) {
@@ -498,19 +914,19 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
498 } 914 }
499 915
500 if (desc) { 916 if (desc) {
501 host->desc = desc;
502 desc->callback = tmio_dma_complete; 917 desc->callback = tmio_dma_complete;
503 desc->callback_param = host; 918 desc->callback_param = host;
504 host->cookie = desc->tx_submit(desc); 919 cookie = desc->tx_submit(desc);
505 if (host->cookie < 0) { 920 if (cookie < 0) {
506 host->desc = NULL; 921 desc = NULL;
507 ret = host->cookie; 922 ret = cookie;
508 } 923 }
509 } 924 }
510 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 925 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
511 __func__, host->sg_len, ret, host->cookie, host->mrq); 926 __func__, host->sg_len, ret, cookie, host->mrq);
512 927
513 if (!host->desc) { 928pio:
929 if (!desc) {
514 /* DMA failed, fall back to PIO */ 930 /* DMA failed, fall back to PIO */
515 if (ret >= 0) 931 if (ret >= 0)
516 ret = -EIO; 932 ret = -EIO;
@@ -525,30 +941,22 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
525 dev_warn(&host->pdev->dev, 941 dev_warn(&host->pdev->dev,
526 "DMA failed: %d, falling back to PIO\n", ret); 942 "DMA failed: %d, falling back to PIO\n", ret);
527 tmio_mmc_enable_dma(host, false); 943 tmio_mmc_enable_dma(host, false);
528 reset(host);
529 /* Fail this request, let above layers recover */
530 host->mrq->cmd->error = ret;
531 tmio_mmc_finish_request(host);
532 } 944 }
533 945
534 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, 946 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
535 desc, host->cookie); 947 desc, cookie);
536
537 return ret > 0 ? 0 : ret;
538} 948}
539 949
540static int tmio_mmc_start_dma(struct tmio_mmc_host *host, 950static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
541 struct mmc_data *data) 951 struct mmc_data *data)
542{ 952{
543 if (data->flags & MMC_DATA_READ) { 953 if (data->flags & MMC_DATA_READ) {
544 if (host->chan_rx) 954 if (host->chan_rx)
545 return tmio_mmc_start_dma_rx(host); 955 tmio_mmc_start_dma_rx(host);
546 } else { 956 } else {
547 if (host->chan_tx) 957 if (host->chan_tx)
548 return tmio_mmc_start_dma_tx(host); 958 tmio_mmc_start_dma_tx(host);
549 } 959 }
550
551 return 0;
552} 960}
553 961
554static void tmio_issue_tasklet_fn(unsigned long priv) 962static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -562,6 +970,12 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
562static void tmio_tasklet_fn(unsigned long arg) 970static void tmio_tasklet_fn(unsigned long arg)
563{ 971{
564 struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; 972 struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
973 unsigned long flags;
974
975 spin_lock_irqsave(&host->lock, flags);
976
977 if (!host->data)
978 goto out;
565 979
566 if (host->data->flags & MMC_DATA_READ) 980 if (host->data->flags & MMC_DATA_READ)
567 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, 981 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -571,6 +985,8 @@ static void tmio_tasklet_fn(unsigned long arg)
571 DMA_TO_DEVICE); 985 DMA_TO_DEVICE);
572 986
573 tmio_mmc_do_data_irq(host); 987 tmio_mmc_do_data_irq(host);
988out:
989 spin_unlock_irqrestore(&host->lock, flags);
574} 990}
575 991
576/* It might be necessary to make filter MFD specific */ 992/* It might be necessary to make filter MFD specific */
@@ -584,9 +1000,6 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
584static void tmio_mmc_request_dma(struct tmio_mmc_host *host, 1000static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
585 struct tmio_mmc_data *pdata) 1001 struct tmio_mmc_data *pdata)
586{ 1002{
587 host->cookie = -EINVAL;
588 host->desc = NULL;
589
590 /* We can only either use DMA for both Tx and Rx or not use it at all */ 1003 /* We can only either use DMA for both Tx and Rx or not use it at all */
591 if (pdata->dma) { 1004 if (pdata->dma) {
592 dma_cap_mask_t mask; 1005 dma_cap_mask_t mask;
@@ -632,15 +1045,15 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
632 host->chan_rx = NULL; 1045 host->chan_rx = NULL;
633 dma_release_channel(chan); 1046 dma_release_channel(chan);
634 } 1047 }
635
636 host->cookie = -EINVAL;
637 host->desc = NULL;
638} 1048}
639#else 1049#else
640static int tmio_mmc_start_dma(struct tmio_mmc_host *host, 1050static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
1051{
1052}
1053
1054static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
641 struct mmc_data *data) 1055 struct mmc_data *data)
642{ 1056{
643 return 0;
644} 1057}
645 1058
646static void tmio_mmc_request_dma(struct tmio_mmc_host *host, 1059static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +1095,9 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
682 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); 1095 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
683 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); 1096 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
684 1097
685 return tmio_mmc_start_dma(host, data); 1098 tmio_mmc_start_dma(host, data);
1099
1100 return 0;
686} 1101}
687 1102
688/* Process requests from the MMC layer */ 1103/* Process requests from the MMC layer */
@@ -694,6 +1109,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
694 if (host->mrq) 1109 if (host->mrq)
695 pr_debug("request not null\n"); 1110 pr_debug("request not null\n");
696 1111
1112 host->last_req_ts = jiffies;
1113 wmb();
697 host->mrq = mrq; 1114 host->mrq = mrq;
698 1115
699 if (mrq->data) { 1116 if (mrq->data) {
@@ -703,10 +1120,14 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
703 } 1120 }
704 1121
705 ret = tmio_mmc_start_command(host, mrq->cmd); 1122 ret = tmio_mmc_start_command(host, mrq->cmd);
706 if (!ret) 1123 if (!ret) {
1124 schedule_delayed_work(&host->delayed_reset_work,
1125 msecs_to_jiffies(2000));
707 return; 1126 return;
1127 }
708 1128
709fail: 1129fail:
1130 host->mrq = NULL;
710 mrq->cmd->error = ret; 1131 mrq->cmd->error = ret;
711 mmc_request_done(mmc, mrq); 1132 mmc_request_done(mmc, mrq);
712} 1133}
@@ -780,6 +1201,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
780 .set_ios = tmio_mmc_set_ios, 1201 .set_ios = tmio_mmc_set_ios,
781 .get_ro = tmio_mmc_get_ro, 1202 .get_ro = tmio_mmc_get_ro,
782 .get_cd = tmio_mmc_get_cd, 1203 .get_cd = tmio_mmc_get_cd,
1204 .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
783}; 1205};
784 1206
785#ifdef CONFIG_PM 1207#ifdef CONFIG_PM
@@ -864,10 +1286,15 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
864 goto host_free; 1286 goto host_free;
865 1287
866 mmc->ops = &tmio_mmc_ops; 1288 mmc->ops = &tmio_mmc_ops;
867 mmc->caps = MMC_CAP_4_BIT_DATA; 1289 mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
868 mmc->caps |= pdata->capabilities;
869 mmc->f_max = pdata->hclk; 1290 mmc->f_max = pdata->hclk;
870 mmc->f_min = mmc->f_max / 512; 1291 mmc->f_min = mmc->f_max / 512;
1292 mmc->max_segs = 32;
1293 mmc->max_blk_size = 512;
1294 mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
1295 mmc->max_segs;
1296 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1297 mmc->max_seg_size = mmc->max_req_size;
871 if (pdata->ocr_mask) 1298 if (pdata->ocr_mask)
872 mmc->ocr_avail = pdata->ocr_mask; 1299 mmc->ocr_avail = pdata->ocr_mask;
873 else 1300 else
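With the common 4 KiB PAGE_CACHE_SIZE, the new request geometry set just above multiplies out as below; on larger page sizes only max_blk_count and the limits derived from it grow.

	/*
	 * max_segs      = 32
	 * max_blk_size  = 512
	 * max_blk_count = (4096 / 512) * 32 = 256
	 * max_req_size  = 512 * 256 = 128 KiB
	 * max_seg_size  = max_req_size = 128 KiB
	 */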
@@ -890,12 +1317,19 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
890 goto cell_disable; 1317 goto cell_disable;
891 1318
892 disable_mmc_irqs(host, TMIO_MASK_ALL); 1319 disable_mmc_irqs(host, TMIO_MASK_ALL);
1320 if (pdata->flags & TMIO_MMC_SDIO_IRQ)
1321 tmio_mmc_enable_sdio_irq(mmc, 0);
893 1322
894 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | 1323 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
895 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); 1324 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
896 if (ret) 1325 if (ret)
897 goto cell_disable; 1326 goto cell_disable;
898 1327
1328 spin_lock_init(&host->lock);
1329
1330 /* Init delayed work for request timeouts */
1331 INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
1332
899 /* See if we also get DMA */ 1333 /* See if we also get DMA */
900 tmio_mmc_request_dma(host, pdata); 1334 tmio_mmc_request_dma(host, pdata);
901 1335
@@ -934,6 +1368,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
934 if (mmc) { 1368 if (mmc) {
935 struct tmio_mmc_host *host = mmc_priv(mmc); 1369 struct tmio_mmc_host *host = mmc_priv(mmc);
936 mmc_remove_host(mmc); 1370 mmc_remove_host(mmc);
1371 cancel_delayed_work_sync(&host->delayed_reset_work);
937 tmio_mmc_release_dma(host); 1372 tmio_mmc_release_dma(host);
938 free_irq(host->irq, host); 1373 free_irq(host->irq, host);
939 if (cell->disable) 1374 if (cell->disable)
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
deleted file mode 100644
index 0fedc78e3ea5..000000000000
--- a/drivers/mmc/host/tmio_mmc.h
+++ /dev/null
@@ -1,228 +0,0 @@
 1/* Definitions for use with tmio_mmc.c
2 *
3 * (c) 2004 Ian Molton <spyro@f2s.com>
4 * (c) 2007 Ian Molton <spyro@f2s.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/highmem.h>
13#include <linux/interrupt.h>
14#include <linux/dmaengine.h>
15
16#define CTL_SD_CMD 0x00
17#define CTL_ARG_REG 0x04
18#define CTL_STOP_INTERNAL_ACTION 0x08
19#define CTL_XFER_BLK_COUNT 0xa
20#define CTL_RESPONSE 0x0c
21#define CTL_STATUS 0x1c
22#define CTL_IRQ_MASK 0x20
23#define CTL_SD_CARD_CLK_CTL 0x24
24#define CTL_SD_XFER_LEN 0x26
25#define CTL_SD_MEM_CARD_OPT 0x28
26#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
27#define CTL_SD_DATA_PORT 0x30
28#define CTL_TRANSACTION_CTL 0x34
29#define CTL_RESET_SD 0xe0
30#define CTL_SDIO_REGS 0x100
31#define CTL_CLK_AND_WAIT_CTL 0x138
32#define CTL_RESET_SDIO 0x1e0
33
34/* Definitions for values the CTRL_STATUS register can take. */
35#define TMIO_STAT_CMDRESPEND 0x00000001
36#define TMIO_STAT_DATAEND 0x00000004
37#define TMIO_STAT_CARD_REMOVE 0x00000008
38#define TMIO_STAT_CARD_INSERT 0x00000010
39#define TMIO_STAT_SIGSTATE 0x00000020
40#define TMIO_STAT_WRPROTECT 0x00000080
41#define TMIO_STAT_CARD_REMOVE_A 0x00000100
42#define TMIO_STAT_CARD_INSERT_A 0x00000200
43#define TMIO_STAT_SIGSTATE_A 0x00000400
44#define TMIO_STAT_CMD_IDX_ERR 0x00010000
45#define TMIO_STAT_CRCFAIL 0x00020000
46#define TMIO_STAT_STOPBIT_ERR 0x00040000
47#define TMIO_STAT_DATATIMEOUT 0x00080000
48#define TMIO_STAT_RXOVERFLOW 0x00100000
49#define TMIO_STAT_TXUNDERRUN 0x00200000
50#define TMIO_STAT_CMDTIMEOUT 0x00400000
51#define TMIO_STAT_RXRDY 0x01000000
52#define TMIO_STAT_TXRQ 0x02000000
53#define TMIO_STAT_ILL_FUNC 0x20000000
54#define TMIO_STAT_CMD_BUSY 0x40000000
55#define TMIO_STAT_ILL_ACCESS 0x80000000
56
57/* Define some IRQ masks */
58/* This is the mask used at reset by the chip */
59#define TMIO_MASK_ALL 0x837f031d
60#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
61#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
62#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
63 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
64#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
65
66
67#define enable_mmc_irqs(host, i) \
68 do { \
69 u32 mask;\
70 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
71 mask &= ~((i) & TMIO_MASK_IRQ); \
72 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
73 } while (0)
74
75#define disable_mmc_irqs(host, i) \
76 do { \
77 u32 mask;\
78 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
79 mask |= ((i) & TMIO_MASK_IRQ); \
80 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
81 } while (0)
82
83#define ack_mmc_irqs(host, i) \
84 do { \
85 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
86 } while (0)
87
88
89struct tmio_mmc_host {
90 void __iomem *ctl;
91 unsigned long bus_shift;
92 struct mmc_command *cmd;
93 struct mmc_request *mrq;
94 struct mmc_data *data;
95 struct mmc_host *mmc;
96 int irq;
97
98 /* Callbacks for clock / power control */
99 void (*set_pwr)(struct platform_device *host, int state);
100 void (*set_clk_div)(struct platform_device *host, int state);
101
102 /* pio related stuff */
103 struct scatterlist *sg_ptr;
104 unsigned int sg_len;
105 unsigned int sg_off;
106
107 struct platform_device *pdev;
108
109 /* DMA support */
110 struct dma_chan *chan_rx;
111 struct dma_chan *chan_tx;
112 struct tasklet_struct dma_complete;
113 struct tasklet_struct dma_issue;
114#ifdef CONFIG_TMIO_MMC_DMA
115 struct dma_async_tx_descriptor *desc;
116 unsigned int dma_sglen;
117 dma_cookie_t cookie;
118#endif
119};
120
121#include <linux/io.h>
122
123static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
124{
125 return readw(host->ctl + (addr << host->bus_shift));
126}
127
128static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
129 u16 *buf, int count)
130{
131 readsw(host->ctl + (addr << host->bus_shift), buf, count);
132}
133
134static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
135{
136 return readw(host->ctl + (addr << host->bus_shift)) |
137 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
138}
139
140static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
141 u16 val)
142{
143 writew(val, host->ctl + (addr << host->bus_shift));
144}
145
146static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
147 u16 *buf, int count)
148{
149 writesw(host->ctl + (addr << host->bus_shift), buf, count);
150}
151
152static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
153 u32 val)
154{
155 writew(val, host->ctl + (addr << host->bus_shift));
156 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
157}
158
159#include <linux/scatterlist.h>
160#include <linux/blkdev.h>
161
162static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
163 struct mmc_data *data)
164{
165 host->sg_len = data->sg_len;
166 host->sg_ptr = data->sg;
167 host->sg_off = 0;
168}
169
170static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
171{
172 host->sg_ptr = sg_next(host->sg_ptr);
173 host->sg_off = 0;
174 return --host->sg_len;
175}
176
177static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
178 unsigned long *flags)
179{
180 local_irq_save(*flags);
181 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
182}
183
184static inline void tmio_mmc_kunmap_atomic(void *virt,
185 unsigned long *flags)
186{
187 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
188 local_irq_restore(*flags);
189}
190
191#ifdef CONFIG_MMC_DEBUG
192
193#define STATUS_TO_TEXT(a) \
194 do { \
195 if (status & TMIO_STAT_##a) \
196 printk(#a); \
197 } while (0)
198
199void pr_debug_status(u32 status)
200{
201 printk(KERN_DEBUG "status: %08x = ", status);
202 STATUS_TO_TEXT(CARD_REMOVE);
203 STATUS_TO_TEXT(CARD_INSERT);
204 STATUS_TO_TEXT(SIGSTATE);
205 STATUS_TO_TEXT(WRPROTECT);
206 STATUS_TO_TEXT(CARD_REMOVE_A);
207 STATUS_TO_TEXT(CARD_INSERT_A);
208 STATUS_TO_TEXT(SIGSTATE_A);
209 STATUS_TO_TEXT(CMD_IDX_ERR);
210 STATUS_TO_TEXT(STOPBIT_ERR);
211 STATUS_TO_TEXT(ILL_FUNC);
212 STATUS_TO_TEXT(CMD_BUSY);
213 STATUS_TO_TEXT(CMDRESPEND);
214 STATUS_TO_TEXT(DATAEND);
215 STATUS_TO_TEXT(CRCFAIL);
216 STATUS_TO_TEXT(DATATIMEOUT);
217 STATUS_TO_TEXT(CMDTIMEOUT);
218 STATUS_TO_TEXT(RXOVERFLOW);
219 STATUS_TO_TEXT(TXUNDERRUN);
220 STATUS_TO_TEXT(RXRDY);
221 STATUS_TO_TEXT(TXRQ);
222 STATUS_TO_TEXT(ILL_ACCESS);
223 printk("\n");
224}
225
226#else
227#define pr_debug_status(s) do { } while (0)
228#endif
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a28d2f..ff652c77a0a5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@ config 68360_ENET
1944config FEC 1944config FEC
1945 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1945 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
1946 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ 1946 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
1947 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 1947 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
1948 select PHYLIB 1948 select PHYLIB
1949 help 1949 help
1950 Say Y here if you want to use the built-in 10/100 Fast ethernet 1950 Say Y here if you want to use the built-in 10/100 Fast ethernet
1951 controller on some Motorola ColdFire and Freescale i.MX processors. 1951 controller on some Motorola ColdFire and Freescale i.MX processors.
1952 1952
1953config FEC2
1954 bool "Second FEC ethernet controller (on some ColdFire CPUs)"
1955 depends on FEC
1956 help
1957 Say Y here if you want to use the second built-in 10/100 Fast
1958 ethernet controller on some Motorola ColdFire processors.
1959
1960config FEC_MPC52xx 1953config FEC_MPC52xx
1961 tristate "MPC52xx FEC driver" 1954 tristate "MPC52xx FEC driver"
1962 depends on PPC_MPC52xx && PPC_BESTCOMM 1955 depends on PPC_MPC52xx && PPC_BESTCOMM
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9d06f6..0b9fc5173aef 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10 10
11#define DRV_VERSION "1.1"
12#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
11#include <linux/init.h> 16#include <linux/init.h>
12#include <linux/module.h> 17#include <linux/module.h>
13#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -41,12 +46,7 @@
41 46
42#include "bfin_mac.h" 47#include "bfin_mac.h"
43 48
44#define DRV_NAME "bfin_mac" 49MODULE_AUTHOR("Bryan Wu, Luke Yang");
45#define DRV_VERSION "1.1"
46#define DRV_AUTHOR "Bryan Wu, Luke Yang"
47#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
48
49MODULE_AUTHOR(DRV_AUTHOR);
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51MODULE_DESCRIPTION(DRV_DESC); 51MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac"); 52MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@ static int desc_list_init(void)
189 /* allocate a new skb for next time receive */ 189 /* allocate a new skb for next time receive */
190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
191 if (!new_skb) { 191 if (!new_skb) {
192 printk(KERN_NOTICE DRV_NAME 192 pr_notice("init: low on mem - packet dropped\n");
193 ": init: low on mem - packet dropped\n");
194 goto init_error; 193 goto init_error;
195 } 194 }
196 skb_reserve(new_skb, NET_IP_ALIGN); 195 skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@ static int desc_list_init(void)
240 239
241init_error: 240init_error:
242 desc_list_free(); 241 desc_list_free();
243 printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); 242 pr_err("kmalloc failed\n");
244 return -ENOMEM; 243 return -ENOMEM;
245} 244}
246 245
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void)
259 while ((bfin_read_EMAC_STAADD()) & STABUSY) { 258 while ((bfin_read_EMAC_STAADD()) & STABUSY) {
260 udelay(1); 259 udelay(1);
261 if (timeout_cnt-- < 0) { 260 if (timeout_cnt-- < 0) {
262 printk(KERN_ERR DRV_NAME 261 pr_err("wait MDC/MDIO transaction to complete timeout\n");
263 ": wait MDC/MDIO transaction to complete timeout\n");
264 return -ETIMEDOUT; 262 return -ETIMEDOUT;
265 } 263 }
266 } 264 }
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev)
350 opmode &= ~RMII_10; 348 opmode &= ~RMII_10;
351 break; 349 break;
352 default: 350 default:
353 printk(KERN_WARNING 351 netdev_warn(dev,
354 "%s: Ack! Speed (%d) is not 10/100!\n", 352 "Ack! Speed (%d) is not 10/100!\n",
355 DRV_NAME, phydev->speed); 353 phydev->speed);
356 break; 354 break;
357 } 355 }
358 bfin_write_EMAC_OPMODE(opmode); 356 bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode)
417 415
418 /* now we are supposed to have a proper phydev, to attach to... */ 416 /* now we are supposed to have a proper phydev, to attach to... */
419 if (!phydev) { 417 if (!phydev) {
420 printk(KERN_INFO "%s: Don't found any phy device at all\n", 418 netdev_err(dev, "no phy device found\n");
421 dev->name);
422 return -ENODEV; 419 return -ENODEV;
423 } 420 }
424 421
425 if (phy_mode != PHY_INTERFACE_MODE_RMII && 422 if (phy_mode != PHY_INTERFACE_MODE_RMII &&
426 phy_mode != PHY_INTERFACE_MODE_MII) { 423 phy_mode != PHY_INTERFACE_MODE_MII) {
427 printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name); 424 netdev_err(dev, "invalid phy interface mode\n");
428 return -EINVAL; 425 return -EINVAL;
429 } 426 }
430 427
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode)
432 0, phy_mode); 429 0, phy_mode);
433 430
434 if (IS_ERR(phydev)) { 431 if (IS_ERR(phydev)) {
435 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 432 netdev_err(dev, "could not attach PHY\n");
436 return PTR_ERR(phydev); 433 return PTR_ERR(phydev);
437 } 434 }
438 435
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode)
453 lp->old_duplex = -1; 450 lp->old_duplex = -1;
454 lp->phydev = phydev; 451 lp->phydev = phydev;
455 452
456 printk(KERN_INFO "%s: attached PHY driver [%s] " 453 pr_info("attached PHY driver [%s] "
457 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" 454 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
458 "@sclk=%dMHz)\n", 455 phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
459 DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, 456 MDC_CLK, mdc_div, sclk/1000000);
460 MDC_CLK, mdc_div, sclk/1000000);
461 457
462 return 0; 458 return 0;
463} 459}
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
502static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, 498static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
503 struct ethtool_drvinfo *info) 499 struct ethtool_drvinfo *info)
504{ 500{
505 strcpy(info->driver, DRV_NAME); 501 strcpy(info->driver, KBUILD_MODNAME);
506 strcpy(info->version, DRV_VERSION); 502 strcpy(info->version, DRV_VERSION);
507 strcpy(info->fw_version, "N/A"); 503 strcpy(info->fw_version, "N/A");
508 strcpy(info->bus_info, dev_name(&dev->dev)); 504 strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
562}; 558};
563 559
564/**************************************************************************/ 560/**************************************************************************/
565void setup_system_regs(struct net_device *dev) 561static void setup_system_regs(struct net_device *dev)
566{ 562{
567 struct bfin_mac_local *lp = netdev_priv(dev); 563 struct bfin_mac_local *lp = netdev_priv(dev);
568 int i; 564 int i;
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev)
592 588
593 bfin_write_EMAC_MMC_CTL(RSTC | CROLL); 589 bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
594 590
 591	/* Set VLAN regs to let 1522-byte-long packets pass through */
592 bfin_write_EMAC_VLAN1(lp->vlan1_mask);
593 bfin_write_EMAC_VLAN2(lp->vlan2_mask);
594
595 /* Initialize the TX DMA channel registers */ 595 /* Initialize the TX DMA channel registers */
596 bfin_write_DMA2_X_COUNT(0); 596 bfin_write_DMA2_X_COUNT(0);
597 bfin_write_DMA2_X_MODIFY(4); 597 bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) 827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
828 udelay(1); 828 udelay(1);
829 if (timeout_cnt == 0) 829 if (timeout_cnt == 0)
 830	printk(KERN_ERR DRV_NAME 830 netdev_err(netdev, "timestamping the TX packet failed\n");
831 ": fails to timestamp the TX packet\n");
832 else { 831 else {
833 struct skb_shared_hwtstamps shhwtstamps; 832 struct skb_shared_hwtstamps shhwtstamps;
834 u64 ns; 833 u64 ns;
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev)
1083 * in which case we simply drop the packet 1082 * in which case we simply drop the packet
1084 */ 1083 */
1085 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { 1084 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
1086 printk(KERN_NOTICE DRV_NAME 1085 netdev_notice(dev, "rx: receive error - packet dropped\n");
1087 ": rx: receive error - packet dropped\n");
1088 dev->stats.rx_dropped++; 1086 dev->stats.rx_dropped++;
1089 goto out; 1087 goto out;
1090 } 1088 }
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev)
1094 1092
1095 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 1093 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
1096 if (!new_skb) { 1094 if (!new_skb) {
1097 printk(KERN_NOTICE DRV_NAME 1095 netdev_notice(dev, "rx: low on mem - packet dropped\n");
1098 ": rx: low on mem - packet dropped\n");
1099 dev->stats.rx_dropped++; 1096 dev->stats.rx_dropped++;
1100 goto out; 1097 goto out;
1101 } 1098 }
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev)
1213 int ret; 1210 int ret;
1214 u32 opmode; 1211 u32 opmode;
1215 1212
1216 pr_debug("%s: %s\n", DRV_NAME, __func__); 1213 pr_debug("%s\n", __func__);
1217 1214
1218 /* Set RX DMA */ 1215 /* Set RX DMA */
1219 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); 1216 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1323,7 +1320,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
1323 u32 sysctl; 1320 u32 sysctl;
1324 1321
1325 if (dev->flags & IFF_PROMISC) { 1322 if (dev->flags & IFF_PROMISC) {
1326 printk(KERN_INFO "%s: set to promisc mode\n", dev->name); 1323 netdev_info(dev, "set promisc mode\n");
1327 sysctl = bfin_read_EMAC_OPMODE(); 1324 sysctl = bfin_read_EMAC_OPMODE();
1328 sysctl |= PR; 1325 sysctl |= PR;
1329 bfin_write_EMAC_OPMODE(sysctl); 1326 bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1390,7 @@ static int bfin_mac_open(struct net_device *dev)
1393 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1390 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1394 */ 1391 */
1395 if (!is_valid_ether_addr(dev->dev_addr)) { 1392 if (!is_valid_ether_addr(dev->dev_addr)) {
1396 printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); 1393 netdev_warn(dev, "no valid ethernet hw addr\n");
1397 return -EINVAL; 1394 return -EINVAL;
1398 } 1395 }
1399 1396
@@ -1527,6 +1524,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1527 goto out_err_mii_probe; 1524 goto out_err_mii_probe;
1528 } 1525 }
1529 1526
1527 lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
1528 lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
1529
1530 /* Fill in the fields of the device structure with ethernet values. */ 1530 /* Fill in the fields of the device structure with ethernet values. */
1531 ether_setup(ndev); 1531 ether_setup(ndev);
1532 1532
@@ -1558,7 +1558,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1558 bfin_mac_hwtstamp_init(ndev); 1558 bfin_mac_hwtstamp_init(ndev);
1559 1559
1560 /* now, print out the card info, in a short format.. */ 1560 /* now, print out the card info, in a short format.. */
1561 dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); 1561 netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1562 1562
1563 return 0; 1563 return 0;
1564 1564
@@ -1650,7 +1650,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
1650 * so set the GPIO pins to Ethernet mode 1650 * so set the GPIO pins to Ethernet mode
1651 */ 1651 */
1652 pin_req = mii_bus_pd->mac_peripherals; 1652 pin_req = mii_bus_pd->mac_peripherals;
1653 rc = peripheral_request_list(pin_req, DRV_NAME); 1653 rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
1654 if (rc) { 1654 if (rc) {
1655 dev_err(&pdev->dev, "Requesting peripherals failed!\n"); 1655 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1656 return rc; 1656 return rc;
@@ -1739,7 +1739,7 @@ static struct platform_driver bfin_mac_driver = {
1739 .resume = bfin_mac_resume, 1739 .resume = bfin_mac_resume,
1740 .suspend = bfin_mac_suspend, 1740 .suspend = bfin_mac_suspend,
1741 .driver = { 1741 .driver = {
1742 .name = DRV_NAME, 1742 .name = KBUILD_MODNAME,
1743 .owner = THIS_MODULE, 1743 .owner = THIS_MODULE,
1744 }, 1744 },
1745}; 1745};
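
The bfin_mac.c hunks above are one mechanical conversion: every printk() that hand-built a prefix from DRV_NAME or dev->name becomes the matching netdev_*() helper, which derives the prefix from the net_device itself. A minimal sketch of the idiom, using an illustrative function and message rather than the driver's own:

#include <linux/printk.h>
#include <linux/netdevice.h>

/* Old style: each call site formats the device name by hand. */
static void link_lost_old(struct net_device *dev)
{
	printk(KERN_ERR "%s: link lost\n", dev->name);
}

/* New style: netdev_err() prefixes the driver and device name
 * itself, so the format string carries only the event. */
static void link_lost_new(struct net_device *dev)
{
	netdev_err(dev, "link lost\n");
}

Where no net_device is at hand (the pr_debug() in bfin_mac_enable() above), the plain pr_*() helpers are used instead, and the private DRV_NAME string gives way to KBUILD_MODNAME.
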
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68bed2365..f8559ac9a403 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/bfin_mac.h> 18#include <linux/bfin_mac.h>
19 19
20/*
21 * Disable hardware checksum for bug #5600 if writeback cache is
 22 * enabled. Otherwise, corrupted RX packets will be sent up the stack
 23 * without an error mark.
24 */
25#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
20#define BFIN_MAC_CSUM_OFFLOAD 26#define BFIN_MAC_CSUM_OFFLOAD
27#endif
21 28
22#define TX_RECLAIM_JIFFIES (HZ / 5) 29#define TX_RECLAIM_JIFFIES (HZ / 5)
23 30
@@ -68,7 +75,6 @@ struct bfin_mac_local {
68 */ 75 */
69 struct net_device_stats stats; 76 struct net_device_stats stats;
70 77
71 unsigned char Mac[6]; /* MAC address of the board */
72 spinlock_t lock; 78 spinlock_t lock;
73 79
74 int wol; /* Wake On Lan */ 80 int wol; /* Wake On Lan */
@@ -76,6 +82,9 @@ struct bfin_mac_local {
76 struct timer_list tx_reclaim_timer; 82 struct timer_list tx_reclaim_timer;
77 struct net_device *ndev; 83 struct net_device *ndev;
78 84
 85 /* Data for EMAC_VLAN1/EMAC_VLAN2 regs */
86 u16 vlan1_mask, vlan2_mask;
87
79 /* MII and PHY stuffs */ 88 /* MII and PHY stuffs */
80 int old_link; /* used by bf537_adjust_link */ 89 int old_link; /* used by bf537_adjust_link */
81 int old_speed; 90 int old_speed;
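
The new vlan1_mask/vlan2_mask fields pair with the probe and setup_system_regs() hunks in bfin_mac.c above: probe folds the 802.1Q TPID into the board-supplied mask, and setup writes the result to EMAC_VLAN1/EMAC_VLAN2 so 1522-byte VLAN-tagged frames pass the filter. A sketch of that computation, with a hypothetical platform-data struct standing in for the driver's mii_bus_data:

#include <linux/types.h>
#include <linux/if_ether.h>	/* ETH_P_8021Q == 0x8100 */
#include "bfin_mac.h"		/* struct bfin_mac_local, shown above */

/* Hypothetical stand-in for the board platform data used in probe. */
struct example_pdata {
	u16 vlan1_mask;
	u16 vlan2_mask;
};

/* Fold the 802.1Q ethertype into the board masks, as the probe hunk
 * does before setup_system_regs() writes them to the EMAC_VLAN
 * registers. */
static void example_fill_vlan_masks(struct bfin_mac_local *lp,
				    const struct example_pdata *pd)
{
	lp->vlan1_mask = ETH_P_8021Q | pd->vlan1_mask;
	lp->vlan2_mask = ETH_P_8021Q | pd->vlan2_mask;
}
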
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 77d6c8d6d86b..6a858a29db56 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -636,6 +636,7 @@ struct bnx2x_common {
636 636
637#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) 637#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
638#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) 638#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
639#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
639 640
640 int flash_size; 641 int flash_size;
641#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ 642#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25ca9e5..fb3ff7c4d7ca 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
1/* bnx2x_dump.h: Broadcom Everest network driver. 1/* bnx2x_dump.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2009 Broadcom Corporation 3 * Copyright (c) 2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * Unless you and Broadcom execute a separate written software license
6 * it under the terms of the GNU General Public License as published by 6 * agreement governing use of this software, this software is licensed to you
7 * the Free Software Foundation. 7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
8 */ 14 */
9 15
10 16
@@ -17,53 +23,53 @@
17#define BNX2X_DUMP_H 23#define BNX2X_DUMP_H
18 24
19 25
20struct dump_sign {
21 u32 time_stamp;
22 u32 diag_ver;
23 u32 grc_dump_ver;
24};
25 26
26#define TSTORM_WAITP_ADDR 0x1b8a80 27/* definitions */
27#define CSTORM_WAITP_ADDR 0x238a80 28#define XSTORM_WAITP_ADDR 0x2b8a80
28#define XSTORM_WAITP_ADDR 0x2b8a80 29#define TSTORM_WAITP_ADDR 0x1b8a80
29#define USTORM_WAITP_ADDR 0x338a80 30#define USTORM_WAITP_ADDR 0x338a80
30#define TSTORM_CAM_MODE 0x1b1440 31#define CSTORM_WAITP_ADDR 0x238a80
32#define TSTORM_CAM_MODE 0x1B1440
31 33
32#define RI_E1 0x1 34#define MAX_TIMER_PENDING 200
33#define RI_E1H 0x2 35#define TIMER_SCAN_DONT_CARE 0xFF
36#define RI_E1 0x1
37#define RI_E1H 0x2
34#define RI_E2 0x4 38#define RI_E2 0x4
35#define RI_ONLINE 0x100 39#define RI_ONLINE 0x100
36#define RI_PATH0_DUMP 0x200 40#define RI_PATH0_DUMP 0x200
37#define RI_PATH1_DUMP 0x400 41#define RI_PATH1_DUMP 0x400
38#define RI_E1_OFFLINE (RI_E1) 42#define RI_E1_OFFLINE (RI_E1)
39#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) 43#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
40#define RI_E1H_OFFLINE (RI_E1H) 44#define RI_E1H_OFFLINE (RI_E1H)
41#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) 45#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
42#define RI_E2_OFFLINE (RI_E2) 46#define RI_E2_OFFLINE (RI_E2)
43#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) 47#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
44#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) 48#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
45#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) 49#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
46#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) 50#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
47#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) 51#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
48#define RI_E1E2_OFFLINE (RI_E2 | RI_E1) 52#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
49#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) 53#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
50#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) 54#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
51#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) 55#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
52
53#define MAX_TIMER_PENDING 200
54#define TIMER_SCAN_DONT_CARE 0xFF
55 56
57struct dump_sign {
58 u32 time_stamp;
59 u32 diag_ver;
60 u32 grc_dump_ver;
61};
56 62
57struct dump_hdr { 63struct dump_hdr {
58 u32 hdr_size; /* in dwords, excluding this field */ 64 u32 hdr_size; /* in dwords, excluding this field */
59 struct dump_sign dump_sign; 65 struct dump_sign dump_sign;
60 u32 xstorm_waitp; 66 u32 xstorm_waitp;
61 u32 tstorm_waitp; 67 u32 tstorm_waitp;
62 u32 ustorm_waitp; 68 u32 ustorm_waitp;
63 u32 cstorm_waitp; 69 u32 cstorm_waitp;
64 u16 info; 70 u16 info;
65 u8 idle_chk; 71 u8 idle_chk;
66 u8 reserved; 72 u8 reserved;
67}; 73};
68 74
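
Because hdr_size is counted in dwords and excludes itself, a consumer of a dump file can locate the captured register data straight from word 0. A minimal sketch, assuming the dump is read as an array of 32-bit words laid out per struct dump_hdr:

#include <stdint.h>

/* words[0] is hdr_size (in dwords, excluding that field itself), so
 * the register payload begins 1 + hdr_size words into the file. */
static const uint32_t *example_dump_payload(const uint32_t *words)
{
	return words + 1 + words[0];
}
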
69struct reg_addr { 75struct reg_addr {
@@ -80,202 +86,185 @@ struct wreg_addr {
80 u16 info; 86 u16 info;
81}; 87};
82 88
83 89#define REGS_COUNT 834
84#define REGS_COUNT 558
85static const struct reg_addr reg_addrs[REGS_COUNT] = { 90static const struct reg_addr reg_addrs[REGS_COUNT] = {
86 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, 91 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
87 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, 92 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
88 { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE }, 93 { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
89 { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE }, 94 { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
90 { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE }, 95 { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
91 { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE }, 96 { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
92 { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE }, 97 { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
93 { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE }, 98 { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
94 { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, 99 { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
95 { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, 100 { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
96 { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, 101 { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
97 { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE }, 102 { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
98 { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE }, 103 { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
99 { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE }, 104 { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
100 { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, 105 { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
101 { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE }, 106 { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
102 { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE }, 107 { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
103 { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE }, 108 { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
104 { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, 109 { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
105 { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE }, 110 { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
106 { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE }, 111 { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
107 { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE }, 112 { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
108 { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE }, 113 { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
109 { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE }, 114 { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
110 { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE }, 115 { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
111 { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE }, 116 { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
112 { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE }, 117 { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
113 { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE }, 118 { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
114 { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE }, 119 { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
115 { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE }, 120 { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
116 { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE }, 121 { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
117 { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE }, 122 { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
118 { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE }, 123 { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
119 { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE }, 124 { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
120 { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, 125 { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
121 { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE }, 126 { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
122 { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE }, 127 { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
123 { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, 128 { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
124 { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, 129 { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
125 { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, 130 { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
126 { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, 131 { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
127 { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, 132 { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
128 { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE }, 133 { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
129 { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE }, 134 { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
130 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE }, 135 { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
131 { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE }, 136 { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
132 { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, 137 { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
138 { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
139 { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
140 { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
141 { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
142 { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
143 { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
144 { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
145 { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
146 { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
147 { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
148 { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
149 { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
150 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
151 { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
152 { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
133 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, 153 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
134 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, 154 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
135 { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE }, 155 { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
136 { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE }, 156 { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
137 { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE }, 157 { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
138 { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE }, 158 { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
139 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE }, 159 { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
140 { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE }, 160 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
141 { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE }, 161 { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
142 { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE }, 162 { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
143 { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE }, 163 { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
144 { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE }, 164 { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
165 { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
166 { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
167 { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
145 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, 168 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
146 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, 169 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
147 { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE }, 170 { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
148 { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE }, 171 { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
149 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, 172 { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
173 { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
174 { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
175 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
176 { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
177 { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
150 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, 178 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
151 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE }, 179 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
152 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE }, 180 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
153 { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE }, 181 { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
154 { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE }, 182 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
155 { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE }, 183 { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
156 { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE }, 184 { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
157 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
158 { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
159 { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
160 { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
161 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, 185 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
162 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, 186 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
163 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE }, 187 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
164 { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE }, 188 { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
165 { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE }, 189 { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
166 { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE }, 190 { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
167 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, 191 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
168 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, 192 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
169 { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE }, 193 { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
170 { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE }, 194 { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
171 { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE }, 195 { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
172 { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE }, 196 { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
173 { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE }, 197 { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
174 { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE }, 198 { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
175 { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE }, 199 { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
176 { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE }, 200 { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
177 { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, 201 { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
178 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE }, 202 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
179 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, 203 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
180 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, 204 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
181 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, 205 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
182 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, 206 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
183 { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE }, 207 { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
184 { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, 208 { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
185 { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, 209 { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
186 { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
187 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, 210 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
188 { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, 211 { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
189 { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE }, 212 { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
190 { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE }, 213 { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
214 { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
191 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, 215 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
192 { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE }, 216 { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
193 { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, 217 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
194 { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 218 { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
195 { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE }, 219 { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
196 { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE }, 220 { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
197 { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 221 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
198 { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE }, 222 { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
199 { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 223 { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
200 { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE }, 224 { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
201 { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 225 { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
202 { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE }, 226 { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
203 { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 227 { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
204 { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE }, 228 { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
205 { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 229 { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
206 { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE }, 230 { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
207 { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 231 { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
208 { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE }, 232 { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
209 { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 233 { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
210 { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE }, 234 { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
211 { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE }, 235 { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
212 { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE }, 236 { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
213 { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE }, 237 { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
214 { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE }, 238 { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
215 { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE }, 239 { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
216 { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE }, 240 { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
217 { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE }, 241 { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
218 { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE }, 242 { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
219 { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE }, 243 { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
220 { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE }, 244 { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
221 { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, 245 { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
222 { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE }, 246 { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
223 { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, 247 { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
224 { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE }, 248 { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
225 { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, 249 { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
226 { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE }, 250 { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
227 { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE }, 251 { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
228 { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE }, 252 { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
229 { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE }, 253 { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
230 { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE }, 254 { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
231 { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE }, 255 { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
232 { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE }, 256 { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
233 { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE }, 257 { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
234 { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE }, 258 { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
235 { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE }, 259 { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
236 { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE }, 260 { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
237 { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, 261 { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
238 { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE }, 262 { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
239 { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, 263 { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
240 { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE }, 264 { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
241 { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, 265 { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
242 { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE }, 266 { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
243 { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE }, 267 { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
244 { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
245 { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
246 { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
247 { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
248 { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
249 { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
250 { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
251 { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
252 { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
253 { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
254 { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
255 { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
256 { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
257 { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
258 { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
259 { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
260 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
261 { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
262 { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
263 { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
264 { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
265 { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
266 { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
267 { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
268 { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
269 { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
270 { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
271 { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
272 { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
273 { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
274 { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
275 { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
276 { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
277 { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
278 { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
279 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, 268 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
280 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, 269 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
281 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, 270 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = {
284 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, 273 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
285 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, 274 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
286 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, 275 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
287 { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE }, 276 { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
288 { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
289 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, 277 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
290 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE }, 278 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
291 { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE }, 279 { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
292 { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE }, 280 { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
293 { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE }, 281 { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
294 { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE }, 282 { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
295 { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE }, 283 { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
296 { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, 284 { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
297 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE }, 285 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
298 { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE }, 286 { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
299 { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, 287 { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
300 { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, 288 { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
301 { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE }, 289 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
302 { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE }, 290 { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
303 { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE }, 291 { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
304 { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, 292 { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
305 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE }, 293 { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
294 { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
295 { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
296 { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
297 { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
298 { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
299 { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
300 { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
301 { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
302 { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
303 { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
304 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
306 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, 305 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
307 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE }, 306 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
308 { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE }, 307 { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
309 { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE }, 308 { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
310 { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE }, 309 { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
311 { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, 310 { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
312 { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE }, 311 { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
313 { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, 312 { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
313 { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
314 { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
315 { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
316 { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
317 { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
318 { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
319 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
320 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
321 { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
322 { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
314 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, 323 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
315 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, 324 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
316 { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, 325 { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
317 { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE }, 326 { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
318 { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE }, 327 { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
319 { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE }, 328 { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
320 { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, 329 { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
321 { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE }, 330 { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
322 { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, 331 { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
323 { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE }, 332 { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
324 { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE }, 333 { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
325 { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE }, 334 { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
326 { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE }, 335 { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
327 { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, 336 { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
328 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE }, 337 { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
329 { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE }, 338 { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
330 { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE }, 339 { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
331 { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE }, 340 { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
332 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE }, 341 { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
333 { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE }, 342 { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
334 { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE }, 343 { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
335 { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE }, 344 { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
336 { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE }, 345 { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
337 { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE }, 346 { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
338 { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, 347 { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
339 { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE }, 348 { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
340 { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE }, 349 { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
341 { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE }, 350 { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
342 { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE }, 351 { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
343 { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, 352 { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
344 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE }, 353 { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
345 { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, 354 { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
346 { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, 355 { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
347 { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, 356 { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
348 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, 357 { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
349 { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE }, 358 { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
350 { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE }, 359 { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
351 { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE }, 360 { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
361 { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
362 { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
363 { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
364 { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
365 { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
366 { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
367 { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
368 { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
369 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
370 { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
371 { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
372 { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
373 { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
374 { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
375 { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
376 { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
377 { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
378 { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
379 { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
380 { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
381 { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
382 { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
383 { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
384 { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
385 { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
386 { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
387 { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
388 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
389 { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
390 { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
391 { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
392 { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
393 { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
394 { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
395 { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
396 { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
397 { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
398 { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
399 { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
400 { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
401 { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
402 { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
403 { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
404 { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
405 { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
406 { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
407 { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
408 { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
409 { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
410 { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
411 { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
412 { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
413 { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
414 { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
415 { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
416 { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
417 { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
418 { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
419 { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
420 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
421 { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
422 { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
423 { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
424 { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
425 { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
426 { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
427 { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
428 { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
429 { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
430 { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
431 { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
432 { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
433 { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
434 { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
435 { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
436 { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
437 { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
438 { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
439 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
440 { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
441 { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
442 { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
443 { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
444 { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
445 { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
446 { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
447 { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
448 { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
449 { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
450 { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
451 { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
452 { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
453 { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
454 { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
455 { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
352 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, 456 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
353 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE }, 457 { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
458 { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
459 { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
460 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
354 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, 461 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
355 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE }, 462 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
356 { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE }, 463 { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
357 { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE }, 464 { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
358 { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE }, 465 { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
359 { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE }, 466 { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
360 { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE }, 467 { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
361 { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE }, 468 { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
469 { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
470 { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
471 { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
472 { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
473 { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
474 { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
475 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
476 { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
477 { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
478 { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
362 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, 479 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
363 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, 480 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
364 { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE } 481 { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
482 { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
483 { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
484 { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
485 { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
486 { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
487 { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
488 { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
489 { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
490 { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
491 { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
492 { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
493 { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
494 { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
495 { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
496 { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
497 { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
498 { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
499 { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
500 { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
501 { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
502 { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
503 { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
504 { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
505 { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
365}; 506};
366 507
367 508#define IDLE_REGS_COUNT 237
368#define IDLE_REGS_COUNT 277
369static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { 509static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
370 { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE }, 510 { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
371 { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, 511 { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
372 { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, 512 { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
513 { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
514 { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
515 { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
516 { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
517 { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
373 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, 518 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
374 { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE }, 519 { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
375 { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, 520 { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
376 { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, 521 { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
377 { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE }, 522 { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
378 { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE }, 523 { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
379 { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE }, 524 { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
380 { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE }, 525 { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
381 { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE }, 526 { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
382 { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE }, 527 { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
383 { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE }, 528 { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
384 { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE }, 529 { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
385 { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE }, 530 { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
386 { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE }, 531 { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
387 { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE }, 532 { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
388 { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE }, 533 { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
389 { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE }, 534 { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
390 { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE }, 535 { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
391 { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE }, 536 { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
392 { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE }, 537 { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
393 { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE }, 538 { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
394 { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE }, 539 { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
395 { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE }, 540 { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
396 { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE }, 541 { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
397 { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE }, 542 { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
398 { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE }, 543 { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
399 { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE }, 544 { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
400 { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE }, 545 { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
401 { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE }, 546 { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
402 { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE }, 547 { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
403 { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE }, 548 { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
404 { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE }, 549 { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
405 { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE }, 550 { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
406 { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE }, 551 { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
552 { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
553 { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
554 { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
555 { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
407 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, 556 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
408 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, 557 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
409 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, 558 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
410 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 559 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
411 { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE }, 560 { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
412 { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 561 { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
413 { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 562 { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
414 { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 563 { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
415 { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 564 { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
416 { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 565 { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
417 { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 566 { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
418 { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 567 { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
419 { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
420 { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
421 { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
422 { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
423 { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
424 { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
425 { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
426 { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
427 { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
428 { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
429 { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
430 { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
431 { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
432 { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
433 { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
434 { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
435 { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
436 { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
437 { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
438 { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
439 { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
440 { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
441 { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
442 { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
443 { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
444 { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
445 { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
446 { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
447 { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
448 { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
449 { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
450 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, 568 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
451 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, 569 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
452 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, 570 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
462 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, 580 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
463 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, 581 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
464 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, 582 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
465 { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE }, 583 { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
466 { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE }, 584 { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
467 { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, 585 { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
468 { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE }, 586 { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
469 { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE }, 587 { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
470 { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE }, 588 { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
471 { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE }, 589 { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
472 { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE }, 590 { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
473 { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE }, 591 { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
474 { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE }, 592 { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
475 { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE }, 593 { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
476 { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE }, 594 { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
477 { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE }, 595 { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
478 { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, 596 { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
597 { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
598 { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
599 { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
600 { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
601 { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
479 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, 602 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
480 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, 603 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
481 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE }, 604 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
482 { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE }, 605 { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
483 { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE }, 606 { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
484 { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE }, 607 { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
485 { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE }, 608 { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
486 { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE }, 609 { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
487 { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE }, 610 { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
488 { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, 611 { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
489 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, 612 { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
490 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE }, 613 { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
491 { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE }, 614 { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
492 { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE }, 615 { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
493 { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE }, 616 { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
494 { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE }, 617 { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
495 { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE }, 618 { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
496 { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE }, 619 { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
497 { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE }, 620 { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
498 { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE }, 621 { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
499 { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE }, 622 { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
500 { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE }, 623 { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
501 { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
502 { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
503 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, 624 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
504 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, 625 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
505 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE }, 626 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
506 { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
507 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, 627 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
508 { 0x3380c0, 1, RI_ALL_ONLINE } 628 { 0x3380c0, 1, RI_ALL_ONLINE }
509}; 629};
@@ -515,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
515 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } 635 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
516}; 636};
517 637
518
519#define WREGS_COUNT_E1H 1 638#define WREGS_COUNT_E1H 1
520static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; 639static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
521 640
@@ -530,22 +649,53 @@ static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
530 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } 649 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
531}; 650};
532 651
533static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; 652static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
534
535 653
536#define TIMER_REGS_COUNT_E1 2 654#define TIMER_REGS_COUNT_E1 2
537static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
538 { 0x164014, 0x164018 };
539static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
540 { 0x1640d0, 0x1640d4 };
541 655
656static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
657 0x164014, 0x164018 };
658static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
659 0x1640d0, 0x1640d4 };
542 660
543#define TIMER_REGS_COUNT_E1H 2 661#define TIMER_REGS_COUNT_E1H 2
544static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
545 { 0x164014, 0x164018 };
546static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
547 { 0x1640d0, 0x1640d4 };
548 662
663static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
664 0x164014, 0x164018 };
665static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
666 0x1640d0, 0x1640d4 };
667
668#define TIMER_REGS_COUNT_E2 2
669
670static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
671 0x164014, 0x164018 };
672static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
673 0x1640d0, 0x1640d4 };
674
675#define PAGE_MODE_VALUES_E1 0
676
677#define PAGE_READ_REGS_E1 0
678
679#define PAGE_WRITE_REGS_E1 0
680
681static const u32 page_vals_e1[] = { 0 };
682
683static const u32 page_write_regs_e1[] = { 0 };
684
685static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
686
687#define PAGE_MODE_VALUES_E1H 0
688
689#define PAGE_READ_REGS_E1H 0
690
691#define PAGE_WRITE_REGS_E1H 0
692
693static const u32 page_vals_e1h[] = { 0 };
694
695static const u32 page_write_regs_e1h[] = { 0 };
696
697static const struct reg_addr page_read_regs_e1h[] = {
698 { 0x0, 0, RI_E1H_ONLINE } };
549 699
550#define PAGE_MODE_VALUES_E2 2 700#define PAGE_MODE_VALUES_E2 2
551 701
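Editor's note: the zero-count E1/E1H page tables above let one chip-agnostic loop walk the page-mode registers, since a loop bounded by PAGE_MODE_VALUES_E1(H) == 0 simply never runs. A minimal sketch of such a loop, assuming struct reg_addr exposes addr/size/info fields and reusing the driver's REG_RD/REG_WR accessors; the helper name and parameter list are hypothetical, and the online check is shown for the E2 case:

static void bnx2x_read_pages_sketch(struct bnx2x *bp, u32 **p,
				    const u32 *page_vals, int n_vals,
				    const u32 *wr_regs, int n_wr,
				    const struct reg_addr *rd_regs, int n_rd)
{
	int i, j, k;
	u32 w;

	for (i = 0; i < n_vals; i++)		/* each page value */
		for (j = 0; j < n_wr; j++) {	/* select the page */
			REG_WR(bp, wr_regs[j], page_vals[i]);
			for (k = 0; k < n_rd; k++)	/* dump mapped regs */
				if (IS_E2_ONLINE(rd_regs[k].info))
					for (w = 0; w < rd_regs[k].size; w++)
						*(*p)++ = REG_RD(bp,
						    rd_regs[k].addr + w * 4);
		}
}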
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 99c672d894ca..5b44a8b48509 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,7 @@
24#include "bnx2x.h" 24#include "bnx2x.h"
25#include "bnx2x_cmn.h" 25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h" 26#include "bnx2x_dump.h"
27#include "bnx2x_init.h"
27 28
28/* Note: in the format strings below %s is replaced by the queue-name which is 29/* Note: in the format strings below %s is replaced by the queue-name which is
29 * either its index or 'fcoe' for the fcoe queue. Make sure the format string 30 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -472,7 +473,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
472{ 473{
473 struct bnx2x *bp = netdev_priv(dev); 474 struct bnx2x *bp = netdev_priv(dev);
474 int regdump_len = 0; 475 int regdump_len = 0;
475 int i; 476 int i, j, k;
476 477
477 if (CHIP_IS_E1(bp)) { 478 if (CHIP_IS_E1(bp)) {
478 for (i = 0; i < REGS_COUNT; i++) 479 for (i = 0; i < REGS_COUNT; i++)
@@ -502,6 +503,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
502 if (IS_E2_ONLINE(wreg_addrs_e2[i].info)) 503 if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
503 regdump_len += wreg_addrs_e2[i].size * 504 regdump_len += wreg_addrs_e2[i].size *
504 (1 + wreg_addrs_e2[i].read_regs_count); 505 (1 + wreg_addrs_e2[i].read_regs_count);
506
507 for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
508 for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
509 for (k = 0; k < PAGE_READ_REGS_E2; k++)
510 if (IS_E2_ONLINE(page_read_regs_e2[k].
511 info))
512 regdump_len +=
513 page_read_regs_e2[k].size;
514 }
505 } 515 }
506 regdump_len *= 4; 516 regdump_len *= 4;
507 regdump_len += sizeof(struct dump_hdr); 517 regdump_len += sizeof(struct dump_hdr);
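Editor's note: in the hunk above each online paged register is counted once per (page value, page-write register) pair, so with PAGE_MODE_VALUES_E2 defined as 2 in bnx2x_dump.h it contributes at least twice; the running total stays in 32-bit words until the regdump_len *= 4 conversion. A worked example with hypothetical sizes, assuming a single page-write register and one online 16-word paged register:

	regdump_len = 2 * 1 * 16;		/* 32 words        */
	regdump_len *= 4;			/* 128 bytes       */
	regdump_len += sizeof(struct dump_hdr);	/* plus the header */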
@@ -539,6 +549,12 @@ static void bnx2x_get_regs(struct net_device *dev,
539 if (!netif_running(bp->dev)) 549 if (!netif_running(bp->dev))
540 return; 550 return;
541 551
 552 /* Disable parity attentions for the duration of the dump, since
 553 * reading registers that were never written may raise false
 554 * alarms. We re-enable parity attentions right after the dump.
 555 */
556 bnx2x_disable_blocks_parity(bp);
557
542 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; 558 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
543 dump_hdr.dump_sign = dump_sign_all; 559 dump_hdr.dump_sign = dump_sign_all;
544 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); 560 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -580,6 +596,10 @@ static void bnx2x_get_regs(struct net_device *dev,
580 596
581 bnx2x_read_pages_regs_e2(bp, p); 597 bnx2x_read_pages_regs_e2(bp, p);
582 } 598 }
599 /* Re-enable parity attentions */
600 bnx2x_clear_blocks_parity(bp);
601 if (CHIP_PARITY_ENABLED(bp))
602 bnx2x_enable_blocks_parity(bp);
583} 603}
584 604
585#define PHY_FW_VER_LEN 20 605#define PHY_FW_VER_LEN 20
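Editor's note: taken together, the two hunks above bracket the register dump with parity control. Condensed, the sequence the diff establishes is:

	bnx2x_disable_blocks_parity(bp);	/* mask attentions: the dump
						 * reads never-written regs */
	/* ... write dump_hdr, read REGS/WREGS and paged registers ... */
	bnx2x_clear_blocks_parity(bp);		/* clear latched parity bits */
	if (CHIP_PARITY_ENABLED(bp))
		bnx2x_enable_blocks_parity(bp);	/* re-arm where supported */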
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d54874a559..5a268e9a0895 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@ struct src_ent {
192 u64 next; 192 u64 next;
193}; 193};
194 194
195/****************************************************************************
196* Parity configuration
197****************************************************************************/
198#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
199{ \
200 block##_REG_##block##_PRTY_MASK, \
201 block##_REG_##block##_PRTY_STS_CLR, \
202 en_mask, {m1, m1h, m2}, #block \
203}
204
205#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
206{ \
207 block##_REG_##block##_PRTY_MASK_0, \
208 block##_REG_##block##_PRTY_STS_CLR_0, \
209 en_mask, {m1, m1h, m2}, #block"_0" \
210}
211
212#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
213{ \
214 block##_REG_##block##_PRTY_MASK_1, \
215 block##_REG_##block##_PRTY_STS_CLR_1, \
216 en_mask, {m1, m1h, m2}, #block"_1" \
217}
218
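/* Editor's note: for instance BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1)
 * below expands to
 *
 *	{ MISC_REG_MISC_PRTY_MASK, MISC_REG_MISC_PRTY_STS_CLR,
 *	  0x1, {0x1, 0x1, 0x1}, "MISC" }
 *
 * i.e. one table row naming the block's parity mask register, its
 * read-clear status register, the attention enable mask, the per-chip
 * valid-bit masks and the block name.
 */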
219static const struct {
220 u32 mask_addr;
221 u32 sts_clr_addr;
222 u32 en_mask; /* Mask to enable parity attentions */
223 struct {
224 u32 e1; /* 57710 */
225 u32 e1h; /* 57711 */
226 u32 e2; /* 57712 */
227 } reg_mask; /* Register mask (all valid bits) */
228 char name[7]; /* Block's longest name is 6 characters long
229 * (name + suffix)
230 */
231} bnx2x_blocks_parity_data[] = {
232 /* bit 19 masked */
233 /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
234 /* bit 5,18,20-31 */
235 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
236 /* bit 5 */
237 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
238 /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
239 /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
240
 241 /* Block IGU, MISC, PXP and PXP2 parity errors, since we don't yet
 242 * want to handle the "system kill" flow.
 243 */
244 BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
248 BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
249 BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
250 BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
251 BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
252 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
253 GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
254 {0xf, 0xf, 0xf}, "UPB"},
255 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
256 GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
257 {0xf, 0xf, 0xf}, "XPB"},
258 BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
259 BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
260 BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
261 BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
262 BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
263 BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
264 BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
265 BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
266 BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
267 BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
268 BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
269 BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
270 BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
271 BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
272 BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
273 BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
274 BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
275 BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
276 BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
277};
278
279
280/* [28] MCP Latched rom_parity
281 * [29] MCP Latched ump_rx_parity
282 * [30] MCP Latched ump_tx_parity
283 * [31] MCP Latched scpad_parity
284 */
285#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
286 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
287 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
288 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
289 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
290
 291/* The registers below control the MCP parity attention output. When
 292 * the MISC_AEU_ENABLE_MCP_PRTY_BITS are set, attentions are
 293 * enabled; when cleared, they are disabled.
294 */
295static const u32 mcp_attn_ctl_regs[] = {
296 MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
297 MISC_REG_AEU_ENABLE4_NIG_0,
298 MISC_REG_AEU_ENABLE4_PXP_0,
299 MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
300 MISC_REG_AEU_ENABLE4_NIG_1,
301 MISC_REG_AEU_ENABLE4_PXP_1
302};
303
304static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
305{
306 int i;
307 u32 reg_val;
308
309 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
310 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
311
312 if (enable)
313 reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
314 else
315 reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
316
317 REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
318 }
319}
320
321static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
322{
323 if (CHIP_IS_E1(bp))
324 return bnx2x_blocks_parity_data[idx].reg_mask.e1;
325 else if (CHIP_IS_E1H(bp))
326 return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
327 else
328 return bnx2x_blocks_parity_data[idx].reg_mask.e2;
329}
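/* Editor's note: e.g. for the QM entry above this returns 0x1ff on E1
 * (57710) but 0xfff on E1H/E2, so mask writes and status reads only
 * touch parity bits that exist on the running chip.
 */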
330
331static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
332{
333 int i;
334
335 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
336 u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
337
338 if (dis_mask) {
339 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
340 dis_mask);
341 DP(NETIF_MSG_HW, "Setting parity mask "
342 "for %s to\t\t0x%x\n",
343 bnx2x_blocks_parity_data[i].name, dis_mask);
344 }
345 }
346
347 /* Disable MCP parity attentions */
348 bnx2x_set_mcp_parity(bp, false);
349}
350
351/**
352 * Clear the parity error status registers.
353 */
354static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
355{
356 int i;
357 u32 reg_val, mcp_aeu_bits =
358 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
359 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
360 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
361 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
362
363 /* Clear SEM_FAST parities */
364 REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
365 REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
366 REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
367 REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
368
369 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
370 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
371
372 if (reg_mask) {
373 reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
374 sts_clr_addr);
375 if (reg_val & reg_mask)
376 DP(NETIF_MSG_HW,
377 "Parity errors in %s: 0x%x\n",
378 bnx2x_blocks_parity_data[i].name,
379 reg_val & reg_mask);
380 }
381 }
382
383 /* Check if there were parity attentions in MCP */
384 reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
385 if (reg_val & mcp_aeu_bits)
386 DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
387 reg_val & mcp_aeu_bits);
388
389 /* Clear parity attentions in MCP:
390 * [7] clears Latched rom_parity
391 * [8] clears Latched ump_rx_parity
392 * [9] clears Latched ump_tx_parity
393 * [10] clears Latched scpad_parity (both ports)
394 */
395 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
396}
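/* Editor's note: 0x780 == 0xf << 7, i.e. bits 7-10 set, matching the
 * four latched MCP parity bits listed in the comment above.
 */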
397
398static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
399{
400 int i;
401
402 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
403 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
404
405 if (reg_mask)
406 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
407 bnx2x_blocks_parity_data[i].en_mask & reg_mask);
408 }
409
410 /* Enable MCP parity attentions */
411 bnx2x_set_mcp_parity(bp, true);
412}
413
414
195#endif /* BNX2X_INIT_H */ 415#endif /* BNX2X_INIT_H */
196 416
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 489a5512a04d..84e1af4d65e1 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -3152,7 +3152,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3152#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 3152#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3153#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) 3153#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3154#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS 3154#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3155#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3156 3155
3157/* 3156/*
3158 * should be run under rtnl lock 3157 * should be run under rtnl lock
@@ -3527,7 +3526,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3527 try to handle this event */ 3526 try to handle this event */
3528 bnx2x_acquire_alr(bp); 3527 bnx2x_acquire_alr(bp);
3529 3528
3530 if (bnx2x_chk_parity_attn(bp)) { 3529 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3531 bp->recovery_state = BNX2X_RECOVERY_INIT; 3530 bp->recovery_state = BNX2X_RECOVERY_INIT;
3532 bnx2x_set_reset_in_progress(bp); 3531 bnx2x_set_reset_in_progress(bp);
3533 schedule_delayed_work(&bp->reset_task, 0); 3532 schedule_delayed_work(&bp->reset_task, 0);
@@ -4754,7 +4753,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4754 return 0; /* OK */ 4753 return 0; /* OK */
4755} 4754}
4756 4755
4757static void enable_blocks_attention(struct bnx2x *bp) 4756static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4758{ 4757{
4759 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4758 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4760 if (CHIP_IS_E2(bp)) 4759 if (CHIP_IS_E2(bp))
@@ -4808,53 +4807,9 @@ static void enable_blocks_attention(struct bnx2x *bp)
4808 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 4807 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4809 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 4808 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4810/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 4809/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4811 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 4810 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
4812} 4811}
4813 4812
4814static const struct {
4815 u32 addr;
4816 u32 mask;
4817} bnx2x_parity_mask[] = {
4818 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4819 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4820 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4821 {HC_REG_HC_PRTY_MASK, 0x7},
4822 {MISC_REG_MISC_PRTY_MASK, 0x1},
4823 {QM_REG_QM_PRTY_MASK, 0x0},
4824 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4825 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4826 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4827 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4828 {CDU_REG_CDU_PRTY_MASK, 0x0},
4829 {CFC_REG_CFC_PRTY_MASK, 0x0},
4830 {DBG_REG_DBG_PRTY_MASK, 0x0},
4831 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4832 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4833 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4834 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4835 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4836 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4837 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4838 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4839 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4840 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4841 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4842 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4843 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4844 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4845 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4846};
4847
4848static void enable_blocks_parity(struct bnx2x *bp)
4849{
4850 int i;
4851
4852 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4853 REG_WR(bp, bnx2x_parity_mask[i].addr,
4854 bnx2x_parity_mask[i].mask);
4855}
4856
4857
4858static void bnx2x_reset_common(struct bnx2x *bp) 4813static void bnx2x_reset_common(struct bnx2x *bp)
4859{ 4814{
4860 /* reset_common */ 4815 /* reset_common */
@@ -5350,9 +5305,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5350 /* clear PXP2 attentions */ 5305 /* clear PXP2 attentions */
5351 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 5306 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5352 5307
5353 enable_blocks_attention(bp); 5308 bnx2x_enable_blocks_attention(bp);
5354 if (CHIP_PARITY_SUPPORTED(bp)) 5309 if (CHIP_PARITY_ENABLED(bp))
5355 enable_blocks_parity(bp); 5310 bnx2x_enable_blocks_parity(bp);
5356 5311
5357 if (!BP_NOMCP(bp)) { 5312 if (!BP_NOMCP(bp)) {
5358 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 5313 /* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -8751,13 +8706,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8751 dev_err(&bp->pdev->dev, "MCP disabled, " 8706 dev_err(&bp->pdev->dev, "MCP disabled, "
8752 "must load devices in order!\n"); 8707 "must load devices in order!\n");
8753 8708
8754 /* Set multi queue mode */
8755 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8756 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8757 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8758 "requested is not MSI-X\n");
8759 multi_mode = ETH_RSS_MODE_DISABLED;
8760 }
8761 bp->multi_mode = multi_mode; 8709 bp->multi_mode = multi_mode;
8762 bp->int_mode = int_mode; 8710 bp->int_mode = int_mode;
8763 8711
@@ -9560,9 +9508,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9560 /* Delete all NAPI objects */ 9508 /* Delete all NAPI objects */
9561 bnx2x_del_all_napi(bp); 9509 bnx2x_del_all_napi(bp);
9562 9510
9511 /* Power on: we can't let PCI layer write to us while we are in D3 */
9512 bnx2x_set_power_state(bp, PCI_D0);
9513
9563 /* Disable MSI/MSI-X */ 9514 /* Disable MSI/MSI-X */
9564 bnx2x_disable_msi(bp); 9515 bnx2x_disable_msi(bp);
9565 9516
9517 /* Power off */
9518 bnx2x_set_power_state(bp, PCI_D3hot);
9519
9566 /* Make sure RESET task is not scheduled before continuing */ 9520 /* Make sure RESET task is not scheduled before continuing */
9567 cancel_delayed_work_sync(&bp->reset_task); 9521 cancel_delayed_work_sync(&bp->reset_task);
9568 9522
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index bfd875b72906..38ef7ca9f21d 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
18 * WR - Write Clear (write 1 to clear the bit) 18 * WR - Write Clear (write 1 to clear the bit)
19 * 19 *
20 */ 20 */
21#ifndef BNX2X_REG_H
22#define BNX2X_REG_H
21 23
22#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 24#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
23#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) 25#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
@@ -39,6 +41,8 @@
39#define BRB1_REG_BRB1_PRTY_MASK 0x60138 41#define BRB1_REG_BRB1_PRTY_MASK 0x60138
40/* [R 4] Parity register #0 read */ 42/* [R 4] Parity register #0 read */
41#define BRB1_REG_BRB1_PRTY_STS 0x6012c 43#define BRB1_REG_BRB1_PRTY_STS 0x6012c
44/* [RC 4] Parity register #0 read clear */
45#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
42/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 46/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
43 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 47 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
44 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - 48 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
132#define CCM_REG_CCM_INT_MASK 0xd01e4 136#define CCM_REG_CCM_INT_MASK 0xd01e4
133/* [R 11] Interrupt register #0 read */ 137/* [R 11] Interrupt register #0 read */
134#define CCM_REG_CCM_INT_STS 0xd01d8 138#define CCM_REG_CCM_INT_STS 0xd01d8
139/* [RW 27] Parity mask register #0 read/write */
140#define CCM_REG_CCM_PRTY_MASK 0xd01f4
135/* [R 27] Parity register #0 read */ 141/* [R 27] Parity register #0 read */
136#define CCM_REG_CCM_PRTY_STS 0xd01e8 142#define CCM_REG_CCM_PRTY_STS 0xd01e8
143/* [RC 27] Parity register #0 read clear */
144#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
137/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 145/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
138 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 146 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
139 Is used to determine the number of the AG context REG-pairs written back; 147 Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
350#define CDU_REG_CDU_PRTY_MASK 0x10104c 358#define CDU_REG_CDU_PRTY_MASK 0x10104c
351/* [R 5] Parity register #0 read */ 359/* [R 5] Parity register #0 read */
352#define CDU_REG_CDU_PRTY_STS 0x101040 360#define CDU_REG_CDU_PRTY_STS 0x101040
361/* [RC 5] Parity register #0 read clear */
362#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
 353/* [RC 32] logging of error data in case of a CDU load error: 363/* [RC 32] logging of error data in case of a CDU load error:
 354 {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error; 364 {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error;
 355 type_error; actual_active; actual_compressed_context}; */ 365 type_error; actual_active; actual_compressed_context}; */
@@ -381,6 +391,8 @@
381#define CFC_REG_CFC_PRTY_MASK 0x104118 391#define CFC_REG_CFC_PRTY_MASK 0x104118
382/* [R 4] Parity register #0 read */ 392/* [R 4] Parity register #0 read */
383#define CFC_REG_CFC_PRTY_STS 0x10410c 393#define CFC_REG_CFC_PRTY_STS 0x10410c
394/* [RC 4] Parity register #0 read clear */
395#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
 384/* [RW 21] CID CAM access (21:1 - Data; valid - 0) */ 396/* [RW 21] CID CAM access (21:1 - Data; valid - 0) */
385#define CFC_REG_CID_CAM 0x104800 397#define CFC_REG_CID_CAM 0x104800
386#define CFC_REG_CONTROL0 0x104028 398#define CFC_REG_CONTROL0 0x104028
@@ -466,6 +478,8 @@
466#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc 478#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
467/* [R 11] Parity register #0 read */ 479/* [R 11] Parity register #0 read */
468#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 480#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
481/* [RC 11] Parity register #0 read clear */
482#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
469#define CSDM_REG_ENABLE_IN1 0xc2238 483#define CSDM_REG_ENABLE_IN1 0xc2238
470#define CSDM_REG_ENABLE_IN2 0xc223c 484#define CSDM_REG_ENABLE_IN2 0xc223c
471#define CSDM_REG_ENABLE_OUT1 0xc2240 485#define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -556,6 +570,9 @@
556/* [R 32] Parity register #0 read */ 570/* [R 32] Parity register #0 read */
557#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 571#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
558#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 572#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
573/* [RC 32] Parity register #0 read clear */
574#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
575#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
559#define CSEM_REG_ENABLE_IN 0x2000a4 576#define CSEM_REG_ENABLE_IN 0x2000a4
560#define CSEM_REG_ENABLE_OUT 0x2000a8 577#define CSEM_REG_ENABLE_OUT 0x2000a8
561/* [RW 32] This address space contains all registers and memories that are 578/* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
648#define DBG_REG_DBG_PRTY_MASK 0xc0a8 665#define DBG_REG_DBG_PRTY_MASK 0xc0a8
649/* [R 1] Parity register #0 read */ 666/* [R 1] Parity register #0 read */
650#define DBG_REG_DBG_PRTY_STS 0xc09c 667#define DBG_REG_DBG_PRTY_STS 0xc09c
668/* [RC 1] Parity register #0 read clear */
669#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
651/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The 670/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
652 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; 671 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
653 * 4.Completion function=0; 5.Error handling=0 */ 672 * 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
668#define DMAE_REG_DMAE_PRTY_MASK 0x102064 687#define DMAE_REG_DMAE_PRTY_MASK 0x102064
669/* [R 4] Parity register #0 read */ 688/* [R 4] Parity register #0 read */
670#define DMAE_REG_DMAE_PRTY_STS 0x102058 689#define DMAE_REG_DMAE_PRTY_STS 0x102058
690/* [RC 4] Parity register #0 read clear */
691#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
671/* [RW 1] Command 0 go. */ 692/* [RW 1] Command 0 go. */
672#define DMAE_REG_GO_C0 0x102080 693#define DMAE_REG_GO_C0 0x102080
673/* [RW 1] Command 1 go. */ 694/* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
734#define DORQ_REG_DORQ_PRTY_MASK 0x170190 755#define DORQ_REG_DORQ_PRTY_MASK 0x170190
735/* [R 2] Parity register #0 read */ 756/* [R 2] Parity register #0 read */
736#define DORQ_REG_DORQ_PRTY_STS 0x170184 757#define DORQ_REG_DORQ_PRTY_STS 0x170184
758/* [RC 2] Parity register #0 read clear */
759#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
737/* [RW 8] The address to write the DPM CID to STORM. */ 760/* [RW 8] The address to write the DPM CID to STORM. */
738#define DORQ_REG_DPM_CID_ADDR 0x170044 761#define DORQ_REG_DPM_CID_ADDR 0x170044
739/* [RW 5] The DPM mode CID extraction offset. */ 762/* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
 842/* [R 1] data available for error memory. If this bit is clear do not read 865/* [R 1] data available for error memory. If this bit is clear do not read
843 * from error_handling_memory. */ 866 * from error_handling_memory. */
844#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 867#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
868/* [RW 11] Parity mask register #0 read/write */
869#define IGU_REG_IGU_PRTY_MASK 0x1300a8
845/* [R 11] Parity register #0 read */ 870/* [R 11] Parity register #0 read */
846#define IGU_REG_IGU_PRTY_STS 0x13009c 871#define IGU_REG_IGU_PRTY_STS 0x13009c
872/* [RC 11] Parity register #0 read clear */
873#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
847/* [R 4] Debug: int_handle_fsm */ 874/* [R 4] Debug: int_handle_fsm */
848#define IGU_REG_INT_HANDLE_FSM 0x130050 875#define IGU_REG_INT_HANDLE_FSM 0x130050
849#define IGU_REG_LEADING_EDGE_LATCH 0x130134 876#define IGU_REG_LEADING_EDGE_LATCH 0x130134
@@ -1501,6 +1528,8 @@
1501#define MISC_REG_MISC_PRTY_MASK 0xa398 1528#define MISC_REG_MISC_PRTY_MASK 0xa398
1502/* [R 1] Parity register #0 read */ 1529/* [R 1] Parity register #0 read */
1503#define MISC_REG_MISC_PRTY_STS 0xa38c 1530#define MISC_REG_MISC_PRTY_STS 0xa38c
1531/* [RC 1] Parity register #0 read clear */
1532#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
1504#define MISC_REG_NIG_WOL_P0 0xa270 1533#define MISC_REG_NIG_WOL_P0 0xa270
1505#define MISC_REG_NIG_WOL_P1 0xa274 1534#define MISC_REG_NIG_WOL_P1 0xa274
1506/* [R 1] If set indicate that the pcie_rst_b was asserted without perst 1535/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -2082,6 +2111,10 @@
2082#define PBF_REG_PBF_INT_MASK 0x1401d4 2111#define PBF_REG_PBF_INT_MASK 0x1401d4
2083/* [R 5] Interrupt register #0 read */ 2112/* [R 5] Interrupt register #0 read */
2084#define PBF_REG_PBF_INT_STS 0x1401c8 2113#define PBF_REG_PBF_INT_STS 0x1401c8
2114/* [RW 20] Parity mask register #0 read/write */
2115#define PBF_REG_PBF_PRTY_MASK 0x1401e4
2116/* [RC 20] Parity register #0 read clear */
2117#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
2085#define PB_REG_CONTROL 0 2118#define PB_REG_CONTROL 0
2086/* [RW 2] Interrupt mask register #0 read/write */ 2119/* [RW 2] Interrupt mask register #0 read/write */
2087#define PB_REG_PB_INT_MASK 0x28 2120#define PB_REG_PB_INT_MASK 0x28
@@ -2091,6 +2124,8 @@
2091#define PB_REG_PB_PRTY_MASK 0x38 2124#define PB_REG_PB_PRTY_MASK 0x38
2092/* [R 4] Parity register #0 read */ 2125/* [R 4] Parity register #0 read */
2093#define PB_REG_PB_PRTY_STS 0x2c 2126#define PB_REG_PB_PRTY_STS 0x2c
2127/* [RC 4] Parity register #0 read clear */
2128#define PB_REG_PB_PRTY_STS_CLR 0x30
2094#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 2129#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
2095#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) 2130#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
2096#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) 2131#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
@@ -2446,6 +2481,8 @@
2446#define PRS_REG_PRS_PRTY_MASK 0x401a4 2481#define PRS_REG_PRS_PRTY_MASK 0x401a4
2447/* [R 8] Parity register #0 read */ 2482/* [R 8] Parity register #0 read */
2448#define PRS_REG_PRS_PRTY_STS 0x40198 2483#define PRS_REG_PRS_PRTY_STS 0x40198
2484/* [RC 8] Parity register #0 read clear */
2485#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
2449/* [RW 8] Context region for pure acknowledge packets. Used in CFC load 2486/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
2450 request message */ 2487 request message */
2451#define PRS_REG_PURE_REGIONS 0x40024 2488#define PRS_REG_PURE_REGIONS 0x40024
@@ -2599,6 +2636,9 @@
2599/* [R 32] Parity register #0 read */ 2636/* [R 32] Parity register #0 read */
2600#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c 2637#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
2601#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c 2638#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
2639/* [RC 32] Parity register #0 read clear */
2640#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
2641#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
2602/* [R 1] Debug only: The 'almost full' indication from each fifo (gives 2642/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
2603 indication about backpressure) */ 2643 indication about backpressure) */
2604#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 2644#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -3001,6 +3041,8 @@
3001#define PXP_REG_PXP_PRTY_MASK 0x103094 3041#define PXP_REG_PXP_PRTY_MASK 0x103094
3002/* [R 26] Parity register #0 read */ 3042/* [R 26] Parity register #0 read */
3003#define PXP_REG_PXP_PRTY_STS 0x103088 3043#define PXP_REG_PXP_PRTY_STS 0x103088
3044/* [RC 27] Parity register #0 read clear */
3045#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
3004/* [RW 4] The activity counter initial increment value sent in the load 3046/* [RW 4] The activity counter initial increment value sent in the load
3005 request */ 3047 request */
3006#define QM_REG_ACTCTRINITVAL_0 0x168040 3048#define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -3157,6 +3199,8 @@
3157#define QM_REG_QM_PRTY_MASK 0x168454 3199#define QM_REG_QM_PRTY_MASK 0x168454
3158/* [R 12] Parity register #0 read */ 3200/* [R 12] Parity register #0 read */
3159#define QM_REG_QM_PRTY_STS 0x168448 3201#define QM_REG_QM_PRTY_STS 0x168448
3202/* [RC 12] Parity register #0 read clear */
3203#define QM_REG_QM_PRTY_STS_CLR 0x16844c
3160/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ 3204/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
3161#define QM_REG_QSTATUS_HIGH 0x16802c 3205#define QM_REG_QSTATUS_HIGH 0x16802c
3162/* [R 32] Current queues in pipeline: Queues from 96 to 127 */ 3206/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3442,6 +3486,8 @@
3442#define QM_REG_WRRWEIGHTS_9 0x168848 3486#define QM_REG_WRRWEIGHTS_9 0x168848
3443/* [R 6] Keep the fill level of the fifo from write client 1 */ 3487/* [R 6] Keep the fill level of the fifo from write client 1 */
3444#define QM_REG_XQM_WRC_FIFOLVL 0x168000 3488#define QM_REG_XQM_WRC_FIFOLVL 0x168000
3489/* [W 1] reset to parity interrupt */
3490#define SEM_FAST_REG_PARITY_RST 0x18840
3445#define SRC_REG_COUNTFREE0 0x40500 3491#define SRC_REG_COUNTFREE0 0x40500
 3446/* [RW 1] If clear the searcher is compatible with E1 A0 - supports only two 3492/* [RW 1] If clear the searcher is compatible with E1 A0 - supports only two
 3447 ports. If set the searcher supports 8 functions. */ 3493 ports. If set the searcher supports 8 functions. */
@@ -3470,6 +3516,8 @@
3470#define SRC_REG_SRC_PRTY_MASK 0x404c8 3516#define SRC_REG_SRC_PRTY_MASK 0x404c8
3471/* [R 3] Parity register #0 read */ 3517/* [R 3] Parity register #0 read */
3472#define SRC_REG_SRC_PRTY_STS 0x404bc 3518#define SRC_REG_SRC_PRTY_STS 0x404bc
3519/* [RC 3] Parity register #0 read clear */
3520#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
3473/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ 3521/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
3474#define TCM_REG_CAM_OCCUP 0x5017c 3522#define TCM_REG_CAM_OCCUP 0x5017c
3475/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 3523/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3596,8 +3644,12 @@
3596#define TCM_REG_TCM_INT_MASK 0x501dc 3644#define TCM_REG_TCM_INT_MASK 0x501dc
3597/* [R 11] Interrupt register #0 read */ 3645/* [R 11] Interrupt register #0 read */
3598#define TCM_REG_TCM_INT_STS 0x501d0 3646#define TCM_REG_TCM_INT_STS 0x501d0
3647/* [RW 27] Parity mask register #0 read/write */
3648#define TCM_REG_TCM_PRTY_MASK 0x501ec
3599/* [R 27] Parity register #0 read */ 3649/* [R 27] Parity register #0 read */
3600#define TCM_REG_TCM_PRTY_STS 0x501e0 3650#define TCM_REG_TCM_PRTY_STS 0x501e0
3651/* [RC 27] Parity register #0 read clear */
3652#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
3601/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 3653/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
3602 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 3654 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3603 Is used to determine the number of the AG context REG-pairs written back; 3655 Is used to determine the number of the AG context REG-pairs written back;
@@ -3755,6 +3807,10 @@
3755#define TM_REG_TM_INT_MASK 0x1640fc 3807#define TM_REG_TM_INT_MASK 0x1640fc
3756/* [R 1] Interrupt register #0 read */ 3808/* [R 1] Interrupt register #0 read */
3757#define TM_REG_TM_INT_STS 0x1640f0 3809#define TM_REG_TM_INT_STS 0x1640f0
3810/* [RW 7] Parity mask register #0 read/write */
3811#define TM_REG_TM_PRTY_MASK 0x16410c
3812/* [RC 7] Parity register #0 read clear */
3813#define TM_REG_TM_PRTY_STS_CLR 0x164104
3758/* [RW 8] The event id for aggregated interrupt 0 */ 3814/* [RW 8] The event id for aggregated interrupt 0 */
3759#define TSDM_REG_AGG_INT_EVENT_0 0x42038 3815#define TSDM_REG_AGG_INT_EVENT_0 0x42038
3760#define TSDM_REG_AGG_INT_EVENT_1 0x4203c 3816#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
@@ -3835,6 +3891,8 @@
3835#define TSDM_REG_TSDM_PRTY_MASK 0x422bc 3891#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
3836/* [R 11] Parity register #0 read */ 3892/* [R 11] Parity register #0 read */
3837#define TSDM_REG_TSDM_PRTY_STS 0x422b0 3893#define TSDM_REG_TSDM_PRTY_STS 0x422b0
3894/* [RC 11] Parity register #0 read clear */
3895#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
3838/* [RW 5] The number of time_slots in the arbitration cycle */ 3896/* [RW 5] The number of time_slots in the arbitration cycle */
3839#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 3897#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
3840/* [RW 3] The source that is associated with arbitration element 0. Source 3898/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3914,6 +3972,9 @@
3914#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 3972#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
 3915/* [RW 8] List of free threads. There is a bit per thread. */ 3973/* [RW 8] List of free threads. There is a bit per thread. */
3916#define TSEM_REG_THREADS_LIST 0x1802e4 3974#define TSEM_REG_THREADS_LIST 0x1802e4
3975/* [RC 32] Parity register #0 read clear */
3976#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
3977#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
3917/* [RW 3] The arbitration scheme of time_slot 0 */ 3978/* [RW 3] The arbitration scheme of time_slot 0 */
3918#define TSEM_REG_TS_0_AS 0x180038 3979#define TSEM_REG_TS_0_AS 0x180038
3919/* [RW 3] The arbitration scheme of time_slot 10 */ 3980/* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4116,6 +4177,8 @@
4116#define UCM_REG_UCM_INT_STS 0xe01c8 4177#define UCM_REG_UCM_INT_STS 0xe01c8
4117/* [R 27] Parity register #0 read */ 4178/* [R 27] Parity register #0 read */
4118#define UCM_REG_UCM_PRTY_STS 0xe01d8 4179#define UCM_REG_UCM_PRTY_STS 0xe01d8
4180/* [RC 27] Parity register #0 read clear */
4181#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
4119/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS 4182/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
4120 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 4183 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
4121 Is used to determine the number of the AG context REG-pairs written back; 4184 Is used to determine the number of the AG context REG-pairs written back;
@@ -4292,6 +4355,8 @@
4292#define USDM_REG_USDM_PRTY_MASK 0xc42c0 4355#define USDM_REG_USDM_PRTY_MASK 0xc42c0
4293/* [R 11] Parity register #0 read */ 4356/* [R 11] Parity register #0 read */
4294#define USDM_REG_USDM_PRTY_STS 0xc42b4 4357#define USDM_REG_USDM_PRTY_STS 0xc42b4
4358/* [RC 11] Parity register #0 read clear */
4359#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
4295/* [RW 5] The number of time_slots in the arbitration cycle */ 4360/* [RW 5] The number of time_slots in the arbitration cycle */
4296#define USEM_REG_ARB_CYCLE_SIZE 0x300034 4361#define USEM_REG_ARB_CYCLE_SIZE 0x300034
4297/* [RW 3] The source that is associated with arbitration element 0. Source 4362/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4421,6 +4486,9 @@
4421/* [R 32] Parity register #0 read */ 4486/* [R 32] Parity register #0 read */
4422#define USEM_REG_USEM_PRTY_STS_0 0x300124 4487#define USEM_REG_USEM_PRTY_STS_0 0x300124
4423#define USEM_REG_USEM_PRTY_STS_1 0x300134 4488#define USEM_REG_USEM_PRTY_STS_1 0x300134
4489/* [RC 32] Parity register #0 read clear */
4490#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
4491#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
4424/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 4492/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4425 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ 4493 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4426#define USEM_REG_VFPF_ERR_NUM 0x300380 4494#define USEM_REG_VFPF_ERR_NUM 0x300380
@@ -4797,6 +4865,8 @@
4797#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc 4865#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
4798/* [R 11] Parity register #0 read */ 4866/* [R 11] Parity register #0 read */
4799#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 4867#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
4868/* [RC 11] Parity register #0 read clear */
4869#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
4800/* [RW 5] The number of time_slots in the arbitration cycle */ 4870/* [RW 5] The number of time_slots in the arbitration cycle */
4801#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 4871#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
4802/* [RW 3] The source that is associated with arbitration element 0. Source 4872/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4929,6 +4999,9 @@
4929/* [R 32] Parity register #0 read */ 4999/* [R 32] Parity register #0 read */
4930#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 5000#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
4931#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 5001#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
5002/* [RC 32] Parity register #0 read clear */
5003#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
5004#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
4932#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) 5005#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
4933#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) 5006#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
4934#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) 5007#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -6316,3 +6389,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
6316} 6389}
6317 6390
6318 6391
6392#endif /* BNX2X_REG_H */
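
The *_PRTY_STS_CLR definitions added above are read-clear ([RC]) twins of the existing read-only ([R]) parity status registers: a single read returns the latched parity bits and rearms the latch. A minimal sketch of how such a register is consumed, assuming the driver's usual REG_RD() accessor; the wrapper name is illustrative, not part of this patch:

	/* One MMIO read both reports and clears the parity latch, so the
	 * value must be captured before it is tested or logged. */
	static u32 bnx2x_read_parity_latch(struct bnx2x *bp, u32 sts_clr_reg)
	{
		u32 latched = REG_RD(bp, sts_clr_reg); /* e.g. TSDM_REG_TSDM_PRTY_STS_CLR */

		return latched;
	}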
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 6e4d9b144cc4..bda60d590fa8 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,6 +158,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
 
 	spin_lock_bh(&bp->stats_lock);
 
+	if (bp->stats_pending) {
+		spin_unlock_bh(&bp->stats_lock);
+		return;
+	}
+
 	ramrod_data.drv_counter = bp->stats_counter++;
 	ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
 	for_each_eth_queue(bp, i)
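
The early return closes a window where a second statistics ramrod could be posted while the previous one was still in flight: stats_pending is tested only after stats_lock is held, so the test and the drv_counter increment behave as one atomic step relative to the completion path that clears the flag. The same check-under-lock shape, as a hedged sketch with generic names rather than the driver's:

	/* Generic sketch of the idiom above; 'pending' stands in for
	 * bp->stats_pending. */
	static void post_once(spinlock_t *lock, bool *pending)
	{
		spin_lock_bh(lock);
		if (*pending) {		/* a request is already in flight */
			spin_unlock_bh(lock);
			return;
		}
		*pending = true;	/* claim the slot under the lock ... */
		spin_unlock_bh(lock);
		/* ... then issue the hardware request outside it */
	}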
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 3c403f895750..56166ae2059f 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev)
 	netif_set_real_num_tx_queues(dev, pi->nqsets);
 	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
 	if (err)
-		return err;
-	set_bit(pi->port_id, &adapter->open_device_map);
+		goto err_unwind;
 	err = link_start(dev);
 	if (err)
-		return err;
+		goto err_unwind;
+
 	netif_tx_start_all_queues(dev);
+	set_bit(pi->port_id, &adapter->open_device_map);
 	return 0;
+
+err_unwind:
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return err;
 }
 
 /*
@@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
  */
 static int cxgb4vf_stop(struct net_device *dev)
 {
-	int ret;
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
-	ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+	t4vf_enable_vi(adapter, pi->viid, false, false);
 	pi->link_cfg.link_ok = 0;
 
 	clear_bit(pi->port_id, &adapter->open_device_map);
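
Together these two hunks make cxgb4vf_open() publish the port in open_device_map only after queue setup and link_start() have both succeeded, and route every failure through a single unwind label that tears the adapter back down when no other port is open; cxgb4vf_stop() also drops the unused ret, since nothing consumed t4vf_enable_vi()'s return value on the close path. The claim-last/unwind shape, sketched with hypothetical helper names:

	/* Hypothetical helpers; the point is the ordering: publish success
	 * last, and undo adapter-wide setup only if this was the sole
	 * opener. */
	static int port_open_sketch(struct net_device *dev)
	{
		int err;

		err = bring_up_queues(dev);
		if (err)
			goto err_unwind;
		err = bring_up_link(dev);
		if (err)
			goto err_unwind;
		mark_port_open(dev);		/* publish last */
		return 0;

	err_unwind:
		if (no_ports_open(dev))
			tear_down_adapter(dev);
		return err;
	}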
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e4bec78c8e3f..0f51c80475ce 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
 	/*
 	 * Write the command array into the Mailbox Data register array and
 	 * transfer ownership of the mailbox to the firmware.
+	 *
+	 * For the VFs, the Mailbox Data "registers" are actually backed by
+	 * T4's "MA" interface rather than PL Registers (as is the case for
+	 * the PFs).  Because these are in different coherency domains, the
+	 * write to the VF's PL-register-backed Mailbox Control can race in
+	 * front of the writes to the MA-backed VF Mailbox Data "registers".
+	 * So we need to do a read-back on at least one byte of the VF Mailbox
+	 * Data registers before doing the write to the VF Mailbox Control
+	 * register.
 	 */
 	for (i = 0, p = cmd; i < size; i += 8)
 		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+	t4_read_reg(adapter, mbox_data);	/* flush write */
+
 	t4_write_reg(adapter, mbox_ctl,
 		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
 	t4_read_reg(adapter, mbox_ctl);	/* flush write */
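
The added read-back works because a read from a device interface cannot complete until earlier posted writes to that interface have landed, so reading one byte of mailbox data forces the MA-backed payload writes to finish before the PL-backed control write hands ownership to firmware. The general MMIO idiom, sketched with plain writel()/readl() instead of the driver's t4_* wrappers:

	/* Order a posted payload write ahead of the doorbell that consumes
	 * it; 'data' and 'ctl' are generic ioremapped registers. */
	static void ring_doorbell(void __iomem *data, void __iomem *ctl, u32 msg)
	{
		writel(msg, data);	/* payload write may still be posted */
		readl(data);		/* read-back flushes it to the device */
		writel(1, ctl);		/* doorbell now observes the payload */
	}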
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 77d08e697b74..aed223b1b897 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -130,10 +130,15 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
 		if (hw->mac_type == e1000_82541 ||
 		    hw->mac_type == e1000_82541_rev_2 ||
 		    hw->mac_type == e1000_82547 ||
-		    hw->mac_type == e1000_82547_rev_2) {
+		    hw->mac_type == e1000_82547_rev_2)
 			hw->phy_type = e1000_phy_igp;
 		break;
-	}
+	case RTL8211B_PHY_ID:
+		hw->phy_type = e1000_phy_8211;
+		break;
+	case RTL8201N_PHY_ID:
+		hw->phy_type = e1000_phy_8201;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		hw->phy_type = e1000_phy_undefined;
@@ -318,6 +323,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
 	case E1000_DEV_ID_82547GI:
 		hw->mac_type = e1000_82547_rev_2;
 		break;
+	case E1000_DEV_ID_INTEL_CE4100_GBE:
+		hw->mac_type = e1000_ce4100;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		return -E1000_ERR_MAC_TYPE;
@@ -372,6 +380,9 @@ void e1000_set_media_type(struct e1000_hw *hw)
 	case e1000_82542_rev2_1:
 		hw->media_type = e1000_media_type_fiber;
 		break;
+	case e1000_ce4100:
+		hw->media_type = e1000_media_type_copper;
+		break;
 	default:
 		status = er32(STATUS);
 		if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +471,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
 		/* Reset is performed on a shadow of the control register */
 		ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
 		break;
+	case e1000_ce4100:
 	default:
 		ew32(CTRL, (ctrl | E1000_CTRL_RST));
 		break;
@@ -952,6 +964,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	/* SW reset the PHY so all changes take effect */
+	ret_val = e1000_phy_reset(hw);
+	if (ret_val) {
+		e_dbg("Error Resetting the PHY\n");
+		return ret_val;
+	}
+
+	return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 ctrl_aux;
+
+	switch (hw->phy_type) {
+	case e1000_phy_8211:
+		ret_val = e1000_copper_link_rtl_setup(hw);
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	case e1000_phy_8201:
+		/* Set RMII mode */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= E1000_CTL_AUX_RMII;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+
+		/* Disable the J/K bits required for receive */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= 0x4;
+		ctrl_aux &= ~0x2;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+		ret_val = e1000_copper_link_rtl_setup(hw);
+
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	default:
+		e_dbg("Error Resetting the PHY\n");
+		return E1000_ERR_PHY_TYPE;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
  * e1000_copper_link_preconfig - early configuration for copper
  * @hw: Struct containing variables accessed by shared code
  *
@@ -1286,6 +1359,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	if (hw->autoneg_advertised == 0)
 		hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
+	/* IFE/RTL8201N PHY only supports 10/100 */
+	if (hw->phy_type == e1000_phy_8201)
+		hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
 	e_dbg("Reconfiguring auto-neg advertisement params\n");
 	ret_val = e1000_phy_setup_autoneg(hw);
 	if (ret_val) {
@@ -1341,7 +1418,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
 	s32 ret_val;
 	e_dbg("e1000_copper_link_postconfig");
 
-	if (hw->mac_type >= e1000_82544) {
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
 		e1000_config_collision_dist(hw);
 	} else {
 		ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1472,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
 		ret_val = e1000_copper_link_mgp_setup(hw);
 		if (ret_val)
 			return ret_val;
+	} else {
+		ret_val = gbe_dhg_phy_setup(hw);
+		if (ret_val) {
+			e_dbg("gbe_dhg_phy_setup failed!\n");
+			return ret_val;
+		}
 	}
 
 	if (hw->autoneg) {
@@ -1461,10 +1544,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 		return ret_val;
 
 	/* Read the MII 1000Base-T Control Register (Address 9). */
-	ret_val =
-	    e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+	ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
 	if (ret_val)
 		return ret_val;
+	else if (hw->phy_type == e1000_phy_8201)
+		mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
 
 	/* Need to parse both autoneg_advertised and fc and set up
 	 * the appropriate PHY registers. First we will parse for
@@ -1577,9 +1661,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 
 	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
-	ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
-	if (ret_val)
-		return ret_val;
+	if (hw->phy_type == e1000_phy_8201) {
+		mii_1000t_ctrl_reg = 0;
+	} else {
+		ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+					      mii_1000t_ctrl_reg);
+		if (ret_val)
+			return ret_val;
+	}
 
 	return E1000_SUCCESS;
 }
@@ -1860,7 +1949,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
 
 	/* 82544 or newer MAC, Auto Speed Detection takes care of
 	 * MAC speed/duplex configuration.*/
-	if (hw->mac_type >= e1000_82544)
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
 		return E1000_SUCCESS;
 
 	/* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1959,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
 	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
 
-	/* Set up duplex in the Device Control and Transmit Control
-	 * registers depending on negotiated values.
-	 */
-	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-	if (ret_val)
-		return ret_val;
+	switch (hw->phy_type) {
+	case e1000_phy_8201:
+		ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
 
-	if (phy_data & M88E1000_PSSR_DPLX)
-		ctrl |= E1000_CTRL_FD;
-	else
-		ctrl &= ~E1000_CTRL_FD;
+		if (phy_data & RTL_PHY_CTRL_FD)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
 
-	e1000_config_collision_dist(hw);
+		if (phy_data & RTL_PHY_CTRL_SPD_100)
+			ctrl |= E1000_CTRL_SPD_100;
+		else
+			ctrl |= E1000_CTRL_SPD_10;
 
-	/* Set up speed in the Device Control register depending on
-	 * negotiated values.
-	 */
-	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
-		ctrl |= E1000_CTRL_SPD_1000;
-	else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
-		ctrl |= E1000_CTRL_SPD_100;
+		e1000_config_collision_dist(hw);
+		break;
+	default:
+		/* Set up duplex in the Device Control and Transmit Control
+		 * registers depending on negotiated values.
+		 */
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+					     &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		if (phy_data & M88E1000_PSSR_DPLX)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
+
+		e1000_config_collision_dist(hw);
+
+		/* Set up speed in the Device Control register depending on
+		 * negotiated values.
+		 */
+		if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+			ctrl |= E1000_CTRL_SPD_1000;
+		else if ((phy_data & M88E1000_PSSR_SPEED) ==
+			 M88E1000_PSSR_100MBS)
+			ctrl |= E1000_CTRL_SPD_100;
+	}
 
 	/* Write the configured values back to the Device Control Reg. */
 	ew32(CTRL, ctrl);
@@ -2401,7 +2512,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
 	 * speed/duplex on the MAC to the current PHY speed/duplex
 	 * settings.
 	 */
-	if (hw->mac_type >= e1000_82544)
+	if ((hw->mac_type >= e1000_82544) &&
+	    (hw->mac_type != e1000_ce4100))
 		e1000_config_collision_dist(hw);
 	else {
 		ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2850,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_read_phy_reg_ex");
 
@@ -2752,28 +2864,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 	 * Control register. The MAC will take care of interfacing with the
 	 * PHY to retrieve the desired data.
 	 */
-	mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
-		(phy_addr << E1000_MDIC_PHY_SHIFT) |
-		(E1000_MDIC_OP_READ));
+	if (hw->mac_type == e1000_ce4100) {
+		mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+			(phy_addr << E1000_MDIC_PHY_SHIFT) |
+			(INTEL_CE_GBE_MDIC_OP_READ) |
+			(INTEL_CE_GBE_MDIC_GO));
 
-	ew32(MDIC, mdic);
+		writel(mdic, E1000_MDIO_CMD);
 
-	/* Poll the ready bit to see if the MDI read completed */
-	for (i = 0; i < 64; i++) {
-		udelay(50);
-		mdic = er32(MDIC);
-		if (mdic & E1000_MDIC_READY)
-			break;
-	}
-	if (!(mdic & E1000_MDIC_READY)) {
-		e_dbg("MDI Read did not complete\n");
-		return -E1000_ERR_PHY;
-	}
-	if (mdic & E1000_MDIC_ERROR) {
-		e_dbg("MDI Error\n");
-		return -E1000_ERR_PHY;
+		/* Poll the ready bit to see if the MDI read
+		 * completed
+		 */
+		for (i = 0; i < 64; i++) {
+			udelay(50);
+			mdic = readl(E1000_MDIO_CMD);
+			if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+				break;
+		}
+
+		if (mdic & INTEL_CE_GBE_MDIC_GO) {
+			e_dbg("MDI Read did not complete\n");
+			return -E1000_ERR_PHY;
+		}
+
+		mdic = readl(E1000_MDIO_STS);
+		if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+			e_dbg("MDI Read Error\n");
+			return -E1000_ERR_PHY;
+		}
+		*phy_data = (u16) mdic;
+	} else {
+		mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+			(phy_addr << E1000_MDIC_PHY_SHIFT) |
+			(E1000_MDIC_OP_READ));
+
+		ew32(MDIC, mdic);
+
+		/* Poll the ready bit to see if the MDI read
+		 * completed
+		 */
+		for (i = 0; i < 64; i++) {
+			udelay(50);
+			mdic = er32(MDIC);
+			if (mdic & E1000_MDIC_READY)
+				break;
+		}
+		if (!(mdic & E1000_MDIC_READY)) {
+			e_dbg("MDI Read did not complete\n");
+			return -E1000_ERR_PHY;
+		}
+		if (mdic & E1000_MDIC_ERROR) {
+			e_dbg("MDI Error\n");
+			return -E1000_ERR_PHY;
+		}
+		*phy_data = (u16) mdic;
 	}
-	*phy_data = (u16) mdic;
 	} else {
 		/* We must first send a preamble through the MDIO pin to signal the
 		 * beginning of an MII instruction. This is done by sending 32
@@ -2840,7 +2985,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_write_phy_reg_ex");
 
@@ -2850,27 +2995,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 	}
 
 	if (hw->mac_type > e1000_82543) {
-		/* Set up Op-code, Phy Address, register address, and data intended
-		 * for the PHY register in the MDI Control register. The MAC will take
-		 * care of interfacing with the PHY to send the desired data.
+		/* Set up Op-code, Phy Address, register address, and data
+		 * intended for the PHY register in the MDI Control register.
+		 * The MAC will take care of interfacing with the PHY to send
+		 * the desired data.
 		 */
-		mdic = (((u32) phy_data) |
-			(reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_WRITE));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_WRITE) |
+				(INTEL_CE_GBE_MDIC_GO));
 
-		ew32(MDIC, mdic);
+			writel(mdic, E1000_MDIO_CMD);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 641; i++) {
-			udelay(5);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
-		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Write did not complete\n");
-			return -E1000_ERR_PHY;
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 640; i++) {
+				udelay(5);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+		} else {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_WRITE));
+
+			ew32(MDIC, mdic);
+
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 641; i++) {
+				udelay(5);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
 		}
 	} else {
 		/* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3220,11 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
 		if (hw->phy_id == M88E1011_I_PHY_ID)
 			match = true;
 		break;
+	case e1000_ce4100:
+		if ((hw->phy_id == RTL8211B_PHY_ID) ||
+		    (hw->phy_id == RTL8201N_PHY_ID))
+			match = true;
+		break;
 	case e1000_82541:
 	case e1000_82541_rev_2:
 	case e1000_82547:
@@ -3291,6 +3468,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
 
 	if (hw->phy_type == e1000_phy_igp)
 		return e1000_phy_igp_get_info(hw, phy_info);
+	else if ((hw->phy_type == e1000_phy_8211) ||
+		 (hw->phy_type == e1000_phy_8201))
+		return E1000_SUCCESS;
 	else
 		return e1000_phy_m88_get_info(hw, phy_info);
 }
@@ -3742,6 +3922,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
 
 	e_dbg("e1000_read_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+				      data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
@@ -3904,6 +4090,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
 
 	e_dbg("e1000_write_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+				       data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
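
A detail worth pulling out of the MDIO hunks above: the CE4100 state machine is kicked by software setting a GO bit that the hardware clears on completion, the inverse of the classic MDIC flow where software waits for hardware to set READY. Condensed from the code above, using the defines this patch introduces:

	/* CE4100: busy-while-set -- done when GO drops. */
	for (i = 0; i < 64; i++) {
		udelay(50);
		mdic = readl(E1000_MDIO_CMD);
		if (!(mdic & INTEL_CE_GBE_MDIC_GO))
			break;
	}

	/* Classic MDIC: done when READY rises. */
	for (i = 0; i < 64; i++) {
		udelay(50);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}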
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index ecd9f6c6bcd5..f5514a0d5be6 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -52,6 +52,7 @@ typedef enum {
 	e1000_82545,
 	e1000_82545_rev_3,
 	e1000_82546,
+	e1000_ce4100,
 	e1000_82546_rev_3,
 	e1000_82541,
 	e1000_82541_rev_2,
@@ -209,9 +210,11 @@ typedef enum {
 } e1000_1000t_rx_status;
 
 typedef enum {
 	e1000_phy_m88 = 0,
 	e1000_phy_igp,
+	e1000_phy_8211,
+	e1000_phy_8201,
 	e1000_phy_undefined = 0xFF
 } e1000_phy_type;
 
 typedef enum {
@@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
 #define E1000_DEV_ID_82547EI 0x1019
 #define E1000_DEV_ID_82547EI_MOBILE 0x101A
 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
 
 #define NODE_ADDRESS_SIZE 6
 #define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@ struct e1000_ffvt_entry {
 #define E1000_CTRL_EXT 0x00018	/* Extended Device Control - RW */
 #define E1000_FLA 0x0001C	/* Flash Access - RW */
 #define E1000_MDIC 0x00020	/* MDI Control - RW */
+
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
+
 #define E1000_SCTL 0x00024	/* SerDes Control - RW */
 #define E1000_FEXTNVM 0x00028	/* Future Extended NVM register */
 #define E1000_FCAL 0x00028	/* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@ struct e1000_ffvt_entry {
 #define E1000_IMS 0x000D0	/* Interrupt Mask Set - RW */
 #define E1000_IMC 0x000D8	/* Interrupt Mask Clear - WO */
 #define E1000_IAM 0x000E0	/* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100 specific,
+ * RMII/RGMII function is switched by this register - RW
+ * Following are bits definitions of the Auxiliary Control Register
+ */
+#define E1000_CTL_AUX 0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT 10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT 8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* LW little endian, Byte big endian */
+#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+
 #define E1000_RCTL 0x00100	/* RX Control - RW */
 #define E1000_RDTR1 0x02820	/* RX Delay Timer (1) - RW */
 #define E1000_RDBAL1 0x02900	/* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry {
  * in more current versions of the 8254x. Despite the difference in location,
  * the registers function in the same manner.
  */
+#define E1000_82542_CTL_AUX E1000_CTL_AUX
 #define E1000_82542_CTRL E1000_CTRL
 #define E1000_82542_CTRL_DUP E1000_CTRL_DUP
 #define E1000_82542_STATUS E1000_STATUS
@@ -1571,6 +1614,11 @@ struct e1000_hw {
 #define E1000_MDIC_INT_EN 0x20000000
 #define E1000_MDIC_ERROR 0x40000000
 
+#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000
+#define INTEL_CE_GBE_MDIC_GO 0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000
+
 #define E1000_KUMCTRLSTA_MASK 0x0000FFFF
 #define E1000_KUMCTRLSTA_OFFSET 0x001F0000
 #define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
@@ -2871,6 +2919,11 @@ struct e1000_host_command_info {
 #define M88E1111_I_PHY_ID 0x01410CC0
 #define L1LXT971A_PHY_ID 0x001378E0
 
+#define RTL8211B_PHY_ID 0x001CC910
+#define RTL8201N_PHY_ID 0x8200
+#define RTL_PHY_CTRL_FD 0x0100	/* Full duplex.0=half; 1=full */
+#define RTL_PHY_CTRL_SPD_100 0x200000	/* Force 100Mb */
+
 /* Bits...
  * 15-5: page
  * 4-0: register offset
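
The CTL_AUX fields are plain shift-encoded groups, so a full configuration is built by OR-ing one value from each group. An illustrative combination only -- the patch itself programs just E1000_CTL_AUX_RMII, in gbe_dhg_phy_setup() above:

	/* Illustrative: RMII mode, all transfers honouring the CTL_AUX
	 * endianness field, LW little endian / byte big endian. */
	u32 ctrl_aux = E1000_CTL_AUX_RMII |
		       E1000_CTL_AUX_ALL |
		       E1000_CTL_AUX_LWLE_BBE;

	ew32(CTL_AUX, ctrl_aux);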
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 340e12d2e4a9..4ff88a683f61 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
 
 #include "e1000.h"
 #include <net/ip6_checksum.h>
+#include <linux/io.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
 	/* required last entry */
 	{0,}
 };
@@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
 	case e1000_82545:
 	case e1000_82545_rev_3:
 	case e1000_82546:
+	case e1000_ce4100:
 	case e1000_82546_rev_3:
 	case e1000_82541:
 	case e1000_82541_rev_2:
@@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter)
 	case e1000_82545:
 	case e1000_82545_rev_3:
 	case e1000_82546:
+	case e1000_ce4100:
 	case e1000_82546_rev_3:
 		pba = E1000_PBA_48K;
 		break;
@@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
 	int i, err, pci_using_dac;
 	u16 eeprom_data = 0;
+	u16 tmp = 0;
 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 	int bars, need_ioport;
 
@@ -996,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		goto err_sw_init;
 
 	err = -EIO;
+	if (hw->mac_type == e1000_ce4100) {
+		ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+		ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+						    pci_resource_len(pdev, BAR_1));
+
+		if (!ce4100_gbe_mdio_base_virt)
+			goto err_mdio_ioremap;
+	}
 
 	if (hw->mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
@@ -1135,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	adapter->wol = adapter->eeprom_wol;
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+	/* Auto detect PHY address */
+	if (hw->mac_type == e1000_ce4100) {
+		for (i = 0; i < 32; i++) {
+			hw->phy_addr = i;
+			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+			if (tmp == 0 || tmp == 0xFF) {
+				if (i == 31)
+					goto err_eeprom;
+				continue;
+			} else
+				break;
+		}
+	}
+
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
 
1140 1172
@@ -1171,6 +1203,8 @@ err_eeprom:
1171 kfree(adapter->rx_ring); 1203 kfree(adapter->rx_ring);
1172err_dma: 1204err_dma:
1173err_sw_init: 1205err_sw_init:
1206err_mdio_ioremap:
1207 iounmap(ce4100_gbe_mdio_base_virt);
1174 iounmap(hw->hw_addr); 1208 iounmap(hw->hw_addr);
1175err_ioremap: 1209err_ioremap:
1176 free_netdev(netdev); 1210 free_netdev(netdev);
@@ -1409,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
 	/* First rev 82545 and 82546 need to not allow any memory
 	 * write location to cross 64k boundary due to errata 23 */
 	if (hw->mac_type == e1000_82545 ||
+	    hw->mac_type == e1000_ce4100 ||
 	    hw->mac_type == e1000_82546) {
 		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
 	}
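
The probe-time scan leans on MDIO bus behaviour: an address with no PHY behind it reads PHY_ID2 as 0x0 or all-ones, so the first address returning anything else is taken as the attached PHY, and the probe fails if all 32 addresses are dead. The loop above, restated as a standalone sketch:

	/* Equivalent sketch of the auto-detect loop; PHY_ID2 reads as 0x0
	 * or 0xFF where nothing answers. */
	static s32 ce4100_find_phy(struct e1000_hw *hw)
	{
		u16 id2 = 0;
		u32 addr;

		for (addr = 0; addr < 32; addr++) {
			hw->phy_addr = addr;
			e1000_read_phy_reg(hw, PHY_ID2, &id2);
			if (id2 != 0 && id2 != 0xFF)
				return E1000_SUCCESS;	/* live PHY found */
		}
		return -E1000_ERR_PHY;			/* bus is empty */
	}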
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75aa895..55c1711f1688 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,21 @@
 #ifndef _E1000_OSDEP_H_
 #define _E1000_OSDEP_H_
 
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
 #include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
+
+#define CONFIG_RAM_BASE 0x60000
+#define GBE_CONFIG_OFFSET 0x0
+
+#define GBE_CONFIG_RAM_BASE \
+	((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE)
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+	(iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+	(ioread16_rep(base + (offset << 1), data, count))
 
 #define er32(reg) \
 	(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
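
On CE4100 the NVM contents live in a flash-backed configuration RAM rather than behind the EEPROM state machine, so e1000_do_read_eeprom()/e1000_do_write_eeprom() above can stream 16-bit words straight through ioread16_rep()/iowrite16_rep(). Note the asymmetry: the read macro scales the word offset to bytes (offset << 1) while the write macro passes it through. A hedged usage sketch matching the read path:

	u16 buf[8];

	/* Read 8 words starting at word offset 0 of the config RAM. */
	GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, 0, 8, buf);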
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index e57e4097ef1b..cb6c7b1c1fb8 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -78,6 +78,8 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
 static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
 
 /**
  * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -113,6 +115,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 		phy->type = e1000_phy_bm;
 		phy->ops.acquire = e1000_get_hw_semaphore_82574;
 		phy->ops.release = e1000_put_hw_semaphore_82574;
+		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -121,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 
 	/* This can only be done after all function pointers are setup. */
 	ret_val = e1000_get_phy_id_82571(hw);
+	if (ret_val) {
+		e_dbg("Error getting PHY ID\n");
+		return ret_val;
+	}
 
 	/* Verify phy id */
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
 		if (phy->id != IGP01E1000_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82573:
 		if (phy->id != M88E1111_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82574:
 	case e1000_82583:
 		if (phy->id != BME1000_E_PHY_ID_R2)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	default:
-		return -E1000_ERR_PHY;
+		ret_val = -E1000_ERR_PHY;
 		break;
 	}
 
-	return 0;
+	if (ret_val)
+		e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+	return ret_val;
 }
 
 /**
@@ -649,6 +660,58 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.
+ * LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (active)
+		data |= E1000_PHY_CTRL_D0A_LPLU;
+	else
+		data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * when active is true, else clear lplu for D3. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (!active) {
+		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+	} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_PHY_CTRL_NOND0A_LPLU;
+	}
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
  * e1000_acquire_nvm_82571 - Request for access to the EEPROM
  * @hw: pointer to the HW structure
  *
@@ -956,7 +1019,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
 **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, ctrl_ext, icr;
+	u32 ctrl, ctrl_ext;
 	s32 ret_val;
 
 	/*
@@ -1040,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	if (hw->mac.type == e1000_82571) {
 		/* Install any alternate MAC address into RAR0 */
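
Both LPLU helpers are a single read-modify-write of POEMB (the PHY OEM-bits alias of E1000_PHY_CTRL added in hw.h below), which is why they can return 0 unconditionally: no PHY transaction is involved that could fail. The shared shape, reduced to its core; FLAG stands for the D0A/NOND0A LPLU bit:

	u16 data = er32(POEMB);

	if (enable)
		data |= FLAG;	/* E1000_PHY_CTRL_D0A_LPLU or _NOND0A_LPLU */
	else
		data &= ~FLAG;
	ew32(POEMB, data);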
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 2c913b8e9116..5255be753746 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
+#include <linux/crc32.h>
 
 #include "hw.h"
 
@@ -496,6 +497,8 @@ extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
 
 extern unsigned int copybreak;
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index b18c644e13d1..e45a61c8930a 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
 **/
 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 {
-	u32 ctrl, icr;
+	u32 ctrl;
 	s32 ret_val;
 
 	/*
@@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	ret_val = e1000_check_alt_mac_addr_generic(hw);
 
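
Dropping icr here (and in the 82571 and ICH8 reset paths) is safe because the read itself is the operation: ICR is read-to-clear, so er32(ICR) is issued purely to acknowledge events latched while the mask was written, and the returned value was never consumed. Isolated:

	/* Mask all interrupts, then acknowledge anything already latched;
	 * the ICR read clears the register as a side effect. */
	ew32(IMC, 0xffffffff);
	er32(ICR);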
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index affcacf6f5a9..f8ed03dab9b1 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -624,20 +624,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
 
-	strncpy(drvinfo->driver, e1000e_driver_name, 32);
-	strncpy(drvinfo->version, e1000e_driver_version, 32);
+	strncpy(drvinfo->driver, e1000e_driver_name,
+		sizeof(drvinfo->driver) - 1);
+	strncpy(drvinfo->version, e1000e_driver_version,
+		sizeof(drvinfo->version) - 1);
 
 	/*
 	 * EEPROM image version # is reported as firmware version # for
 	 * PCI-E controllers
 	 */
-	sprintf(firmware_version, "%d.%d-%d",
-		(adapter->eeprom_vers & 0xF000) >> 12,
-		(adapter->eeprom_vers & 0x0FF0) >> 4,
-		(adapter->eeprom_vers & 0x000F));
+	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+		 (adapter->eeprom_vers & 0xF000) >> 12,
+		 (adapter->eeprom_vers & 0x0FF0) >> 4,
+		 (adapter->eeprom_vers & 0x000F));
 
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strncpy(drvinfo->fw_version, firmware_version,
+		sizeof(drvinfo->fw_version) - 1);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info) - 1);
 	drvinfo->regdump_len = e1000_get_regs_len(netdev);
 	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
@@ -1704,6 +1708,19 @@ static void e1000_diag_test(struct net_device *netdev,
 	bool if_running = netif_running(netdev);
 
 	set_bit(__E1000_TESTING, &adapter->state);
+
+	if (!if_running) {
+		/* Get control of and reset hardware */
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_get_hw_control(adapter);
+
+		e1000e_power_up_phy(adapter);
+
+		adapter->hw.phy.autoneg_wait_to_complete = 1;
+		e1000e_reset(adapter);
+		adapter->hw.phy.autoneg_wait_to_complete = 0;
+	}
+
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
@@ -1717,8 +1734,6 @@ static void e1000_diag_test(struct net_device *netdev,
 		if (if_running)
 			/* indicate we're in test mode */
 			dev_close(netdev);
-		else
-			e1000e_reset(adapter);
 
 		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1732,8 +1747,6 @@ static void e1000_diag_test(struct net_device *netdev,
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000e_reset(adapter);
-		/* make sure the phy is powered up */
-		e1000e_power_up_phy(adapter);
 		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1755,28 +1768,29 @@ static void e1000_diag_test(struct net_device *netdev,
 		if (if_running)
 			dev_open(netdev);
 	} else {
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
-			clear_bit(__E1000_TESTING, &adapter->state);
-			dev_open(netdev);
-			set_bit(__E1000_TESTING, &adapter->state);
-		}
+		/* Online tests */
 
 		e_info("online testing starting\n");
-		/* Online tests */
-		if (e1000_link_test(adapter, &data[4]))
-			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		/* Online tests aren't run; pass by default */
+		/* register, eeprom, intr and loopback tests not run online */
 		data[0] = 0;
 		data[1] = 0;
 		data[2] = 0;
 		data[3] = 0;
 
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT))
-			dev_close(netdev);
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__E1000_TESTING, &adapter->state);
 	}
+
+	if (!if_running) {
+		e1000e_reset(adapter);
+
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_release_hw_control(adapter);
+	}
+
 	msleep_interruptible(4 * 1000);
 }
 
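
The reshuffled diagnostics give the interface-down case a common prologue and epilogue around both test modes: take hardware control from the management firmware on AMT parts, power up the PHY, and reset with autoneg_wait_to_complete set so the reset does not return before link renegotiates; afterwards reset again and hand control back. The e1000_get_drvinfo() changes are independent hardening: each copy is now bounded by its destination field instead of a hard-coded 32. The idiom, isolated:

	char buf[32];

	/* Bound copies by the destination; the -1 leaves the final byte
	 * as a NUL terminator (the drvinfo struct arrives zeroed). */
	strncpy(buf, src, sizeof(buf) - 1);
	snprintf(buf, sizeof(buf), "%d.%d", major, minor); /* always terminated */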
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5c2c30..e774380c7cec 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -83,6 +83,7 @@ enum e1e_registers {
 	E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
 	E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
 	E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
 	E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
 	E1000_PBS = 0x01008, /* Packet Buffer Size */
 	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d86cc0832720..5328a2927731 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1395,22 +1395,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
 	}
 }
 
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
-	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
-	u32 i, j, mask, crc;
-
-	crc = 0xffffffff;
-	for (i = 0; i < 6; i++) {
-		crc = crc ^ mac[i];
-		for (j = 8; j > 0; j--) {
-			mask = (crc & 1) * (-1);
-			crc = (crc >> 1) ^ (poly & mask);
-		}
-	}
-	return ~crc;
-}
-
 /**
  * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
  * with 82579 PHY
@@ -1453,8 +1437,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 		mac_addr[4] = (addr_high & 0xFF);
 		mac_addr[5] = ((addr_high >> 8) & 0xFF);
 
-		ew32(PCH_RAICC(i),
-		     e1000_calc_rx_da_crc(mac_addr));
+		ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
 	}
 
 	/* Write Rx addresses to the PHY */
@@ -2977,7 +2960,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	u16 reg;
-	u32 ctrl, icr, kab;
+	u32 ctrl, kab;
 	s32 ret_val;
 
 	/*
@@ -3067,7 +3050,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	ew32(CRC_OFFSET, 0x65656565);
 
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	kab = er32(KABGTXD);
 	kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3118,7 +3101,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 	 */
 	if (hw->phy.type == e1000_phy_82578) {
-		hw->phy.ops.read_reg(hw, BM_WUC, &i);
+		e1e_rphy(hw, BM_WUC, &i);
 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
 		if (ret_val)
 			return ret_val;
@@ -3276,9 +3259,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
 	    (hw->phy.type == e1000_phy_82577)) {
 		ew32(FCRTV_PCH, hw->fc.refresh_time);
 
-		ret_val = hw->phy.ops.write_reg(hw,
-		                               PHY_REG(BM_PORT_CTRL_PAGE, 27),
-		                               hw->fc.pause_time);
+		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+				   hw->fc.pause_time);
 		if (ret_val)
 			return ret_val;
 	}
@@ -3342,8 +3324,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 			return ret_val;
 		break;
 	case e1000_phy_ife:
-		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                               &reg_data);
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
 		if (ret_val)
 			return ret_val;
 
@@ -3361,8 +3342,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 			reg_data |= IFE_PMC_AUTO_MDIX;
 			break;
 		}
-		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                                reg_data);
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
 		if (ret_val)
 			return ret_val;
 		break;
@@ -3646,7 +3626,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
 {
 	if (hw->phy.type == e1000_phy_ife)
 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
-				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
 
 	ew32(LEDCTL, hw->mac.ledctl_mode1);
 	return 0;
@@ -3660,8 +3641,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
  **/
 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_mode1);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
 }
 
 /**
@@ -3672,8 +3652,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
  **/
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_default);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
 }
 
 /**
@@ -3704,7 +3683,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3735,7 +3714,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3844,20 +3823,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3844 if ((hw->phy.type == e1000_phy_82578) || 3823 if ((hw->phy.type == e1000_phy_82578) ||
3845 (hw->phy.type == e1000_phy_82579) || 3824 (hw->phy.type == e1000_phy_82579) ||
3846 (hw->phy.type == e1000_phy_82577)) { 3825 (hw->phy.type == e1000_phy_82577)) {
3847 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); 3826 e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
3848 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); 3827 e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
3849 hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); 3828 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
3850 hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data); 3829 e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
3851 hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); 3830 e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
3852 hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); 3831 e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
3853 hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); 3832 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
3854 hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); 3833 e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
3855 hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); 3834 e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
3856 hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); 3835 e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
3857 hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); 3836 e1e_rphy(hw, HV_DC_UPPER, &phy_data);
3858 hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); 3837 e1e_rphy(hw, HV_DC_LOWER, &phy_data);
3859 hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); 3838 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
3860 hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); 3839 e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
3861 } 3840 }
3862} 3841}
3863 3842
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7e55170a601e..ff2872153b21 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1135,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1135 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); 1135 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1136 if (ret_val) 1136 if (ret_val)
1137 return ret_val; 1137 return ret_val;
1138 ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); 1138 ret_val =
1139 e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1139 if (ret_val) 1140 if (ret_val)
1140 return ret_val; 1141 return ret_val;
1141 1142
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fe50242aa9e6..fa5b60452547 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1980,15 +1980,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
1980} 1980}
1981 1981
1982/** 1982/**
1983 * e1000_get_hw_control - get control of the h/w from f/w 1983 * e1000e_get_hw_control - get control of the h/w from f/w
1984 * @adapter: address of board private structure 1984 * @adapter: address of board private structure
1985 * 1985 *
1986 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 1986 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1987 * For ASF and Pass Through versions of f/w this means that 1987 * For ASF and Pass Through versions of f/w this means that
1988 * the driver is loaded. For AMT version (only with 82573) 1988 * the driver is loaded. For AMT version (only with 82573)
1989 * of the f/w this means that the network i/f is open. 1989 * of the f/w this means that the network i/f is open.
1990 **/ 1990 **/
1991static void e1000_get_hw_control(struct e1000_adapter *adapter) 1991void e1000e_get_hw_control(struct e1000_adapter *adapter)
1992{ 1992{
1993 struct e1000_hw *hw = &adapter->hw; 1993 struct e1000_hw *hw = &adapter->hw;
1994 u32 ctrl_ext; 1994 u32 ctrl_ext;
@@ -2005,16 +2005,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
2005} 2005}
2006 2006
2007/** 2007/**
2008 * e1000_release_hw_control - release control of the h/w to f/w 2008 * e1000e_release_hw_control - release control of the h/w to f/w
2009 * @adapter: address of board private structure 2009 * @adapter: address of board private structure
2010 * 2010 *
2011 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2011 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2012 * For ASF and Pass Through versions of f/w this means that the 2012 * For ASF and Pass Through versions of f/w this means that the
2013 * driver is no longer loaded. For AMT version (only with 82573) 2013 * driver is no longer loaded. For AMT version (only with 82573)
2014 * of the f/w this means that the network i/f is closed. 2014 * of the f/w this means that the network i/f is closed.
2015 * 2015 *
2016 **/ 2016 **/
2017static void e1000_release_hw_control(struct e1000_adapter *adapter) 2017void e1000e_release_hw_control(struct e1000_adapter *adapter)
2018{ 2018{
2019 struct e1000_hw *hw = &adapter->hw; 2019 struct e1000_hw *hw = &adapter->hw;
2020 u32 ctrl_ext; 2020 u32 ctrl_ext;
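The bodies of these two helpers are untouched by the rename; they just toggle the DRV_LOAD handshake bit. A condensed sketch, assuming the driver's usual er32()/ew32() register accessors and flag names:

    /* condensed sketch: SWSM carries DRV_LOAD on parts flagged
     * FLAG_HAS_SWSM_ON_LOAD, CTRL_EXT on parts flagged
     * FLAG_HAS_CTRLEXT_ON_LOAD */
    void e1000e_get_hw_control(struct e1000_adapter *adapter)
    {
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD)
            ew32(SWSM, er32(SWSM) | E1000_SWSM_DRV_LOAD);
        else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD)
            ew32(CTRL_EXT, er32(CTRL_EXT) | E1000_CTRL_EXT_DRV_LOAD);
    }

    /* e1000e_release_hw_control() clears the same bit(s) again */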
@@ -2445,7 +2445,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2445 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2445 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2446 (vid == adapter->mng_vlan_id)) { 2446 (vid == adapter->mng_vlan_id)) {
2447 /* release control to f/w */ 2447 /* release control to f/w */
2448 e1000_release_hw_control(adapter); 2448 e1000e_release_hw_control(adapter);
2449 return; 2449 return;
2450 } 2450 }
2451 2451
@@ -2734,6 +2734,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2734 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2734 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2735 else 2735 else
2736 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2736 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2737
2738 if (ret_val)
2739 e_dbg("failed to enable jumbo frame workaround mode\n");
2737 } 2740 }
2738 2741
2739 /* Program MC offset vector base */ 2742 /* Program MC offset vector base */
@@ -3184,7 +3187,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
3184 ew32(PBA, pba); 3187 ew32(PBA, pba);
3185 } 3188 }
3186 3189
3187
3188 /* 3190 /*
3189 * flow control settings 3191 * flow control settings
3190 * 3192 *
@@ -3272,7 +3274,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3272 * that the network interface is in control 3274 * that the network interface is in control
3273 */ 3275 */
3274 if (adapter->flags & FLAG_HAS_AMT) 3276 if (adapter->flags & FLAG_HAS_AMT)
3275 e1000_get_hw_control(adapter); 3277 e1000e_get_hw_control(adapter);
3276 3278
3277 ew32(WUC, 0); 3279 ew32(WUC, 0);
3278 3280
@@ -3285,6 +3287,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
3285 ew32(VET, ETH_P_8021Q); 3287 ew32(VET, ETH_P_8021Q);
3286 3288
3287 e1000e_reset_adaptive(hw); 3289 e1000e_reset_adaptive(hw);
3290
3291 if (!netif_running(adapter->netdev) &&
3292 !test_bit(__E1000_TESTING, &adapter->state)) {
3293 e1000_power_down_phy(adapter);
3294 return;
3295 }
3296
3288 e1000_get_phy_info(hw); 3297 e1000_get_phy_info(hw);
3289 3298
3290 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && 3299 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3570,7 +3579,7 @@ static int e1000_open(struct net_device *netdev)
3570 * interface is now open and reset the part to a known state. 3579 * interface is now open and reset the part to a known state.
3571 */ 3580 */
3572 if (adapter->flags & FLAG_HAS_AMT) { 3581 if (adapter->flags & FLAG_HAS_AMT) {
3573 e1000_get_hw_control(adapter); 3582 e1000e_get_hw_control(adapter);
3574 e1000e_reset(adapter); 3583 e1000e_reset(adapter);
3575 } 3584 }
3576 3585
@@ -3634,7 +3643,7 @@ static int e1000_open(struct net_device *netdev)
3634 return 0; 3643 return 0;
3635 3644
3636err_req_irq: 3645err_req_irq:
3637 e1000_release_hw_control(adapter); 3646 e1000e_release_hw_control(adapter);
3638 e1000_power_down_phy(adapter); 3647 e1000_power_down_phy(adapter);
3639 e1000e_free_rx_resources(adapter); 3648 e1000e_free_rx_resources(adapter);
3640err_setup_rx: 3649err_setup_rx:
@@ -3689,8 +3698,9 @@ static int e1000_close(struct net_device *netdev)
3689 * If AMT is enabled, let the firmware know that the network 3698 * If AMT is enabled, let the firmware know that the network
3690 * interface is now closed 3699 * interface is now closed
3691 */ 3700 */
3692 if (adapter->flags & FLAG_HAS_AMT) 3701 if ((adapter->flags & FLAG_HAS_AMT) &&
3693 e1000_release_hw_control(adapter); 3702 !test_bit(__E1000_TESTING, &adapter->state))
3703 e1000e_release_hw_control(adapter);
3694 3704
3695 if ((adapter->flags & FLAG_HAS_ERT) || 3705 if ((adapter->flags & FLAG_HAS_ERT) ||
3696 (adapter->hw.mac.type == e1000_pch2lan)) 3706 (adapter->hw.mac.type == e1000_pch2lan))
@@ -5209,7 +5219,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5209 * Release control of h/w to f/w. If f/w is AMT enabled, this 5219 * Release control of h/w to f/w. If f/w is AMT enabled, this
5210 * would have already happened in close and is redundant. 5220 * would have already happened in close and is redundant.
5211 */ 5221 */
5212 e1000_release_hw_control(adapter); 5222 e1000e_release_hw_control(adapter);
5213 5223
5214 pci_disable_device(pdev); 5224 pci_disable_device(pdev);
5215 5225
@@ -5366,7 +5376,7 @@ static int __e1000_resume(struct pci_dev *pdev)
5366 * under the control of the driver. 5376 * under the control of the driver.
5367 */ 5377 */
5368 if (!(adapter->flags & FLAG_HAS_AMT)) 5378 if (!(adapter->flags & FLAG_HAS_AMT))
5369 e1000_get_hw_control(adapter); 5379 e1000e_get_hw_control(adapter);
5370 5380
5371 return 0; 5381 return 0;
5372} 5382}
@@ -5613,7 +5623,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
5613 * under the control of the driver. 5623 * under the control of the driver.
5614 */ 5624 */
5615 if (!(adapter->flags & FLAG_HAS_AMT)) 5625 if (!(adapter->flags & FLAG_HAS_AMT))
5616 e1000_get_hw_control(adapter); 5626 e1000e_get_hw_control(adapter);
5617 5627
5618} 5628}
5619 5629
@@ -5636,7 +5646,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
5636 ret_val = e1000_read_pba_string_generic(hw, pba_str, 5646 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5637 E1000_PBANUM_LENGTH); 5647 E1000_PBANUM_LENGTH);
5638 if (ret_val) 5648 if (ret_val)
5639 strcpy(pba_str, "Unknown"); 5649 strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
5640 e_info("MAC: %d, PHY: %d, PBA No: %s\n", 5650 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5641 hw->mac.type, hw->phy.type, pba_str); 5651 hw->mac.type, hw->phy.type, pba_str);
5642} 5652}
@@ -5963,9 +5973,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5963 * under the control of the driver. 5973 * under the control of the driver.
5964 */ 5974 */
5965 if (!(adapter->flags & FLAG_HAS_AMT)) 5975 if (!(adapter->flags & FLAG_HAS_AMT))
5966 e1000_get_hw_control(adapter); 5976 e1000e_get_hw_control(adapter);
5967 5977
5968 strcpy(netdev->name, "eth%d"); 5978 strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
5969 err = register_netdev(netdev); 5979 err = register_netdev(netdev);
5970 if (err) 5980 if (err)
5971 goto err_register; 5981 goto err_register;
@@ -5982,12 +5992,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5982 5992
5983err_register: 5993err_register:
5984 if (!(adapter->flags & FLAG_HAS_AMT)) 5994 if (!(adapter->flags & FLAG_HAS_AMT))
5985 e1000_release_hw_control(adapter); 5995 e1000e_release_hw_control(adapter);
5986err_eeprom: 5996err_eeprom:
5987 if (!e1000_check_reset_block(&adapter->hw)) 5997 if (!e1000_check_reset_block(&adapter->hw))
5988 e1000_phy_hw_reset(&adapter->hw); 5998 e1000_phy_hw_reset(&adapter->hw);
5989err_hw_init: 5999err_hw_init:
5990
5991 kfree(adapter->tx_ring); 6000 kfree(adapter->tx_ring);
5992 kfree(adapter->rx_ring); 6001 kfree(adapter->rx_ring);
5993err_sw_init: 6002err_sw_init:
@@ -6053,7 +6062,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
6053 * Release control of h/w to f/w. If f/w is AMT enabled, this 6062 * Release control of h/w to f/w. If f/w is AMT enabled, this
6054 * would have already happened in close and is redundant. 6063 * would have already happened in close and is redundant.
6055 */ 6064 */
6056 e1000_release_hw_control(adapter); 6065 e1000e_release_hw_control(adapter);
6057 6066
6058 e1000e_reset_interrupt_capability(adapter); 6067 e1000e_reset_interrupt_capability(adapter);
6059 kfree(adapter->tx_ring); 6068 kfree(adapter->tx_ring);
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 1781efeb55e3..a640f1c369ae 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -637,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
637 **/ 637 **/
638s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) 638s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
639{ 639{
640 struct e1000_phy_info *phy = &hw->phy;
641 s32 ret_val; 640 s32 ret_val;
642 u16 phy_data; 641 u16 phy_data;
643 642
644 /* Enable CRS on TX. This must be set for half-duplex operation. */ 643 /* Enable CRS on TX. This must be set for half-duplex operation. */
645 ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); 644 ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
646 if (ret_val) 645 if (ret_val)
647 goto out; 646 goto out;
648 647
@@ -651,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
651 /* Enable downshift */ 650 /* Enable downshift */
652 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 651 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
653 652
654 ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); 653 ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
655 654
656out: 655out:
657 return ret_val; 656 return ret_val;
@@ -774,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
774 } 773 }
775 774
776 if (phy->type == e1000_phy_82578) { 775 if (phy->type == e1000_phy_82578) {
777 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 776 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
778 &phy_data);
779 if (ret_val) 777 if (ret_val)
780 return ret_val; 778 return ret_val;
781 779
782 /* 82578 PHY - set the downshift count to 1x. */ 780 /* 82578 PHY - set the downshift count to 1x. */
783 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; 781 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
784 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; 782 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
785 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 783 ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
786 phy_data);
787 if (ret_val) 784 if (ret_val)
788 return ret_val; 785 return ret_val;
789 } 786 }
@@ -1319,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1319 * We didn't get link. 1316 * We didn't get link.
1320 * Reset the DSP and cross our fingers. 1317 * Reset the DSP and cross our fingers.
1321 */ 1318 */
1322 ret_val = e1e_wphy(hw, 1319 ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
1323 M88E1000_PHY_PAGE_SELECT, 1320 0x001d);
1324 0x001d);
1325 if (ret_val) 1321 if (ret_val)
1326 return ret_val; 1322 return ret_val;
1327 ret_val = e1000e_phy_reset_dsp(hw); 1323 ret_val = e1000e_phy_reset_dsp(hw);
@@ -3071,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
3071 goto out; 3067 goto out;
3072 3068
3073 /* Do not apply workaround if in PHY loopback bit 14 set */ 3069 /* Do not apply workaround if in PHY loopback bit 14 set */
3074 hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); 3070 e1e_rphy(hw, PHY_CONTROL, &data);
3075 if (data & PHY_CONTROL_LB) 3071 if (data & PHY_CONTROL_LB)
3076 goto out; 3072 goto out;
3077 3073
3078 /* check if link is up and at 1Gbps */ 3074 /* check if link is up and at 1Gbps */
3079 ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); 3075 ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
3080 if (ret_val) 3076 if (ret_val)
3081 goto out; 3077 goto out;
3082 3078
@@ -3092,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
3092 mdelay(200); 3088 mdelay(200);
3093 3089
3094 /* flush the packets in the fifo buffer */ 3090 /* flush the packets in the fifo buffer */
3095 ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, 3091 ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
3096 HV_MUX_DATA_CTRL_GEN_TO_MAC | 3092 HV_MUX_DATA_CTRL_FORCE_SPEED);
3097 HV_MUX_DATA_CTRL_FORCE_SPEED);
3098 if (ret_val) 3093 if (ret_val)
3099 goto out; 3094 goto out;
3100 3095
3101 ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, 3096 ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
3102 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3103 3097
3104out: 3098out:
3105 return ret_val; 3099 return ret_val;
@@ -3119,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
3119 s32 ret_val; 3113 s32 ret_val;
3120 u16 data; 3114 u16 data;
3121 3115
3122 ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); 3116 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
3123 3117
3124 if (!ret_val) 3118 if (!ret_val)
3125 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) 3119 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3142,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
3142 u16 phy_data; 3136 u16 phy_data;
3143 bool link; 3137 bool link;
3144 3138
3145 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); 3139 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
3146 if (ret_val) 3140 if (ret_val)
3147 goto out; 3141 goto out;
3148 3142
3149 e1000e_phy_force_speed_duplex_setup(hw, &phy_data); 3143 e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
3150 3144
3151 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); 3145 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
3152 if (ret_val) 3146 if (ret_val)
3153 goto out; 3147 goto out;
3154 3148
@@ -3212,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3212 if (ret_val) 3206 if (ret_val)
3213 goto out; 3207 goto out;
3214 3208
3215 ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); 3209 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
3216 if (ret_val) 3210 if (ret_val)
3217 goto out; 3211 goto out;
3218 3212
@@ -3224,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3224 if (ret_val) 3218 if (ret_val)
3225 goto out; 3219 goto out;
3226 3220
3227 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); 3221 ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
3228 if (ret_val) 3222 if (ret_val)
3229 goto out; 3223 goto out;
3230 3224
@@ -3258,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
3258 s32 ret_val; 3252 s32 ret_val;
3259 u16 phy_data, length; 3253 u16 phy_data, length;
3260 3254
3261 ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); 3255 ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
3262 if (ret_val) 3256 if (ret_val)
3263 goto out; 3257 goto out;
3264 3258
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index a724a2d14506..6c7257bd73fc 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0106" 43#define DRV_VERSION "EHEA_0107"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 1032b5bbe238..f75d3144b8a5 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
437 } 437 }
438 } 438 }
439 /* Ring doorbell */ 439 /* Ring doorbell */
440 ehea_update_rq1a(pr->qp, i); 440 ehea_update_rq1a(pr->qp, i - 1);
441} 441}
442 442
443static int ehea_refill_rq_def(struct ehea_port_res *pr, 443static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
1329 int ret; 1329 int ret;
1330 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; 1330 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1331 1331
1332 ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 1332 ehea_init_fill_rq1(pr, pr->rq1_skba.len);
1333 - init_attr->act_nr_rwqes_rq2
1334 - init_attr->act_nr_rwqes_rq3 - 1);
1335 1333
1336 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); 1334 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
1337 1335
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d43175f..2a71373719ae 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
17 * 17 *
18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
19 * Copyright (c) 2004-2006 Macq Electronique SA. 19 * Copyright (c) 2004-2006 Macq Electronique SA.
20 *
21 * Copyright (C) 2010 Freescale Semiconductor, Inc.
20 */ 22 */
21 23
22#include <linux/module.h> 24#include <linux/module.h>
@@ -45,29 +47,41 @@
45 47
46#include <asm/cacheflush.h> 48#include <asm/cacheflush.h>
47 49
48#ifndef CONFIG_ARCH_MXC 50#ifndef CONFIG_ARM
49#include <asm/coldfire.h> 51#include <asm/coldfire.h>
50#include <asm/mcfsim.h> 52#include <asm/mcfsim.h>
51#endif 53#endif
52 54
53#include "fec.h" 55#include "fec.h"
54 56
55#ifdef CONFIG_ARCH_MXC 57#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
56#include <mach/hardware.h>
57#define FEC_ALIGNMENT 0xf 58#define FEC_ALIGNMENT 0xf
58#else 59#else
59#define FEC_ALIGNMENT 0x3 60#define FEC_ALIGNMENT 0x3
60#endif 61#endif
61 62
62/* 63#define DRIVER_NAME "fec"
63 * Define the fixed address of the FEC hardware. 64
64 */ 65/* Controller is ENET-MAC */
65#if defined(CONFIG_M5272) 66#define FEC_QUIRK_ENET_MAC (1 << 0)
67/* Controller needs driver to swap frame */
68#define FEC_QUIRK_SWAP_FRAME (1 << 1)
66 69
67static unsigned char fec_mac_default[] = { 70static struct platform_device_id fec_devtype[] = {
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 71 {
72 .name = DRIVER_NAME,
73 .driver_data = 0,
74 }, {
75 .name = "imx28-fec",
76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
77 }
69}; 78};
70 79
80static unsigned char macaddr[ETH_ALEN];
81module_param_array(macaddr, byte, NULL, 0);
82MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
83
84#if defined(CONFIG_M5272)
71/* 85/*
 72 * Some hardware gets its MAC address out of local flash memory. 86 * Some hardware gets its MAC address out of local flash memory.
 73 * If this is non-zero, assume it is the address to get the MAC from. 87 * If this is non-zero, assume it is the address to get the MAC from.
@@ -133,7 +147,8 @@ static unsigned char fec_mac_default[] = {
133 * account when setting it. 147 * account when setting it.
134 */ 148 */
135#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 149#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
136 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) 150 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
151 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
137#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 152#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
138#else 153#else
139#define OPT_FRAME_SIZE 0 154#define OPT_FRAME_SIZE 0
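The fec_devtype[] table above is what routes these quirks into the driver: a platform device registered under the name "imx28-fec" matches the second entry, and platform_get_device_id() later hands its driver_data back to the code paths that test the quirk bits. A hypothetical board file would select the quirky variant purely by name:

    /* hypothetical i.MX28 board code; the MMIO window and IRQ
     * resources are omitted. Registering under "imx28-fec" is what
     * enables FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME. */
    static struct platform_device imx28_fec_device = {
        .name = "imx28-fec",
        .id   = 0,
    };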
@@ -186,7 +201,6 @@ struct fec_enet_private {
186 int mii_timeout; 201 int mii_timeout;
187 uint phy_speed; 202 uint phy_speed;
188 phy_interface_t phy_interface; 203 phy_interface_t phy_interface;
189 int index;
190 int link; 204 int link;
191 int full_duplex; 205 int full_duplex;
192 struct completion mdio_done; 206 struct completion mdio_done;
@@ -213,10 +227,23 @@ static void fec_stop(struct net_device *dev);
213/* Transmitter timeout */ 227/* Transmitter timeout */
214#define TX_TIMEOUT (2 * HZ) 228#define TX_TIMEOUT (2 * HZ)
215 229
230static void *swap_buffer(void *bufaddr, int len)
231{
232 int i;
233 unsigned int *buf = bufaddr;
234
235 for (i = 0; i < (len + 3) / 4; i++, buf++)
236 *buf = cpu_to_be32(*buf);
237
238 return bufaddr;
239}
240
216static netdev_tx_t 241static netdev_tx_t
217fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 242fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
218{ 243{
219 struct fec_enet_private *fep = netdev_priv(dev); 244 struct fec_enet_private *fep = netdev_priv(dev);
245 const struct platform_device_id *id_entry =
246 platform_get_device_id(fep->pdev);
220 struct bufdesc *bdp; 247 struct bufdesc *bdp;
221 void *bufaddr; 248 void *bufaddr;
222 unsigned short status; 249 unsigned short status;
@@ -261,6 +288,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 bufaddr = fep->tx_bounce[index]; 288 bufaddr = fep->tx_bounce[index];
262 } 289 }
263 290
291 /*
 292 * Some designs made an incorrect assumption about the endian
 293 * mode of the system they run on. As a result, the driver has to
294 * swap every frame going to and coming from the controller.
295 */
296 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
297 swap_buffer(bufaddr, skb->len);
298
264 /* Save skb pointer */ 299 /* Save skb pointer */
265 fep->tx_skbuff[fep->skb_cur] = skb; 300 fep->tx_skbuff[fep->skb_cur] = skb;
266 301
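swap_buffer() above is a plain word-wise byte swap. A self-contained userspace demonstration, with htonl() standing in for cpu_to_be32() (identical behavior on a little-endian host, and assuming a 32-bit unsigned int):

    #include <stdio.h>
    #include <arpa/inet.h>

    /* same loop as the driver's swap_buffer() */
    static void swap_buffer(void *bufaddr, int len)
    {
        unsigned int *buf = bufaddr;
        int i;

        for (i = 0; i < (len + 3) / 4; i++, buf++)
            *buf = htonl(*buf);
    }

    int main(void)
    {
        /* aligned so the word-wise accesses above are legal */
        unsigned char frame[8] __attribute__((aligned(4))) =
            { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
        int i;

        swap_buffer(frame, sizeof(frame));
        for (i = 0; i < 8; i++)
            printf("%02x ", frame[i]); /* 04 03 02 01 08 07 06 05 on LE */
        printf("\n");
        return 0;
    }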
@@ -429,6 +464,8 @@ static void
429fec_enet_rx(struct net_device *dev) 464fec_enet_rx(struct net_device *dev)
430{ 465{
431 struct fec_enet_private *fep = netdev_priv(dev); 466 struct fec_enet_private *fep = netdev_priv(dev);
467 const struct platform_device_id *id_entry =
468 platform_get_device_id(fep->pdev);
432 struct bufdesc *bdp; 469 struct bufdesc *bdp;
433 unsigned short status; 470 unsigned short status;
434 struct sk_buff *skb; 471 struct sk_buff *skb;
@@ -492,6 +529,9 @@ fec_enet_rx(struct net_device *dev)
492 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, 529 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
493 DMA_FROM_DEVICE); 530 DMA_FROM_DEVICE);
494 531
532 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
533 swap_buffer(data, pkt_len);
534
495 /* This does 16 byte alignment, exactly what we need. 535 /* This does 16 byte alignment, exactly what we need.
496 * The packet length includes FCS, but we don't want to 536 * The packet length includes FCS, but we don't want to
497 * include that when passing upstream as it messes up 537 * include that when passing upstream as it messes up
@@ -538,37 +578,50 @@ rx_processing_done:
538} 578}
539 579
540/* ------------------------------------------------------------------------- */ 580/* ------------------------------------------------------------------------- */
541#ifdef CONFIG_M5272
542static void __inline__ fec_get_mac(struct net_device *dev) 581static void __inline__ fec_get_mac(struct net_device *dev)
543{ 582{
544 struct fec_enet_private *fep = netdev_priv(dev); 583 struct fec_enet_private *fep = netdev_priv(dev);
584 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
545 unsigned char *iap, tmpaddr[ETH_ALEN]; 585 unsigned char *iap, tmpaddr[ETH_ALEN];
546 586
547 if (FEC_FLASHMAC) { 587 /*
548 /* 588 * try to get mac address in following order:
549 * Get MAC address from FLASH. 589 *
550 * If it is all 1's or 0's, use the default. 590 * 1) module parameter via kernel command line in form
551 */ 591 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
552 iap = (unsigned char *)FEC_FLASHMAC; 592 */
553 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && 593 iap = macaddr;
554 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) 594
555 iap = fec_mac_default; 595 /*
556 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && 596 * 2) from flash or fuse (via platform data)
557 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 597 */
558 iap = fec_mac_default; 598 if (!is_valid_ether_addr(iap)) {
559 } else { 599#ifdef CONFIG_M5272
560 *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); 600 if (FEC_FLASHMAC)
561 *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 601 iap = (unsigned char *)FEC_FLASHMAC;
602#else
603 if (pdata)
604 memcpy(iap, pdata->mac, ETH_ALEN);
605#endif
606 }
607
608 /*
609 * 3) FEC mac registers set by bootloader
610 */
611 if (!is_valid_ether_addr(iap)) {
612 *((unsigned long *) &tmpaddr[0]) =
613 be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
614 *((unsigned short *) &tmpaddr[4]) =
615 be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
562 iap = &tmpaddr[0]; 616 iap = &tmpaddr[0];
563 } 617 }
564 618
565 memcpy(dev->dev_addr, iap, ETH_ALEN); 619 memcpy(dev->dev_addr, iap, ETH_ALEN);
566 620
567 /* Adjust MAC if using default MAC address */ 621 /* Adjust MAC if using macaddr */
568 if (iap == fec_mac_default) 622 if (iap == macaddr)
569 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 623 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
570} 624}
571#endif
572 625
573/* ------------------------------------------------------------------------- */ 626/* ------------------------------------------------------------------------- */
574 627
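Given the last-byte adjustment above, a presumed dual-FEC board booted with fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 would come up as 00:04:9f:01:30:e0 on fec0 and 00:04:9f:01:30:e1 on fec1, since dev_addr[ETH_ALEN-1] is offset by fep->pdev->id.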
@@ -651,8 +704,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
651 fep->mii_timeout = 0; 704 fep->mii_timeout = 0;
652 init_completion(&fep->mdio_done); 705 init_completion(&fep->mdio_done);
653 706
654 /* start a read op */ 707 /* start a write op */
655 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 708 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
656 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 709 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
657 FEC_MMFR_TA | FEC_MMFR_DATA(value), 710 FEC_MMFR_TA | FEC_MMFR_DATA(value),
658 fep->hwp + FEC_MII_DATA); 711 fep->hwp + FEC_MII_DATA);
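For context, FEC_MII_DATA (the MMFR register) encodes a standard IEEE 802.3 clause-22 management frame. A self-contained sketch of the field layout, with the macro values quoted from the driver's fec.h as this editor recalls them (verify against the header before relying on them):

    #include <stdio.h>
    #include <stdint.h>

    #define MMFR_ST        (1u << 30)            /* start of frame: 01 */
    #define MMFR_OP_WRITE  (1u << 28)            /* opcode 01 = write */
    #define MMFR_OP_READ   (2u << 28)            /* opcode 10 = read */
    #define MMFR_PA(x)     (((x) & 0x1fu) << 23) /* 5-bit PHY address */
    #define MMFR_RA(x)     (((x) & 0x1fu) << 18) /* 5-bit register address */
    #define MMFR_TA        (2u << 16)            /* turnaround: 10 */
    #define MMFR_DATA(x)   ((x) & 0xffffu)       /* 16-bit data */

    int main(void)
    {
        /* write 0x1234 to register 0 of the PHY at address 1 */
        uint32_t frame = MMFR_ST | MMFR_OP_WRITE | MMFR_PA(1) |
                         MMFR_RA(0) | MMFR_TA | MMFR_DATA(0x1234);

        printf("MMFR word: 0x%08x\n", frame);
        return 0;
    }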
@@ -681,6 +734,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
681 char mdio_bus_id[MII_BUS_ID_SIZE]; 734 char mdio_bus_id[MII_BUS_ID_SIZE];
682 char phy_name[MII_BUS_ID_SIZE + 3]; 735 char phy_name[MII_BUS_ID_SIZE + 3];
683 int phy_id; 736 int phy_id;
737 int dev_id = fep->pdev->id;
684 738
685 fep->phy_dev = NULL; 739 fep->phy_dev = NULL;
686 740
@@ -692,6 +746,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
692 continue; 746 continue;
693 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) 747 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
694 continue; 748 continue;
749 if (dev_id--)
750 continue;
695 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 751 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
696 break; 752 break;
697 } 753 }
@@ -729,10 +785,35 @@ static int fec_enet_mii_probe(struct net_device *dev)
729 785
730static int fec_enet_mii_init(struct platform_device *pdev) 786static int fec_enet_mii_init(struct platform_device *pdev)
731{ 787{
788 static struct mii_bus *fec0_mii_bus;
732 struct net_device *dev = platform_get_drvdata(pdev); 789 struct net_device *dev = platform_get_drvdata(pdev);
733 struct fec_enet_private *fep = netdev_priv(dev); 790 struct fec_enet_private *fep = netdev_priv(dev);
791 const struct platform_device_id *id_entry =
792 platform_get_device_id(fep->pdev);
734 int err = -ENXIO, i; 793 int err = -ENXIO, i;
735 794
795 /*
 796 * The dual FEC interfaces are not equivalent on enet-mac.
 797 * Here are the differences:
 798 *
 799 * - fec0 supports MII & RMII modes while fec1 only supports RMII
 800 * - fec0 acts as the 1588 time master while fec1 is the slave
 801 * - external PHYs can only be configured by fec0
 802 *
 803 * That is to say, fec1 cannot work independently. It only works
 804 * when fec0 is working. The reason behind this design is that the
 805 * second interface is added primarily for Switch mode.
 806 *
 807 * Because of the last point above, both PHYs are attached to the
 808 * fec0 MDIO interface in the board design, and need to be
 809 * configured by the fec0 mii_bus.
810 */
811 if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
812 /* fec1 uses fec0 mii_bus */
813 fep->mii_bus = fec0_mii_bus;
814 return 0;
815 }
816
736 fep->mii_timeout = 0; 817 fep->mii_timeout = 0;
737 818
738 /* 819 /*
@@ -769,6 +850,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
769 if (mdiobus_register(fep->mii_bus)) 850 if (mdiobus_register(fep->mii_bus))
770 goto err_out_free_mdio_irq; 851 goto err_out_free_mdio_irq;
771 852
853 /* save fec0 mii_bus */
854 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
855 fec0_mii_bus = fep->mii_bus;
856
772 return 0; 857 return 0;
773 858
774err_out_free_mdio_irq: 859err_out_free_mdio_irq:
@@ -1067,9 +1152,8 @@ static const struct net_device_ops fec_netdev_ops = {
1067 /* 1152 /*
1068 * XXX: We need to clean up on failure exits here. 1153 * XXX: We need to clean up on failure exits here.
1069 * 1154 *
1070 * index is only used in legacy code
1071 */ 1155 */
1072static int fec_enet_init(struct net_device *dev, int index) 1156static int fec_enet_init(struct net_device *dev)
1073{ 1157{
1074 struct fec_enet_private *fep = netdev_priv(dev); 1158 struct fec_enet_private *fep = netdev_priv(dev);
1075 struct bufdesc *cbd_base; 1159 struct bufdesc *cbd_base;
@@ -1086,26 +1170,11 @@ static int fec_enet_init(struct net_device *dev, int index)
1086 1170
1087 spin_lock_init(&fep->hw_lock); 1171 spin_lock_init(&fep->hw_lock);
1088 1172
1089 fep->index = index;
1090 fep->hwp = (void __iomem *)dev->base_addr; 1173 fep->hwp = (void __iomem *)dev->base_addr;
1091 fep->netdev = dev; 1174 fep->netdev = dev;
1092 1175
1093 /* Set the Ethernet address */ 1176 /* Get the Ethernet address */
1094#ifdef CONFIG_M5272
1095 fec_get_mac(dev); 1177 fec_get_mac(dev);
1096#else
1097 {
1098 unsigned long l;
1099 l = readl(fep->hwp + FEC_ADDR_LOW);
1100 dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
1101 dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
1102 dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
1103 dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
1104 l = readl(fep->hwp + FEC_ADDR_HIGH);
1105 dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
1106 dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
1107 }
1108#endif
1109 1178
1110 /* Set receive and transmit descriptor base. */ 1179 /* Set receive and transmit descriptor base. */
1111 fep->rx_bd_base = cbd_base; 1180 fep->rx_bd_base = cbd_base;
@@ -1156,12 +1225,25 @@ static void
1156fec_restart(struct net_device *dev, int duplex) 1225fec_restart(struct net_device *dev, int duplex)
1157{ 1226{
1158 struct fec_enet_private *fep = netdev_priv(dev); 1227 struct fec_enet_private *fep = netdev_priv(dev);
1228 const struct platform_device_id *id_entry =
1229 platform_get_device_id(fep->pdev);
1159 int i; 1230 int i;
1231 u32 val, temp_mac[2];
1160 1232
1161 /* Whack a reset. We should wait for this. */ 1233 /* Whack a reset. We should wait for this. */
1162 writel(1, fep->hwp + FEC_ECNTRL); 1234 writel(1, fep->hwp + FEC_ECNTRL);
1163 udelay(10); 1235 udelay(10);
1164 1236
1237 /*
 1238 * An enet-mac reset clears the MAC address registers too,
 1239 * so they need to be reprogrammed.
1240 */
1241 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1242 memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
1243 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
1244 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
1245 }
1246
1165 /* Clear any outstanding interrupt. */ 1247 /* Clear any outstanding interrupt. */
1166 writel(0xffc00000, fep->hwp + FEC_IEVENT); 1248 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1167 1249
@@ -1208,20 +1290,45 @@ fec_restart(struct net_device *dev, int duplex)
1208 /* Set MII speed */ 1290 /* Set MII speed */
1209 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1291 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1210 1292
1211#ifdef FEC_MIIGSK_ENR 1293 /*
 1212 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { 1294 * The PHY interface and speed need to be configured
 1213 /* disable the gasket and wait */ 1295 * differently on enet-mac.
1214 writel(0, fep->hwp + FEC_MIIGSK_ENR); 1296 */
1215 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1297 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1216 udelay(1); 1298 val = readl(fep->hwp + FEC_R_CNTRL);
1217 1299
1218 /* configure the gasket: RMII, 50 MHz, no loopback, no echo */ 1300 /* MII or RMII */
1219 writel(1, fep->hwp + FEC_MIIGSK_CFGR); 1301 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1302 val |= (1 << 8);
1303 else
1304 val &= ~(1 << 8);
1220 1305
1221 /* re-enable the gasket */ 1306 /* 10M or 100M */
1222 writel(2, fep->hwp + FEC_MIIGSK_ENR); 1307 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
1223 } 1308 val &= ~(1 << 9);
1309 else
1310 val |= (1 << 9);
1311
1312 writel(val, fep->hwp + FEC_R_CNTRL);
1313 } else {
1314#ifdef FEC_MIIGSK_ENR
1315 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
1316 /* disable the gasket and wait */
1317 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1318 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1319 udelay(1);
1320
1321 /*
1322 * configure the gasket:
1323 * RMII, 50 MHz, no loopback, no echo
1324 */
1325 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
1326
1327 /* re-enable the gasket */
1328 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1329 }
1224#endif 1330#endif
1331 }
1225 1332
1226 /* And last, enable the transmit and receive processing */ 1333 /* And last, enable the transmit and receive processing */
1227 writel(2, fep->hwp + FEC_ECNTRL); 1334 writel(2, fep->hwp + FEC_ECNTRL);
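The raw (1 << 8) and (1 << 9) shifts in the enet-mac branch above correspond to named bits of the ENET receive control register (RCR) on i.MX28-class parts; the names below are presumed from the i.MX28 reference manual (the driver itself uses bare shifts):

    /* presumed i.MX28 ENET RCR bit names for the shifts above */
    #define FEC_RCR_RMII_MODE  (1 << 8)  /* 1 = RMII, 0 = MII */
    #define FEC_RCR_RMII_10T   (1 << 9)  /* 1 = 10 Mbps, 0 = 100 Mbps */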
@@ -1316,7 +1423,7 @@ fec_probe(struct platform_device *pdev)
1316 } 1423 }
1317 clk_enable(fep->clk); 1424 clk_enable(fep->clk);
1318 1425
1319 ret = fec_enet_init(ndev, 0); 1426 ret = fec_enet_init(ndev);
1320 if (ret) 1427 if (ret)
1321 goto failed_init; 1428 goto failed_init;
1322 1429
@@ -1380,8 +1487,10 @@ fec_suspend(struct device *dev)
1380 1487
1381 if (ndev) { 1488 if (ndev) {
1382 fep = netdev_priv(ndev); 1489 fep = netdev_priv(ndev);
1383 if (netif_running(ndev)) 1490 if (netif_running(ndev)) {
1384 fec_enet_close(ndev); 1491 fec_stop(ndev);
1492 netif_device_detach(ndev);
1493 }
1385 clk_disable(fep->clk); 1494 clk_disable(fep->clk);
1386 } 1495 }
1387 return 0; 1496 return 0;
@@ -1396,8 +1505,10 @@ fec_resume(struct device *dev)
1396 if (ndev) { 1505 if (ndev) {
1397 fep = netdev_priv(ndev); 1506 fep = netdev_priv(ndev);
1398 clk_enable(fep->clk); 1507 clk_enable(fep->clk);
1399 if (netif_running(ndev)) 1508 if (netif_running(ndev)) {
1400 fec_enet_open(ndev); 1509 fec_restart(ndev, fep->full_duplex);
1510 netif_device_attach(ndev);
1511 }
1401 } 1512 }
1402 return 0; 1513 return 0;
1403} 1514}
@@ -1414,12 +1525,13 @@ static const struct dev_pm_ops fec_pm_ops = {
1414 1525
1415static struct platform_driver fec_driver = { 1526static struct platform_driver fec_driver = {
1416 .driver = { 1527 .driver = {
1417 .name = "fec", 1528 .name = DRIVER_NAME,
1418 .owner = THIS_MODULE, 1529 .owner = THIS_MODULE,
1419#ifdef CONFIG_PM 1530#ifdef CONFIG_PM
1420 .pm = &fec_pm_ops, 1531 .pm = &fec_pm_ops,
1421#endif 1532#endif
1422 }, 1533 },
1534 .id_table = fec_devtype,
1423 .probe = fec_probe, 1535 .probe = fec_probe,
1424 .remove = __devexit_p(fec_drv_remove), 1536 .remove = __devexit_p(fec_drv_remove),
1425}; 1537};
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25668d5..ace318df4c8d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
14/****************************************************************************/ 14/****************************************************************************/
15 15
16#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 16#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
17 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) 17 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
18 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
18/* 19/*
19 * Just figures, Motorola would have to change the offsets for 20 * Just figures, Motorola would have to change the offsets for
20 * registers in the same peripheral device on different models 21 * registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
78/* 79/*
79 * Define the buffer descriptor structure. 80 * Define the buffer descriptor structure.
80 */ 81 */
81#ifdef CONFIG_ARCH_MXC 82#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
82struct bufdesc { 83struct bufdesc {
83 unsigned short cbd_datlen; /* Data length */ 84 unsigned short cbd_datlen; /* Data length */
84 unsigned short cbd_sc; /* Control and status info */ 85 unsigned short cbd_sc; /* Control and status info */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cd2d72d825df..af09296ef0dd 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3949,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3949 writel(flags, base + NvRegWakeUpFlags); 3949 writel(flags, base + NvRegWakeUpFlags);
3950 spin_unlock_irq(&np->lock); 3950 spin_unlock_irq(&np->lock);
3951 } 3951 }
3952 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
3952 return 0; 3953 return 0;
3953} 3954}
3954 3955
@@ -5488,14 +5489,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5488 /* set mac address */ 5489 /* set mac address */
5489 nv_copy_mac_to_hw(dev); 5490 nv_copy_mac_to_hw(dev);
5490 5491
5491 /* Workaround current PCI init glitch: wakeup bits aren't
5492 * being set from PCI PM capability.
5493 */
5494 device_init_wakeup(&pci_dev->dev, 1);
5495
5496 /* disable WOL */ 5492 /* disable WOL */
5497 writel(0, base + NvRegWakeUpFlags); 5493 writel(0, base + NvRegWakeUpFlags);
5498 np->wolenabled = 0; 5494 np->wolenabled = 0;
5495 device_set_wakeup_enable(&pci_dev->dev, false);
5499 5496
5500 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5497 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5501 5498
@@ -5746,8 +5743,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
5746} 5743}
5747 5744
5748#ifdef CONFIG_PM 5745#ifdef CONFIG_PM
5749static int nv_suspend(struct pci_dev *pdev, pm_message_t state) 5746static int nv_suspend(struct device *device)
5750{ 5747{
5748 struct pci_dev *pdev = to_pci_dev(device);
5751 struct net_device *dev = pci_get_drvdata(pdev); 5749 struct net_device *dev = pci_get_drvdata(pdev);
5752 struct fe_priv *np = netdev_priv(dev); 5750 struct fe_priv *np = netdev_priv(dev);
5753 u8 __iomem *base = get_hwbase(dev); 5751 u8 __iomem *base = get_hwbase(dev);
@@ -5763,25 +5761,17 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5763 for (i = 0; i <= np->register_size/sizeof(u32); i++) 5761 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5764 np->saved_config_space[i] = readl(base + i*sizeof(u32)); 5762 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5765 5763
5766 pci_save_state(pdev);
5767 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5768 pci_disable_device(pdev);
5769 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5770 return 0; 5764 return 0;
5771} 5765}
5772 5766
5773static int nv_resume(struct pci_dev *pdev) 5767static int nv_resume(struct device *device)
5774{ 5768{
5769 struct pci_dev *pdev = to_pci_dev(device);
5775 struct net_device *dev = pci_get_drvdata(pdev); 5770 struct net_device *dev = pci_get_drvdata(pdev);
5776 struct fe_priv *np = netdev_priv(dev); 5771 struct fe_priv *np = netdev_priv(dev);
5777 u8 __iomem *base = get_hwbase(dev); 5772 u8 __iomem *base = get_hwbase(dev);
5778 int i, rc = 0; 5773 int i, rc = 0;
5779 5774
5780 pci_set_power_state(pdev, PCI_D0);
5781 pci_restore_state(pdev);
5782 /* ack any pending wake events, disable PME */
5783 pci_enable_wake(pdev, PCI_D0, 0);
5784
5785 /* restore non-pci configuration space */ 5775 /* restore non-pci configuration space */
5786 for (i = 0; i <= np->register_size/sizeof(u32); i++) 5776 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5787 writel(np->saved_config_space[i], base+i*sizeof(u32)); 5777 writel(np->saved_config_space[i], base+i*sizeof(u32));
@@ -5800,6 +5790,9 @@ static int nv_resume(struct pci_dev *pdev)
5800 return rc; 5790 return rc;
5801} 5791}
5802 5792
5793static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
5794#define NV_PM_OPS (&nv_pm_ops)
5795
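SIMPLE_DEV_PM_OPS() is the PM core's convenience macro for exactly this conversion; what it provides is roughly the following (simplified sketch; the real macro declares the structure for you and ties the hibernation callbacks to the same pair):

    static const struct dev_pm_ops nv_pm_ops = {
        .suspend  = nv_suspend,
        .resume   = nv_resume,
        .freeze   = nv_suspend,
        .thaw     = nv_resume,
        .poweroff = nv_suspend,
        .restore  = nv_resume,
    };

With the legacy .suspend/.resume hooks gone from struct pci_driver, the PCI core now handles saving config space and the D-state transitions that nv_suspend()/nv_resume() used to do by hand.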
5803static void nv_shutdown(struct pci_dev *pdev) 5796static void nv_shutdown(struct pci_dev *pdev)
5804{ 5797{
5805 struct net_device *dev = pci_get_drvdata(pdev); 5798 struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,15 +5815,13 @@ static void nv_shutdown(struct pci_dev *pdev)
5822 * only put the device into D3 if we really go for poweroff. 5815 * only put the device into D3 if we really go for poweroff.
5823 */ 5816 */
5824 if (system_state == SYSTEM_POWER_OFF) { 5817 if (system_state == SYSTEM_POWER_OFF) {
5825 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled)) 5818 pci_wake_from_d3(pdev, np->wolenabled);
5826 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5827 pci_set_power_state(pdev, PCI_D3hot); 5819 pci_set_power_state(pdev, PCI_D3hot);
5828 } 5820 }
5829} 5821}
5830#else 5822#else
5831#define nv_suspend NULL 5823#define NV_PM_OPS NULL
5832#define nv_shutdown NULL 5824#define nv_shutdown NULL
5833#define nv_resume NULL
5834#endif /* CONFIG_PM */ 5825#endif /* CONFIG_PM */
5835 5826
5836static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { 5827static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6002,9 +5993,8 @@ static struct pci_driver driver = {
6002 .id_table = pci_tbl, 5993 .id_table = pci_tbl,
6003 .probe = nv_probe, 5994 .probe = nv_probe,
6004 .remove = __devexit_p(nv_remove), 5995 .remove = __devexit_p(nv_remove),
6005 .suspend = nv_suspend,
6006 .resume = nv_resume,
6007 .shutdown = nv_shutdown, 5996 .shutdown = nv_shutdown,
5997 .driver.pm = NV_PM_OPS,
6008}; 5998};
6009 5999
6010static int __init init_nic(void) 6000static int __init init_nic(void)
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0a2340..7d9ced0738c5 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
396 while (p) { 396 while (p) {
397 if (p->bitrate == bitrate) { 397 if (p->bitrate == bitrate) {
398 memcpy(p->bits, bits, YAM_FPGA_SIZE); 398 memcpy(p->bits, bits, YAM_FPGA_SIZE);
399 return p->bits; 399 goto out;
400 } 400 }
401 p = p->next; 401 p = p->next;
402 } 402 }
@@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
411 p->bitrate = bitrate; 411 p->bitrate = bitrate;
412 p->next = yam_data; 412 p->next = yam_data;
413 yam_data = p; 413 yam_data = p;
414 414 out:
415 release_firmware(fw); 415 release_firmware(fw);
416 return p->bits; 416 return p->bits;
417} 417}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8cb7d6..3b8c92463617 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
508extern void ixgbe_free_tx_resources(struct ixgbe_ring *); 508extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
509extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 509extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
510extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 510extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
511extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
512 struct ixgbe_ring *);
511extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 513extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
512extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 514extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
513extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 515extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
@@ -524,26 +526,13 @@ extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
524extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 526extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
525extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 527extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
526extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 528extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
527 struct ixgbe_atr_input *input, 529 union ixgbe_atr_hash_dword input,
530 union ixgbe_atr_hash_dword common,
528 u8 queue); 531 u8 queue);
529extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 532extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
530 struct ixgbe_atr_input *input, 533 union ixgbe_atr_input *input,
531 struct ixgbe_atr_input_masks *input_masks, 534 struct ixgbe_atr_input_masks *input_masks,
532 u16 soft_id, u8 queue); 535 u16 soft_id, u8 queue);
533extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
534 u16 vlan_id);
535extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
536 u32 src_addr);
537extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
538 u32 dst_addr);
539extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
540 u16 src_port);
541extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
542 u16 dst_port);
543extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
544 u16 flex_byte);
545extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
546 u8 l4type);
547extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 536extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
548 struct ixgbe_ring *ring); 537 struct ixgbe_ring *ring);
549extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, 538extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index bfd3c227cd4a..8d316d9cd29d 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1003,7 +1003,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1003 udelay(10); 1003 udelay(10);
1004 } 1004 }
1005 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1005 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1006 hw_dbg(hw ,"Flow Director previous command isn't complete, " 1006 hw_dbg(hw, "Flow Director previous command isn't complete, "
1007 "aborting table re-initialization.\n"); 1007 "aborting table re-initialization.\n");
1008 return IXGBE_ERR_FDIR_REINIT_FAILED; 1008 return IXGBE_ERR_FDIR_REINIT_FAILED;
1009 } 1009 }
@@ -1113,13 +1113,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1113 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1113 /* Move the flexible bytes to use the ethertype - shift 6 words */
1114 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1114 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1115 1115
1116 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1117 1116
1118 /* Prime the keys for hashing */ 1117 /* Prime the keys for hashing */
1119 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1118 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1120 htonl(IXGBE_ATR_BUCKET_HASH_KEY)); 1119 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1121 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1122 htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
1123 1120
1124 /* 1121 /*
1125 * Poll init-done after we write the register. Estimated times: 1122 * Poll init-done after we write the register. Estimated times:
@@ -1209,10 +1206,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1209 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1206 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1210 1207
1211 /* Prime the keys for hashing */ 1208 /* Prime the keys for hashing */
1212 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1209 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1213 htonl(IXGBE_ATR_BUCKET_HASH_KEY)); 1210 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1214 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1215 htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
1216 1211
1217 /* 1212 /*
1218 * Poll init-done after we write the register. Estimated times: 1213 * Poll init-done after we write the register. Estimated times:
@@ -1251,8 +1246,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1251 * @stream: input bitstream to compute the hash on 1246 * @stream: input bitstream to compute the hash on
1252 * @key: 32-bit hash key 1247 * @key: 32-bit hash key
1253 **/ 1248 **/
1254static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, 1249static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
1255 u32 key) 1250 u32 key)
1256{ 1251{
1257 /* 1252 /*
1258 * The algorithm is as follows: 1253 * The algorithm is as follows:
@@ -1272,410 +1267,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
1272 * To simplify for programming, the algorithm is implemented 1267 * To simplify for programming, the algorithm is implemented
1273 * in software this way: 1268 * in software this way:
1274 * 1269 *
1275 * Key[31:0], Stream[335:0] 1270 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
1271 *
1272 * for (i = 0; i < 352; i+=32)
1273 * hi_hash_dword[31:0] ^= Stream[(i+31):i];
1274 *
1275 * lo_hash_dword[15:0] ^= Stream[15:0];
1276 * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
1277 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
1276 * 1278 *
1277 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times 1279 * hi_hash_dword[31:0] ^= Stream[351:320];
1278 * int_key[350:0] = tmp_key[351:1]
1279 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1280 * 1280 *
1281 * hash[15:0] = 0; 1281 * if(key[0])
1282 * for (i = 0; i < 351; i++) { 1282 * hash[15:0] ^= Stream[15:0];
1283 * if (int_key[i]) 1283 *
1284 * hash ^= int_stream[(i + 15):i]; 1284 * for (i = 0; i < 16; i++) {
1285 * if (key[i])
1286 * hash[15:0] ^= lo_hash_dword[(i+15):i];
1287 * if (key[i + 16])
1288 * hash[15:0] ^= hi_hash_dword[(i+15):i];
1285 * } 1289 * }
1290 *
1286 */ 1291 */
1292 __be32 common_hash_dword = 0;
1293 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1294 u32 hash_result = 0;
1295 u8 i;
1287 1296
 1288 union { 1297 /* record the flow_vm_vlan bits as they are a key part of the hash */
1289 u64 fill[6]; 1298 flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
1290 u32 key[11];
1291 u8 key_stream[44];
1292 } tmp_key;
1293 1299
1294 u8 *stream = (u8 *)atr_input; 1300 /* generate common hash dword */
1295 u8 int_key[44]; /* upper-most bit unused */ 1301 for (i = 10; i; i -= 2)
1296 u8 hash_str[46]; /* upper-most 2 bits unused */ 1302 common_hash_dword ^= atr_input->dword_stream[i] ^
1297 u16 hash_result = 0; 1303 atr_input->dword_stream[i - 1];
1298 int i, j, k, h;
1299 1304
1300 /* 1305 hi_hash_dword = ntohl(common_hash_dword);
1301 * Initialize the fill member to prevent warnings
1302 * on some compilers
1303 */
1304 tmp_key.fill[0] = 0;
1305 1306
1306 /* First load the temporary key stream */ 1307 /* low dword is word swapped version of common */
1307 for (i = 0; i < 6; i++) { 1308 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1308 u64 fillkey = ((u64)key << 32) | key;
1309 tmp_key.fill[i] = fillkey;
1310 }
1311 1309
1312 /* 1310 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1313 * Set the interim key for the hashing. Bit 352 is unused, so we must 1311 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1314 * shift and compensate when building the key.
1315 */
1316 1312
1317 int_key[0] = tmp_key.key_stream[0] >> 1; 1313 /* Process bits 0 and 16 */
1318 for (i = 1, j = 0; i < 44; i++) { 1314 if (key & 0x0001) hash_result ^= lo_hash_dword;
1319 unsigned int this_key = tmp_key.key_stream[j] << 7; 1315 if (key & 0x00010000) hash_result ^= hi_hash_dword;
1320 j++;
1321 int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1322 }
1323
1324 /*
1325 * Set the interim bit string for the hashing. Bits 368 and 367 are
1326 * unused, so shift and compensate when building the string.
1327 */
1328 hash_str[0] = (stream[40] & 0x7f) >> 1;
1329 for (i = 1, j = 40; i < 46; i++) {
1330 unsigned int this_str = stream[j] << 7;
1331 j++;
1332 if (j > 41)
1333 j = 0;
1334 hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1335 }
1336 1316
1337 /* 1317 /*
1338 * Now compute the hash. i is the index into hash_str, j is into our 1318 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1339 * key stream, k is counting the number of bits, and h iterates within 1319 * delay this because bit 0 of the stream should not be processed
1340 * each byte. 1320 * so we do not add the vlan until after bit 0 was processed
1341 */ 1321 */
1342 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { 1322 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1343 for (h = 0; h < 8 && k < 351; h++, k++) {
1344 if (int_key[j] & (1 << h)) {
1345 /*
1346 * Key bit is set, XOR in the current 16-bit
1347 * string. Example of processing:
1348 * h = 0,
1349 * tmp = (hash_str[i - 2] & 0 << 16) |
1350 * (hash_str[i - 1] & 0xff << 8) |
1351 * (hash_str[i] & 0xff >> 0)
1352 * So tmp = hash_str[15 + k:k], since the
1353 * i + 2 clause rolls off the 16-bit value
1354 * h = 7,
1355 * tmp = (hash_str[i - 2] & 0x7f << 9) |
1356 * (hash_str[i - 1] & 0xff << 1) |
1357 * (hash_str[i] & 0x80 >> 7)
1358 */
1359 int tmp = (hash_str[i] >> h);
1360 tmp |= (hash_str[i - 1] << (8 - h));
1361 tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1362 << (16 - h);
1363 hash_result ^= (u16)tmp;
1364 }
1365 }
1366 }
1367
1368 return hash_result;
1369}
1370
1371/**
1372 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1373 * @input: input stream to modify
1374 * @vlan: the VLAN id to load
1375 **/
1376s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1377{
1378 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1379 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1380
1381 return 0;
1382}
1383
1384/**
1385 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1386 * @input: input stream to modify
1387 * @src_addr: the IP address to load
1388 **/
1389s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1390{
1391 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1392 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1393 (src_addr >> 16) & 0xff;
1394 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1395 (src_addr >> 8) & 0xff;
1396 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1397
1398 return 0;
1399}
1400
1401/**
1402 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1403 * @input: input stream to modify
1404 * @dst_addr: the IP address to load
1405 **/
1406s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1407{
1408 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1409 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1410 (dst_addr >> 16) & 0xff;
1411 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1412 (dst_addr >> 8) & 0xff;
1413 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1414
1415 return 0;
1416}
1417 1323
1418/**
1419 * ixgbe_atr_set_src_port_82599 - Sets the source port
1420 * @input: input stream to modify
1421 * @src_port: the source port to load
1422 **/
1423s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1424{
1425 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1426 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1427
1428 return 0;
1429}
1430
1431/**
1432 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
1433 * @input: input stream to modify
1434 * @dst_port: the destination port to load
1435 **/
1436s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1437{
1438 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1439 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1440
1441 return 0;
1442}
1443
1444/**
1445 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1446 * @input: input stream to modify
1447 * @flex_bytes: the flexible bytes to load
1448 **/
1449s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1450{
1451 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1452 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1453
1454 return 0;
1455}
1456
1457/**
1458 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1459 * @input: input stream to modify
1460 * @l4type: the layer 4 type value to load
1461 **/
1462s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1463{
1464 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1465
1466 return 0;
1467}
1468
1469/**
1470 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1471 * @input: input stream to search
1472 * @vlan: the VLAN id to load
1473 **/
1474static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1475{
1476 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1477 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1478
1479 return 0;
1480}
1481
1482/**
1483 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1484 * @input: input stream to search
1485 * @src_addr: the IP address to load
1486 **/
1487static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
1488 u32 *src_addr)
1489{
1490 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1491 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1492 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1493 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1494
1495 return 0;
1496}
1497 1324
1498/** 1325 /* process the remaining 30 bits in the key 2 bits at a time */
1499 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address 1326 for (i = 15; i; i--) {
1500 * @input: input stream to search 1327 if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
1501 * @dst_addr: the IP address to load 1328 if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
1502 **/ 1329 }
1503static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
1504 u32 *dst_addr)
1505{
1506 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1507 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1508 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1509 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1510 1330
1511 return 0; 1331 return hash_result & IXGBE_ATR_HASH_MASK;
1512} 1332}
1513 1333
1514/** 1334/*
1515 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address 1335 * These defines allow us to quickly generate all of the necessary instructions
1516 * @input: input stream to search 1336 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1517 * @src_addr_1: the first 4 bytes of the IP address to load 1337 * for values 0 through 15
1518 * @src_addr_2: the second 4 bytes of the IP address to load 1338 */
1519 * @src_addr_3: the third 4 bytes of the IP address to load 1339#define IXGBE_ATR_COMMON_HASH_KEY \
1520 * @src_addr_4: the fourth 4 bytes of the IP address to load 1340 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1521 **/ 1341#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1522static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, 1342do { \
1523 u32 *src_addr_1, u32 *src_addr_2, 1343 u32 n = (_n); \
1524 u32 *src_addr_3, u32 *src_addr_4) 1344 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1525{ 1345 common_hash ^= lo_hash_dword >> n; \
1526 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; 1346 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1527 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; 1347 bucket_hash ^= lo_hash_dword >> n; \
1528 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; 1348 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1529 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; 1349 sig_hash ^= lo_hash_dword << (16 - n); \
1530 1350 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1531 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; 1351 common_hash ^= hi_hash_dword >> n; \
1532 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; 1352 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1533 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; 1353 bucket_hash ^= hi_hash_dword >> n; \
1534 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; 1354 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1535 1355 sig_hash ^= hi_hash_dword << (16 - n); \
1536 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; 1356} while (0);
1537 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1538 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1539 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1540
1541 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1542 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1543 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1544 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1545
1546 return 0;
1547}
1548 1357
1549/** 1358/**
1550 * ixgbe_atr_get_src_port_82599 - Gets the source port 1359 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1551 * @input: input stream to modify 1360 * @stream: input bitstream to compute the hash on
1552 * @src_port: the source port to load
1553 * 1361 *
1554 * Even though the input is given in big-endian, the FDIRPORT registers 1362 * This function is almost identical to the function above but contains
1555 * expect the ports to be programmed in little-endian. Hence the need to swap 1363 * several optimizations such as unrolling all of the loops, letting the
1556 * endianness when retrieving the data. This can be confusing since the 1364 * compiler work out all of the conditional ifs since the keys are static
1557 * internal hash engine expects it to be big-endian. 1365 * defines, and computing two keys at once since the hashed dword stream
1366 * will be the same for both keys.
1558 **/ 1367 **/
1559static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, 1368static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1560 u16 *src_port) 1369 union ixgbe_atr_hash_dword common)
1561{ 1370{
1562 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; 1371 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1563 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; 1372 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1564 1373
1565 return 0; 1374 /* record the flow_vm_vlan bits as they are a key part of the hash */
1566} 1375 flow_vm_vlan = ntohl(input.dword);
1567 1376
1568/** 1377 /* generate common hash dword */
1569 * ixgbe_atr_get_dst_port_82599 - Gets the destination port 1378 hi_hash_dword = ntohl(common.dword);
1570 * @input: input stream to modify
1571 * @dst_port: the destination port to load
1572 *
1573 * Even though the input is given in big-endian, the FDIRPORT registers
1574 * expect the ports to be programmed in little-endian. Hence the need to swap
1575 * endianness when retrieving the data. This can be confusing since the
1576 * internal hash engine expects it to be big-endian.
1577 **/
1578static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
1579 u16 *dst_port)
1580{
1581 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1582 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1583 1379
1584 return 0; 1380 /* low dword is word swapped version of common */
1585} 1381 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1586 1382
1587/** 1383 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1588 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes 1384 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1589 * @input: input stream to modify
1590 * @flex_bytes: the flexible bytes to load
1591 **/
1592static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
1593 u16 *flex_byte)
1594{
1595 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1596 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1597 1385
1598 return 0; 1386 /* Process bits 0 and 16 */
1599} 1387 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1600 1388
1601/** 1389 /*
1602 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type 1390 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1603 * @input: input stream to modify 1391 * delay this because bit 0 of the stream should not be processed
1604 * @l4type: the layer 4 type value to load 1392 * so we do not add the vlan until after bit 0 was processed
1605 **/ 1393 */
1606static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, 1394 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1607 u8 *l4type) 1395
1608{ 1396 /* Process remaining 30 bit of the key */
1609 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; 1397 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1398 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1399 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1400 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1401 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1402 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1403 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1404 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1405 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1406 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1407 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1408 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1409 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1410 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1411 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1412
1413 /* combine common_hash result with signature and bucket hashes */
1414 bucket_hash ^= common_hash;
1415 bucket_hash &= IXGBE_ATR_HASH_MASK;
1610 1416
1611 return 0; 1417 sig_hash ^= common_hash << 16;
1418 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1419
1420 /* return completed signature hash */
1421 return sig_hash ^ bucket_hash;
1612} 1422}
1613 1423
1614/** 1424/**
1615 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1425 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1616 * @hw: pointer to hardware structure 1426 * @hw: pointer to hardware structure
1617 * @stream: input bitstream 1427 * @input: unique input dword
1428 * @common: compressed common input dword
1618 * @queue: queue index to direct traffic to 1429 * @queue: queue index to direct traffic to
1619 **/ 1430 **/
1620s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1431s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1621 struct ixgbe_atr_input *input, 1432 union ixgbe_atr_hash_dword input,
1433 union ixgbe_atr_hash_dword common,
1622 u8 queue) 1434 u8 queue)
1623{ 1435{
1624 u64 fdirhashcmd; 1436 u64 fdirhashcmd;
1625 u64 fdircmd; 1437 u32 fdircmd;
1626 u32 fdirhash;
1627 u16 bucket_hash, sig_hash;
1628 u8 l4type;
1629
1630 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1631 IXGBE_ATR_BUCKET_HASH_KEY);
1632
1633 /* bucket_hash is only 15 bits */
1634 bucket_hash &= IXGBE_ATR_HASH_MASK;
1635
1636 sig_hash = ixgbe_atr_compute_hash_82599(input,
1637 IXGBE_ATR_SIGNATURE_HASH_KEY);
1638
1639 /* Get the l4type in order to program FDIRCMD properly */
1640 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1641 ixgbe_atr_get_l4type_82599(input, &l4type);
1642 1438
1643 /* 1439 /*
1644 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1440 * Get the flow_type in order to program FDIRCMD properly
1645 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 1441 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1646 */ 1442 */
1647 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; 1443 switch (input.formatted.flow_type) {
1648 1444 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1649 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1445 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1650 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); 1446 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1651 1447 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1652 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1448 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1653 case IXGBE_ATR_L4TYPE_TCP: 1449 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1654 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1655 break;
1656 case IXGBE_ATR_L4TYPE_UDP:
1657 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1658 break;
1659 case IXGBE_ATR_L4TYPE_SCTP:
1660 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1661 break; 1450 break;
1662 default: 1451 default:
1663 hw_dbg(hw, "Error on l4type input\n"); 1452 hw_dbg(hw, " Error on flow type input\n");
1664 return IXGBE_ERR_CONFIG; 1453 return IXGBE_ERR_CONFIG;
1665 } 1454 }
1666 1455
1667 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) 1456 /* configure FDIRCMD register */
1668 fdircmd |= IXGBE_FDIRCMD_IPV6; 1457 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1458 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1459 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1460 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1669 1461
1670 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); 1462 /*
1671 fdirhashcmd = ((fdircmd << 32) | fdirhash); 1463 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1464 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1465 */
1466 fdirhashcmd = (u64)fdircmd << 32;
1467 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1672 1468
1673 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1469 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1674 1470
1471 hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1472
1675 return 0; 1473 return 0;
1676} 1474}
1677 1475
1678/** 1476/**
1477 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1478 * @input_mask: mask to be bit swapped
1479 *
1480 * The source and destination port masks for flow director are bit swapped
1481 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1482 * generate a correctly swapped value we need to bit swap the mask and that
1483 * is what is accomplished by this function.
1484 **/
1485static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
1486{
1487 u32 mask = ntohs(input_masks->dst_port_mask);
1488 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1489 mask |= ntohs(input_masks->src_port_mask);
1490 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1491 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1492 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1493 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1494}
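As a standalone demonstration of that swap (the helper name bitrev32 is local to this sketch; the shift-and-mask steps are the same as in the function above):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bitrev32(uint32_t m)
    {
            m = ((m & 0x55555555) << 1) | ((m & 0xAAAAAAAA) >> 1);
            m = ((m & 0x33333333) << 2) | ((m & 0xCCCCCCCC) >> 2);
            m = ((m & 0x0F0F0F0F) << 4) | ((m & 0xF0F0F0F0) >> 4);
            return ((m & 0x00FF00FF) << 8) | ((m & 0xFF00FF00) >> 8);
    }

    int main(void)
    {
            /* bit 15 of the low word lands on bit 16, bit 0 on bit 31 */
            printf("%08x\n", (unsigned)bitrev32(0x00008000)); /* 00010000 */
            printf("%08x\n", (unsigned)bitrev32(0x0000ffff)); /* ffff0000 */
            return 0;
    }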
1495
1496/*
1497 * These two macros are meant to address the fact that we have registers
1498 * that are either all or in part big-endian. As a result on big-endian
1499 * systems we will end up byte swapping the value to little-endian before
1500 * it is byte swapped again and written to the hardware in the original
1501 * big-endian format.
1502 */
1503#define IXGBE_STORE_AS_BE32(_value) \
1504 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1505 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1506
1507#define IXGBE_WRITE_REG_BE32(a, reg, value) \
1508 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
1509
1510#define IXGBE_STORE_AS_BE16(_value) \
1511 (((u16)(_value) >> 8) | ((u16)(_value) << 8))
1512
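In plain C the macro pair boils down to an unconditional byte swap, so IXGBE_WRITE_REG_BE32() swaps the __be32 value to host order with ntohl() and then swaps it back, leaving the original big-endian byte layout regardless of host endianness. A minimal sketch of the swap itself, with stdint types standing in for the kernel's u32:

    #include <stdint.h>
    #include <stdio.h>

    /* same operation as IXGBE_STORE_AS_BE32 */
    static uint32_t store_as_be32(uint32_t v)
    {
            return (v >> 24) | ((v & 0x00FF0000) >> 8) |
                   ((v & 0x0000FF00) << 8) | (v << 24);
    }

    int main(void)
    {
            printf("%08x\n", (unsigned)store_as_be32(0x12345678)); /* 78563412 */
            return 0;
    }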
1513/**
1679 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 1514 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1680 * @hw: pointer to hardware structure 1515 * @hw: pointer to hardware structure
1681 * @input: input bitstream 1516 * @input: input bitstream
@@ -1687,135 +1522,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1687 * hardware writes must be protected from one another. 1522 * hardware writes must be protected from one another.
1688 **/ 1523 **/
1689s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 1524s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1690 struct ixgbe_atr_input *input, 1525 union ixgbe_atr_input *input,
1691 struct ixgbe_atr_input_masks *input_masks, 1526 struct ixgbe_atr_input_masks *input_masks,
1692 u16 soft_id, u8 queue) 1527 u16 soft_id, u8 queue)
1693{ 1528{
1694 u32 fdircmd = 0;
1695 u32 fdirhash; 1529 u32 fdirhash;
1696 u32 src_ipv4 = 0, dst_ipv4 = 0; 1530 u32 fdircmd;
1697 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; 1531 u32 fdirport, fdirtcpm;
1698 u16 src_port, dst_port, vlan_id, flex_bytes; 1532 u32 fdirvlan;
1699 u16 bucket_hash; 1533 /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
1700 u8 l4type; 1534 u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
1701 u8 fdirm = 0; 1535 IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
1702
1703 /* Get our input values */
1704 ixgbe_atr_get_l4type_82599(input, &l4type);
1705 1536
1706 /* 1537 /*
1707 * Check l4type formatting, and bail out before we touch the hardware 1538 * Check flow_type formatting, and bail out before we touch the hardware
1708 * if there's a configuration issue 1539 * if there's a configuration issue
1709 */ 1540 */
1710 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1541 switch (input->formatted.flow_type) {
1711 case IXGBE_ATR_L4TYPE_TCP: 1542 case IXGBE_ATR_FLOW_TYPE_IPV4:
1712 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; 1543 /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
1713 break; 1544 fdirm |= IXGBE_FDIRM_L4P;
1714 case IXGBE_ATR_L4TYPE_UDP: 1545 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1715 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; 1546 if (input_masks->dst_port_mask || input_masks->src_port_mask) {
1716 break; 1547 hw_dbg(hw, " Error on src/dst port mask\n");
1717 case IXGBE_ATR_L4TYPE_SCTP: 1548 return IXGBE_ERR_CONFIG;
1718 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; 1549 }
1550 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1551 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1719 break; 1552 break;
1720 default: 1553 default:
1721 hw_dbg(hw, "Error on l4type input\n"); 1554 hw_dbg(hw, " Error on flow type input\n");
1722 return IXGBE_ERR_CONFIG; 1555 return IXGBE_ERR_CONFIG;
1723 } 1556 }
1724 1557
1725 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1726 IXGBE_ATR_BUCKET_HASH_KEY);
1727
1728 /* bucket_hash is only 15 bits */
1729 bucket_hash &= IXGBE_ATR_HASH_MASK;
1730
1731 ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
1732 ixgbe_atr_get_src_port_82599(input, &src_port);
1733 ixgbe_atr_get_dst_port_82599(input, &dst_port);
1734 ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
1735
1736 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1737
1738 /* Now figure out if we're IPv4 or IPv6 */
1739 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
1740 /* IPv6 */
1741 ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
1742 &src_ipv6_3, &src_ipv6_4);
1743
1744 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
1745 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
1746 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
1747 /* The last 4 bytes is the same register as IPv4 */
1748 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
1749
1750 fdircmd |= IXGBE_FDIRCMD_IPV6;
1751 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
1752 } else {
1753 /* IPv4 */
1754 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
1755 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
1756 }
1757
1758 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
1759 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
1760
1761 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
1762 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
1763 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
1764 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1765
1766 /* 1558 /*
1767 * Program the relevant mask registers. L4type cannot be 1559 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1768 * masked out in this implementation. 1560 * are zero, then assume a full mask for that field. Also assume that
1561 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1562 * cannot be masked out in this implementation.
1769 * 1563 *
1770 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1564 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1771 * point in time. 1565 * point in time.
1772 */ 1566 */
1773 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask); 1567
1774 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); 1568 /* Program FDIRM */
1775 1569 switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
1776 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1570 case 0xEFFF:
1777 case IXGBE_ATR_L4TYPE_TCP: 1571 /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
1778 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask); 1572 fdirm &= ~IXGBE_FDIRM_VLANID;
1779 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 1573 case 0xE000:
1780 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | 1574 /* Unmask VLAN prio - bit 1 */
1781 (input_masks->dst_port_mask << 16))); 1575 fdirm &= ~IXGBE_FDIRM_VLANP;
1782 break; 1576 break;
1783 case IXGBE_ATR_L4TYPE_UDP: 1577 case 0x0FFF:
1784 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask); 1578 /* Unmask VLAN ID - bit 0 */
1785 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 1579 fdirm &= ~IXGBE_FDIRM_VLANID;
1786 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1787 (input_masks->src_port_mask << 16)));
1788 break; 1580 break;
1789 default: 1581 case 0x0000:
1790 /* this already would have failed above */ 1582 /* do nothing, vlans already masked */
1791 break; 1583 break;
1584 default:
1585 hw_dbg(hw, " Error on VLAN mask\n");
1586 return IXGBE_ERR_CONFIG;
1792 } 1587 }
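For reference, the accepted patterns line up with the VLAN TCI layout: priority in bits 15:13, CFI in bit 12 (always ignored here), and VLAN ID in bits 11:0. A small illustrative table, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* the four (~CFI) VLAN mask patterns the switch above accepts */
            static const struct { uint16_t mask; const char *matches; } tbl[] = {
                    { 0xEFFF, "VLAN ID and priority" },
                    { 0xE000, "priority only" },
                    { 0x0FFF, "VLAN ID only" },
                    { 0x0000, "nothing (VLAN fully wildcarded)" },
            };
            for (unsigned i = 0; i < 4; i++)
                    printf("0x%04X -> %s\n", tbl[i].mask, tbl[i].matches);
            return 0;
    }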
1793 1588
1794 /* Program the last mask register, FDIRM */ 1589 if (input_masks->flex_mask & 0xFFFF) {
1795 if (input_masks->vlan_id_mask) 1590 if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
1796 /* Mask both VLAN and VLANP - bits 0 and 1 */ 1591 hw_dbg(hw, " Error on flexible byte mask\n");
1797 fdirm |= 0x3; 1592 return IXGBE_ERR_CONFIG;
1798 1593 }
1799 if (input_masks->data_mask) 1594 /* Unmask Flex Bytes - bit 4 */
1800 /* Flex bytes need masking, so mask the whole thing - bit 4 */ 1595 fdirm &= ~IXGBE_FDIRM_FLEX;
1801 fdirm |= 0x10; 1596 }
1802 1597
1803 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 1598 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1804 fdirm |= 0x24;
1805
1806 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 1599 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1807 1600
1808 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; 1601 /* store the TCP/UDP port masks, bit reversed from port layout */
1809 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; 1602 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
1810 fdircmd |= IXGBE_FDIRCMD_LAST; 1603
1811 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; 1604 /* write both the same so that UDP and TCP use the same mask */
1812 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1605 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1606 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1607
1608 /* store source and destination IP masks (big-endian) */
1609 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1610 ~input_masks->src_ip_mask[0]);
1611 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1612 ~input_masks->dst_ip_mask[0]);
1613
1614 /* Apply masks to input data */
1615 input->formatted.vlan_id &= input_masks->vlan_id_mask;
1616 input->formatted.flex_bytes &= input_masks->flex_mask;
1617 input->formatted.src_port &= input_masks->src_port_mask;
1618 input->formatted.dst_port &= input_masks->dst_port_mask;
1619 input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
1620 input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
1621
1622 /* record vlan (little-endian) and flex_bytes (big-endian) */
1623 fdirvlan =
1624 IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
1625 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1626 fdirvlan |= ntohs(input->formatted.vlan_id);
1627 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1628
1629 /* record source and destination port (little-endian) */
1630 fdirport = ntohs(input->formatted.dst_port);
1631 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1632 fdirport |= ntohs(input->formatted.src_port);
1633 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1634
1635 /* record the first 32 bits of the destination address (big-endian) */
1636 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1637
1638 /* record the source address (big-endian) */
1639 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1640
1641 /* configure FDIRCMD register */
1642 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1643 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1644 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1645 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1646
1647 /* we only want the bucket hash so drop the upper 16 bits */
1648 fdirhash = ixgbe_atr_compute_hash_82599(input,
1649 IXGBE_ATR_BUCKET_HASH_KEY);
1650 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1813 1651
1814 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 1652 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1815 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 1653 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1816 1654
1817 return 0; 1655 return 0;
1818} 1656}
1657
1819/** 1658/**
1820 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 1659 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1821 * @hw: pointer to hardware structure 1660 * @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e8b393..2002ea88ca2a 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1477 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1477 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1478 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1478 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1479 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1479 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1480 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx)); 1480 ixgbe_disable_rx_queue(adapter, rx_ring);
1481 reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
1482 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
1483 1481
1484 /* now Tx */ 1482 /* now Tx */
1485 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); 1483 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
@@ -2279,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
2279 struct ethtool_rx_ntuple *cmd) 2277 struct ethtool_rx_ntuple *cmd)
2280{ 2278{
2281 struct ixgbe_adapter *adapter = netdev_priv(dev); 2279 struct ixgbe_adapter *adapter = netdev_priv(dev);
2282 struct ethtool_rx_ntuple_flow_spec fs = cmd->fs; 2280 struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
2283 struct ixgbe_atr_input input_struct; 2281 union ixgbe_atr_input input_struct;
2284 struct ixgbe_atr_input_masks input_masks; 2282 struct ixgbe_atr_input_masks input_masks;
2285 int target_queue; 2283 int target_queue;
2284 int err;
2286 2285
2287 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2286 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2288 return -EOPNOTSUPP; 2287 return -EOPNOTSUPP;
@@ -2291,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
2291 * Don't allow programming if the action is a queue greater than 2290 * Don't allow programming if the action is a queue greater than
2292 * the number of online Tx queues. 2291 * the number of online Tx queues.
2293 */ 2292 */
2294 if ((fs.action >= adapter->num_tx_queues) || 2293 if ((fs->action >= adapter->num_tx_queues) ||
2295 (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP)) 2294 (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
2296 return -EINVAL; 2295 return -EINVAL;
2297 2296
2298 memset(&input_struct, 0, sizeof(struct ixgbe_atr_input)); 2297 memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
2299 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); 2298 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
2300 2299
2301 input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src; 2300 /* record flow type */
2302 input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst; 2301 switch (fs->flow_type) {
2303 input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc; 2302 case IPV4_FLOW:
2304 input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst; 2303 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2305 input_masks.vlan_id_mask = fs.vlan_tag_mask; 2304 break;
2306 /* only use the lowest 2 bytes for flex bytes */
2307 input_masks.data_mask = (fs.data_mask & 0xffff);
2308
2309 switch (fs.flow_type) {
2310 case TCP_V4_FLOW: 2305 case TCP_V4_FLOW:
2311 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP); 2306 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2312 break; 2307 break;
2313 case UDP_V4_FLOW: 2308 case UDP_V4_FLOW:
2314 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP); 2309 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2315 break; 2310 break;
2316 case SCTP_V4_FLOW: 2311 case SCTP_V4_FLOW:
2317 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP); 2312 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2318 break; 2313 break;
2319 default: 2314 default:
2320 return -1; 2315 return -1;
2321 } 2316 }
2322 2317
2323 /* Mask bits from the inputs based on user-supplied mask */ 2318 /* copy vlan tag minus the CFI bit */
2324 ixgbe_atr_set_src_ipv4_82599(&input_struct, 2319 if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
2325 (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src)); 2320 input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
2326 ixgbe_atr_set_dst_ipv4_82599(&input_struct, 2321 if (!fs->vlan_tag_mask) {
2327 (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst)); 2322 input_masks.vlan_id_mask = htons(0xEFFF);
2328 /* 82599 expects these to be byte-swapped for perfect filtering */ 2323 } else {
2329 ixgbe_atr_set_src_port_82599(&input_struct, 2324 switch (~fs->vlan_tag_mask & 0xEFFF) {
2330 ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc)); 2325 /* all of these are valid vlan-mask values */
2331 ixgbe_atr_set_dst_port_82599(&input_struct, 2326 case 0xEFFF:
2332 ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst)); 2327 case 0xE000:
2333 2328 case 0x0FFF:
2334 /* VLAN and Flex bytes are either completely masked or not */ 2329 case 0x0000:
2335 if (!fs.vlan_tag_mask) 2330 input_masks.vlan_id_mask =
2336 ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag); 2331 htons(~fs->vlan_tag_mask);
2337 2332 break;
2338 if (!input_masks.data_mask) 2333 /* exit with error if vlan-mask is invalid */
2339 /* make sure we only use the first 2 bytes of user data */ 2334 default:
2340 ixgbe_atr_set_flex_byte_82599(&input_struct, 2335 e_err(drv, "Partial VLAN ID or "
2341 (fs.data & 0xffff)); 2336 "priority mask in vlan-mask is not "
2337 "supported by hardware\n");
2338 return -1;
2339 }
2340 }
2341 }
2342
2343 /* make sure we only use the first 2 bytes of user data */
2344 if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
2345 input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
2346 if (!(fs->data_mask & 0xFFFF)) {
2347 input_masks.flex_mask = 0xFFFF;
2348 } else if (~fs->data_mask & 0xFFFF) {
2349 e_err(drv, "Partial user-def-mask is not "
2350 "supported by hardware\n");
2351 return -1;
2352 }
2353 }
2354
2355 /*
2356 * Copy input into formatted structures
2357 *
2358 * These assignments are based on the following logic
2359 * If neither input nor mask is set, assume the value is masked out.
2360 * If input is set but mask is not, the mask should default to accept all.
2361 * If input is not set but mask is set, the mask likely results in 0.
2362 * If input is set and mask is set then assign both.
2363 */
2364 if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
2365 input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
2366 if (!fs->m_u.tcp_ip4_spec.ip4src)
2367 input_masks.src_ip_mask[0] = 0xFFFFFFFF;
2368 else
2369 input_masks.src_ip_mask[0] =
2370 ~fs->m_u.tcp_ip4_spec.ip4src;
2371 }
2372 if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
2373 input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
2374 if (!fs->m_u.tcp_ip4_spec.ip4dst)
2375 input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
2376 else
2377 input_masks.dst_ip_mask[0] =
2378 ~fs->m_u.tcp_ip4_spec.ip4dst;
2379 }
2380 if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
2381 input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
2382 if (!fs->m_u.tcp_ip4_spec.psrc)
2383 input_masks.src_port_mask = 0xFFFF;
2384 else
2385 input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
2386 }
2387 if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
2388 input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
2389 if (!fs->m_u.tcp_ip4_spec.pdst)
2390 input_masks.dst_port_mask = 0xFFFF;
2391 else
2392 input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
2393 }
2342 2394
2343 /* determine if we need to drop or route the packet */ 2395 /* determine if we need to drop or route the packet */
2344 if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) 2396 if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
2345 target_queue = MAX_RX_QUEUES - 1; 2397 target_queue = MAX_RX_QUEUES - 1;
2346 else 2398 else
2347 target_queue = fs.action; 2399 target_queue = fs->action;
2348 2400
2349 spin_lock(&adapter->fdir_perfect_lock); 2401 spin_lock(&adapter->fdir_perfect_lock);
2350 ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct, 2402 err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
2351 &input_masks, 0, target_queue); 2403 &input_struct,
2404 &input_masks, 0,
2405 target_queue);
2352 spin_unlock(&adapter->fdir_perfect_lock); 2406 spin_unlock(&adapter->fdir_perfect_lock);
2353 2407
2354 return 0; 2408 return err ? -1 : 0;
2355} 2409}
2356 2410
2357static const struct ethtool_ops ixgbe_ethtool_ops = { 2411static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3f8197..a060610a42db 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3024 } 3024 }
3025} 3025}
3026 3026
3027void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3028 struct ixgbe_ring *ring)
3029{
3030 struct ixgbe_hw *hw = &adapter->hw;
3031 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3032 u32 rxdctl;
3033 u8 reg_idx = ring->reg_idx;
3034
3035 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3036 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3037
3038 /* write value back with RXDCTL.ENABLE bit cleared */
3039 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3040
3041 if (hw->mac.type == ixgbe_mac_82598EB &&
3042 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3043 return;
3044
3045 /* the hardware may take up to 100us to really disable the rx queue */
3046 do {
3047 udelay(10);
3048 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3049 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3050
3051 if (!wait_loop) {
3052 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3053 "the polling period\n", reg_idx);
3054 }
3055}
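The loop above allows IXGBE_MAX_RX_DESC_POLL iterations of udelay(10), which matches the quoted 100us budget if the poll constant is 10, as it is in this era of the driver. The same poll-until-clear pattern as a generic sketch (hypothetical helper, for illustration only):

    #include <stdbool.h>
    #include <stdint.h>

    /* poll until (*reg & mask) clears or the loop budget runs out */
    static bool poll_bit_clear(volatile uint32_t *reg, uint32_t mask,
                               unsigned int loops,
                               void (*delay_us)(unsigned int))
    {
            while (loops--) {
                    delay_us(10);
                    if (!(*reg & mask))
                            return true;
            }
            return false;    /* caller logs the timeout */
    }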
3056
3027void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, 3057void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3028 struct ixgbe_ring *ring) 3058 struct ixgbe_ring *ring)
3029{ 3059{
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3034 3064
3035 /* disable queue to avoid issues while updating state */ 3065 /* disable queue to avoid issues while updating state */
3036 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3066 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3037 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), 3067 ixgbe_disable_rx_queue(adapter, ring);
3038 rxdctl & ~IXGBE_RXDCTL_ENABLE);
3039 IXGBE_WRITE_FLUSH(hw);
3040 3068
3041 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); 3069 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3042 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); 3070 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4064 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4092 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4065 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 4093 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4066 4094
4067 IXGBE_WRITE_FLUSH(hw); 4095 /* disable all enabled rx queues */
4096 for (i = 0; i < adapter->num_rx_queues; i++)
4097 /* this call also flushes the previous write */
4098 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4099
4068 msleep(10); 4100 msleep(10);
4069 4101
4070 netif_tx_stop_all_queues(netdev); 4102 netif_tx_stop_all_queues(netdev);
@@ -4789,6 +4821,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4789 4821
4790 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 4822 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4791 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 4823 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4824 if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
4825 IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
4826 e_err(probe,
4827 "Flow Director is not supported while multiple "
4828 "queues are disabled. Disabling Flow Director\n");
4829 }
4792 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 4830 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4793 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 4831 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4794 adapter->atr_sample_rate = 0; 4832 adapter->atr_sample_rate = 0;
@@ -5094,16 +5132,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5094 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 5132 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
5095 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 5133 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
5096 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5134 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5097 if (dev->features & NETIF_F_NTUPLE) { 5135 /* n-tuple support exists, always init our spinlock */
5098 /* Flow Director perfect filter enabled */ 5136 spin_lock_init(&adapter->fdir_perfect_lock);
5099 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 5137 /* Flow Director hash filters enabled */
5100 adapter->atr_sample_rate = 0; 5138 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
5101 spin_lock_init(&adapter->fdir_perfect_lock); 5139 adapter->atr_sample_rate = 20;
5102 } else {
5103 /* Flow Director hash filters enabled */
5104 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
5105 adapter->atr_sample_rate = 20;
5106 }
5107 adapter->ring_feature[RING_F_FDIR].indices = 5140 adapter->ring_feature[RING_F_FDIR].indices =
5108 IXGBE_MAX_FDIR_INDICES; 5141 IXGBE_MAX_FDIR_INDICES;
5109 adapter->fdir_pballoc = 0; 5142 adapter->fdir_pballoc = 0;
@@ -6474,38 +6507,92 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
6474 writel(i, tx_ring->tail); 6507 writel(i, tx_ring->tail);
6475} 6508}
6476 6509
6477static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6510static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
6478 u8 queue, u32 tx_flags, __be16 protocol) 6511 u32 tx_flags, __be16 protocol)
6479{ 6512{
6480 struct ixgbe_atr_input atr_input; 6513 struct ixgbe_q_vector *q_vector = ring->q_vector;
6481 struct iphdr *iph = ip_hdr(skb); 6514 union ixgbe_atr_hash_dword input = { .dword = 0 };
6482 struct ethhdr *eth = (struct ethhdr *)skb->data; 6515 union ixgbe_atr_hash_dword common = { .dword = 0 };
6516 union {
6517 unsigned char *network;
6518 struct iphdr *ipv4;
6519 struct ipv6hdr *ipv6;
6520 } hdr;
6483 struct tcphdr *th; 6521 struct tcphdr *th;
6484 u16 vlan_id; 6522 __be16 vlan_id;
6485 6523
6486 /* Right now, we support IPv4 w/ TCP only */ 6524 /* if ring doesn't have an interrupt vector, cannot perform ATR */
6487 if (protocol != htons(ETH_P_IP) || 6525 if (!q_vector)
6488 iph->protocol != IPPROTO_TCP)
6489 return; 6526 return;
6490 6527
6491 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6528 /* do nothing if sampling is disabled */
6529 if (!ring->atr_sample_rate)
6530 return;
6492 6531
6493 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6532 ring->atr_count++;
6494 IXGBE_TX_FLAGS_VLAN_SHIFT; 6533
6534 /* snag network header to get L4 type and address */
6535 hdr.network = skb_network_header(skb);
6536
6537 /* Currently only IPv4/IPv6 with TCP is supported */
6538 if ((protocol != __constant_htons(ETH_P_IPV6) ||
6539 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6540 (protocol != __constant_htons(ETH_P_IP) ||
6541 hdr.ipv4->protocol != IPPROTO_TCP))
6542 return;
6495 6543
6496 th = tcp_hdr(skb); 6544 th = tcp_hdr(skb);
6497 6545
6498 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); 6546 /* skip this packet since the socket is closing */
6499 ixgbe_atr_set_src_port_82599(&atr_input, th->dest); 6547 if (th->fin)
6500 ixgbe_atr_set_dst_port_82599(&atr_input, th->source); 6548 return;
6501 ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto); 6549
6502 ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP); 6550 /* sample on all syn packets or once every atr sample count */
6503 /* src and dst are inverted, think how the receiver sees them */ 6551 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6504 ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); 6552 return;
6505 ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr); 6553
6554 /* reset sample count */
6555 ring->atr_count = 0;
6556
6557 vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
6558
6559 /*
6560 * src and dst are inverted, think how the receiver sees them
6561 *
6562 * The input is broken into two sections, a non-compressed section
6563 * containing vm_pool, vlan_id, and flow_type. The rest of the data
6564 * is XORed together and stored in the compressed dword.
6565 */
6566 input.formatted.vlan_id = vlan_id;
6567
6568 /*
6569 * since src port and flex bytes occupy the same word XOR them together
6570 * and write the value to source port portion of compressed dword
6571 */
6572 if (vlan_id)
6573 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6574 else
6575 common.port.src ^= th->dest ^ protocol;
6576 common.port.dst ^= th->source;
6577
6578 if (protocol == __constant_htons(ETH_P_IP)) {
6579 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6580 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6581 } else {
6582 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6583 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6584 hdr.ipv6->saddr.s6_addr32[1] ^
6585 hdr.ipv6->saddr.s6_addr32[2] ^
6586 hdr.ipv6->saddr.s6_addr32[3] ^
6587 hdr.ipv6->daddr.s6_addr32[0] ^
6588 hdr.ipv6->daddr.s6_addr32[1] ^
6589 hdr.ipv6->daddr.s6_addr32[2] ^
6590 hdr.ipv6->daddr.s6_addr32[3];
6591 }
6506 6592
6507 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 6593 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6508 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); 6594 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6595 input, common, ring->queue_index);
6509} 6596}
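The compression leans on union type punning: source port and flex bytes share one 16-bit field, both IP addresses fold into the same 32-bit dword, and only vm_pool/flow_type/vlan_id stay uncompressed. A userspace sketch of the packing (field names mirror the driver's union, but the types and sample values here are local to the example):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    union hash_dword {
            uint32_t dword;
            uint32_t ip;
            struct {
                    uint16_t src;
                    uint16_t dst;
            } port;
    };

    int main(void)
    {
            union hash_dword common = { .dword = 0 };

            /* src port and flex bytes occupy the same word, so XOR them;
             * as in the driver, src/dst are the receiver's view */
            common.port.src ^= htons(443) ^ htons(0x0800); /* ETH_P_IP */
            common.port.dst ^= htons(12345);
            /* both addresses fold into the same dword */
            common.ip ^= inet_addr("192.0.2.1") ^ inet_addr("192.0.2.99");

            printf("compressed dword: %08x\n", (unsigned)common.dword);
            return 0;
    }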
6510 6597
6511static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) 6598static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
@@ -6676,16 +6763,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6676 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); 6763 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
6677 if (count) { 6764 if (count) {
6678 /* add the ATR filter if ATR is on */ 6765 /* add the ATR filter if ATR is on */
6679 if (tx_ring->atr_sample_rate) { 6766 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6680 ++tx_ring->atr_count; 6767 ixgbe_atr(tx_ring, skb, tx_flags, protocol);
6681 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6682 test_bit(__IXGBE_TX_FDIR_INIT_DONE,
6683 &tx_ring->state)) {
6684 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6685 tx_flags, protocol);
6686 tx_ring->atr_count = 0;
6687 }
6688 }
6689 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); 6768 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6690 txq->tx_bytes += skb->len; 6769 txq->tx_bytes += skb->len;
6691 txq->tx_packets++; 6770 txq->tx_packets++;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 446f3467d3c7..fd3358f54139 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1947,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type {
1947#define IXGBE_FDIRM_VLANID 0x00000001 1947#define IXGBE_FDIRM_VLANID 0x00000001
1948#define IXGBE_FDIRM_VLANP 0x00000002 1948#define IXGBE_FDIRM_VLANP 0x00000002
1949#define IXGBE_FDIRM_POOL 0x00000004 1949#define IXGBE_FDIRM_POOL 0x00000004
1950#define IXGBE_FDIRM_L3P 0x00000008 1950#define IXGBE_FDIRM_L4P 0x00000008
1951#define IXGBE_FDIRM_L4P 0x00000010 1951#define IXGBE_FDIRM_FLEX 0x00000010
1952#define IXGBE_FDIRM_FLEX 0x00000020 1952#define IXGBE_FDIRM_DIPv6 0x00000020
1953#define IXGBE_FDIRM_DIPv6 0x00000040
1954 1953
1955#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF 1954#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
1956#define IXGBE_FDIRFREE_FREE_SHIFT 0 1955#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -1990,6 +1989,7 @@ enum ixgbe_fdir_pballoc_type {
1990#define IXGBE_FDIRCMD_LAST 0x00000800 1989#define IXGBE_FDIRCMD_LAST 0x00000800
1991#define IXGBE_FDIRCMD_COLLISION 0x00001000 1990#define IXGBE_FDIRCMD_COLLISION 0x00001000
1992#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 1991#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
1992#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
1993#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 1993#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
1994#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 1994#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
1995#define IXGBE_FDIR_INIT_DONE_POLL 10 1995#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2147,51 +2147,80 @@ typedef u32 ixgbe_physical_layer;
2147#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) 2147#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
2148 2148
2149/* Software ATR hash keys */ 2149/* Software ATR hash keys */
2150#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2150#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
2151#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 2151#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
2152
2153/* Software ATR input stream offsets and masks */
2154#define IXGBE_ATR_VLAN_OFFSET 0
2155#define IXGBE_ATR_SRC_IPV6_OFFSET 2
2156#define IXGBE_ATR_SRC_IPV4_OFFSET 14
2157#define IXGBE_ATR_DST_IPV6_OFFSET 18
2158#define IXGBE_ATR_DST_IPV4_OFFSET 30
2159#define IXGBE_ATR_SRC_PORT_OFFSET 34
2160#define IXGBE_ATR_DST_PORT_OFFSET 36
2161#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
2162#define IXGBE_ATR_VM_POOL_OFFSET 40
2163#define IXGBE_ATR_L4TYPE_OFFSET 41
2164 2152
2153/* Software ATR input stream values and masks */
2154#define IXGBE_ATR_HASH_MASK 0x7fff
2165#define IXGBE_ATR_L4TYPE_MASK 0x3 2155#define IXGBE_ATR_L4TYPE_MASK 0x3
2166#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2167#define IXGBE_ATR_L4TYPE_UDP 0x1 2156#define IXGBE_ATR_L4TYPE_UDP 0x1
2168#define IXGBE_ATR_L4TYPE_TCP 0x2 2157#define IXGBE_ATR_L4TYPE_TCP 0x2
2169#define IXGBE_ATR_L4TYPE_SCTP 0x3 2158#define IXGBE_ATR_L4TYPE_SCTP 0x3
2170#define IXGBE_ATR_HASH_MASK 0x7fff 2159#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2160enum ixgbe_atr_flow_type {
2161 IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
2162 IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
2163 IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
2164 IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
2165 IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
2166 IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
2167 IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
2168 IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
2169};
2171 2170
2172/* Flow Director ATR input struct. */ 2171/* Flow Director ATR input struct. */
2173struct ixgbe_atr_input { 2172union ixgbe_atr_input {
2174 /* Byte layout in order, all values with MSB first: 2173 /*
2174 * Byte layout in order, all values with MSB first:
2175 * 2175 *
2176 * vm_pool - 1 byte
2177 * flow_type - 1 byte
2176 * vlan_id - 2 bytes 2178 * vlan_id - 2 bytes
2177 * src_ip - 16 bytes 2179 * src_ip - 16 bytes
2178 * dst_ip - 16 bytes 2180 * dst_ip - 16 bytes
2179 * src_port - 2 bytes 2181 * src_port - 2 bytes
2180 * dst_port - 2 bytes 2182 * dst_port - 2 bytes
2181 * flex_bytes - 2 bytes 2183 * flex_bytes - 2 bytes
2182 * vm_pool - 1 byte 2184 * rsvd0 - 2 bytes - space reserved, must be 0.
2183 * l4type - 1 byte
2184 */ 2185 */
2185 u8 byte_stream[42]; 2186 struct {
2187 u8 vm_pool;
2188 u8 flow_type;
2189 __be16 vlan_id;
2190 __be32 dst_ip[4];
2191 __be32 src_ip[4];
2192 __be16 src_port;
2193 __be16 dst_port;
2194 __be16 flex_bytes;
2195 __be16 rsvd0;
2196 } formatted;
2197 __be32 dword_stream[11];
2198};
2199
2200/* Flow Director compressed ATR hash input struct */
2201union ixgbe_atr_hash_dword {
2202 struct {
2203 u8 vm_pool;
2204 u8 flow_type;
2205 __be16 vlan_id;
2206 } formatted;
2207 __be32 ip;
2208 struct {
2209 __be16 src;
2210 __be16 dst;
2211 } port;
2212 __be16 flex_bytes;
2213 __be32 dword;
2186}; 2214};
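One property worth checking when mirroring this union is that the formatted view tiles the 11-dword stream exactly, with no compiler padding. A host-side C11 sketch (mirror types only, not the kernel definitions):

    #include <assert.h>
    #include <stdint.h>

    union atr_input_mirror {
            struct {
                    uint8_t  vm_pool;
                    uint8_t  flow_type;
                    uint16_t vlan_id;
                    uint32_t dst_ip[4];
                    uint32_t src_ip[4];
                    uint16_t src_port;
                    uint16_t dst_port;
                    uint16_t flex_bytes;
                    uint16_t rsvd0;
            } formatted;
            uint32_t dword_stream[11];
    };

    /* 1 + 1 + 2 + 16 + 16 + 2 + 2 + 2 + 2 = 44 bytes = 11 dwords */
    static_assert(sizeof(union atr_input_mirror) == 11 * sizeof(uint32_t),
                  "formatted view must tile the dword stream exactly");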
2187 2215
2188struct ixgbe_atr_input_masks { 2216struct ixgbe_atr_input_masks {
2189 u32 src_ip_mask; 2217 __be16 rsvd0;
2190 u32 dst_ip_mask; 2218 __be16 vlan_id_mask;
2191 u16 src_port_mask; 2219 __be32 dst_ip_mask[4];
2192 u16 dst_port_mask; 2220 __be32 src_ip_mask[4];
2193 u16 vlan_id_mask; 2221 __be16 src_port_mask;
2194 u16 data_mask; 2222 __be16 dst_port_mask;
2223 __be16 flex_mask;
2195}; 2224};
2196 2225
2197enum ixgbe_eeprom_type { 2226enum ixgbe_eeprom_type {
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f07c11..3a4277f6fac4 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
178 } else { 178 } else {
179 int i; 179 int i;
180 180
181 buf->direct.buf = NULL;
181 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; 182 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
182 buf->npages = buf->nbufs; 183 buf->npages = buf->nbufs;
183 buf->page_shift = PAGE_SHIFT; 184 buf->page_shift = PAGE_SHIFT;
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
229 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 230 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
230 buf->direct.map); 231 buf->direct.map);
231 else { 232 else {
232 if (BITS_PER_LONG == 64) 233 if (BITS_PER_LONG == 64 && buf->direct.buf)
233 vunmap(buf->direct.buf); 234 vunmap(buf->direct.buf);
234 235
235 for (i = 0; i < buf->nbufs; ++i) 236 for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b361e3..897f576b8b17 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
972 int i; 972 int i;
973 int err; 973 int err;
974 974
975 dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); 975 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
976 prof->tx_ring_num, prof->rx_ring_num);
976 if (dev == NULL) { 977 if (dev == NULL) {
977 mlx4_err(mdev, "Net device allocation failed\n"); 978 mlx4_err(mdev, "Net device allocation failed\n");
978 return -ENOMEM; 979 return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18ba278a..5de1db897835 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
290 dev_cap->bf_reg_size = 1 << (field & 0x1f); 290 dev_cap->bf_reg_size = 1 << (field & 0x1f);
291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
292 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) { 292 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
293 mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
294 field = 3; 293 field = 3;
295 }
296 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 294 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
297 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 295 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
298 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 296 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
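The hunk above keeps the clamp but drops the warning: a firmware-reported log2 register count that would overflow one page is silently forced to field = 3 (8 regs/page). A standalone illustration of the clamp arithmetic, with assumed page and register sizes:

#include <stdio.h>

#define PAGE_SIZE_B  4096ULL
#define BF_REG_SIZE  512ULL   /* assumed blue-flame register size */

int main(void)
{
    unsigned field = 10;      /* log2 count as reported by firmware */
    unsigned long long regs = 1ULL << (field & 0x3f);

    if (regs > PAGE_SIZE_B / BF_REG_SIZE)
        field = 3;            /* silent clamp: 8 regs/page */

    printf("regs/page = %llu\n", 1ULL << field);
    return 0;
}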
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2c158910f7ea..e953793a33ff 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1536 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), 1536 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
1537 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), 1537 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
1538 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), 1538 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
1539 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
1539 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), 1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), 1541 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
1541 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), 1542 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6481bf..a1b82c9c67d2 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/jiffies.h> 33#include <linux/jiffies.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <asm/unaligned.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <asm/string.h> 37#include <asm/string.h>
37 38
@@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap)
542 data = ap->tpkt->data; 543 data = ap->tpkt->data;
543 count = ap->tpkt->len; 544 count = ap->tpkt->len;
544 fcs = ap->tfcs; 545 fcs = ap->tfcs;
545 proto = (data[0] << 8) + data[1]; 546 proto = get_unaligned_be16(data);
546 547
547 /* 548 /*
 548 * LCP packets with code values between 1 (configure-request) 549 * LCP packets with code values between 1 (configure-request)
@@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
963 code = data[0]; 964 code = data[0];
964 if (code != CONFACK && code != CONFREQ) 965 if (code != CONFACK && code != CONFREQ)
965 return; 966 return;
966 dlen = (data[2] << 8) + data[3]; 967 dlen = get_unaligned_be16(data + 2);
967 if (len < dlen) 968 if (len < dlen)
968 return; /* packet got truncated or length is bogus */ 969 return; /* packet got truncated or length is bogus */
969 970
@@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
997 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { 998 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
998 switch (data[0]) { 999 switch (data[0]) {
999 case LCP_MRU: 1000 case LCP_MRU:
1000 val = (data[2] << 8) + data[3]; 1001 val = get_unaligned_be16(data + 2);
1001 if (inbound) 1002 if (inbound)
1002 ap->mru = val; 1003 ap->mru = val;
1003 else 1004 else
1004 ap->chan.mtu = val; 1005 ap->chan.mtu = val;
1005 break; 1006 break;
1006 case LCP_ASYNCMAP: 1007 case LCP_ASYNCMAP:
1007 val = (data[2] << 24) + (data[3] << 16) 1008 val = get_unaligned_be32(data + 2);
1008 + (data[4] << 8) + data[5];
1009 if (inbound) 1009 if (inbound)
1010 ap->raccm = val; 1010 ap->raccm = val;
1011 else 1011 else
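This diff and the four ppp_* diffs that follow all make the same substitution: open-coded byte shifts become the <asm/unaligned.h> accessors, which are alignment-safe and self-documenting. For reference, portable userspace equivalents of the three helpers used in these patches (byte-wise stand-ins, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/* Byte-wise forms are endian- and alignment-safe on any host. */
static inline uint16_t get_be16(const unsigned char *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

static inline uint32_t get_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static inline void put_be16(uint16_t v, unsigned char *p)
{
    p[0] = v >> 8;
    p[1] = v & 0xff;
}

int main(void)
{
    unsigned char hdr[4];
    put_be16(0x0021, hdr);   /* e.g. the PPP IP protocol id */
    printf("proto = 0x%04x\n", get_be16(hdr));
    return 0;
}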
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83e0cfd..43583309a65d 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
41#include <linux/ppp-comp.h> 41#include <linux/ppp-comp.h>
42 42
43#include <linux/zlib.h> 43#include <linux/zlib.h>
44#include <asm/unaligned.h>
44 45
45/* 46/*
46 * State for a Deflate (de)compressor. 47 * State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
232 */ 233 */
233 wptr[0] = PPP_ADDRESS(rptr); 234 wptr[0] = PPP_ADDRESS(rptr);
234 wptr[1] = PPP_CONTROL(rptr); 235 wptr[1] = PPP_CONTROL(rptr);
235 wptr[2] = PPP_COMP >> 8; 236 put_unaligned_be16(PPP_COMP, wptr + 2);
236 wptr[3] = PPP_COMP;
237 wptr += PPP_HDRLEN; 237 wptr += PPP_HDRLEN;
238 wptr[0] = state->seqno >> 8; 238 put_unaligned_be16(state->seqno, wptr);
239 wptr[1] = state->seqno;
240 wptr += DEFLATE_OVHD; 239 wptr += DEFLATE_OVHD;
241 olen = PPP_HDRLEN + DEFLATE_OVHD; 240 olen = PPP_HDRLEN + DEFLATE_OVHD;
242 state->strm.next_out = wptr; 241 state->strm.next_out = wptr;
@@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize,
451 } 450 }
452 451
453 /* Check the sequence number. */ 452 /* Check the sequence number. */
454 seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; 453 seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
455 if (seq != (state->seqno & 0xffff)) { 454 if (seq != (state->seqno & 0xffff)) {
456 if (state->debug) 455 if (state->debug)
457 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", 456 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6456484c0299..c7a6c4466978 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
46#include <linux/device.h> 46#include <linux/device.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/slab.h> 48#include <linux/slab.h>
49#include <asm/unaligned.h>
49#include <net/slhc_vj.h> 50#include <net/slhc_vj.h>
50#include <asm/atomic.h> 51#include <asm/atomic.h>
51 52
@@ -210,7 +211,7 @@ struct ppp_net {
210}; 211};
211 212
212/* Get the PPP protocol number from a skb */ 213/* Get the PPP protocol number from a skb */
213#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) 214#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
214 215
215/* We limit the length of ppp->file.rq to this (arbitrary) value */ 216/* We limit the length of ppp->file.rq to this (arbitrary) value */
216#define PPP_MAX_RQLEN 32 217#define PPP_MAX_RQLEN 32
@@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
964 965
965 pp = skb_push(skb, 2); 966 pp = skb_push(skb, 2);
966 proto = npindex_to_proto[npi]; 967 proto = npindex_to_proto[npi];
967 pp[0] = proto >> 8; 968 put_unaligned_be16(proto, pp);
968 pp[1] = proto;
969 969
970 netif_stop_queue(dev); 970 netif_stop_queue(dev);
971 skb_queue_tail(&ppp->file.xq, skb); 971 skb_queue_tail(&ppp->file.xq, skb);
@@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1473 q = skb_put(frag, flen + hdrlen); 1473 q = skb_put(frag, flen + hdrlen);
1474 1474
1475 /* make the MP header */ 1475 /* make the MP header */
1476 q[0] = PPP_MP >> 8; 1476 put_unaligned_be16(PPP_MP, q);
1477 q[1] = PPP_MP;
1478 if (ppp->flags & SC_MP_XSHORTSEQ) { 1477 if (ppp->flags & SC_MP_XSHORTSEQ) {
1479 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1478 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1480 q[3] = ppp->nxseq; 1479 q[3] = ppp->nxseq;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b80cc3e..9a1849a83e2a 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
55#include <linux/ppp_defs.h> 55#include <linux/ppp_defs.h>
56#include <linux/ppp-comp.h> 56#include <linux/ppp-comp.h>
57#include <linux/scatterlist.h> 57#include <linux/scatterlist.h>
58#include <asm/unaligned.h>
58 59
59#include "ppp_mppe.h" 60#include "ppp_mppe.h"
60 61
@@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
395 */ 396 */
396 obuf[0] = PPP_ADDRESS(ibuf); 397 obuf[0] = PPP_ADDRESS(ibuf);
397 obuf[1] = PPP_CONTROL(ibuf); 398 obuf[1] = PPP_CONTROL(ibuf);
398 obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ 399 put_unaligned_be16(PPP_COMP, obuf + 2);
399 obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */
400 obuf += PPP_HDRLEN; 400 obuf += PPP_HDRLEN;
401 401
402 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; 402 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
403 if (state->debug >= 7) 403 if (state->debug >= 7)
404 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, 404 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
405 state->ccount); 405 state->ccount);
406 obuf[0] = state->ccount >> 8; 406 put_unaligned_be16(state->ccount, obuf);
407 obuf[1] = state->ccount & 0xff;
408 407
409 if (!state->stateful || /* stateless mode */ 408 if (!state->stateful || /* stateless mode */
410 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ 409 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3fb8d4..4e6b72f57de8 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
45#include <linux/completion.h> 45#include <linux/completion.h>
46#include <linux/init.h> 46#include <linux/init.h>
47#include <linux/slab.h> 47#include <linux/slab.h>
48#include <asm/unaligned.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49 50
50#define PPP_VERSION "2.4.2" 51#define PPP_VERSION "2.4.2"
@@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
563 int islcp; 564 int islcp;
564 565
565 data = skb->data; 566 data = skb->data;
566 proto = (data[0] << 8) + data[1]; 567 proto = get_unaligned_be16(data);
567 568
568 /* LCP packets with codes between 1 (configure-request) 569 /* LCP packets with codes between 1 (configure-request)
569 * and 7 (code-reject) must be sent as though no options 570 * and 7 (code-reject) must be sent as though no options
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9c2a02d204dc..44e316fd67b8 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -34,8 +34,8 @@
34 34
35#define _QLCNIC_LINUX_MAJOR 5 35#define _QLCNIC_LINUX_MAJOR 5
36#define _QLCNIC_LINUX_MINOR 0 36#define _QLCNIC_LINUX_MINOR 0
37#define _QLCNIC_LINUX_SUBVERSION 14 37#define _QLCNIC_LINUX_SUBVERSION 15
38#define QLCNIC_LINUX_VERSIONID "5.0.14" 38#define QLCNIC_LINUX_VERSIONID "5.0.15"
39#define QLCNIC_DRV_IDC_VER 0x01 39#define QLCNIC_DRV_IDC_VER 0x01
40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -289,6 +289,26 @@ struct uni_data_desc{
289 u32 reserved[5]; 289 u32 reserved[5];
290}; 290};
291 291
292/* Flash Defines and Structures */
293#define QLCNIC_FLT_LOCATION 0x3F1000
294#define QLCNIC_FW_IMAGE_REGION 0x74
295struct qlcnic_flt_header {
296 u16 version;
297 u16 len;
298 u16 checksum;
299 u16 reserved;
300};
301
302struct qlcnic_flt_entry {
303 u8 region;
304 u8 reserved0;
305 u8 attrib;
306 u8 reserved1;
307 u32 size;
308 u32 start_addr;
 309 u32 end_addr;
310};
311
292/* Magic number to let user know flash is programmed */ 312/* Magic number to let user know flash is programmed */
293#define QLCNIC_BDINFO_MAGIC 0x12345678 313#define QLCNIC_BDINFO_MAGIC 0x12345678
294 314
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 1e7af709d395..4c14510e2a87 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
672 if (data[1]) 672 if (data[1])
673 eth_test->flags |= ETH_TEST_FL_FAILED; 673 eth_test->flags |= ETH_TEST_FL_FAILED;
674 674
675 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 675 if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
676 data[2] = qlcnic_irq_test(dev); 676 data[2] = qlcnic_irq_test(dev);
677 if (data[2]) 677 if (data[2])
678 eth_test->flags |= ETH_TEST_FL_FAILED; 678 eth_test->flags |= ETH_TEST_FL_FAILED;
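The one-character fix above is the classic bitmask-test bug: the earlier tests may already have OR-ed ETH_TEST_FL_FAILED into eth_test->flags, so an equality comparison against ETH_TEST_FL_OFFLINE silently stops matching. A minimal demonstration (flag values invented for the example):

#include <assert.h>

#define FL_OFFLINE 0x1
#define FL_FAILED  0x2

int main(void)
{
    unsigned flags = FL_OFFLINE;

    /* An earlier test fails and sets another bit... */
    flags |= FL_FAILED;

    assert(flags & FL_OFFLINE);    /* fixed form: still true */
    assert(flags != FL_OFFLINE);   /* old '==' test now misses it */
    return 0;
}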
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9b9c7c39d3ee..a7f1d5b7e811 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
627 return 0; 627 return 0;
628} 628}
629 629
630static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
631 struct qlcnic_flt_entry *region_entry)
632{
633 struct qlcnic_flt_header flt_hdr;
634 struct qlcnic_flt_entry *flt_entry;
635 int i = 0, ret;
636 u32 entry_size;
637
638 memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
639 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
640 (u8 *)&flt_hdr,
641 sizeof(struct qlcnic_flt_header));
642 if (ret) {
643 dev_warn(&adapter->pdev->dev,
644 "error reading flash layout header\n");
645 return -EIO;
646 }
647
648 entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
649 flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
650 if (flt_entry == NULL) {
651 dev_warn(&adapter->pdev->dev, "error allocating memory\n");
652 return -EIO;
653 }
654
655 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
656 sizeof(struct qlcnic_flt_header),
657 (u8 *)flt_entry, entry_size);
658 if (ret) {
659 dev_warn(&adapter->pdev->dev,
660 "error reading flash layout entries\n");
661 goto err_out;
662 }
663
664 while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
665 if (flt_entry[i].region == region)
666 break;
667 i++;
668 }
669 if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
670 dev_warn(&adapter->pdev->dev,
671 "region=%x not found in %d regions\n", region, i);
672 ret = -EIO;
673 goto err_out;
674 }
675 memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
676
677err_out:
678 vfree(flt_entry);
679 return ret;
680}
681
630int 682int
631qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) 683qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
632{ 684{
685 struct qlcnic_flt_entry fw_entry;
633 u32 ver = -1, min_ver; 686 u32 ver = -1, min_ver;
687 int ret;
634 688
635 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); 689 ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
690 if (!ret)
 691 /* bytes 0-4: signature, 4-8: fw version */
692 qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
693 (int *)&ver);
694 else
695 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
696 (int *)&ver);
636 697
637 ver = QLCNIC_DECODE_VERSION(ver); 698 ver = QLCNIC_DECODE_VERSION(ver);
638 min_ver = QLCNIC_MIN_FW_VERSION; 699 min_ver = QLCNIC_MIN_FW_VERSION;
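qlcnic_get_flt_entry() above is a plain table scan: read the layout header from a fixed flash offset, size the entry array from hdr.len, and linear-search it for the requested region. A compact userspace sketch of the same walk, using an in-memory buffer in place of the flash and assuming packed little-endian fields:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Layout mirrors the structs added above; no padding in either. */
struct flt_header { uint16_t version, len, checksum, reserved; };
struct flt_entry  { uint8_t region, r0, attrib, r1;
                    uint32_t size, start_addr, end_addr; };

static int get_flt_entry(const uint8_t *flash, uint8_t region,
                         struct flt_entry *out)
{
    struct flt_header hdr;
    memcpy(&hdr, flash, sizeof(hdr));

    size_t nentries = (hdr.len - sizeof(hdr)) / sizeof(*out);
    const uint8_t *p = flash + sizeof(hdr);

    for (size_t i = 0; i < nentries; i++, p += sizeof(*out)) {
        memcpy(out, p, sizeof(*out));
        if (out->region == region)
            return 0;
    }
    return -1;   /* region not present in the table */
}

int main(void)
{
    uint8_t flash[64] = {0};
    struct flt_header hdr = { 1, sizeof(hdr) + sizeof(struct flt_entry), 0, 0 };
    struct flt_entry fw = { .region = 0x74, .size = 128,
                            .start_addr = 0x1000, .end_addr = 0x1080 };
    memcpy(flash, &hdr, sizeof(hdr));
    memcpy(flash + sizeof(hdr), &fw, sizeof(fw));

    struct flt_entry e;
    if (!get_flt_entry(flash, 0x74, &e))
        printf("fw image at 0x%x\n", (unsigned)e.start_addr);
    return 0;
}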
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 11e3a46c0911..37c04b4fade3 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -31,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
31 31
32static struct workqueue_struct *qlcnic_wq; 32static struct workqueue_struct *qlcnic_wq;
33static int qlcnic_mac_learn; 33static int qlcnic_mac_learn;
34module_param(qlcnic_mac_learn, int, 0644); 34module_param(qlcnic_mac_learn, int, 0444);
35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); 35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
36 36
37static int use_msi = 1; 37static int use_msi = 1;
38module_param(use_msi, int, 0644); 38module_param(use_msi, int, 0444);
 39MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); 39MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
40 40
41static int use_msi_x = 1; 41static int use_msi_x = 1;
42module_param(use_msi_x, int, 0644); 42module_param(use_msi_x, int, 0444);
 43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); 43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
44 44
45static int auto_fw_reset = AUTO_FW_RESET_ENABLED; 45static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
@@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644);
 47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); 47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
48 48
49static int load_fw_file; 49static int load_fw_file;
50module_param(load_fw_file, int, 0644); 50module_param(load_fw_file, int, 0444);
 51MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); 51MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
52 52
53static int qlcnic_config_npars; 53static int qlcnic_config_npars;
54module_param(qlcnic_config_npars, int, 0644); 54module_param(qlcnic_config_npars, int, 0444);
 55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); 55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
56 56
57static int __devinit qlcnic_probe(struct pci_dev *pdev, 57static int __devinit qlcnic_probe(struct pci_dev *pdev,
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 27a7c20f64cd..bb8645ab247c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1632,36 +1632,134 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1632{ 1632{
1633 __le32 *phytable = (__le32 *)fw->data; 1633 __le32 *phytable = (__le32 *)fw->data;
1634 struct net_device *dev = tp->dev; 1634 struct net_device *dev = tp->dev;
1635 size_t i; 1635 size_t index, fw_size = fw->size / sizeof(*phytable);
1636 u32 predata, count;
1636 1637
1637 if (fw->size % sizeof(*phytable)) { 1638 if (fw->size % sizeof(*phytable)) {
1638 netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); 1639 netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
1639 return; 1640 return;
1640 } 1641 }
1641 1642
1642 for (i = 0; i < fw->size / sizeof(*phytable); i++) { 1643 for (index = 0; index < fw_size; index++) {
1643 u32 action = le32_to_cpu(phytable[i]); 1644 u32 action = le32_to_cpu(phytable[index]);
1645 u32 regno = (action & 0x0fff0000) >> 16;
1644 1646
1645 if (!action) 1647 switch(action & 0xf0000000) {
1648 case PHY_READ:
1649 case PHY_DATA_OR:
1650 case PHY_DATA_AND:
1651 case PHY_READ_EFUSE:
1652 case PHY_CLEAR_READCOUNT:
1653 case PHY_WRITE:
1654 case PHY_WRITE_PREVIOUS:
1655 case PHY_DELAY_MS:
1656 break;
1657
1658 case PHY_BJMPN:
1659 if (regno > index) {
1660 netif_err(tp, probe, tp->dev,
1661 "Out of range of firmware\n");
1662 return;
1663 }
1664 break;
1665 case PHY_READCOUNT_EQ_SKIP:
1666 if (index + 2 >= fw_size) {
1667 netif_err(tp, probe, tp->dev,
1668 "Out of range of firmware\n");
1669 return;
1670 }
1671 break;
1672 case PHY_COMP_EQ_SKIPN:
1673 case PHY_COMP_NEQ_SKIPN:
1674 case PHY_SKIPN:
1675 if (index + 1 + regno >= fw_size) {
1676 netif_err(tp, probe, tp->dev,
1677 "Out of range of firmware\n");
1678 return;
1679 }
1646 break; 1680 break;
1647 1681
1648 if ((action & 0xf0000000) != PHY_WRITE) { 1682 case PHY_READ_MAC_BYTE:
1649 netif_err(tp, probe, dev, 1683 case PHY_WRITE_MAC_BYTE:
1650 "unknown action 0x%08x\n", action); 1684 case PHY_WRITE_ERI_WORD:
1685 default:
1686 netif_err(tp, probe, tp->dev,
1687 "Invalid action 0x%08x\n", action);
1651 return; 1688 return;
1652 } 1689 }
1653 } 1690 }
1654 1691
1655 while (i-- != 0) { 1692 predata = 0;
1656 u32 action = le32_to_cpu(*phytable); 1693 count = 0;
1694
1695 for (index = 0; index < fw_size; ) {
1696 u32 action = le32_to_cpu(phytable[index]);
1657 u32 data = action & 0x0000ffff; 1697 u32 data = action & 0x0000ffff;
1658 u32 reg = (action & 0x0fff0000) >> 16; 1698 u32 regno = (action & 0x0fff0000) >> 16;
1699
1700 if (!action)
1701 break;
1659 1702
1660 switch(action & 0xf0000000) { 1703 switch(action & 0xf0000000) {
1704 case PHY_READ:
1705 predata = rtl_readphy(tp, regno);
1706 count++;
1707 index++;
1708 break;
1709 case PHY_DATA_OR:
1710 predata |= data;
1711 index++;
1712 break;
1713 case PHY_DATA_AND:
1714 predata &= data;
1715 index++;
1716 break;
1717 case PHY_BJMPN:
1718 index -= regno;
1719 break;
1720 case PHY_READ_EFUSE:
1721 predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
1722 index++;
1723 break;
1724 case PHY_CLEAR_READCOUNT:
1725 count = 0;
1726 index++;
1727 break;
1661 case PHY_WRITE: 1728 case PHY_WRITE:
1662 rtl_writephy(tp, reg, data); 1729 rtl_writephy(tp, regno, data);
1663 phytable++; 1730 index++;
1731 break;
1732 case PHY_READCOUNT_EQ_SKIP:
1733 if (count == data)
1734 index += 2;
1735 else
1736 index += 1;
1737 break;
1738 case PHY_COMP_EQ_SKIPN:
1739 if (predata == data)
1740 index += regno;
1741 index++;
1664 break; 1742 break;
1743 case PHY_COMP_NEQ_SKIPN:
1744 if (predata != data)
1745 index += regno;
1746 index++;
1747 break;
1748 case PHY_WRITE_PREVIOUS:
1749 rtl_writephy(tp, regno, predata);
1750 index++;
1751 break;
1752 case PHY_SKIPN:
1753 index += regno + 1;
1754 break;
1755 case PHY_DELAY_MS:
1756 mdelay(data);
1757 index++;
1758 break;
1759
1760 case PHY_READ_MAC_BYTE:
1761 case PHY_WRITE_MAC_BYTE:
1762 case PHY_WRITE_ERI_WORD:
1665 default: 1763 default:
1666 BUG(); 1764 BUG();
1667 } 1765 }
@@ -3069,15 +3167,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3069 rtl8168_driver_start(tp); 3167 rtl8168_driver_start(tp);
3070 } 3168 }
3071 3169
3072 rtl8169_init_phy(dev, tp);
3073
3074 /*
3075 * Pretend we are using VLANs; This bypasses a nasty bug where
3076 * Interrupts stop flowing on high load on 8110SCd controllers.
3077 */
3078 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3079 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3080
3081 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3170 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3082 3171
3083 if (pci_dev_run_wake(pdev)) 3172 if (pci_dev_run_wake(pdev))
@@ -3127,6 +3216,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3127static int rtl8169_open(struct net_device *dev) 3216static int rtl8169_open(struct net_device *dev)
3128{ 3217{
3129 struct rtl8169_private *tp = netdev_priv(dev); 3218 struct rtl8169_private *tp = netdev_priv(dev);
3219 void __iomem *ioaddr = tp->mmio_addr;
3130 struct pci_dev *pdev = tp->pci_dev; 3220 struct pci_dev *pdev = tp->pci_dev;
3131 int retval = -ENOMEM; 3221 int retval = -ENOMEM;
3132 3222
@@ -3162,6 +3252,15 @@ static int rtl8169_open(struct net_device *dev)
3162 3252
3163 napi_enable(&tp->napi); 3253 napi_enable(&tp->napi);
3164 3254
3255 rtl8169_init_phy(dev, tp);
3256
3257 /*
3258 * Pretend we are using VLANs; This bypasses a nasty bug where
3259 * Interrupts stop flowing on high load on 8110SCd controllers.
3260 */
3261 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3262 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3263
3165 rtl_pll_power_up(tp); 3264 rtl_pll_power_up(tp);
3166 3265
3167 rtl_hw_start(dev); 3266 rtl_hw_start(dev);
@@ -3171,7 +3270,7 @@ static int rtl8169_open(struct net_device *dev)
3171 tp->saved_wolopts = 0; 3270 tp->saved_wolopts = 0;
3172 pm_runtime_put_noidle(&pdev->dev); 3271 pm_runtime_put_noidle(&pdev->dev);
3173 3272
3174 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 3273 rtl8169_check_link_status(dev, tp, ioaddr);
3175out: 3274out:
3176 return retval; 3275 return retval;
3177 3276
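The rewritten rtl_phy_write_fw() is now a two-pass interpreter: the first loop rejects unknown opcodes and out-of-range jump targets, and only then does the second loop execute the firmware with no further bounds checks. The shape of that validate-then-run pattern, reduced to a toy instruction set (opcodes invented for illustration):

#include <stdio.h>
#include <stdlib.h>

enum { OP_END, OP_SET, OP_ADD, OP_JMP_BACK };

struct insn { int op; int arg; };

/* Pass 1: reject anything the executor is not prepared to handle. */
static int validate(const struct insn *prog, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        switch (prog[i].op) {
        case OP_END:
        case OP_SET:
        case OP_ADD:
            break;
        case OP_JMP_BACK:
            if ((size_t)prog[i].arg > i)
                return -1;   /* would jump before the program */
            break;
        default:
            return -1;       /* unknown action */
        }
    }
    return 0;
}

/* Pass 2: execute with no further checks, like the second loop above. */
static int run(const struct insn *prog, size_t n)
{
    int acc = 0;
    for (size_t i = 0; i < n; ) {
        switch (prog[i].op) {
        case OP_END:      return acc;
        case OP_SET:      acc = prog[i].arg;  i++; break;
        case OP_ADD:      acc += prog[i].arg; i++; break;
        case OP_JMP_BACK: i -= prog[i].arg;        break;
        }
    }
    return acc;
}

int main(void)
{
    struct insn prog[] = { {OP_SET, 1}, {OP_ADD, 2}, {OP_END, 0} };
    size_t n = sizeof(prog) / sizeof(prog[0]);

    if (validate(prog, n))
        return EXIT_FAILURE;   /* reject before touching hardware */
    printf("result = %d\n", run(prog, n));
    return 0;
}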
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 39996bf3b247..7d85a38377a1 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
46 46
47#include <asm/irq.h> 47#include <asm/irq.h>
48 48
49#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
50#define SKY2_VLAN_TAG_USED 1
51#endif
52
53#include "sky2.h" 49#include "sky2.h"
54 50
55#define DRV_NAME "sky2" 51#define DRV_NAME "sky2"
@@ -1326,40 +1322,35 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1326 return err; 1322 return err;
1327} 1323}
1328 1324
1329#ifdef SKY2_VLAN_TAG_USED 1325#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
1330static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) 1326
1327static void sky2_vlan_mode(struct net_device *dev)
1331{ 1328{
1332 if (onoff) { 1329 struct sky2_port *sky2 = netdev_priv(dev);
1330 struct sky2_hw *hw = sky2->hw;
1331 u16 port = sky2->port;
1332
1333 if (dev->features & NETIF_F_HW_VLAN_RX)
1333 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1334 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1334 RX_VLAN_STRIP_ON); 1335 RX_VLAN_STRIP_ON);
1335 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1336 else
1336 TX_VLAN_TAG_ON);
1337 } else {
1338 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1337 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1339 RX_VLAN_STRIP_OFF); 1338 RX_VLAN_STRIP_OFF);
1339
1340 dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
1341 if (dev->features & NETIF_F_HW_VLAN_TX)
1342 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1343 TX_VLAN_TAG_ON);
1344 else {
1340 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1345 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1341 TX_VLAN_TAG_OFF); 1346 TX_VLAN_TAG_OFF);
1347
1348 /* Can't do transmit offload of vlan without hw vlan */
1349 dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
1350 | NETIF_F_ALL_CSUM);
1342 } 1351 }
1343} 1352}
1344 1353
1345static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1346{
1347 struct sky2_port *sky2 = netdev_priv(dev);
1348 struct sky2_hw *hw = sky2->hw;
1349 u16 port = sky2->port;
1350
1351 netif_tx_lock_bh(dev);
1352 napi_disable(&hw->napi);
1353
1354 sky2->vlgrp = grp;
1355 sky2_set_vlan_mode(hw, port, grp != NULL);
1356
1357 sky2_read32(hw, B0_Y2_SP_LISR);
1358 napi_enable(&hw->napi);
1359 netif_tx_unlock_bh(dev);
1360}
1361#endif
1362
1363/* Amount of required worst case padding in rx buffer */ 1354/* Amount of required worst case padding in rx buffer */
1364static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) 1355static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
1365{ 1356{
@@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2)
1635 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1626 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1636 sky2->tx_ring_size - 1); 1627 sky2->tx_ring_size - 1);
1637 1628
1638#ifdef SKY2_VLAN_TAG_USED 1629 sky2_vlan_mode(sky2->netdev);
1639 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1640#endif
1641 1630
1642 sky2_rx_start(sky2); 1631 sky2_rx_start(sky2);
1643} 1632}
@@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1780 } 1769 }
1781 1770
1782 ctrl = 0; 1771 ctrl = 0;
1783#ifdef SKY2_VLAN_TAG_USED 1772
1784 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ 1773 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1785 if (vlan_tx_tag_present(skb)) { 1774 if (vlan_tx_tag_present(skb)) {
1786 if (!le) { 1775 if (!le) {
@@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1792 le->length = cpu_to_be16(vlan_tx_tag_get(skb)); 1781 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1793 ctrl |= INS_VLAN; 1782 ctrl |= INS_VLAN;
1794 } 1783 }
1795#endif
1796 1784
1797 /* Handle TCP checksum offload */ 1785 /* Handle TCP checksum offload */
1798 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1786 if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2432,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2432 struct sk_buff *skb = NULL; 2420 struct sk_buff *skb = NULL;
2433 u16 count = (status & GMR_FS_LEN) >> 16; 2421 u16 count = (status & GMR_FS_LEN) >> 16;
2434 2422
2435#ifdef SKY2_VLAN_TAG_USED 2423 if (status & GMR_FS_VLAN)
2436 /* Account for vlan tag */ 2424 count -= VLAN_HLEN; /* Account for vlan tag */
2437 if (sky2->vlgrp && (status & GMR_FS_VLAN))
2438 count -= VLAN_HLEN;
2439#endif
2440 2425
2441 netif_printk(sky2, rx_status, KERN_DEBUG, dev, 2426 netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2442 "rx slot %u status 0x%x len %d\n", 2427 "rx slot %u status 0x%x len %d\n",
@@ -2504,17 +2489,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2504static inline void sky2_skb_rx(const struct sky2_port *sky2, 2489static inline void sky2_skb_rx(const struct sky2_port *sky2,
2505 u32 status, struct sk_buff *skb) 2490 u32 status, struct sk_buff *skb)
2506{ 2491{
2507#ifdef SKY2_VLAN_TAG_USED 2492 if (status & GMR_FS_VLAN)
2508 u16 vlan_tag = be16_to_cpu(sky2->rx_tag); 2493 __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
2509 if (sky2->vlgrp && (status & GMR_FS_VLAN)) { 2494
2510 if (skb->ip_summed == CHECKSUM_NONE)
2511 vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
2512 else
2513 vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
2514 vlan_tag, skb);
2515 return;
2516 }
2517#endif
2518 if (skb->ip_summed == CHECKSUM_NONE) 2495 if (skb->ip_summed == CHECKSUM_NONE)
2519 netif_receive_skb(skb); 2496 netif_receive_skb(skb);
2520 else 2497 else
@@ -2631,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2631 goto exit_loop; 2608 goto exit_loop;
2632 break; 2609 break;
2633 2610
2634#ifdef SKY2_VLAN_TAG_USED
2635 case OP_RXVLAN: 2611 case OP_RXVLAN:
2636 sky2->rx_tag = length; 2612 sky2->rx_tag = length;
2637 break; 2613 break;
@@ -2639,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2639 case OP_RXCHKSVLAN: 2615 case OP_RXCHKSVLAN:
2640 sky2->rx_tag = length; 2616 sky2->rx_tag = length;
2641 /* fall through */ 2617 /* fall through */
2642#endif
2643 case OP_RXCHKS: 2618 case OP_RXCHKS:
2644 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) 2619 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
2645 sky2_rx_checksum(sky2, status); 2620 sky2_rx_checksum(sky2, status);
@@ -3042,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
3042 | SKY2_HW_NEW_LE 3017 | SKY2_HW_NEW_LE
3043 | SKY2_HW_AUTO_TX_SUM 3018 | SKY2_HW_AUTO_TX_SUM
3044 | SKY2_HW_ADV_POWER_CTL; 3019 | SKY2_HW_ADV_POWER_CTL;
3020
 3021 /* The workaround for FE+ status conflicts with VLAN tag detection. */
3022 if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
3023 hw->flags |= SKY2_HW_VLAN_BROKEN;
3045 break; 3024 break;
3046 3025
3047 case CHIP_ID_YUKON_SUPR: 3026 case CHIP_ID_YUKON_SUPR:
@@ -3411,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
3411 u32 modes = SUPPORTED_10baseT_Half 3390 u32 modes = SUPPORTED_10baseT_Half
3412 | SUPPORTED_10baseT_Full 3391 | SUPPORTED_10baseT_Full
3413 | SUPPORTED_100baseT_Half 3392 | SUPPORTED_100baseT_Half
3414 | SUPPORTED_100baseT_Full 3393 | SUPPORTED_100baseT_Full;
3415 | SUPPORTED_Autoneg | SUPPORTED_TP;
3416 3394
3417 if (hw->flags & SKY2_HW_GIGABIT) 3395 if (hw->flags & SKY2_HW_GIGABIT)
3418 modes |= SUPPORTED_1000baseT_Half 3396 modes |= SUPPORTED_1000baseT_Half
3419 | SUPPORTED_1000baseT_Full; 3397 | SUPPORTED_1000baseT_Full;
3420 return modes; 3398 return modes;
3421 } else 3399 } else
3422 return SUPPORTED_1000baseT_Half 3400 return SUPPORTED_1000baseT_Half
3423 | SUPPORTED_1000baseT_Full 3401 | SUPPORTED_1000baseT_Full;
3424 | SUPPORTED_Autoneg
3425 | SUPPORTED_FIBRE;
3426} 3402}
3427 3403
3428static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3404static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3436,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3436 if (sky2_is_copper(hw)) { 3412 if (sky2_is_copper(hw)) {
3437 ecmd->port = PORT_TP; 3413 ecmd->port = PORT_TP;
3438 ecmd->speed = sky2->speed; 3414 ecmd->speed = sky2->speed;
3415 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
3439 } else { 3416 } else {
3440 ecmd->speed = SPEED_1000; 3417 ecmd->speed = SPEED_1000;
3441 ecmd->port = PORT_FIBRE; 3418 ecmd->port = PORT_FIBRE;
3419 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
3442 } 3420 }
3443 3421
3444 ecmd->advertising = sky2->advertising; 3422 ecmd->advertising = sky2->advertising;
@@ -3455,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3455 u32 supported = sky2_supported_modes(hw); 3433 u32 supported = sky2_supported_modes(hw);
3456 3434
3457 if (ecmd->autoneg == AUTONEG_ENABLE) { 3435 if (ecmd->autoneg == AUTONEG_ENABLE) {
3436 if (ecmd->advertising & ~supported)
3437 return -EINVAL;
3438
3439 if (sky2_is_copper(hw))
3440 sky2->advertising = ecmd->advertising |
3441 ADVERTISED_TP |
3442 ADVERTISED_Autoneg;
3443 else
3444 sky2->advertising = ecmd->advertising |
3445 ADVERTISED_FIBRE |
3446 ADVERTISED_Autoneg;
3447
3458 sky2->flags |= SKY2_FLAG_AUTO_SPEED; 3448 sky2->flags |= SKY2_FLAG_AUTO_SPEED;
3459 ecmd->advertising = supported;
3460 sky2->duplex = -1; 3449 sky2->duplex = -1;
3461 sky2->speed = -1; 3450 sky2->speed = -1;
3462 } else { 3451 } else {
@@ -3500,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3500 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; 3489 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
3501 } 3490 }
3502 3491
3503 sky2->advertising = ecmd->advertising;
3504
3505 if (netif_running(dev)) { 3492 if (netif_running(dev)) {
3506 sky2_phy_reinit(sky2); 3493 sky2_phy_reinit(sky2);
3507 sky2_set_multicast(dev); 3494 sky2_set_multicast(dev);
@@ -4229,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
4229static int sky2_set_flags(struct net_device *dev, u32 data) 4216static int sky2_set_flags(struct net_device *dev, u32 data)
4230{ 4217{
4231 struct sky2_port *sky2 = netdev_priv(dev); 4218 struct sky2_port *sky2 = netdev_priv(dev);
4232 u32 supported = 4219 unsigned long old_feat = dev->features;
4233 (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH; 4220 u32 supported = 0;
4234 int rc; 4221 int rc;
4235 4222
4223 if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
4224 supported |= ETH_FLAG_RXHASH;
4225
4226 if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
4227 supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
4228
4229 printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
4230 supported, data);
4231
4236 rc = ethtool_op_set_flags(dev, data, supported); 4232 rc = ethtool_op_set_flags(dev, data, supported);
4237 if (rc) 4233 if (rc)
4238 return rc; 4234 return rc;
4239 4235
4240 rx_set_rss(dev); 4236 if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
4237 rx_set_rss(dev);
4238
4239 if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
4240 sky2_vlan_mode(dev);
4241 4241
4242 return 0; 4242 return 0;
4243} 4243}
@@ -4273,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
4273 .get_sset_count = sky2_get_sset_count, 4273 .get_sset_count = sky2_get_sset_count,
4274 .get_ethtool_stats = sky2_get_ethtool_stats, 4274 .get_ethtool_stats = sky2_get_ethtool_stats,
4275 .set_flags = sky2_set_flags, 4275 .set_flags = sky2_set_flags,
4276 .get_flags = ethtool_op_get_flags,
4276}; 4277};
4277 4278
4278#ifdef CONFIG_SKY2_DEBUG 4279#ifdef CONFIG_SKY2_DEBUG
@@ -4554,9 +4555,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4554 .ndo_change_mtu = sky2_change_mtu, 4555 .ndo_change_mtu = sky2_change_mtu,
4555 .ndo_tx_timeout = sky2_tx_timeout, 4556 .ndo_tx_timeout = sky2_tx_timeout,
4556 .ndo_get_stats64 = sky2_get_stats, 4557 .ndo_get_stats64 = sky2_get_stats,
4557#ifdef SKY2_VLAN_TAG_USED
4558 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4559#endif
4560#ifdef CONFIG_NET_POLL_CONTROLLER 4558#ifdef CONFIG_NET_POLL_CONTROLLER
4561 .ndo_poll_controller = sky2_netpoll, 4559 .ndo_poll_controller = sky2_netpoll,
4562#endif 4560#endif
@@ -4572,9 +4570,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4572 .ndo_change_mtu = sky2_change_mtu, 4570 .ndo_change_mtu = sky2_change_mtu,
4573 .ndo_tx_timeout = sky2_tx_timeout, 4571 .ndo_tx_timeout = sky2_tx_timeout,
4574 .ndo_get_stats64 = sky2_get_stats, 4572 .ndo_get_stats64 = sky2_get_stats,
4575#ifdef SKY2_VLAN_TAG_USED
4576 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4577#endif
4578 }, 4573 },
4579}; 4574};
4580 4575
@@ -4625,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4625 sky2->port = port; 4620 sky2->port = port;
4626 4621
4627 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG 4622 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
4628 | NETIF_F_TSO | NETIF_F_GRO; 4623 | NETIF_F_TSO | NETIF_F_GRO;
4624
4629 if (highmem) 4625 if (highmem)
4630 dev->features |= NETIF_F_HIGHDMA; 4626 dev->features |= NETIF_F_HIGHDMA;
4631 4627
@@ -4633,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4633 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) 4629 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
4634 dev->features |= NETIF_F_RXHASH; 4630 dev->features |= NETIF_F_RXHASH;
4635 4631
4636#ifdef SKY2_VLAN_TAG_USED 4632 if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
4637 /* The workaround for FE+ status conflicts with VLAN tag detection. */
4638 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
4639 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
4640 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 4633 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4641 }
4642#endif
4643 4634
4644 /* read the mac address */ 4635 /* read the mac address */
4645 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); 4636 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
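With the vlan_group plumbing gone, sky2 follows the generic feature-flag model: sky2_set_flags() builds a supported mask from the hardware quirk bits, lets ethtool_op_set_flags() update dev->features, and reprograms only the blocks whose bits actually changed (the XOR of old and new). A sketch of that discipline with invented flag values:

#include <stdio.h>

#define F_RXHASH 0x1
#define F_VLAN   0x6   /* rx|tx vlan offload */

struct netdev { unsigned features; };

static void apply_features(struct netdev *d, unsigned wanted,
                           unsigned supported)
{
    unsigned old = d->features;

    d->features = wanted & supported;

    /* Touch hardware only for the bits that flipped. */
    if ((old ^ d->features) & F_RXHASH)
        printf("reprogram RSS\n");
    if ((old ^ d->features) & F_VLAN)
        printf("reprogram VLAN tagging\n");
}

int main(void)
{
    struct netdev d = { .features = F_RXHASH };
    apply_features(&d, F_RXHASH | F_VLAN, F_RXHASH | F_VLAN);
    return 0;
}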
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 80bdc404f1ea..6861b0e8db9a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2236,11 +2236,8 @@ struct sky2_port {
2236 u16 rx_pending; 2236 u16 rx_pending;
2237 u16 rx_data_size; 2237 u16 rx_data_size;
2238 u16 rx_nfrags; 2238 u16 rx_nfrags;
2239
2240#ifdef SKY2_VLAN_TAG_USED
2241 u16 rx_tag; 2239 u16 rx_tag;
2242 struct vlan_group *vlgrp; 2240
2243#endif
2244 struct { 2241 struct {
2245 unsigned long last; 2242 unsigned long last;
2246 u32 mac_rp; 2243 u32 mac_rp;
@@ -2284,6 +2281,7 @@ struct sky2_hw {
2284#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2281#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2285#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2282#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2286#define SKY2_HW_RSS_BROKEN 0x00000100 2283#define SKY2_HW_RSS_BROKEN 0x00000100
2284#define SKY2_HW_VLAN_BROKEN 0x00000200
2287 2285
2288 u8 chip_id; 2286 u8 chip_id;
2289 u8 chip_rev; 2287 u8 chip_rev;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cdbeec9f83ea..546de5749824 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
488 488
489 if (unlikely(!netif_carrier_ok(dev) || 489 if (unlikely(!netif_carrier_ok(dev) ||
490 (frags > 1 && !xennet_can_sg(dev)) || 490 (frags > 1 && !xennet_can_sg(dev)) ||
491 netif_needs_gso(dev, skb))) { 491 netif_needs_gso(skb, netif_skb_features(skb)))) {
492 spin_unlock_irq(&np->tx_lock); 492 spin_unlock_irq(&np->tx_lock);
493 goto drop; 493 goto drop;
494 } 494 }
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index ccea15c11c19..50cb1e1b4a12 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_PS3_VUART) += ps3-vuart.o 1obj-$(CONFIG_PS3_VUART) += ps3-vuart.o
2obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o 2obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
3ps3av_mod-objs += ps3av.o ps3av_cmd.o 3ps3av_mod-y := ps3av.o ps3av_cmd.o
4obj-$(CONFIG_PPC_PS3) += sys-manager-core.o 4obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
5obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o 5obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
6obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o 6obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e6539cbabb35..9583cbcc6b79 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -16,6 +16,7 @@
16#include <linux/kdev_t.h> 16#include <linux/kdev_t.h>
17#include <linux/idr.h> 17#include <linux/idr.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/workqueue.h>
19 20
20#include "rtc-core.h" 21#include "rtc-core.h"
21 22
@@ -152,6 +153,18 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
152 spin_lock_init(&rtc->irq_task_lock); 153 spin_lock_init(&rtc->irq_task_lock);
153 init_waitqueue_head(&rtc->irq_queue); 154 init_waitqueue_head(&rtc->irq_queue);
154 155
156 /* Init timerqueue */
157 timerqueue_init_head(&rtc->timerqueue);
158 INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
159 /* Init aie timer */
160 rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
161 /* Init uie timer */
162 rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
163 /* Init pie timer */
164 hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
165 rtc->pie_timer.function = rtc_pie_update_irq;
166 rtc->pie_enabled = 0;
167
155 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); 168 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
156 dev_set_name(&rtc->dev, "rtc%d", id); 169 dev_set_name(&rtc->dev, "rtc%d", id);
157 170
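This registration hunk wires up everything the interface.c rework below depends on: a timerqueue, the irq work item, the AIE/UIE rtc timers and the PIE hrtimer are all initialized before the device becomes visible. The queue discipline those pieces share is "keep timers sorted by expiry, reprogram the hardware only when the head changes"; a minimal sketch, assuming a simple sorted list in place of the kernel timerqueue:

#include <stdio.h>

struct timer { long long expires; struct timer *next; };

static struct timer *head;

static void program_hw(long long expires)
{
    printf("arm hw alarm for %lld\n", expires);
}

/* Insert in expiry order; reprogram only if we became the head. */
static void enqueue(struct timer *t)
{
    struct timer **p = &head;
    while (*p && (*p)->expires <= t->expires)
        p = &(*p)->next;
    t->next = *p;
    *p = t;
    if (head == t)   /* new earliest event */
        program_hw(t->expires);
}

int main(void)
{
    struct timer a = { 100, NULL }, b = { 50, NULL };
    enqueue(&a);     /* arms hw for 100 */
    enqueue(&b);     /* re-arms hw for 50 */
    return 0;
}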
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c816238aa9..90384b9f6b2c 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,15 +14,11 @@
14#include <linux/rtc.h> 14#include <linux/rtc.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/log2.h> 16#include <linux/log2.h>
17#include <linux/workqueue.h>
17 18
18int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) 19static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
19{ 20{
20 int err; 21 int err;
21
22 err = mutex_lock_interruptible(&rtc->ops_lock);
23 if (err)
24 return err;
25
26 if (!rtc->ops) 22 if (!rtc->ops)
27 err = -ENODEV; 23 err = -ENODEV;
28 else if (!rtc->ops->read_time) 24 else if (!rtc->ops->read_time)
@@ -31,7 +27,18 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
31 memset(tm, 0, sizeof(struct rtc_time)); 27 memset(tm, 0, sizeof(struct rtc_time));
32 err = rtc->ops->read_time(rtc->dev.parent, tm); 28 err = rtc->ops->read_time(rtc->dev.parent, tm);
33 } 29 }
30 return err;
31}
32
33int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
34{
35 int err;
34 36
37 err = mutex_lock_interruptible(&rtc->ops_lock);
38 if (err)
39 return err;
40
41 err = __rtc_read_time(rtc, tm);
35 mutex_unlock(&rtc->ops_lock); 42 mutex_unlock(&rtc->ops_lock);
36 return err; 43 return err;
37} 44}
@@ -106,188 +113,54 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
106} 113}
107EXPORT_SYMBOL_GPL(rtc_set_mmss); 114EXPORT_SYMBOL_GPL(rtc_set_mmss);
108 115
109static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 116int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
110{ 117{
111 int err; 118 int err;
112 119
113 err = mutex_lock_interruptible(&rtc->ops_lock); 120 err = mutex_lock_interruptible(&rtc->ops_lock);
114 if (err) 121 if (err)
115 return err; 122 return err;
116 123 alarm->enabled = rtc->aie_timer.enabled;
117 if (rtc->ops == NULL) 124 if (alarm->enabled)
118 err = -ENODEV; 125 alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
119 else if (!rtc->ops->read_alarm)
120 err = -EINVAL;
121 else {
122 memset(alarm, 0, sizeof(struct rtc_wkalrm));
123 err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
124 }
125
126 mutex_unlock(&rtc->ops_lock); 126 mutex_unlock(&rtc->ops_lock);
127 return err; 127
128 return 0;
128} 129}
130EXPORT_SYMBOL_GPL(rtc_read_alarm);
129 131
130int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 132int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
131{ 133{
134 struct rtc_time tm;
135 long now, scheduled;
132 int err; 136 int err;
133 struct rtc_time before, now;
134 int first_time = 1;
135 unsigned long t_now, t_alm;
136 enum { none, day, month, year } missing = none;
137 unsigned days;
138
139 /* The lower level RTC driver may return -1 in some fields,
140 * creating invalid alarm->time values, for reasons like:
141 *
142 * - The hardware may not be capable of filling them in;
143 * many alarms match only on time-of-day fields, not
144 * day/month/year calendar data.
145 *
146 * - Some hardware uses illegal values as "wildcard" match
147 * values, which non-Linux firmware (like a BIOS) may try
148 * to set up as e.g. "alarm 15 minutes after each hour".
149 * Linux uses only oneshot alarms.
150 *
151 * When we see that here, we deal with it by using values from
152 * a current RTC timestamp for any missing (-1) values. The
153 * RTC driver prevents "periodic alarm" modes.
154 *
 155 * But this can be racy, because some fields of the RTC timestamp
156 * may have wrapped in the interval since we read the RTC alarm,
157 * which would lead to us inserting inconsistent values in place
158 * of the -1 fields.
159 *
160 * Reading the alarm and timestamp in the reverse sequence
161 * would have the same race condition, and not solve the issue.
162 *
163 * So, we must first read the RTC timestamp,
164 * then read the RTC alarm value,
165 * and then read a second RTC timestamp.
166 *
167 * If any fields of the second timestamp have changed
168 * when compared with the first timestamp, then we know
169 * our timestamp may be inconsistent with that used by
170 * the low-level rtc_read_alarm_internal() function.
171 *
172 * So, when the two timestamps disagree, we just loop and do
173 * the process again to get a fully consistent set of values.
174 *
175 * This could all instead be done in the lower level driver,
176 * but since more than one lower level RTC implementation needs it,
 177 * then it's probably best to do it here instead of there.
178 */
179 137
180 /* Get the "before" timestamp */ 138 err = rtc_valid_tm(&alarm->time);
181 err = rtc_read_time(rtc, &before); 139 if (err)
182 if (err < 0)
183 return err; 140 return err;
184 do { 141 rtc_tm_to_time(&alarm->time, &scheduled);
185 if (!first_time)
186 memcpy(&before, &now, sizeof(struct rtc_time));
187 first_time = 0;
188
189 /* get the RTC alarm values, which may be incomplete */
190 err = rtc_read_alarm_internal(rtc, alarm);
191 if (err)
192 return err;
193 if (!alarm->enabled)
194 return 0;
195
196 /* full-function RTCs won't have such missing fields */
197 if (rtc_valid_tm(&alarm->time) == 0)
198 return 0;
199
200 /* get the "after" timestamp, to detect wrapped fields */
201 err = rtc_read_time(rtc, &now);
202 if (err < 0)
203 return err;
204
205 /* note that tm_sec is a "don't care" value here: */
206 } while ( before.tm_min != now.tm_min
207 || before.tm_hour != now.tm_hour
208 || before.tm_mon != now.tm_mon
209 || before.tm_year != now.tm_year);
210
211 /* Fill in the missing alarm fields using the timestamp; we
212 * know there's at least one since alarm->time is invalid.
213 */
214 if (alarm->time.tm_sec == -1)
215 alarm->time.tm_sec = now.tm_sec;
216 if (alarm->time.tm_min == -1)
217 alarm->time.tm_min = now.tm_min;
218 if (alarm->time.tm_hour == -1)
219 alarm->time.tm_hour = now.tm_hour;
220
221 /* For simplicity, only support date rollover for now */
222 if (alarm->time.tm_mday == -1) {
223 alarm->time.tm_mday = now.tm_mday;
224 missing = day;
225 }
226 if (alarm->time.tm_mon == -1) {
227 alarm->time.tm_mon = now.tm_mon;
228 if (missing == none)
229 missing = month;
230 }
231 if (alarm->time.tm_year == -1) {
232 alarm->time.tm_year = now.tm_year;
233 if (missing == none)
234 missing = year;
235 }
236
237 /* with luck, no rollover is needed */
238 rtc_tm_to_time(&now, &t_now);
239 rtc_tm_to_time(&alarm->time, &t_alm);
240 if (t_now < t_alm)
241 goto done;
242
243 switch (missing) {
244 142
245 /* 24 hour rollover ... if it's now 10am Monday, an alarm that 143 /* Make sure we're not setting alarms in the past */
 246 * will trigger at 5am will do so at 5am Tuesday, which 144 err = __rtc_read_time(rtc, &tm);
247 * could also be in the next month or year. This is a common 145 rtc_tm_to_time(&tm, &now);
248 * case, especially for PCs. 146 if (scheduled <= now)
249 */ 147 return -ETIME;
250 case day: 148 /*
251 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); 149 * XXX - We just checked to make sure the alarm time is not
252 t_alm += 24 * 60 * 60; 150 * in the past, but there is still a race window where if
 254 rtc_time_to_tm(t_alm, &alarm->time); 151 * the alarm is set for the next second and the second ticks
254 break; 152 * over right here, before we set the alarm.
255
 256 /* Month rollover ... if it's the 31st, an alarm on the 3rd will
257 * be next month. An alarm matching on the 30th, 29th, or 28th
258 * may end up in the month after that! Many newer PCs support
259 * this type of alarm.
260 */ 153 */
261 case month:
262 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
263 do {
264 if (alarm->time.tm_mon < 11)
265 alarm->time.tm_mon++;
266 else {
267 alarm->time.tm_mon = 0;
268 alarm->time.tm_year++;
269 }
270 days = rtc_month_days(alarm->time.tm_mon,
271 alarm->time.tm_year);
272 } while (days < alarm->time.tm_mday);
273 break;
274
275 /* Year rollover ... easy except for leap years! */
276 case year:
277 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
278 do {
279 alarm->time.tm_year++;
280 } while (rtc_valid_tm(&alarm->time) != 0);
281 break;
282
283 default:
284 dev_warn(&rtc->dev, "alarm rollover not handled\n");
285 }
286 154
287done: 155 if (!rtc->ops)
288 return 0; 156 err = -ENODEV;
157 else if (!rtc->ops->set_alarm)
158 err = -EINVAL;
159 else
160 err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
161
162 return err;
289} 163}
290EXPORT_SYMBOL_GPL(rtc_read_alarm);
291 164
292int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 165int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
293{ 166{
@@ -300,16 +173,18 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
300 err = mutex_lock_interruptible(&rtc->ops_lock); 173 err = mutex_lock_interruptible(&rtc->ops_lock);
301 if (err) 174 if (err)
302 return err; 175 return err;
303 176 if (rtc->aie_timer.enabled) {
304 if (!rtc->ops) 177 rtc_timer_remove(rtc, &rtc->aie_timer);
305 err = -ENODEV; 178 rtc->aie_timer.enabled = 0;
306 else if (!rtc->ops->set_alarm) 179 }
307 err = -EINVAL; 180 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
308 else 181 rtc->aie_timer.period = ktime_set(0, 0);
309 err = rtc->ops->set_alarm(rtc->dev.parent, alarm); 182 if (alarm->enabled) {
310 183 rtc->aie_timer.enabled = 1;
184 rtc_timer_enqueue(rtc, &rtc->aie_timer);
185 }
311 mutex_unlock(&rtc->ops_lock); 186 mutex_unlock(&rtc->ops_lock);
312 return err; 187 return 0;
313} 188}
314EXPORT_SYMBOL_GPL(rtc_set_alarm); 189EXPORT_SYMBOL_GPL(rtc_set_alarm);
315 190
@@ -319,6 +194,16 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
319 if (err) 194 if (err)
320 return err; 195 return err;
321 196
197 if (rtc->aie_timer.enabled != enabled) {
198 if (enabled) {
199 rtc->aie_timer.enabled = 1;
200 rtc_timer_enqueue(rtc, &rtc->aie_timer);
201 } else {
202 rtc_timer_remove(rtc, &rtc->aie_timer);
203 rtc->aie_timer.enabled = 0;
204 }
205 }
206
322 if (!rtc->ops) 207 if (!rtc->ops)
323 err = -ENODEV; 208 err = -ENODEV;
324 else if (!rtc->ops->alarm_irq_enable) 209 else if (!rtc->ops->alarm_irq_enable)
@@ -337,52 +222,53 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
337 if (err) 222 if (err)
338 return err; 223 return err;
339 224
340#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 225 /* make sure we're changing state */
341 if (enabled == 0 && rtc->uie_irq_active) { 226 if (rtc->uie_rtctimer.enabled == enabled)
342 mutex_unlock(&rtc->ops_lock); 227 goto out;
343 return rtc_dev_update_irq_enable_emul(rtc, enabled); 228
229 if (enabled) {
230 struct rtc_time tm;
231 ktime_t now, onesec;
232
233 __rtc_read_time(rtc, &tm);
234 onesec = ktime_set(1, 0);
235 now = rtc_tm_to_ktime(tm);
236 rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
237 rtc->uie_rtctimer.period = ktime_set(1, 0);
238 rtc->uie_rtctimer.enabled = 1;
239 rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
240 } else {
241 rtc_timer_remove(rtc, &rtc->uie_rtctimer);
242 rtc->uie_rtctimer.enabled = 0;
344 } 243 }
345#endif
346
347 if (!rtc->ops)
348 err = -ENODEV;
349 else if (!rtc->ops->update_irq_enable)
350 err = -EINVAL;
351 else
352 err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
353 244
245out:
354 mutex_unlock(&rtc->ops_lock); 246 mutex_unlock(&rtc->ops_lock);
355
356#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
357 /*
358 * Enable emulation if the driver did not provide
359 * the update_irq_enable function pointer or if returned
360 * -EINVAL to signal that it has been configured without
361 * interrupts or that are not available at the moment.
362 */
363 if (err == -EINVAL)
364 err = rtc_dev_update_irq_enable_emul(rtc, enabled);
365#endif
366 return err; 247 return err;
248
367} 249}
368EXPORT_SYMBOL_GPL(rtc_update_irq_enable); 250EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
369 251
252
370/** 253/**
371 * rtc_update_irq - report RTC periodic, alarm, and/or update irqs 254 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
372 * @rtc: the rtc device 255 * @rtc: pointer to the rtc device
373 * @num: how many irqs are being reported (usually one) 256 *
374 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF 257 * This function is called when an AIE, UIE or PIE mode interrupt
375 * Context: any 258 * has occured (or been emulated).
259 *
260 * Triggers the registered irq_task function callback.
376 */ 261 */
377void rtc_update_irq(struct rtc_device *rtc, 262static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
378 unsigned long num, unsigned long events)
379{ 263{
380 unsigned long flags; 264 unsigned long flags;
381 265
266 /* mark one irq of the appropriate mode */
382 spin_lock_irqsave(&rtc->irq_lock, flags); 267 spin_lock_irqsave(&rtc->irq_lock, flags);
383 rtc->irq_data = (rtc->irq_data + (num << 8)) | events; 268 rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
384 spin_unlock_irqrestore(&rtc->irq_lock, flags); 269 spin_unlock_irqrestore(&rtc->irq_lock, flags);
385 270
271 /* call the task func */
386 spin_lock_irqsave(&rtc->irq_task_lock, flags); 272 spin_lock_irqsave(&rtc->irq_task_lock, flags);
387 if (rtc->irq_task) 273 if (rtc->irq_task)
388 rtc->irq_task->func(rtc->irq_task->private_data); 274 rtc->irq_task->func(rtc->irq_task->private_data);
@@ -391,6 +277,69 @@ void rtc_update_irq(struct rtc_device *rtc,
391 wake_up_interruptible(&rtc->irq_queue); 277 wake_up_interruptible(&rtc->irq_queue);
392 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN); 278 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
393} 279}
280
281
282/**
283 * rtc_aie_update_irq - AIE mode rtctimer hook
284 * @private: pointer to the rtc_device
285 *
 286 * This function is called when the aie_timer expires.
287 */
288void rtc_aie_update_irq(void *private)
289{
290 struct rtc_device *rtc = (struct rtc_device *)private;
291 rtc_handle_legacy_irq(rtc, 1, RTC_AF);
292}
293
294
295/**
296 * rtc_uie_update_irq - UIE mode rtctimer hook
297 * @private: pointer to the rtc_device
298 *
 299 * This function is called when the uie_timer expires.
300 */
301void rtc_uie_update_irq(void *private)
302{
303 struct rtc_device *rtc = (struct rtc_device *)private;
304 rtc_handle_legacy_irq(rtc, 1, RTC_UF);
305}
306
307
308/**
309 * rtc_pie_update_irq - PIE mode hrtimer hook
310 * @timer: pointer to the pie mode hrtimer
311 *
312 * This function is used to emulate PIE mode interrupts
313 * using an hrtimer. This function is called when the periodic
314 * hrtimer expires.
315 */
316enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
317{
318 struct rtc_device *rtc;
319 ktime_t period;
320 int count;
321 rtc = container_of(timer, struct rtc_device, pie_timer);
322
323 period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
324 count = hrtimer_forward_now(timer, period);
325
326 rtc_handle_legacy_irq(rtc, count, RTC_PF);
327
328 return HRTIMER_RESTART;
329}
330
331/**
332 * rtc_update_irq - Triggered when a RTC interrupt occurs.
333 * @rtc: the rtc device
334 * @num: how many irqs are being reported (usually one)
335 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
336 * Context: any
337 */
338void rtc_update_irq(struct rtc_device *rtc,
339 unsigned long num, unsigned long events)
340{
341 schedule_work(&rtc->irqwork);
342}
394EXPORT_SYMBOL_GPL(rtc_update_irq); 343EXPORT_SYMBOL_GPL(rtc_update_irq);
395 344
396static int __rtc_match(struct device *dev, void *data) 345static int __rtc_match(struct device *dev, void *data)
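rtc_pie_update_irq() above emulates the periodic interrupt: each time the hrtimer fires, hrtimer_forward_now() pushes the expiry forward by whole periods and returns how many periods were covered, and that count is what gets reported. A userspace analogue of the forwarding step:

#include <stdio.h>

/* Advance the expiry past 'now' in whole periods and report how many
 * expiries that covers (the hrtimer_forward_now() analogue). */
static long long forward(long long *expires, long long now, long long period)
{
    long long missed = 0;
    while (*expires <= now) {
        *expires += period;
        missed++;
    }
    return missed;
}

int main(void)
{
    long long expires = 10, period = 10;
    /* timer armed for t=10, callback runs late at t=35: three expiries */
    printf("report %lld irqs\n", forward(&expires, 35, period));
    return 0;
}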
@@ -477,18 +426,20 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_state == NULL)
-		return -ENXIO;
-
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
-	if (err == 0)
-		err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);
+	if (enabled) {
+		ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+	} else {
+		hrtimer_cancel(&rtc->pie_timer);
+	}
+	rtc->pie_enabled = enabled;
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
 	return err;
 }
@@ -509,21 +460,194 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_freq == NULL)
-		return -ENXIO;
-
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
 	if (err == 0) {
-		err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
-		if (err == 0)
-			rtc->irq_freq = freq;
+		rtc->irq_freq = freq;
+		if (rtc->pie_enabled) {
+			ktime_t period;
+			hrtimer_cancel(&rtc->pie_timer);
+			period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+			hrtimer_start(&rtc->pie_timer, period,
+					HRTIMER_MODE_REL);
+		}
 	}
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 	return err;
 }
 EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+
+/**
+ * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being added
+ *
+ * Enqueues a timer onto the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Must hold ops_lock for proper serialization of the timerqueue.
+ */
+void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	timerqueue_add(&rtc->timerqueue, &timer->node);
+	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(timer->node.expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+	}
+}
+
+/**
+ * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being removed
+ *
+ * Removes a timer from the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Must hold ops_lock for proper serialization of the timerqueue.
+ */
+void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+	timerqueue_del(&rtc->timerqueue, &timer->node);
+
+	if (next == &timer->node) {
+		struct rtc_wkalrm alarm;
+		int err;
+		next = timerqueue_getnext(&rtc->timerqueue);
+		if (!next)
+			return;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+	}
+}
+
+/**
+ * rtc_timer_do_work - Expires rtc timers
+ * @work: the irqwork member of the rtc device being serviced
+ *
+ * Expires rtc timers. Reprograms the next alarm event if needed.
+ * Called via the rtc device's irqwork task.
+ *
+ * Serializes access to the timerqueue via the ops_lock mutex.
+ */
+void rtc_timer_do_work(struct work_struct *work)
+{
+	struct rtc_timer *timer;
+	struct timerqueue_node *next;
+	ktime_t now;
+	struct rtc_time tm;
+
+	struct rtc_device *rtc =
+		container_of(work, struct rtc_device, irqwork);
+
+	mutex_lock(&rtc->ops_lock);
+again:
+	__rtc_read_time(rtc, &tm);
+	now = rtc_tm_to_ktime(tm);
+	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+		if (next->expires.tv64 > now.tv64)
+			break;
+
+		/* expire timer */
+		timer = container_of(next, struct rtc_timer, node);
+		timerqueue_del(&rtc->timerqueue, &timer->node);
+		timer->enabled = 0;
+		if (timer->task.func)
+			timer->task.func(timer->task.private_data);
+
+		/* Re-add/fwd periodic timers */
+		if (ktime_to_ns(timer->period)) {
+			timer->node.expires = ktime_add(timer->node.expires,
+							timer->period);
+			timer->enabled = 1;
+			timerqueue_add(&rtc->timerqueue, &timer->node);
+		}
+	}
+
+	/* Set next alarm */
+	if (next) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			goto again;
+	}
+
+	mutex_unlock(&rtc->ops_lock);
+}
+
+
+/* rtc_timer_init - Initializes an rtc_timer
+ * @timer: timer to be initialized
+ * @f: function pointer to be called when the timer fires
+ * @data: private data passed to the function pointer
+ *
+ * Kernel interface for initializing an rtc_timer.
+ */
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data)
+{
+	timerqueue_init(&timer->node);
+	timer->enabled = 0;
+	timer->task.func = f;
+	timer->task.private_data = data;
+}
+
+/* rtc_timer_start - Sets an rtc_timer to fire in the future
+ * @rtc: rtc device to be used
+ * @timer: timer being set
+ * @expires: time at which to expire the timer
+ * @period: period at which the timer recurs
+ *
+ * Kernel interface for setting an rtc_timer.
+ */
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
+			ktime_t expires, ktime_t period)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+
+	timer->node.expires = expires;
+	timer->period = period;
+
+	timer->enabled = 1;
+	rtc_timer_enqueue(rtc, timer);
+
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+/* rtc_timer_cancel - Stops an rtc_timer
+ * @rtc: rtc device to be used
+ * @timer: timer being cancelled
+ *
+ * Kernel interface for cancelling an rtc_timer.
+ */
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+	timer->enabled = 0;
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+
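For orientation, here is a minimal sketch (not part of this patch) of how a kernel client could drive the rtc_timer interface added above. The device name, callback, and timings are illustrative assumptions; only rtc_class_open(), rtc_read_time(), and the rtc_timer_* helpers from this series are relied on.

#include <linux/rtc.h>
#include <linux/ktime.h>
#include <linux/printk.h>

/* Illustrative only: a hypothetical client of the new rtc_timer API. */
static struct rtc_timer demo_timer;

static void demo_timer_func(void *private_data)
{
	pr_info("rtc_timer fired\n");
}

static int demo_arm_timer(void)
{
	struct rtc_device *rtc = rtc_class_open("rtc0");	/* device name assumed */
	struct rtc_time tm;
	ktime_t now;
	int err;

	if (!rtc)
		return -ENODEV;

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;
	now = rtc_tm_to_ktime(tm);

	rtc_timer_init(&demo_timer, demo_timer_func, NULL);
	/* first expiry 5s from now, then recurring every second */
	return rtc_timer_start(rtc, &demo_timer,
			       ktime_add(now, ktime_set(5, 0)),
			       ktime_set(1, 0));
}

Because expiry and reprogramming run through rtc_timer_do_work() above, a single hardware alarm can multiplex any number of such timers.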
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5856167a0c90..7e6ce626b7f1 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -687,7 +687,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
 #if defined(CONFIG_ATARI)
 	address_space = 64;
 #elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
-			|| defined(__sparc__) || defined(__mips__)
+			|| defined(__sparc__) || defined(__mips__) \
+			|| defined(__powerpc__)
 	address_space = 128;
 #else
 #warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0cc0984d155b..212b16edafc0 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,105 +46,6 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
 	return err;
 }
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-/*
- * Routine to poll RTC seconds field for change as often as possible,
- * after first RTC_UIE use timer to reduce polling
- */
-static void rtc_uie_task(struct work_struct *work)
-{
-	struct rtc_device *rtc =
-		container_of(work, struct rtc_device, uie_task);
-	struct rtc_time tm;
-	int num = 0;
-	int err;
-
-	err = rtc_read_time(rtc, &tm);
-
-	spin_lock_irq(&rtc->irq_lock);
-	if (rtc->stop_uie_polling || err) {
-		rtc->uie_task_active = 0;
-	} else if (rtc->oldsecs != tm.tm_sec) {
-		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
-		rtc->oldsecs = tm.tm_sec;
-		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
-		rtc->uie_timer_active = 1;
-		rtc->uie_task_active = 0;
-		add_timer(&rtc->uie_timer);
-	} else if (schedule_work(&rtc->uie_task) == 0) {
-		rtc->uie_task_active = 0;
-	}
-	spin_unlock_irq(&rtc->irq_lock);
-	if (num)
-		rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
-}
-static void rtc_uie_timer(unsigned long data)
-{
-	struct rtc_device *rtc = (struct rtc_device *)data;
-	unsigned long flags;
-
-	spin_lock_irqsave(&rtc->irq_lock, flags);
-	rtc->uie_timer_active = 0;
-	rtc->uie_task_active = 1;
-	if ((schedule_work(&rtc->uie_task) == 0))
-		rtc->uie_task_active = 0;
-	spin_unlock_irqrestore(&rtc->irq_lock, flags);
-}
-
-static int clear_uie(struct rtc_device *rtc)
-{
-	spin_lock_irq(&rtc->irq_lock);
-	if (rtc->uie_irq_active) {
-		rtc->stop_uie_polling = 1;
-		if (rtc->uie_timer_active) {
-			spin_unlock_irq(&rtc->irq_lock);
-			del_timer_sync(&rtc->uie_timer);
-			spin_lock_irq(&rtc->irq_lock);
-			rtc->uie_timer_active = 0;
-		}
-		if (rtc->uie_task_active) {
-			spin_unlock_irq(&rtc->irq_lock);
-			flush_work_sync(&rtc->uie_task);
-			spin_lock_irq(&rtc->irq_lock);
-		}
-		rtc->uie_irq_active = 0;
-	}
-	spin_unlock_irq(&rtc->irq_lock);
-	return 0;
-}
-
-static int set_uie(struct rtc_device *rtc)
-{
-	struct rtc_time tm;
-	int err;
-
-	err = rtc_read_time(rtc, &tm);
-	if (err)
-		return err;
-	spin_lock_irq(&rtc->irq_lock);
-	if (!rtc->uie_irq_active) {
-		rtc->uie_irq_active = 1;
-		rtc->stop_uie_polling = 0;
-		rtc->oldsecs = tm.tm_sec;
-		rtc->uie_task_active = 1;
-		if (schedule_work(&rtc->uie_task) == 0)
-			rtc->uie_task_active = 0;
-	}
-	rtc->irq_data = 0;
-	spin_unlock_irq(&rtc->irq_lock);
-	return 0;
-}
-
-int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
-{
-	if (enabled)
-		return set_uie(rtc);
-	else
-		return clear_uie(rtc);
-}
-EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
-
-#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
 
 static ssize_t
 rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -493,11 +394,6 @@ void rtc_dev_prepare(struct rtc_device *rtc)
 
 	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	INIT_WORK(&rtc->uie_task, rtc_uie_task);
-	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
-#endif
-
 	cdev_init(&rtc->char_dev, &rtc_dev_fops);
 	rtc->char_dev.owner = rtc->owner;
 }
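The polling-based UIE emulation deleted above moves out of the character device and into the core's timerqueue handling, so the userspace-visible path is intended to stay the same. A rough userspace sketch of that path, using only the standard RTC ioctls (the device node name is an assumption):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);	/* device node assumed */

	if (fd < 0)
		return 1;
	ioctl(fd, RTC_UIE_ON, 0);	/* enable update interrupts */
	read(fd, &data, sizeof(data));	/* blocks until the next 1Hz update */
	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}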
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 773851f338b8..075f1708deae 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -117,4 +117,32 @@ int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
 }
 EXPORT_SYMBOL(rtc_tm_to_time);
 
+/*
+ * Convert rtc_time to ktime
+ */
+ktime_t rtc_tm_to_ktime(struct rtc_time tm)
+{
+	time_t time;
+	rtc_tm_to_time(&tm, &time);
+	return ktime_set(time, 0);
+}
+EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
+
+/*
+ * Convert ktime to rtc_time
+ */
+struct rtc_time rtc_ktime_to_tm(ktime_t kt)
+{
+	struct timespec ts;
+	struct rtc_time ret;
+
+	ts = ktime_to_timespec(kt);
+	/* Round up any ns */
+	if (ts.tv_nsec)
+		ts.tv_sec++;
+	rtc_time_to_tm(ts.tv_sec, &ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
+
 MODULE_LICENSE("GPL");
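A small sketch (illustrative, not from the patch) of how the two helpers added above round-trip; note that sub-second remainders are deliberately rounded up, so alarms are never programmed early:

#include <linux/rtc.h>

/* Illustrative only. */
static void rtc_ktime_helpers_demo(void)
{
	struct rtc_time tm = {
		.tm_mday = 5, .tm_mon = 0, .tm_year = 111,	/* 2011-01-05 */
	};
	ktime_t kt = rtc_tm_to_ktime(tm);		/* whole seconds, ns == 0 */
	struct rtc_time back = rtc_ktime_to_tm(kt);	/* same wall time back */

	/* A non-zero ns part would round up: rtc_ktime_to_tm(ktime_add_ns(kt, 1))
	 * lands one second later than tm.
	 */
	(void)back;
}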
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index dea7b5bf6e2c..24b966d5061a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -469,7 +469,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
 	unsigned long rom_pl;
 	static int die_nmi_called;
 
-	if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
+	if (ulReason != DIE_NMIUNKNOWN)
 		goto out;
 
 	if (!hpwdt_nmi_decoding)