aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2013-06-19 19:49:39 -0400
committerDavid S. Miller <davem@davemloft.net>2013-06-19 19:49:39 -0400
commitd98cae64e4a733ff377184d78aa0b1f2b54faede (patch)
treee973e3c93fe7e17741567ac3947f5197bc9d582d /drivers
parent646093a29f85630d8efe2aa38fa585d2c3ea2e46 (diff)
parent4067c666f2dccf56f5db5c182713e68c40d46013 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts: drivers/net/wireless/ath/ath9k/Kconfig drivers/net/xen-netback/netback.c net/batman-adv/bat_iv_ogm.c net/wireless/nl80211.c The ath9k Kconfig conflict was a change of a Kconfig option name right next to the deletion of another option. The xen-netback conflict was overlapping changes involving the handling of the notify list in xen_netbk_rx_action(). Batman conflict resolution provided by Antonio Quartulli, basically keep everything in both conflict hunks. The nl80211 conflict is a little more involved. In 'net' we added a dynamic memory allocation to nl80211_dump_wiphy() to fix a race that Linus reported. Meanwhile in 'net-next' the handlers were converted to use pre and post doit handlers which use a flag to determine whether to hold the RTNL mutex around the operation. However, the dump handlers do not use this logic. Instead they have to explicitly do the locking. There were apparent bugs in the conversion of nl80211_dump_wiphy() in that we were not dropping the RTNL mutex in all the return paths, and it seems we very much should be doing so. So I fixed that whilst handling the overlapping changes. To simplify the initial returns, I take the RTNL mutex after we try to allocate 'tb'. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/apei/ghes.c7
-rw-r--r--drivers/acpi/device_pm.c10
-rw-r--r--drivers/acpi/scan.c5
-rw-r--r--drivers/acpi/video.c19
-rw-r--r--drivers/base/regmap/regcache-rbtree.c6
-rw-r--r--drivers/base/regmap/regcache.c20
-rw-r--r--drivers/base/regmap/regmap-debugfs.c5
-rw-r--r--drivers/block/cciss.c32
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c8
-rw-r--r--drivers/block/nvme-core.c62
-rw-r--r--drivers/block/nvme-scsi.c3
-rw-r--r--drivers/block/pktcdvd.c3
-rw-r--r--drivers/block/rbd.c33
-rw-r--r--drivers/bluetooth/Kconfig4
-rw-r--r--drivers/bluetooth/btmrvl_main.c9
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c28
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c5
-rw-r--r--drivers/cpufreq/cpufreq_governor.c3
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/dma/dmatest.c45
-rw-r--r--drivers/dma/ste_dma40.c8
-rw-r--r--drivers/gpu/drm/drm_irq.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c30
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c7
-rw-r--r--drivers/gpu/drm/i915/intel_display.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c26
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c11
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c10
-rw-r--r--drivers/gpu/drm/radeon/ni.c10
-rw-r--r--drivers/gpu/drm/radeon/r100.c9
-rw-r--r--drivers/gpu/drm/radeon/r300.c9
-rw-r--r--drivers/gpu/drm/radeon/r420.c10
-rw-r--r--drivers/gpu/drm/radeon/r520.c9
-rw-r--r--drivers/gpu/drm/radeon/r600.c53
-rw-r--r--drivers/gpu/drm/radeon/r600d.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c9
-rw-r--r--drivers/gpu/drm/radeon/rs600.c9
-rw-r--r--drivers/gpu/drm/radeon/rs690.c9
-rw-r--r--drivers/gpu/drm/radeon/rv515.c9
-rw-r--r--drivers/gpu/drm/radeon/rv770.c10
-rw-r--r--drivers/gpu/drm/radeon/si.c10
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/hid/hid-multitouch.c11
-rw-r--r--drivers/hwmon/adm1021.c58
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h1
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c16
-rw-r--r--drivers/irqchip/irq-mxs.c14
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c2
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/md/bcache/Kconfig1
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/stats.c34
-rw-r--r--drivers/md/bcache/super.c185
-rw-r--r--drivers/md/bcache/writeback.c2
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/md/raid1.c38
-rw-r--r--drivers/md/raid10.c29
-rw-r--r--drivers/md/raid5.c6
-rw-r--r--drivers/misc/mei/init.c4
-rw-r--r--drivers/misc/mei/nfc.c2
-rw-r--r--drivers/misc/mei/pci-me.c1
-rw-r--r--drivers/misc/sgi-gru/grufile.c1
-rw-r--r--drivers/net/bonding/bond_main.c19
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/can/usb/usb_8dev.c5
-rw-r--r--drivers/net/ethernet/atheros/Kconfig18
-rw-r--r--drivers/net/ethernet/atheros/Makefile1
-rw-r--r--drivers/net/ethernet/atheros/alx/Makefile3
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h114
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c272
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.c1226
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h499
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1625
-rw-r--r--drivers/net/ethernet/atheros/alx/reg.h810
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c14
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/macvlan.c20
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/team/team_mode_random.c2
-rw-r--r--drivers/net/team/team_mode_roundrobin.c2
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_ether.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/vxlan.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig10
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h2
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c17
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.h6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c29
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c134
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h3
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c13
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h6
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.c2
-rw-r--r--drivers/net/xen-netback/netback.c8
-rw-r--r--drivers/of/base.c15
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c45
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c131
-rw-r--r--drivers/rtc/rtc-cmos.c4
-rw-r--r--drivers/rtc/rtc-tps6586x.c3
-rw-r--r--drivers/rtc/rtc-twl.c1
-rw-r--r--drivers/s390/net/netiucv.c6
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c2
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c2
-rw-r--r--drivers/spi/spi-sh-hspi.c2
-rw-r--r--drivers/spi/spi-topcliff-pch.c3
-rw-r--r--drivers/spi/spi-xilinx.c74
-rw-r--r--drivers/usb/chipidea/core.c3
-rw-r--r--drivers/usb/chipidea/udc.c13
-rw-r--r--drivers/usb/serial/f81232.c8
-rw-r--r--drivers/usb/serial/pl2303.c10
-rw-r--r--drivers/usb/serial/spcp8x5.c10
-rw-r--r--drivers/vfio/vfio.c2
-rw-r--r--drivers/vhost/net.c29
-rw-r--r--drivers/vhost/vhost.c8
-rw-r--r--drivers/vhost/vhost.h1
-rw-r--r--drivers/xen/tmem.c4
161 files changed, 5855 insertions, 665 deletions
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 403baf4dffc1..fcd7d91cec34 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -919,13 +919,14 @@ static int ghes_probe(struct platform_device *ghes_dev)
919 break; 919 break;
920 case ACPI_HEST_NOTIFY_EXTERNAL: 920 case ACPI_HEST_NOTIFY_EXTERNAL:
921 /* External interrupt vector is GSI */ 921 /* External interrupt vector is GSI */
922 if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) { 922 rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
923 if (rc) {
923 pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", 924 pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
924 generic->header.source_id); 925 generic->header.source_id);
925 goto err_edac_unreg; 926 goto err_edac_unreg;
926 } 927 }
927 if (request_irq(ghes->irq, ghes_irq_func, 928 rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
928 0, "GHES IRQ", ghes)) { 929 if (rc) {
929 pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", 930 pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
930 generic->header.source_id); 931 generic->header.source_id);
931 goto err_edac_unreg; 932 goto err_edac_unreg;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index bc493aa3af19..318fa32a141e 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -278,11 +278,13 @@ int acpi_bus_init_power(struct acpi_device *device)
278 if (result) 278 if (result)
279 return result; 279 return result;
280 } else if (state == ACPI_STATE_UNKNOWN) { 280 } else if (state == ACPI_STATE_UNKNOWN) {
281 /* No power resources and missing _PSC? Try to force D0. */ 281 /*
282 * No power resources and missing _PSC? Cross fingers and make
283 * it D0 in hope that this is what the BIOS put the device into.
284 * [We tried to force D0 here by executing _PS0, but that broke
285 * Toshiba P870-303 in a nasty way.]
286 */
282 state = ACPI_STATE_D0; 287 state = ACPI_STATE_D0;
283 result = acpi_dev_pm_explicit_set(device, state);
284 if (result)
285 return result;
286 } 288 }
287 device->power.state = state; 289 device->power.state = state;
288 return 0; 290 return 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 44225cb15f3a..b14ac46948c9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1017,11 +1017,8 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
1017 return -ENOSYS; 1017 return -ENOSYS;
1018 1018
1019 result = driver->ops.add(device); 1019 result = driver->ops.add(device);
1020 if (result) { 1020 if (result)
1021 device->driver = NULL;
1022 device->driver_data = NULL;
1023 return result; 1021 return result;
1024 }
1025 1022
1026 device->driver = driver; 1023 device->driver = driver;
1027 1024
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5b32e15a65ce..440eadf2d32c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -458,12 +458,28 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
458 }, 458 },
459 { 459 {
460 .callback = video_ignore_initial_backlight, 460 .callback = video_ignore_initial_backlight,
461 .ident = "HP Pavilion g6 Notebook PC",
462 .matches = {
463 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
464 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
465 },
466 },
467 {
468 .callback = video_ignore_initial_backlight,
461 .ident = "HP 1000 Notebook PC", 469 .ident = "HP 1000 Notebook PC",
462 .matches = { 470 .matches = {
463 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 471 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
464 DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), 472 DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
465 }, 473 },
466 }, 474 },
475 {
476 .callback = video_ignore_initial_backlight,
477 .ident = "HP Pavilion m4",
478 .matches = {
479 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
480 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
481 },
482 },
467 {} 483 {}
468}; 484};
469 485
@@ -1706,6 +1722,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
1706 int error; 1722 int error;
1707 acpi_status status; 1723 acpi_status status;
1708 1724
1725 if (device->handler)
1726 return -EINVAL;
1727
1709 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, 1728 status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
1710 device->parent->handle, 1, 1729 device->parent->handle, 1,
1711 acpi_video_bus_match, NULL, 1730 acpi_video_bus_match, NULL,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa0875f6f1b7..02f490bad30f 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
143 int registers = 0; 143 int registers = 0;
144 int this_registers, average; 144 int this_registers, average;
145 145
146 map->lock(map); 146 map->lock(map->lock_arg);
147 147
148 mem_size = sizeof(*rbtree_ctx); 148 mem_size = sizeof(*rbtree_ctx);
149 mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long); 149 mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
@@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
170 seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n", 170 seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
171 nodes, registers, average, mem_size); 171 nodes, registers, average, mem_size);
172 172
173 map->unlock(map); 173 map->unlock(map->lock_arg);
174 174
175 return 0; 175 return 0;
176} 176}
@@ -391,8 +391,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
391 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { 391 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
392 rbnode = rb_entry(node, struct regcache_rbtree_node, node); 392 rbnode = rb_entry(node, struct regcache_rbtree_node, node);
393 393
394 if (rbnode->base_reg < min)
395 continue;
396 if (rbnode->base_reg > max) 394 if (rbnode->base_reg > max)
397 break; 395 break;
398 if (rbnode->base_reg + rbnode->blklen < min) 396 if (rbnode->base_reg + rbnode->blklen < min)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 75923f2396bd..507ee2da0f6e 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map)
270 270
271 BUG_ON(!map->cache_ops || !map->cache_ops->sync); 271 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
272 272
273 map->lock(map); 273 map->lock(map->lock_arg);
274 /* Remember the initial bypass state */ 274 /* Remember the initial bypass state */
275 bypass = map->cache_bypass; 275 bypass = map->cache_bypass;
276 dev_dbg(map->dev, "Syncing %s cache\n", 276 dev_dbg(map->dev, "Syncing %s cache\n",
@@ -306,7 +306,7 @@ out:
306 trace_regcache_sync(map->dev, name, "stop"); 306 trace_regcache_sync(map->dev, name, "stop");
307 /* Restore the bypass state */ 307 /* Restore the bypass state */
308 map->cache_bypass = bypass; 308 map->cache_bypass = bypass;
309 map->unlock(map); 309 map->unlock(map->lock_arg);
310 310
311 return ret; 311 return ret;
312} 312}
@@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
333 333
334 BUG_ON(!map->cache_ops || !map->cache_ops->sync); 334 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
335 335
336 map->lock(map); 336 map->lock(map->lock_arg);
337 337
338 /* Remember the initial bypass state */ 338 /* Remember the initial bypass state */
339 bypass = map->cache_bypass; 339 bypass = map->cache_bypass;
@@ -352,7 +352,7 @@ out:
352 trace_regcache_sync(map->dev, name, "stop region"); 352 trace_regcache_sync(map->dev, name, "stop region");
353 /* Restore the bypass state */ 353 /* Restore the bypass state */
354 map->cache_bypass = bypass; 354 map->cache_bypass = bypass;
355 map->unlock(map); 355 map->unlock(map->lock_arg);
356 356
357 return ret; 357 return ret;
358} 358}
@@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
372 */ 372 */
373void regcache_cache_only(struct regmap *map, bool enable) 373void regcache_cache_only(struct regmap *map, bool enable)
374{ 374{
375 map->lock(map); 375 map->lock(map->lock_arg);
376 WARN_ON(map->cache_bypass && enable); 376 WARN_ON(map->cache_bypass && enable);
377 map->cache_only = enable; 377 map->cache_only = enable;
378 trace_regmap_cache_only(map->dev, enable); 378 trace_regmap_cache_only(map->dev, enable);
379 map->unlock(map); 379 map->unlock(map->lock_arg);
380} 380}
381EXPORT_SYMBOL_GPL(regcache_cache_only); 381EXPORT_SYMBOL_GPL(regcache_cache_only);
382 382
@@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
391 */ 391 */
392void regcache_mark_dirty(struct regmap *map) 392void regcache_mark_dirty(struct regmap *map)
393{ 393{
394 map->lock(map); 394 map->lock(map->lock_arg);
395 map->cache_dirty = true; 395 map->cache_dirty = true;
396 map->unlock(map); 396 map->unlock(map->lock_arg);
397} 397}
398EXPORT_SYMBOL_GPL(regcache_mark_dirty); 398EXPORT_SYMBOL_GPL(regcache_mark_dirty);
399 399
@@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
410 */ 410 */
411void regcache_cache_bypass(struct regmap *map, bool enable) 411void regcache_cache_bypass(struct regmap *map, bool enable)
412{ 412{
413 map->lock(map); 413 map->lock(map->lock_arg);
414 WARN_ON(map->cache_only && enable); 414 WARN_ON(map->cache_only && enable);
415 map->cache_bypass = enable; 415 map->cache_bypass = enable;
416 trace_regmap_cache_bypass(map->dev, enable); 416 trace_regmap_cache_bypass(map->dev, enable);
417 map->unlock(map); 417 map->unlock(map->lock_arg);
418} 418}
419EXPORT_SYMBOL_GPL(regcache_cache_bypass); 419EXPORT_SYMBOL_GPL(regcache_cache_bypass);
420 420
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 23b701f5fd2f..975719bc3450 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -265,6 +265,7 @@ static ssize_t regmap_map_write_file(struct file *file,
265 char *start = buf; 265 char *start = buf;
266 unsigned long reg, value; 266 unsigned long reg, value;
267 struct regmap *map = file->private_data; 267 struct regmap *map = file->private_data;
268 int ret;
268 269
269 buf_size = min(count, (sizeof(buf)-1)); 270 buf_size = min(count, (sizeof(buf)-1));
270 if (copy_from_user(buf, user_buf, buf_size)) 271 if (copy_from_user(buf, user_buf, buf_size))
@@ -282,7 +283,9 @@ static ssize_t regmap_map_write_file(struct file *file,
282 /* Userspace has been fiddling around behind the kernel's back */ 283 /* Userspace has been fiddling around behind the kernel's back */
283 add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); 284 add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
284 285
285 regmap_write(map, reg, value); 286 ret = regmap_write(map, reg, value);
287 if (ret < 0)
288 return ret;
286 return buf_size; 289 return buf_size;
287} 290}
288#else 291#else
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 6374dc103521..62b6c2cc80b5 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
168static int cciss_open(struct block_device *bdev, fmode_t mode); 168static int cciss_open(struct block_device *bdev, fmode_t mode);
169static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); 169static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
170static void cciss_release(struct gendisk *disk, fmode_t mode); 170static void cciss_release(struct gendisk *disk, fmode_t mode);
171static int do_ioctl(struct block_device *bdev, fmode_t mode,
172 unsigned int cmd, unsigned long arg);
173static int cciss_ioctl(struct block_device *bdev, fmode_t mode, 171static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
174 unsigned int cmd, unsigned long arg); 172 unsigned int cmd, unsigned long arg);
175static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 173static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = {
235 .owner = THIS_MODULE, 233 .owner = THIS_MODULE,
236 .open = cciss_unlocked_open, 234 .open = cciss_unlocked_open,
237 .release = cciss_release, 235 .release = cciss_release,
238 .ioctl = do_ioctl, 236 .ioctl = cciss_ioctl,
239 .getgeo = cciss_getgeo, 237 .getgeo = cciss_getgeo,
240#ifdef CONFIG_COMPAT 238#ifdef CONFIG_COMPAT
241 .compat_ioctl = cciss_compat_ioctl, 239 .compat_ioctl = cciss_compat_ioctl,
@@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode)
1143 mutex_unlock(&cciss_mutex); 1141 mutex_unlock(&cciss_mutex);
1144} 1142}
1145 1143
1146static int do_ioctl(struct block_device *bdev, fmode_t mode,
1147 unsigned cmd, unsigned long arg)
1148{
1149 int ret;
1150 mutex_lock(&cciss_mutex);
1151 ret = cciss_ioctl(bdev, mode, cmd, arg);
1152 mutex_unlock(&cciss_mutex);
1153 return ret;
1154}
1155
1156#ifdef CONFIG_COMPAT 1144#ifdef CONFIG_COMPAT
1157 1145
1158static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, 1146static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
@@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
1179 case CCISS_REGNEWD: 1167 case CCISS_REGNEWD:
1180 case CCISS_RESCANDISK: 1168 case CCISS_RESCANDISK:
1181 case CCISS_GETLUNINFO: 1169 case CCISS_GETLUNINFO:
1182 return do_ioctl(bdev, mode, cmd, arg); 1170 return cciss_ioctl(bdev, mode, cmd, arg);
1183 1171
1184 case CCISS_PASSTHRU32: 1172 case CCISS_PASSTHRU32:
1185 return cciss_ioctl32_passthru(bdev, mode, cmd, arg); 1173 return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
@@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
1219 if (err) 1207 if (err)
1220 return -EFAULT; 1208 return -EFAULT;
1221 1209
1222 err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); 1210 err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
1223 if (err) 1211 if (err)
1224 return err; 1212 return err;
1225 err |= 1213 err |=
@@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
1261 if (err) 1249 if (err)
1262 return -EFAULT; 1250 return -EFAULT;
1263 1251
1264 err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); 1252 err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
1265 if (err) 1253 if (err)
1266 return err; 1254 return err;
1267 err |= 1255 err |=
@@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
1311static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) 1299static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
1312{ 1300{
1313 cciss_coalint_struct intinfo; 1301 cciss_coalint_struct intinfo;
1302 unsigned long flags;
1314 1303
1315 if (!argp) 1304 if (!argp)
1316 return -EINVAL; 1305 return -EINVAL;
1306 spin_lock_irqsave(&h->lock, flags);
1317 intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay); 1307 intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
1318 intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); 1308 intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
1309 spin_unlock_irqrestore(&h->lock, flags);
1319 if (copy_to_user 1310 if (copy_to_user
1320 (argp, &intinfo, sizeof(cciss_coalint_struct))) 1311 (argp, &intinfo, sizeof(cciss_coalint_struct)))
1321 return -EFAULT; 1312 return -EFAULT;
@@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
1356static int cciss_getnodename(ctlr_info_t *h, void __user *argp) 1347static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
1357{ 1348{
1358 NodeName_type NodeName; 1349 NodeName_type NodeName;
1350 unsigned long flags;
1359 int i; 1351 int i;
1360 1352
1361 if (!argp) 1353 if (!argp)
1362 return -EINVAL; 1354 return -EINVAL;
1355 spin_lock_irqsave(&h->lock, flags);
1363 for (i = 0; i < 16; i++) 1356 for (i = 0; i < 16; i++)
1364 NodeName[i] = readb(&h->cfgtable->ServerName[i]); 1357 NodeName[i] = readb(&h->cfgtable->ServerName[i]);
1358 spin_unlock_irqrestore(&h->lock, flags);
1365 if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) 1359 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
1366 return -EFAULT; 1360 return -EFAULT;
1367 return 0; 1361 return 0;
@@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
1398static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) 1392static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
1399{ 1393{
1400 Heartbeat_type heartbeat; 1394 Heartbeat_type heartbeat;
1395 unsigned long flags;
1401 1396
1402 if (!argp) 1397 if (!argp)
1403 return -EINVAL; 1398 return -EINVAL;
1399 spin_lock_irqsave(&h->lock, flags);
1404 heartbeat = readl(&h->cfgtable->HeartBeat); 1400 heartbeat = readl(&h->cfgtable->HeartBeat);
1401 spin_unlock_irqrestore(&h->lock, flags);
1405 if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) 1402 if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
1406 return -EFAULT; 1403 return -EFAULT;
1407 return 0; 1404 return 0;
@@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
1410static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) 1407static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
1411{ 1408{
1412 BusTypes_type BusTypes; 1409 BusTypes_type BusTypes;
1410 unsigned long flags;
1413 1411
1414 if (!argp) 1412 if (!argp)
1415 return -EINVAL; 1413 return -EINVAL;
1414 spin_lock_irqsave(&h->lock, flags);
1416 BusTypes = readl(&h->cfgtable->BusTypes); 1415 BusTypes = readl(&h->cfgtable->BusTypes);
1416 spin_unlock_irqrestore(&h->lock, flags);
1417 if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) 1417 if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
1418 return -EFAULT; 1418 return -EFAULT;
1419 return 0; 1419 return 0;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 847107ef0cce..20dd52a2f92f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
3002 3002
3003static void mtip_hw_debugfs_exit(struct driver_data *dd) 3003static void mtip_hw_debugfs_exit(struct driver_data *dd)
3004{ 3004{
3005 debugfs_remove_recursive(dd->dfs_node); 3005 if (dd->dfs_node)
3006 debugfs_remove_recursive(dd->dfs_node);
3006} 3007}
3007 3008
3008 3009
@@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3863 struct driver_data *dd = queue->queuedata; 3864 struct driver_data *dd = queue->queuedata;
3864 struct scatterlist *sg; 3865 struct scatterlist *sg;
3865 struct bio_vec *bvec; 3866 struct bio_vec *bvec;
3866 int nents = 0; 3867 int i, nents = 0;
3867 int tag = 0, unaligned = 0; 3868 int tag = 0, unaligned = 0;
3868 3869
3869 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { 3870 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3921 } 3922 }
3922 3923
3923 /* Create the scatter list for this bio. */ 3924 /* Create the scatter list for this bio. */
3924 bio_for_each_segment(bvec, bio, nents) { 3925 bio_for_each_segment(bvec, bio, i) {
3925 sg_set_page(&sg[nents], 3926 sg_set_page(&sg[nents],
3926 bvec->bv_page, 3927 bvec->bv_page,
3927 bvec->bv_len, 3928 bvec->bv_len,
3928 bvec->bv_offset); 3929 bvec->bv_offset);
3930 nents++;
3929 } 3931 }
3930 3932
3931 /* Issue the read/write. */ 3933 /* Issue the read/write. */
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efdfaa44a59..ce79a590b45b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
629 struct nvme_command *cmnd; 629 struct nvme_command *cmnd;
630 struct nvme_iod *iod; 630 struct nvme_iod *iod;
631 enum dma_data_direction dma_dir; 631 enum dma_data_direction dma_dir;
632 int cmdid, length, result = -ENOMEM; 632 int cmdid, length, result;
633 u16 control; 633 u16 control;
634 u32 dsmgmt; 634 u32 dsmgmt;
635 int psegs = bio_phys_segments(ns->queue, bio); 635 int psegs = bio_phys_segments(ns->queue, bio);
@@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
640 return result; 640 return result;
641 } 641 }
642 642
643 result = -ENOMEM;
643 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); 644 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
644 if (!iod) 645 if (!iod)
645 goto nomem; 646 goto nomem;
@@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
977 978
978 if (timeout && !time_after(now, info[cmdid].timeout)) 979 if (timeout && !time_after(now, info[cmdid].timeout))
979 continue; 980 continue;
981 if (info[cmdid].ctx == CMD_CTX_CANCELLED)
982 continue;
980 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); 983 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
981 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 984 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
982 fn(nvmeq->dev, ctx, &cqe); 985 fn(nvmeq->dev, ctx, &cqe);
@@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1206 1209
1207 if (addr & 3) 1210 if (addr & 3)
1208 return ERR_PTR(-EINVAL); 1211 return ERR_PTR(-EINVAL);
1209 if (!length) 1212 if (!length || length > INT_MAX - PAGE_SIZE)
1210 return ERR_PTR(-EINVAL); 1213 return ERR_PTR(-EINVAL);
1211 1214
1212 offset = offset_in_page(addr); 1215 offset = offset_in_page(addr);
@@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1227 sg_init_table(sg, count); 1230 sg_init_table(sg, count);
1228 for (i = 0; i < count; i++) { 1231 for (i = 0; i < count; i++) {
1229 sg_set_page(&sg[i], pages[i], 1232 sg_set_page(&sg[i], pages[i],
1230 min_t(int, length, PAGE_SIZE - offset), offset); 1233 min_t(unsigned, length, PAGE_SIZE - offset),
1234 offset);
1231 length -= (PAGE_SIZE - offset); 1235 length -= (PAGE_SIZE - offset);
1232 offset = 0; 1236 offset = 0;
1233 } 1237 }
@@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
1435 nvme_free_iod(dev, iod); 1439 nvme_free_iod(dev, iod);
1436 } 1440 }
1437 1441
1438 if (!status && copy_to_user(&ucmd->result, &cmd.result, 1442 if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
1439 sizeof(cmd.result))) 1443 sizeof(cmd.result)))
1440 status = -EFAULT; 1444 status = -EFAULT;
1441 1445
@@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
1633 1637
1634static int nvme_setup_io_queues(struct nvme_dev *dev) 1638static int nvme_setup_io_queues(struct nvme_dev *dev)
1635{ 1639{
1636 int result, cpu, i, nr_io_queues, db_bar_size, q_depth; 1640 struct pci_dev *pdev = dev->pci_dev;
1641 int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
1637 1642
1638 nr_io_queues = num_online_cpus(); 1643 nr_io_queues = num_online_cpus();
1639 result = set_queue_count(dev, nr_io_queues); 1644 result = set_queue_count(dev, nr_io_queues);
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1642 if (result < nr_io_queues) 1647 if (result < nr_io_queues)
1643 nr_io_queues = result; 1648 nr_io_queues = result;
1644 1649
1650 q_count = nr_io_queues;
1645 /* Deregister the admin queue's interrupt */ 1651 /* Deregister the admin queue's interrupt */
1646 free_irq(dev->entry[0].vector, dev->queues[0]); 1652 free_irq(dev->entry[0].vector, dev->queues[0]);
1647 1653
1648 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); 1654 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
1649 if (db_bar_size > 8192) { 1655 if (db_bar_size > 8192) {
1650 iounmap(dev->bar); 1656 iounmap(dev->bar);
1651 dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), 1657 dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
1652 db_bar_size);
1653 dev->dbs = ((void __iomem *)dev->bar) + 4096; 1658 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1654 dev->queues[0]->q_db = dev->dbs; 1659 dev->queues[0]->q_db = dev->dbs;
1655 } 1660 }
@@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1657 for (i = 0; i < nr_io_queues; i++) 1662 for (i = 0; i < nr_io_queues; i++)
1658 dev->entry[i].entry = i; 1663 dev->entry[i].entry = i;
1659 for (;;) { 1664 for (;;) {
1660 result = pci_enable_msix(dev->pci_dev, dev->entry, 1665 result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
1661 nr_io_queues);
1662 if (result == 0) { 1666 if (result == 0) {
1663 break; 1667 break;
1664 } else if (result > 0) { 1668 } else if (result > 0) {
1665 nr_io_queues = result; 1669 nr_io_queues = result;
1666 continue; 1670 continue;
1667 } else { 1671 } else {
1668 nr_io_queues = 1; 1672 nr_io_queues = 0;
1669 break; 1673 break;
1670 } 1674 }
1671 } 1675 }
1672 1676
1677 if (nr_io_queues == 0) {
1678 nr_io_queues = q_count;
1679 for (;;) {
1680 result = pci_enable_msi_block(pdev, nr_io_queues);
1681 if (result == 0) {
1682 for (i = 0; i < nr_io_queues; i++)
1683 dev->entry[i].vector = i + pdev->irq;
1684 break;
1685 } else if (result > 0) {
1686 nr_io_queues = result;
1687 continue;
1688 } else {
1689 nr_io_queues = 1;
1690 break;
1691 }
1692 }
1693 }
1694
1673 result = queue_request_irq(dev, dev->queues[0], "nvme admin"); 1695 result = queue_request_irq(dev, dev->queues[0], "nvme admin");
1674 /* XXX: handle failure here */ 1696 /* XXX: handle failure here */
1675 1697
@@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
1850{ 1872{
1851 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 1873 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
1852 nvme_dev_remove(dev); 1874 nvme_dev_remove(dev);
1853 pci_disable_msix(dev->pci_dev); 1875 if (dev->pci_dev->msi_enabled)
1876 pci_disable_msi(dev->pci_dev);
1877 else if (dev->pci_dev->msix_enabled)
1878 pci_disable_msix(dev->pci_dev);
1854 iounmap(dev->bar); 1879 iounmap(dev->bar);
1855 nvme_release_instance(dev); 1880 nvme_release_instance(dev);
1856 nvme_release_prp_pools(dev); 1881 nvme_release_prp_pools(dev);
@@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1923 INIT_LIST_HEAD(&dev->namespaces); 1948 INIT_LIST_HEAD(&dev->namespaces);
1924 dev->pci_dev = pdev; 1949 dev->pci_dev = pdev;
1925 pci_set_drvdata(pdev, dev); 1950 pci_set_drvdata(pdev, dev);
1926 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1951
1927 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1952 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
1953 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1954 else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
1955 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1956 else
1957 goto disable;
1958
1928 result = nvme_set_instance(dev); 1959 result = nvme_set_instance(dev);
1929 if (result) 1960 if (result)
1930 goto disable; 1961 goto disable;
@@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1977 unmap: 2008 unmap:
1978 iounmap(dev->bar); 2009 iounmap(dev->bar);
1979 disable_msix: 2010 disable_msix:
1980 pci_disable_msix(pdev); 2011 if (dev->pci_dev->msi_enabled)
2012 pci_disable_msi(dev->pci_dev);
2013 else if (dev->pci_dev->msix_enabled)
2014 pci_disable_msix(dev->pci_dev);
1981 nvme_release_instance(dev); 2015 nvme_release_instance(dev);
1982 nvme_release_prp_pools(dev); 2016 nvme_release_prp_pools(dev);
1983 disable: 2017 disable:
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index fed54b039893..102de2f52b5c 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -44,7 +44,6 @@
44#include <linux/sched.h> 44#include <linux/sched.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/types.h> 46#include <linux/types.h>
47#include <linux/version.h>
48#include <scsi/sg.h> 47#include <scsi/sg.h>
49#include <scsi/scsi.h> 48#include <scsi/scsi.h>
50 49
@@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
1654 } 1653 }
1655} 1654}
1656 1655
1657static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1656static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1658 u8 *mode_page, u8 page_code) 1657 u8 *mode_page, u8 page_code)
1659{ 1658{
1660 int res = SNTI_TRANSLATION_SUCCESS; 1659 int res = SNTI_TRANSLATION_SUCCESS;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3c08983e600a..f5d0ea11d9fd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -83,7 +83,8 @@
83 83
84#define MAX_SPEED 0xffff 84#define MAX_SPEED 0xffff
85 85
86#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1)) 86#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
87 ~(sector_t)((pd)->settings.size - 1))
87 88
88static DEFINE_MUTEX(pktcdvd_mutex); 89static DEFINE_MUTEX(pktcdvd_mutex);
89static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; 90static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d6d314027b5d..3063452e55da 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -519,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = {
519}; 519};
520 520
521/* 521/*
522 * Initialize an rbd client instance. 522 * Initialize an rbd client instance. Success or not, this function
523 * We own *ceph_opts. 523 * consumes ceph_opts.
524 */ 524 */
525static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) 525static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
526{ 526{
@@ -675,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private)
675 675
676/* 676/*
677 * Get a ceph client with specific addr and configuration, if one does 677 * Get a ceph client with specific addr and configuration, if one does
678 * not exist create it. 678 * not exist create it. Either way, ceph_opts is consumed by this
679 * function.
679 */ 680 */
680static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) 681static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
681{ 682{
@@ -4697,8 +4698,10 @@ out:
4697 return ret; 4698 return ret;
4698} 4699}
4699 4700
4700/* Undo whatever state changes are made by v1 or v2 image probe */ 4701/*
4701 4702 * Undo whatever state changes are made by v1 or v2 header info
4703 * call.
4704 */
4702static void rbd_dev_unprobe(struct rbd_device *rbd_dev) 4705static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4703{ 4706{
4704 struct rbd_image_header *header; 4707 struct rbd_image_header *header;
@@ -4902,9 +4905,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4902 int tmp; 4905 int tmp;
4903 4906
4904 /* 4907 /*
4905 * Get the id from the image id object. If it's not a 4908 * Get the id from the image id object. Unless there's an
4906 * format 2 image, we'll get ENOENT back, and we'll assume 4909 * error, rbd_dev->spec->image_id will be filled in with
4907 * it's a format 1 image. 4910 * a dynamically-allocated string, and rbd_dev->image_format
4911 * will be set to either 1 or 2.
4908 */ 4912 */
4909 ret = rbd_dev_image_id(rbd_dev); 4913 ret = rbd_dev_image_id(rbd_dev);
4910 if (ret) 4914 if (ret)
@@ -4992,7 +4996,6 @@ static ssize_t rbd_add(struct bus_type *bus,
4992 rc = PTR_ERR(rbdc); 4996 rc = PTR_ERR(rbdc);
4993 goto err_out_args; 4997 goto err_out_args;
4994 } 4998 }
4995 ceph_opts = NULL; /* rbd_dev client now owns this */
4996 4999
4997 /* pick the pool */ 5000 /* pick the pool */
4998 osdc = &rbdc->client->osdc; 5001 osdc = &rbdc->client->osdc;
@@ -5027,18 +5030,18 @@ static ssize_t rbd_add(struct bus_type *bus,
5027 rbd_dev->mapping.read_only = read_only; 5030 rbd_dev->mapping.read_only = read_only;
5028 5031
5029 rc = rbd_dev_device_setup(rbd_dev); 5032 rc = rbd_dev_device_setup(rbd_dev);
5030 if (!rc) 5033 if (rc) {
5031 return count; 5034 rbd_dev_image_release(rbd_dev);
5035 goto err_out_module;
5036 }
5037
5038 return count;
5032 5039
5033 rbd_dev_image_release(rbd_dev);
5034err_out_rbd_dev: 5040err_out_rbd_dev:
5035 rbd_dev_destroy(rbd_dev); 5041 rbd_dev_destroy(rbd_dev);
5036err_out_client: 5042err_out_client:
5037 rbd_put_client(rbdc); 5043 rbd_put_client(rbdc);
5038err_out_args: 5044err_out_args:
5039 if (ceph_opts)
5040 ceph_destroy_options(ceph_opts);
5041 kfree(rbd_opts);
5042 rbd_spec_put(spec); 5045 rbd_spec_put(spec);
5043err_out_module: 5046err_out_module:
5044 module_put(THIS_MODULE); 5047 module_put(THIS_MODULE);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index fdfd61a2d523..11a6104a1e4f 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -201,7 +201,7 @@ config BT_MRVL
201 The core driver to support Marvell Bluetooth devices. 201 The core driver to support Marvell Bluetooth devices.
202 202
203 This driver is required if you want to support 203 This driver is required if you want to support
204 Marvell Bluetooth devices, such as 8688/8787/8797. 204 Marvell Bluetooth devices, such as 8688/8787/8797/8897.
205 205
206 Say Y here to compile Marvell Bluetooth driver 206 Say Y here to compile Marvell Bluetooth driver
207 into the kernel or say M to compile it as module. 207 into the kernel or say M to compile it as module.
@@ -214,7 +214,7 @@ config BT_MRVL_SDIO
214 The driver for Marvell Bluetooth chipsets with SDIO interface. 214 The driver for Marvell Bluetooth chipsets with SDIO interface.
215 215
216 This driver is required if you want to use Marvell Bluetooth 216 This driver is required if you want to use Marvell Bluetooth
217 devices with SDIO interface. Currently SD8688/SD8787/SD8797 217 devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897
218 chipsets are supported. 218 chipsets are supported.
219 219
220 Say Y here to compile support for Marvell BT-over-SDIO driver 220 Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 3a4343b3bd6d..9a9f51875df5 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data)
498 add_wait_queue(&thread->wait_q, &wait); 498 add_wait_queue(&thread->wait_q, &wait);
499 499
500 set_current_state(TASK_INTERRUPTIBLE); 500 set_current_state(TASK_INTERRUPTIBLE);
501 if (kthread_should_stop()) {
502 BT_DBG("main_thread: break from main thread");
503 break;
504 }
501 505
502 if (adapter->wakeup_tries || 506 if (adapter->wakeup_tries ||
503 ((!adapter->int_count) && 507 ((!adapter->int_count) &&
@@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data)
513 517
514 BT_DBG("main_thread woke up"); 518 BT_DBG("main_thread woke up");
515 519
516 if (kthread_should_stop()) {
517 BT_DBG("main_thread: break from main thread");
518 break;
519 }
520
521 spin_lock_irqsave(&priv->driver_lock, flags); 520 spin_lock_irqsave(&priv->driver_lock, flags);
522 if (adapter->int_count) { 521 if (adapter->int_count) {
523 adapter->int_count = 0; 522 adapter->int_count = 0;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index c63488c54f4a..13693b7a0d5c 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -82,6 +82,23 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
82 .io_port_2 = 0x7a, 82 .io_port_2 = 0x7a,
83}; 83};
84 84
85static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
86 .cfg = 0x00,
87 .host_int_mask = 0x02,
88 .host_intstatus = 0x03,
89 .card_status = 0x50,
90 .sq_read_base_addr_a0 = 0x60,
91 .sq_read_base_addr_a1 = 0x61,
92 .card_revision = 0xbc,
93 .card_fw_status0 = 0xc0,
94 .card_fw_status1 = 0xc1,
95 .card_rx_len = 0xc2,
96 .card_rx_unit = 0xc3,
97 .io_port_0 = 0xd8,
98 .io_port_1 = 0xd9,
99 .io_port_2 = 0xda,
100};
101
85static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { 102static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
86 .helper = "mrvl/sd8688_helper.bin", 103 .helper = "mrvl/sd8688_helper.bin",
87 .firmware = "mrvl/sd8688.bin", 104 .firmware = "mrvl/sd8688.bin",
@@ -103,6 +120,13 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
103 .sd_blksz_fw_dl = 256, 120 .sd_blksz_fw_dl = 256,
104}; 121};
105 122
123static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
124 .helper = NULL,
125 .firmware = "mrvl/sd8897_uapsta.bin",
126 .reg = &btmrvl_reg_88xx,
127 .sd_blksz_fw_dl = 256,
128};
129
106static const struct sdio_device_id btmrvl_sdio_ids[] = { 130static const struct sdio_device_id btmrvl_sdio_ids[] = {
107 /* Marvell SD8688 Bluetooth device */ 131 /* Marvell SD8688 Bluetooth device */
108 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), 132 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
@@ -116,6 +140,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
116 /* Marvell SD8797 Bluetooth device */ 140 /* Marvell SD8797 Bluetooth device */
117 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), 141 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
118 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, 142 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
143 /* Marvell SD8897 Bluetooth device */
144 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
145 .driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
119 146
120 { } /* Terminating entry */ 147 { } /* Terminating entry */
121}; 148};
@@ -1194,3 +1221,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
1194MODULE_FIRMWARE("mrvl/sd8688.bin"); 1221MODULE_FIRMWARE("mrvl/sd8688.bin");
1195MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); 1222MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
1196MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); 1223MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
1224MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 11b8b4b54ceb..edc089e9d0c4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -347,11 +347,11 @@ static u32 get_cur_val(const struct cpumask *mask)
347 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { 347 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
348 case SYSTEM_INTEL_MSR_CAPABLE: 348 case SYSTEM_INTEL_MSR_CAPABLE:
349 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 349 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
350 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; 350 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
351 break; 351 break;
352 case SYSTEM_AMD_MSR_CAPABLE: 352 case SYSTEM_AMD_MSR_CAPABLE:
353 cmd.type = SYSTEM_AMD_MSR_CAPABLE; 353 cmd.type = SYSTEM_AMD_MSR_CAPABLE;
354 cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; 354 cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
355 break; 355 break;
356 case SYSTEM_IO_CAPABLE: 356 case SYSTEM_IO_CAPABLE:
357 cmd.type = SYSTEM_IO_CAPABLE; 357 cmd.type = SYSTEM_IO_CAPABLE;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index a64eb8b70444..ad1fde277661 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -45,7 +45,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
45 struct cpufreq_freqs freqs; 45 struct cpufreq_freqs freqs;
46 struct opp *opp; 46 struct opp *opp;
47 unsigned long volt = 0, volt_old = 0, tol = 0; 47 unsigned long volt = 0, volt_old = 0, tol = 0;
48 long freq_Hz; 48 long freq_Hz, freq_exact;
49 unsigned int index; 49 unsigned int index;
50 int ret; 50 int ret;
51 51
@@ -60,6 +60,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
60 freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); 60 freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
61 if (freq_Hz < 0) 61 if (freq_Hz < 0)
62 freq_Hz = freq_table[index].frequency * 1000; 62 freq_Hz = freq_table[index].frequency * 1000;
63 freq_exact = freq_Hz;
63 freqs.new = freq_Hz / 1000; 64 freqs.new = freq_Hz / 1000;
64 freqs.old = clk_get_rate(cpu_clk) / 1000; 65 freqs.old = clk_get_rate(cpu_clk) / 1000;
65 66
@@ -98,7 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
98 } 99 }
99 } 100 }
100 101
101 ret = clk_set_rate(cpu_clk, freqs.new * 1000); 102 ret = clk_set_rate(cpu_clk, freq_exact);
102 if (ret) { 103 if (ret) {
103 pr_err("failed to set clock rate: %d\n", ret); 104 pr_err("failed to set clock rate: %d\n", ret);
104 if (cpu_reg) 105 if (cpu_reg)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5af40ad82d23..dc9b72e25c1a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -26,6 +26,7 @@
26#include <linux/tick.h> 26#include <linux/tick.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/cpu.h>
29 30
30#include "cpufreq_governor.h" 31#include "cpufreq_governor.h"
31 32
@@ -180,8 +181,10 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
180 if (!all_cpus) { 181 if (!all_cpus) {
181 __gov_queue_work(smp_processor_id(), dbs_data, delay); 182 __gov_queue_work(smp_processor_id(), dbs_data, delay);
182 } else { 183 } else {
184 get_online_cpus();
183 for_each_cpu(i, policy->cpus) 185 for_each_cpu(i, policy->cpus)
184 __gov_queue_work(i, dbs_data, delay); 186 __gov_queue_work(i, dbs_data, delay);
187 put_online_cpus();
185 } 188 }
186} 189}
187EXPORT_SYMBOL_GPL(gov_queue_work); 190EXPORT_SYMBOL_GPL(gov_queue_work);
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index a97bb6c1596c..c3dc1c04a5df 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = {
863 { .compatible = "fsl,imx27-sahara" }, 863 { .compatible = "fsl,imx27-sahara" },
864 { /* sentinel */ } 864 { /* sentinel */ }
865}; 865};
866MODULE_DEVICE_TABLE(platform, sahara_dt_ids); 866MODULE_DEVICE_TABLE(of, sahara_dt_ids);
867 867
868static int sahara_probe(struct platform_device *pdev) 868static int sahara_probe(struct platform_device *pdev)
869{ 869{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d8ce4ecfef18..e88ded2c8d2f 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -716,8 +716,7 @@ static int dmatest_func(void *data)
716 } 716 }
717 dma_async_issue_pending(chan); 717 dma_async_issue_pending(chan);
718 718
719 wait_event_freezable_timeout(done_wait, 719 wait_event_freezable_timeout(done_wait, done.done,
720 done.done || kthread_should_stop(),
721 msecs_to_jiffies(params->timeout)); 720 msecs_to_jiffies(params->timeout));
722 721
723 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 722 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
@@ -997,7 +996,6 @@ static void stop_threaded_test(struct dmatest_info *info)
997static int __restart_threaded_test(struct dmatest_info *info, bool run) 996static int __restart_threaded_test(struct dmatest_info *info, bool run)
998{ 997{
999 struct dmatest_params *params = &info->params; 998 struct dmatest_params *params = &info->params;
1000 int ret;
1001 999
1002 /* Stop any running test first */ 1000 /* Stop any running test first */
1003 __stop_threaded_test(info); 1001 __stop_threaded_test(info);
@@ -1012,13 +1010,23 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
1012 memcpy(params, &info->dbgfs_params, sizeof(*params)); 1010 memcpy(params, &info->dbgfs_params, sizeof(*params));
1013 1011
1014 /* Run test with new parameters */ 1012 /* Run test with new parameters */
1015 ret = __run_threaded_test(info); 1013 return __run_threaded_test(info);
1016 if (ret) { 1014}
1017 __stop_threaded_test(info); 1015
1018 pr_err("dmatest: Can't run test\n"); 1016static bool __is_threaded_test_run(struct dmatest_info *info)
1017{
1018 struct dmatest_chan *dtc;
1019
1020 list_for_each_entry(dtc, &info->channels, node) {
1021 struct dmatest_thread *thread;
1022
1023 list_for_each_entry(thread, &dtc->threads, node) {
1024 if (!thread->done)
1025 return true;
1026 }
1019 } 1027 }
1020 1028
1021 return ret; 1029 return false;
1022} 1030}
1023 1031
1024static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, 1032static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
@@ -1091,22 +1099,10 @@ static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
1091{ 1099{
1092 struct dmatest_info *info = file->private_data; 1100 struct dmatest_info *info = file->private_data;
1093 char buf[3]; 1101 char buf[3];
1094 struct dmatest_chan *dtc;
1095 bool alive = false;
1096 1102
1097 mutex_lock(&info->lock); 1103 mutex_lock(&info->lock);
1098 list_for_each_entry(dtc, &info->channels, node) {
1099 struct dmatest_thread *thread;
1100
1101 list_for_each_entry(thread, &dtc->threads, node) {
1102 if (!thread->done) {
1103 alive = true;
1104 break;
1105 }
1106 }
1107 }
1108 1104
1109 if (alive) { 1105 if (__is_threaded_test_run(info)) {
1110 buf[0] = 'Y'; 1106 buf[0] = 'Y';
1111 } else { 1107 } else {
1112 __stop_threaded_test(info); 1108 __stop_threaded_test(info);
@@ -1132,7 +1128,12 @@ static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
1132 1128
1133 if (strtobool(buf, &bv) == 0) { 1129 if (strtobool(buf, &bv) == 0) {
1134 mutex_lock(&info->lock); 1130 mutex_lock(&info->lock);
1135 ret = __restart_threaded_test(info, bv); 1131
1132 if (__is_threaded_test_run(info))
1133 ret = -EBUSY;
1134 else
1135 ret = __restart_threaded_test(info, bv);
1136
1136 mutex_unlock(&info->lock); 1137 mutex_unlock(&info->lock);
1137 } 1138 }
1138 1139
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1734feec47b1..71bf4ec300ea 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1566,10 +1566,12 @@ static void dma_tc_handle(struct d40_chan *d40c)
1566 return; 1566 return;
1567 } 1567 }
1568 1568
1569 if (d40_queue_start(d40c) == NULL) 1569 if (d40_queue_start(d40c) == NULL) {
1570 d40c->busy = false; 1570 d40c->busy = false;
1571 pm_runtime_mark_last_busy(d40c->base->dev); 1571
1572 pm_runtime_put_autosuspend(d40c->base->dev); 1572 pm_runtime_mark_last_busy(d40c->base->dev);
1573 pm_runtime_put_autosuspend(d40c->base->dev);
1574 }
1573 1575
1574 d40_desc_remove(d40d); 1576 d40_desc_remove(d40d);
1575 d40_desc_done(d40c, d40d); 1577 d40_desc_done(d40c, d40d);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a6a8643a6a77..8bcce7866d36 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1054,7 +1054,7 @@ EXPORT_SYMBOL(drm_vblank_off);
1054 */ 1054 */
1055void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) 1055void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
1056{ 1056{
1057 /* vblank is not initialized (IRQ not installed ?) */ 1057 /* vblank is not initialized (IRQ not installed ?), or has been freed */
1058 if (!dev->num_crtcs) 1058 if (!dev->num_crtcs)
1059 return; 1059 return;
1060 /* 1060 /*
@@ -1076,6 +1076,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
1076{ 1076{
1077 unsigned long irqflags; 1077 unsigned long irqflags;
1078 1078
1079 /* vblank is not initialized (IRQ not installed ?), or has been freed */
1080 if (!dev->num_crtcs)
1081 return;
1082
1079 if (dev->vblank_inmodeset[crtc]) { 1083 if (dev->vblank_inmodeset[crtc]) {
1080 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1084 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1081 dev->vblank_disable_allowed = 1; 1085 dev->vblank_disable_allowed = 1;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 3cfd0931fbfb..82430ad8ba62 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1462 size_t addr = 0; 1462 size_t addr = 0;
1463 struct gtt_range *gt; 1463 struct gtt_range *gt;
1464 struct drm_gem_object *obj; 1464 struct drm_gem_object *obj;
1465 int ret; 1465 int ret = 0;
1466 1466
1467 /* if we want to turn of the cursor ignore width and height */ 1467 /* if we want to turn of the cursor ignore width and height */
1468 if (!handle) { 1468 if (!handle) {
@@ -1499,7 +1499,8 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1499 1499
1500 if (obj->size < width * height * 4) { 1500 if (obj->size < width * height * 4) {
1501 dev_dbg(dev->dev, "buffer is to small\n"); 1501 dev_dbg(dev->dev, "buffer is to small\n");
1502 return -ENOMEM; 1502 ret = -ENOMEM;
1503 goto unref_cursor;
1503 } 1504 }
1504 1505
1505 gt = container_of(obj, struct gtt_range, gem); 1506 gt = container_of(obj, struct gtt_range, gem);
@@ -1508,7 +1509,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1508 ret = psb_gtt_pin(gt); 1509 ret = psb_gtt_pin(gt);
1509 if (ret) { 1510 if (ret) {
1510 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); 1511 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
1511 return ret; 1512 goto unref_cursor;
1512 } 1513 }
1513 1514
1514 addr = gt->offset; /* Or resource.start ??? */ 1515 addr = gt->offset; /* Or resource.start ??? */
@@ -1532,9 +1533,14 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1532 struct gtt_range, gem); 1533 struct gtt_range, gem);
1533 psb_gtt_unpin(gt); 1534 psb_gtt_unpin(gt);
1534 drm_gem_object_unreference(psb_intel_crtc->cursor_obj); 1535 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1535 psb_intel_crtc->cursor_obj = obj;
1536 } 1536 }
1537 return 0; 1537
1538 psb_intel_crtc->cursor_obj = obj;
1539 return ret;
1540
1541unref_cursor:
1542 drm_gem_object_unreference(obj);
1543 return ret;
1538} 1544}
1539 1545
1540static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 1546static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1750,6 +1756,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
1750 kfree(psb_intel_crtc); 1756 kfree(psb_intel_crtc);
1751} 1757}
1752 1758
1759static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
1760{
1761 struct gtt_range *gt;
1762 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1763
1764 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1765
1766 if (crtc->fb) {
1767 gt = to_psb_fb(crtc->fb)->gtt;
1768 psb_gtt_unpin(gt);
1769 }
1770}
1771
1753const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { 1772const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1754 .dpms = cdv_intel_crtc_dpms, 1773 .dpms = cdv_intel_crtc_dpms,
1755 .mode_fixup = cdv_intel_crtc_mode_fixup, 1774 .mode_fixup = cdv_intel_crtc_mode_fixup,
@@ -1757,6 +1776,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1757 .mode_set_base = cdv_intel_pipe_set_base, 1776 .mode_set_base = cdv_intel_pipe_set_base,
1758 .prepare = cdv_intel_crtc_prepare, 1777 .prepare = cdv_intel_crtc_prepare,
1759 .commit = cdv_intel_crtc_commit, 1778 .commit = cdv_intel_crtc_commit,
1779 .disable = cdv_intel_crtc_disable,
1760}; 1780};
1761 1781
1762const struct drm_crtc_funcs cdv_intel_crtc_funcs = { 1782const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1534e220097a..8b1b6d923abe 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
121 unsigned long address; 121 unsigned long address;
122 int ret; 122 int ret;
123 unsigned long pfn; 123 unsigned long pfn;
124 /* FIXME: assumes fb at stolen base which may not be true */ 124 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
125 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base; 125 psbfb->gtt->offset;
126 126
127 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 127 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
128 address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT); 128 address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6e8f42b61ff6..6666493789d1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt; 843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
844 struct drm_gem_object *obj; 844 struct drm_gem_object *obj;
845 void *tmp_dst, *tmp_src; 845 void *tmp_dst, *tmp_src;
846 int ret, i, cursor_pages; 846 int ret = 0, i, cursor_pages;
847 847
848 /* if we want to turn of the cursor ignore width and height */ 848 /* if we want to turn of the cursor ignore width and height */
849 if (!handle) { 849 if (!handle) {
@@ -880,7 +880,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
880 880
881 if (obj->size < width * height * 4) { 881 if (obj->size < width * height * 4) {
882 dev_dbg(dev->dev, "buffer is to small\n"); 882 dev_dbg(dev->dev, "buffer is to small\n");
883 return -ENOMEM; 883 ret = -ENOMEM;
884 goto unref_cursor;
884 } 885 }
885 886
886 gt = container_of(obj, struct gtt_range, gem); 887 gt = container_of(obj, struct gtt_range, gem);
@@ -889,13 +890,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
889 ret = psb_gtt_pin(gt); 890 ret = psb_gtt_pin(gt);
890 if (ret) { 891 if (ret) {
891 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); 892 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
892 return ret; 893 goto unref_cursor;
893 } 894 }
894 895
895 if (dev_priv->ops->cursor_needs_phys) { 896 if (dev_priv->ops->cursor_needs_phys) {
896 if (cursor_gt == NULL) { 897 if (cursor_gt == NULL) {
897 dev_err(dev->dev, "No hardware cursor mem available"); 898 dev_err(dev->dev, "No hardware cursor mem available");
898 return -ENOMEM; 899 ret = -ENOMEM;
900 goto unref_cursor;
899 } 901 }
900 902
901 /* Prevent overflow */ 903 /* Prevent overflow */
@@ -936,9 +938,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
936 struct gtt_range, gem); 938 struct gtt_range, gem);
937 psb_gtt_unpin(gt); 939 psb_gtt_unpin(gt);
938 drm_gem_object_unreference(psb_intel_crtc->cursor_obj); 940 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
939 psb_intel_crtc->cursor_obj = obj;
940 } 941 }
941 return 0; 942
943 psb_intel_crtc->cursor_obj = obj;
944 return ret;
945
946unref_cursor:
947 drm_gem_object_unreference(obj);
948 return ret;
942} 949}
943 950
944static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 951static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1150,6 +1157,19 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1150 kfree(psb_intel_crtc); 1157 kfree(psb_intel_crtc);
1151} 1158}
1152 1159
1160static void psb_intel_crtc_disable(struct drm_crtc *crtc)
1161{
1162 struct gtt_range *gt;
1163 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1164
1165 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1166
1167 if (crtc->fb) {
1168 gt = to_psb_fb(crtc->fb)->gtt;
1169 psb_gtt_unpin(gt);
1170 }
1171}
1172
1153const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { 1173const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1154 .dpms = psb_intel_crtc_dpms, 1174 .dpms = psb_intel_crtc_dpms,
1155 .mode_fixup = psb_intel_crtc_mode_fixup, 1175 .mode_fixup = psb_intel_crtc_mode_fixup,
@@ -1157,6 +1177,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1157 .mode_set_base = psb_intel_pipe_set_base, 1177 .mode_set_base = psb_intel_pipe_set_base,
1158 .prepare = psb_intel_crtc_prepare, 1178 .prepare = psb_intel_crtc_prepare,
1159 .commit = psb_intel_crtc_commit, 1179 .commit = psb_intel_crtc_commit,
1180 .disable = psb_intel_crtc_disable,
1160}; 1181};
1161 1182
1162const struct drm_crtc_funcs psb_intel_crtc_funcs = { 1183const struct drm_crtc_funcs psb_intel_crtc_funcs = {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6cf8e843973..970ad17c99ab 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -91,14 +91,11 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
91{ 91{
92 int ret; 92 int ret;
93 93
94#define EXIT_COND (!i915_reset_in_progress(error)) 94#define EXIT_COND (!i915_reset_in_progress(error) || \
95 i915_terminally_wedged(error))
95 if (EXIT_COND) 96 if (EXIT_COND)
96 return 0; 97 return 0;
97 98
98 /* GPU is already declared terminally dead, give up. */
99 if (i915_terminally_wedged(error))
100 return -EIO;
101
102 /* 99 /*
103 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 100 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
104 * userspace. If it takes that long something really bad is going on and 101 * userspace. If it takes that long something really bad is going on and
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ad1117bebd7e..56746dcac40f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7937,6 +7937,11 @@ intel_modeset_check_state(struct drm_device *dev)
7937 memset(&pipe_config, 0, sizeof(pipe_config)); 7937 memset(&pipe_config, 0, sizeof(pipe_config));
7938 active = dev_priv->display.get_pipe_config(crtc, 7938 active = dev_priv->display.get_pipe_config(crtc,
7939 &pipe_config); 7939 &pipe_config);
7940
7941 /* hw state is inconsistent with the pipe A quirk */
7942 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
7943 active = crtc->active;
7944
7940 WARN(crtc->active != active, 7945 WARN(crtc->active != active,
7941 "crtc active state doesn't match with hw state " 7946 "crtc active state doesn't match with hw state "
7942 "(expected %i, found %i)\n", crtc->active, active); 7947 "(expected %i, found %i)\n", crtc->active, active);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f36f1baabd5a..29412cc89c7a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -815,10 +815,10 @@ static const struct dmi_system_id intel_no_lvds[] = {
815 }, 815 },
816 { 816 {
817 .callback = intel_no_lvds_dmi_callback, 817 .callback = intel_no_lvds_dmi_callback,
818 .ident = "Hewlett-Packard HP t5740e Thin Client", 818 .ident = "Hewlett-Packard HP t5740",
819 .matches = { 819 .matches = {
820 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 820 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
821 DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"), 821 DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
822 }, 822 },
823 }, 823 },
824 { 824 {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d15428404b9a..d4ea6c265ce1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1776,11 +1776,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1776 * Assume that the preferred modes are 1776 * Assume that the preferred modes are
1777 * arranged in priority order. 1777 * arranged in priority order.
1778 */ 1778 */
1779 intel_ddc_get_modes(connector, intel_sdvo->i2c); 1779 intel_ddc_get_modes(connector, &intel_sdvo->ddc);
1780 if (list_empty(&connector->probed_modes) == false)
1781 goto end;
1782 1780
1783 /* Fetch modes from VBT */ 1781 /*
1782 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
1783 * SDVO->LVDS transcoders can't cope with the EDID mode. Since
1784 * drm_mode_probed_add adds the mode at the head of the list we add it
1785 * last.
1786 */
1784 if (dev_priv->sdvo_lvds_vbt_mode != NULL) { 1787 if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
1785 newmode = drm_mode_duplicate(connector->dev, 1788 newmode = drm_mode_duplicate(connector->dev,
1786 dev_priv->sdvo_lvds_vbt_mode); 1789 dev_priv->sdvo_lvds_vbt_mode);
@@ -1792,7 +1795,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1792 } 1795 }
1793 } 1796 }
1794 1797
1795end:
1796 list_for_each_entry(newmode, &connector->probed_modes, head) { 1798 list_for_each_entry(newmode, &connector->probed_modes, head) {
1797 if (newmode->type & DRM_MODE_TYPE_PREFERRED) { 1799 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1798 intel_sdvo->sdvo_lvds_fixed_mode = 1800 intel_sdvo->sdvo_lvds_fixed_mode =
@@ -2790,12 +2792,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2790 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; 2792 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
2791 } 2793 }
2792 2794
2793 /* Only enable the hotplug irq if we need it, to work around noisy
2794 * hotplug lines.
2795 */
2796 if (intel_sdvo->hotplug_active)
2797 intel_encoder->hpd_pin = HPD_SDVO_B ? HPD_SDVO_B : HPD_SDVO_C;
2798
2799 intel_encoder->compute_config = intel_sdvo_compute_config; 2795 intel_encoder->compute_config = intel_sdvo_compute_config;
2800 intel_encoder->disable = intel_disable_sdvo; 2796 intel_encoder->disable = intel_disable_sdvo;
2801 intel_encoder->mode_set = intel_sdvo_mode_set; 2797 intel_encoder->mode_set = intel_sdvo_mode_set;
@@ -2814,6 +2810,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2814 goto err_output; 2810 goto err_output;
2815 } 2811 }
2816 2812
2813 /* Only enable the hotplug irq if we need it, to work around noisy
2814 * hotplug lines.
2815 */
2816 if (intel_sdvo->hotplug_active) {
2817 intel_encoder->hpd_pin =
2818 intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
2819 }
2820
2817 /* 2821 /*
2818 * Cloning SDVO with anything is often impossible, since the SDVO 2822 * Cloning SDVO with anything is often impossible, since the SDVO
2819 * encoder can request a special input timing mode. And even if that's 2823 * encoder can request a special input timing mode. And even if that's
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 77b8a45fb10a..ee66badc8bb6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1034,13 +1034,14 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
1034 else 1034 else
1035 hi_pri_lvl = 5; 1035 hi_pri_lvl = 5;
1036 1036
1037 WREG8(0x1fde, 0x06); 1037 WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
1038 WREG8(0x1fdf, hi_pri_lvl); 1038 WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
1039 } else { 1039 } else {
1040 WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
1040 if (mdev->reg_1e24 >= 0x01) 1041 if (mdev->reg_1e24 >= 0x01)
1041 WREG8(0x1fdf, 0x03); 1042 WREG8(MGAREG_CRTCEXT_DATA, 0x03);
1042 else 1043 else
1043 WREG8(0x1fdf, 0x04); 1044 WREG8(MGAREG_CRTCEXT_DATA, 0x04);
1044 } 1045 }
1045 } 1046 }
1046 return 0; 1047 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index d0817d94454c..f02fd9f443ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -50,11 +50,16 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
50{ 50{
51 const u32 doff = (or * 0x800); 51 const u32 doff = (or * 0x800);
52 int load = -EINVAL; 52 int load = -EINVAL;
53 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
54 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
53 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); 55 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
54 udelay(9500); 56 mdelay(9);
57 udelay(500);
55 nv_wr32(priv, 0x61a00c + doff, 0x80000000); 58 nv_wr32(priv, 0x61a00c + doff, 0x80000000);
56 load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27; 59 load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
57 nv_wr32(priv, 0x61a00c + doff, 0x00000000); 60 nv_wr32(priv, 0x61a00c + doff, 0x00000000);
61 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
62 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
58 return load; 63 return load;
59} 64}
60 65
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
index 0d36bdc51417..7fdade6e604d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -55,6 +55,10 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
55 nv_wr32(priv, 0x616510 + hoff, 0x00000000); 55 nv_wr32(priv, 0x616510 + hoff, 0x00000000);
56 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001); 56 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
57 57
58 nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
59 nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
60 nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
61
58 /* ??? */ 62 /* ??? */
59 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ 63 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
60 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ 64 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 89bf459d584b..e9b8217d0075 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -40,14 +40,13 @@
40 * FIFO channel objects 40 * FIFO channel objects
41 ******************************************************************************/ 41 ******************************************************************************/
42 42
43void 43static void
44nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) 44nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
45{ 45{
46 struct nouveau_bar *bar = nouveau_bar(priv); 46 struct nouveau_bar *bar = nouveau_bar(priv);
47 struct nouveau_gpuobj *cur; 47 struct nouveau_gpuobj *cur;
48 int i, p; 48 int i, p;
49 49
50 mutex_lock(&nv_subdev(priv)->mutex);
51 cur = priv->playlist[priv->cur_playlist]; 50 cur = priv->playlist[priv->cur_playlist];
52 priv->cur_playlist = !priv->cur_playlist; 51 priv->cur_playlist = !priv->cur_playlist;
53 52
@@ -61,6 +60,13 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
61 nv_wr32(priv, 0x0032f4, cur->addr >> 12); 60 nv_wr32(priv, 0x0032f4, cur->addr >> 12);
62 nv_wr32(priv, 0x0032ec, p); 61 nv_wr32(priv, 0x0032ec, p);
63 nv_wr32(priv, 0x002500, 0x00000101); 62 nv_wr32(priv, 0x002500, 0x00000101);
63}
64
65void
66nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
67{
68 mutex_lock(&nv_subdev(priv)->mutex);
69 nv50_fifo_playlist_update_locked(priv);
64 mutex_unlock(&nv_subdev(priv)->mutex); 70 mutex_unlock(&nv_subdev(priv)->mutex);
65} 71}
66 72
@@ -489,7 +495,7 @@ nv50_fifo_init(struct nouveau_object *object)
489 495
490 for (i = 0; i < 128; i++) 496 for (i = 0; i < 128; i++)
491 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000); 497 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
492 nv50_fifo_playlist_update(priv); 498 nv50_fifo_playlist_update_locked(priv);
493 499
494 nv_wr32(priv, 0x003200, 0x00000001); 500 nv_wr32(priv, 0x003200, 0x00000001);
495 nv_wr32(priv, 0x003250, 0x00000001); 501 nv_wr32(priv, 0x003250, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 0a393f7f055f..5a5961b6a6a3 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -218,7 +218,7 @@ struct nv04_display_class {
218#define NV50_DISP_DAC_PWR_STATE 0x00000040 218#define NV50_DISP_DAC_PWR_STATE 0x00000040
219#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 219#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
220#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 220#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
221#define NV50_DISP_DAC_LOAD 0x0002000c 221#define NV50_DISP_DAC_LOAD 0x00020100
222#define NV50_DISP_DAC_LOAD_VALUE 0x00000007 222#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
223 223
224#define NV50_DISP_PIOR_MTHD 0x00030000 224#define NV50_DISP_PIOR_MTHD 0x00030000
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ebf0a683305e..dd5e01f89f28 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1554,7 +1554,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1554{ 1554{
1555 struct nv50_disp *disp = nv50_disp(encoder->dev); 1555 struct nv50_disp *disp = nv50_disp(encoder->dev);
1556 int ret, or = nouveau_encoder(encoder)->or; 1556 int ret, or = nouveau_encoder(encoder)->or;
1557 u32 load = 0; 1557 u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
1558 if (load == 0)
1559 load = 340;
1558 1560
1559 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); 1561 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
1560 if (ret || load != 7) 1562 if (ret || load != 7)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 44a7da66e081..8406c8251fbf 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -667,6 +667,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
667int 667int
668atombios_get_encoder_mode(struct drm_encoder *encoder) 668atombios_get_encoder_mode(struct drm_encoder *encoder)
669{ 669{
670 struct drm_device *dev = encoder->dev;
671 struct radeon_device *rdev = dev->dev_private;
670 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 672 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
671 struct drm_connector *connector; 673 struct drm_connector *connector;
672 struct radeon_connector *radeon_connector; 674 struct radeon_connector *radeon_connector;
@@ -693,7 +695,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
693 case DRM_MODE_CONNECTOR_DVII: 695 case DRM_MODE_CONNECTOR_DVII:
694 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 696 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
695 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 697 if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
696 radeon_audio) 698 radeon_audio &&
699 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
697 return ATOM_ENCODER_MODE_HDMI; 700 return ATOM_ENCODER_MODE_HDMI;
698 else if (radeon_connector->use_digital) 701 else if (radeon_connector->use_digital)
699 return ATOM_ENCODER_MODE_DVI; 702 return ATOM_ENCODER_MODE_DVI;
@@ -704,7 +707,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
704 case DRM_MODE_CONNECTOR_HDMIA: 707 case DRM_MODE_CONNECTOR_HDMIA:
705 default: 708 default:
706 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 709 if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
707 radeon_audio) 710 radeon_audio &&
711 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
708 return ATOM_ENCODER_MODE_HDMI; 712 return ATOM_ENCODER_MODE_HDMI;
709 else 713 else
710 return ATOM_ENCODER_MODE_DVI; 714 return ATOM_ENCODER_MODE_DVI;
@@ -718,7 +722,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
718 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 722 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
719 return ATOM_ENCODER_MODE_DP; 723 return ATOM_ENCODER_MODE_DP;
720 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 724 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
721 radeon_audio) 725 radeon_audio &&
726 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
722 return ATOM_ENCODER_MODE_HDMI; 727 return ATOM_ENCODER_MODE_HDMI;
723 else 728 else
724 return ATOM_ENCODER_MODE_DVI; 729 return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8546e3b333b4..0f89ce3d02b9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4754,6 +4754,12 @@ static int evergreen_startup(struct radeon_device *rdev)
4754 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; 4754 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
4755 4755
4756 /* Enable IRQ */ 4756 /* Enable IRQ */
4757 if (!rdev->irq.installed) {
4758 r = radeon_irq_kms_init(rdev);
4759 if (r)
4760 return r;
4761 }
4762
4757 r = r600_irq_init(rdev); 4763 r = r600_irq_init(rdev);
4758 if (r) { 4764 if (r) {
4759 DRM_ERROR("radeon: IH init failed (%d).\n", r); 4765 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -4923,10 +4929,6 @@ int evergreen_init(struct radeon_device *rdev)
4923 if (r) 4929 if (r)
4924 return r; 4930 return r;
4925 4931
4926 r = radeon_irq_kms_init(rdev);
4927 if (r)
4928 return r;
4929
4930 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 4932 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
4931 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 4933 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
4932 4934
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7969c0c8ec20..84583302b081 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2025,6 +2025,12 @@ static int cayman_startup(struct radeon_device *rdev)
2025 } 2025 }
2026 2026
2027 /* Enable IRQ */ 2027 /* Enable IRQ */
2028 if (!rdev->irq.installed) {
2029 r = radeon_irq_kms_init(rdev);
2030 if (r)
2031 return r;
2032 }
2033
2028 r = r600_irq_init(rdev); 2034 r = r600_irq_init(rdev);
2029 if (r) { 2035 if (r) {
2030 DRM_ERROR("radeon: IH init failed (%d).\n", r); 2036 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2190,10 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
2190 if (r) 2196 if (r)
2191 return r; 2197 return r;
2192 2198
2193 r = radeon_irq_kms_init(rdev);
2194 if (r)
2195 return r;
2196
2197 ring->ring_obj = NULL; 2199 ring->ring_obj = NULL;
2198 r600_ring_init(rdev, ring, 1024 * 1024); 2200 r600_ring_init(rdev, ring, 1024 * 1024);
2199 2201
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 4973bff37fec..d0314ecbd7c1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3869,6 +3869,12 @@ static int r100_startup(struct radeon_device *rdev)
3869 } 3869 }
3870 3870
3871 /* Enable IRQ */ 3871 /* Enable IRQ */
3872 if (!rdev->irq.installed) {
3873 r = radeon_irq_kms_init(rdev);
3874 if (r)
3875 return r;
3876 }
3877
3872 r100_irq_set(rdev); 3878 r100_irq_set(rdev);
3873 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 3879 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3874 /* 1M ring buffer */ 3880 /* 1M ring buffer */
@@ -4024,9 +4030,6 @@ int r100_init(struct radeon_device *rdev)
4024 r = radeon_fence_driver_init(rdev); 4030 r = radeon_fence_driver_init(rdev);
4025 if (r) 4031 if (r)
4026 return r; 4032 return r;
4027 r = radeon_irq_kms_init(rdev);
4028 if (r)
4029 return r;
4030 /* Memory manager */ 4033 /* Memory manager */
4031 r = radeon_bo_init(rdev); 4034 r = radeon_bo_init(rdev);
4032 if (r) 4035 if (r)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c60350e6872d..b9b776f1e582 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1382,6 +1382,12 @@ static int r300_startup(struct radeon_device *rdev)
1382 } 1382 }
1383 1383
1384 /* Enable IRQ */ 1384 /* Enable IRQ */
1385 if (!rdev->irq.installed) {
1386 r = radeon_irq_kms_init(rdev);
1387 if (r)
1388 return r;
1389 }
1390
1385 r100_irq_set(rdev); 1391 r100_irq_set(rdev);
1386 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 1392 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
1387 /* 1M ring buffer */ 1393 /* 1M ring buffer */
@@ -1516,9 +1522,6 @@ int r300_init(struct radeon_device *rdev)
1516 r = radeon_fence_driver_init(rdev); 1522 r = radeon_fence_driver_init(rdev);
1517 if (r) 1523 if (r)
1518 return r; 1524 return r;
1519 r = radeon_irq_kms_init(rdev);
1520 if (r)
1521 return r;
1522 /* Memory manager */ 1525 /* Memory manager */
1523 r = radeon_bo_init(rdev); 1526 r = radeon_bo_init(rdev);
1524 if (r) 1527 if (r)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 6fce2eb4dd16..4e796ecf9ea4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -265,6 +265,12 @@ static int r420_startup(struct radeon_device *rdev)
265 } 265 }
266 266
267 /* Enable IRQ */ 267 /* Enable IRQ */
268 if (!rdev->irq.installed) {
269 r = radeon_irq_kms_init(rdev);
270 if (r)
271 return r;
272 }
273
268 r100_irq_set(rdev); 274 r100_irq_set(rdev);
269 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 275 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
270 /* 1M ring buffer */ 276 /* 1M ring buffer */
@@ -411,10 +417,6 @@ int r420_init(struct radeon_device *rdev)
411 if (r) { 417 if (r) {
412 return r; 418 return r;
413 } 419 }
414 r = radeon_irq_kms_init(rdev);
415 if (r) {
416 return r;
417 }
418 /* Memory manager */ 420 /* Memory manager */
419 r = radeon_bo_init(rdev); 421 r = radeon_bo_init(rdev);
420 if (r) { 422 if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f795a4e092cb..e1aece73b370 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -194,6 +194,12 @@ static int r520_startup(struct radeon_device *rdev)
194 } 194 }
195 195
196 /* Enable IRQ */ 196 /* Enable IRQ */
197 if (!rdev->irq.installed) {
198 r = radeon_irq_kms_init(rdev);
199 if (r)
200 return r;
201 }
202
197 rs600_irq_set(rdev); 203 rs600_irq_set(rdev);
198 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 204 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
199 /* 1M ring buffer */ 205 /* 1M ring buffer */
@@ -297,9 +303,6 @@ int r520_init(struct radeon_device *rdev)
297 r = radeon_fence_driver_init(rdev); 303 r = radeon_fence_driver_init(rdev);
298 if (r) 304 if (r)
299 return r; 305 return r;
300 r = radeon_irq_kms_init(rdev);
301 if (r)
302 return r;
303 /* Memory manager */ 306 /* Memory manager */
304 r = radeon_bo_init(rdev); 307 r = radeon_bo_init(rdev);
305 if (r) 308 if (r)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index b45e64848677..0e5341695922 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1046,6 +1046,24 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
1046 return -1; 1046 return -1;
1047} 1047}
1048 1048
1049uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1050{
1051 uint32_t r;
1052
1053 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1054 r = RREG32(R_0028FC_MC_DATA);
1055 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
1056 return r;
1057}
1058
1059void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1060{
1061 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1062 S_0028F8_MC_IND_WR_EN(1));
1063 WREG32(R_0028FC_MC_DATA, v);
1064 WREG32(R_0028F8_MC_INDEX, 0x7F);
1065}
1066
1049static void r600_mc_program(struct radeon_device *rdev) 1067static void r600_mc_program(struct radeon_device *rdev)
1050{ 1068{
1051 struct rv515_mc_save save; 1069 struct rv515_mc_save save;
@@ -1181,6 +1199,8 @@ static int r600_mc_init(struct radeon_device *rdev)
1181{ 1199{
1182 u32 tmp; 1200 u32 tmp;
1183 int chansize, numchan; 1201 int chansize, numchan;
1202 uint32_t h_addr, l_addr;
1203 unsigned long long k8_addr;
1184 1204
1185 /* Get VRAM informations */ 1205 /* Get VRAM informations */
1186 rdev->mc.vram_is_ddr = true; 1206 rdev->mc.vram_is_ddr = true;
@@ -1221,7 +1241,30 @@ static int r600_mc_init(struct radeon_device *rdev)
1221 if (rdev->flags & RADEON_IS_IGP) { 1241 if (rdev->flags & RADEON_IS_IGP) {
1222 rs690_pm_info(rdev); 1242 rs690_pm_info(rdev);
1223 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 1243 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1244
1245 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
1246 /* Use K8 direct mapping for fast fb access. */
1247 rdev->fastfb_working = false;
1248 h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
1249 l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
1250 k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
1251#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
1252 if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
1253#endif
1254 {
1255 /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
1256 * memory is present.
1257 */
1258 if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
1259 DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
1260 (unsigned long long)rdev->mc.aper_base, k8_addr);
1261 rdev->mc.aper_base = (resource_size_t)k8_addr;
1262 rdev->fastfb_working = true;
1263 }
1264 }
1265 }
1224 } 1266 }
1267
1225 radeon_update_bandwidth_info(rdev); 1268 radeon_update_bandwidth_info(rdev);
1226 return 0; 1269 return 0;
1227} 1270}
@@ -3202,6 +3245,12 @@ static int r600_startup(struct radeon_device *rdev)
3202 } 3245 }
3203 3246
3204 /* Enable IRQ */ 3247 /* Enable IRQ */
3248 if (!rdev->irq.installed) {
3249 r = radeon_irq_kms_init(rdev);
3250 if (r)
3251 return r;
3252 }
3253
3205 r = r600_irq_init(rdev); 3254 r = r600_irq_init(rdev);
3206 if (r) { 3255 if (r) {
3207 DRM_ERROR("radeon: IH init failed (%d).\n", r); 3256 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -3356,10 +3405,6 @@ int r600_init(struct radeon_device *rdev)
3356 if (r) 3405 if (r)
3357 return r; 3406 return r;
3358 3407
3359 r = radeon_irq_kms_init(rdev);
3360 if (r)
3361 return r;
3362
3363 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 3408 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3364 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 3409 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3365 3410
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index acb146c06973..79df558f8c40 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1342,6 +1342,14 @@
1342#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */ 1342#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
1343#define PACKET3_SURFACE_BASE_UPDATE 0x73 1343#define PACKET3_SURFACE_BASE_UPDATE 0x73
1344 1344
1345#define R_000011_K8_FB_LOCATION 0x11
1346#define R_000012_MC_MISC_UMA_CNTL 0x12
1347#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
1348#define R_0028F8_MC_INDEX 0x28F8
1349#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
1350#define C_0028F8_MC_IND_ADDR 0xFFFFFE00
1351#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
1352#define R_0028FC_MC_DATA 0x28FC
1345 1353
1346#define R_008020_GRBM_SOFT_RESET 0x8020 1354#define R_008020_GRBM_SOFT_RESET 0x8020
1347#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0) 1355#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 06b8c19ab19e..a2802b47ee95 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -122,6 +122,10 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
122 rdev->mc_rreg = &rs600_mc_rreg; 122 rdev->mc_rreg = &rs600_mc_rreg;
123 rdev->mc_wreg = &rs600_mc_wreg; 123 rdev->mc_wreg = &rs600_mc_wreg;
124 } 124 }
125 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
126 rdev->mc_rreg = &rs780_mc_rreg;
127 rdev->mc_wreg = &rs780_mc_wreg;
128 }
125 if (rdev->family >= CHIP_R600) { 129 if (rdev->family >= CHIP_R600) {
126 rdev->pciep_rreg = &r600_pciep_rreg; 130 rdev->pciep_rreg = &r600_pciep_rreg;
127 rdev->pciep_wreg = &r600_pciep_wreg; 131 rdev->pciep_wreg = &r600_pciep_wreg;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2c87365d345f..a72759ede753 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -347,6 +347,8 @@ extern bool r600_gui_idle(struct radeon_device *rdev);
347extern void r600_pm_misc(struct radeon_device *rdev); 347extern void r600_pm_misc(struct radeon_device *rdev);
348extern void r600_pm_init_profile(struct radeon_device *rdev); 348extern void r600_pm_init_profile(struct radeon_device *rdev);
349extern void rs780_pm_init_profile(struct radeon_device *rdev); 349extern void rs780_pm_init_profile(struct radeon_device *rdev);
350extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
351extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
350extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); 352extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
351extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes); 353extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
352extern int r600_get_pcie_lanes(struct radeon_device *rdev); 354extern int r600_get_pcie_lanes(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 73051ce3121e..233a9b9fa1f7 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -417,6 +417,12 @@ static int rs400_startup(struct radeon_device *rdev)
417 } 417 }
418 418
419 /* Enable IRQ */ 419 /* Enable IRQ */
420 if (!rdev->irq.installed) {
421 r = radeon_irq_kms_init(rdev);
422 if (r)
423 return r;
424 }
425
420 r100_irq_set(rdev); 426 r100_irq_set(rdev);
421 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 427 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
422 /* 1M ring buffer */ 428 /* 1M ring buffer */
@@ -535,9 +541,6 @@ int rs400_init(struct radeon_device *rdev)
535 r = radeon_fence_driver_init(rdev); 541 r = radeon_fence_driver_init(rdev);
536 if (r) 542 if (r)
537 return r; 543 return r;
538 r = radeon_irq_kms_init(rdev);
539 if (r)
540 return r;
541 /* Memory manager */ 544 /* Memory manager */
542 r = radeon_bo_init(rdev); 545 r = radeon_bo_init(rdev);
543 if (r) 546 if (r)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 46fa1b07c560..670b555d2ca2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -923,6 +923,12 @@ static int rs600_startup(struct radeon_device *rdev)
923 } 923 }
924 924
925 /* Enable IRQ */ 925 /* Enable IRQ */
926 if (!rdev->irq.installed) {
927 r = radeon_irq_kms_init(rdev);
928 if (r)
929 return r;
930 }
931
926 rs600_irq_set(rdev); 932 rs600_irq_set(rdev);
927 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 933 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
928 /* 1M ring buffer */ 934 /* 1M ring buffer */
@@ -1047,9 +1053,6 @@ int rs600_init(struct radeon_device *rdev)
1047 r = radeon_fence_driver_init(rdev); 1053 r = radeon_fence_driver_init(rdev);
1048 if (r) 1054 if (r)
1049 return r; 1055 return r;
1050 r = radeon_irq_kms_init(rdev);
1051 if (r)
1052 return r;
1053 /* Memory manager */ 1056 /* Memory manager */
1054 r = radeon_bo_init(rdev); 1057 r = radeon_bo_init(rdev);
1055 if (r) 1058 if (r)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index ab4c86cfd552..55880d5962c3 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -651,6 +651,12 @@ static int rs690_startup(struct radeon_device *rdev)
651 } 651 }
652 652
653 /* Enable IRQ */ 653 /* Enable IRQ */
654 if (!rdev->irq.installed) {
655 r = radeon_irq_kms_init(rdev);
656 if (r)
657 return r;
658 }
659
654 rs600_irq_set(rdev); 660 rs600_irq_set(rdev);
655 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 661 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
656 /* 1M ring buffer */ 662 /* 1M ring buffer */
@@ -776,9 +782,6 @@ int rs690_init(struct radeon_device *rdev)
776 r = radeon_fence_driver_init(rdev); 782 r = radeon_fence_driver_init(rdev);
777 if (r) 783 if (r)
778 return r; 784 return r;
779 r = radeon_irq_kms_init(rdev);
780 if (r)
781 return r;
782 /* Memory manager */ 785 /* Memory manager */
783 r = radeon_bo_init(rdev); 786 r = radeon_bo_init(rdev);
784 if (r) 787 if (r)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ffcba730c57c..21c7d7b26e55 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -532,6 +532,12 @@ static int rv515_startup(struct radeon_device *rdev)
532 } 532 }
533 533
534 /* Enable IRQ */ 534 /* Enable IRQ */
535 if (!rdev->irq.installed) {
536 r = radeon_irq_kms_init(rdev);
537 if (r)
538 return r;
539 }
540
535 rs600_irq_set(rdev); 541 rs600_irq_set(rdev);
536 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); 542 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
537 /* 1M ring buffer */ 543 /* 1M ring buffer */
@@ -662,9 +668,6 @@ int rv515_init(struct radeon_device *rdev)
662 r = radeon_fence_driver_init(rdev); 668 r = radeon_fence_driver_init(rdev);
663 if (r) 669 if (r)
664 return r; 670 return r;
665 r = radeon_irq_kms_init(rdev);
666 if (r)
667 return r;
668 /* Memory manager */ 671 /* Memory manager */
669 r = radeon_bo_init(rdev); 672 r = radeon_bo_init(rdev);
670 if (r) 673 if (r)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 08aef24afe40..4a62ad2e5399 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1887,6 +1887,12 @@ static int rv770_startup(struct radeon_device *rdev)
1887 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; 1887 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
1888 1888
1889 /* Enable IRQ */ 1889 /* Enable IRQ */
1890 if (!rdev->irq.installed) {
1891 r = radeon_irq_kms_init(rdev);
1892 if (r)
1893 return r;
1894 }
1895
1890 r = r600_irq_init(rdev); 1896 r = r600_irq_init(rdev);
1891 if (r) { 1897 if (r) {
1892 DRM_ERROR("radeon: IH init failed (%d).\n", r); 1898 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2045,10 +2051,6 @@ int rv770_init(struct radeon_device *rdev)
2045 if (r) 2051 if (r)
2046 return r; 2052 return r;
2047 2053
2048 r = radeon_irq_kms_init(rdev);
2049 if (r)
2050 return r;
2051
2052 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 2054 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2053 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 2055 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2054 2056
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d1ba9d88f311..a1b0da6b5808 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5350,6 +5350,12 @@ static int si_startup(struct radeon_device *rdev)
5350 } 5350 }
5351 5351
5352 /* Enable IRQ */ 5352 /* Enable IRQ */
5353 if (!rdev->irq.installed) {
5354 r = radeon_irq_kms_init(rdev);
5355 if (r)
5356 return r;
5357 }
5358
5353 r = si_irq_init(rdev); 5359 r = si_irq_init(rdev);
5354 if (r) { 5360 if (r) {
5355 DRM_ERROR("radeon: IH init failed (%d).\n", r); 5361 DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -5533,10 +5539,6 @@ int si_init(struct radeon_device *rdev)
5533 if (r) 5539 if (r)
5534 return r; 5540 return r;
5535 5541
5536 r = radeon_irq_kms_init(rdev);
5537 if (r)
5538 return r;
5539
5540 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 5542 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
5541 ring->ring_obj = NULL; 5543 ring->ring_obj = NULL;
5542 r600_ring_init(rdev, ring, 1024 * 1024); 5544 r600_ring_init(rdev, ring, 1024 * 1024);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index e461e9972455..7a4d10106906 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -6,6 +6,7 @@ config DRM_TILCDC
6 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
7 select VIDEOMODE_HELPERS 7 select VIDEOMODE_HELPERS
8 select BACKLIGHT_CLASS_DEVICE 8 select BACKLIGHT_CLASS_DEVICE
9 select BACKLIGHT_LCD_SUPPORT
9 help 10 help
10 Choose this option if you have an TI SoC with LCDC display 11 Choose this option if you have an TI SoC with LCDC display
11 controller, for example AM33xx in beagle-bone, DA8xx, or 12 controller, for example AM33xx in beagle-bone, DA8xx, or
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index dc3ae5c56f56..d39a5cede0b0 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -264,9 +264,12 @@ static struct mt_class mt_classes[] = {
264static void mt_free_input_name(struct hid_input *hi) 264static void mt_free_input_name(struct hid_input *hi)
265{ 265{
266 struct hid_device *hdev = hi->report->device; 266 struct hid_device *hdev = hi->report->device;
267 const char *name = hi->input->name;
267 268
268 if (hi->input->name != hdev->name) 269 if (name != hdev->name) {
269 kfree(hi->input->name); 270 hi->input->name = hdev->name;
271 kfree(name);
272 }
270} 273}
271 274
272static ssize_t mt_show_quirks(struct device *dev, 275static ssize_t mt_show_quirks(struct device *dev,
@@ -1040,11 +1043,11 @@ static void mt_remove(struct hid_device *hdev)
1040 struct hid_input *hi; 1043 struct hid_input *hi;
1041 1044
1042 sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group); 1045 sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
1043 hid_hw_stop(hdev);
1044
1045 list_for_each_entry(hi, &hdev->inputs, list) 1046 list_for_each_entry(hi, &hdev->inputs, list)
1046 mt_free_input_name(hi); 1047 mt_free_input_name(hi);
1047 1048
1049 hid_hw_stop(hdev);
1050
1048 kfree(td); 1051 kfree(td);
1049 hid_set_drvdata(hdev, NULL); 1052 hid_set_drvdata(hdev, NULL);
1050} 1053}
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 7e76922a4ba9..f920619cd6da 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -331,26 +331,68 @@ static int adm1021_detect(struct i2c_client *client,
331 man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID); 331 man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
332 dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID); 332 dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
333 333
334 if (man_id < 0 || dev_id < 0)
335 return -ENODEV;
336
334 if (man_id == 0x4d && dev_id == 0x01) 337 if (man_id == 0x4d && dev_id == 0x01)
335 type_name = "max1617a"; 338 type_name = "max1617a";
336 else if (man_id == 0x41) { 339 else if (man_id == 0x41) {
337 if ((dev_id & 0xF0) == 0x30) 340 if ((dev_id & 0xF0) == 0x30)
338 type_name = "adm1023"; 341 type_name = "adm1023";
339 else 342 else if ((dev_id & 0xF0) == 0x00)
340 type_name = "adm1021"; 343 type_name = "adm1021";
344 else
345 return -ENODEV;
341 } else if (man_id == 0x49) 346 } else if (man_id == 0x49)
342 type_name = "thmc10"; 347 type_name = "thmc10";
343 else if (man_id == 0x23) 348 else if (man_id == 0x23)
344 type_name = "gl523sm"; 349 type_name = "gl523sm";
345 else if (man_id == 0x54) 350 else if (man_id == 0x54)
346 type_name = "mc1066"; 351 type_name = "mc1066";
347 /* LM84 Mfr ID in a different place, and it has more unused bits */ 352 else {
348 else if (conv_rate == 0x00 353 int lte, rte, lhi, rhi, llo, rlo;
349 && (config & 0x7F) == 0x00 354
350 && (status & 0xAB) == 0x00) 355 /* extra checks for LM84 and MAX1617 to avoid misdetections */
351 type_name = "lm84"; 356
352 else 357 llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
353 type_name = "max1617"; 358 rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
359
360 /* fail if any of the additional register reads failed */
361 if (llo < 0 || rlo < 0)
362 return -ENODEV;
363
364 lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
365 rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
366 lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
367 rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
368
369 /*
370 * Fail for negative temperatures and negative high limits.
371 * This check also catches read errors on the tested registers.
372 */
373 if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
374 return -ENODEV;
375
376 /* fail if all registers hold the same value */
377 if (lte == rte && lte == lhi && lte == rhi && lte == llo
378 && lte == rlo)
379 return -ENODEV;
380
381 /*
382 * LM84 Mfr ID is in a different place,
383 * and it has more unused bits.
384 */
385 if (conv_rate == 0x00
386 && (config & 0x7F) == 0x00
387 && (status & 0xAB) == 0x00) {
388 type_name = "lm84";
389 } else {
390 /* fail if low limits are larger than high limits */
391 if ((s8)llo > lhi || (s8)rlo > rhi)
392 return -ENODEV;
393 type_name = "max1617";
394 }
395 }
354 396
355 pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n", 397 pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
356 type_name, i2c_adapter_id(adapter), client->addr); 398 type_name, i2c_adapter_id(adapter), client->addr);
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 81c7b73695d2..3b9afccaaade 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -61,7 +61,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
61 if (dma_region) { 61 if (dma_region) {
62 struct qib_mregion *tmr; 62 struct qib_mregion *tmr;
63 63
64 tmr = rcu_dereference(dev->dma_mr); 64 tmr = rcu_access_pointer(dev->dma_mr);
65 if (!tmr) { 65 if (!tmr) {
66 qib_get_mr(mr); 66 qib_get_mr(mr);
67 rcu_assign_pointer(dev->dma_mr, mr); 67 rcu_assign_pointer(dev->dma_mr, mr);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index f19b0998a53c..2e84ef859c5b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -5,6 +5,7 @@
5 * Copyright (C) 2004 Alex Aizman 5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 Mike Christie 6 * Copyright (C) 2005 Mike Christie
7 * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. 7 * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
8 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
8 * maintained by openib-general@openib.org 9 * maintained by openib-general@openib.org
9 * 10 *
10 * This software is available to you under a choice of one of two 11 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 06f578cde75b..4f069c0d4c04 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -8,6 +8,7 @@
8 * 8 *
9 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 9 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
10 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 10 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
11 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
11 * 12 *
12 * This software is available to you under a choice of one of two 13 * This software is available to you under a choice of one of two
13 * licenses. You may choose to be licensed under the terms of the GNU 14 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index a00ccd1ca333..b6d81a86c976 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 68ebb7fe072a..7827baf455a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 5278916c3103..2c4941d0656b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
4 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -292,10 +293,10 @@ out_err:
292} 293}
293 294
294/** 295/**
295 * releases the FMR pool, QP and CMA ID objects, returns 0 on success, 296 * releases the FMR pool and QP objects, returns 0 on success,
296 * -1 on failure 297 * -1 on failure
297 */ 298 */
298static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) 299static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
299{ 300{
300 int cq_index; 301 int cq_index;
301 BUG_ON(ib_conn == NULL); 302 BUG_ON(ib_conn == NULL);
@@ -314,13 +315,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
314 315
315 rdma_destroy_qp(ib_conn->cma_id); 316 rdma_destroy_qp(ib_conn->cma_id);
316 } 317 }
317 /* if cma handler context, the caller acts s.t the cma destroy the id */
318 if (ib_conn->cma_id != NULL && can_destroy_id)
319 rdma_destroy_id(ib_conn->cma_id);
320 318
321 ib_conn->fmr_pool = NULL; 319 ib_conn->fmr_pool = NULL;
322 ib_conn->qp = NULL; 320 ib_conn->qp = NULL;
323 ib_conn->cma_id = NULL;
324 kfree(ib_conn->page_vec); 321 kfree(ib_conn->page_vec);
325 322
326 if (ib_conn->login_buf) { 323 if (ib_conn->login_buf) {
@@ -415,11 +412,16 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
415 list_del(&ib_conn->conn_list); 412 list_del(&ib_conn->conn_list);
416 mutex_unlock(&ig.connlist_mutex); 413 mutex_unlock(&ig.connlist_mutex);
417 iser_free_rx_descriptors(ib_conn); 414 iser_free_rx_descriptors(ib_conn);
418 iser_free_ib_conn_res(ib_conn, can_destroy_id); 415 iser_free_ib_conn_res(ib_conn);
419 ib_conn->device = NULL; 416 ib_conn->device = NULL;
420 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 417 /* on EVENT_ADDR_ERROR there's no device yet for this conn */
421 if (device != NULL) 418 if (device != NULL)
422 iser_device_try_release(device); 419 iser_device_try_release(device);
420 /* if cma handler context, the caller actually destroy the id */
421 if (ib_conn->cma_id != NULL && can_destroy_id) {
422 rdma_destroy_id(ib_conn->cma_id);
423 ib_conn->cma_id = NULL;
424 }
423 iscsi_destroy_endpoint(ib_conn->ep); 425 iscsi_destroy_endpoint(ib_conn->ep);
424} 426}
425 427
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 29889bbdcc6d..63b3d4eb0ef7 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -76,16 +76,10 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
76{ 76{
77 u32 irqnr; 77 u32 irqnr;
78 78
79 do { 79 irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
80 irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); 80 __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
81 if (irqnr != 0x7f) { 81 irqnr = irq_find_mapping(icoll_domain, irqnr);
82 __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); 82 handle_IRQ(irqnr, regs);
83 irqnr = irq_find_mapping(icoll_domain, irqnr);
84 handle_IRQ(irqnr, regs);
85 continue;
86 }
87 break;
88 } while (1);
89} 83}
90 84
91static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, 85static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 065b7a31a478..47a52ab580d8 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -119,7 +119,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
119 119
120 /* Skip invalid IRQs, only register handlers for the real ones */ 120 /* Skip invalid IRQs, only register handlers for the real ones */
121 if (!(f->valid & BIT(hwirq))) 121 if (!(f->valid & BIT(hwirq)))
122 return -ENOTSUPP; 122 return -EPERM;
123 irq_set_chip_data(irq, f); 123 irq_set_chip_data(irq, f);
124 irq_set_chip_and_handler(irq, &f->chip, 124 irq_set_chip_and_handler(irq, &f->chip,
125 handle_level_irq); 125 handle_level_irq);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 884d11c7355f..2bbb00404cf5 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -197,7 +197,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
197 197
198 /* Skip invalid IRQs, only register handlers for the real ones */ 198 /* Skip invalid IRQs, only register handlers for the real ones */
199 if (!(v->valid_sources & (1 << hwirq))) 199 if (!(v->valid_sources & (1 << hwirq)))
200 return -ENOTSUPP; 200 return -EPERM;
201 irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq); 201 irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
202 irq_set_chip_data(irq, v->base); 202 irq_set_chip_data(irq, v->base);
203 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 203 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 05c220d05e23..f950c9d29f3e 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -1,7 +1,6 @@
1 1
2config BCACHE 2config BCACHE
3 tristate "Block device as cache" 3 tristate "Block device as cache"
4 select CLOSURES
5 ---help--- 4 ---help---
6 Allows a block device to be used as cache for other devices; uses 5 Allows a block device to be used as cache for other devices; uses
7 a btree for indexing and the layout is optimized for SSDs. 6 a btree for indexing and the layout is optimized for SSDs.
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 340146d7c17f..d3e15b42a4ab 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *);
1241struct cache_set *bch_cache_set_alloc(struct cache_sb *); 1241struct cache_set *bch_cache_set_alloc(struct cache_sb *);
1242void bch_btree_cache_free(struct cache_set *); 1242void bch_btree_cache_free(struct cache_set *);
1243int bch_btree_cache_alloc(struct cache_set *); 1243int bch_btree_cache_alloc(struct cache_set *);
1244void bch_writeback_init_cached_dev(struct cached_dev *); 1244void bch_cached_dev_writeback_init(struct cached_dev *);
1245void bch_moving_init_cache_set(struct cache_set *); 1245void bch_moving_init_cache_set(struct cache_set *);
1246 1246
1247void bch_cache_allocator_exit(struct cache *ca); 1247void bch_cache_allocator_exit(struct cache *ca);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 64e679449c2a..b8730e714d69 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = {
93}; 93};
94static KTYPE(bch_stats); 94static KTYPE(bch_stats);
95 95
96static void scale_accounting(unsigned long data);
97
98void bch_cache_accounting_init(struct cache_accounting *acc,
99 struct closure *parent)
100{
101 kobject_init(&acc->total.kobj, &bch_stats_ktype);
102 kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
103 kobject_init(&acc->hour.kobj, &bch_stats_ktype);
104 kobject_init(&acc->day.kobj, &bch_stats_ktype);
105
106 closure_init(&acc->cl, parent);
107 init_timer(&acc->timer);
108 acc->timer.expires = jiffies + accounting_delay;
109 acc->timer.data = (unsigned long) acc;
110 acc->timer.function = scale_accounting;
111 add_timer(&acc->timer);
112}
113
114int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, 96int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
115 struct kobject *parent) 97 struct kobject *parent)
116{ 98{
@@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors)
244 atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); 226 atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
245 atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); 227 atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
246} 228}
229
230void bch_cache_accounting_init(struct cache_accounting *acc,
231 struct closure *parent)
232{
233 kobject_init(&acc->total.kobj, &bch_stats_ktype);
234 kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
235 kobject_init(&acc->hour.kobj, &bch_stats_ktype);
236 kobject_init(&acc->day.kobj, &bch_stats_ktype);
237
238 closure_init(&acc->cl, parent);
239 init_timer(&acc->timer);
240 acc->timer.expires = jiffies + accounting_delay;
241 acc->timer.data = (unsigned long) acc;
242 acc->timer.function = scale_accounting;
243 add_timer(&acc->timer);
244}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c8046bc4aa57..f88e2b653a3f 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -634,11 +634,10 @@ static int open_dev(struct block_device *b, fmode_t mode)
634 return 0; 634 return 0;
635} 635}
636 636
637static int release_dev(struct gendisk *b, fmode_t mode) 637static void release_dev(struct gendisk *b, fmode_t mode)
638{ 638{
639 struct bcache_device *d = b->private_data; 639 struct bcache_device *d = b->private_data;
640 closure_put(&d->cl); 640 closure_put(&d->cl);
641 return 0;
642} 641}
643 642
644static int ioctl_dev(struct block_device *b, fmode_t mode, 643static int ioctl_dev(struct block_device *b, fmode_t mode,
@@ -732,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d)
732 731
733 if (d->c) 732 if (d->c)
734 bcache_device_detach(d); 733 bcache_device_detach(d);
735 734 if (d->disk && d->disk->flags & GENHD_FL_UP)
736 if (d->disk)
737 del_gendisk(d->disk); 735 del_gendisk(d->disk);
738 if (d->disk && d->disk->queue) 736 if (d->disk && d->disk->queue)
739 blk_cleanup_queue(d->disk->queue); 737 blk_cleanup_queue(d->disk->queue);
@@ -756,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
756 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 754 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
757 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, 755 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
758 sizeof(struct bio_vec) * BIO_MAX_PAGES)) || 756 sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
759 bio_split_pool_init(&d->bio_split_hook)) 757 bio_split_pool_init(&d->bio_split_hook) ||
760 758 !(d->disk = alloc_disk(1)) ||
761 return -ENOMEM; 759 !(q = blk_alloc_queue(GFP_KERNEL)))
762
763 d->disk = alloc_disk(1);
764 if (!d->disk)
765 return -ENOMEM; 760 return -ENOMEM;
766 761
767 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); 762 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
@@ -771,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
771 d->disk->fops = &bcache_ops; 766 d->disk->fops = &bcache_ops;
772 d->disk->private_data = d; 767 d->disk->private_data = d;
773 768
774 q = blk_alloc_queue(GFP_KERNEL);
775 if (!q)
776 return -ENOMEM;
777
778 blk_queue_make_request(q, NULL); 769 blk_queue_make_request(q, NULL);
779 d->disk->queue = q; 770 d->disk->queue = q;
780 q->queuedata = d; 771 q->queuedata = d;
@@ -999,14 +990,17 @@ static void cached_dev_free(struct closure *cl)
999 990
1000 mutex_lock(&bch_register_lock); 991 mutex_lock(&bch_register_lock);
1001 992
1002 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 993 if (atomic_read(&dc->running))
994 bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1003 bcache_device_free(&dc->disk); 995 bcache_device_free(&dc->disk);
1004 list_del(&dc->list); 996 list_del(&dc->list);
1005 997
1006 mutex_unlock(&bch_register_lock); 998 mutex_unlock(&bch_register_lock);
1007 999
1008 if (!IS_ERR_OR_NULL(dc->bdev)) { 1000 if (!IS_ERR_OR_NULL(dc->bdev)) {
1009 blk_sync_queue(bdev_get_queue(dc->bdev)); 1001 if (dc->bdev->bd_disk)
1002 blk_sync_queue(bdev_get_queue(dc->bdev));
1003
1010 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1004 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1011 } 1005 }
1012 1006
@@ -1028,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl)
1028 1022
1029static int cached_dev_init(struct cached_dev *dc, unsigned block_size) 1023static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
1030{ 1024{
1031 int err; 1025 int ret;
1032 struct io *io; 1026 struct io *io;
1033 1027 struct request_queue *q = bdev_get_queue(dc->bdev);
1034 closure_init(&dc->disk.cl, NULL);
1035 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1036 1028
1037 __module_get(THIS_MODULE); 1029 __module_get(THIS_MODULE);
1038 INIT_LIST_HEAD(&dc->list); 1030 INIT_LIST_HEAD(&dc->list);
1031 closure_init(&dc->disk.cl, NULL);
1032 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1039 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1033 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1040
1041 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1042
1043 err = bcache_device_init(&dc->disk, block_size);
1044 if (err)
1045 goto err;
1046
1047 spin_lock_init(&dc->io_lock);
1048 closure_init_unlocked(&dc->sb_write);
1049 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1034 INIT_WORK(&dc->detach, cached_dev_detach_finish);
1035 closure_init_unlocked(&dc->sb_write);
1036 INIT_LIST_HEAD(&dc->io_lru);
1037 spin_lock_init(&dc->io_lock);
1038 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1050 1039
1051 dc->sequential_merge = true; 1040 dc->sequential_merge = true;
1052 dc->sequential_cutoff = 4 << 20; 1041 dc->sequential_cutoff = 4 << 20;
1053 1042
1054 INIT_LIST_HEAD(&dc->io_lru);
1055 dc->sb_bio.bi_max_vecs = 1;
1056 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
1057
1058 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1043 for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1059 list_add(&io->lru, &dc->io_lru); 1044 list_add(&io->lru, &dc->io_lru);
1060 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1045 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1061 } 1046 }
1062 1047
1063 bch_writeback_init_cached_dev(dc); 1048 ret = bcache_device_init(&dc->disk, block_size);
1049 if (ret)
1050 return ret;
1051
1052 set_capacity(dc->disk.disk,
1053 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1054
1055 dc->disk.disk->queue->backing_dev_info.ra_pages =
1056 max(dc->disk.disk->queue->backing_dev_info.ra_pages,
1057 q->backing_dev_info.ra_pages);
1058
1059 bch_cached_dev_request_init(dc);
1060 bch_cached_dev_writeback_init(dc);
1064 return 0; 1061 return 0;
1065err:
1066 bcache_device_stop(&dc->disk);
1067 return err;
1068} 1062}
1069 1063
1070/* Cached device - bcache superblock */ 1064/* Cached device - bcache superblock */
1071 1065
1072static const char *register_bdev(struct cache_sb *sb, struct page *sb_page, 1066static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1073 struct block_device *bdev, 1067 struct block_device *bdev,
1074 struct cached_dev *dc) 1068 struct cached_dev *dc)
1075{ 1069{
1076 char name[BDEVNAME_SIZE]; 1070 char name[BDEVNAME_SIZE];
1077 const char *err = "cannot allocate memory"; 1071 const char *err = "cannot allocate memory";
1078 struct gendisk *g;
1079 struct cache_set *c; 1072 struct cache_set *c;
1080 1073
1081 if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
1082 return err;
1083
1084 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1074 memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1085 dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
1086 dc->bdev = bdev; 1075 dc->bdev = bdev;
1087 dc->bdev->bd_holder = dc; 1076 dc->bdev->bd_holder = dc;
1088 1077
1089 g = dc->disk.disk; 1078 bio_init(&dc->sb_bio);
1090 1079 dc->sb_bio.bi_max_vecs = 1;
1091 set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1080 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
1092 1081 dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
1093 g->queue->backing_dev_info.ra_pages = 1082 get_page(sb_page);
1094 max(g->queue->backing_dev_info.ra_pages,
1095 bdev->bd_queue->backing_dev_info.ra_pages);
1096 1083
1097 bch_cached_dev_request_init(dc); 1084 if (cached_dev_init(dc, sb->block_size << 9))
1085 goto err;
1098 1086
1099 err = "error creating kobject"; 1087 err = "error creating kobject";
1100 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1088 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
@@ -1103,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
1103 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1091 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1104 goto err; 1092 goto err;
1105 1093
1094 pr_info("registered backing device %s", bdevname(bdev, name));
1095
1106 list_add(&dc->list, &uncached_devices); 1096 list_add(&dc->list, &uncached_devices);
1107 list_for_each_entry(c, &bch_cache_sets, list) 1097 list_for_each_entry(c, &bch_cache_sets, list)
1108 bch_cached_dev_attach(dc, c); 1098 bch_cached_dev_attach(dc, c);
@@ -1111,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
1111 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1101 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
1112 bch_cached_dev_run(dc); 1102 bch_cached_dev_run(dc);
1113 1103
1114 return NULL; 1104 return;
1115err: 1105err:
1116 kobject_put(&dc->disk.kobj);
1117 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1106 pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1118 /* 1107 bcache_device_stop(&dc->disk);
1119 * Return NULL instead of an error because kobject_put() cleans
1120 * everything up
1121 */
1122 return NULL;
1123} 1108}
1124 1109
1125/* Flash only volumes */ 1110/* Flash only volumes */
@@ -1717,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1717 size_t free; 1702 size_t free;
1718 struct bucket *b; 1703 struct bucket *b;
1719 1704
1720 if (!ca)
1721 return -ENOMEM;
1722
1723 __module_get(THIS_MODULE); 1705 __module_get(THIS_MODULE);
1724 kobject_init(&ca->kobj, &bch_cache_ktype); 1706 kobject_init(&ca->kobj, &bch_cache_ktype);
1725 1707
1726 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1727
1728 INIT_LIST_HEAD(&ca->discards); 1708 INIT_LIST_HEAD(&ca->discards);
1729 1709
1730 bio_init(&ca->sb_bio);
1731 ca->sb_bio.bi_max_vecs = 1;
1732 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
1733
1734 bio_init(&ca->journal.bio); 1710 bio_init(&ca->journal.bio);
1735 ca->journal.bio.bi_max_vecs = 8; 1711 ca->journal.bio.bi_max_vecs = 8;
1736 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; 1712 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -1742,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1742 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || 1718 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
1743 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || 1719 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
1744 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || 1720 !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
1745 !(ca->buckets = vmalloc(sizeof(struct bucket) * 1721 !(ca->buckets = vzalloc(sizeof(struct bucket) *
1746 ca->sb.nbuckets)) || 1722 ca->sb.nbuckets)) ||
1747 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 1723 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
1748 2, GFP_KERNEL)) || 1724 2, GFP_KERNEL)) ||
1749 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || 1725 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
1750 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || 1726 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
1751 bio_split_pool_init(&ca->bio_split_hook)) 1727 bio_split_pool_init(&ca->bio_split_hook))
1752 goto err; 1728 return -ENOMEM;
1753 1729
1754 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 1730 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
1755 1731
1756 memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
1757 for_each_bucket(b, ca) 1732 for_each_bucket(b, ca)
1758 atomic_set(&b->pin, 0); 1733 atomic_set(&b->pin, 0);
1759 1734
@@ -1766,22 +1741,28 @@ err:
1766 return -ENOMEM; 1741 return -ENOMEM;
1767} 1742}
1768 1743
1769static const char *register_cache(struct cache_sb *sb, struct page *sb_page, 1744static void register_cache(struct cache_sb *sb, struct page *sb_page,
1770 struct block_device *bdev, struct cache *ca) 1745 struct block_device *bdev, struct cache *ca)
1771{ 1746{
1772 char name[BDEVNAME_SIZE]; 1747 char name[BDEVNAME_SIZE];
1773 const char *err = "cannot allocate memory"; 1748 const char *err = "cannot allocate memory";
1774 1749
1775 if (cache_alloc(sb, ca) != 0) 1750 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1776 return err;
1777
1778 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1779 ca->bdev = bdev; 1751 ca->bdev = bdev;
1780 ca->bdev->bd_holder = ca; 1752 ca->bdev->bd_holder = ca;
1781 1753
1754 bio_init(&ca->sb_bio);
1755 ca->sb_bio.bi_max_vecs = 1;
1756 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
1757 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1758 get_page(sb_page);
1759
1782 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 1760 if (blk_queue_discard(bdev_get_queue(ca->bdev)))
1783 ca->discard = CACHE_DISCARD(&ca->sb); 1761 ca->discard = CACHE_DISCARD(&ca->sb);
1784 1762
1763 if (cache_alloc(sb, ca) != 0)
1764 goto err;
1765
1785 err = "error creating kobject"; 1766 err = "error creating kobject";
1786 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) 1767 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
1787 goto err; 1768 goto err;
@@ -1791,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
1791 goto err; 1772 goto err;
1792 1773
1793 pr_info("registered cache device %s", bdevname(bdev, name)); 1774 pr_info("registered cache device %s", bdevname(bdev, name));
1794 1775 return;
1795 return NULL;
1796err: 1776err:
1777 pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1797 kobject_put(&ca->kobj); 1778 kobject_put(&ca->kobj);
1798 pr_info("error opening %s: %s", bdevname(bdev, name), err);
1799 /* Return NULL instead of an error because kobject_put() cleans
1800 * everything up
1801 */
1802 return NULL;
1803} 1779}
1804 1780
1805/* Global interfaces/init */ 1781/* Global interfaces/init */
@@ -1833,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1833 bdev = blkdev_get_by_path(strim(path), 1809 bdev = blkdev_get_by_path(strim(path),
1834 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1810 FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1835 sb); 1811 sb);
1836 if (bdev == ERR_PTR(-EBUSY)) 1812 if (IS_ERR(bdev)) {
1837 err = "device busy"; 1813 if (bdev == ERR_PTR(-EBUSY))
1838 1814 err = "device busy";
1839 if (IS_ERR(bdev) ||
1840 set_blocksize(bdev, 4096))
1841 goto err; 1815 goto err;
1816 }
1817
1818 err = "failed to set blocksize";
1819 if (set_blocksize(bdev, 4096))
1820 goto err_close;
1842 1821
1843 err = read_super(sb, bdev, &sb_page); 1822 err = read_super(sb, bdev, &sb_page);
1844 if (err) 1823 if (err)
@@ -1846,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1846 1825
1847 if (SB_IS_BDEV(sb)) { 1826 if (SB_IS_BDEV(sb)) {
1848 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1827 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1828 if (!dc)
1829 goto err_close;
1849 1830
1850 err = register_bdev(sb, sb_page, bdev, dc); 1831 register_bdev(sb, sb_page, bdev, dc);
1851 } else { 1832 } else {
1852 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); 1833 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
1834 if (!ca)
1835 goto err_close;
1853 1836
1854 err = register_cache(sb, sb_page, bdev, ca); 1837 register_cache(sb, sb_page, bdev, ca);
1855 } 1838 }
1856 1839out:
1857 if (err) { 1840 if (sb_page)
1858 /* register_(bdev|cache) will only return an error if they
1859 * didn't get far enough to create the kobject - if they did,
1860 * the kobject destructor will do this cleanup.
1861 */
1862 put_page(sb_page); 1841 put_page(sb_page);
1863err_close:
1864 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1865err:
1866 if (attr != &ksysfs_register_quiet)
1867 pr_info("error opening %s: %s", path, err);
1868 ret = -EINVAL;
1869 }
1870
1871 kfree(sb); 1842 kfree(sb);
1872 kfree(path); 1843 kfree(path);
1873 mutex_unlock(&bch_register_lock); 1844 mutex_unlock(&bch_register_lock);
1874 module_put(THIS_MODULE); 1845 module_put(THIS_MODULE);
1875 return ret; 1846 return ret;
1847
1848err_close:
1849 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1850err:
1851 if (attr != &ksysfs_register_quiet)
1852 pr_info("error opening %s: %s", path, err);
1853 ret = -EINVAL;
1854 goto out;
1876} 1855}
1877 1856
1878static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) 1857static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 93e7e31a4bd3..2714ed3991d1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -375,7 +375,7 @@ err:
375 refill_dirty(cl); 375 refill_dirty(cl);
376} 376}
377 377
378void bch_writeback_init_cached_dev(struct cached_dev *dc) 378void bch_cached_dev_writeback_init(struct cached_dev *dc)
379{ 379{
380 closure_init_unlocked(&dc->writeback); 380 closure_init_unlocked(&dc->writeback);
381 init_rwsem(&dc->writeback_lock); 381 init_rwsem(&dc->writeback_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 681d1099a2d5..9b82377a833b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5268,8 +5268,8 @@ static void md_clean(struct mddev *mddev)
5268 5268
5269static void __md_stop_writes(struct mddev *mddev) 5269static void __md_stop_writes(struct mddev *mddev)
5270{ 5270{
5271 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5271 if (mddev->sync_thread) { 5272 if (mddev->sync_thread) {
5272 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5273 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5273 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5274 md_reap_sync_thread(mddev); 5274 md_reap_sync_thread(mddev);
5275 } 5275 }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 55951182af73..6e17f8181c4b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -417,7 +417,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
417 417
418 r1_bio->bios[mirror] = NULL; 418 r1_bio->bios[mirror] = NULL;
419 to_put = bio; 419 to_put = bio;
420 set_bit(R1BIO_Uptodate, &r1_bio->state); 420 /*
421 * Do not set R1BIO_Uptodate if the current device is
422 * rebuilding or Faulty. This is because we cannot use
423 * such device for properly reading the data back (we could
424 * potentially use it, if the current write would have felt
425 * before rdev->recovery_offset, but for simplicity we don't
426 * check this here.
427 */
428 if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
429 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
430 set_bit(R1BIO_Uptodate, &r1_bio->state);
421 431
422 /* Maybe we can clear some bad blocks. */ 432 /* Maybe we can clear some bad blocks. */
423 if (is_badblock(conf->mirrors[mirror].rdev, 433 if (is_badblock(conf->mirrors[mirror].rdev,
@@ -870,17 +880,17 @@ static void allow_barrier(struct r1conf *conf)
870 wake_up(&conf->wait_barrier); 880 wake_up(&conf->wait_barrier);
871} 881}
872 882
873static void freeze_array(struct r1conf *conf) 883static void freeze_array(struct r1conf *conf, int extra)
874{ 884{
875 /* stop syncio and normal IO and wait for everything to 885 /* stop syncio and normal IO and wait for everything to
876 * go quite. 886 * go quite.
877 * We increment barrier and nr_waiting, and then 887 * We increment barrier and nr_waiting, and then
878 * wait until nr_pending match nr_queued+1 888 * wait until nr_pending match nr_queued+extra
879 * This is called in the context of one normal IO request 889 * This is called in the context of one normal IO request
880 * that has failed. Thus any sync request that might be pending 890 * that has failed. Thus any sync request that might be pending
881 * will be blocked by nr_pending, and we need to wait for 891 * will be blocked by nr_pending, and we need to wait for
882 * pending IO requests to complete or be queued for re-try. 892 * pending IO requests to complete or be queued for re-try.
883 * Thus the number queued (nr_queued) plus this request (1) 893 * Thus the number queued (nr_queued) plus this request (extra)
884 * must match the number of pending IOs (nr_pending) before 894 * must match the number of pending IOs (nr_pending) before
885 * we continue. 895 * we continue.
886 */ 896 */
@@ -888,7 +898,7 @@ static void freeze_array(struct r1conf *conf)
888 conf->barrier++; 898 conf->barrier++;
889 conf->nr_waiting++; 899 conf->nr_waiting++;
890 wait_event_lock_irq_cmd(conf->wait_barrier, 900 wait_event_lock_irq_cmd(conf->wait_barrier,
891 conf->nr_pending == conf->nr_queued+1, 901 conf->nr_pending == conf->nr_queued+extra,
892 conf->resync_lock, 902 conf->resync_lock,
893 flush_pending_writes(conf)); 903 flush_pending_writes(conf));
894 spin_unlock_irq(&conf->resync_lock); 904 spin_unlock_irq(&conf->resync_lock);
@@ -1544,8 +1554,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1544 * we wait for all outstanding requests to complete. 1554 * we wait for all outstanding requests to complete.
1545 */ 1555 */
1546 synchronize_sched(); 1556 synchronize_sched();
1547 raise_barrier(conf); 1557 freeze_array(conf, 0);
1548 lower_barrier(conf); 1558 unfreeze_array(conf);
1549 clear_bit(Unmerged, &rdev->flags); 1559 clear_bit(Unmerged, &rdev->flags);
1550 } 1560 }
1551 md_integrity_add_rdev(rdev, mddev); 1561 md_integrity_add_rdev(rdev, mddev);
@@ -1595,11 +1605,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1595 */ 1605 */
1596 struct md_rdev *repl = 1606 struct md_rdev *repl =
1597 conf->mirrors[conf->raid_disks + number].rdev; 1607 conf->mirrors[conf->raid_disks + number].rdev;
1598 raise_barrier(conf); 1608 freeze_array(conf, 0);
1599 clear_bit(Replacement, &repl->flags); 1609 clear_bit(Replacement, &repl->flags);
1600 p->rdev = repl; 1610 p->rdev = repl;
1601 conf->mirrors[conf->raid_disks + number].rdev = NULL; 1611 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1602 lower_barrier(conf); 1612 unfreeze_array(conf);
1603 clear_bit(WantReplacement, &rdev->flags); 1613 clear_bit(WantReplacement, &rdev->flags);
1604 } else 1614 } else
1605 clear_bit(WantReplacement, &rdev->flags); 1615 clear_bit(WantReplacement, &rdev->flags);
@@ -2195,7 +2205,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2195 * frozen 2205 * frozen
2196 */ 2206 */
2197 if (mddev->ro == 0) { 2207 if (mddev->ro == 0) {
2198 freeze_array(conf); 2208 freeze_array(conf, 1);
2199 fix_read_error(conf, r1_bio->read_disk, 2209 fix_read_error(conf, r1_bio->read_disk,
2200 r1_bio->sector, r1_bio->sectors); 2210 r1_bio->sector, r1_bio->sectors);
2201 unfreeze_array(conf); 2211 unfreeze_array(conf);
@@ -2780,8 +2790,8 @@ static int run(struct mddev *mddev)
2780 return PTR_ERR(conf); 2790 return PTR_ERR(conf);
2781 2791
2782 if (mddev->queue) 2792 if (mddev->queue)
2783 blk_queue_max_write_same_sectors(mddev->queue, 2793 blk_queue_max_write_same_sectors(mddev->queue, 0);
2784 mddev->chunk_sectors); 2794
2785 rdev_for_each(rdev, mddev) { 2795 rdev_for_each(rdev, mddev) {
2786 if (!mddev->gendisk) 2796 if (!mddev->gendisk)
2787 continue; 2797 continue;
@@ -2963,7 +2973,7 @@ static int raid1_reshape(struct mddev *mddev)
2963 return -ENOMEM; 2973 return -ENOMEM;
2964 } 2974 }
2965 2975
2966 raise_barrier(conf); 2976 freeze_array(conf, 0);
2967 2977
2968 /* ok, everything is stopped */ 2978 /* ok, everything is stopped */
2969 oldpool = conf->r1bio_pool; 2979 oldpool = conf->r1bio_pool;
@@ -2994,7 +3004,7 @@ static int raid1_reshape(struct mddev *mddev)
2994 conf->raid_disks = mddev->raid_disks = raid_disks; 3004 conf->raid_disks = mddev->raid_disks = raid_disks;
2995 mddev->delta_disks = 0; 3005 mddev->delta_disks = 0;
2996 3006
2997 lower_barrier(conf); 3007 unfreeze_array(conf);
2998 3008
2999 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3009 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3000 md_wakeup_thread(mddev->thread); 3010 md_wakeup_thread(mddev->thread);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 59d4daa5f4c7..6ddae2501b9a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
490 sector_t first_bad; 490 sector_t first_bad;
491 int bad_sectors; 491 int bad_sectors;
492 492
493 set_bit(R10BIO_Uptodate, &r10_bio->state); 493 /*
494 * Do not set R10BIO_Uptodate if the current device is
495 * rebuilding or Faulty. This is because we cannot use
496 * such device for properly reading the data back (we could
497 * potentially use it, if the current write would have felt
498 * before rdev->recovery_offset, but for simplicity we don't
499 * check this here.
500 */
501 if (test_bit(In_sync, &rdev->flags) &&
502 !test_bit(Faulty, &rdev->flags))
503 set_bit(R10BIO_Uptodate, &r10_bio->state);
494 504
495 /* Maybe we can clear some bad blocks. */ 505 /* Maybe we can clear some bad blocks. */
496 if (is_badblock(rdev, 506 if (is_badblock(rdev,
@@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
1055 wake_up(&conf->wait_barrier); 1065 wake_up(&conf->wait_barrier);
1056} 1066}
1057 1067
1058static void freeze_array(struct r10conf *conf) 1068static void freeze_array(struct r10conf *conf, int extra)
1059{ 1069{
1060 /* stop syncio and normal IO and wait for everything to 1070 /* stop syncio and normal IO and wait for everything to
1061 * go quiet. 1071 * go quiet.
1062 * We increment barrier and nr_waiting, and then 1072 * We increment barrier and nr_waiting, and then
1063 * wait until nr_pending match nr_queued+1 1073 * wait until nr_pending match nr_queued+extra
1064 * This is called in the context of one normal IO request 1074 * This is called in the context of one normal IO request
1065 * that has failed. Thus any sync request that might be pending 1075 * that has failed. Thus any sync request that might be pending
1066 * will be blocked by nr_pending, and we need to wait for 1076 * will be blocked by nr_pending, and we need to wait for
1067 * pending IO requests to complete or be queued for re-try. 1077 * pending IO requests to complete or be queued for re-try.
1068 * Thus the number queued (nr_queued) plus this request (1) 1078 * Thus the number queued (nr_queued) plus this request (extra)
1069 * must match the number of pending IOs (nr_pending) before 1079 * must match the number of pending IOs (nr_pending) before
1070 * we continue. 1080 * we continue.
1071 */ 1081 */
@@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
1073 conf->barrier++; 1083 conf->barrier++;
1074 conf->nr_waiting++; 1084 conf->nr_waiting++;
1075 wait_event_lock_irq_cmd(conf->wait_barrier, 1085 wait_event_lock_irq_cmd(conf->wait_barrier,
1076 conf->nr_pending == conf->nr_queued+1, 1086 conf->nr_pending == conf->nr_queued+extra,
1077 conf->resync_lock, 1087 conf->resync_lock,
1078 flush_pending_writes(conf)); 1088 flush_pending_writes(conf));
1079 1089
@@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1837 * we wait for all outstanding requests to complete. 1847 * we wait for all outstanding requests to complete.
1838 */ 1848 */
1839 synchronize_sched(); 1849 synchronize_sched();
1840 raise_barrier(conf, 0); 1850 freeze_array(conf, 0);
1841 lower_barrier(conf); 1851 unfreeze_array(conf);
1842 clear_bit(Unmerged, &rdev->flags); 1852 clear_bit(Unmerged, &rdev->flags);
1843 } 1853 }
1844 md_integrity_add_rdev(rdev, mddev); 1854 md_integrity_add_rdev(rdev, mddev);
@@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2612 r10_bio->devs[slot].bio = NULL; 2622 r10_bio->devs[slot].bio = NULL;
2613 2623
2614 if (mddev->ro == 0) { 2624 if (mddev->ro == 0) {
2615 freeze_array(conf); 2625 freeze_array(conf, 1);
2616 fix_read_error(conf, mddev, r10_bio); 2626 fix_read_error(conf, mddev, r10_bio);
2617 unfreeze_array(conf); 2627 unfreeze_array(conf);
2618 } else 2628 } else
@@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev)
3609 if (mddev->queue) { 3619 if (mddev->queue) {
3610 blk_queue_max_discard_sectors(mddev->queue, 3620 blk_queue_max_discard_sectors(mddev->queue,
3611 mddev->chunk_sectors); 3621 mddev->chunk_sectors);
3612 blk_queue_max_write_same_sectors(mddev->queue, 3622 blk_queue_max_write_same_sectors(mddev->queue, 0);
3613 mddev->chunk_sectors);
3614 blk_queue_io_min(mddev->queue, chunk_size); 3623 blk_queue_io_min(mddev->queue, chunk_size);
3615 if (conf->geo.raid_disks % conf->geo.near_copies) 3624 if (conf->geo.raid_disks % conf->geo.near_copies)
3616 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3625 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9359828ffe26..05e4a105b9c7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -664,6 +664,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
664 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 664 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
665 bi->bi_rw |= REQ_FLUSH; 665 bi->bi_rw |= REQ_FLUSH;
666 666
667 bi->bi_vcnt = 1;
667 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 668 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
668 bi->bi_io_vec[0].bv_offset = 0; 669 bi->bi_io_vec[0].bv_offset = 0;
669 bi->bi_size = STRIPE_SIZE; 670 bi->bi_size = STRIPE_SIZE;
@@ -701,6 +702,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
701 else 702 else
702 rbi->bi_sector = (sh->sector 703 rbi->bi_sector = (sh->sector
703 + rrdev->data_offset); 704 + rrdev->data_offset);
705 rbi->bi_vcnt = 1;
704 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 706 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
705 rbi->bi_io_vec[0].bv_offset = 0; 707 rbi->bi_io_vec[0].bv_offset = 0;
706 rbi->bi_size = STRIPE_SIZE; 708 rbi->bi_size = STRIPE_SIZE;
@@ -5464,7 +5466,7 @@ static int run(struct mddev *mddev)
5464 if (mddev->major_version == 0 && 5466 if (mddev->major_version == 0 &&
5465 mddev->minor_version > 90) 5467 mddev->minor_version > 90)
5466 rdev->recovery_offset = reshape_offset; 5468 rdev->recovery_offset = reshape_offset;
5467 5469
5468 if (rdev->recovery_offset < reshape_offset) { 5470 if (rdev->recovery_offset < reshape_offset) {
5469 /* We need to check old and new layout */ 5471 /* We need to check old and new layout */
5470 if (!only_parity(rdev->raid_disk, 5472 if (!only_parity(rdev->raid_disk,
@@ -5587,6 +5589,8 @@ static int run(struct mddev *mddev)
5587 */ 5589 */
5588 mddev->queue->limits.discard_zeroes_data = 0; 5590 mddev->queue->limits.discard_zeroes_data = 0;
5589 5591
5592 blk_queue_max_write_same_sectors(mddev->queue, 0);
5593
5590 rdev_for_each(rdev, mddev) { 5594 rdev_for_each(rdev, mddev) {
5591 disk_stack_limits(mddev->gendisk, rdev->bdev, 5595 disk_stack_limits(mddev->gendisk, rdev->bdev,
5592 rdev->data_offset << 9); 5596 rdev->data_offset << 9);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 713d89fedc46..f580d30bb784 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -197,6 +197,8 @@ void mei_stop(struct mei_device *dev)
197{ 197{
198 dev_dbg(&dev->pdev->dev, "stopping the device.\n"); 198 dev_dbg(&dev->pdev->dev, "stopping the device.\n");
199 199
200 flush_scheduled_work();
201
200 mutex_lock(&dev->device_lock); 202 mutex_lock(&dev->device_lock);
201 203
202 cancel_delayed_work(&dev->timer_work); 204 cancel_delayed_work(&dev->timer_work);
@@ -210,8 +212,6 @@ void mei_stop(struct mei_device *dev)
210 212
211 mutex_unlock(&dev->device_lock); 213 mutex_unlock(&dev->device_lock);
212 214
213 flush_scheduled_work();
214
215 mei_watchdog_unregister(dev); 215 mei_watchdog_unregister(dev);
216} 216}
217EXPORT_SYMBOL_GPL(mei_stop); 217EXPORT_SYMBOL_GPL(mei_stop);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 3adf8a70f26e..d0c6907dfd92 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -142,6 +142,8 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
142 mei_cl_unlink(ndev->cl_info); 142 mei_cl_unlink(ndev->cl_info);
143 kfree(ndev->cl_info); 143 kfree(ndev->cl_info);
144 } 144 }
145
146 memset(ndev, 0, sizeof(struct mei_nfc_dev));
145} 147}
146 148
147static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) 149static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a727464e9c3f..0f268329bd3a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -325,6 +325,7 @@ static int mei_me_pci_resume(struct device *device)
325 325
326 mutex_lock(&dev->device_lock); 326 mutex_lock(&dev->device_lock);
327 dev->dev_state = MEI_DEV_POWER_UP; 327 dev->dev_state = MEI_DEV_POWER_UP;
328 mei_clear_interrupts(dev);
328 mei_reset(dev, 1); 329 mei_reset(dev, 1);
329 mutex_unlock(&dev->device_lock); 330 mutex_unlock(&dev->device_lock);
330 331
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 44d273c5e19d..0535d1e0bc78 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -172,6 +172,7 @@ static long gru_get_config_info(unsigned long arg)
172 nodesperblade = 2; 172 nodesperblade = 2;
173 else 173 else
174 nodesperblade = 1; 174 nodesperblade = 1;
175 memset(&info, 0, sizeof(info));
175 info.cpus = num_online_cpus(); 176 info.cpus = num_online_cpus();
176 info.nodes = num_online_nodes(); 177 info.nodes = num_online_nodes();
177 info.blades = info.nodes / nodesperblade; 178 info.blades = info.nodes / nodesperblade;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bc1246f6f86a..3b31c19972d4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -725,8 +725,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
725 struct net_device *bond_dev, *vlan_dev, *upper_dev; 725 struct net_device *bond_dev, *vlan_dev, *upper_dev;
726 struct vlan_entry *vlan; 726 struct vlan_entry *vlan;
727 727
728 rcu_read_lock();
729 read_lock(&bond->lock); 728 read_lock(&bond->lock);
729 rcu_read_lock();
730 730
731 bond_dev = bond->dev; 731 bond_dev = bond->dev;
732 732
@@ -748,12 +748,19 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
748 if (vlan_dev) 748 if (vlan_dev)
749 __bond_resend_igmp_join_requests(vlan_dev); 749 __bond_resend_igmp_join_requests(vlan_dev);
750 } 750 }
751 rcu_read_unlock();
751 752
752 if (--bond->igmp_retrans > 0) 753 /* We use curr_slave_lock to protect against concurrent access to
754 * igmp_retrans from multiple running instances of this function and
755 * bond_change_active_slave
756 */
757 write_lock_bh(&bond->curr_slave_lock);
758 if (bond->igmp_retrans > 1) {
759 bond->igmp_retrans--;
753 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); 760 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
754 761 }
762 write_unlock_bh(&bond->curr_slave_lock);
755 read_unlock(&bond->lock); 763 read_unlock(&bond->lock);
756 rcu_read_unlock();
757} 764}
758 765
759static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) 766static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@@ -1901,6 +1908,10 @@ err_free:
1901 1908
1902err_undo_flags: 1909err_undo_flags:
1903 bond_compute_features(bond); 1910 bond_compute_features(bond);
1911 /* Enslave of first slave has failed and we need to fix master's mac */
1912 if (bond->slave_cnt == 0 &&
1913 ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
1914 eth_hw_addr_random(bond_dev);
1904 1915
1905 return res; 1916 return res;
1906} 1917}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b38609b967b7..c990b42cf7a1 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -225,7 +225,7 @@ struct bonding {
225 rwlock_t curr_slave_lock; 225 rwlock_t curr_slave_lock;
226 u8 send_peer_notif; 226 u8 send_peer_notif;
227 s8 setup_by_slave; 227 s8 setup_by_slave;
228 s8 igmp_retrans; 228 u8 igmp_retrans;
229#ifdef CONFIG_PROC_FS 229#ifdef CONFIG_PROC_FS
230 struct proc_dir_entry *proc_entry; 230 struct proc_dir_entry *proc_entry;
231 char proc_file_name[IFNAMSIZ]; 231 char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 6e15ef08f301..cbd388eea682 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
977 err = usb_8dev_cmd_version(priv, &version); 977 err = usb_8dev_cmd_version(priv, &version);
978 if (err) { 978 if (err) {
979 netdev_err(netdev, "can't get firmware version\n"); 979 netdev_err(netdev, "can't get firmware version\n");
980 goto cleanup_cmd_msg_buffer; 980 goto cleanup_unregister_candev;
981 } else { 981 } else {
982 netdev_info(netdev, 982 netdev_info(netdev,
983 "firmware: %d.%d, hardware: %d.%d\n", 983 "firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
989 989
990 return 0; 990 return 0;
991 991
992cleanup_unregister_candev:
993 unregister_netdev(priv->netdev);
994
992cleanup_cmd_msg_buffer: 995cleanup_cmd_msg_buffer:
993 kfree(priv->cmd_msg_buffer); 996 kfree(priv->cmd_msg_buffer);
994 997
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 36d6abd1cfff..ad6aa1e98348 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -67,4 +67,22 @@ config ATL1C
67 To compile this driver as a module, choose M here. The module 67 To compile this driver as a module, choose M here. The module
68 will be called atl1c. 68 will be called atl1c.
69 69
70config ALX
71 tristate "Qualcomm Atheros AR816x/AR817x support"
72 depends on PCI
73 select CRC32
74 select NET_CORE
75 select MDIO
76 help
77 This driver supports the Qualcomm Atheros L1F ethernet adapter,
78 i.e. the following chipsets:
79
80 1969:1091 - AR8161 Gigabit Ethernet
81 1969:1090 - AR8162 Fast Ethernet
82 1969:10A1 - AR8171 Gigabit Ethernet
83 1969:10A0 - AR8172 Fast Ethernet
84
85 To compile this driver as a module, choose M here. The module
86 will be called alx.
87
70endif # NET_VENDOR_ATHEROS 88endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index e7e76fb576ff..5cf1c65bbce9 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
6obj-$(CONFIG_ATL2) += atlx/ 6obj-$(CONFIG_ATL2) += atlx/
7obj-$(CONFIG_ATL1E) += atl1e/ 7obj-$(CONFIG_ATL1E) += atl1e/
8obj-$(CONFIG_ATL1C) += atl1c/ 8obj-$(CONFIG_ATL1C) += atl1c/
9obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644
index 000000000000..5901fa407d52
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_ALX) += alx.o
2alx-objs := main.o ethtool.o hw.o
3ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644
index 000000000000..50b3ae2b143d
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef _ALX_H_
36#define _ALX_H_
37
38#include <linux/types.h>
39#include <linux/etherdevice.h>
40#include <linux/dma-mapping.h>
41#include <linux/spinlock.h>
42#include "hw.h"
43
44#define ALX_WATCHDOG_TIME (5 * HZ)
45
46struct alx_buffer {
47 struct sk_buff *skb;
48 DEFINE_DMA_UNMAP_ADDR(dma);
49 DEFINE_DMA_UNMAP_LEN(size);
50};
51
52struct alx_rx_queue {
53 struct alx_rrd *rrd;
54 dma_addr_t rrd_dma;
55
56 struct alx_rfd *rfd;
57 dma_addr_t rfd_dma;
58
59 struct alx_buffer *bufs;
60
61 u16 write_idx, read_idx;
62 u16 rrd_read_idx;
63};
64#define ALX_RX_ALLOC_THRESH 32
65
66struct alx_tx_queue {
67 struct alx_txd *tpd;
68 dma_addr_t tpd_dma;
69 struct alx_buffer *bufs;
70 u16 write_idx, read_idx;
71};
72
73#define ALX_DEFAULT_TX_WORK 128
74
75enum alx_device_quirks {
76 ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
77};
78
79struct alx_priv {
80 struct net_device *dev;
81
82 struct alx_hw hw;
83
84 /* all descriptor memory */
85 struct {
86 dma_addr_t dma;
87 void *virt;
88 int size;
89 } descmem;
90
91 /* protect int_mask updates */
92 spinlock_t irq_lock;
93 u32 int_mask;
94
95 int tx_ringsz;
96 int rx_ringsz;
97 int rxbuf_size;
98
99 struct napi_struct napi;
100 struct alx_tx_queue txq;
101 struct alx_rx_queue rxq;
102
103 struct work_struct link_check_wk;
104 struct work_struct reset_wk;
105
106 u16 msg_enable;
107
108 bool msi;
109};
110
111extern const struct ethtool_ops alx_ethtool_ops;
112extern const char alx_drv_name[];
113
114#endif
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644
index 000000000000..6fa2aec2bc81
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/ethtool.h>
41#include <linux/mdio.h>
42#include <linux/interrupt.h>
43#include <asm/byteorder.h>
44
45#include "alx.h"
46#include "reg.h"
47#include "hw.h"
48
49
50static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
51{
52 struct alx_priv *alx = netdev_priv(netdev);
53 struct alx_hw *hw = &alx->hw;
54
55 ecmd->supported = SUPPORTED_10baseT_Half |
56 SUPPORTED_10baseT_Full |
57 SUPPORTED_100baseT_Half |
58 SUPPORTED_100baseT_Full |
59 SUPPORTED_Autoneg |
60 SUPPORTED_TP |
61 SUPPORTED_Pause;
62 if (alx_hw_giga(hw))
63 ecmd->supported |= SUPPORTED_1000baseT_Full;
64
65 ecmd->advertising = ADVERTISED_TP;
66 if (hw->adv_cfg & ADVERTISED_Autoneg)
67 ecmd->advertising |= hw->adv_cfg;
68
69 ecmd->port = PORT_TP;
70 ecmd->phy_address = 0;
71 if (hw->adv_cfg & ADVERTISED_Autoneg)
72 ecmd->autoneg = AUTONEG_ENABLE;
73 else
74 ecmd->autoneg = AUTONEG_DISABLE;
75 ecmd->transceiver = XCVR_INTERNAL;
76
77 if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
78 if (hw->flowctrl & ALX_FC_RX) {
79 ecmd->advertising |= ADVERTISED_Pause;
80
81 if (!(hw->flowctrl & ALX_FC_TX))
82 ecmd->advertising |= ADVERTISED_Asym_Pause;
83 } else if (hw->flowctrl & ALX_FC_TX) {
84 ecmd->advertising |= ADVERTISED_Asym_Pause;
85 }
86 }
87
88 if (hw->link_speed != SPEED_UNKNOWN) {
89 ethtool_cmd_speed_set(ecmd,
90 hw->link_speed - hw->link_speed % 10);
91 ecmd->duplex = hw->link_speed % 10;
92 } else {
93 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
94 ecmd->duplex = DUPLEX_UNKNOWN;
95 }
96
97 return 0;
98}
99
100static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
101{
102 struct alx_priv *alx = netdev_priv(netdev);
103 struct alx_hw *hw = &alx->hw;
104 u32 adv_cfg;
105
106 ASSERT_RTNL();
107
108 if (ecmd->autoneg == AUTONEG_ENABLE) {
109 if (ecmd->advertising & ADVERTISED_1000baseT_Half)
110 return -EINVAL;
111 adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
112 } else {
113 int speed = ethtool_cmd_speed(ecmd);
114
115 switch (speed + ecmd->duplex) {
116 case SPEED_10 + DUPLEX_HALF:
117 adv_cfg = ADVERTISED_10baseT_Half;
118 break;
119 case SPEED_10 + DUPLEX_FULL:
120 adv_cfg = ADVERTISED_10baseT_Full;
121 break;
122 case SPEED_100 + DUPLEX_HALF:
123 adv_cfg = ADVERTISED_100baseT_Half;
124 break;
125 case SPEED_100 + DUPLEX_FULL:
126 adv_cfg = ADVERTISED_100baseT_Full;
127 break;
128 default:
129 return -EINVAL;
130 }
131 }
132
133 hw->adv_cfg = adv_cfg;
134 return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
135}
136
137static void alx_get_pauseparam(struct net_device *netdev,
138 struct ethtool_pauseparam *pause)
139{
140 struct alx_priv *alx = netdev_priv(netdev);
141 struct alx_hw *hw = &alx->hw;
142
143 if (hw->flowctrl & ALX_FC_ANEG &&
144 hw->adv_cfg & ADVERTISED_Autoneg)
145 pause->autoneg = AUTONEG_ENABLE;
146 else
147 pause->autoneg = AUTONEG_DISABLE;
148
149 if (hw->flowctrl & ALX_FC_TX)
150 pause->tx_pause = 1;
151 else
152 pause->tx_pause = 0;
153
154 if (hw->flowctrl & ALX_FC_RX)
155 pause->rx_pause = 1;
156 else
157 pause->rx_pause = 0;
158}
159
160
161static int alx_set_pauseparam(struct net_device *netdev,
162 struct ethtool_pauseparam *pause)
163{
164 struct alx_priv *alx = netdev_priv(netdev);
165 struct alx_hw *hw = &alx->hw;
166 int err = 0;
167 bool reconfig_phy = false;
168 u8 fc = 0;
169
170 if (pause->tx_pause)
171 fc |= ALX_FC_TX;
172 if (pause->rx_pause)
173 fc |= ALX_FC_RX;
174 if (pause->autoneg)
175 fc |= ALX_FC_ANEG;
176
177 ASSERT_RTNL();
178
179 /* restart auto-neg for auto-mode */
180 if (hw->adv_cfg & ADVERTISED_Autoneg) {
181 if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
182 reconfig_phy = true;
183 if (fc & hw->flowctrl & ALX_FC_ANEG &&
184 (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
185 reconfig_phy = true;
186 }
187
188 if (reconfig_phy) {
189 err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
190 return err;
191 }
192
193 /* flow control on mac */
194 if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
195 alx_cfg_mac_flowcontrol(hw, fc);
196
197 hw->flowctrl = fc;
198
199 return 0;
200}
201
202static u32 alx_get_msglevel(struct net_device *netdev)
203{
204 struct alx_priv *alx = netdev_priv(netdev);
205
206 return alx->msg_enable;
207}
208
209static void alx_set_msglevel(struct net_device *netdev, u32 data)
210{
211 struct alx_priv *alx = netdev_priv(netdev);
212
213 alx->msg_enable = data;
214}
215
216static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
217{
218 struct alx_priv *alx = netdev_priv(netdev);
219 struct alx_hw *hw = &alx->hw;
220
221 wol->supported = WAKE_MAGIC | WAKE_PHY;
222 wol->wolopts = 0;
223
224 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
225 wol->wolopts |= WAKE_MAGIC;
226 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
227 wol->wolopts |= WAKE_PHY;
228}
229
230static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
231{
232 struct alx_priv *alx = netdev_priv(netdev);
233 struct alx_hw *hw = &alx->hw;
234
235 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
236 WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
237 return -EOPNOTSUPP;
238
239 hw->sleep_ctrl = 0;
240
241 if (wol->wolopts & WAKE_MAGIC)
242 hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
243 if (wol->wolopts & WAKE_PHY)
244 hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
245
246 device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
247
248 return 0;
249}
250
251static void alx_get_drvinfo(struct net_device *netdev,
252 struct ethtool_drvinfo *drvinfo)
253{
254 struct alx_priv *alx = netdev_priv(netdev);
255
256 strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
257 strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
258 sizeof(drvinfo->bus_info));
259}
260
261const struct ethtool_ops alx_ethtool_ops = {
262 .get_settings = alx_get_settings,
263 .set_settings = alx_set_settings,
264 .get_pauseparam = alx_get_pauseparam,
265 .set_pauseparam = alx_set_pauseparam,
266 .get_drvinfo = alx_get_drvinfo,
267 .get_msglevel = alx_get_msglevel,
268 .set_msglevel = alx_set_msglevel,
269 .get_wol = alx_get_wol,
270 .set_wol = alx_set_wol,
271 .get_link = ethtool_op_get_link,
272};
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644
index 000000000000..220a16ad0e49
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -0,0 +1,1226 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36#include <linux/pci.h>
37#include <linux/mdio.h>
38#include "reg.h"
39#include "hw.h"
40
41static inline bool alx_is_rev_a(u8 rev)
42{
43 return rev == ALX_REV_A0 || rev == ALX_REV_A1;
44}
45
46static int alx_wait_mdio_idle(struct alx_hw *hw)
47{
48 u32 val;
49 int i;
50
51 for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
52 val = alx_read_mem32(hw, ALX_MDIO);
53 if (!(val & ALX_MDIO_BUSY))
54 return 0;
55 udelay(10);
56 }
57
58 return -ETIMEDOUT;
59}
60
61static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
62 u16 reg, u16 *phy_data)
63{
64 u32 val, clk_sel;
65 int err;
66
67 *phy_data = 0;
68
69 /* use slow clock when it's in hibernation status */
70 clk_sel = hw->link_speed != SPEED_UNKNOWN ?
71 ALX_MDIO_CLK_SEL_25MD4 :
72 ALX_MDIO_CLK_SEL_25MD128;
73
74 if (ext) {
75 val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
76 reg << ALX_MDIO_EXTN_REG_SHIFT;
77 alx_write_mem32(hw, ALX_MDIO_EXTN, val);
78
79 val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
80 ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
81 clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
82 } else {
83 val = ALX_MDIO_SPRES_PRMBL |
84 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
85 reg << ALX_MDIO_REG_SHIFT |
86 ALX_MDIO_START | ALX_MDIO_OP_READ;
87 }
88 alx_write_mem32(hw, ALX_MDIO, val);
89
90 err = alx_wait_mdio_idle(hw);
91 if (err)
92 return err;
93 val = alx_read_mem32(hw, ALX_MDIO);
94 *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
95 return 0;
96}
97
98static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
99 u16 reg, u16 phy_data)
100{
101 u32 val, clk_sel;
102
103 /* use slow clock when it's in hibernation status */
104 clk_sel = hw->link_speed != SPEED_UNKNOWN ?
105 ALX_MDIO_CLK_SEL_25MD4 :
106 ALX_MDIO_CLK_SEL_25MD128;
107
108 if (ext) {
109 val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
110 reg << ALX_MDIO_EXTN_REG_SHIFT;
111 alx_write_mem32(hw, ALX_MDIO_EXTN, val);
112
113 val = ALX_MDIO_SPRES_PRMBL |
114 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
115 phy_data << ALX_MDIO_DATA_SHIFT |
116 ALX_MDIO_START | ALX_MDIO_MODE_EXT;
117 } else {
118 val = ALX_MDIO_SPRES_PRMBL |
119 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
120 reg << ALX_MDIO_REG_SHIFT |
121 phy_data << ALX_MDIO_DATA_SHIFT |
122 ALX_MDIO_START;
123 }
124 alx_write_mem32(hw, ALX_MDIO, val);
125
126 return alx_wait_mdio_idle(hw);
127}
128
129static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
130{
131 return alx_read_phy_core(hw, false, 0, reg, phy_data);
132}
133
134static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
135{
136 return alx_write_phy_core(hw, false, 0, reg, phy_data);
137}
138
139static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
140{
141 return alx_read_phy_core(hw, true, dev, reg, pdata);
142}
143
144static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
145{
146 return alx_write_phy_core(hw, true, dev, reg, data);
147}
148
149static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
150{
151 int err;
152
153 err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
154 if (err)
155 return err;
156
157 return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
158}
159
160static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
161{
162 int err;
163
164 err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
165 if (err)
166 return err;
167
168 return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
169}
170
171int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
172{
173 int err;
174
175 spin_lock(&hw->mdio_lock);
176 err = __alx_read_phy_reg(hw, reg, phy_data);
177 spin_unlock(&hw->mdio_lock);
178
179 return err;
180}
181
182int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
183{
184 int err;
185
186 spin_lock(&hw->mdio_lock);
187 err = __alx_write_phy_reg(hw, reg, phy_data);
188 spin_unlock(&hw->mdio_lock);
189
190 return err;
191}
192
193int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
194{
195 int err;
196
197 spin_lock(&hw->mdio_lock);
198 err = __alx_read_phy_ext(hw, dev, reg, pdata);
199 spin_unlock(&hw->mdio_lock);
200
201 return err;
202}
203
204int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
205{
206 int err;
207
208 spin_lock(&hw->mdio_lock);
209 err = __alx_write_phy_ext(hw, dev, reg, data);
210 spin_unlock(&hw->mdio_lock);
211
212 return err;
213}
214
215static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
216{
217 int err;
218
219 spin_lock(&hw->mdio_lock);
220 err = __alx_read_phy_dbg(hw, reg, pdata);
221 spin_unlock(&hw->mdio_lock);
222
223 return err;
224}
225
226static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
227{
228 int err;
229
230 spin_lock(&hw->mdio_lock);
231 err = __alx_write_phy_dbg(hw, reg, data);
232 spin_unlock(&hw->mdio_lock);
233
234 return err;
235}
236
237static u16 alx_get_phy_config(struct alx_hw *hw)
238{
239 u32 val;
240 u16 phy_val;
241
242 val = alx_read_mem32(hw, ALX_PHY_CTRL);
243 /* phy in reset */
244 if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
245 return ALX_DRV_PHY_UNKNOWN;
246
247 val = alx_read_mem32(hw, ALX_DRV);
248 val = ALX_GET_FIELD(val, ALX_DRV_PHY);
249 if (ALX_DRV_PHY_UNKNOWN == val)
250 return ALX_DRV_PHY_UNKNOWN;
251
252 alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
253 if (ALX_PHY_INITED == phy_val)
254 return val;
255
256 return ALX_DRV_PHY_UNKNOWN;
257}
258
259static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
260{
261 u32 read;
262 int i;
263
264 for (i = 0; i < ALX_SLD_MAX_TO; i++) {
265 read = alx_read_mem32(hw, reg);
266 if ((read & wait) == 0) {
267 if (val)
268 *val = read;
269 return true;
270 }
271 mdelay(1);
272 }
273
274 return false;
275}
276
277static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
278{
279 u32 mac0, mac1;
280
281 mac0 = alx_read_mem32(hw, ALX_STAD0);
282 mac1 = alx_read_mem32(hw, ALX_STAD1);
283
284 /* addr should be big-endian */
285 *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
286 *(__be16 *)addr = cpu_to_be16(mac1);
287
288 return is_valid_ether_addr(addr);
289}
290
291int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
292{
293 u32 val;
294
295 /* try to get it from register first */
296 if (alx_read_macaddr(hw, addr))
297 return 0;
298
299 /* try to load from efuse */
300 if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
301 return -EIO;
302 alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
303 if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
304 return -EIO;
305 if (alx_read_macaddr(hw, addr))
306 return 0;
307
308 /* try to load from flash/eeprom (if present) */
309 val = alx_read_mem32(hw, ALX_EFLD);
310 if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
311 if (!alx_wait_reg(hw, ALX_EFLD,
312 ALX_EFLD_STAT | ALX_EFLD_START, &val))
313 return -EIO;
314 alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
315 if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
316 return -EIO;
317 if (alx_read_macaddr(hw, addr))
318 return 0;
319 }
320
321 return -EIO;
322}
323
324void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
325{
326 u32 val;
327
328 /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */
329 val = be32_to_cpu(*(__be32 *)(addr + 2));
330 alx_write_mem32(hw, ALX_STAD0, val);
331 val = be16_to_cpu(*(__be16 *)addr);
332 alx_write_mem32(hw, ALX_STAD1, val);
333}
334
335static void alx_enable_osc(struct alx_hw *hw)
336{
337 u32 val;
338
339 /* rising edge */
340 val = alx_read_mem32(hw, ALX_MISC);
341 alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
342 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
343}
344
345static void alx_reset_osc(struct alx_hw *hw, u8 rev)
346{
347 u32 val, val2;
348
349 /* clear Internal OSC settings, switching OSC by hw itself */
350 val = alx_read_mem32(hw, ALX_MISC3);
351 alx_write_mem32(hw, ALX_MISC3,
352 (val & ~ALX_MISC3_25M_BY_SW) |
353 ALX_MISC3_25M_NOTO_INTNL);
354
355 /* 25M clk from chipset may be unstable 1s after de-assert of
356 * PERST, driver need re-calibrate before enter Sleep for WoL
357 */
358 val = alx_read_mem32(hw, ALX_MISC);
359 if (rev >= ALX_REV_B0) {
360 /* restore over current protection def-val,
361 * this val could be reset by MAC-RST
362 */
363 ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
364 /* a 0->1 change will update the internal val of osc */
365 val &= ~ALX_MISC_INTNLOSC_OPEN;
366 alx_write_mem32(hw, ALX_MISC, val);
367 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
368 /* hw will automatically dis OSC after cab. */
369 val2 = alx_read_mem32(hw, ALX_MSIC2);
370 val2 &= ~ALX_MSIC2_CALB_START;
371 alx_write_mem32(hw, ALX_MSIC2, val2);
372 alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
373 } else {
374 val &= ~ALX_MISC_INTNLOSC_OPEN;
375 /* disable isolate for rev A devices */
376 if (alx_is_rev_a(rev))
377 val &= ~ALX_MISC_ISO_EN;
378
379 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
380 alx_write_mem32(hw, ALX_MISC, val);
381 }
382
383 udelay(20);
384}
385
386static int alx_stop_mac(struct alx_hw *hw)
387{
388 u32 rxq, txq, val;
389 u16 i;
390
391 rxq = alx_read_mem32(hw, ALX_RXQ0);
392 alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
393 txq = alx_read_mem32(hw, ALX_TXQ0);
394 alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
395
396 udelay(40);
397
398 hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
399 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
400
401 for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
402 val = alx_read_mem32(hw, ALX_MAC_STS);
403 if (!(val & ALX_MAC_STS_IDLE))
404 return 0;
405 udelay(10);
406 }
407
408 return -ETIMEDOUT;
409}
410
411int alx_reset_mac(struct alx_hw *hw)
412{
413 u32 val, pmctrl;
414 int i, ret;
415 u8 rev;
416 bool a_cr;
417
418 pmctrl = 0;
419 rev = alx_hw_revision(hw);
420 a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
421
422 /* disable all interrupts, RXQ/TXQ */
423 alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
424 alx_write_mem32(hw, ALX_IMR, 0);
425 alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
426
427 ret = alx_stop_mac(hw);
428 if (ret)
429 return ret;
430
431 /* mac reset workaroud */
432 alx_write_mem32(hw, ALX_RFD_PIDX, 1);
433
434 /* dis l0s/l1 before mac reset */
435 if (a_cr) {
436 pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
437 if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
438 alx_write_mem32(hw, ALX_PMCTRL,
439 pmctrl & ~(ALX_PMCTRL_L1_EN |
440 ALX_PMCTRL_L0S_EN));
441 }
442
443 /* reset whole mac safely */
444 val = alx_read_mem32(hw, ALX_MASTER);
445 alx_write_mem32(hw, ALX_MASTER,
446 val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
447
448 /* make sure it's real idle */
449 udelay(10);
450 for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
451 val = alx_read_mem32(hw, ALX_RFD_PIDX);
452 if (val == 0)
453 break;
454 udelay(10);
455 }
456 for (; i < ALX_DMA_MAC_RST_TO; i++) {
457 val = alx_read_mem32(hw, ALX_MASTER);
458 if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
459 break;
460 udelay(10);
461 }
462 if (i == ALX_DMA_MAC_RST_TO)
463 return -EIO;
464 udelay(10);
465
466 if (a_cr) {
467 alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
468 /* restore l0s / l1 */
469 if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
470 alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
471 }
472
473 alx_reset_osc(hw, rev);
474
475 /* clear Internal OSC settings, switching OSC by hw itself,
476 * disable isolate for rev A devices
477 */
478 val = alx_read_mem32(hw, ALX_MISC3);
479 alx_write_mem32(hw, ALX_MISC3,
480 (val & ~ALX_MISC3_25M_BY_SW) |
481 ALX_MISC3_25M_NOTO_INTNL);
482 val = alx_read_mem32(hw, ALX_MISC);
483 val &= ~ALX_MISC_INTNLOSC_OPEN;
484 if (alx_is_rev_a(rev))
485 val &= ~ALX_MISC_ISO_EN;
486 alx_write_mem32(hw, ALX_MISC, val);
487 udelay(20);
488
489 /* driver control speed/duplex, hash-alg */
490 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
491
492 val = alx_read_mem32(hw, ALX_SERDES);
493 alx_write_mem32(hw, ALX_SERDES,
494 val | ALX_SERDES_MACCLK_SLWDWN |
495 ALX_SERDES_PHYCLK_SLWDWN);
496
497 return 0;
498}
499
500void alx_reset_phy(struct alx_hw *hw)
501{
502 int i;
503 u32 val;
504 u16 phy_val;
505
506 /* (DSP)reset PHY core */
507 val = alx_read_mem32(hw, ALX_PHY_CTRL);
508 val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
509 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
510 ALX_PHY_CTRL_CLS);
511 val |= ALX_PHY_CTRL_RST_ANALOG;
512
513 val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
514 alx_write_mem32(hw, ALX_PHY_CTRL, val);
515 udelay(10);
516 alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
517
518 for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
519 udelay(10);
520
521 /* phy power saving & hib */
522 alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
523 alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
524 ALX_SYSMODCTRL_IECHOADJ_DEF);
525 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
526 ALX_VDRVBIAS_DEF);
527
528 /* EEE advertisement */
529 val = alx_read_mem32(hw, ALX_LPI_CTRL);
530 alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
531 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
532
533 /* phy power saving */
534 alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
535 alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
536 alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
537 alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
538 alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
539 alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
540 phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
541 /* rtl8139c, 120m issue */
542 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
543 ALX_MIIEXT_NLP78_120M_DEF);
544 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
545 ALX_MIIEXT_S3DIG10_DEF);
546
547 if (hw->lnk_patch) {
548 /* Turn off half amplitude */
549 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
550 &phy_val);
551 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
552 phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
553 /* Turn off Green feature */
554 alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
555 alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
556 phy_val | ALX_GREENCFG2_BP_GREEN);
557 /* Turn off half Bias */
558 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
559 &phy_val);
560 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
561 phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
562 }
563
564 /* set phy interrupt mask */
565 alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
566}
567
568#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
569
570void alx_reset_pcie(struct alx_hw *hw)
571{
572 u8 rev = alx_hw_revision(hw);
573 u32 val;
574 u16 val16;
575
576 /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
577 pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
578 if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
579 val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
580 pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
581 }
582
583 /* clear WoL setting/status */
584 val = alx_read_mem32(hw, ALX_WOL0);
585 alx_write_mem32(hw, ALX_WOL0, 0);
586
587 val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
588 alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
589
590 /* mask some pcie error bits */
591 val = alx_read_mem32(hw, ALX_UE_SVRT);
592 val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
593 alx_write_mem32(hw, ALX_UE_SVRT, val);
594
595 /* wol 25M & pclk */
596 val = alx_read_mem32(hw, ALX_MASTER);
597 if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
598 if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
599 (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
600 alx_write_mem32(hw, ALX_MASTER,
601 val | ALX_MASTER_PCLKSEL_SRDS |
602 ALX_MASTER_WAKEN_25M);
603 } else {
604 if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
605 (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
606 alx_write_mem32(hw, ALX_MASTER,
607 (val & ~ALX_MASTER_PCLKSEL_SRDS) |
608 ALX_MASTER_WAKEN_25M);
609 }
610
611 /* ASPM setting */
612 alx_enable_aspm(hw, true, true);
613
614 udelay(10);
615}
616
617void alx_start_mac(struct alx_hw *hw)
618{
619 u32 mac, txq, rxq;
620
621 rxq = alx_read_mem32(hw, ALX_RXQ0);
622 alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
623 txq = alx_read_mem32(hw, ALX_TXQ0);
624 alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
625
626 mac = hw->rx_ctrl;
627 if (hw->link_speed % 10 == DUPLEX_FULL)
628 mac |= ALX_MAC_CTRL_FULLD;
629 else
630 mac &= ~ALX_MAC_CTRL_FULLD;
631 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
632 hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
633 ALX_MAC_CTRL_SPEED_10_100);
634 mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
635 hw->rx_ctrl = mac;
636 alx_write_mem32(hw, ALX_MAC_CTRL, mac);
637}
638
639void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
640{
641 if (fc & ALX_FC_RX)
642 hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
643 else
644 hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
645
646 if (fc & ALX_FC_TX)
647 hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
648 else
649 hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
650
651 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
652}
653
654void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
655{
656 u32 pmctrl;
657 u8 rev = alx_hw_revision(hw);
658
659 pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
660
661 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
662 ALX_PMCTRL_LCKDET_TIMER_DEF);
663 pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
664 ALX_PMCTRL_L1_CLKSW_EN |
665 ALX_PMCTRL_L1_SRDSRX_PWD;
666 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
667 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
668 pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
669 ALX_PMCTRL_L1_SRDSPLL_EN |
670 ALX_PMCTRL_L1_BUFSRX_EN |
671 ALX_PMCTRL_SADLY_EN |
672 ALX_PMCTRL_HOTRST_WTEN|
673 ALX_PMCTRL_L0S_EN |
674 ALX_PMCTRL_L1_EN |
675 ALX_PMCTRL_ASPM_FCEN |
676 ALX_PMCTRL_TXL1_AFTER_L0S |
677 ALX_PMCTRL_RXL1_AFTER_L0S);
678 if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
679 pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
680
681 if (l0s_en)
682 pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
683 if (l1_en)
684 pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
685
686 alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
687}
688
689
690static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
691{
692 u32 cfg = 0;
693
694 if (ethadv_cfg & ADVERTISED_Autoneg) {
695 cfg |= ALX_DRV_PHY_AUTO;
696 if (ethadv_cfg & ADVERTISED_10baseT_Half)
697 cfg |= ALX_DRV_PHY_10;
698 if (ethadv_cfg & ADVERTISED_10baseT_Full)
699 cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
700 if (ethadv_cfg & ADVERTISED_100baseT_Half)
701 cfg |= ALX_DRV_PHY_100;
702 if (ethadv_cfg & ADVERTISED_100baseT_Full)
703 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
704 if (ethadv_cfg & ADVERTISED_1000baseT_Half)
705 cfg |= ALX_DRV_PHY_1000;
706 if (ethadv_cfg & ADVERTISED_1000baseT_Full)
707 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
708 if (ethadv_cfg & ADVERTISED_Pause)
709 cfg |= ADVERTISE_PAUSE_CAP;
710 if (ethadv_cfg & ADVERTISED_Asym_Pause)
711 cfg |= ADVERTISE_PAUSE_ASYM;
712 } else {
713 switch (ethadv_cfg) {
714 case ADVERTISED_10baseT_Half:
715 cfg |= ALX_DRV_PHY_10;
716 break;
717 case ADVERTISED_100baseT_Half:
718 cfg |= ALX_DRV_PHY_100;
719 break;
720 case ADVERTISED_10baseT_Full:
721 cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
722 break;
723 case ADVERTISED_100baseT_Full:
724 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
725 break;
726 }
727 }
728
729 return cfg;
730}
731
732int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
733{
734 u16 adv, giga, cr;
735 u32 val;
736 int err = 0;
737
738 alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
739 val = alx_read_mem32(hw, ALX_DRV);
740 ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
741
742 if (ethadv & ADVERTISED_Autoneg) {
743 adv = ADVERTISE_CSMA;
744 adv |= ethtool_adv_to_mii_adv_t(ethadv);
745
746 if (flowctrl & ALX_FC_ANEG) {
747 if (flowctrl & ALX_FC_RX) {
748 adv |= ADVERTISED_Pause;
749 if (!(flowctrl & ALX_FC_TX))
750 adv |= ADVERTISED_Asym_Pause;
751 } else if (flowctrl & ALX_FC_TX) {
752 adv |= ADVERTISED_Asym_Pause;
753 }
754 }
755 giga = 0;
756 if (alx_hw_giga(hw))
757 giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
758
759 cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
760
761 if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
762 alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
763 alx_write_phy_reg(hw, MII_BMCR, cr))
764 err = -EBUSY;
765 } else {
766 cr = BMCR_RESET;
767 if (ethadv == ADVERTISED_100baseT_Half ||
768 ethadv == ADVERTISED_100baseT_Full)
769 cr |= BMCR_SPEED100;
770 if (ethadv == ADVERTISED_10baseT_Full ||
771 ethadv == ADVERTISED_100baseT_Full)
772 cr |= BMCR_FULLDPLX;
773
774 err = alx_write_phy_reg(hw, MII_BMCR, cr);
775 }
776
777 if (!err) {
778 alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
779 val |= ethadv_to_hw_cfg(hw, ethadv);
780 }
781
782 alx_write_mem32(hw, ALX_DRV, val);
783
784 return err;
785}
786
787
/* Post-link-up PHY tuning for rev A and B0 chips: adjust analog
 * thresholds based on negotiated speed, measured cable length and AGC
 * gain. No-op for other revisions.
 */
void alx_post_phy_link(struct alx_hw *hw)
{
	u16 phy_val, len, agc;
	u8 revid = alx_hw_revision(hw);
	bool adj_th = revid == ALX_REV_B0;
	int speed;

	/* link_speed is SPEED_* + DUPLEX_* (see hw.h); subtracting the
	 * "% 10" remainder strips the duplex component, leaving SPEED_* */
	if (hw->link_speed == SPEED_UNKNOWN)
		speed = SPEED_UNKNOWN;
	else
		speed = hw->link_speed - hw->link_speed % 10;

	/* only rev B0 and rev A need this workaround */
	if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
		return;

	/* 1000BT/AZ, wrong cable length */
	if (speed != SPEED_UNKNOWN) {
		/* read cable length and AGC/VGA gain from the PHY */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
				 &phy_val);
		len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
		alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
		agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);

		/* long cable (or length unreadable with high gain):
		 * raise the analog detection threshold */
		if ((speed == SPEED_1000 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
		      (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
		    (speed == SPEED_100 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
		      (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_LONG);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					 &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val | ALX_AFE_10BT_100M_TH);
		} else {
			/* short cable: restore default detection levels */
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_DEF);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
					 ALX_MIIEXT_AFE, &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val & ~ALX_AFE_10BT_100M_TH);
		}

		/* threshold adjust (rev B0 with the link patch only) */
		if (adj_th && hw->lnk_patch) {
			if (speed == SPEED_100) {
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
						  ALX_MSE16DB_UP);
			} else if (speed == SPEED_1000) {
				/*
				 * Giga link threshold, raise the tolerance of
				 * noise 50%
				 */
				alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						 &phy_val);
				ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
					      ALX_MSE20DB_TH_HI);
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						  phy_val);
			}
		}
	} else {
		/* link down: revert the AFE threshold and, on B0 with the
		 * link patch, restore default MSE thresholds */
		alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				  phy_val & ~ALX_AFE_10BT_100M_TH);

		if (adj_th && hw->lnk_patch) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
					  ALX_MSE16DB_DOWN);
			alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
			ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
				      ALX_MSE20DB_TH_DEF);
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
		}
	}
}
866
867
/* NOTE:
 * 1. phy link must be established before calling this function
 * 2. wol options (pattern, magic, link, etc.) must be configured before
 *    calling it.
 *
 * Prepares MAC and PHY for suspend: deselects the serdes pclk,
 * reconfigures the MAC for the wake-up link, and either powers the PHY
 * down completely (no wake sources) or leaves just enough running for
 * WoL. Finally enables the internal oscillator and D3 PLL-off.
 *
 * @speed: current link as SPEED_* + DUPLEX_* (see hw->link_speed).
 * Returns 0 on success or a negative error from a PHY access.
 */
int alx_pre_suspend(struct alx_hw *hw, int speed)
{
	u32 master, mac, phy, val;
	int err = 0;

	master = alx_read_mem32(hw, ALX_MASTER);
	master &= ~ALX_MASTER_PCLKSEL_SRDS;
	mac = hw->rx_ctrl;
	/* 10/100 half */
	ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
	mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);

	phy = alx_read_mem32(hw, ALX_PHY_CTRL);
	phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
	phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
	       ALX_PHY_CTRL_HIB_EN;

	/* without any activity */
	if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
		/* no wake source: mask PHY interrupts and power it down */
		err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
		if (err)
			return err;
		phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
	} else {
		if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
			mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
		if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
			mac |= ALX_MAC_CTRL_TX_EN;
		/* speed is SPEED_* + DUPLEX_*; "% 10" extracts the duplex */
		if (speed % 10 == DUPLEX_FULL)
			mac |= ALX_MAC_CTRL_FULLD;
		if (speed >= SPEED_1000)
			ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
				      ALX_MAC_CTRL_SPEED_1000);
		phy |= ALX_PHY_CTRL_DSPRST_OUT;
		err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
					ALX_MIIEXT_S3DIG10,
					ALX_MIIEXT_S3DIG10_SL);
		if (err)
			return err;
	}

	alx_enable_osc(hw);
	hw->rx_ctrl = mac;	/* keep the cached MAC_CTRL copy in sync */
	alx_write_mem32(hw, ALX_MASTER, master);
	alx_write_mem32(hw, ALX_MAC_CTRL, mac);
	alx_write_mem32(hw, ALX_PHY_CTRL, phy);

	/* set val of PDLL D3PLLOFF */
	val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
	val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
	alx_write_mem32(hw, ALX_PDLL_TRNS1, val);

	return 0;
}
926
927bool alx_phy_configured(struct alx_hw *hw)
928{
929 u32 cfg, hw_cfg;
930
931 cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
932 cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
933 hw_cfg = alx_get_phy_config(hw);
934
935 if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
936 return false;
937
938 return cfg == hw_cfg;
939}
940
/* Read the current link state from the PHY.
 *
 * On return *speed holds SPEED_* + DUPLEX_* (SPEED_UNKNOWN when there
 * is no link). Returns 1 if link is up, 0 if down, or a negative errno
 * on MDIO failure / unresolved speed-duplex.
 */
int alx_get_phy_link(struct alx_hw *hw, int *speed)
{
	struct pci_dev *pdev = hw->pdev;
	u16 bmsr, giga;
	int err;

	/* BMSR's link-status bit is latched, so it is read twice on
	 * purpose: the first read clears a stale latched link-down, the
	 * second reflects the current state */
	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	if (!(bmsr & BMSR_LSTATUS)) {
		*speed = SPEED_UNKNOWN;
		return 0;
	}

	/* speed/duplex result is saved in PHY Specific Status Register */
	err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
	if (err)
		return err;

	if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
		goto wrong_speed;

	switch (giga & ALX_GIGA_PSSR_SPEED) {
	case ALX_GIGA_PSSR_1000MBS:
		*speed = SPEED_1000;
		break;
	case ALX_GIGA_PSSR_100MBS:
		*speed = SPEED_100;
		break;
	case ALX_GIGA_PSSR_10MBS:
		*speed = SPEED_10;
		break;
	default:
		goto wrong_speed;
	}

	/* encode duplex into the same value (DUPLEX_FULL == 1) */
	*speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	return 1;

wrong_speed:
	dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
	return -EINVAL;
}
989
990int alx_clear_phy_intr(struct alx_hw *hw)
991{
992 u16 isr;
993
994 /* clear interrupt status by reading it */
995 return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
996}
997
998int alx_config_wol(struct alx_hw *hw)
999{
1000 u32 wol = 0;
1001 int err = 0;
1002
1003 /* turn on magic packet event */
1004 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
1005 wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
1006
1007 /* turn on link up event */
1008 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
1009 wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
1010 /* only link up can wake up */
1011 err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
1012 }
1013 alx_write_mem32(hw, ALX_WOL0, wol);
1014
1015 return err;
1016}
1017
1018void alx_disable_rss(struct alx_hw *hw)
1019{
1020 u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
1021
1022 ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
1023 alx_write_mem32(hw, ALX_RXQ0, ctrl);
1024}
1025
/* Program the static device configuration: MAC address, clock gating,
 * interrupt moderation, MTU-derived thresholds, tx/rx queue settings,
 * rx-FIFO flow-control watermarks, DMA burst parameters and the
 * multi-queue weights. Called while bringing the device up.
 */
void alx_configure_basic(struct alx_hw *hw)
{
	u32 val, raw_mtu, max_payload;
	u16 val16;
	u8 chip_rev = alx_hw_revision(hw);

	alx_set_macaddr(hw, hw->mac_addr);

	alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);

	/* idle timeout to switch clk_125M */
	if (chip_rev >= ALX_REV_B0)
		alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
				ALX_IDLE_DECISN_TIMER_DEF);

	alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);

	/* interrupt moderation: hw->imt programs both directions */
	val = alx_read_mem32(hw, ALX_MASTER);
	val |= ALX_MASTER_IRQMOD2_EN |
	       ALX_MASTER_IRQMOD1_EN |
	       ALX_MASTER_SYSALVTIMER_EN;
	alx_write_mem32(hw, ALX_MASTER, val);
	alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
			(hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
	/* intr re-trig timeout */
	alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
	/* tpd threshold to trig int */
	alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
	alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);

	/* the extra 8 bytes cover FCS + VLAN tag (cf. ALX_RAW_MTU) */
	raw_mtu = hw->mtu + ETH_HLEN;
	alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
	if (raw_mtu > ALX_MTU_JUMBO_TH)
		hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;

	/* TSO threshold, in 8-byte units, capped at the jumbo limit */
	if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
		val = (raw_mtu + 8 + 7) >> 3;
	else
		val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
	alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);

	max_payload = pcie_get_readrq(hw->pdev) >> 8;
	/*
	 * if BIOS had changed the default dma read max length,
	 * restore it to default value
	 */
	if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
		pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);

	/* tx queue burst/prefetch configuration */
	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
	      ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
	      ALX_TXQ0_SUPT_IPOPT |
	      ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
	alx_write_mem32(hw, ALX_TXQ0, val);
	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
	      ALX_HQTPD_BURST_EN;
	alx_write_mem32(hw, ALX_HQTPD, val);

	/* rxq, flow control */
	val = alx_read_mem32(hw, ALX_SRAM5);
	val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
	/* NOTE(review): both branches compute the same XOFF threshold
	 * (val16); only the XON calculation differs. Looks intentional
	 * per vendor code but worth confirming against the datasheet. */
	if (val > ALX_SRAM_RXF_LEN_8K) {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
	} else {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_MTU_STD_ALGN) >> 3;
	}
	alx_write_mem32(hw, ALX_RXQ2,
			val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
			val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
	val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
	      ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
	      ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
	      ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
	      ALX_RXQ0_IPV6_PARSE_EN;

	if (alx_hw_giga(hw))
		ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
			      ALX_RXQ0_ASPM_THRESH_100M);

	alx_write_mem32(hw, ALX_RXQ0, val);

	/* DMA: read ordering, burst lengths, delays, channel select */
	val = alx_read_mem32(hw, ALX_DMA);
	val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
	      ALX_DMA_RREQ_PRI_DATA |
	      max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
	      ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
	      ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
	      (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
	alx_write_mem32(hw, ALX_DMA, val);

	/* default multi-tx-q weights */
	val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
	      4 << ALX_WRR_PRI0_SHIFT |
	      4 << ALX_WRR_PRI1_SHIFT |
	      4 << ALX_WRR_PRI2_SHIFT |
	      4 << ALX_WRR_PRI3_SHIFT;
	alx_write_mem32(hw, ALX_WRR, val);
}
1128
1129static inline u32 alx_speed_to_ethadv(int speed)
1130{
1131 switch (speed) {
1132 case SPEED_1000 + DUPLEX_FULL:
1133 return ADVERTISED_1000baseT_Full;
1134 case SPEED_100 + DUPLEX_FULL:
1135 return ADVERTISED_100baseT_Full;
1136 case SPEED_100 + DUPLEX_HALF:
1137 return ADVERTISED_10baseT_Half;
1138 case SPEED_10 + DUPLEX_FULL:
1139 return ADVERTISED_10baseT_Full;
1140 case SPEED_10 + DUPLEX_HALF:
1141 return ADVERTISED_10baseT_Half;
1142 default:
1143 return 0;
1144 }
1145}
1146
/* Select a low-power link speed for suspend/WoL, renegotiating down
 * when the link partner allows it.
 *
 * On success *speed holds the chosen SPEED_* + DUPLEX_*.
 * NOTE(review): when the link is down this returns 0 WITHOUT writing
 * *speed -- callers must not rely on it in that case.
 * Returns 0 on success, negative errno on PHY access failure or
 * -ETIMEDOUT if the renegotiated link never comes up.
 */
int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
{
	int i, err, spd;
	u16 lpa;

	err = alx_get_phy_link(hw, &spd);
	if (err < 0)
		return err;

	if (spd == SPEED_UNKNOWN)
		return 0;

	err = alx_read_phy_reg(hw, MII_LPA, &lpa);
	if (err)
		return err;

	/* partner did not autonegotiate: keep the current speed */
	if (!(lpa & LPA_LPACK)) {
		*speed = spd;
		return 0;
	}

	/* pick a low-power mode: 10M preferred over 100M, full duplex
	 * preferred over half at each speed */
	if (lpa & LPA_10FULL)
		*speed = SPEED_10 + DUPLEX_FULL;
	else if (lpa & LPA_10HALF)
		*speed = SPEED_10 + DUPLEX_HALF;
	else if (lpa & LPA_100FULL)
		*speed = SPEED_100 + DUPLEX_FULL;
	else
		*speed = SPEED_100 + DUPLEX_HALF;

	if (*speed != spd) {
		/* mask PHY interrupts while renegotiating */
		err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
		if (err)
			return err;
		err = alx_setup_speed_duplex(hw,
					     alx_speed_to_ethadv(*speed) |
					     ADVERTISED_Autoneg,
					     ALX_FC_ANEG | ALX_FC_RX |
					     ALX_FC_TX);
		if (err)
			return err;

		/* wait for linkup */
		for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
			int speed2;

			msleep(100);

			err = alx_get_phy_link(hw, &speed2);
			if (err < 0)
				return err;
			if (speed2 != SPEED_UNKNOWN)
				break;
		}
		if (i == ALX_MAX_SETUP_LNK_CYCLE)
			return -ETIMEDOUT;
	}

	return 0;
}
1207
1208bool alx_get_phy_info(struct alx_hw *hw)
1209{
1210 u16 devs1, devs2;
1211
1212 if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
1213 alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
1214 return false;
1215
1216 /* since we haven't PMA/PMD status2 register, we can't
1217 * use mdio45_probe function for prtad and mmds.
1218 * use fixed MMD3 to get mmds.
1219 */
1220 if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
1221 alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
1222 return false;
1223 hw->mdio.mmds = devs1 | devs2 << 16;
1224
1225 return true;
1226}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644
index 000000000000..65e723d2172a
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -0,0 +1,499 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef ALX_HW_H_
36#define ALX_HW_H_
37#include <linux/types.h>
38#include <linux/mdio.h>
39#include <linux/pci.h>
40#include "reg.h"
41
42/* Transmit Packet Descriptor, contains 4 32-bit words.
43 *
44 * 31 16 0
45 * +----------------+----------------+
46 * | vlan-tag | buf length |
47 * +----------------+----------------+
48 * | Word 1 |
49 * +----------------+----------------+
50 * | Word 2: buf addr lo |
51 * +----------------+----------------+
52 * | Word 3: buf addr hi |
53 * +----------------+----------------+
54 *
55 * Word 2 and 3 combine to form a 64-bit buffer address
56 *
57 * Word 1 has three forms, depending on the state of bit 8/12/13:
58 * if bit8 =='1', the definition is just for custom checksum offload.
59 * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
60 * for the skb is special for LSO V2, Word 2 become total skb length ,
61 * Word 3 is meaningless.
62 * other condition, the definition is for general skb or ip/tcp/udp
63 * checksum or LSO(TSO) offload.
64 *
65 * Here is the depiction:
66 *
67 * 0-+ 0-+
68 * 1 | 1 |
69 * 2 | 2 |
70 * 3 | Payload offset 3 | L4 header offset
71 * 4 | (7:0) 4 | (7:0)
72 * 5 | 5 |
73 * 6 | 6 |
74 * 7-+ 7-+
75 * 8 Custom csum enable = 1 8 Custom csum enable = 0
76 * 9 General IPv4 checksum 9 General IPv4 checksum
77 * 10 General TCP checksum 10 General TCP checksum
78 * 11 General UDP checksum 11 General UDP checksum
79 * 12 Large Send Segment enable 12 Large Send Segment enable
80 * 13 Large Send Segment type 13 Large Send Segment type
81 * 14 VLAN tagged 14 VLAN tagged
82 * 15 Insert VLAN tag 15 Insert VLAN tag
83 * 16 IPv4 packet 16 IPv4 packet
84 * 17 Ethernet frame type 17 Ethernet frame type
85 * 18-+ 18-+
86 * 19 | 19 |
87 * 20 | 20 |
88 * 21 | Custom csum offset 21 |
89 * 22 | (25:18) 22 |
90 * 23 | 23 | MSS (30:18)
91 * 24 | 24 |
92 * 25-+ 25 |
93 * 26-+ 26 |
94 * 27 | 27 |
95 * 28 | Reserved 28 |
96 * 29 | 29 |
97 * 30-+ 30-+
98 * 31 End of packet 31 End of packet
99 */
/* One transmit packet descriptor as consumed by the DMA engine. The
 * word1 bit layout is depicted above and broken out in the TPD_*
 * masks/shifts below; all fields are little-endian.
 */
struct alx_txd {
	__le16 len;		/* word 0 [15:0]: buffer length */
	__le16 vlan_tag;	/* word 0 [31:16]: VLAN tag to insert */
	__le32 word1;		/* offload/control flags (TPD_*) */
	union {
		__le64 addr;	/* words 2+3: 64-bit DMA buffer address */
		struct {
			/* LSO v2 first descriptor: total skb length */
			__le32 pkt_len;
			__le32 resvd;
		} l;
	} adrl;
} __packed;
112
113/* tpd word 1 */
114#define TPD_CXSUMSTART_MASK 0x00FF
115#define TPD_CXSUMSTART_SHIFT 0
116#define TPD_L4HDROFFSET_MASK 0x00FF
117#define TPD_L4HDROFFSET_SHIFT 0
118#define TPD_CXSUM_EN_MASK 0x0001
119#define TPD_CXSUM_EN_SHIFT 8
120#define TPD_IP_XSUM_MASK 0x0001
121#define TPD_IP_XSUM_SHIFT 9
122#define TPD_TCP_XSUM_MASK 0x0001
123#define TPD_TCP_XSUM_SHIFT 10
124#define TPD_UDP_XSUM_MASK 0x0001
125#define TPD_UDP_XSUM_SHIFT 11
126#define TPD_LSO_EN_MASK 0x0001
127#define TPD_LSO_EN_SHIFT 12
128#define TPD_LSO_V2_MASK 0x0001
129#define TPD_LSO_V2_SHIFT 13
130#define TPD_VLTAGGED_MASK 0x0001
131#define TPD_VLTAGGED_SHIFT 14
132#define TPD_INS_VLTAG_MASK 0x0001
133#define TPD_INS_VLTAG_SHIFT 15
134#define TPD_IPV4_MASK 0x0001
135#define TPD_IPV4_SHIFT 16
136#define TPD_ETHTYPE_MASK 0x0001
137#define TPD_ETHTYPE_SHIFT 17
138#define TPD_CXSUMOFFSET_MASK 0x00FF
139#define TPD_CXSUMOFFSET_SHIFT 18
140#define TPD_MSS_MASK 0x1FFF
141#define TPD_MSS_SHIFT 18
142#define TPD_EOP_MASK 0x0001
143#define TPD_EOP_SHIFT 31
144
145#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
146
147/* Receive Free Descriptor */
/* Receive Free Descriptor: publishes one empty rx buffer to the NIC */
struct alx_rfd {
	__le64 addr; /* data buffer address, length is
		      * declared in register --- every
		      * buffer has the same size
		      */
} __packed;
154
155/* Receive Return Descriptor, contains 4 32-bit words.
156 *
157 * 31 16 0
158 * +----------------+----------------+
159 * | Word 0 |
160 * +----------------+----------------+
161 * | Word 1: RSS Hash value |
162 * +----------------+----------------+
163 * | Word 2 |
164 * +----------------+----------------+
165 * | Word 3 |
166 * +----------------+----------------+
167 *
168 * Word 0 depiction & Word 2 depiction:
169 *
170 * 0--+ 0--+
171 * 1 | 1 |
172 * 2 | 2 |
173 * 3 | 3 |
174 * 4 | 4 |
175 * 5 | 5 |
176 * 6 | 6 |
177 * 7 | IP payload checksum 7 | VLAN tag
178 * 8 | (15:0) 8 | (15:0)
179 * 9 | 9 |
180 * 10 | 10 |
181 * 11 | 11 |
182 * 12 | 12 |
183 * 13 | 13 |
184 * 14 | 14 |
185 * 15-+ 15-+
186 * 16-+ 16-+
187 * 17 | Number of RFDs 17 |
188 * 18 | (19:16) 18 |
189 * 19-+ 19 | Protocol ID
190 * 20-+ 20 | (23:16)
191 * 21 | 21 |
192 * 22 | 22 |
193 * 23 | 23-+
194 * 24 | 24 | Reserved
195 * 25 | Start index of RFD-ring 25-+
196 * 26 | (31:20) 26 | RSS Q-num (27:25)
197 * 27 | 27-+
198 * 28 | 28-+
199 * 29 | 29 | RSS Hash algorithm
200 * 30 | 30 | (31:28)
201 * 31-+ 31-+
202 *
203 * Word 3 depiction:
204 *
205 * 0--+
206 * 1 |
207 * 2 |
208 * 3 |
209 * 4 |
210 * 5 |
211 * 6 |
212 * 7 | Packet length (include FCS)
213 * 8 | (13:0)
214 * 9 |
215 * 10 |
216 * 11 |
217 * 12 |
218 * 13-+
219 * 14 L4 Header checksum error
220 * 15 IPv4 checksum error
221 * 16 VLAN tagged
222 * 17-+
223 * 18 | Protocol ID (19:17)
224 * 19-+
225 * 20 Receive error summary
226 * 21 FCS(CRC) error
227 * 22 Frame alignment error
228 * 23 Truncated packet
229 * 24 Runt packet
230 * 25 Incomplete packet due to insufficient rx-desc
231 * 26 Broadcast packet
232 * 27 Multicast packet
233 * 28 Ethernet type (EII or 802.3)
234 * 29 FIFO overflow
235 * 30 Length error (for 802.3, length field mismatch with actual len)
236 * 31 Updated, indicate to driver that this RRD is refreshed.
237 */
/* Receive Return Descriptor, written back by hardware for each
 * received packet; bit layout is depicted above, fields broken out in
 * the RRD_* masks/shifts below.
 */
struct alx_rrd {
	__le32 word0;		/* IP payload csum, RFD count/start index */
	__le32 rss_hash;	/* RSS hash value of the packet */
	__le32 word2;		/* VLAN tag, protocol ID, RSS queue/algo */
	__le32 word3;		/* packet length, error bits, RRD_UPDATED */
} __packed;
244
245/* rrd word 0 */
246#define RRD_XSUM_MASK 0xFFFF
247#define RRD_XSUM_SHIFT 0
248#define RRD_NOR_MASK 0x000F
249#define RRD_NOR_SHIFT 16
250#define RRD_SI_MASK 0x0FFF
251#define RRD_SI_SHIFT 20
252
253/* rrd word 2 */
254#define RRD_VLTAG_MASK 0xFFFF
255#define RRD_VLTAG_SHIFT 0
256#define RRD_PID_MASK 0x00FF
257#define RRD_PID_SHIFT 16
258/* non-ip packet */
259#define RRD_PID_NONIP 0
260/* ipv4(only) */
261#define RRD_PID_IPV4 1
262/* tcp/ipv6 */
263#define RRD_PID_IPV6TCP 2
264/* tcp/ipv4 */
265#define RRD_PID_IPV4TCP 3
266/* udp/ipv6 */
267#define RRD_PID_IPV6UDP 4
268/* udp/ipv4 */
269#define RRD_PID_IPV4UDP 5
270/* ipv6(only) */
271#define RRD_PID_IPV6 6
272/* LLDP packet */
273#define RRD_PID_LLDP 7
274/* 1588 packet */
275#define RRD_PID_1588 8
276#define RRD_RSSQ_MASK 0x0007
277#define RRD_RSSQ_SHIFT 25
278#define RRD_RSSALG_MASK 0x000F
279#define RRD_RSSALG_SHIFT 28
280#define RRD_RSSALG_TCPV6 0x1
281#define RRD_RSSALG_IPV6 0x2
282#define RRD_RSSALG_TCPV4 0x4
283#define RRD_RSSALG_IPV4 0x8
284
285/* rrd word 3 */
286#define RRD_PKTLEN_MASK 0x3FFF
287#define RRD_PKTLEN_SHIFT 0
288#define RRD_ERR_L4_MASK 0x0001
289#define RRD_ERR_L4_SHIFT 14
290#define RRD_ERR_IPV4_MASK 0x0001
291#define RRD_ERR_IPV4_SHIFT 15
292#define RRD_VLTAGGED_MASK 0x0001
293#define RRD_VLTAGGED_SHIFT 16
294#define RRD_OLD_PID_MASK 0x0007
295#define RRD_OLD_PID_SHIFT 17
296#define RRD_ERR_RES_MASK 0x0001
297#define RRD_ERR_RES_SHIFT 20
298#define RRD_ERR_FCS_MASK 0x0001
299#define RRD_ERR_FCS_SHIFT 21
300#define RRD_ERR_FAE_MASK 0x0001
301#define RRD_ERR_FAE_SHIFT 22
302#define RRD_ERR_TRUNC_MASK 0x0001
303#define RRD_ERR_TRUNC_SHIFT 23
304#define RRD_ERR_RUNT_MASK 0x0001
305#define RRD_ERR_RUNT_SHIFT 24
306#define RRD_ERR_ICMP_MASK 0x0001
307#define RRD_ERR_ICMP_SHIFT 25
308#define RRD_BCAST_MASK 0x0001
309#define RRD_BCAST_SHIFT 26
310#define RRD_MCAST_MASK 0x0001
311#define RRD_MCAST_SHIFT 27
312#define RRD_ETHTYPE_MASK 0x0001
313#define RRD_ETHTYPE_SHIFT 28
314#define RRD_ERR_FIFOV_MASK 0x0001
315#define RRD_ERR_FIFOV_SHIFT 29
316#define RRD_ERR_LEN_MASK 0x0001
317#define RRD_ERR_LEN_SHIFT 30
318#define RRD_UPDATED_MASK 0x0001
319#define RRD_UPDATED_SHIFT 31
320
321
322#define ALX_MAX_SETUP_LNK_CYCLE 50
323
324/* for FlowControl */
325#define ALX_FC_RX 0x01
326#define ALX_FC_TX 0x02
327#define ALX_FC_ANEG 0x04
328
329/* for sleep control */
330#define ALX_SLEEP_WOL_PHY 0x00000001
331#define ALX_SLEEP_WOL_MAGIC 0x00000002
332#define ALX_SLEEP_CIFS 0x00000004
333#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
334 ALX_SLEEP_WOL_MAGIC | \
335 ALX_SLEEP_CIFS)
336
337/* for RSS hash type */
338#define ALX_RSS_HASH_TYPE_IPV4 0x1
339#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
340#define ALX_RSS_HASH_TYPE_IPV6 0x4
341#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
342#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
343 ALX_RSS_HASH_TYPE_IPV4_TCP | \
344 ALX_RSS_HASH_TYPE_IPV6 | \
345 ALX_RSS_HASH_TYPE_IPV6_TCP)
346#define ALX_DEF_RXBUF_SIZE 1536
347#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
348#define ALX_MAX_TSO_PKT_SIZE (7*1024)
349#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
350#define ALX_MIN_FRAME_SIZE 68
351#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
352
353#define ALX_MAX_RX_QUEUES 8
354#define ALX_MAX_TX_QUEUES 4
355#define ALX_MAX_HANDLED_INTRS 5
356
357#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
358 ALX_ISR_DMAW | \
359 ALX_ISR_DMAR | \
360 ALX_ISR_SMB | \
361 ALX_ISR_MANU | \
362 ALX_ISR_TIMER)
363
364#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
365 ALX_ISR_DMAW | ALX_ISR_DMAR)
366
367#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
368 ALX_ISR_TXF_UR | \
369 ALX_ISR_RFD_UR)
370
371#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
372 ALX_ISR_TX_Q1 | \
373 ALX_ISR_TX_Q2 | \
374 ALX_ISR_TX_Q3 | \
375 ALX_ISR_RX_Q0 | \
376 ALX_ISR_RX_Q1 | \
377 ALX_ISR_RX_Q2 | \
378 ALX_ISR_RX_Q3 | \
379 ALX_ISR_RX_Q4 | \
380 ALX_ISR_RX_Q5 | \
381 ALX_ISR_RX_Q6 | \
382 ALX_ISR_RX_Q7)
383
384/* maximum interrupt vectors for msix */
385#define ALX_MAX_MSIX_INTRS 16
386
387#define ALX_GET_FIELD(_data, _field) \
388 (((_data) >> _field ## _SHIFT) & _field ## _MASK)
389
390#define ALX_SET_FIELD(_data, _field, _value) do { \
391 (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
392 (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
393 } while (0)
394
/* Per-adapter hardware state shared between the hw layer and the
 * netdev code. Register access goes through hw_addr; some register
 * values (rx_ctrl) are cached so they can be modified without a read.
 */
struct alx_hw {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;	/* mapped register block (MMIO) */

	/* current & permanent mac addr */
	u8 mac_addr[ETH_ALEN];
	u8 perm_addr[ETH_ALEN];

	u16 mtu;
	/* interrupt moderation timer value, programmed into
	 * ALX_IRQ_MODU_TIMER and ALX_TINT_TIMER (units per datasheet) */
	u16 imt;
	u8 dma_chnl;		/* DMA read channels in use (RCHNL_SEL) */
	u8 max_dma_chnl;
	/* tpd threshold to trig INT */
	u32 ith_tpd;
	u32 rx_ctrl;		/* cached copy of the ALX_MAC_CTRL register */
	u32 mc_hash[2];		/* multicast hash table -- presumably; verify */

	u32 smb_timer;
	/* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
	int link_speed;

	/* auto-neg advertisement or force mode config */
	u32 adv_cfg;
	u8 flowctrl;		/* ALX_FC_* flags */

	u32 sleep_ctrl;		/* ALX_SLEEP_* wake/sleep options */

	spinlock_t mdio_lock;	/* assumed to serialize MDIO access -- confirm */
	struct mdio_if_info mdio;
	u16 phy_id[2];		/* MII_PHYSID1 / MII_PHYSID2 values */

	/* PHY link patch flag */
	bool lnk_patch;
};
429
430static inline int alx_hw_revision(struct alx_hw *hw)
431{
432 return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
433}
434
435static inline bool alx_hw_with_cr(struct alx_hw *hw)
436{
437 return hw->pdev->revision & 1;
438}
439
440static inline bool alx_hw_giga(struct alx_hw *hw)
441{
442 return hw->pdev->device & 1;
443}
444
/* 8-bit MMIO write to device register @reg */
static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
{
	writeb(val, hw->hw_addr + reg);
}
449
/* 16-bit MMIO write to device register @reg */
static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
{
	writew(val, hw->hw_addr + reg);
}
454
/* 16-bit MMIO read from device register @reg */
static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
{
	return readw(hw->hw_addr + reg);
}
459
/* 32-bit MMIO write to device register @reg */
static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
{
	writel(val, hw->hw_addr + reg);
}
464
/* 32-bit MMIO read from device register @reg */
static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
{
	return readl(hw->hw_addr + reg);
}
469
/* Flush posted MMIO writes by performing a read from the register
 * block (the value itself is discarded) */
static inline void alx_post_write(struct alx_hw *hw)
{
	readl(hw->hw_addr);
}
474
475int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
476void alx_reset_phy(struct alx_hw *hw);
477void alx_reset_pcie(struct alx_hw *hw);
478void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
479int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
480void alx_post_phy_link(struct alx_hw *hw);
481int alx_pre_suspend(struct alx_hw *hw, int speed);
482int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
483int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
484int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
485int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
486int alx_get_phy_link(struct alx_hw *hw, int *speed);
487int alx_clear_phy_intr(struct alx_hw *hw);
488int alx_config_wol(struct alx_hw *hw);
489void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
490void alx_start_mac(struct alx_hw *hw);
491int alx_reset_mac(struct alx_hw *hw);
492void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
493bool alx_phy_configured(struct alx_hw *hw);
494void alx_configure_basic(struct alx_hw *hw);
495void alx_disable_rss(struct alx_hw *hw);
496int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
497bool alx_get_phy_info(struct alx_hw *hw);
498
499#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644
index 000000000000..418de8b13165
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -0,0 +1,1625 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/interrupt.h>
38#include <linux/ip.h>
39#include <linux/ipv6.h>
40#include <linux/if_vlan.h>
41#include <linux/mdio.h>
42#include <linux/aer.h>
43#include <linux/bitops.h>
44#include <linux/netdevice.h>
45#include <linux/etherdevice.h>
46#include <net/ip6_checksum.h>
47#include <linux/crc32.h>
48#include "alx.h"
49#include "hw.h"
50#include "reg.h"
51
52const char alx_drv_name[] = "alx";
53
54
55static void alx_free_txbuf(struct alx_priv *alx, int entry)
56{
57 struct alx_buffer *txb = &alx->txq.bufs[entry];
58
59 if (dma_unmap_len(txb, size)) {
60 dma_unmap_single(&alx->hw.pdev->dev,
61 dma_unmap_addr(txb, dma),
62 dma_unmap_len(txb, size),
63 DMA_TO_DEVICE);
64 dma_unmap_len_set(txb, size, 0);
65 }
66
67 if (txb->skb) {
68 dev_kfree_skb_any(txb->skb);
69 txb->skb = NULL;
70 }
71}
72
73static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
74{
75 struct alx_rx_queue *rxq = &alx->rxq;
76 struct sk_buff *skb;
77 struct alx_buffer *cur_buf;
78 dma_addr_t dma;
79 u16 cur, next, count = 0;
80
81 next = cur = rxq->write_idx;
82 if (++next == alx->rx_ringsz)
83 next = 0;
84 cur_buf = &rxq->bufs[cur];
85
86 while (!cur_buf->skb && next != rxq->read_idx) {
87 struct alx_rfd *rfd = &rxq->rfd[cur];
88
89 skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
90 if (!skb)
91 break;
92 dma = dma_map_single(&alx->hw.pdev->dev,
93 skb->data, alx->rxbuf_size,
94 DMA_FROM_DEVICE);
95 if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
96 dev_kfree_skb(skb);
97 break;
98 }
99
100 /* Unfortunately, RX descriptor buffers must be 4-byte
101 * aligned, so we can't use IP alignment.
102 */
103 if (WARN_ON(dma & 3)) {
104 dev_kfree_skb(skb);
105 break;
106 }
107
108 cur_buf->skb = skb;
109 dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
110 dma_unmap_addr_set(cur_buf, dma, dma);
111 rfd->addr = cpu_to_le64(dma);
112
113 cur = next;
114 if (++next == alx->rx_ringsz)
115 next = 0;
116 cur_buf = &rxq->bufs[cur];
117 count++;
118 }
119
120 if (count) {
121 /* flush all updates before updating hardware */
122 wmb();
123 rxq->write_idx = cur;
124 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
125 }
126
127 return count;
128}
129
130static inline int alx_tpd_avail(struct alx_priv *alx)
131{
132 struct alx_tx_queue *txq = &alx->txq;
133
134 if (txq->write_idx >= txq->read_idx)
135 return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
136 return txq->read_idx - txq->write_idx - 1;
137}
138
/* Reap completed TX descriptors up to the hardware's consumer index.
 * Returns true when the software read index has fully caught up with
 * the hardware (no TX work left).
 */
static bool alx_clean_tx_irq(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	sw_read_idx = txq->read_idx;
	/* hardware reports how far it has consumed the ring */
	hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(alx, sw_read_idx);

			if (++sw_read_idx == alx->tx_ringsz)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		/* byte-queue-limits accounting for the completed work */
		netdev_completed_queue(alx->dev, total_packets, total_bytes);
	}

	/* restart a stopped queue once >1/4 of the ring is free again */
	if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(alx) > alx->tx_ringsz/4)
		netif_wake_queue(alx->dev);

	return sw_read_idx == hw_read_idx;
}
176
/* Defer link-state handling to process context (takes RTNL there). */
static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}
181
/* Defer a full device reset to process context (takes RTNL there). */
static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}
186
/* Process received packets from the RX return-descriptor (RRD) ring.
 * Consumes up to @budget packets, refilling the free-descriptor ring
 * as buffers are reclaimed. Returns true if the budget was not
 * exhausted (i.e. RX is drained).
 */
static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;

	while (budget > 0) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		/* hardware sets UPDATED when the descriptor is done */
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		/* sanity: descriptor must refer to exactly the next RFD */
		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return 0;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		/* drop frames the hardware flagged as bad */
		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		/* hardware length includes the FCS; strip it */
		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, alx->dev);

		/* trust the hardware checksum only for known protocols
		 * and only when no L4/IPv4 error bits are set
		 */
		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&alx->napi, skb);
		budget--;

next_pkt:
		if (++rxq->read_idx == alx->rx_ringsz)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == alx->rx_ringsz)
			rxq->rrd_read_idx = 0;

		/* periodically refill while still inside the loop */
		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return budget > 0;
}
263
/* NAPI poll: reap TX completions and receive packets.
 * Returns 0 and re-enables queue interrupts once all work is done;
 * returns nonzero to stay in polling mode otherwise.
 */
static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
	struct alx_hw *hw = &alx->hw;
	bool complete = true;
	unsigned long flags;

	complete = alx_clean_tx_irq(alx) &&
		   alx_clean_rx_irq(alx, budget);

	if (!complete)
		return 1;

	napi_complete(&alx->napi);

	/* enable interrupt */
	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	/* make sure the IMR update reaches the device */
	alx_post_write(hw);

	return 0;
}
289
/* Common interrupt service routine for MSI and legacy interrupts.
 * @intr is the raw ISR value already read by the caller.
 */
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;
	bool write_int_mask = false;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		/* skip re-arming ALX_ISR; the reset path handles it */
		goto out;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* suppress PHY interrupt, because the source
		 * is from PHY internal. only the internal status
		 * is cleared, the interrupt status could be cleared.
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		write_int_mask = true;
		alx_schedule_link_check(alx);
	}

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->napi);
		/* mask rx/tx interrupt, enable them when napi complete */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		write_int_mask = true;
	}

	if (write_int_mask)
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);

	/* clear the DIS bit set by the ACK above */
	alx_write_mem32(hw, ALX_ISR, 0);

 out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}
337
338static irqreturn_t alx_intr_msi(int irq, void *data)
339{
340 struct alx_priv *alx = data;
341
342 return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
343}
344
345static irqreturn_t alx_intr_legacy(int irq, void *data)
346{
347 struct alx_priv *alx = data;
348 struct alx_hw *hw = &alx->hw;
349 u32 intr;
350
351 intr = alx_read_mem32(hw, ALX_ISR);
352
353 if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
354 return IRQ_NONE;
355
356 return alx_intr_handle(alx, intr);
357}
358
/* Reset the software ring indices and program the descriptor ring
 * addresses/sizes into the chip, then have it latch the pointers.
 * All rings share one high-32-bit address register, hence the single
 * allocation in alx_alloc_descriptors().
 */
static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;

	alx->rxq.read_idx = 0;
	alx->rxq.write_idx = 0;
	alx->rxq.rrd_read_idx = 0;
	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	alx->txq.read_idx = 0;
	alx->txq.write_idx = 0;
	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}
383
384static void alx_free_txring_buf(struct alx_priv *alx)
385{
386 struct alx_tx_queue *txq = &alx->txq;
387 int i;
388
389 if (!txq->bufs)
390 return;
391
392 for (i = 0; i < alx->tx_ringsz; i++)
393 alx_free_txbuf(alx, i);
394
395 memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
396 memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
397 txq->write_idx = 0;
398 txq->read_idx = 0;
399
400 netdev_reset_queue(alx->dev);
401}
402
403static void alx_free_rxring_buf(struct alx_priv *alx)
404{
405 struct alx_rx_queue *rxq = &alx->rxq;
406 struct alx_buffer *cur_buf;
407 u16 i;
408
409 if (rxq == NULL)
410 return;
411
412 for (i = 0; i < alx->rx_ringsz; i++) {
413 cur_buf = rxq->bufs + i;
414 if (cur_buf->skb) {
415 dma_unmap_single(&alx->hw.pdev->dev,
416 dma_unmap_addr(cur_buf, dma),
417 dma_unmap_len(cur_buf, size),
418 DMA_FROM_DEVICE);
419 dev_kfree_skb(cur_buf->skb);
420 cur_buf->skb = NULL;
421 dma_unmap_len_set(cur_buf, size, 0);
422 dma_unmap_addr_set(cur_buf, dma, 0);
423 }
424 }
425
426 rxq->write_idx = 0;
427 rxq->read_idx = 0;
428 rxq->rrd_read_idx = 0;
429}
430
/* Release all queued TX and RX buffers (rings themselves stay allocated). */
static void alx_free_buffers(struct alx_priv *alx)
{
	alx_free_txring_buf(alx);
	alx_free_rxring_buf(alx);
}
436
437static int alx_reinit_rings(struct alx_priv *alx)
438{
439 alx_free_buffers(alx);
440
441 alx_init_ring_ptrs(alx);
442
443 if (!alx_refill_rx_ring(alx, GFP_KERNEL))
444 return -ENOMEM;
445
446 return 0;
447}
448
449static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
450{
451 u32 crc32, bit, reg;
452
453 crc32 = ether_crc(ETH_ALEN, addr);
454 reg = (crc32 >> 31) & 0x1;
455 bit = (crc32 >> 26) & 0x1F;
456
457 mc_hash[reg] |= BIT(bit);
458}
459
/* Program the multicast hash table and promiscuous/allmulti bits from
 * the netdev flags and multicast list.
 */
static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	/* with ALLMULTI the hash table is bypassed, skip rebuilding it */
	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}
483
/* ndo_set_rx_mode callback; thin wrapper around __alx_set_rx_mode(). */
static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}
488
489static int alx_set_mac_address(struct net_device *netdev, void *data)
490{
491 struct alx_priv *alx = netdev_priv(netdev);
492 struct alx_hw *hw = &alx->hw;
493 struct sockaddr *addr = data;
494
495 if (!is_valid_ether_addr(addr->sa_data))
496 return -EADDRNOTAVAIL;
497
498 if (netdev->addr_assign_type & NET_ADDR_RANDOM)
499 netdev->addr_assign_type ^= NET_ADDR_RANDOM;
500
501 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
502 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
503 alx_set_macaddr(hw, hw->mac_addr);
504
505 return 0;
506}
507
/* Allocate the software buffer-tracking arrays and the single DMA
 * block holding the TPD, RRD and RFD descriptor rings.
 * Layout within descmem: [tpd ring][rrd ring][rfd ring].
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int alx_alloc_descriptors(struct alx_priv *alx)
{
	alx->txq.bufs = kcalloc(alx->tx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->txq.bufs)
		return -ENOMEM;

	alx->rxq.bufs = kcalloc(alx->rx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->rxq.bufs)
		goto out_free;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		goto out_free;

	/* TPD ring sits at the start of the block */
	alx->txq.tpd = (void *)alx->descmem.virt;
	alx->txq.tpd_dma = alx->descmem.dma;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);

	alx->rxq.rrd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz);
	alx->rxq.rrd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	alx->rxq.rfd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz +
			 sizeof(struct alx_rrd) * alx->rx_ringsz);
	alx->rxq.rfd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz +
			   sizeof(struct alx_rrd) * alx->rx_ringsz;

	return 0;
out_free:
	/* kfree(NULL) is a no-op, so this covers both failure points */
	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);
	return -ENOMEM;
}
567
568static int alx_alloc_rings(struct alx_priv *alx)
569{
570 int err;
571
572 err = alx_alloc_descriptors(alx);
573 if (err)
574 return err;
575
576 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
577 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
578 alx->tx_ringsz = alx->tx_ringsz;
579
580 netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
581
582 alx_reinit_rings(alx);
583 return 0;
584}
585
/* Tear down everything alx_alloc_rings() set up: NAPI registration,
 * queued buffers, tracking arrays and the shared descriptor block.
 */
static void alx_free_rings(struct alx_priv *alx)
{
	netif_napi_del(&alx->napi);
	alx_free_buffers(alx);

	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);

	dma_free_coherent(&alx->hw.pdev->dev,
			  alx->descmem.size,
			  alx->descmem.virt,
			  alx->descmem.dma);
}
599
/* Map all interrupt causes to vector 0 (the driver uses one vector). */
static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
608
/* Clear pending status and unmask all interrupts in int_mask. */
static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	/* flush the writes so the device sees them immediately */
	alx_post_write(hw);
}
618
/* Mask all interrupts and wait for any in-flight handler to finish. */
static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	/* ensure no handler is still running on another CPU */
	synchronize_irq(alx->hw.pdev->irq);
}
629
/* Request the device interrupt, preferring MSI and falling back to a
 * shared legacy line if MSI setup or request_irq() fails.
 * Returns 0 on success, negative errno otherwise.
 */
static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	/* retransmission timer derived from the interrupt moderation value */
	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (!pci_enable_msi(alx->hw.pdev)) {
		alx->msi = true;

		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;
		/* fall back to legacy interrupt */
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	return err;
}
660
661static void alx_free_irq(struct alx_priv *alx)
662{
663 struct pci_dev *pdev = alx->hw.pdev;
664
665 free_irq(pdev->irq, alx);
666
667 if (alx->msi) {
668 pci_disable_msi(alx->hw.pdev);
669 alx->msi = false;
670 }
671}
672
673static int alx_identify_hw(struct alx_priv *alx)
674{
675 struct alx_hw *hw = &alx->hw;
676 int rev = alx_hw_revision(hw);
677
678 if (rev > ALX_REV_C0)
679 return -EINVAL;
680
681 hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
682
683 return 0;
684}
685
686static int alx_init_sw(struct alx_priv *alx)
687{
688 struct pci_dev *pdev = alx->hw.pdev;
689 struct alx_hw *hw = &alx->hw;
690 int err;
691
692 err = alx_identify_hw(alx);
693 if (err) {
694 dev_err(&pdev->dev, "unrecognized chip, aborting\n");
695 return err;
696 }
697
698 alx->hw.lnk_patch =
699 pdev->device == ALX_DEV_ID_AR8161 &&
700 pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
701 pdev->subsystem_device == 0x0091 &&
702 pdev->revision == 0;
703
704 hw->smb_timer = 400;
705 hw->mtu = alx->dev->mtu;
706 alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
707 alx->tx_ringsz = 256;
708 alx->rx_ringsz = 512;
709 hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
710 hw->imt = 200;
711 alx->int_mask = ALX_ISR_MISC;
712 hw->dma_chnl = hw->max_dma_chnl;
713 hw->ith_tpd = alx->tx_ringsz / 3;
714 hw->link_speed = SPEED_UNKNOWN;
715 hw->adv_cfg = ADVERTISED_Autoneg |
716 ADVERTISED_10baseT_Half |
717 ADVERTISED_10baseT_Full |
718 ADVERTISED_100baseT_Full |
719 ADVERTISED_100baseT_Half |
720 ADVERTISED_1000baseT_Full;
721 hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
722
723 hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
724 ALX_MAC_CTRL_MHASH_ALG_HI5B |
725 ALX_MAC_CTRL_BRD_EN |
726 ALX_MAC_CTRL_PCRCE |
727 ALX_MAC_CTRL_CRCE |
728 ALX_MAC_CTRL_RXFC_EN |
729 ALX_MAC_CTRL_TXFC_EN |
730 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
731
732 return err;
733}
734
735
736static netdev_features_t alx_fix_features(struct net_device *netdev,
737 netdev_features_t features)
738{
739 if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
740 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
741
742 return features;
743}
744
/* Stop the data path: carrier off, TX queue disabled, NAPI disabled.
 * trans_start is refreshed to avoid a spurious TX watchdog timeout.
 */
static void alx_netif_stop(struct alx_priv *alx)
{
	alx->dev->trans_start = jiffies;
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		napi_disable(&alx->napi);
	}
}
754
/* Fully quiesce the device: stop the data path, reset the MAC,
 * disable ASPM power states, mask interrupts and drop all buffers.
 */
static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}
769
/* Program the basic MAC configuration, disable RSS and apply the
 * current RX filter mode, then commit the MAC control register.
 */
static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}
780
/* Bring the device back up after a halt/reset: rings, configuration,
 * interrupts, and a deferred link check to re-establish carrier.
 */
static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}
794
/* Full restart of the device; caller must hold RTNL. */
static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}
802
803static int alx_change_mtu(struct net_device *netdev, int mtu)
804{
805 struct alx_priv *alx = netdev_priv(netdev);
806 int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
807
808 if ((max_frame < ALX_MIN_FRAME_SIZE) ||
809 (max_frame > ALX_MAX_FRAME_SIZE))
810 return -EINVAL;
811
812 if (netdev->mtu == mtu)
813 return 0;
814
815 netdev->mtu = mtu;
816 alx->hw.mtu = mtu;
817 alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
818 ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
819 netdev_update_features(netdev);
820 if (netif_running(netdev))
821 alx_reinit(alx);
822 return 0;
823}
824
/* Start the data path: wake TX queues, enable NAPI, announce carrier. */
static void alx_netif_start(struct alx_priv *alx)
{
	netif_tx_wake_all_queues(alx->dev);
	napi_enable(&alx->napi);
	netif_carrier_on(alx->dev);
}
831
/* Shared open path for ndo_open and resume.
 * @resume: true when called from resume -- carrier and queue state
 * are then left to the link-check worker instead of being forced.
 * Returns 0 on success, negative errno on failure.
 */
static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	/* carrier comes up via the deferred link check */
	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	return err;
}
864
/* Shared stop path for ndo_stop and suspend: quiesce, then release
 * the interrupt and all ring resources.
 */
static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
}
871
872static const char *alx_speed_desc(u16 speed)
873{
874 switch (speed) {
875 case SPEED_1000 + DUPLEX_FULL:
876 return "1 Gbps Full";
877 case SPEED_100 + DUPLEX_FULL:
878 return "100 Mbps Full";
879 case SPEED_100 + DUPLEX_HALF:
880 return "100 Mbps Half";
881 case SPEED_10 + DUPLEX_FULL:
882 return "10 Mbps Full";
883 case SPEED_10 + DUPLEX_HALF:
884 return "10 Mbps Half";
885 default:
886 return "Unknown speed";
887 }
888}
889
/* Deferred link-state handler (runs under RTNL from the workqueue).
 * Reads the PHY link state and brings the data path up or down
 * accordingly; schedules a full reset if anything fails.
 */
static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int speed, old_speed;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	err = alx_get_phy_link(hw, &speed);
	if (err < 0)
		goto reset;

	/* re-enable the PHY interrupt that the IRQ handler masked */
	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	old_speed = hw->link_speed;

	if (old_speed == speed)
		return;
	hw->link_speed = speed;

	if (speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(speed));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		/* only (re)start the data path on a down->up transition */
		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}
950
/* ndo_open callback. */
static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}
955
/* ndo_stop callback; always succeeds. */
static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}
961
/* Common shutdown/suspend path: stop the device, drop the PHY to its
 * power-saving speed, arm wake-on-LAN and disable the PCI device.
 * On success, *wol_en tells the caller whether WoL was armed.
 * Returns 0 on success or a negative errno (device left detached).
 */
static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	struct alx_hw *hw = &alx->hw;
	int err, speed;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__alx_stop(alx);

#ifdef CONFIG_PM_SLEEP
	err = pci_save_state(pdev);
	if (err)
		return err;
#endif

	err = alx_select_powersaving_speed(hw, &speed);
	if (err)
		return err;
	err = alx_clear_phy_intr(hw);
	if (err)
		return err;
	err = alx_pre_suspend(hw, speed);
	if (err)
		return err;
	err = alx_config_wol(hw);
	if (err)
		return err;

	*wol_en = false;
	if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
		netif_info(alx, wol, netdev,
			   "wol: ctrl=%X, speed=%X\n",
			   hw->sleep_ctrl, speed);
		device_set_wakeup_enable(&pdev->dev, true);
		*wol_en = true;
	}

	pci_disable_device(pdev);

	return 0;
}
1006
1007static void alx_shutdown(struct pci_dev *pdev)
1008{
1009 int err;
1010 bool wol_en;
1011
1012 err = __alx_shutdown(pdev, &wol_en);
1013 if (!err) {
1014 pci_wake_from_d3(pdev, wol_en);
1015 pci_set_power_state(pdev, PCI_D3hot);
1016 } else {
1017 dev_err(&pdev->dev, "shutdown fail %d\n", err);
1018 }
1019}
1020
1021static void alx_link_check(struct work_struct *work)
1022{
1023 struct alx_priv *alx;
1024
1025 alx = container_of(work, struct alx_priv, link_check_wk);
1026
1027 rtnl_lock();
1028 alx_check_link(alx);
1029 rtnl_unlock();
1030}
1031
1032static void alx_reset(struct work_struct *work)
1033{
1034 struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
1035
1036 rtnl_lock();
1037 alx_reinit(alx);
1038 rtnl_unlock();
1039}
1040
1041static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
1042{
1043 u8 cso, css;
1044
1045 if (skb->ip_summed != CHECKSUM_PARTIAL)
1046 return 0;
1047
1048 cso = skb_checksum_start_offset(skb);
1049 if (cso & 1)
1050 return -EINVAL;
1051
1052 css = cso + skb->csum_offset;
1053 first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
1054 first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
1055 first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
1056
1057 return 0;
1058}
1059
/* DMA-map an skb (head + all fragments) into consecutive TX
 * descriptors. On mapping failure every descriptor written so far is
 * rolled back and -ENOMEM is returned; on success the ring write
 * index points past the last descriptor used.
 */
static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	/* linear head first */
	maplen = skb_headlen(skb);
	dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&alx->hw.pdev->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	/* one descriptor per page fragment */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		/* inherit the offload flags set on the first descriptor */
		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == alx->tx_ringsz)
		txq->write_idx = 0;

	return 0;

err_dma:
	/* unwind every entry mapped so far; write_idx is unchanged,
	 * so the partially-written descriptors will simply be reused
	 */
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(alx, f);
		if (++f == alx->tx_ringsz)
			f = 0;
	}
	return -ENOMEM;
}
1123
/* ndo_start_xmit: map the skb into the TX ring and kick the hardware.
 * NOTE(review): when the ring is too full the packet is dropped (and
 * the queue stopped) rather than returning NETDEV_TX_BUSY -- the
 * low-water check at the end is meant to make this case rare.
 */
static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *first;
	/* worst case: one descriptor for the head plus one per fragment */
	int tpdreq = skb_shinfo(skb)->nr_frags + 1;

	if (alx_tpd_avail(alx) < tpdreq) {
		netif_stop_queue(alx->dev);
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	if (alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(alx, skb) < 0)
		goto drop;

	netdev_sent_queue(alx->dev, skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);

	/* stop early so the next packet is unlikely to be dropped */
	if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
		netif_stop_queue(alx->dev);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1161
/* ndo_tx_timeout: recover from a stuck TX path with a deferred reset. */
static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}
1168
1169static int alx_mdio_read(struct net_device *netdev,
1170 int prtad, int devad, u16 addr)
1171{
1172 struct alx_priv *alx = netdev_priv(netdev);
1173 struct alx_hw *hw = &alx->hw;
1174 u16 val;
1175 int err;
1176
1177 if (prtad != hw->mdio.prtad)
1178 return -EINVAL;
1179
1180 if (devad == MDIO_DEVAD_NONE)
1181 err = alx_read_phy_reg(hw, addr, &val);
1182 else
1183 err = alx_read_phy_ext(hw, devad, addr, &val);
1184
1185 if (err)
1186 return err;
1187 return val;
1188}
1189
1190static int alx_mdio_write(struct net_device *netdev,
1191 int prtad, int devad, u16 addr, u16 val)
1192{
1193 struct alx_priv *alx = netdev_priv(netdev);
1194 struct alx_hw *hw = &alx->hw;
1195
1196 if (prtad != hw->mdio.prtad)
1197 return -EINVAL;
1198
1199 if (devad == MDIO_DEVAD_NONE)
1200 return alx_write_phy_reg(hw, addr, val);
1201
1202 return alx_write_phy_ext(hw, devad, addr, val);
1203}
1204
/* ndo_do_ioctl: forward MII ioctls to the generic MDIO helper.
 * Only valid while the interface is up.
 */
static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}
1214
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry: invoke the appropriate interrupt handler directly. */
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!alx->msi)
		alx_intr_legacy(0, alx);
	else
		alx_intr_msi(0, alx);
}
#endif
1226
/* net_device_ops for the alx driver; wired up in alx_probe(). */
static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features	= alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};
1242
/* Probe one AR816x/AR817x device: claim PCI resources, configure DMA
 * masking, allocate the net_device, bring MAC and PHY into a known good
 * state and register the interface with the networking core.
 *
 * Returns 0 on success or a negative errno; everything acquired along
 * the way is released on failure via the goto ladder at the bottom.
 */
static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int bars, pm_cap, err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		/* 64-bit masking failed; fall back to 32-bit DMA or bail */
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA config, aborting\n");
				goto out_pci_disable;
			}
		}
	}

	/* reserve only the memory BARs; the device has no I/O-port BAR we use */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions(pdev, bars, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed(bars:%d)\n", bars);
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* PM capability is required: the driver relies on D-state control */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	/* make sure the chip is in full-power state before touching it */
	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		goto out_pci_release;

	netdev = alloc_etherdev(sizeof(*alx));
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	/* map BAR 0: all chip registers live in this memory BAR */
	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	/* per-device erratum: MSI and INTX-disable don't mix on some chips */
	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	/* avoid re-resetting a PHY the BIOS/firmware already configured */
	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;

	/* fall back to a random MAC when the EEPROM/eFuse address is bad */
	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	/* wire up the generic MDIO layer used by alx_ioctl() */
	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);

	/* carrier is reported asynchronously once link comes up */
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_selected_regions(pdev, bars);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}
1417
/* Device teardown, the inverse of alx_probe().
 *
 * Order matters: pending work items are flushed first so no worker can
 * touch the hardware mid-teardown, the netdev is unregistered before
 * the register mapping goes away, and free_netdev() comes last since
 * 'alx' itself lives inside the net_device's private area.
 */
static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* frees the embedded alx_priv too -- must be the final step */
	free_netdev(alx->dev);
}
1440
1441#ifdef CONFIG_PM_SLEEP
/* System-suspend callback (dev_pm_ops.suspend).
 *
 * Delegates the actual quiescing and WoL arming to __alx_shutdown(),
 * which reports via wol_en whether wake-on-LAN was configured.  With
 * WoL the device is put into its wake-capable sleep state; without it
 * wake is explicitly disabled and the device parked in D3hot.
 *
 * Returns 0 on success or the error from __alx_shutdown().
 */
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;
	bool wol_en;

	err = __alx_shutdown(pdev, &wol_en);
	if (err) {
		dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
		return err;
	}

	if (wol_en) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
1463
/* System-resume callback (dev_pm_ops.resume).
 *
 * Restores full power and PCI config state, disarms wake sources,
 * then re-runs the PCIe/PHY/MAC reset and link-speed setup sequence
 * since the hardware lost its configuration across suspend.  If the
 * interface was up, the rings and IRQs are rebuilt via __alx_open().
 *
 * Returns 0 on success or a negative errno.
 */
static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	struct alx_hw *hw = &alx->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* re-save so a later suspend starts from this restored state */
	pci_save_state(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	/* link must be re-negotiated from scratch after resume */
	hw->link_speed = SPEED_UNKNOWN;
	alx->int_mask = ALX_ISR_MISC;

	alx_reset_pcie(hw);
	alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		netif_err(alx, hw, alx->dev,
			  "resume:reset_mac fail %d\n", err);
		return -EIO;
	}

	err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
	if (err) {
		netif_err(alx, hw, alx->dev,
			  "resume:setup_speed_duplex fail %d\n", err);
		return -EIO;
	}

	if (netif_running(netdev)) {
		err = __alx_open(alx, true);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	/* err is necessarily 0 here: both failure paths above returned */
	return err;
}
1509#endif
1510
/* AER callback: a PCI channel error was detected.
 *
 * Under the RTNL (which serializes against open/stop and the other AER
 * callbacks) the interface is detached and halted.  For a permanent
 * failure we ask the core to disconnect; otherwise the device is
 * disabled and PCI_ERS_RESULT_NEED_RESET requests a slot reset.
 */
static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}
1536
/* AER callback: the slot has been reset, try to bring the device back.
 *
 * Re-enables the PCI device, restores bus mastering, disarms wake
 * sources and re-resets the PCIe glue and MAC.  Only a successful MAC
 * reset upgrades the result from DISCONNECT to RECOVERED.  The AER
 * uncorrectable status is cleared in either case.
 */
static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}
1566
/* AER callback: error recovery finished, resume normal operation.
 *
 * If the interface was running when the error hit, reactivate the
 * hardware and reattach the netdev, mirroring the detach done in
 * alx_pci_error_detected().  RTNL serializes against open/stop.
 */
static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}
1583
/* PCI Advanced Error Reporting hooks, referenced from alx_driver */
static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};
1589
/* Provide suspend/resume PM ops only when sleep support is built in;
 * otherwise the driver registers no PM callbacks at all.
 */
#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif
1596
1597static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
1598 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
1599 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1600 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
1601 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1602 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
1603 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1604 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
1605 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
1606 {}
1607};
1608
/* PCI driver descriptor tying together probe/remove, shutdown, the AER
 * error handlers and the (possibly NULL) power-management ops.
 */
static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.shutdown    = alx_shutdown,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};
1618
/* Module boilerplate: register/unregister the PCI driver and export the
 * device table for module autoloading via modalias.
 */
module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644
index 000000000000..e4358c98bc4e
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -0,0 +1,810 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef ALX_REG_H
36#define ALX_REG_H
37
38#define ALX_DEV_ID_AR8161 0x1091
39#define ALX_DEV_ID_E2200 0xe091
40#define ALX_DEV_ID_AR8162 0x1090
41#define ALX_DEV_ID_AR8171 0x10A1
42#define ALX_DEV_ID_AR8172 0x10A0
43
44/* rev definition,
45 * bit(0): with xD support
46 * bit(1): with Card Reader function
47 * bit(7:2): real revision
48 */
49#define ALX_PCI_REVID_SHIFT 3
50#define ALX_REV_A0 0
51#define ALX_REV_A1 1
52#define ALX_REV_B0 2
53#define ALX_REV_C0 3
54
55#define ALX_DEV_CTRL 0x0060
56#define ALX_DEV_CTRL_MAXRRS_MIN 2
57
58#define ALX_MSIX_MASK 0x0090
59
60#define ALX_UE_SVRT 0x010C
61#define ALX_UE_SVRT_FCPROTERR BIT(13)
62#define ALX_UE_SVRT_DLPROTERR BIT(4)
63
64/* eeprom & flash load register */
65#define ALX_EFLD 0x0204
66#define ALX_EFLD_F_EXIST BIT(10)
67#define ALX_EFLD_E_EXIST BIT(9)
68#define ALX_EFLD_STAT BIT(5)
69#define ALX_EFLD_START BIT(0)
70
71/* eFuse load register */
72#define ALX_SLD 0x0218
73#define ALX_SLD_STAT BIT(12)
74#define ALX_SLD_START BIT(11)
75#define ALX_SLD_MAX_TO 100
76
77#define ALX_PDLL_TRNS1 0x1104
78#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
79
80#define ALX_PMCTRL 0x12F8
81#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
82/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */
83#define ALX_PMCTRL_ASPM_FCEN BIT(30)
84#define ALX_PMCTRL_SADLY_EN BIT(29)
85#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
86#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
87#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
88/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
89#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
90#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
91#define ALX_PMCTRL_L1REG_TO_DEF 0xF
92#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
93#define ALX_PMCTRL_L1_TIMER_MASK 0x7
94#define ALX_PMCTRL_L1_TIMER_SHIFT 16
95#define ALX_PMCTRL_L1_TIMER_16US 4
96#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
97/* bit13: enable pcie clk switch in L1 state */
98#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
99#define ALX_PMCTRL_L0S_EN BIT(12)
100#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
101#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
102/* bit6: power down serdes RX */
103#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
104#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
105#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
106#define ALX_PMCTRL_L1_EN BIT(3)
107
108/*******************************************************/
109/* following registers are mapped only to memory space */
110/*******************************************************/
111
112#define ALX_MASTER 0x1400
/* bit12: 1:always select pclk from serdes, not sw to 25M */
114#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
/* bit11: irq moderation for rx */
116#define ALX_MASTER_IRQMOD2_EN BIT(11)
/* bit10: irq moderation for tx/rx */
118#define ALX_MASTER_IRQMOD1_EN BIT(10)
119#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
120#define ALX_MASTER_OOB_DIS BIT(6)
121/* bit5: wakeup without pcie clk */
122#define ALX_MASTER_WAKEN_25M BIT(5)
123/* bit0: MAC & DMA reset */
124#define ALX_MASTER_DMA_MAC_RST BIT(0)
125#define ALX_DMA_MAC_RST_TO 50
126
127#define ALX_IRQ_MODU_TIMER 0x1408
128#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
129#define ALX_IRQ_MODU_TIMER1_SHIFT 0
130
131#define ALX_PHY_CTRL 0x140C
132#define ALX_PHY_CTRL_100AB_EN BIT(17)
133/* bit14: affect MAC & PHY, go to low power sts */
134#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
135/* bit13: 1:pll always ON, 0:can switch in lpw */
136#define ALX_PHY_CTRL_PLL_ON BIT(13)
137#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
138#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
139#define ALX_PHY_CTRL_HIB_EN BIT(10)
140#define ALX_PHY_CTRL_IDDQ BIT(7)
141#define ALX_PHY_CTRL_GATE_25M BIT(5)
142#define ALX_PHY_CTRL_LED_MODE BIT(2)
143/* bit0: out of dsp RST state */
144#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
145#define ALX_PHY_CTRL_DSPRST_TO 80
146#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
147 ALX_PHY_CTRL_100AB_EN | \
148 ALX_PHY_CTRL_PLL_ON)
149
150#define ALX_MAC_STS 0x1410
151#define ALX_MAC_STS_TXQ_BUSY BIT(3)
152#define ALX_MAC_STS_RXQ_BUSY BIT(2)
153#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
154#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
155#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
156 ALX_MAC_STS_RXQ_BUSY | \
157 ALX_MAC_STS_TXMAC_BUSY | \
158 ALX_MAC_STS_RXMAC_BUSY)
159
160#define ALX_MDIO 0x1414
161#define ALX_MDIO_MODE_EXT BIT(30)
162#define ALX_MDIO_BUSY BIT(27)
163#define ALX_MDIO_CLK_SEL_MASK 0x7
164#define ALX_MDIO_CLK_SEL_SHIFT 24
165#define ALX_MDIO_CLK_SEL_25MD4 0
166#define ALX_MDIO_CLK_SEL_25MD128 7
167#define ALX_MDIO_START BIT(23)
168#define ALX_MDIO_SPRES_PRMBL BIT(22)
169/* bit21: 1:read,0:write */
170#define ALX_MDIO_OP_READ BIT(21)
171#define ALX_MDIO_REG_MASK 0x1F
172#define ALX_MDIO_REG_SHIFT 16
173#define ALX_MDIO_DATA_MASK 0xFFFF
174#define ALX_MDIO_DATA_SHIFT 0
175#define ALX_MDIO_MAX_AC_TO 120
176
177#define ALX_MDIO_EXTN 0x1448
178#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
179#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
180#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
181#define ALX_MDIO_EXTN_REG_SHIFT 0
182
183#define ALX_SERDES 0x1424
184#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
185#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
186
187#define ALX_LPI_CTRL 0x1440
188#define ALX_LPI_CTRL_EN BIT(0)
189
190/* for B0+, bit[13..] for C0+ */
191#define ALX_HRTBT_EXT_CTRL 0x1AD0
192#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
193#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
194#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
195#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
196#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
197#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
198#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
199#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
200#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
201#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
202#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
203#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
204#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
205#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
206#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
207#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
208#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
209#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
210#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
211#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
212
213#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
214#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
215#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
216#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
217#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
218#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
219
220/* 1B8C ~ 1B94 for C0+ */
221#define ALX_SWOI_ACER_CTRL 0x1B8C
222#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
223#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF
224#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
225#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF
226#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
227
228#define ALX_SWOI_IOAC_CTRL_2 0x1B90
229#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
230#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
231#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
232#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
233#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
234#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
235
236#define ALX_SWOI_IOAC_CTRL_3 0x1B94
237#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
238#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
239#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
240#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
241#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
242#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
243
244/* for B0 */
245#define ALX_IDLE_DECISN_TIMER 0x1474
246/* 1ms */
247#define ALX_IDLE_DECISN_TIMER_DEF 0x400
248
249#define ALX_MAC_CTRL 0x1480
250#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
251#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
252/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
253#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
254#define ALX_MAC_CTRL_BRD_EN BIT(26)
255#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
256#define ALX_MAC_CTRL_SPEED_MASK 0x3
257#define ALX_MAC_CTRL_SPEED_SHIFT 20
258#define ALX_MAC_CTRL_SPEED_10_100 1
259#define ALX_MAC_CTRL_SPEED_1000 2
260#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
261#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
262#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
263#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
264#define ALX_MAC_CTRL_PCRCE BIT(7)
265#define ALX_MAC_CTRL_CRCE BIT(6)
266#define ALX_MAC_CTRL_FULLD BIT(5)
267#define ALX_MAC_CTRL_RXFC_EN BIT(3)
268#define ALX_MAC_CTRL_TXFC_EN BIT(2)
269#define ALX_MAC_CTRL_RX_EN BIT(1)
270#define ALX_MAC_CTRL_TX_EN BIT(0)
271
272#define ALX_STAD0 0x1488
273#define ALX_STAD1 0x148C
274
275#define ALX_HASH_TBL0 0x1490
276#define ALX_HASH_TBL1 0x1494
277
278#define ALX_MTU 0x149C
279#define ALX_MTU_JUMBO_TH 1514
280#define ALX_MTU_STD_ALGN 1536
281
282#define ALX_SRAM5 0x1524
283#define ALX_SRAM_RXF_LEN_MASK 0xFFF
284#define ALX_SRAM_RXF_LEN_SHIFT 0
285#define ALX_SRAM_RXF_LEN_8K (8*1024)
286
287#define ALX_SRAM9 0x1534
288#define ALX_SRAM_LOAD_PTR BIT(0)
289
290#define ALX_RX_BASE_ADDR_HI 0x1540
291
292#define ALX_TX_BASE_ADDR_HI 0x1544
293
294#define ALX_RFD_ADDR_LO 0x1550
295#define ALX_RFD_RING_SZ 0x1560
296#define ALX_RFD_BUF_SZ 0x1564
297
298#define ALX_RRD_ADDR_LO 0x1568
299#define ALX_RRD_RING_SZ 0x1578
300
301/* pri3: highest, pri0: lowest */
302#define ALX_TPD_PRI3_ADDR_LO 0x14E4
303#define ALX_TPD_PRI2_ADDR_LO 0x14E0
304#define ALX_TPD_PRI1_ADDR_LO 0x157C
305#define ALX_TPD_PRI0_ADDR_LO 0x1580
306
307/* producer index is 16bit */
308#define ALX_TPD_PRI3_PIDX 0x1618
309#define ALX_TPD_PRI2_PIDX 0x161A
310#define ALX_TPD_PRI1_PIDX 0x15F0
311#define ALX_TPD_PRI0_PIDX 0x15F2
312
313/* consumer index is 16bit */
314#define ALX_TPD_PRI3_CIDX 0x161C
315#define ALX_TPD_PRI2_CIDX 0x161E
316#define ALX_TPD_PRI1_CIDX 0x15F4
317#define ALX_TPD_PRI0_CIDX 0x15F6
318
319#define ALX_TPD_RING_SZ 0x1584
320
321#define ALX_TXQ0 0x1590
322#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
323#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
324#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
325#define ALX_TXQ0_LSO_8023_EN BIT(7)
326#define ALX_TXQ0_MODE_ENHANCE BIT(6)
327#define ALX_TXQ0_EN BIT(5)
328#define ALX_TXQ0_SUPT_IPOPT BIT(4)
329#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
330#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
331#define ALX_TXQ_TPD_BURSTPREF_DEF 5
332
333#define ALX_TXQ1 0x1594
334/* bit11: drop large packet, len > (rfd buf) */
335#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
336#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
337
338#define ALX_RXQ0 0x15A0
339#define ALX_RXQ0_EN BIT(31)
340#define ALX_RXQ0_RSS_HASH_EN BIT(29)
341#define ALX_RXQ0_RSS_MODE_MASK 0x3
342#define ALX_RXQ0_RSS_MODE_SHIFT 26
343#define ALX_RXQ0_RSS_MODE_DIS 0
344#define ALX_RXQ0_RSS_MODE_MQMI 3
345#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
346#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
347#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
348#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
349#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
350#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
351#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
352#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
353#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
354#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
355#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
356#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
357#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
358#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
359#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
360 ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
361 ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
362 ALX_RXQ0_RSS_HSTYP_IPV4_EN)
363#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
364#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
365#define ALX_RXQ0_ASPM_THRESH_100M 3
366
367#define ALX_RXQ2 0x15A8
368#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
369#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
370#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
371#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
372/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
373 * rx-packet(1522) + delay-of-link(64)
374 * = 3212.
375 */
376#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
377
378#define ALX_DMA 0x15C0
379#define ALX_DMA_RCHNL_SEL_MASK 0x3
380#define ALX_DMA_RCHNL_SEL_SHIFT 26
381#define ALX_DMA_WDLY_CNT_MASK 0xF
382#define ALX_DMA_WDLY_CNT_SHIFT 16
383#define ALX_DMA_WDLY_CNT_DEF 4
384#define ALX_DMA_RDLY_CNT_MASK 0x1F
385#define ALX_DMA_RDLY_CNT_SHIFT 11
386#define ALX_DMA_RDLY_CNT_DEF 15
387/* bit10: 0:tpd with pri, 1: data */
388#define ALX_DMA_RREQ_PRI_DATA BIT(10)
389#define ALX_DMA_RREQ_BLEN_MASK 0x7
390#define ALX_DMA_RREQ_BLEN_SHIFT 4
391#define ALX_DMA_RORDER_MODE_MASK 0x7
392#define ALX_DMA_RORDER_MODE_SHIFT 0
393#define ALX_DMA_RORDER_MODE_OUT 4
394
395#define ALX_WOL0 0x14A0
396#define ALX_WOL0_PME_LINK BIT(5)
397#define ALX_WOL0_LINK_EN BIT(4)
398#define ALX_WOL0_PME_MAGIC_EN BIT(3)
399#define ALX_WOL0_MAGIC_EN BIT(2)
400
401#define ALX_RFD_PIDX 0x15E0
402
403#define ALX_RFD_CIDX 0x15F8
404
405/* MIB */
406#define ALX_MIB_BASE 0x1700
407#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
408#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
409#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
410#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
411
412#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
413#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
414#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
415#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
416
417#define ALX_ISR 0x1600
418#define ALX_ISR_DIS BIT(31)
419#define ALX_ISR_RX_Q7 BIT(30)
420#define ALX_ISR_RX_Q6 BIT(29)
421#define ALX_ISR_RX_Q5 BIT(28)
422#define ALX_ISR_RX_Q4 BIT(27)
423#define ALX_ISR_PCIE_LNKDOWN BIT(26)
424#define ALX_ISR_RX_Q3 BIT(19)
425#define ALX_ISR_RX_Q2 BIT(18)
426#define ALX_ISR_RX_Q1 BIT(17)
427#define ALX_ISR_RX_Q0 BIT(16)
428#define ALX_ISR_TX_Q0 BIT(15)
429#define ALX_ISR_PHY BIT(12)
430#define ALX_ISR_DMAW BIT(10)
431#define ALX_ISR_DMAR BIT(9)
432#define ALX_ISR_TXF_UR BIT(8)
433#define ALX_ISR_TX_Q3 BIT(7)
434#define ALX_ISR_TX_Q2 BIT(6)
435#define ALX_ISR_TX_Q1 BIT(5)
436#define ALX_ISR_RFD_UR BIT(4)
437#define ALX_ISR_RXF_OV BIT(3)
438#define ALX_ISR_MANU BIT(2)
439#define ALX_ISR_TIMER BIT(1)
440#define ALX_ISR_SMB BIT(0)
441
442#define ALX_IMR 0x1604
443
444/* re-send assert msg if SW no response */
445#define ALX_INT_RETRIG 0x1608
446/* 40ms */
447#define ALX_INT_RETRIG_TO 20000
448
449#define ALX_SMB_TIMER 0x15C4
450
451#define ALX_TINT_TPD_THRSHLD 0x15C8
452
453#define ALX_TINT_TIMER 0x15CC
454
455#define ALX_CLK_GATE 0x1814
456#define ALX_CLK_GATE_RXMAC BIT(5)
457#define ALX_CLK_GATE_TXMAC BIT(4)
458#define ALX_CLK_GATE_RXQ BIT(3)
459#define ALX_CLK_GATE_TXQ BIT(2)
460#define ALX_CLK_GATE_DMAR BIT(1)
461#define ALX_CLK_GATE_DMAW BIT(0)
462#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
463 ALX_CLK_GATE_TXMAC | \
464 ALX_CLK_GATE_RXQ | \
465 ALX_CLK_GATE_TXQ | \
466 ALX_CLK_GATE_DMAR | \
467 ALX_CLK_GATE_DMAW)
468
469/* interop between drivers */
470#define ALX_DRV 0x1804
471#define ALX_DRV_PHY_AUTO BIT(28)
472#define ALX_DRV_PHY_1000 BIT(27)
473#define ALX_DRV_PHY_100 BIT(26)
474#define ALX_DRV_PHY_10 BIT(25)
475#define ALX_DRV_PHY_DUPLEX BIT(24)
476/* bit23: adv Pause */
477#define ALX_DRV_PHY_PAUSE BIT(23)
478/* bit22: adv Asym Pause */
479#define ALX_DRV_PHY_MASK 0xFF
480#define ALX_DRV_PHY_SHIFT 21
481#define ALX_DRV_PHY_UNKNOWN 0
482
483/* flag of phy inited */
484#define ALX_PHY_INITED 0x003F
485
486/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
487#define ALX_WOL_CTRL2 0x1830
488#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
489#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
490#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
491#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
492
493#define ALX_WOL_CTRL3 0x1834
494#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
495#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
496
497#define ALX_WOL_CTRL4 0x1838
498#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
499#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
500#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
501#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
502#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
503#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
504#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
505#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
506#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
507#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
508#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
509#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
510#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
511#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
512#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
513#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
514#define ALX_WOL_CTRL4_PT15_EN BIT(15)
515#define ALX_WOL_CTRL4_PT14_EN BIT(14)
516#define ALX_WOL_CTRL4_PT13_EN BIT(13)
517#define ALX_WOL_CTRL4_PT12_EN BIT(12)
518#define ALX_WOL_CTRL4_PT11_EN BIT(11)
519#define ALX_WOL_CTRL4_PT10_EN BIT(10)
520#define ALX_WOL_CTRL4_PT9_EN BIT(9)
521#define ALX_WOL_CTRL4_PT8_EN BIT(8)
522#define ALX_WOL_CTRL4_PT7_EN BIT(7)
523#define ALX_WOL_CTRL4_PT6_EN BIT(6)
524#define ALX_WOL_CTRL4_PT5_EN BIT(5)
525#define ALX_WOL_CTRL4_PT4_EN BIT(4)
526#define ALX_WOL_CTRL4_PT3_EN BIT(3)
527#define ALX_WOL_CTRL4_PT2_EN BIT(2)
528#define ALX_WOL_CTRL4_PT1_EN BIT(1)
529#define ALX_WOL_CTRL4_PT0_EN BIT(0)
530
531#define ALX_WOL_CTRL5 0x183C
532#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
533#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
534#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
535#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
536#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
537#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
538#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
539#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
540
541#define ALX_WOL_CTRL6 0x1840
542#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
543#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
544#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
545#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
546#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
547#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
548#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
549#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
550
551#define ALX_WOL_CTRL7 0x1844
552#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
553#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
554#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
555#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
556#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
557#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
558#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
559#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
560
561#define ALX_WOL_CTRL8 0x1848
562#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
563#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
564#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
565#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
566#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
567#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
568#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
569#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
570
571#define ALX_ACER_FIXED_PTN0 0x1850
572#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
573#define ALX_ACER_FIXED_PTN0_SHIFT 0
574
575#define ALX_ACER_FIXED_PTN1 0x1854
576#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
577#define ALX_ACER_FIXED_PTN1_SHIFT 0
578
579#define ALX_ACER_RANDOM_NUM0 0x1858
580#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
581#define ALX_ACER_RANDOM_NUM0_SHIFT 0
582
583#define ALX_ACER_RANDOM_NUM1 0x185C
584#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
585#define ALX_ACER_RANDOM_NUM1_SHIFT 0
586
587#define ALX_ACER_RANDOM_NUM2 0x1860
588#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
589#define ALX_ACER_RANDOM_NUM2_SHIFT 0
590
591#define ALX_ACER_RANDOM_NUM3 0x1864
592#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
593#define ALX_ACER_RANDOM_NUM3_SHIFT 0
594
595#define ALX_ACER_MAGIC 0x1868
596#define ALX_ACER_MAGIC_EN BIT(31)
597#define ALX_ACER_MAGIC_PME_EN BIT(30)
598#define ALX_ACER_MAGIC_MATCH BIT(29)
599#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
600#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
601#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
602#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
603#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
604
605#define ALX_ACER_TIMER 0x186C
606#define ALX_ACER_TIMER_EN BIT(31)
607#define ALX_ACER_TIMER_PME_EN BIT(30)
608#define ALX_ACER_TIMER_MATCH BIT(29)
609#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
610#define ALX_ACER_TIMER_THRES_SHIFT 0
611#define ALX_ACER_TIMER_THRES_DEF 1
612
613/* RSS definitions */
614#define ALX_RSS_KEY0 0x14B0
615#define ALX_RSS_KEY1 0x14B4
616#define ALX_RSS_KEY2 0x14B8
617#define ALX_RSS_KEY3 0x14BC
618#define ALX_RSS_KEY4 0x14C0
619#define ALX_RSS_KEY5 0x14C4
620#define ALX_RSS_KEY6 0x14C8
621#define ALX_RSS_KEY7 0x14CC
622#define ALX_RSS_KEY8 0x14D0
623#define ALX_RSS_KEY9 0x14D4
624
625#define ALX_RSS_IDT_TBL0 0x1B00
626
627#define ALX_MSI_MAP_TBL1 0x15D0
628#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
629#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
630#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
631#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
632#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
633#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
634
635#define ALX_MSI_MAP_TBL2 0x15D8
636#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
637#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
638#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
639#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
640#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
641#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
642
643#define ALX_MSI_ID_MAP 0x15D4
644
645#define ALX_MSI_RETRANS_TIMER 0x1920
646/* bit16: 1:line,0:standard */
647#define ALX_MSI_MASK_SEL_LINE BIT(16)
648#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
649#define ALX_MSI_RETRANS_TM_SHIFT 0
650
651/* CR DMA ctrl */
652
653/* TX QoS */
654#define ALX_WRR 0x1938
655#define ALX_WRR_PRI_MASK 0x3
656#define ALX_WRR_PRI_SHIFT 29
657#define ALX_WRR_PRI_RESTRICT_NONE 3
658#define ALX_WRR_PRI3_MASK 0x1F
659#define ALX_WRR_PRI3_SHIFT 24
660#define ALX_WRR_PRI2_MASK 0x1F
661#define ALX_WRR_PRI2_SHIFT 16
662#define ALX_WRR_PRI1_MASK 0x1F
663#define ALX_WRR_PRI1_SHIFT 8
664#define ALX_WRR_PRI0_MASK 0x1F
665#define ALX_WRR_PRI0_SHIFT 0
666
667#define ALX_HQTPD 0x193C
668#define ALX_HQTPD_BURST_EN BIT(31)
669#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
670#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
671#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
672#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
673#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
674#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
675
676#define ALX_MISC 0x19C0
677#define ALX_MISC_PSW_OCP_MASK 0x7
678#define ALX_MISC_PSW_OCP_SHIFT 21
679#define ALX_MISC_PSW_OCP_DEF 0x7
680#define ALX_MISC_ISO_EN BIT(12)
681#define ALX_MISC_INTNLOSC_OPEN BIT(3)
682
683#define ALX_MSIC2 0x19C8
684#define ALX_MSIC2_CALB_START BIT(0)
685
686#define ALX_MISC3 0x19CC
687/* bit1: 1:Software control 25M */
688#define ALX_MISC3_25M_BY_SW BIT(1)
689/* bit0: 25M switch to intnl OSC */
690#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
691
692/* MSIX tbl in memory space */
693#define ALX_MSIX_ENTRY_BASE 0x2000
694
695/********************* PHY regs definition ***************************/
696
697/* PHY Specific Status Register */
698#define ALX_MII_GIGA_PSSR 0x11
699#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
700#define ALX_GIGA_PSSR_DPLX 0x2000
701#define ALX_GIGA_PSSR_SPEED 0xC000
702#define ALX_GIGA_PSSR_10MBS 0x0000
703#define ALX_GIGA_PSSR_100MBS 0x4000
704#define ALX_GIGA_PSSR_1000MBS 0x8000
705
706/* PHY Interrupt Enable Register */
707#define ALX_MII_IER 0x12
708#define ALX_IER_LINK_UP 0x0400
709#define ALX_IER_LINK_DOWN 0x0800
710
711/* PHY Interrupt Status Register */
712#define ALX_MII_ISR 0x13
713
714#define ALX_MII_DBG_ADDR 0x1D
715#define ALX_MII_DBG_DATA 0x1E
716
717/***************************** debug port *************************************/
718
719#define ALX_MIIDBG_ANACTRL 0x00
720#define ALX_ANACTRL_DEF 0x02EF
721
722#define ALX_MIIDBG_SYSMODCTRL 0x04
723/* en half bias */
724#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
725
726#define ALX_MIIDBG_SRDSYSMOD 0x05
727#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
728#define ALX_SRDSYSMOD_DEF 0x2C46
729
730#define ALX_MIIDBG_HIBNEG 0x0B
731#define ALX_HIBNEG_PSHIB_EN 0x8000
732#define ALX_HIBNEG_HIB_PSE 0x1000
733#define ALX_HIBNEG_DEF 0xBC40
734#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
735 ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
736
737#define ALX_MIIDBG_TST10BTCFG 0x12
738#define ALX_TST10BTCFG_DEF 0x4C04
739
740#define ALX_MIIDBG_AZ_ANADECT 0x15
741#define ALX_AZ_ANADECT_DEF 0x3220
742#define ALX_AZ_ANADECT_LONG 0x3210
743
744#define ALX_MIIDBG_MSE16DB 0x18
745#define ALX_MSE16DB_UP 0x05EA
746#define ALX_MSE16DB_DOWN 0x02EA
747
748#define ALX_MIIDBG_MSE20DB 0x1C
749#define ALX_MSE20DB_TH_MASK 0x7F
750#define ALX_MSE20DB_TH_SHIFT 2
751#define ALX_MSE20DB_TH_DEF 0x2E
752#define ALX_MSE20DB_TH_HI 0x54
753
754#define ALX_MIIDBG_AGC 0x23
755#define ALX_AGC_2_VGA_MASK 0x3FU
756#define ALX_AGC_2_VGA_SHIFT 8
757#define ALX_AGC_LONG1G_LIMT 40
758#define ALX_AGC_LONG100M_LIMT 44
759
760#define ALX_MIIDBG_LEGCYPS 0x29
761#define ALX_LEGCYPS_EN 0x8000
762#define ALX_LEGCYPS_DEF 0x129D
763
764#define ALX_MIIDBG_TST100BTCFG 0x36
765#define ALX_TST100BTCFG_DEF 0xE12C
766
767#define ALX_MIIDBG_GREENCFG 0x3B
768#define ALX_GREENCFG_DEF 0x7078
769
770#define ALX_MIIDBG_GREENCFG2 0x3D
771#define ALX_GREENCFG2_BP_GREEN 0x8000
772#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
773
774/******* dev 3 *********/
775#define ALX_MIIEXT_PCS 3
776
777#define ALX_MIIEXT_CLDCTRL3 0x8003
778#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
779
780#define ALX_MIIEXT_CLDCTRL5 0x8005
781#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
782
783#define ALX_MIIEXT_CLDCTRL6 0x8006
784#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
785#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
786#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
787#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
788
789#define ALX_MIIEXT_VDRVBIAS 0x8062
790#define ALX_VDRVBIAS_DEF 0x3
791
792/********* dev 7 **********/
793#define ALX_MIIEXT_ANEG 7
794
795#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
796#define ALX_LOCAL_EEEADV_1000BT 0x0004
797#define ALX_LOCAL_EEEADV_100BT 0x0002
798
799#define ALX_MIIEXT_AFE 0x801A
800#define ALX_AFE_10BT_100M_TH 0x0040
801
802#define ALX_MIIEXT_S3DIG10 0x8023
803/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
804#define ALX_MIIEXT_S3DIG10_SL 0x0001
805#define ALX_MIIEXT_S3DIG10_DEF 0
806
807#define ALX_MIIEXT_NLP78 0x8027
808#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
809
810#endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 297fc138fa15..986df04fdcb3 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1790,6 +1790,9 @@ static int tg3_poll_fw(struct tg3 *tp)
1790 int i; 1790 int i;
1791 u32 val; 1791 u32 val;
1792 1792
1793 if (tg3_flag(tp, NO_FWARE_REPORTED))
1794 return 0;
1795
1793 if (tg3_flag(tp, IS_SSB_CORE)) { 1796 if (tg3_flag(tp, IS_SSB_CORE)) {
1794 /* We don't use firmware. */ 1797 /* We don't use firmware. */
1795 return 0; 1798 return 0;
@@ -10475,6 +10478,13 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10475 */ 10478 */
10476static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10479static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10477{ 10480{
10481 /* Chip may have been just powered on. If so, the boot code may still
10482 * be running initialization. Wait for it to finish to avoid races in
10483 * accessing the hardware.
10484 */
10485 tg3_enable_register_access(tp);
10486 tg3_poll_fw(tp);
10487
10478 tg3_switch_clocks(tp); 10488 tg3_switch_clocks(tp);
10479 10489
10480 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10490 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 6e8bc9d88c41..94d957d203a6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
244 file->f_pos += offset; 244 file->f_pos += offset;
245 break; 245 break;
246 case 2: 246 case 2:
247 file->f_pos = debug->buffer_len - offset; 247 file->f_pos = debug->buffer_len + offset;
248 break; 248 break;
249 default: 249 default:
250 return -EINVAL; 250 return -EINVAL;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 28a5e425fecf..92306b320840 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -76,6 +76,12 @@ int tulip_refill_rx(struct net_device *dev)
76 76
77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, 77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
78 PCI_DMA_FROMDEVICE); 78 PCI_DMA_FROMDEVICE);
79 if (dma_mapping_error(&tp->pdev->dev, mapping)) {
80 dev_kfree_skb(skb);
81 tp->rx_buffers[entry].skb = NULL;
82 break;
83 }
84
79 tp->rx_buffers[entry].mapping = mapping; 85 tp->rx_buffers[entry].mapping = mapping;
80 86
81 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); 87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 9aef457dacbb..98efc29eaa55 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4259,6 +4259,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4259 netdev->features |= NETIF_F_HIGHDMA; 4259 netdev->features |= NETIF_F_HIGHDMA;
4260 } else { 4260 } else {
4261 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4261 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4262 if (!status)
4263 status = dma_set_coherent_mask(&pdev->dev,
4264 DMA_BIT_MASK(32));
4262 if (status) { 4265 if (status) {
4263 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); 4266 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4264 goto free_netdev; 4267 goto free_netdev;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a2eadc070328..8cb600ccaf1d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -722,8 +722,8 @@ static int sh_eth_check_reset(struct net_device *ndev)
722 mdelay(1); 722 mdelay(1);
723 cnt--; 723 cnt--;
724 } 724 }
725 if (cnt < 0) { 725 if (cnt <= 0) {
726 pr_err("Device reset fail\n"); 726 pr_err("Device reset failed\n");
727 ret = -ETIMEDOUT; 727 ret = -ETIMEDOUT;
728 } 728 }
729 return ret; 729 return ret;
@@ -1260,16 +1260,23 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1260 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1260 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1261 pkt_len = rxdesc->frame_length; 1261 pkt_len = rxdesc->frame_length;
1262 1262
1263#if defined(CONFIG_ARCH_R8A7740)
1264 desc_status >>= 16;
1265#endif
1266
1267 if (--boguscnt < 0) 1263 if (--boguscnt < 0)
1268 break; 1264 break;
1269 1265
1270 if (!(desc_status & RDFEND)) 1266 if (!(desc_status & RDFEND))
1271 ndev->stats.rx_length_errors++; 1267 ndev->stats.rx_length_errors++;
1272 1268
1269#if defined(CONFIG_ARCH_R8A7740)
1270 /*
1271 * In case of almost all GETHER/ETHERs, the Receive Frame State
1272 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1273 * bit 0. However, in case of the R8A7740's GETHER, the RFS
1274 * bits are from bit 25 to bit 16. So, the driver needs right
1275 * shifting by 16.
1276 */
1277 desc_status >>= 16;
1278#endif
1279
1273 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1280 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1274 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1281 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1275 ndev->stats.rx_errors++; 1282 ndev->stats.rx_errors++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 618446ae1ec1..ee919ca8b8a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1899,7 +1899,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1899 1899
1900#ifdef STMMAC_XMIT_DEBUG 1900#ifdef STMMAC_XMIT_DEBUG
1901 if (netif_msg_pktdata(priv)) { 1901 if (netif_msg_pktdata(priv)) {
1902 pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d" 1902 pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
1903 __func__, (priv->cur_tx % txsize), 1903 __func__, (priv->cur_tx % txsize),
1904 (priv->dirty_tx % txsize), entry, first, nfrags); 1904 (priv->dirty_tx % txsize), entry, first, nfrags);
1905 if (priv->extend_desc) 1905 if (priv->extend_desc)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a45f64eef870..101b037a7dce 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1681,7 +1681,7 @@ static int cpsw_probe(struct platform_device *pdev)
1681 priv->rx_packet_max = max(rx_packet_max, 128); 1681 priv->rx_packet_max = max(rx_packet_max, 128);
1682 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); 1682 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
1683 priv->irq_enabled = true; 1683 priv->irq_enabled = true;
1684 if (!ndev) { 1684 if (!priv->cpts) {
1685 pr_err("error allocating cpts\n"); 1685 pr_err("error allocating cpts\n");
1686 goto clean_ndev_ret; 1686 goto clean_ndev_ret;
1687 } 1687 }
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 12aec173564c..c47f0dbcebb5 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -449,10 +449,9 @@ static int davinci_mdio_suspend(struct device *dev)
449 __raw_writel(ctrl, &data->regs->control); 449 __raw_writel(ctrl, &data->regs->control);
450 wait_for_idle(data); 450 wait_for_idle(data);
451 451
452 pm_runtime_put_sync(data->dev);
453
454 data->suspended = true; 452 data->suspended = true;
455 spin_unlock(&data->lock); 453 spin_unlock(&data->lock);
454 pm_runtime_put_sync(data->dev);
456 455
457 return 0; 456 return 0;
458} 457}
@@ -460,15 +459,12 @@ static int davinci_mdio_suspend(struct device *dev)
460static int davinci_mdio_resume(struct device *dev) 459static int davinci_mdio_resume(struct device *dev)
461{ 460{
462 struct davinci_mdio_data *data = dev_get_drvdata(dev); 461 struct davinci_mdio_data *data = dev_get_drvdata(dev);
463 u32 ctrl;
464 462
465 spin_lock(&data->lock);
466 pm_runtime_get_sync(data->dev); 463 pm_runtime_get_sync(data->dev);
467 464
465 spin_lock(&data->lock);
468 /* restart the scan state machine */ 466 /* restart the scan state machine */
469 ctrl = __raw_readl(&data->regs->control); 467 __davinci_mdio_reset(data);
470 ctrl |= CONTROL_ENABLE;
471 __raw_writel(ctrl, &data->regs->control);
472 468
473 data->suspended = false; 469 data->suspended = false;
474 spin_unlock(&data->lock); 470 spin_unlock(&data->lock);
@@ -477,8 +473,8 @@ static int davinci_mdio_resume(struct device *dev)
477} 473}
478 474
479static const struct dev_pm_ops davinci_mdio_pm_ops = { 475static const struct dev_pm_ops davinci_mdio_pm_ops = {
480 .suspend = davinci_mdio_suspend, 476 .suspend_late = davinci_mdio_suspend,
481 .resume = davinci_mdio_resume, 477 .resume_early = davinci_mdio_resume,
482}; 478};
483 479
484static const struct of_device_id davinci_mdio_of_mtable[] = { 480static const struct of_device_id davinci_mdio_of_mtable[] = {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ab2307b5d9a7..4dccead586be 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
285 285
286 skb->protocol = eth_type_trans(skb, net); 286 skb->protocol = eth_type_trans(skb, net);
287 skb->ip_summed = CHECKSUM_NONE; 287 skb->ip_summed = CHECKSUM_NONE;
288 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci); 288 if (packet->vlan_tci & VLAN_TAG_PRESENT)
289 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
290 packet->vlan_tci);
289 291
290 net->stats.rx_packets++; 292 net->stats.rx_packets++;
291 net->stats.rx_bytes += packet->total_data_buflen; 293 net->stats.rx_bytes += packet->total_data_buflen;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index edfddc5f61b4..d811b06e2ccd 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -853,18 +853,24 @@ static int macvlan_changelink(struct net_device *dev,
853 struct nlattr *tb[], struct nlattr *data[]) 853 struct nlattr *tb[], struct nlattr *data[])
854{ 854{
855 struct macvlan_dev *vlan = netdev_priv(dev); 855 struct macvlan_dev *vlan = netdev_priv(dev);
856 if (data && data[IFLA_MACVLAN_MODE]) 856
857 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
858 if (data && data[IFLA_MACVLAN_FLAGS]) { 857 if (data && data[IFLA_MACVLAN_FLAGS]) {
859 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 858 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
860 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; 859 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
861 860 if (vlan->port->passthru && promisc) {
862 if (promisc && (flags & MACVLAN_FLAG_NOPROMISC)) 861 int err;
863 dev_set_promiscuity(vlan->lowerdev, -1); 862
864 else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC)) 863 if (flags & MACVLAN_FLAG_NOPROMISC)
865 dev_set_promiscuity(vlan->lowerdev, 1); 864 err = dev_set_promiscuity(vlan->lowerdev, -1);
865 else
866 err = dev_set_promiscuity(vlan->lowerdev, 1);
867 if (err < 0)
868 return err;
869 }
866 vlan->flags = flags; 870 vlan->flags = flags;
867 } 871 }
872 if (data && data[IFLA_MACVLAN_MODE])
873 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
868 return 0; 874 return 0;
869} 875}
870 876
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e46fef38685b..bff7e0b0b4e7 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1111,8 +1111,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1111 } 1111 }
1112 1112
1113 port->index = -1; 1113 port->index = -1;
1114 team_port_enable(team, port);
1115 list_add_tail_rcu(&port->list, &team->port_list); 1114 list_add_tail_rcu(&port->list, &team->port_list);
1115 team_port_enable(team, port);
1116 __team_compute_features(team); 1116 __team_compute_features(team);
1117 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); 1117 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
1118 __team_options_change_check(team); 1118 __team_options_change_check(team);
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 5ca14d463ba7..7f032e211343 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -28,6 +28,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
28 28
29 port_index = random_N(team->en_port_count); 29 port_index = random_N(team->en_port_count);
30 port = team_get_port_by_index_rcu(team, port_index); 30 port = team_get_port_by_index_rcu(team, port_index);
31 if (unlikely(!port))
32 goto drop;
31 port = team_get_first_port_txable_rcu(team, port); 33 port = team_get_first_port_txable_rcu(team, port);
32 if (unlikely(!port)) 34 if (unlikely(!port))
33 goto drop; 35 goto drop;
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 90311c7375fb..53665850b59e 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -33,6 +33,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
33 port_index = team_num_to_port_index(team, 33 port_index = team_num_to_port_index(team,
34 rr_priv(team)->sent_packets++); 34 rr_priv(team)->sent_packets++);
35 port = team_get_port_by_index_rcu(team, port_index); 35 port = team_get_port_by_index_rcu(team, port_index);
36 if (unlikely(!port))
37 goto drop;
36 port = team_get_first_port_txable_rcu(team, port); 38 port = team_get_first_port_txable_rcu(team, port);
37 if (unlikely(!port)) 39 if (unlikely(!port))
38 goto drop; 40 goto drop;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a34427065460..cea2fe4e9812 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
352 u32 numqueues = 0; 352 u32 numqueues = 0;
353 353
354 rcu_read_lock(); 354 rcu_read_lock();
355 numqueues = tun->numqueues; 355 numqueues = ACCESS_ONCE(tun->numqueues);
356 356
357 txq = skb_get_rxhash(skb); 357 txq = skb_get_rxhash(skb);
358 if (txq) { 358 if (txq) {
@@ -2157,6 +2157,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2157 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags); 2157 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2158 INIT_LIST_HEAD(&tfile->next); 2158 INIT_LIST_HEAD(&tfile->next);
2159 2159
2160 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2161
2160 return 0; 2162 return 0;
2161} 2163}
2162 2164
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 078795fe6e31..04ee044dde51 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -627,6 +627,12 @@ static const struct usb_device_id products [] = {
627 .driver_info = 0, 627 .driver_info = 0,
628}, 628},
629 629
630/* Huawei E1820 - handled by qmi_wwan */
631{
632 USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1),
633 .driver_info = 0,
634},
635
630/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */ 636/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
631#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE) 637#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
632{ 638{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 86adfa0a912e..d095d0d3056b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -519,6 +519,7 @@ static const struct usb_device_id products[] = {
519 /* 3. Combined interface devices matching on interface number */ 519 /* 3. Combined interface devices matching on interface number */
520 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 520 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
521 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 521 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
522 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
522 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 523 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
523 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 524 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
524 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, 525 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8111565c35fc..f6dce13c8f89 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -603,18 +603,22 @@ skip:
603 603
604/* Watch incoming packets to learn mapping between Ethernet address 604/* Watch incoming packets to learn mapping between Ethernet address
605 * and Tunnel endpoint. 605 * and Tunnel endpoint.
606 * Return true if packet is bogus and should be droppped.
606 */ 607 */
607static void vxlan_snoop(struct net_device *dev, 608static bool vxlan_snoop(struct net_device *dev,
608 __be32 src_ip, const u8 *src_mac) 609 __be32 src_ip, const u8 *src_mac)
609{ 610{
610 struct vxlan_dev *vxlan = netdev_priv(dev); 611 struct vxlan_dev *vxlan = netdev_priv(dev);
611 struct vxlan_fdb *f; 612 struct vxlan_fdb *f;
612 int err;
613 613
614 f = vxlan_find_mac(vxlan, src_mac); 614 f = vxlan_find_mac(vxlan, src_mac);
615 if (likely(f)) { 615 if (likely(f)) {
616 if (likely(f->remote.remote_ip == src_ip)) 616 if (likely(f->remote.remote_ip == src_ip))
617 return; 617 return false;
618
619 /* Don't migrate static entries, drop packets */
620 if (f->state & NUD_NOARP)
621 return true;
618 622
619 if (net_ratelimit()) 623 if (net_ratelimit())
620 netdev_info(dev, 624 netdev_info(dev,
@@ -626,14 +630,19 @@ static void vxlan_snoop(struct net_device *dev,
626 } else { 630 } else {
627 /* learned new entry */ 631 /* learned new entry */
628 spin_lock(&vxlan->hash_lock); 632 spin_lock(&vxlan->hash_lock);
629 err = vxlan_fdb_create(vxlan, src_mac, src_ip, 633
630 NUD_REACHABLE, 634 /* close off race between vxlan_flush and incoming packets */
631 NLM_F_EXCL|NLM_F_CREATE, 635 if (netif_running(dev))
632 vxlan->dst_port, 636 vxlan_fdb_create(vxlan, src_mac, src_ip,
633 vxlan->default_dst.remote_vni, 637 NUD_REACHABLE,
634 0, NTF_SELF); 638 NLM_F_EXCL|NLM_F_CREATE,
639 vxlan->dst_port,
640 vxlan->default_dst.remote_vni,
641 0, NTF_SELF);
635 spin_unlock(&vxlan->hash_lock); 642 spin_unlock(&vxlan->hash_lock);
636 } 643 }
644
645 return false;
637} 646}
638 647
639 648
@@ -766,8 +775,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
766 vxlan->dev->dev_addr) == 0) 775 vxlan->dev->dev_addr) == 0)
767 goto drop; 776 goto drop;
768 777
769 if (vxlan->flags & VXLAN_F_LEARN) 778 if ((vxlan->flags & VXLAN_F_LEARN) &&
770 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); 779 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
780 goto drop;
771 781
772 __skb_tunnel_rx(skb, vxlan->dev); 782 __skb_tunnel_rx(skb, vxlan->dev);
773 skb_reset_network_header(skb); 783 skb_reset_network_header(skb);
@@ -1190,9 +1200,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1190 struct sk_buff *skb1; 1200 struct sk_buff *skb1;
1191 1201
1192 skb1 = skb_clone(skb, GFP_ATOMIC); 1202 skb1 = skb_clone(skb, GFP_ATOMIC);
1193 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); 1203 if (skb1) {
1194 if (rc == NETDEV_TX_OK) 1204 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1195 rc = rc1; 1205 if (rc == NETDEV_TX_OK)
1206 rc = rc1;
1207 }
1196 } 1208 }
1197 1209
1198 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); 1210 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 3b078515b422..760ab3fe09e2 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -84,13 +84,17 @@ config ATH9K_DFS_CERTIFIED
84 developed. At this point enabling this option won't do anything 84 developed. At this point enabling this option won't do anything
85 except increase code size. 85 except increase code size.
86 86
87config ATH9K_RATE_CONTROL 87config ATH9K_LEGACY_RATE_CONTROL
88 bool "Atheros ath9k rate control" 88 bool "Atheros ath9k rate control"
89 depends on ATH9K 89 depends on ATH9K
90 default y 90 default n
91 ---help--- 91 ---help---
92 Say Y, if you want to use the ath9k specific rate control 92 Say Y, if you want to use the ath9k specific rate control
93 module instead of minstrel_ht. 93 module instead of minstrel_ht. Be warned that there are various
94 issues with the ath9k RC and minstrel is a more robust algorithm.
95 Note that even if this option is selected, "ath9k_rate_control"
96 has to be passed to mac80211 using the module parameter,
97 ieee80211_default_rc_algo.
94 98
95config ATH9K_HTC 99config ATH9K_HTC
96 tristate "Atheros HTC based wireless cards support" 100 tristate "Atheros HTC based wireless cards support"
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 2ad8f9474ba1..75ee9e7704ce 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,7 @@ ath9k-y += beacon.o \
8 antenna.o 8 antenna.o
9 9
10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o 10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
11ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o 11ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
12ath9k-$(CONFIG_ATH9K_PCI) += pci.o 12ath9k-$(CONFIG_ATH9K_PCI) += pci.o
13ath9k-$(CONFIG_ATH9K_AHB) += ahb.o 13ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
14ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 14ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index db5ffada2217..7546b9a7dcbf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -958,11 +958,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
958 {0x0000a074, 0x00000000}, 958 {0x0000a074, 0x00000000},
959 {0x0000a078, 0x00000000}, 959 {0x0000a078, 0x00000000},
960 {0x0000a07c, 0x00000000}, 960 {0x0000a07c, 0x00000000},
961 {0x0000a080, 0x1a1a1a1a}, 961 {0x0000a080, 0x22222229},
962 {0x0000a084, 0x1a1a1a1a}, 962 {0x0000a084, 0x1d1d1d1d},
963 {0x0000a088, 0x1a1a1a1a}, 963 {0x0000a088, 0x1d1d1d1d},
964 {0x0000a08c, 0x1a1a1a1a}, 964 {0x0000a08c, 0x1d1d1d1d},
965 {0x0000a090, 0x171a1a1a}, 965 {0x0000a090, 0x171d1d1d},
966 {0x0000a094, 0x11111717}, 966 {0x0000a094, 0x11111717},
967 {0x0000a098, 0x00030311}, 967 {0x0000a098, 0x00030311},
968 {0x0000a09c, 0x00000000}, 968 {0x0000a09c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index daba841808cf..389ee1b59976 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -792,8 +792,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
792 hw->wiphy->iface_combinations = if_comb; 792 hw->wiphy->iface_combinations = if_comb;
793 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 793 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
794 794
795 if (AR_SREV_5416(sc->sc_ah)) 795 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
796 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
797 796
798 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 797 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
799 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 798 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
@@ -831,10 +830,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
831 sc->ant_rx = hw->wiphy->available_antennas_rx; 830 sc->ant_rx = hw->wiphy->available_antennas_rx;
832 sc->ant_tx = hw->wiphy->available_antennas_tx; 831 sc->ant_tx = hw->wiphy->available_antennas_tx;
833 832
834#ifdef CONFIG_ATH9K_RATE_CONTROL
835 hw->rate_control_algorithm = "ath9k_rate_control";
836#endif
837
838 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 833 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
839 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 834 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
840 &sc->sbands[IEEE80211_BAND_2GHZ]; 835 &sc->sbands[IEEE80211_BAND_2GHZ];
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 267dbfcfaa96..b9a87383cb43 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -231,7 +231,7 @@ static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
231} 231}
232#endif 232#endif
233 233
234#ifdef CONFIG_ATH9K_RATE_CONTROL 234#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
235int ath_rate_control_register(void); 235int ath_rate_control_register(void);
236void ath_rate_control_unregister(void); 236void ath_rate_control_unregister(void);
237#else 237#else
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6dd07e2ec595..a95b77ab360e 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2458,7 +2458,7 @@ static void b43_request_firmware(struct work_struct *work)
2458 for (i = 0; i < B43_NR_FWTYPES; i++) { 2458 for (i = 0; i < B43_NR_FWTYPES; i++) {
2459 errmsg = ctx->errors[i]; 2459 errmsg = ctx->errors[i];
2460 if (strlen(errmsg)) 2460 if (strlen(errmsg))
2461 b43err(dev->wl, errmsg); 2461 b43err(dev->wl, "%s", errmsg);
2462 } 2462 }
2463 b43_print_fw_helptext(dev->wl, 1); 2463 b43_print_fw_helptext(dev->wl, 1);
2464 goto out; 2464 goto out;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index b98f2235978e..2c593570497c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -930,6 +930,10 @@ fail:
930 brcmf_fws_del_interface(ifp); 930 brcmf_fws_del_interface(ifp);
931 brcmf_fws_deinit(drvr); 931 brcmf_fws_deinit(drvr);
932 } 932 }
933 if (drvr->iflist[0]) {
934 free_netdev(ifp->ndev);
935 drvr->iflist[0] = NULL;
936 }
933 if (p2p_ifp) { 937 if (p2p_ifp) {
934 free_netdev(p2p_ifp->ndev); 938 free_netdev(p2p_ifp->ndev);
935 drvr->iflist[1] = NULL; 939 drvr->iflist[1] = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 28e7aeedd184..9fd6f2fef11b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
3074 */ 3074 */
3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) 3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
3076{ 3076{
3077 /* disallow PS when one of the following global conditions meets */ 3077 /* not supporting PS so always return false for now */
3078 if (!wlc->pub->associated) 3078 return false;
3079 return false;
3080
3081 /* disallow PS when one of these meets when not scanning */
3082 if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
3083 return false;
3084
3085 if (wlc->bsscfg->type == BRCMS_TYPE_AP)
3086 return false;
3087
3088 if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
3089 return false;
3090
3091 return true;
3092} 3079}
3093 3080
3094static void brcms_c_statsupd(struct brcms_c_info *wlc) 3081static void brcms_c_statsupd(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index c9f197d9ca1e..fe31590a51b2 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -816,6 +816,7 @@ out:
816 rs_sta->last_txrate_idx = idx; 816 rs_sta->last_txrate_idx = idx;
817 info->control.rates[0].idx = rs_sta->last_txrate_idx; 817 info->control.rates[0].idx = rs_sta->last_txrate_idx;
818 } 818 }
819 info->control.rates[0].count = 1;
819 820
820 D_RATE("leave: %d\n", idx); 821 D_RATE("leave: %d\n", idx);
821} 822}
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 1fc0b227e120..ed3c42a63a43 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
2268 info->control.rates[0].flags = 0; 2268 info->control.rates[0].flags = 0;
2269 } 2269 }
2270 info->control.rates[0].idx = rate_idx; 2270 info->control.rates[0].idx = rate_idx;
2271 2271 info->control.rates[0].count = 1;
2272} 2272}
2273 2273
2274static void * 2274static void *
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index f8246f2d88f9..4caaf52986a4 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1832,16 +1832,16 @@ u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
1832__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, 1832__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
1833 u32 beacon_interval); 1833 u32 beacon_interval);
1834 1834
1835#ifdef CONFIG_PM 1835#ifdef CONFIG_PM_SLEEP
1836extern const struct dev_pm_ops il_pm_ops; 1836extern const struct dev_pm_ops il_pm_ops;
1837 1837
1838#define IL_LEGACY_PM_OPS (&il_pm_ops) 1838#define IL_LEGACY_PM_OPS (&il_pm_ops)
1839 1839
1840#else /* !CONFIG_PM */ 1840#else /* !CONFIG_PM_SLEEP */
1841 1841
1842#define IL_LEGACY_PM_OPS NULL 1842#define IL_LEGACY_PM_OPS NULL
1843 1843
1844#endif /* !CONFIG_PM */ 1844#endif /* !CONFIG_PM_SLEEP */
1845 1845
1846/***************************************************** 1846/*****************************************************
1847* Error Handling Debugging 1847* Error Handling Debugging
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 94314a8e1029..8fe76dc4f373 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2799 info->control.rates[0].flags = 0; 2799 info->control.rates[0].flags = 0;
2800 } 2800 }
2801 info->control.rates[0].idx = rate_idx; 2801 info->control.rates[0].idx = rate_idx;
2802 2802 info->control.rates[0].count = 1;
2803} 2803}
2804 2804
2805static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, 2805static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 707446fa00bd..cd1ad0019185 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1378 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 1378 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1379 int ret; 1379 int ret;
1380 1380
1381 if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)) 1381 if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
1382 return; 1382 return;
1383 1383
1384 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 1384 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 4f886133639a..2f690e578b5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1000 */ 1000 */
1001 if (load_module) { 1001 if (load_module) {
1002 err = request_module("%s", op->name); 1002 err = request_module("%s", op->name);
1003#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
1003 if (err) 1004 if (err)
1004 IWL_ERR(drv, 1005 IWL_ERR(drv,
1005 "failed to load module %s (error %d), is dynamic loading enabled?\n", 1006 "failed to load module %s (error %d), is dynamic loading enabled?\n",
1006 op->name, err); 1007 op->name, err);
1008#endif
1007 } 1009 }
1008 return; 1010 return;
1009 1011
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index d6beec765d65..31587a318f8b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -2652,6 +2652,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2652 info->control.rates[0].flags = 0; 2652 info->control.rates[0].flags = 0;
2653 } 2653 }
2654 info->control.rates[0].idx = rate_idx; 2654 info->control.rates[0].idx = rate_idx;
2655 info->control.rates[0].count = 1;
2655} 2656}
2656 2657
2657static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, 2658static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index a830eb6cc411..b9ba4e71ea4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
181 return; 181 return;
182 } else if (ieee80211_is_back_req(fc)) { 182 } else if (ieee80211_is_back_req(fc)) {
183 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 183 tx_cmd->tx_flags |=
184 cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
184 } 185 }
185 186
186 /* HT rate doesn't make sense for a non data frame */ 187 /* HT rate doesn't make sense for a non data frame */
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 753b5682d53f..a5f9875cfd6e 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -26,10 +26,17 @@
26static struct dentry *mwifiex_dfs_dir; 26static struct dentry *mwifiex_dfs_dir;
27 27
28static char *bss_modes[] = { 28static char *bss_modes[] = {
29 "Unknown", 29 "UNSPECIFIED",
30 "Ad-hoc", 30 "ADHOC",
31 "Managed", 31 "STATION",
32 "Auto" 32 "AP",
33 "AP_VLAN",
34 "WDS",
35 "MONITOR",
36 "MESH_POINT",
37 "P2P_CLIENT",
38 "P2P_GO",
39 "P2P_DEVICE",
33}; 40};
34 41
35/* size/addr for mwifiex_debug_info */ 42/* size/addr for mwifiex_debug_info */
@@ -200,7 +207,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
200 p += sprintf(p, "driver_version = %s", fmt); 207 p += sprintf(p, "driver_version = %s", fmt);
201 p += sprintf(p, "\nverext = %s", priv->version_str); 208 p += sprintf(p, "\nverext = %s", priv->version_str);
202 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name); 209 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
203 p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]); 210
211 if (info.bss_mode >= ARRAY_SIZE(bss_modes))
212 p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
213 else
214 p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
215
204 p += sprintf(p, "media_state=\"%s\"\n", 216 p += sprintf(p, "media_state=\"%s\"\n",
205 (!priv->media_connected ? "Disconnected" : "Connected")); 217 (!priv->media_connected ? "Disconnected" : "Connected"));
206 p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr); 218 p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index ead3a3e746a2..3aa30ddcbfea 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
3027 * TODO: we do not use +6 dBm option to do not increase power beyond 3027 * TODO: we do not use +6 dBm option to do not increase power beyond
3028 * regulatory limit, however this could be utilized for devices with 3028 * regulatory limit, however this could be utilized for devices with
3029 * CAPABILITY_POWER_LIMIT. 3029 * CAPABILITY_POWER_LIMIT.
3030 *
3031 * TODO: add different temperature compensation code for RT3290 & RT5390
3032 * to allow to use BBP_R1 for those chips.
3030 */ 3033 */
3031 rt2800_bbp_read(rt2x00dev, 1, &r1); 3034 if (!rt2x00_rt(rt2x00dev, RT3290) &&
3032 if (delta <= -12) { 3035 !rt2x00_rt(rt2x00dev, RT5390)) {
3033 power_ctrl = 2; 3036 rt2800_bbp_read(rt2x00dev, 1, &r1);
3034 delta += 12; 3037 if (delta <= -12) {
3035 } else if (delta <= -6) { 3038 power_ctrl = 2;
3036 power_ctrl = 1; 3039 delta += 12;
3037 delta += 6; 3040 } else if (delta <= -6) {
3038 } else { 3041 power_ctrl = 1;
3039 power_ctrl = 0; 3042 delta += 6;
3043 } else {
3044 power_ctrl = 0;
3045 }
3046 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
3047 rt2800_bbp_write(rt2x00dev, 1, r1);
3040 } 3048 }
3041 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); 3049
3042 rt2800_bbp_write(rt2x00dev, 1, r1);
3043 offset = TX_PWR_CFG_0; 3050 offset = TX_PWR_CFG_0;
3044 3051
3045 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { 3052 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 999ffc12578b..c97e9d327331 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -764,6 +764,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
764 "can't alloc skb for rx\n"); 764 "can't alloc skb for rx\n");
765 goto done; 765 goto done;
766 } 766 }
767 kmemleak_not_leak(new_skb);
767 768
768 pci_unmap_single(rtlpci->pdev, 769 pci_unmap_single(rtlpci->pdev,
769 *((dma_addr_t *) skb->cb), 770 *((dma_addr_t *) skb->cb),
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 3d0498e69c8c..189ba124a8c6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1973,26 +1973,35 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1973 } 1973 }
1974} 1974}
1975 1975
1976void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, 1976static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
1977 struct ieee80211_sta *sta, 1977 struct ieee80211_sta *sta)
1978 u8 rssi_level)
1979{ 1978{
1980 struct rtl_priv *rtlpriv = rtl_priv(hw); 1979 struct rtl_priv *rtlpriv = rtl_priv(hw);
1981 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1980 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1982 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1981 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1983 u32 ratr_value = (u32) mac->basic_rates; 1982 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1984 u8 *mcsrate = mac->mcs; 1983 u32 ratr_value;
1985 u8 ratr_index = 0; 1984 u8 ratr_index = 0;
1986 u8 nmode = mac->ht_enable; 1985 u8 nmode = mac->ht_enable;
1987 u8 mimo_ps = 1; 1986 u8 mimo_ps = IEEE80211_SMPS_OFF;
1988 u16 shortgi_rate = 0; 1987 u16 shortgi_rate;
1989 u32 tmp_ratr_value = 0; 1988 u32 tmp_ratr_value;
1990 u8 curtxbw_40mhz = mac->bw_40; 1989 u8 curtxbw_40mhz = mac->bw_40;
1991 u8 curshortgi_40mhz = mac->sgi_40; 1990 u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1992 u8 curshortgi_20mhz = mac->sgi_20; 1991 1 : 0;
1992 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1993 1 : 0;
1993 enum wireless_mode wirelessmode = mac->mode; 1994 enum wireless_mode wirelessmode = mac->mode;
1994 1995
1995 ratr_value |= ((*(u16 *) (mcsrate))) << 12; 1996 if (rtlhal->current_bandtype == BAND_ON_5G)
1997 ratr_value = sta->supp_rates[1] << 4;
1998 else
1999 ratr_value = sta->supp_rates[0];
2000 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2001 ratr_value = 0xfff;
2002
2003 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2004 sta->ht_cap.mcs.rx_mask[0] << 12);
1996 switch (wirelessmode) { 2005 switch (wirelessmode) {
1997 case WIRELESS_MODE_B: 2006 case WIRELESS_MODE_B:
1998 if (ratr_value & 0x0000000c) 2007 if (ratr_value & 0x0000000c)
@@ -2006,7 +2015,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2006 case WIRELESS_MODE_N_24G: 2015 case WIRELESS_MODE_N_24G:
2007 case WIRELESS_MODE_N_5G: 2016 case WIRELESS_MODE_N_5G:
2008 nmode = 1; 2017 nmode = 1;
2009 if (mimo_ps == 0) { 2018 if (mimo_ps == IEEE80211_SMPS_STATIC) {
2010 ratr_value &= 0x0007F005; 2019 ratr_value &= 0x0007F005;
2011 } else { 2020 } else {
2012 u32 ratr_mask; 2021 u32 ratr_mask;
@@ -2016,8 +2025,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2016 ratr_mask = 0x000ff005; 2025 ratr_mask = 0x000ff005;
2017 else 2026 else
2018 ratr_mask = 0x0f0ff005; 2027 ratr_mask = 0x0f0ff005;
2019 if (curtxbw_40mhz) 2028
2020 ratr_mask |= 0x00000010;
2021 ratr_value &= ratr_mask; 2029 ratr_value &= ratr_mask;
2022 } 2030 }
2023 break; 2031 break;
@@ -2026,41 +2034,74 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2026 ratr_value &= 0x000ff0ff; 2034 ratr_value &= 0x000ff0ff;
2027 else 2035 else
2028 ratr_value &= 0x0f0ff0ff; 2036 ratr_value &= 0x0f0ff0ff;
2037
2029 break; 2038 break;
2030 } 2039 }
2040
2031 ratr_value &= 0x0FFFFFFF; 2041 ratr_value &= 0x0FFFFFFF;
2032 if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || 2042
2033 (!curtxbw_40mhz && curshortgi_20mhz))) { 2043 if (nmode && ((curtxbw_40mhz &&
2044 curshortgi_40mhz) || (!curtxbw_40mhz &&
2045 curshortgi_20mhz))) {
2046
2034 ratr_value |= 0x10000000; 2047 ratr_value |= 0x10000000;
2035 tmp_ratr_value = (ratr_value >> 12); 2048 tmp_ratr_value = (ratr_value >> 12);
2049
2036 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { 2050 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
2037 if ((1 << shortgi_rate) & tmp_ratr_value) 2051 if ((1 << shortgi_rate) & tmp_ratr_value)
2038 break; 2052 break;
2039 } 2053 }
2054
2040 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | 2055 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
2041 (shortgi_rate << 4) | (shortgi_rate); 2056 (shortgi_rate << 4) | (shortgi_rate);
2042 } 2057 }
2058
2043 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); 2059 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
2060
2061 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
2062 rtl_read_dword(rtlpriv, REG_ARFR0));
2044} 2063}
2045 2064
2046void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) 2065static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
2066 struct ieee80211_sta *sta,
2067 u8 rssi_level)
2047{ 2068{
2048 struct rtl_priv *rtlpriv = rtl_priv(hw); 2069 struct rtl_priv *rtlpriv = rtl_priv(hw);
2049 struct rtl_phy *rtlphy = &(rtlpriv->phy); 2070 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2050 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 2071 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2051 u32 ratr_bitmap = (u32) mac->basic_rates; 2072 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2052 u8 *p_mcsrate = mac->mcs; 2073 struct rtl_sta_info *sta_entry = NULL;
2053 u8 ratr_index = 0; 2074 u32 ratr_bitmap;
2054 u8 curtxbw_40mhz = mac->bw_40; 2075 u8 ratr_index;
2055 u8 curshortgi_40mhz = mac->sgi_40; 2076 u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
2056 u8 curshortgi_20mhz = mac->sgi_20; 2077 u8 curshortgi_40mhz = curtxbw_40mhz &&
2057 enum wireless_mode wirelessmode = mac->mode; 2078 (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
2079 1 : 0;
2080 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
2081 1 : 0;
2082 enum wireless_mode wirelessmode = 0;
2058 bool shortgi = false; 2083 bool shortgi = false;
2059 u8 rate_mask[5]; 2084 u8 rate_mask[5];
2060 u8 macid = 0; 2085 u8 macid = 0;
2061 u8 mimops = 1; 2086 u8 mimo_ps = IEEE80211_SMPS_OFF;
2062 2087
2063 ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12); 2088 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
2089 wirelessmode = sta_entry->wireless_mode;
2090 if (mac->opmode == NL80211_IFTYPE_STATION ||
2091 mac->opmode == NL80211_IFTYPE_MESH_POINT)
2092 curtxbw_40mhz = mac->bw_40;
2093 else if (mac->opmode == NL80211_IFTYPE_AP ||
2094 mac->opmode == NL80211_IFTYPE_ADHOC)
2095 macid = sta->aid + 1;
2096
2097 if (rtlhal->current_bandtype == BAND_ON_5G)
2098 ratr_bitmap = sta->supp_rates[1] << 4;
2099 else
2100 ratr_bitmap = sta->supp_rates[0];
2101 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2102 ratr_bitmap = 0xfff;
2103 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2104 sta->ht_cap.mcs.rx_mask[0] << 12);
2064 switch (wirelessmode) { 2105 switch (wirelessmode) {
2065 case WIRELESS_MODE_B: 2106 case WIRELESS_MODE_B:
2066 ratr_index = RATR_INX_WIRELESS_B; 2107 ratr_index = RATR_INX_WIRELESS_B;
@@ -2071,6 +2112,7 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2071 break; 2112 break;
2072 case WIRELESS_MODE_G: 2113 case WIRELESS_MODE_G:
2073 ratr_index = RATR_INX_WIRELESS_GB; 2114 ratr_index = RATR_INX_WIRELESS_GB;
2115
2074 if (rssi_level == 1) 2116 if (rssi_level == 1)
2075 ratr_bitmap &= 0x00000f00; 2117 ratr_bitmap &= 0x00000f00;
2076 else if (rssi_level == 2) 2118 else if (rssi_level == 2)
@@ -2085,7 +2127,8 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2085 case WIRELESS_MODE_N_24G: 2127 case WIRELESS_MODE_N_24G:
2086 case WIRELESS_MODE_N_5G: 2128 case WIRELESS_MODE_N_5G:
2087 ratr_index = RATR_INX_WIRELESS_NGB; 2129 ratr_index = RATR_INX_WIRELESS_NGB;
2088 if (mimops == 0) { 2130
2131 if (mimo_ps == IEEE80211_SMPS_STATIC) {
2089 if (rssi_level == 1) 2132 if (rssi_level == 1)
2090 ratr_bitmap &= 0x00070000; 2133 ratr_bitmap &= 0x00070000;
2091 else if (rssi_level == 2) 2134 else if (rssi_level == 2)
@@ -2128,8 +2171,10 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2128 } 2171 }
2129 } 2172 }
2130 } 2173 }
2174
2131 if ((curtxbw_40mhz && curshortgi_40mhz) || 2175 if ((curtxbw_40mhz && curshortgi_40mhz) ||
2132 (!curtxbw_40mhz && curshortgi_20mhz)) { 2176 (!curtxbw_40mhz && curshortgi_20mhz)) {
2177
2133 if (macid == 0) 2178 if (macid == 0)
2134 shortgi = true; 2179 shortgi = true;
2135 else if (macid == 1) 2180 else if (macid == 1)
@@ -2138,21 +2183,42 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2138 break; 2183 break;
2139 default: 2184 default:
2140 ratr_index = RATR_INX_WIRELESS_NGB; 2185 ratr_index = RATR_INX_WIRELESS_NGB;
2186
2141 if (rtlphy->rf_type == RF_1T2R) 2187 if (rtlphy->rf_type == RF_1T2R)
2142 ratr_bitmap &= 0x000ff0ff; 2188 ratr_bitmap &= 0x000ff0ff;
2143 else 2189 else
2144 ratr_bitmap &= 0x0f0ff0ff; 2190 ratr_bitmap &= 0x0f0ff0ff;
2145 break; 2191 break;
2146 } 2192 }
2147 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", 2193 sta_entry->ratr_index = ratr_index;
2148 ratr_bitmap); 2194
2149 *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) | 2195 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2150 ratr_index << 28); 2196 "ratr_bitmap :%x\n", ratr_bitmap);
2197 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
2198 (ratr_index << 28);
2151 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; 2199 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
2152 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 2200 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2153 "Rate_index:%x, ratr_val:%x, %5phC\n", 2201 "Rate_index:%x, ratr_val:%x, %5phC\n",
2154 ratr_index, ratr_bitmap, rate_mask); 2202 ratr_index, ratr_bitmap, rate_mask);
2155 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); 2203 memcpy(rtlpriv->rate_mask, rate_mask, 5);
2204 /* rtl92c_fill_h2c_cmd() does USB I/O and will result in a
2205 * "scheduled while atomic" if called directly */
2206 schedule_work(&rtlpriv->works.fill_h2c_cmd);
2207
2208 if (macid != 0)
2209 sta_entry->ratr_index = ratr_index;
2210}
2211
2212void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
2213 struct ieee80211_sta *sta,
2214 u8 rssi_level)
2215{
2216 struct rtl_priv *rtlpriv = rtl_priv(hw);
2217
2218 if (rtlpriv->dm.useramask)
2219 rtl92cu_update_hal_rate_mask(hw, sta, rssi_level);
2220 else
2221 rtl92cu_update_hal_rate_table(hw, sta);
2156} 2222}
2157 2223
2158void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw) 2224void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index f41a3aa4a26f..8e3ec1e25644 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -98,10 +98,6 @@ void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
98 u32 add_msr, u32 rm_msr); 98 u32 add_msr, u32 rm_msr);
99void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 99void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
100void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 100void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
101void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
102 struct ieee80211_sta *sta,
103 u8 rssi_level);
104void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
105 101
106void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw); 102void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
107bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); 103bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 85b6bdb163c0..da4f587199ee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -289,14 +289,30 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
289 macaddr = cam_const_broad; 289 macaddr = cam_const_broad;
290 entry_id = key_index; 290 entry_id = key_index;
291 } else { 291 } else {
292 if (mac->opmode == NL80211_IFTYPE_AP ||
293 mac->opmode == NL80211_IFTYPE_MESH_POINT) {
294 entry_id = rtl_cam_get_free_entry(hw,
295 p_macaddr);
296 if (entry_id >= TOTAL_CAM_ENTRY) {
297 RT_TRACE(rtlpriv, COMP_SEC,
298 DBG_EMERG,
299 "Can not find free hw security cam entry\n");
300 return;
301 }
302 } else {
303 entry_id = CAM_PAIRWISE_KEY_POSITION;
304 }
305
292 key_index = PAIRWISE_KEYIDX; 306 key_index = PAIRWISE_KEYIDX;
293 entry_id = CAM_PAIRWISE_KEY_POSITION;
294 is_pairwise = true; 307 is_pairwise = true;
295 } 308 }
296 } 309 }
297 if (rtlpriv->sec.key_len[key_index] == 0) { 310 if (rtlpriv->sec.key_len[key_index] == 0) {
298 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 311 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
299 "delete one entry\n"); 312 "delete one entry\n");
313 if (mac->opmode == NL80211_IFTYPE_AP ||
314 mac->opmode == NL80211_IFTYPE_MESH_POINT)
315 rtl_cam_del_entry(hw, p_macaddr);
300 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); 316 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
301 } else { 317 } else {
302 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, 318 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 938b1e670b93..826f085c29dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -106,8 +106,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
106 .update_interrupt_mask = rtl92cu_update_interrupt_mask, 106 .update_interrupt_mask = rtl92cu_update_interrupt_mask,
107 .get_hw_reg = rtl92cu_get_hw_reg, 107 .get_hw_reg = rtl92cu_get_hw_reg,
108 .set_hw_reg = rtl92cu_set_hw_reg, 108 .set_hw_reg = rtl92cu_set_hw_reg,
109 .update_rate_tbl = rtl92cu_update_hal_rate_table, 109 .update_rate_tbl = rtl92cu_update_hal_rate_tbl,
110 .update_rate_mask = rtl92cu_update_hal_rate_mask,
111 .fill_tx_desc = rtl92cu_tx_fill_desc, 110 .fill_tx_desc = rtl92cu_tx_fill_desc,
112 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, 111 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
113 .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc, 112 .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
@@ -137,6 +136,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
137 .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate, 136 .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
138 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, 137 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
139 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, 138 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
139 .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
140}; 140};
141 141
142static struct rtl_mod_params rtl92cu_mod_params = { 142static struct rtl_mod_params rtl92cu_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index a1310abd0d54..262e1e4c6e5b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -49,5 +49,8 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, 49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
50 enum radio_path rfpath, u32 regaddr, u32 bitmask); 50 enum radio_path rfpath, u32 regaddr, u32 bitmask);
51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw); 51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
52void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
53 struct ieee80211_sta *sta,
54 u8 rssi_level);
52 55
53#endif 56#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 76732b0cd221..a3532e077871 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -824,6 +824,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
824 824
825 /* should after adapter start and interrupt enable. */ 825 /* should after adapter start and interrupt enable. */
826 set_hal_stop(rtlhal); 826 set_hal_stop(rtlhal);
827 cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
827 /* Enable software */ 828 /* Enable software */
828 SET_USB_STOP(rtlusb); 829 SET_USB_STOP(rtlusb);
829 rtl_usb_deinit(hw); 830 rtl_usb_deinit(hw);
@@ -1026,6 +1027,16 @@ static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1026 return false; 1027 return false;
1027} 1028}
1028 1029
1030static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work)
1031{
1032 struct rtl_works *rtlworks =
1033 container_of(work, struct rtl_works, fill_h2c_cmd);
1034 struct ieee80211_hw *hw = rtlworks->hw;
1035 struct rtl_priv *rtlpriv = rtl_priv(hw);
1036
1037 rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask);
1038}
1039
1029static struct rtl_intf_ops rtl_usb_ops = { 1040static struct rtl_intf_ops rtl_usb_ops = {
1030 .adapter_start = rtl_usb_start, 1041 .adapter_start = rtl_usb_start,
1031 .adapter_stop = rtl_usb_stop, 1042 .adapter_stop = rtl_usb_stop,
@@ -1057,6 +1068,8 @@ int rtl_usb_probe(struct usb_interface *intf,
1057 1068
1058 /* this spin lock must be initialized early */ 1069 /* this spin lock must be initialized early */
1059 spin_lock_init(&rtlpriv->locks.usb_lock); 1070 spin_lock_init(&rtlpriv->locks.usb_lock);
1071 INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
1072 rtl_fill_h2c_cmd_work_callback);
1060 1073
1061 rtlpriv->usb_data_index = 0; 1074 rtlpriv->usb_data_index = 0;
1062 init_completion(&rtlpriv->firmware_loading_complete); 1075 init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 44328baa6389..cc03e7c87cbe 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1736,6 +1736,8 @@ struct rtl_hal_ops {
1736 void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, 1736 void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
1737 bool mstate); 1737 bool mstate);
1738 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); 1738 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
1739 void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
1740 u32 cmd_len, u8 *p_cmdbuffer);
1739}; 1741};
1740 1742
1741struct rtl_intf_ops { 1743struct rtl_intf_ops {
@@ -1869,6 +1871,7 @@ struct rtl_works {
1869 struct delayed_work fwevt_wq; 1871 struct delayed_work fwevt_wq;
1870 1872
1871 struct work_struct lps_change_work; 1873 struct work_struct lps_change_work;
1874 struct work_struct fill_h2c_cmd;
1872}; 1875};
1873 1876
1874struct rtl_debug { 1877struct rtl_debug {
@@ -2048,6 +2051,7 @@ struct rtl_priv {
2048 }; 2051 };
2049 }; 2052 };
2050 bool enter_ps; /* true when entering PS */ 2053 bool enter_ps; /* true when entering PS */
2054 u8 rate_mask[5];
2051 2055
2052 /*This must be the last item so 2056 /*This must be the last item so
2053 that it points to the data allocated 2057 that it points to the data allocated
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index affdb3ec6225..4a0bbb13806b 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -310,7 +310,7 @@ static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
310 memcpy(cmd->channels_2, cmd_channels->channels_2, 310 memcpy(cmd->channels_2, cmd_channels->channels_2,
311 sizeof(cmd->channels_2)); 311 sizeof(cmd->channels_2));
312 memcpy(cmd->channels_5, cmd_channels->channels_5, 312 memcpy(cmd->channels_5, cmd_channels->channels_5,
313 sizeof(cmd->channels_2)); 313 sizeof(cmd->channels_5));
314 /* channels_4 are not supported, so no need to copy them */ 314 /* channels_4 are not supported, so no need to copy them */
315} 315}
316 316
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 222d03540200..9e5484a73667 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -36,12 +36,12 @@
36#define WL127X_IFTYPE_SR_VER 3 36#define WL127X_IFTYPE_SR_VER 3
37#define WL127X_MAJOR_SR_VER 10 37#define WL127X_MAJOR_SR_VER 10
38#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE 38#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
39#define WL127X_MINOR_SR_VER 115 39#define WL127X_MINOR_SR_VER 133
40/* minimum multi-role FW version for wl127x */ 40/* minimum multi-role FW version for wl127x */
41#define WL127X_IFTYPE_MR_VER 5 41#define WL127X_IFTYPE_MR_VER 5
42#define WL127X_MAJOR_MR_VER 7 42#define WL127X_MAJOR_MR_VER 7
43#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE 43#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
44#define WL127X_MINOR_MR_VER 115 44#define WL127X_MINOR_MR_VER 42
45 45
46/* FW chip version for wl128x */ 46/* FW chip version for wl128x */
47#define WL128X_CHIP_VER 7 47#define WL128X_CHIP_VER 7
@@ -49,7 +49,7 @@
49#define WL128X_IFTYPE_SR_VER 3 49#define WL128X_IFTYPE_SR_VER 3
50#define WL128X_MAJOR_SR_VER 10 50#define WL128X_MAJOR_SR_VER 10
51#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE 51#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
52#define WL128X_MINOR_SR_VER 115 52#define WL128X_MINOR_SR_VER 133
53/* minimum multi-role FW version for wl128x */ 53/* minimum multi-role FW version for wl128x */
54#define WL128X_IFTYPE_MR_VER 5 54#define WL128X_IFTYPE_MR_VER 5
55#define WL128X_MAJOR_MR_VER 7 55#define WL128X_MAJOR_MR_VER 7
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index 09d944505ac0..2b642f8c9266 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -34,7 +34,7 @@ static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
34 memcpy(cmd->channels_2, cmd_channels->channels_2, 34 memcpy(cmd->channels_2, cmd_channels->channels_2,
35 sizeof(cmd->channels_2)); 35 sizeof(cmd->channels_2));
36 memcpy(cmd->channels_5, cmd_channels->channels_5, 36 memcpy(cmd->channels_5, cmd_channels->channels_5,
37 sizeof(cmd->channels_2)); 37 sizeof(cmd->channels_5));
38 /* channels_4 are not supported, so no need to copy them */ 38 /* channels_4 are not supported, so no need to copy them */
39} 39}
40 40
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 82576fffb452..a0b50ad2ef31 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -778,12 +778,13 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
778 sco->meta_slots_used); 778 sco->meta_slots_used);
779 779
780 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 780 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
781 if (ret && list_empty(&vif->notify_list))
782 list_add_tail(&vif->notify_list, &notify);
783 781
784 xenvif_notify_tx_completion(vif); 782 xenvif_notify_tx_completion(vif);
785 783
786 xenvif_put(vif); 784 if (ret && list_empty(&vif->notify_list))
785 list_add_tail(&vif->notify_list, &notify);
786 else
787 xenvif_put(vif);
787 npo.meta_cons += sco->meta_slots_used; 788 npo.meta_cons += sco->meta_slots_used;
788 dev_kfree_skb(skb); 789 dev_kfree_skb(skb);
789 } 790 }
@@ -791,6 +792,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
791 list_for_each_entry_safe(vif, tmp, &notify, notify_list) { 792 list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
792 notify_remote_via_irq(vif->rx_irq); 793 notify_remote_via_irq(vif->rx_irq);
793 list_del_init(&vif->notify_list); 794 list_del_init(&vif->notify_list);
795 xenvif_put(vif);
794 } 796 }
795 797
796 /* More work to do? */ 798 /* More work to do? */
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f53b992f060a..a6f584a7f4a1 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -192,14 +192,15 @@ EXPORT_SYMBOL(of_find_property);
192struct device_node *of_find_all_nodes(struct device_node *prev) 192struct device_node *of_find_all_nodes(struct device_node *prev)
193{ 193{
194 struct device_node *np; 194 struct device_node *np;
195 unsigned long flags;
195 196
196 raw_spin_lock(&devtree_lock); 197 raw_spin_lock_irqsave(&devtree_lock, flags);
197 np = prev ? prev->allnext : of_allnodes; 198 np = prev ? prev->allnext : of_allnodes;
198 for (; np != NULL; np = np->allnext) 199 for (; np != NULL; np = np->allnext)
199 if (of_node_get(np)) 200 if (of_node_get(np))
200 break; 201 break;
201 of_node_put(prev); 202 of_node_put(prev);
202 raw_spin_unlock(&devtree_lock); 203 raw_spin_unlock_irqrestore(&devtree_lock, flags);
203 return np; 204 return np;
204} 205}
205EXPORT_SYMBOL(of_find_all_nodes); 206EXPORT_SYMBOL(of_find_all_nodes);
@@ -421,8 +422,9 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
421 struct device_node *prev) 422 struct device_node *prev)
422{ 423{
423 struct device_node *next; 424 struct device_node *next;
425 unsigned long flags;
424 426
425 raw_spin_lock(&devtree_lock); 427 raw_spin_lock_irqsave(&devtree_lock, flags);
426 next = prev ? prev->sibling : node->child; 428 next = prev ? prev->sibling : node->child;
427 for (; next; next = next->sibling) { 429 for (; next; next = next->sibling) {
428 if (!__of_device_is_available(next)) 430 if (!__of_device_is_available(next))
@@ -431,7 +433,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
431 break; 433 break;
432 } 434 }
433 of_node_put(prev); 435 of_node_put(prev);
434 raw_spin_unlock(&devtree_lock); 436 raw_spin_unlock_irqrestore(&devtree_lock, flags);
435 return next; 437 return next;
436} 438}
437EXPORT_SYMBOL(of_get_next_available_child); 439EXPORT_SYMBOL(of_get_next_available_child);
@@ -735,13 +737,14 @@ EXPORT_SYMBOL_GPL(of_modalias_node);
735struct device_node *of_find_node_by_phandle(phandle handle) 737struct device_node *of_find_node_by_phandle(phandle handle)
736{ 738{
737 struct device_node *np; 739 struct device_node *np;
740 unsigned long flags;
738 741
739 raw_spin_lock(&devtree_lock); 742 raw_spin_lock_irqsave(&devtree_lock, flags);
740 for (np = of_allnodes; np; np = np->allnext) 743 for (np = of_allnodes; np; np = np->allnext)
741 if (np->phandle == handle) 744 if (np->phandle == handle)
742 break; 745 break;
743 of_node_get(np); 746 of_node_get(np);
744 raw_spin_unlock(&devtree_lock); 747 raw_spin_unlock_irqrestore(&devtree_lock, flags);
745 return np; 748 return np;
746} 749}
747EXPORT_SYMBOL(of_find_node_by_phandle); 750EXPORT_SYMBOL(of_find_node_by_phandle);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index 791a6719d8a9..8cd90e7e945a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -2357,27 +2357,48 @@ static const unsigned int sdhi3_wp_mux[] = {
2357}; 2357};
2358/* - USB0 ------------------------------------------------------------------- */ 2358/* - USB0 ------------------------------------------------------------------- */
2359static const unsigned int usb0_pins[] = { 2359static const unsigned int usb0_pins[] = {
2360 /* OVC */ 2360 /* PENC */
2361 150, 154, 2361 154,
2362}; 2362};
2363static const unsigned int usb0_mux[] = { 2363static const unsigned int usb0_mux[] = {
2364 USB_OVC0_MARK, USB_PENC0_MARK, 2364 USB_PENC0_MARK,
2365};
2366static const unsigned int usb0_ovc_pins[] = {
2367 /* USB_OVC */
2368 150
2369};
2370static const unsigned int usb0_ovc_mux[] = {
2371 USB_OVC0_MARK,
2365}; 2372};
2366/* - USB1 ------------------------------------------------------------------- */ 2373/* - USB1 ------------------------------------------------------------------- */
2367static const unsigned int usb1_pins[] = { 2374static const unsigned int usb1_pins[] = {
2368 /* OVC */ 2375 /* PENC */
2369 152, 155, 2376 155,
2370}; 2377};
2371static const unsigned int usb1_mux[] = { 2378static const unsigned int usb1_mux[] = {
2372 USB_OVC1_MARK, USB_PENC1_MARK, 2379 USB_PENC1_MARK,
2380};
2381static const unsigned int usb1_ovc_pins[] = {
2382 /* USB_OVC */
2383 152,
2384};
2385static const unsigned int usb1_ovc_mux[] = {
2386 USB_OVC1_MARK,
2373}; 2387};
2374/* - USB2 ------------------------------------------------------------------- */ 2388/* - USB2 ------------------------------------------------------------------- */
2375static const unsigned int usb2_pins[] = { 2389static const unsigned int usb2_pins[] = {
2376 /* OVC, PENC */ 2390 /* PENC */
2377 125, 156, 2391 156,
2378}; 2392};
2379static const unsigned int usb2_mux[] = { 2393static const unsigned int usb2_mux[] = {
2380 USB_OVC2_MARK, USB_PENC2_MARK, 2394 USB_PENC2_MARK,
2395};
2396static const unsigned int usb2_ovc_pins[] = {
2397 /* USB_OVC */
2398 125,
2399};
2400static const unsigned int usb2_ovc_mux[] = {
2401 USB_OVC2_MARK,
2381}; 2402};
2382 2403
2383static const struct sh_pfc_pin_group pinmux_groups[] = { 2404static const struct sh_pfc_pin_group pinmux_groups[] = {
@@ -2501,8 +2522,11 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
2501 SH_PFC_PIN_GROUP(sdhi3_cd), 2522 SH_PFC_PIN_GROUP(sdhi3_cd),
2502 SH_PFC_PIN_GROUP(sdhi3_wp), 2523 SH_PFC_PIN_GROUP(sdhi3_wp),
2503 SH_PFC_PIN_GROUP(usb0), 2524 SH_PFC_PIN_GROUP(usb0),
2525 SH_PFC_PIN_GROUP(usb0_ovc),
2504 SH_PFC_PIN_GROUP(usb1), 2526 SH_PFC_PIN_GROUP(usb1),
2527 SH_PFC_PIN_GROUP(usb1_ovc),
2505 SH_PFC_PIN_GROUP(usb2), 2528 SH_PFC_PIN_GROUP(usb2),
2529 SH_PFC_PIN_GROUP(usb2_ovc),
2506}; 2530};
2507 2531
2508static const char * const du0_groups[] = { 2532static const char * const du0_groups[] = {
@@ -2683,14 +2707,17 @@ static const char * const sdhi3_groups[] = {
2683 2707
2684static const char * const usb0_groups[] = { 2708static const char * const usb0_groups[] = {
2685 "usb0", 2709 "usb0",
2710 "usb0_ovc",
2686}; 2711};
2687 2712
2688static const char * const usb1_groups[] = { 2713static const char * const usb1_groups[] = {
2689 "usb1", 2714 "usb1",
2715 "usb1_ovc",
2690}; 2716};
2691 2717
2692static const char * const usb2_groups[] = { 2718static const char * const usb2_groups[] = {
2693 "usb2", 2719 "usb2",
2720 "usb2_ovc",
2694}; 2721};
2695 2722
2696static const struct sh_pfc_function pinmux_functions[] = { 2723static const struct sh_pfc_function pinmux_functions[] = {
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 8df0c5a21be2..d111c8687f9b 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -703,7 +703,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
703 } 703 }
704 rfkill_init_sw_state(gps_rfkill, 704 rfkill_init_sw_state(gps_rfkill,
705 hp_wmi_get_sw_state(HPWMI_GPS)); 705 hp_wmi_get_sw_state(HPWMI_GPS));
706 rfkill_set_hw_state(bluetooth_rfkill, 706 rfkill_set_hw_state(gps_rfkill,
707 hp_wmi_get_hw_state(HPWMI_GPS)); 707 hp_wmi_get_hw_state(HPWMI_GPS));
708 err = rfkill_register(gps_rfkill); 708 err = rfkill_register(gps_rfkill);
709 if (err) 709 if (err)
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 0eab77b22340..f296f3f7db9b 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -25,6 +25,7 @@
25#include <linux/rtc.h> 25#include <linux/rtc.h>
26#include <linux/bcd.h> 26#include <linux/bcd.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/spinlock.h>
28#include <linux/ioctl.h> 29#include <linux/ioctl.h>
29#include <linux/completion.h> 30#include <linux/completion.h>
30#include <linux/io.h> 31#include <linux/io.h>
@@ -42,10 +43,65 @@
42 43
43#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ 44#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
44 45
46struct at91_rtc_config {
47 bool use_shadow_imr;
48};
49
50static const struct at91_rtc_config *at91_rtc_config;
45static DECLARE_COMPLETION(at91_rtc_updated); 51static DECLARE_COMPLETION(at91_rtc_updated);
46static unsigned int at91_alarm_year = AT91_RTC_EPOCH; 52static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
47static void __iomem *at91_rtc_regs; 53static void __iomem *at91_rtc_regs;
48static int irq; 54static int irq;
55static DEFINE_SPINLOCK(at91_rtc_lock);
56static u32 at91_rtc_shadow_imr;
57
58static void at91_rtc_write_ier(u32 mask)
59{
60 unsigned long flags;
61
62 spin_lock_irqsave(&at91_rtc_lock, flags);
63 at91_rtc_shadow_imr |= mask;
64 at91_rtc_write(AT91_RTC_IER, mask);
65 spin_unlock_irqrestore(&at91_rtc_lock, flags);
66}
67
68static void at91_rtc_write_idr(u32 mask)
69{
70 unsigned long flags;
71
72 spin_lock_irqsave(&at91_rtc_lock, flags);
73 at91_rtc_write(AT91_RTC_IDR, mask);
74 /*
75 * Register read back (of any RTC-register) needed to make sure
76 * IDR-register write has reached the peripheral before updating
77 * shadow mask.
78 *
79 * Note that there is still a possibility that the mask is updated
80 * before interrupts have actually been disabled in hardware. The only
81 * way to be certain would be to poll the IMR-register, which is is
82 * the very register we are trying to emulate. The register read back
83 * is a reasonable heuristic.
84 */
85 at91_rtc_read(AT91_RTC_SR);
86 at91_rtc_shadow_imr &= ~mask;
87 spin_unlock_irqrestore(&at91_rtc_lock, flags);
88}
89
90static u32 at91_rtc_read_imr(void)
91{
92 unsigned long flags;
93 u32 mask;
94
95 if (at91_rtc_config->use_shadow_imr) {
96 spin_lock_irqsave(&at91_rtc_lock, flags);
97 mask = at91_rtc_shadow_imr;
98 spin_unlock_irqrestore(&at91_rtc_lock, flags);
99 } else {
100 mask = at91_rtc_read(AT91_RTC_IMR);
101 }
102
103 return mask;
104}
49 105
50/* 106/*
51 * Decode time/date into rtc_time structure 107 * Decode time/date into rtc_time structure
@@ -110,9 +166,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
110 cr = at91_rtc_read(AT91_RTC_CR); 166 cr = at91_rtc_read(AT91_RTC_CR);
111 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); 167 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
112 168
113 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); 169 at91_rtc_write_ier(AT91_RTC_ACKUPD);
114 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ 170 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
115 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); 171 at91_rtc_write_idr(AT91_RTC_ACKUPD);
116 172
117 at91_rtc_write(AT91_RTC_TIMR, 173 at91_rtc_write(AT91_RTC_TIMR,
118 bin2bcd(tm->tm_sec) << 0 174 bin2bcd(tm->tm_sec) << 0
@@ -144,7 +200,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
144 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); 200 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
145 tm->tm_year = at91_alarm_year - 1900; 201 tm->tm_year = at91_alarm_year - 1900;
146 202
147 alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) 203 alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
148 ? 1 : 0; 204 ? 1 : 0;
149 205
150 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 206 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -169,7 +225,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
169 tm.tm_min = alrm->time.tm_min; 225 tm.tm_min = alrm->time.tm_min;
170 tm.tm_sec = alrm->time.tm_sec; 226 tm.tm_sec = alrm->time.tm_sec;
171 227
172 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 228 at91_rtc_write_idr(AT91_RTC_ALARM);
173 at91_rtc_write(AT91_RTC_TIMALR, 229 at91_rtc_write(AT91_RTC_TIMALR,
174 bin2bcd(tm.tm_sec) << 0 230 bin2bcd(tm.tm_sec) << 0
175 | bin2bcd(tm.tm_min) << 8 231 | bin2bcd(tm.tm_min) << 8
@@ -182,7 +238,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
182 238
183 if (alrm->enabled) { 239 if (alrm->enabled) {
184 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 240 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
185 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 241 at91_rtc_write_ier(AT91_RTC_ALARM);
186 } 242 }
187 243
188 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 244 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -198,9 +254,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
198 254
199 if (enabled) { 255 if (enabled) {
200 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 256 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
201 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 257 at91_rtc_write_ier(AT91_RTC_ALARM);
202 } else 258 } else
203 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 259 at91_rtc_write_idr(AT91_RTC_ALARM);
204 260
205 return 0; 261 return 0;
206} 262}
@@ -209,7 +265,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
209 */ 265 */
210static int at91_rtc_proc(struct device *dev, struct seq_file *seq) 266static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
211{ 267{
212 unsigned long imr = at91_rtc_read(AT91_RTC_IMR); 268 unsigned long imr = at91_rtc_read_imr();
213 269
214 seq_printf(seq, "update_IRQ\t: %s\n", 270 seq_printf(seq, "update_IRQ\t: %s\n",
215 (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 271 (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -229,7 +285,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
229 unsigned int rtsr; 285 unsigned int rtsr;
230 unsigned long events = 0; 286 unsigned long events = 0;
231 287
232 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); 288 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
233 if (rtsr) { /* this interrupt is shared! Is it ours? */ 289 if (rtsr) { /* this interrupt is shared! Is it ours? */
234 if (rtsr & AT91_RTC_ALARM) 290 if (rtsr & AT91_RTC_ALARM)
235 events |= (RTC_AF | RTC_IRQF); 291 events |= (RTC_AF | RTC_IRQF);
@@ -250,6 +306,43 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
250 return IRQ_NONE; /* not handled */ 306 return IRQ_NONE; /* not handled */
251} 307}
252 308
309static const struct at91_rtc_config at91rm9200_config = {
310};
311
312static const struct at91_rtc_config at91sam9x5_config = {
313 .use_shadow_imr = true,
314};
315
316#ifdef CONFIG_OF
317static const struct of_device_id at91_rtc_dt_ids[] = {
318 {
319 .compatible = "atmel,at91rm9200-rtc",
320 .data = &at91rm9200_config,
321 }, {
322 .compatible = "atmel,at91sam9x5-rtc",
323 .data = &at91sam9x5_config,
324 }, {
325 /* sentinel */
326 }
327};
328MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
329#endif
330
331static const struct at91_rtc_config *
332at91_rtc_get_config(struct platform_device *pdev)
333{
334 const struct of_device_id *match;
335
336 if (pdev->dev.of_node) {
337 match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
338 if (!match)
339 return NULL;
340 return (const struct at91_rtc_config *)match->data;
341 }
342
343 return &at91rm9200_config;
344}
345
253static const struct rtc_class_ops at91_rtc_ops = { 346static const struct rtc_class_ops at91_rtc_ops = {
254 .read_time = at91_rtc_readtime, 347 .read_time = at91_rtc_readtime,
255 .set_time = at91_rtc_settime, 348 .set_time = at91_rtc_settime,
@@ -268,6 +361,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
268 struct resource *regs; 361 struct resource *regs;
269 int ret = 0; 362 int ret = 0;
270 363
364 at91_rtc_config = at91_rtc_get_config(pdev);
365 if (!at91_rtc_config)
366 return -ENODEV;
367
271 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 368 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
272 if (!regs) { 369 if (!regs) {
273 dev_err(&pdev->dev, "no mmio resource defined\n"); 370 dev_err(&pdev->dev, "no mmio resource defined\n");
@@ -290,7 +387,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
290 at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */ 387 at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
291 388
292 /* Disable all interrupts */ 389 /* Disable all interrupts */
293 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 390 at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
294 AT91_RTC_SECEV | AT91_RTC_TIMEV | 391 AT91_RTC_SECEV | AT91_RTC_TIMEV |
295 AT91_RTC_CALEV); 392 AT91_RTC_CALEV);
296 393
@@ -335,7 +432,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
335 struct rtc_device *rtc = platform_get_drvdata(pdev); 432 struct rtc_device *rtc = platform_get_drvdata(pdev);
336 433
337 /* Disable all interrupts */ 434 /* Disable all interrupts */
338 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 435 at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
339 AT91_RTC_SECEV | AT91_RTC_TIMEV | 436 AT91_RTC_SECEV | AT91_RTC_TIMEV |
340 AT91_RTC_CALEV); 437 AT91_RTC_CALEV);
341 free_irq(irq, pdev); 438 free_irq(irq, pdev);
@@ -358,13 +455,13 @@ static int at91_rtc_suspend(struct device *dev)
358 /* this IRQ is shared with DBGU and other hardware which isn't 455 /* this IRQ is shared with DBGU and other hardware which isn't
359 * necessarily doing PM like we are... 456 * necessarily doing PM like we are...
360 */ 457 */
361 at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) 458 at91_rtc_imr = at91_rtc_read_imr()
362 & (AT91_RTC_ALARM|AT91_RTC_SECEV); 459 & (AT91_RTC_ALARM|AT91_RTC_SECEV);
363 if (at91_rtc_imr) { 460 if (at91_rtc_imr) {
364 if (device_may_wakeup(dev)) 461 if (device_may_wakeup(dev))
365 enable_irq_wake(irq); 462 enable_irq_wake(irq);
366 else 463 else
367 at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); 464 at91_rtc_write_idr(at91_rtc_imr);
368 } 465 }
369 return 0; 466 return 0;
370} 467}
@@ -375,7 +472,7 @@ static int at91_rtc_resume(struct device *dev)
375 if (device_may_wakeup(dev)) 472 if (device_may_wakeup(dev))
376 disable_irq_wake(irq); 473 disable_irq_wake(irq);
377 else 474 else
378 at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); 475 at91_rtc_write_ier(at91_rtc_imr);
379 } 476 }
380 return 0; 477 return 0;
381} 478}
@@ -383,12 +480,6 @@ static int at91_rtc_resume(struct device *dev)
383 480
384static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); 481static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
385 482
386static const struct of_device_id at91_rtc_dt_ids[] = {
387 { .compatible = "atmel,at91rm9200-rtc" },
388 { /* sentinel */ }
389};
390MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
391
392static struct platform_driver at91_rtc_driver = { 483static struct platform_driver at91_rtc_driver = {
393 .remove = __exit_p(at91_rtc_remove), 484 .remove = __exit_p(at91_rtc_remove),
394 .driver = { 485 .driver = {
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index cc5bea9c4b1c..f1cb706445c7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -854,6 +854,9 @@ static int cmos_resume(struct device *dev)
854 } 854 }
855 855
856 spin_lock_irq(&rtc_lock); 856 spin_lock_irq(&rtc_lock);
857 if (device_may_wakeup(dev))
858 hpet_rtc_timer_init();
859
857 do { 860 do {
858 CMOS_WRITE(tmp, RTC_CONTROL); 861 CMOS_WRITE(tmp, RTC_CONTROL);
859 hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK); 862 hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
@@ -869,7 +872,6 @@ static int cmos_resume(struct device *dev)
869 rtc_update_irq(cmos->rtc, 1, mask); 872 rtc_update_irq(cmos->rtc, 1, mask);
870 tmp &= ~RTC_AIE; 873 tmp &= ~RTC_AIE;
871 hpet_mask_rtc_irq_bit(RTC_AIE); 874 hpet_mask_rtc_irq_bit(RTC_AIE);
872 hpet_rtc_timer_init();
873 } while (mask & RTC_AIE); 875 } while (mask & RTC_AIE);
874 spin_unlock_irq(&rtc_lock); 876 spin_unlock_irq(&rtc_lock);
875 } 877 }
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 459c2ffc95a6..426901cef14f 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -273,6 +273,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
273 return ret; 273 return ret;
274 } 274 }
275 275
276 device_init_wakeup(&pdev->dev, 1);
277
276 platform_set_drvdata(pdev, rtc); 278 platform_set_drvdata(pdev, rtc);
277 rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev), 279 rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev),
278 &tps6586x_rtc_ops, THIS_MODULE); 280 &tps6586x_rtc_ops, THIS_MODULE);
@@ -292,7 +294,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
292 goto fail_rtc_register; 294 goto fail_rtc_register;
293 } 295 }
294 disable_irq(rtc->irq); 296 disable_irq(rtc->irq);
295 device_set_wakeup_capable(&pdev->dev, 1);
296 return 0; 297 return 0;
297 298
298fail_rtc_register: 299fail_rtc_register:
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 8751a5240c99..b2eab34f38d9 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
524 } 524 }
525 525
526 platform_set_drvdata(pdev, rtc); 526 platform_set_drvdata(pdev, rtc);
527 device_init_wakeup(&pdev->dev, 1);
527 return 0; 528 return 0;
528 529
529out2: 530out2:
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 4ffa66c87ea5..9ca3996f65b2 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2040,6 +2040,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2040 netiucv_setup_netdevice); 2040 netiucv_setup_netdevice);
2041 if (!dev) 2041 if (!dev)
2042 return NULL; 2042 return NULL;
2043 rtnl_lock();
2043 if (dev_alloc_name(dev, dev->name) < 0) 2044 if (dev_alloc_name(dev, dev->name) < 0)
2044 goto out_netdev; 2045 goto out_netdev;
2045 2046
@@ -2061,6 +2062,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2061out_fsm: 2062out_fsm:
2062 kfree_fsm(privptr->fsm); 2063 kfree_fsm(privptr->fsm);
2063out_netdev: 2064out_netdev:
2065 rtnl_unlock();
2064 free_netdev(dev); 2066 free_netdev(dev);
2065 return NULL; 2067 return NULL;
2066} 2068}
@@ -2100,6 +2102,7 @@ static ssize_t conn_write(struct device_driver *drv,
2100 2102
2101 rc = netiucv_register_device(dev); 2103 rc = netiucv_register_device(dev);
2102 if (rc) { 2104 if (rc) {
2105 rtnl_unlock();
2103 IUCV_DBF_TEXT_(setup, 2, 2106 IUCV_DBF_TEXT_(setup, 2,
2104 "ret %d from netiucv_register_device\n", rc); 2107 "ret %d from netiucv_register_device\n", rc);
2105 goto out_free_ndev; 2108 goto out_free_ndev;
@@ -2109,7 +2112,8 @@ static ssize_t conn_write(struct device_driver *drv,
2109 priv = netdev_priv(dev); 2112 priv = netdev_priv(dev);
2110 SET_NETDEV_DEV(dev, priv->dev); 2113 SET_NETDEV_DEV(dev, priv->dev);
2111 2114
2112 rc = register_netdev(dev); 2115 rc = register_netdevice(dev);
2116 rtnl_unlock();
2113 if (rc) 2117 if (rc)
2114 goto out_unreg; 2118 goto out_unreg;
2115 2119
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 439c012be763..b63d534192e3 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
186 file->f_pos += offset; 186 file->f_pos += offset;
187 break; 187 break;
188 case 2: 188 case 2:
189 file->f_pos = debug->buffer_len - offset; 189 file->f_pos = debug->buffer_len + offset;
190 break; 190 break;
191 default: 191 default:
192 return -EINVAL; 192 return -EINVAL;
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index adc1f7f471f5..85e1ffd0e5c5 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
174 pos = file->f_pos + offset; 174 pos = file->f_pos + offset;
175 break; 175 break;
176 case 2: 176 case 2:
177 pos = fnic_dbg_prt->buffer_len - offset; 177 pos = fnic_dbg_prt->buffer_len + offset;
178 } 178 }
179 return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ? 179 return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
180 -EINVAL : (file->f_pos = pos); 180 -EINVAL : (file->f_pos = pos);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index f63f5ff7f274..f525ecb7a9c6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
1178 pos = file->f_pos + off; 1178 pos = file->f_pos + off;
1179 break; 1179 break;
1180 case 2: 1180 case 2:
1181 pos = debug->len - off; 1181 pos = debug->len + off;
1182 } 1182 }
1183 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos); 1183 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
1184} 1184}
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 60cfae51c713..eab593eaaafa 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -89,7 +89,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
89 if ((mask & hspi_read(hspi, SPSR)) == val) 89 if ((mask & hspi_read(hspi, SPSR)) == val)
90 return 0; 90 return 0;
91 91
92 msleep(20); 92 udelay(10);
93 } 93 }
94 94
95 dev_err(hspi->dev, "timeout\n"); 95 dev_err(hspi->dev, "timeout\n");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 35f60bd252dd..637d728fbeb5 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1487,7 +1487,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
1487 return 0; 1487 return 0;
1488 1488
1489err_spi_register_master: 1489err_spi_register_master:
1490 free_irq(board_dat->pdev->irq, board_dat); 1490 free_irq(board_dat->pdev->irq, data);
1491err_request_irq: 1491err_request_irq:
1492 pch_spi_free_resources(board_dat, data); 1492 pch_spi_free_resources(board_dat, data);
1493err_spi_get_resources: 1493err_spi_get_resources:
@@ -1667,6 +1667,7 @@ static int pch_spi_probe(struct pci_dev *pdev,
1667 pd_dev = platform_device_alloc("pch-spi", i); 1667 pd_dev = platform_device_alloc("pch-spi", i);
1668 if (!pd_dev) { 1668 if (!pd_dev) {
1669 dev_err(&pdev->dev, "platform_device_alloc failed\n"); 1669 dev_err(&pdev->dev, "platform_device_alloc failed\n");
1670 retval = -ENOMEM;
1670 goto err_platform_device; 1671 goto err_platform_device;
1671 } 1672 }
1672 pd_dev_save->pd_save[i] = pd_dev; 1673 pd_dev_save->pd_save[i] = pd_dev;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index e1d769607425..34d18dcfa0db 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -267,7 +267,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
267{ 267{
268 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 268 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
269 u32 ipif_ier; 269 u32 ipif_ier;
270 u16 cr;
271 270
272 /* We get here with transmitter inhibited */ 271 /* We get here with transmitter inhibited */
273 272
@@ -276,7 +275,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
276 xspi->remaining_bytes = t->len; 275 xspi->remaining_bytes = t->len;
277 INIT_COMPLETION(xspi->done); 276 INIT_COMPLETION(xspi->done);
278 277
279 xilinx_spi_fill_tx_fifo(xspi);
280 278
281 /* Enable the transmit empty interrupt, which we use to determine 279 /* Enable the transmit empty interrupt, which we use to determine
282 * progress on the transmission. 280 * progress on the transmission.
@@ -285,12 +283,41 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
285 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, 283 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
286 xspi->regs + XIPIF_V123B_IIER_OFFSET); 284 xspi->regs + XIPIF_V123B_IIER_OFFSET);
287 285
288 /* Start the transfer by not inhibiting the transmitter any longer */ 286 for (;;) {
289 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & 287 u16 cr;
290 ~XSPI_CR_TRANS_INHIBIT; 288 u8 sr;
291 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); 289
290 xilinx_spi_fill_tx_fifo(xspi);
291
292 /* Start the transfer by not inhibiting the transmitter any
293 * longer
294 */
295 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
296 ~XSPI_CR_TRANS_INHIBIT;
297 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
298
299 wait_for_completion(&xspi->done);
300
301 /* A transmit has just completed. Process received data and
302 * check for more data to transmit. Always inhibit the
303 * transmitter while the Isr refills the transmit register/FIFO,
304 * or make sure it is stopped if we're done.
305 */
306 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
307 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
308 xspi->regs + XSPI_CR_OFFSET);
309
310 /* Read out all the data from the Rx FIFO */
311 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
312 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
313 xspi->rx_fn(xspi);
314 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
315 }
292 316
293 wait_for_completion(&xspi->done); 317 /* See if there is more data to send */
318 if (!xspi->remaining_bytes > 0)
319 break;
320 }
294 321
295 /* Disable the transmit empty interrupt */ 322 /* Disable the transmit empty interrupt */
296 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); 323 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
@@ -314,38 +341,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
314 xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); 341 xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
315 342
316 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ 343 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
317 u16 cr; 344 complete(&xspi->done);
318 u8 sr;
319
320 /* A transmit has just completed. Process received data and
321 * check for more data to transmit. Always inhibit the
322 * transmitter while the Isr refills the transmit register/FIFO,
323 * or make sure it is stopped if we're done.
324 */
325 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
326 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
327 xspi->regs + XSPI_CR_OFFSET);
328
329 /* Read out all the data from the Rx FIFO */
330 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
331 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
332 xspi->rx_fn(xspi);
333 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
334 }
335
336 /* See if there is more data to send */
337 if (xspi->remaining_bytes > 0) {
338 xilinx_spi_fill_tx_fifo(xspi);
339 /* Start the transfer by not inhibiting the
340 * transmitter any longer
341 */
342 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
343 } else {
344 /* No more data to send.
345 * Indicate the transfer is completed.
346 */
347 complete(&xspi->done);
348 }
349 } 345 }
350 346
351 return IRQ_HANDLED; 347 return IRQ_HANDLED;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 49b098bedf9b..475c9c114689 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -276,8 +276,9 @@ static void ci_role_work(struct work_struct *work)
276 276
277 ci_role_stop(ci); 277 ci_role_stop(ci);
278 ci_role_start(ci, role); 278 ci_role_start(ci, role);
279 enable_irq(ci->irq);
280 } 279 }
280
281 enable_irq(ci->irq);
281} 282}
282 283
283static irqreturn_t ci_irq(int irq, void *data) 284static irqreturn_t ci_irq(int irq, void *data)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 519ead2443c5..b501346484ae 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1678,8 +1678,11 @@ static int udc_start(struct ci13xxx *ci)
1678 1678
1679 ci->gadget.ep0 = &ci->ep0in->ep; 1679 ci->gadget.ep0 = &ci->ep0in->ep;
1680 1680
1681 if (ci->global_phy) 1681 if (ci->global_phy) {
1682 ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2); 1682 ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
1683 if (IS_ERR(ci->transceiver))
1684 ci->transceiver = NULL;
1685 }
1683 1686
1684 if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) { 1687 if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
1685 if (ci->transceiver == NULL) { 1688 if (ci->transceiver == NULL) {
@@ -1694,7 +1697,7 @@ static int udc_start(struct ci13xxx *ci)
1694 goto put_transceiver; 1697 goto put_transceiver;
1695 } 1698 }
1696 1699
1697 if (!IS_ERR_OR_NULL(ci->transceiver)) { 1700 if (ci->transceiver) {
1698 retval = otg_set_peripheral(ci->transceiver->otg, 1701 retval = otg_set_peripheral(ci->transceiver->otg,
1699 &ci->gadget); 1702 &ci->gadget);
1700 if (retval) 1703 if (retval)
@@ -1711,7 +1714,7 @@ static int udc_start(struct ci13xxx *ci)
1711 return retval; 1714 return retval;
1712 1715
1713remove_trans: 1716remove_trans:
1714 if (!IS_ERR_OR_NULL(ci->transceiver)) { 1717 if (ci->transceiver) {
1715 otg_set_peripheral(ci->transceiver->otg, NULL); 1718 otg_set_peripheral(ci->transceiver->otg, NULL);
1716 if (ci->global_phy) 1719 if (ci->global_phy)
1717 usb_put_phy(ci->transceiver); 1720 usb_put_phy(ci->transceiver);
@@ -1719,7 +1722,7 @@ remove_trans:
1719 1722
1720 dev_err(dev, "error = %i\n", retval); 1723 dev_err(dev, "error = %i\n", retval);
1721put_transceiver: 1724put_transceiver:
1722 if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy) 1725 if (ci->transceiver && ci->global_phy)
1723 usb_put_phy(ci->transceiver); 1726 usb_put_phy(ci->transceiver);
1724destroy_eps: 1727destroy_eps:
1725 destroy_eps(ci); 1728 destroy_eps(ci);
@@ -1747,7 +1750,7 @@ static void udc_stop(struct ci13xxx *ci)
1747 dma_pool_destroy(ci->td_pool); 1750 dma_pool_destroy(ci->td_pool);
1748 dma_pool_destroy(ci->qh_pool); 1751 dma_pool_destroy(ci->qh_pool);
1749 1752
1750 if (!IS_ERR_OR_NULL(ci->transceiver)) { 1753 if (ci->transceiver) {
1751 otg_set_peripheral(ci->transceiver->otg, NULL); 1754 otg_set_peripheral(ci->transceiver->otg, NULL);
1752 if (ci->global_phy) 1755 if (ci->global_phy)
1753 usb_put_phy(ci->transceiver); 1756 usb_put_phy(ci->transceiver);
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 090b411d893f..7d8dd5aad236 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -165,11 +165,12 @@ static void f81232_set_termios(struct tty_struct *tty,
165 /* FIXME - Stubbed out for now */ 165 /* FIXME - Stubbed out for now */
166 166
167 /* Don't change anything if nothing has changed */ 167 /* Don't change anything if nothing has changed */
168 if (!tty_termios_hw_change(&tty->termios, old_termios)) 168 if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
169 return; 169 return;
170 170
171 /* Do the real work here... */ 171 /* Do the real work here... */
172 tty_termios_copy_hw(&tty->termios, old_termios); 172 if (old_termios)
173 tty_termios_copy_hw(&tty->termios, old_termios);
173} 174}
174 175
175static int f81232_tiocmget(struct tty_struct *tty) 176static int f81232_tiocmget(struct tty_struct *tty)
@@ -187,12 +188,11 @@ static int f81232_tiocmset(struct tty_struct *tty,
187 188
188static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port) 189static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
189{ 190{
190 struct ktermios tmp_termios;
191 int result; 191 int result;
192 192
193 /* Setup termios */ 193 /* Setup termios */
194 if (tty) 194 if (tty)
195 f81232_set_termios(tty, port, &tmp_termios); 195 f81232_set_termios(tty, port, NULL);
196 196
197 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 197 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
198 if (result) { 198 if (result) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 7151659367a0..048cd44d51b1 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -284,7 +284,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
284 serial settings even to the same values as before. Thus 284 serial settings even to the same values as before. Thus
285 we actually need to filter in this specific case */ 285 we actually need to filter in this specific case */
286 286
287 if (!tty_termios_hw_change(&tty->termios, old_termios)) 287 if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
288 return; 288 return;
289 289
290 cflag = tty->termios.c_cflag; 290 cflag = tty->termios.c_cflag;
@@ -293,7 +293,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
293 if (!buf) { 293 if (!buf) {
294 dev_err(&port->dev, "%s - out of memory.\n", __func__); 294 dev_err(&port->dev, "%s - out of memory.\n", __func__);
295 /* Report back no change occurred */ 295 /* Report back no change occurred */
296 tty->termios = *old_termios; 296 if (old_termios)
297 tty->termios = *old_termios;
297 return; 298 return;
298 } 299 }
299 300
@@ -433,7 +434,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
433 control = priv->line_control; 434 control = priv->line_control;
434 if ((cflag & CBAUD) == B0) 435 if ((cflag & CBAUD) == B0)
435 priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS); 436 priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
436 else if ((old_termios->c_cflag & CBAUD) == B0) 437 else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
437 priv->line_control |= (CONTROL_DTR | CONTROL_RTS); 438 priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
438 if (control != priv->line_control) { 439 if (control != priv->line_control) {
439 control = priv->line_control; 440 control = priv->line_control;
@@ -492,7 +493,6 @@ static void pl2303_close(struct usb_serial_port *port)
492 493
493static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) 494static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
494{ 495{
495 struct ktermios tmp_termios;
496 struct usb_serial *serial = port->serial; 496 struct usb_serial *serial = port->serial;
497 struct pl2303_serial_private *spriv = usb_get_serial_data(serial); 497 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
498 int result; 498 int result;
@@ -508,7 +508,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
508 508
509 /* Setup termios */ 509 /* Setup termios */
510 if (tty) 510 if (tty)
511 pl2303_set_termios(tty, port, &tmp_termios); 511 pl2303_set_termios(tty, port, NULL);
512 512
513 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 513 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
514 if (result) { 514 if (result) {
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index cf3df793c2b7..ddf6c47137dc 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -291,7 +291,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
291 struct spcp8x5_private *priv = usb_get_serial_port_data(port); 291 struct spcp8x5_private *priv = usb_get_serial_port_data(port);
292 unsigned long flags; 292 unsigned long flags;
293 unsigned int cflag = tty->termios.c_cflag; 293 unsigned int cflag = tty->termios.c_cflag;
294 unsigned int old_cflag = old_termios->c_cflag;
295 unsigned short uartdata; 294 unsigned short uartdata;
296 unsigned char buf[2] = {0, 0}; 295 unsigned char buf[2] = {0, 0};
297 int baud; 296 int baud;
@@ -299,15 +298,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
299 u8 control; 298 u8 control;
300 299
301 /* check that they really want us to change something */ 300 /* check that they really want us to change something */
302 if (!tty_termios_hw_change(&tty->termios, old_termios)) 301 if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
303 return; 302 return;
304 303
305 /* set DTR/RTS active */ 304 /* set DTR/RTS active */
306 spin_lock_irqsave(&priv->lock, flags); 305 spin_lock_irqsave(&priv->lock, flags);
307 control = priv->line_control; 306 control = priv->line_control;
308 if ((old_cflag & CBAUD) == B0) { 307 if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
309 priv->line_control |= MCR_DTR; 308 priv->line_control |= MCR_DTR;
310 if (!(old_cflag & CRTSCTS)) 309 if (!(old_termios->c_cflag & CRTSCTS))
311 priv->line_control |= MCR_RTS; 310 priv->line_control |= MCR_RTS;
312 } 311 }
313 if (control != priv->line_control) { 312 if (control != priv->line_control) {
@@ -394,7 +393,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
394 393
395static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port) 394static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
396{ 395{
397 struct ktermios tmp_termios;
398 struct usb_serial *serial = port->serial; 396 struct usb_serial *serial = port->serial;
399 struct spcp8x5_private *priv = usb_get_serial_port_data(port); 397 struct spcp8x5_private *priv = usb_get_serial_port_data(port);
400 int ret; 398 int ret;
@@ -411,7 +409,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
411 spcp8x5_set_ctrl_line(port, priv->line_control); 409 spcp8x5_set_ctrl_line(port, priv->line_control);
412 410
413 if (tty) 411 if (tty)
414 spcp8x5_set_termios(tty, port, &tmp_termios); 412 spcp8x5_set_termios(tty, port, NULL);
415 413
416 port->port.drain_delay = 256; 414 port->port.drain_delay = 256;
417 415
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index acb7121a9316..6d78736563de 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1360,7 +1360,7 @@ static const struct file_operations vfio_device_fops = {
1360 */ 1360 */
1361static char *vfio_devnode(struct device *dev, umode_t *mode) 1361static char *vfio_devnode(struct device *dev, umode_t *mode)
1362{ 1362{
1363 if (MINOR(dev->devt) == 0) 1363 if (mode && (MINOR(dev->devt) == 0))
1364 *mode = S_IRUGO | S_IWUGO; 1364 *mode = S_IRUGO | S_IWUGO;
1365 1365
1366 return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); 1366 return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2b51e2336aa2..f80d3dd41d8c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -155,14 +155,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
155 155
156static void vhost_net_clear_ubuf_info(struct vhost_net *n) 156static void vhost_net_clear_ubuf_info(struct vhost_net *n)
157{ 157{
158
159 bool zcopy;
160 int i; 158 int i;
161 159
162 for (i = 0; i < n->dev.nvqs; ++i) { 160 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
163 zcopy = vhost_net_zcopy_mask & (0x1 << i); 161 kfree(n->vqs[i].ubuf_info);
164 if (zcopy) 162 n->vqs[i].ubuf_info = NULL;
165 kfree(n->vqs[i].ubuf_info);
166 } 163 }
167} 164}
168 165
@@ -171,7 +168,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
171 bool zcopy; 168 bool zcopy;
172 int i; 169 int i;
173 170
174 for (i = 0; i < n->dev.nvqs; ++i) { 171 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
175 zcopy = vhost_net_zcopy_mask & (0x1 << i); 172 zcopy = vhost_net_zcopy_mask & (0x1 << i);
176 if (!zcopy) 173 if (!zcopy)
177 continue; 174 continue;
@@ -183,12 +180,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
183 return 0; 180 return 0;
184 181
185err: 182err:
186 while (i--) { 183 vhost_net_clear_ubuf_info(n);
187 zcopy = vhost_net_zcopy_mask & (0x1 << i);
188 if (!zcopy)
189 continue;
190 kfree(n->vqs[i].ubuf_info);
191 }
192 return -ENOMEM; 184 return -ENOMEM;
193} 185}
194 186
@@ -196,12 +188,12 @@ void vhost_net_vq_reset(struct vhost_net *n)
196{ 188{
197 int i; 189 int i;
198 190
191 vhost_net_clear_ubuf_info(n);
192
199 for (i = 0; i < VHOST_NET_VQ_MAX; i++) { 193 for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
200 n->vqs[i].done_idx = 0; 194 n->vqs[i].done_idx = 0;
201 n->vqs[i].upend_idx = 0; 195 n->vqs[i].upend_idx = 0;
202 n->vqs[i].ubufs = NULL; 196 n->vqs[i].ubufs = NULL;
203 kfree(n->vqs[i].ubuf_info);
204 n->vqs[i].ubuf_info = NULL;
205 n->vqs[i].vhost_hlen = 0; 197 n->vqs[i].vhost_hlen = 0;
206 n->vqs[i].sock_hlen = 0; 198 n->vqs[i].sock_hlen = 0;
207 } 199 }
@@ -436,7 +428,8 @@ static void handle_tx(struct vhost_net *net)
436 kref_get(&ubufs->kref); 428 kref_get(&ubufs->kref);
437 } 429 }
438 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; 430 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
439 } 431 } else
432 msg.msg_control = NULL;
440 /* TODO: Check specific error and bomb out unless ENOBUFS? */ 433 /* TODO: Check specific error and bomb out unless ENOBUFS? */
441 err = sock->ops->sendmsg(NULL, sock, &msg, len); 434 err = sock->ops->sendmsg(NULL, sock, &msg, len);
442 if (unlikely(err < 0)) { 435 if (unlikely(err < 0)) {
@@ -1053,6 +1046,10 @@ static long vhost_net_set_owner(struct vhost_net *n)
1053 int r; 1046 int r;
1054 1047
1055 mutex_lock(&n->dev.mutex); 1048 mutex_lock(&n->dev.mutex);
1049 if (vhost_dev_has_owner(&n->dev)) {
1050 r = -EBUSY;
1051 goto out;
1052 }
1056 r = vhost_net_set_ubuf_info(n); 1053 r = vhost_net_set_ubuf_info(n);
1057 if (r) 1054 if (r)
1058 goto out; 1055 goto out;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index beee7f5787e6..60aa5ad09a2f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -344,13 +344,19 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
344} 344}
345 345
346/* Caller should have device mutex */ 346/* Caller should have device mutex */
347bool vhost_dev_has_owner(struct vhost_dev *dev)
348{
349 return dev->mm;
350}
351
352/* Caller should have device mutex */
347long vhost_dev_set_owner(struct vhost_dev *dev) 353long vhost_dev_set_owner(struct vhost_dev *dev)
348{ 354{
349 struct task_struct *worker; 355 struct task_struct *worker;
350 int err; 356 int err;
351 357
352 /* Is there an owner already? */ 358 /* Is there an owner already? */
353 if (dev->mm) { 359 if (vhost_dev_has_owner(dev)) {
354 err = -EBUSY; 360 err = -EBUSY;
355 goto err_mm; 361 goto err_mm;
356 } 362 }
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a7ad63592987..64adcf99ff33 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -133,6 +133,7 @@ struct vhost_dev {
133 133
134long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); 134long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
135long vhost_dev_set_owner(struct vhost_dev *dev); 135long vhost_dev_set_owner(struct vhost_dev *dev);
136bool vhost_dev_has_owner(struct vhost_dev *dev);
136long vhost_dev_check_owner(struct vhost_dev *); 137long vhost_dev_check_owner(struct vhost_dev *);
137struct vhost_memory *vhost_dev_reset_owner_prepare(void); 138struct vhost_memory *vhost_dev_reset_owner_prepare(void);
138void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *); 139void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index cc072c66c766..0f0493c63371 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -379,10 +379,10 @@ static int xen_tmem_init(void)
379#ifdef CONFIG_FRONTSWAP 379#ifdef CONFIG_FRONTSWAP
380 if (tmem_enabled && frontswap) { 380 if (tmem_enabled && frontswap) {
381 char *s = ""; 381 char *s = "";
382 struct frontswap_ops *old_ops = 382 struct frontswap_ops *old_ops;
383 frontswap_register_ops(&tmem_frontswap_ops);
384 383
385 tmem_frontswap_poolid = -1; 384 tmem_frontswap_poolid = -1;
385 old_ops = frontswap_register_ops(&tmem_frontswap_ops);
386 if (IS_ERR(old_ops) || old_ops) { 386 if (IS_ERR(old_ops) || old_ops) {
387 if (IS_ERR(old_ops)) 387 if (IS_ERR(old_ops))
388 return PTR_ERR(old_ops); 388 return PTR_ERR(old_ops);