author	David S. Miller <davem@davemloft.net>	2009-04-16 20:35:26 -0400
committer	David S. Miller <davem@davemloft.net>	2009-04-16 20:35:26 -0400
commit	a54bfa40fd16aeb90bc556189221576f746f8567 (patch)
tree	176bb7a99ffab5f42f0dd4e9671f335be3f3efa0 /drivers
parent	fe957c40ec5e2763b9977c565beab3bde3aaf85b (diff)
parent	134ffb4cad92a6aa534e55a9be145bca780a32c1 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/acpi/acpica/hwvalid.c	1
-rw-r--r--	drivers/acpi/proc.c	13
-rw-r--r--	drivers/acpi/processor_idle.c	3
-rw-r--r--	drivers/acpi/scan.c	31
-rw-r--r--	drivers/acpi/sleep.h	3
-rw-r--r--	drivers/acpi/thermal.c	68
-rw-r--r--	drivers/acpi/video.c	30
-rw-r--r--	drivers/acpi/wakeup.c	30
-rw-r--r--	drivers/ata/ahci.c	57
-rw-r--r--	drivers/ata/libata-core.c	4
-rw-r--r--	drivers/ata/sata_via.c	2
-rw-r--r--	drivers/block/xsysace.c	12
-rw-r--r--	drivers/i2c/algos/i2c-algo-pca.c	2
-rw-r--r--	drivers/i2c/busses/Kconfig	6
-rw-r--r--	drivers/i2c/i2c-core.c	3
-rw-r--r--	drivers/ide/at91_ide.c	91
-rw-r--r--	drivers/ide/falconide.c	13
-rw-r--r--	drivers/ide/ide-acpi.c	5
-rw-r--r--	drivers/ide/ide-atapi.c	38
-rw-r--r--	drivers/ide/ide-cd.c	288
-rw-r--r--	drivers/ide/ide-disk.c	70
-rw-r--r--	drivers/ide/ide-disk_proc.c	6
-rw-r--r--	drivers/ide/ide-dma-sff.c	9
-rw-r--r--	drivers/ide/ide-h8300.c	101
-rw-r--r--	drivers/ide/ide-io-std.c	75
-rw-r--r--	drivers/ide/ide-io.c	13
-rw-r--r--	drivers/ide/ide-ioctls.c	14
-rw-r--r--	drivers/ide/ide-iops.c	20
-rw-r--r--	drivers/ide/ide-lib.c	28
-rw-r--r--	drivers/ide/ide-park.c	3
-rw-r--r--	drivers/ide/ide-pm.c	3
-rw-r--r--	drivers/ide/ide-probe.c	18
-rw-r--r--	drivers/ide/ide-proc.c	4
-rw-r--r--	drivers/ide/ide-taskfile.c	97
-rw-r--r--	drivers/ide/ns87415.c	34
-rw-r--r--	drivers/ide/q40ide.c	14
-rw-r--r--	drivers/ide/scc_pata.c	71
-rw-r--r--	drivers/ide/tx4938ide.c	89
-rw-r--r--	drivers/ide/tx4939ide.c	110
-rw-r--r--	drivers/infiniband/core/cma.c	45
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_hal.c	10
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_hal.h	6
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch.c	11
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch.h	5
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_cm.c	116
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_cm.h	3
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_qp.c	4
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	3
-rw-r--r--	drivers/infiniband/hw/nes/nes.h	4
-rw-r--r--	drivers/infiniband/hw/nes/nes_cm.c	22
-rw-r--r--	drivers/infiniband/hw/nes/nes_cm.h	1
-rw-r--r--	drivers/infiniband/hw/nes/nes_hw.c	389
-rw-r--r--	drivers/infiniband/hw/nes/nes_hw.h	2
-rw-r--r--	drivers/infiniband/hw/nes/nes_nic.c	52
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_vlan.c	25
-rw-r--r--	drivers/input/input.c	13
-rw-r--r--	drivers/input/keyboard/atkbd.c	135
-rw-r--r--	drivers/input/keyboard/bf54x-keys.c	4
-rw-r--r--	drivers/input/keyboard/hilkbd.c	140
-rw-r--r--	drivers/input/misc/Kconfig	23
-rw-r--r--	drivers/input/misc/Makefile	28
-rw-r--r--	drivers/input/misc/ati_remote2.c	277
-rw-r--r--	drivers/input/misc/rb532_button.c	120
-rw-r--r--	drivers/input/misc/rotary_encoder.c	221
-rw-r--r--	drivers/input/mouse/Kconfig	11
-rw-r--r--	drivers/input/mouse/Makefile	9
-rw-r--r--	drivers/input/mouse/hgpk.c	2
-rw-r--r--	drivers/input/mouse/maplemouse.c	147
-rw-r--r--	drivers/input/mouse/pc110pad.c	5
-rw-r--r--	drivers/input/serio/i8042-x86ia64io.h	8
-rw-r--r--	drivers/input/touchscreen/Kconfig	58
-rw-r--r--	drivers/input/touchscreen/Makefile	3
-rw-r--r--	drivers/input/touchscreen/ad7877.c	844
-rw-r--r--	drivers/input/touchscreen/ad7879.c	782
-rw-r--r--	drivers/input/touchscreen/mainstone-wm97xx.c	3
-rw-r--r--	drivers/input/touchscreen/ucb1400_ts.c	5
-rw-r--r--	drivers/input/touchscreen/wm97xx-core.c	13
-rw-r--r--	drivers/input/touchscreen/zylonite-wm97xx.c	240
-rw-r--r--	drivers/leds/leds-gpio.c	2
-rw-r--r--	drivers/md/dm-ioctl.c	21
-rw-r--r--	drivers/md/dm-kcopyd.c	23
-rw-r--r--	drivers/md/dm-linear.c	1
-rw-r--r--	drivers/md/dm-table.c	59
-rw-r--r--	drivers/md/dm.c	199
-rw-r--r--	drivers/md/dm.h	1
-rw-r--r--	drivers/media/dvb/ttpci/Kconfig	23
-rw-r--r--	drivers/media/dvb/ttpci/Makefile	9
-rw-r--r--	drivers/media/dvb/ttpci/av7110.c	16
-rw-r--r--	drivers/media/dvb/ttpci/av7110_hw.c	35
-rw-r--r--	drivers/media/dvb/ttpci/av7110_hw.h	3
-rw-r--r--	drivers/media/dvb/ttpci/fdump.c	44
-rw-r--r--	drivers/mmc/core/mmc.c	2
-rw-r--r--	drivers/mmc/core/sd.c	21
-rw-r--r--	drivers/mmc/host/imxmmc.c	19
-rw-r--r--	drivers/mmc/host/mmc_spi.c	188
-rw-r--r--	drivers/mmc/host/omap_hsmmc.c	7
-rw-r--r--	drivers/mmc/host/sdhci-pci.c	2
-rw-r--r--	drivers/mmc/host/sdhci.c	2
-rw-r--r--	drivers/mmc/host/wbsd.c	2
-rw-r--r--	drivers/net/Kconfig	23
-rw-r--r--	drivers/net/Makefile	1
-rw-r--r--	drivers/net/a2065.c	17
-rw-r--r--	drivers/net/ariadne.c	19
-rw-r--r--	drivers/net/arm/am79c961a.c	24
-rw-r--r--	drivers/net/arm/at91_ether.c	32
-rw-r--r--	drivers/net/arm/ep93xx_eth.c	19
-rw-r--r--	drivers/net/arm/ether1.c	19
-rw-r--r--	drivers/net/arm/ether3.c	19
-rw-r--r--	drivers/net/atarilance.c	18
-rw-r--r--	drivers/net/au1000_eth.c	23
-rw-r--r--	drivers/net/benet/be_ethtool.c	4
-rw-r--r--	drivers/net/bfin_mac.c	24
-rw-r--r--	drivers/net/bnx2.c	26
-rw-r--r--	drivers/net/bonding/bond_alb.c	2
-rw-r--r--	drivers/net/bonding/bond_main.c	5
-rw-r--r--	drivers/net/bonding/bond_sysfs.c	14
-rw-r--r--	drivers/net/cris/eth_v10.c	30
-rw-r--r--	drivers/net/declance.c	17
-rw-r--r--	drivers/net/e1000/e1000_main.c	48
-rw-r--r--	drivers/net/e1000e/netdev.c	61
-rw-r--r--	drivers/net/ehea/ehea_main.c	4
-rw-r--r--	drivers/net/eql.c	2
-rw-r--r--	drivers/net/fec.c	1
-rw-r--r--	drivers/net/forcedeth.c	4
-rw-r--r--	drivers/net/fs_enet/fs_enet-main.c	27
-rw-r--r--	drivers/net/gianfar.c	2
-rw-r--r--	drivers/net/ibm_newemac/core.c	41
-rw-r--r--	drivers/net/igb/e1000_mac.c	2
-rw-r--r--	drivers/net/igb/e1000_mac.h	1
-rw-r--r--	drivers/net/igb/e1000_mbx.c	17
-rw-r--r--	drivers/net/igb/e1000_mbx.h	2
-rw-r--r--	drivers/net/igb/igb_main.c	166
-rw-r--r--	drivers/net/igbvf/Makefile	38
-rw-r--r--	drivers/net/igbvf/defines.h	125
-rw-r--r--	drivers/net/igbvf/ethtool.c	540
-rw-r--r--	drivers/net/igbvf/igbvf.h	332
-rw-r--r--	drivers/net/igbvf/mbx.c	350
-rw-r--r--	drivers/net/igbvf/mbx.h	75
-rw-r--r--	drivers/net/igbvf/netdev.c	2922
-rw-r--r--	drivers/net/igbvf/regs.h	108
-rw-r--r--	drivers/net/igbvf/vf.c	398
-rw-r--r--	drivers/net/igbvf/vf.h	264
-rw-r--r--	drivers/net/ioc3-eth.c	22
-rw-r--r--	drivers/net/isa-skeleton.c	20
-rw-r--r--	drivers/net/ixgbe/ixgbe_82598.c	59
-rw-r--r--	drivers/net/ixgbe/ixgbe_82599.c	40
-rw-r--r--	drivers/net/ixgbe/ixgbe_common.c	55
-rw-r--r--	drivers/net/ixgbe/ixgbe_common.h	3
-rw-r--r--	drivers/net/ixgbe/ixgbe_ethtool.c	24
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	57
-rw-r--r--	drivers/net/mac89x0.c	18
-rw-r--r--	drivers/net/macb.c	19
-rw-r--r--	drivers/net/macsonic.c	19
-rw-r--r--	drivers/net/mlx4/port.c	5
-rw-r--r--	drivers/net/mv643xx_eth.c	4
-rw-r--r--	drivers/net/myri10ge/myri10ge.c	1
-rw-r--r--	drivers/net/niu.c	3
-rw-r--r--	drivers/net/phy/fixed.c	5
-rw-r--r--	drivers/net/phy/marvell.c	54
-rw-r--r--	drivers/net/phy/phy.c	14
-rw-r--r--	drivers/net/r6040.c	1
-rw-r--r--	drivers/net/sfc/efx.c	7
-rw-r--r--	drivers/net/sfc/falcon.c	4
-rw-r--r--	drivers/net/sh_eth.c	21
-rw-r--r--	drivers/net/skge.c	4
-rw-r--r--	drivers/net/smc91x.h	2
-rw-r--r--	drivers/net/smsc911x.c	2
-rw-r--r--	drivers/net/sun3_82586.c	19
-rw-r--r--	drivers/net/tc35815.c	27
-rw-r--r--	drivers/net/tg3.c	9
-rw-r--r--	drivers/net/tsi108_eth.c	20
-rw-r--r--	drivers/net/tun.c	5
-rw-r--r--	drivers/net/via-velocity.c	4
-rw-r--r--	drivers/net/xtsonic.c	19
-rw-r--r--	drivers/pci/dmar.c	11
-rw-r--r--	drivers/pci/intel-iommu.c	4
-rw-r--r--	drivers/platform/x86/fujitsu-laptop.c	60
-rw-r--r--	drivers/platform/x86/panasonic-laptop.c	28
-rw-r--r--	drivers/platform/x86/sony-laptop.c	30
-rw-r--r--	drivers/platform/x86/wmi.c	15
-rw-r--r--	drivers/power/pcf50633-charger.c	88
-rw-r--r--	drivers/power/pda_power.c	89
-rw-r--r--	drivers/scsi/qla1280.c	4
-rw-r--r--	drivers/scsi/scsi_priv.h	3
-rw-r--r--	drivers/scsi/scsi_wait_scan.c	2
-rw-r--r--	drivers/serial/Kconfig	4
-rw-r--r--	drivers/serial/max3100.c	927
-rw-r--r--	drivers/serial/sunsu.c	2
-rw-r--r--	drivers/usb/host/ohci-at91.c	4
-rw-r--r--	drivers/watchdog/Kconfig	2
-rw-r--r--	drivers/watchdog/Makefile	2
-rw-r--r--	drivers/watchdog/at91rm9200_wdt.c	6
-rw-r--r--	drivers/watchdog/i6300esb.c	155
-rw-r--r--	drivers/watchdog/ks8695_wdt.c	6
-rw-r--r--	drivers/watchdog/omap_wdt.c	6
-rw-r--r--	drivers/watchdog/orion5x_wdt.c	57
196 files changed, 11394 insertions, 2490 deletions
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index bd3c937b0ac0..7737afb157c3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -90,7 +90,6 @@ static const struct acpi_port_info acpi_protected_ports[] = {
90 {"PIT2", 0x0048, 0x004B, ACPI_OSI_WIN_XP}, 90 {"PIT2", 0x0048, 0x004B, ACPI_OSI_WIN_XP},
91 {"RTC", 0x0070, 0x0071, ACPI_OSI_WIN_XP}, 91 {"RTC", 0x0070, 0x0071, ACPI_OSI_WIN_XP},
92 {"CMOS", 0x0074, 0x0076, ACPI_OSI_WIN_XP}, 92 {"CMOS", 0x0074, 0x0076, ACPI_OSI_WIN_XP},
93 {"DMA1", 0x0081, 0x0083, ACPI_OSI_WIN_XP},
94 {"DMA1L", 0x0087, 0x0087, ACPI_OSI_WIN_XP}, 93 {"DMA1L", 0x0087, 0x0087, ACPI_OSI_WIN_XP},
95 {"DMA2", 0x0089, 0x008B, ACPI_OSI_WIN_XP}, 94 {"DMA2", 0x0089, 0x008B, ACPI_OSI_WIN_XP},
96 {"DMA2L", 0x008F, 0x008F, ACPI_OSI_WIN_XP}, 95 {"DMA2L", 0x008F, 0x008F, ACPI_OSI_WIN_XP},
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 05dfdc96802e..d0d550d22a6d 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -343,9 +343,6 @@ acpi_system_write_alarm(struct file *file,
 }
 #endif /* HAVE_ACPI_LEGACY_ALARM */
 
-extern struct list_head acpi_wakeup_device_list;
-extern spinlock_t acpi_device_lock;
-
 static int
 acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 {
@@ -353,7 +350,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 
 	seq_printf(seq, "Device\tS-state\t Status Sysfs node\n");
 
-	spin_lock(&acpi_device_lock);
+	mutex_lock(&acpi_device_lock);
 	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
 		struct acpi_device *dev =
 			container_of(node, struct acpi_device, wakeup_list);
@@ -361,7 +358,6 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 
 		if (!dev->wakeup.flags.valid)
 			continue;
-		spin_unlock(&acpi_device_lock);
 
 		ldev = acpi_get_physical_device(dev->handle);
 		seq_printf(seq, "%s\t S%d\t%c%-8s ",
@@ -376,9 +372,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 		seq_printf(seq, "\n");
 		put_device(ldev);
 
-		spin_lock(&acpi_device_lock);
 	}
-	spin_unlock(&acpi_device_lock);
+	mutex_unlock(&acpi_device_lock);
 	return 0;
 }
 
@@ -409,7 +404,7 @@ acpi_system_write_wakeup_device(struct file *file,
 	strbuf[len] = '\0';
 	sscanf(strbuf, "%s", str);
 
-	spin_lock(&acpi_device_lock);
+	mutex_lock(&acpi_device_lock);
 	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
 		struct acpi_device *dev =
 			container_of(node, struct acpi_device, wakeup_list);
@@ -446,7 +441,7 @@ acpi_system_write_wakeup_device(struct file *file,
 			}
 		}
 	}
-	spin_unlock(&acpi_device_lock);
+	mutex_unlock(&acpi_device_lock);
 	return count;
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 4e6e758bd397..6fe121434ffb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -145,6 +145,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
 	struct acpi_processor_power *pwr = &pr->power;
 	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 
+	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
+		return;
+
 	/*
 	 * Check, if one of the previous states already marked the lapic
 	 * unstable
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 20c23c049207..8ff510b91d88 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -24,7 +24,7 @@ extern struct acpi_device *acpi_root;
 
 static LIST_HEAD(acpi_device_list);
 static LIST_HEAD(acpi_bus_id_list);
-DEFINE_SPINLOCK(acpi_device_lock);
+DEFINE_MUTEX(acpi_device_lock);
 LIST_HEAD(acpi_wakeup_device_list);
 
 struct acpi_device_bus_id{
@@ -491,7 +491,6 @@ static int acpi_device_register(struct acpi_device *device,
 	 */
 	INIT_LIST_HEAD(&device->children);
 	INIT_LIST_HEAD(&device->node);
-	INIT_LIST_HEAD(&device->g_list);
 	INIT_LIST_HEAD(&device->wakeup_list);
 
 	new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
@@ -500,7 +499,7 @@ static int acpi_device_register(struct acpi_device *device,
 		return -ENOMEM;
 	}
 
-	spin_lock(&acpi_device_lock);
+	mutex_lock(&acpi_device_lock);
 	/*
 	 * Find suitable bus_id and instance number in acpi_bus_id_list
 	 * If failed, create one and link it into acpi_bus_id_list
@@ -521,14 +520,12 @@ static int acpi_device_register(struct acpi_device *device,
 	}
 	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
 
-	if (device->parent) {
+	if (device->parent)
 		list_add_tail(&device->node, &device->parent->children);
-		list_add_tail(&device->g_list, &device->parent->g_list);
-	} else
-		list_add_tail(&device->g_list, &acpi_device_list);
+
 	if (device->wakeup.flags.valid)
 		list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
-	spin_unlock(&acpi_device_lock);
+	mutex_unlock(&acpi_device_lock);
 
 	if (device->parent)
 		device->dev.parent = &parent->dev;
@@ -549,28 +546,22 @@ static int acpi_device_register(struct acpi_device *device,
 	device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
 	return 0;
  end:
-	spin_lock(&acpi_device_lock);
-	if (device->parent) {
+	mutex_lock(&acpi_device_lock);
+	if (device->parent)
 		list_del(&device->node);
-		list_del(&device->g_list);
-	} else
-		list_del(&device->g_list);
 	list_del(&device->wakeup_list);
-	spin_unlock(&acpi_device_lock);
+	mutex_unlock(&acpi_device_lock);
 	return result;
 }
 
 static void acpi_device_unregister(struct acpi_device *device, int type)
 {
-	spin_lock(&acpi_device_lock);
-	if (device->parent) {
+	mutex_lock(&acpi_device_lock);
+	if (device->parent)
 		list_del(&device->node);
-		list_del(&device->g_list);
-	} else
-		list_del(&device->g_list);
 
 	list_del(&device->wakeup_list);
-	spin_unlock(&acpi_device_lock);
+	mutex_unlock(&acpi_device_lock);
 
 	acpi_detach_data(device->handle, acpi_bus_data_handler);
 
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index cfaf8f5b0a14..8a8f3b3382a6 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -5,3 +5,6 @@ extern int acpi_suspend (u32 state);
 extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
 extern void acpi_enable_wakeup_device(u8 sleep_state);
 extern void acpi_disable_wakeup_device(u8 sleep_state);
+
+extern struct list_head acpi_wakeup_device_list;
+extern struct mutex acpi_device_lock;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index e8c143caf0fd..9cd15e8c8932 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -98,6 +98,7 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
 static int acpi_thermal_add(struct acpi_device *device);
 static int acpi_thermal_remove(struct acpi_device *device, int type);
 static int acpi_thermal_resume(struct acpi_device *device);
+static void acpi_thermal_notify(struct acpi_device *device, u32 event);
 static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
 static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
 static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
@@ -123,6 +124,7 @@ static struct acpi_driver acpi_thermal_driver = {
 		.add = acpi_thermal_add,
 		.remove = acpi_thermal_remove,
 		.resume = acpi_thermal_resume,
+		.notify = acpi_thermal_notify,
 		},
 };
 
@@ -192,6 +194,7 @@ struct acpi_thermal {
 	struct acpi_handle_list devices;
 	struct thermal_zone_device *thermal_zone;
 	int tz_enabled;
+	int kelvin_offset;
 	struct mutex lock;
 };
 
@@ -581,7 +584,7 @@ static void acpi_thermal_check(void *data)
 }
 
 /* sys I/F for generic thermal sysfs support */
-#define KELVIN_TO_MILLICELSIUS(t) (t * 100 - 273200)
+#define KELVIN_TO_MILLICELSIUS(t, off) (((t) - (off)) * 100)
 
 static int thermal_get_temp(struct thermal_zone_device *thermal,
 			    unsigned long *temp)
@@ -596,7 +599,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal,
 	if (result)
 		return result;
 
-	*temp = KELVIN_TO_MILLICELSIUS(tz->temperature);
+	*temp = KELVIN_TO_MILLICELSIUS(tz->temperature, tz->kelvin_offset);
 	return 0;
 }
 
@@ -702,7 +705,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
 	if (tz->trips.critical.flags.valid) {
 		if (!trip) {
 			*temp = KELVIN_TO_MILLICELSIUS(
-				tz->trips.critical.temperature);
+				tz->trips.critical.temperature,
+				tz->kelvin_offset);
 			return 0;
 		}
 		trip--;
@@ -711,7 +715,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
 	if (tz->trips.hot.flags.valid) {
 		if (!trip) {
 			*temp = KELVIN_TO_MILLICELSIUS(
-				tz->trips.hot.temperature);
+				tz->trips.hot.temperature,
+				tz->kelvin_offset);
 			return 0;
 		}
 		trip--;
@@ -720,7 +725,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
 	if (tz->trips.passive.flags.valid) {
 		if (!trip) {
 			*temp = KELVIN_TO_MILLICELSIUS(
-				tz->trips.passive.temperature);
+				tz->trips.passive.temperature,
+				tz->kelvin_offset);
 			return 0;
 		}
 		trip--;
@@ -730,7 +736,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
 		tz->trips.active[i].flags.valid; i++) {
 		if (!trip) {
 			*temp = KELVIN_TO_MILLICELSIUS(
-				tz->trips.active[i].temperature);
+				tz->trips.active[i].temperature,
+				tz->kelvin_offset);
 			return 0;
 		}
 		trip--;
@@ -745,7 +752,8 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
 
 	if (tz->trips.critical.flags.valid) {
 		*temperature = KELVIN_TO_MILLICELSIUS(
-				tz->trips.critical.temperature);
+				tz->trips.critical.temperature,
+				tz->kelvin_offset);
 		return 0;
 	} else
 		return -EINVAL;
@@ -1264,17 +1272,14 @@ static int acpi_thermal_remove_fs(struct acpi_device *device)
                                  Driver Interface
    -------------------------------------------------------------------------- */
 
-static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_thermal_notify(struct acpi_device *device, u32 event)
 {
-	struct acpi_thermal *tz = data;
-	struct acpi_device *device = NULL;
+	struct acpi_thermal *tz = acpi_driver_data(device);
 
 
 	if (!tz)
 		return;
 
-	device = tz->device;
-
 	switch (event) {
 	case ACPI_THERMAL_NOTIFY_TEMPERATURE:
 		acpi_thermal_check(tz);
@@ -1298,8 +1303,6 @@ static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
 			  "Unsupported event [0x%x]\n", event));
 		break;
 	}
-
-	return;
 }
 
 static int acpi_thermal_get_info(struct acpi_thermal *tz)
@@ -1334,10 +1337,28 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
 	return 0;
 }
 
+/*
+ * The exact offset between Kelvin and degree Celsius is 273.15. However ACPI
+ * handles temperature values with a single decimal place. As a consequence,
+ * some implementations use an offset of 273.1 and others use an offset of
+ * 273.2. Try to find out which one is being used, to present the most
+ * accurate and visually appealing number.
+ *
+ * The heuristic below should work for all ACPI thermal zones which have a
+ * critical trip point with a value being a multiple of 0.5 degree Celsius.
+ */
+static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
+{
+	if (tz->trips.critical.flags.valid &&
+	    (tz->trips.critical.temperature % 5) == 1)
+		tz->kelvin_offset = 2731;
+	else
+		tz->kelvin_offset = 2732;
+}
+
 static int acpi_thermal_add(struct acpi_device *device)
 {
 	int result = 0;
-	acpi_status status = AE_OK;
 	struct acpi_thermal *tz = NULL;
 
 
@@ -1360,6 +1381,8 @@ static int acpi_thermal_add(struct acpi_device *device)
 	if (result)
 		goto free_memory;
 
+	acpi_thermal_guess_offset(tz);
+
 	result = acpi_thermal_register_thermal_zone(tz);
 	if (result)
 		goto free_memory;
@@ -1368,21 +1391,11 @@ static int acpi_thermal_add(struct acpi_device *device)
 	if (result)
 		goto unregister_thermal_zone;
 
-	status = acpi_install_notify_handler(device->handle,
-					     ACPI_DEVICE_NOTIFY,
-					     acpi_thermal_notify, tz);
-	if (ACPI_FAILURE(status)) {
-		result = -ENODEV;
-		goto remove_fs;
-	}
-
 	printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
 	       KELVIN_TO_CELSIUS(tz->temperature));
 	goto end;
 
-remove_fs:
-	acpi_thermal_remove_fs(device);
 unregister_thermal_zone:
 	thermal_zone_device_unregister(tz->thermal_zone);
 free_memory:
@@ -1393,7 +1406,6 @@ end:
 
 static int acpi_thermal_remove(struct acpi_device *device, int type)
 {
-	acpi_status status = AE_OK;
 	struct acpi_thermal *tz = NULL;
 
 	if (!device || !acpi_driver_data(device))
@@ -1401,10 +1413,6 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
 
 	tz = acpi_driver_data(device);
 
-	status = acpi_remove_notify_handler(device->handle,
-					    ACPI_DEVICE_NOTIFY,
-					    acpi_thermal_notify);
-
 	acpi_thermal_remove_fs(device);
 	acpi_thermal_unregister_thermal_zone(tz);
 	mutex_destroy(&tz->lock);
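
For reference, a minimal standalone sketch (not part of the merge; the helper name and the sample trip-point value are invented for illustration) of how the kelvin_offset heuristic and the reworked KELVIN_TO_MILLICELSIUS() macro from the thermal.c hunks above fit together:

#include <stdio.h>

/* Same shape as the macro added above: subtract the guessed offset
 * (tenths of a kelvin), then scale to millidegrees Celsius. */
#define KELVIN_TO_MILLICELSIUS(t, off)	(((t) - (off)) * 100)

/* Mirrors the acpi_thermal_guess_offset() heuristic: firmware that rounds
 * 273.15 K down to 273.1 K produces critical trip points ending in ...1
 * whenever the trip is a multiple of 0.5 degree Celsius. */
static int guess_kelvin_offset(int critical_decikelvin)
{
	return (critical_decikelvin % 5) == 1 ? 2731 : 2732;
}

int main(void)
{
	int crit = 3731;	/* hypothetical critical trip: 100.0 C reported with a 273.1 offset */
	int off = guess_kelvin_offset(crit);

	/* prints: offset=2731 critical=100000 mC */
	printf("offset=%d critical=%d mC\n", off,
	       KELVIN_TO_MILLICELSIUS(crit, off));
	return 0;
}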
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index ab06143672bc..cd4fb7543a90 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -79,6 +79,7 @@ module_param(brightness_switch_enabled, bool, 0644);
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device, int type);
 static int acpi_video_resume(struct acpi_device *device);
+static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
 
 static const struct acpi_device_id video_device_ids[] = {
 	{ACPI_VIDEO_HID, 0},
@@ -94,6 +95,7 @@ static struct acpi_driver acpi_video_bus = {
 		.add = acpi_video_bus_add,
 		.remove = acpi_video_bus_remove,
 		.resume = acpi_video_resume,
+		.notify = acpi_video_bus_notify,
 		},
 };
 
@@ -1986,17 +1988,15 @@ static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
 	return acpi_video_bus_DOS(video, 0, 1);
 }
 
-static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 {
-	struct acpi_video_bus *video = data;
-	struct acpi_device *device = NULL;
+	struct acpi_video_bus *video = acpi_driver_data(device);
 	struct input_dev *input;
 	int keycode;
 
 	if (!video)
 		return;
 
-	device = video->device;
 	input = video->input;
 
 	switch (event) {
@@ -2127,7 +2127,6 @@ static int acpi_video_resume(struct acpi_device *device)
 
 static int acpi_video_bus_add(struct acpi_device *device)
 {
-	acpi_status status;
 	struct acpi_video_bus *video;
 	struct input_dev *input;
 	int error;
@@ -2169,20 +2168,10 @@ static int acpi_video_bus_add(struct acpi_device *device)
 	acpi_video_bus_get_devices(video, device);
 	acpi_video_bus_start_devices(video);
 
-	status = acpi_install_notify_handler(device->handle,
-					     ACPI_DEVICE_NOTIFY,
-					     acpi_video_bus_notify, video);
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR PREFIX
-		       "Error installing notify handler\n");
-		error = -ENODEV;
-		goto err_stop_video;
-	}
-
 	video->input = input = input_allocate_device();
 	if (!input) {
 		error = -ENOMEM;
-		goto err_uninstall_notify;
+		goto err_stop_video;
 	}
 
 	snprintf(video->phys, sizeof(video->phys),
@@ -2218,9 +2207,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
 
 err_free_input_dev:
 	input_free_device(input);
-err_uninstall_notify:
-	acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-				   acpi_video_bus_notify);
 err_stop_video:
 	acpi_video_bus_stop_devices(video);
 	acpi_video_bus_put_devices(video);
@@ -2235,7 +2221,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
 
 static int acpi_video_bus_remove(struct acpi_device *device, int type)
 {
-	acpi_status status = 0;
 	struct acpi_video_bus *video = NULL;
 
 
@@ -2245,11 +2230,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
 	video = acpi_driver_data(device);
 
 	acpi_video_bus_stop_devices(video);
-
-	status = acpi_remove_notify_handler(video->device->handle,
-				ACPI_DEVICE_NOTIFY,
-				acpi_video_bus_notify);
-
 	acpi_video_bus_put_devices(video);
 	acpi_video_bus_remove_fs(device);
 
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 5aee8c26cc9f..88725dcdf8bc 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -12,12 +12,14 @@
 #include "internal.h"
 #include "sleep.h"
 
+/*
+ * We didn't lock acpi_device_lock in the file, because it invokes oops in
+ * suspend/resume and isn't really required as this is called in S-state. At
+ * that time, there is no device hotplug
+ **/
 #define _COMPONENT	ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("wakeup_devices")
 
-extern struct list_head acpi_wakeup_device_list;
-extern spinlock_t acpi_device_lock;
-
 /**
  * acpi_enable_wakeup_device_prep - prepare wakeup devices
  * @sleep_state:	ACPI state
@@ -29,7 +31,6 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
 {
 	struct list_head *node, *next;
 
-	spin_lock(&acpi_device_lock);
 	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
 		struct acpi_device *dev = container_of(node,
 						       struct acpi_device,
@@ -40,11 +41,8 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
 		    (sleep_state > (u32) dev->wakeup.sleep_state))
 			continue;
 
-		spin_unlock(&acpi_device_lock);
 		acpi_enable_wakeup_device_power(dev, sleep_state);
-		spin_lock(&acpi_device_lock);
 	}
-	spin_unlock(&acpi_device_lock);
 }
 
 /**
@@ -60,7 +58,6 @@ void acpi_enable_wakeup_device(u8 sleep_state)
 	 * Caution: this routine must be invoked when interrupt is disabled
 	 * Refer ACPI2.0: P212
 	 */
-	spin_lock(&acpi_device_lock);
 	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
 		struct acpi_device *dev =
 			container_of(node, struct acpi_device, wakeup_list);
@@ -74,22 +71,17 @@ void acpi_enable_wakeup_device(u8 sleep_state)
 		if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
 		    || sleep_state > (u32) dev->wakeup.sleep_state) {
 			if (dev->wakeup.flags.run_wake) {
-				spin_unlock(&acpi_device_lock);
 				/* set_gpe_type will disable GPE, leave it like that */
 				acpi_set_gpe_type(dev->wakeup.gpe_device,
 						  dev->wakeup.gpe_number,
 						  ACPI_GPE_TYPE_RUNTIME);
-				spin_lock(&acpi_device_lock);
 			}
 			continue;
 		}
-		spin_unlock(&acpi_device_lock);
 		if (!dev->wakeup.flags.run_wake)
 			acpi_enable_gpe(dev->wakeup.gpe_device,
 					dev->wakeup.gpe_number);
-		spin_lock(&acpi_device_lock);
 	}
-	spin_unlock(&acpi_device_lock);
 }
 
 /**
@@ -101,7 +93,6 @@ void acpi_disable_wakeup_device(u8 sleep_state)
 {
 	struct list_head *node, *next;
 
-	spin_lock(&acpi_device_lock);
 	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
 		struct acpi_device *dev =
 			container_of(node, struct acpi_device, wakeup_list);
@@ -112,19 +103,16 @@ void acpi_disable_wakeup_device(u8 sleep_state)
 		if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
 		    || sleep_state > (u32) dev->wakeup.sleep_state) {
 			if (dev->wakeup.flags.run_wake) {
-				spin_unlock(&acpi_device_lock);
 				acpi_set_gpe_type(dev->wakeup.gpe_device,
 						  dev->wakeup.gpe_number,
 						  ACPI_GPE_TYPE_WAKE_RUN);
 				/* Re-enable it, since set_gpe_type will disable it */
 				acpi_enable_gpe(dev->wakeup.gpe_device,
 						dev->wakeup.gpe_number);
-				spin_lock(&acpi_device_lock);
 			}
 			continue;
 		}
 
-		spin_unlock(&acpi_device_lock);
 		acpi_disable_wakeup_device_power(dev);
 		/* Never disable run-wake GPE */
 		if (!dev->wakeup.flags.run_wake) {
@@ -133,16 +121,14 @@ void acpi_disable_wakeup_device(u8 sleep_state)
 			acpi_clear_gpe(dev->wakeup.gpe_device,
 				       dev->wakeup.gpe_number, ACPI_NOT_ISR);
 		}
-		spin_lock(&acpi_device_lock);
 	}
-	spin_unlock(&acpi_device_lock);
 }
 
 int __init acpi_wakeup_device_init(void)
 {
 	struct list_head *node, *next;
 
-	spin_lock(&acpi_device_lock);
+	mutex_lock(&acpi_device_lock);
 	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
 		struct acpi_device *dev = container_of(node,
 						       struct acpi_device,
@@ -150,15 +136,13 @@ int __init acpi_wakeup_device_init(void)
 		/* In case user doesn't load button driver */
 		if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
 			continue;
-		spin_unlock(&acpi_device_lock);
 		acpi_set_gpe_type(dev->wakeup.gpe_device,
 				  dev->wakeup.gpe_number,
 				  ACPI_GPE_TYPE_WAKE_RUN);
 		acpi_enable_gpe(dev->wakeup.gpe_device,
 				dev->wakeup.gpe_number);
 		dev->wakeup.state.enabled = 1;
-		spin_lock(&acpi_device_lock);
 	}
-	spin_unlock(&acpi_device_lock);
+	mutex_unlock(&acpi_device_lock);
 	return 0;
 }
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 57be6bea48eb..08186ecbaf8d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -114,6 +114,7 @@ enum {
 	board_ahci_sb700	= 5, /* for SB700 and SB800 */
 	board_ahci_mcp65	= 6,
 	board_ahci_nopmp	= 7,
+	board_ahci_yesncq	= 8,
 
 	/* global controller registers */
 	HOST_CAP		= 0x00, /* host capabilities */
@@ -469,6 +470,14 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
+	/* board_ahci_yesncq */
+	{
+		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
@@ -535,30 +544,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },	/* MCP65 */
 	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },	/* MCP65 */
 	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },	/* MCP65 */
-	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci },		/* MCP67 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci },		/* MCP73 */
-	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq },	/* MCP73 */
 	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci },		/* MCP77 */
 	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci },		/* MCP77 */
 	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci },		/* MCP77 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e7ea77cf6069..065507c46644 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6110,13 +6110,11 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
 	}
 
-	/* perform each probe synchronously */
-	DPRINTK("probe begin\n");
+	/* perform each probe asynchronously */
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
 		async_schedule(async_port_probe, ap);
 	}
-	DPRINTK("probe end\n");
 
 	return 0;
 }
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 98e8c50703b3..bdd43c7f432e 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -566,7 +566,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	static int printed_version;
 	unsigned int i;
 	int rc;
-	struct ata_host *host;
+	struct ata_host *host = NULL;
 	int board_id = (int) ent->driver_data;
 	const unsigned *bar_sizes;
 
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 6cccdc3f5220..4aecf5dc6a93 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -563,7 +563,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
 	case ACE_FSM_STATE_IDENTIFY_PREPARE:
 		/* Send identify command */
 		ace->fsm_task = ACE_TASK_IDENTIFY;
-		ace->data_ptr = &ace->cf_id;
+		ace->data_ptr = ace->cf_id;
 		ace->data_count = ACE_BUF_PER_SECTOR;
 		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
 
@@ -608,8 +608,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
 		break;
 
 	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
-		ace_fix_driveid(&ace->cf_id[0]);
-		ace_dump_mem(&ace->cf_id, 512); /* Debug: Dump out disk ID */
+		ace_fix_driveid(ace->cf_id);
+		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */
 
 		if (ace->data_result) {
 			/* Error occured, disable the disk */
@@ -622,9 +622,9 @@ static void ace_fsm_dostate(struct ace_device *ace)
 
 			/* Record disk parameters */
 			set_capacity(ace->gd,
-				ata_id_u32(&ace->cf_id, ATA_ID_LBA_CAPACITY));
+				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
 			dev_info(ace->dev, "capacity: %i sectors\n",
-				 ata_id_u32(&ace->cf_id, ATA_ID_LBA_CAPACITY));
+				 ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
 		}
 
 		/* We're done, drop to IDLE state and notify waiters */
@@ -923,7 +923,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode)
 static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
 	struct ace_device *ace = bdev->bd_disk->private_data;
-	u16 *cf_id = &ace->cf_id[0];
+	u16 *cf_id = ace->cf_id;
 
 	dev_dbg(ace->dev, "ace_getgeo()\n");
 
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index f68e5f8e23ee..6318f7ddc1d4 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -190,7 +190,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
 	int completed = 1;
 	unsigned long timeout = jiffies + i2c_adap->timeout;
 
-	while (pca_status(adap) != 0xf8) {
+	while ((state = pca_status(adap)) != 0xf8) {
 		if (time_before(jiffies, timeout)) {
 			msleep(10);
 		} else {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 94eae5c3cbc7..a48c8aee0218 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -604,12 +604,14 @@ comment "Graphics adapter I2C/DDC channel drivers"
 	depends on PCI
 
 config I2C_VOODOO3
-	tristate "Voodoo 3"
+	tristate "Voodoo 3 (DEPRECATED)"
 	depends on PCI
 	select I2C_ALGOBIT
 	help
 	  If you say yes to this option, support will be included for the
-	  Voodoo 3 I2C interface.
+	  Voodoo 3 I2C interface. This driver is deprecated and you should
+	  use the tdfxfb driver instead, which additionally provides
+	  framebuffer support.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called i2c-voodoo3.
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index b6f3a0de6ca2..85e2e919d1cd 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -716,8 +716,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
 
 	/* new style driver methods can't mix with legacy ones */
 	if (is_newstyle_driver(driver)) {
-		if (driver->attach_adapter || driver->detach_adapter
-		    || driver->detach_client) {
+		if (driver->detach_adapter || driver->detach_client) {
 			printk(KERN_WARNING
 			       "i2c-core: driver [%s] is confused\n",
 			       driver->driver.name);
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 8eda552326e9..403d0e4265db 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -20,7 +20,6 @@
  *
  */
 
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/clk.h>
@@ -175,90 +174,6 @@ static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
 	leave_16bit(chipselect, mode);
 }
 
-static u8 ide_mm_inb(unsigned long port)
-{
-	return readb((void __iomem *) port);
-}
-
-static void ide_mm_outb(u8 value, unsigned long port)
-{
-	writeb(value, (void __iomem *) port);
-}
-
-static void at91_ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct ide_io_ports *io_ports = &hwif->io_ports;
-	struct ide_taskfile *tf = &cmd->tf;
-	u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
-
-	if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
-		HIHI = 0xFF;
-
-	if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
-		ide_mm_outb(tf->hob_feature, io_ports->feature_addr);
-	if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
-		ide_mm_outb(tf->hob_nsect, io_ports->nsect_addr);
-	if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
-		ide_mm_outb(tf->hob_lbal, io_ports->lbal_addr);
-	if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
-		ide_mm_outb(tf->hob_lbam, io_ports->lbam_addr);
-	if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
-		ide_mm_outb(tf->hob_lbah, io_ports->lbah_addr);
-
-	if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE)
-		ide_mm_outb(tf->feature, io_ports->feature_addr);
-	if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT)
-		ide_mm_outb(tf->nsect, io_ports->nsect_addr);
-	if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL)
-		ide_mm_outb(tf->lbal, io_ports->lbal_addr);
-	if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM)
-		ide_mm_outb(tf->lbam, io_ports->lbam_addr);
-	if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH)
-		ide_mm_outb(tf->lbah, io_ports->lbah_addr);
-
-	if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE)
-		ide_mm_outb((tf->device & HIHI) | drive->select, io_ports->device_addr);
-}
-
-static void at91_ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct ide_io_ports *io_ports = &hwif->io_ports;
-	struct ide_taskfile *tf = &cmd->tf;
-
-	/* be sure we're looking at the low order bits */
-	ide_mm_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr);
-
-	if (cmd->tf_flags & IDE_TFLAG_IN_ERROR)
-		tf->error = ide_mm_inb(io_ports->feature_addr);
-	if (cmd->tf_flags & IDE_TFLAG_IN_NSECT)
-		tf->nsect = ide_mm_inb(io_ports->nsect_addr);
-	if (cmd->tf_flags & IDE_TFLAG_IN_LBAL)
-		tf->lbal = ide_mm_inb(io_ports->lbal_addr);
-	if (cmd->tf_flags & IDE_TFLAG_IN_LBAM)
-		tf->lbam = ide_mm_inb(io_ports->lbam_addr);
-	if (cmd->tf_flags & IDE_TFLAG_IN_LBAH)
-		tf->lbah = ide_mm_inb(io_ports->lbah_addr);
-	if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE)
-		tf->device = ide_mm_inb(io_ports->device_addr);
-
-	if (cmd->tf_flags & IDE_TFLAG_LBA48) {
-		ide_mm_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr);
-
-		if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR)
-			tf->hob_error = ide_mm_inb(io_ports->feature_addr);
-		if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
-			tf->hob_nsect = ide_mm_inb(io_ports->nsect_addr);
-		if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
-			tf->hob_lbal = ide_mm_inb(io_ports->lbal_addr);
-		if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
-			tf->hob_lbam = ide_mm_inb(io_ports->lbam_addr);
-		if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
-			tf->hob_lbah = ide_mm_inb(io_ports->lbah_addr);
-	}
-}
-
 static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
 {
 	struct ide_timing *timing;
@@ -284,8 +199,8 @@ static const struct ide_tp_ops at91_ide_tp_ops = {
 	.write_devctl	= ide_write_devctl,
 
 	.dev_select	= ide_dev_select,
-	.tf_load	= at91_ide_tf_load,
-	.tf_read	= at91_ide_tf_read,
+	.tf_load	= ide_tf_load,
+	.tf_read	= ide_tf_read,
 
 	.input_data	= at91_ide_input_data,
 	.output_data	= at91_ide_output_data,
@@ -300,7 +215,7 @@ static const struct ide_port_info at91_ide_port_info __initdata = {
 	.tp_ops		= &at91_ide_tp_ops,
 	.host_flags	= IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
 			  IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
-	.pio_mask	= ATA_PIO5,
+	.pio_mask	= ATA_PIO6,
 };
 
 /*
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index afa2af9a362b..0e2df6755ec9 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -20,6 +20,7 @@
 #include <asm/atarihw.h>
 #include <asm/atariints.h>
 #include <asm/atari_stdma.h>
+#include <asm/ide.h>
 
 #define DRV_NAME "falconide"
 
@@ -67,8 +68,10 @@ static void falconide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
 {
 	unsigned long data_addr = drive->hwif->io_ports.data_addr;
 
-	if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS))
-		return insw(data_addr, buf, (len + 1) / 2);
+	if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
+		__ide_mm_insw(data_addr, buf, (len + 1) / 2);
+		return;
+	}
 
 	raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
 }
@@ -78,8 +81,10 @@ static void falconide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
 {
 	unsigned long data_addr = drive->hwif->io_ports.data_addr;
 
-	if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS))
-		return outsw(data_addr, buf, (len + 1) / 2);
+	if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
+		__ide_mm_outsw(data_addr, buf, (len + 1) / 2);
+		return;
+	}
 
 	raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
 }
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 12f436951bff..77f79d26b264 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -318,8 +318,9 @@ static int do_drive_set_taskfiles(ide_drive_t *drive,
318 318
319 /* convert GTF to taskfile */ 319 /* convert GTF to taskfile */
320 memset(&cmd, 0, sizeof(cmd)); 320 memset(&cmd, 0, sizeof(cmd));
321 memcpy(&cmd.tf_array[7], gtf, REGS_PER_GTF); 321 memcpy(&cmd.tf.feature, gtf, REGS_PER_GTF);
322 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 322 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
323 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
323 324
324 err = ide_no_data_taskfile(drive, &cmd); 325 err = ide_no_data_taskfile(drive, &cmd);
325 if (err) { 326 if (err) {
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 3e43b889dd64..7201b176d75b 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -254,16 +254,13 @@ EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
254 254
255void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) 255void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
256{ 256{
257 struct ide_cmd cmd; 257 struct ide_taskfile tf;
258 258
259 memset(&cmd, 0, sizeof(cmd)); 259 drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT |
260 cmd.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM | 260 IDE_VALID_LBAM | IDE_VALID_LBAH);
261 IDE_TFLAG_IN_NSECT;
262 261
263 drive->hwif->tp_ops->tf_read(drive, &cmd); 262 *bcount = (tf.lbah << 8) | tf.lbam;
264 263 *ireason = tf.nsect & 3;
265 *bcount = (cmd.tf.lbah << 8) | cmd.tf.lbam;
266 *ireason = cmd.tf.nsect & 3;
267} 264}
268EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); 265EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
269 266
@@ -439,12 +436,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
439 return ide_started; 436 return ide_started;
440} 437}
441 438
442static void ide_init_packet_cmd(struct ide_cmd *cmd, u32 tf_flags, 439static void ide_init_packet_cmd(struct ide_cmd *cmd, u8 valid_tf,
443 u16 bcount, u8 dma) 440 u16 bcount, u8 dma)
444{ 441{
445 cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO; 442 cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO;
446 cmd->tf_flags |= IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM | 443 cmd->valid.out.tf = IDE_VALID_LBAH | IDE_VALID_LBAM |
447 IDE_TFLAG_OUT_FEATURE | tf_flags; 444 IDE_VALID_FEATURE | valid_tf;
448 cmd->tf.command = ATA_CMD_PACKET; 445 cmd->tf.command = ATA_CMD_PACKET;
449 cmd->tf.feature = dma; /* Use PIO/DMA */ 446 cmd->tf.feature = dma; /* Use PIO/DMA */
450 cmd->tf.lbam = bcount & 0xff; 447 cmd->tf.lbam = bcount & 0xff;
@@ -453,14 +450,11 @@ static void ide_init_packet_cmd(struct ide_cmd *cmd, u32 tf_flags,
453 450
454static u8 ide_read_ireason(ide_drive_t *drive) 451static u8 ide_read_ireason(ide_drive_t *drive)
455{ 452{
456 struct ide_cmd cmd; 453 struct ide_taskfile tf;
457
458 memset(&cmd, 0, sizeof(cmd));
459 cmd.tf_flags = IDE_TFLAG_IN_NSECT;
460 454
461 drive->hwif->tp_ops->tf_read(drive, &cmd); 455 drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT);
462 456
463 return cmd.tf.nsect & 3; 457 return tf.nsect & 3;
464} 458}
465 459
466static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) 460static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
@@ -588,12 +582,12 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
588 ide_expiry_t *expiry = NULL; 582 ide_expiry_t *expiry = NULL;
589 struct request *rq = hwif->rq; 583 struct request *rq = hwif->rq;
590 unsigned int timeout; 584 unsigned int timeout;
591 u32 tf_flags;
592 u16 bcount; 585 u16 bcount;
586 u8 valid_tf;
593 u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT); 587 u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT);
594 588
595 if (dev_is_idecd(drive)) { 589 if (dev_is_idecd(drive)) {
596 tf_flags = IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL; 590 valid_tf = IDE_VALID_NSECT | IDE_VALID_LBAL;
597 bcount = ide_cd_get_xferlen(rq); 591 bcount = ide_cd_get_xferlen(rq);
598 expiry = ide_cd_expiry; 592 expiry = ide_cd_expiry;
599 timeout = ATAPI_WAIT_PC; 593 timeout = ATAPI_WAIT_PC;
@@ -607,7 +601,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
607 pc->xferred = 0; 601 pc->xferred = 0;
608 pc->cur_pos = pc->buf; 602 pc->cur_pos = pc->buf;
609 603
610 tf_flags = IDE_TFLAG_OUT_DEVICE; 604 valid_tf = IDE_VALID_DEVICE;
611 bcount = ((drive->media == ide_tape) ? 605 bcount = ((drive->media == ide_tape) ?
612 pc->req_xfer : 606 pc->req_xfer :
613 min(pc->req_xfer, 63 * 1024)); 607 min(pc->req_xfer, 63 * 1024));
@@ -627,7 +621,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
627 : WAIT_TAPE_CMD; 621 : WAIT_TAPE_CMD;
628 } 622 }
629 623
630 ide_init_packet_cmd(cmd, tf_flags, bcount, drive->dma); 624 ide_init_packet_cmd(cmd, valid_tf, bcount, drive->dma);
631 625
632 (void)do_rw_taskfile(drive, cmd); 626 (void)do_rw_taskfile(drive, cmd);
633 627
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 35729a47f797..3aec19d1fdfc 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -265,35 +265,62 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
265 cdrom_analyze_sense_data(drive, NULL, sense); 265 cdrom_analyze_sense_data(drive, NULL, sense);
266} 266}
267 267
268
268/* 269/*
270 * Allow the drive 5 seconds to recover; some devices will return NOT_READY
271 * while flushing data from cache.
272 *
273 * returns: 0 failed (write timeout expired)
274 * 1 success
275 */
276static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
277{
278
279 struct cdrom_info *info = drive->driver_data;
280
281 if (!rq->errors)
282 info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY;
283
284 rq->errors = 1;
285
286 if (time_after(jiffies, info->write_timeout))
287 return 0;
288 else {
289 struct request_queue *q = drive->queue;
290 unsigned long flags;
291
292 /*
293 * take a breather relying on the unplug timer to kick us again
294 */
295
296 spin_lock_irqsave(q->queue_lock, flags);
297 blk_plug_device(q);
298 spin_unlock_irqrestore(q->queue_lock, flags);
299
300 return 1;
301 }
302}
303
304/**
269 * Returns: 305 * Returns:
270 * 0: if the request should be continued. 306 * 0: if the request should be continued.
271 * 1: if the request will be going through error recovery. 307 * 1: if the request will be going through error recovery.
272 * 2: if the request should be ended. 308 * 2: if the request should be ended.
273 */ 309 */
274static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 310static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
275{ 311{
276 ide_hwif_t *hwif = drive->hwif; 312 ide_hwif_t *hwif = drive->hwif;
277 struct request *rq = hwif->rq; 313 struct request *rq = hwif->rq;
278 int stat, err, sense_key; 314 int err, sense_key, do_end_request = 0;
279 315 u8 quiet = rq->cmd_flags & REQ_QUIET;
280 /* check for errors */
281 stat = hwif->tp_ops->read_status(hwif);
282
283 if (stat_ret)
284 *stat_ret = stat;
285
286 if (OK_STAT(stat, good_stat, BAD_R_STAT))
287 return 0;
288 316
289 /* get the IDE error register */ 317 /* get the IDE error register */
290 err = ide_read_error(drive); 318 err = ide_read_error(drive);
291 sense_key = err >> 4; 319 sense_key = err >> 4;
292 320
293 ide_debug_log(IDE_DBG_RQ, "stat: 0x%x, good_stat: 0x%x, cmd[0]: 0x%x, " 321 ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, rq->cmd_type: 0x%x, err: 0x%x, "
294 "rq->cmd_type: 0x%x, err: 0x%x", 322 "stat 0x%x",
295 stat, good_stat, rq->cmd[0], rq->cmd_type, 323 rq->cmd[0], rq->cmd_type, err, stat);
296 err);
297 324
298 if (blk_sense_request(rq)) { 325 if (blk_sense_request(rq)) {
299 /* 326 /*
@@ -303,151 +330,108 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
303 */ 330 */
304 rq->cmd_flags |= REQ_FAILED; 331 rq->cmd_flags |= REQ_FAILED;
305 return 2; 332 return 2;
306 } else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) { 333 }
307 /* All other functions, except for READ. */
308 334
309 /* 335 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
310 * if we have an error, pass back CHECK_CONDITION as the 336 if (blk_pc_request(rq) && !rq->errors)
311 * scsi status byte 337 rq->errors = SAM_STAT_CHECK_CONDITION;
312 */
313 if (blk_pc_request(rq) && !rq->errors)
314 rq->errors = SAM_STAT_CHECK_CONDITION;
315 338
316 /* check for tray open */ 339 if (blk_noretry_request(rq))
317 if (sense_key == NOT_READY) { 340 do_end_request = 1;
318 cdrom_saw_media_change(drive); 341
319 } else if (sense_key == UNIT_ATTENTION) { 342 switch (sense_key) {
320 /* check for media change */ 343 case NOT_READY:
344 if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) {
345 if (ide_cd_breathe(drive, rq))
346 return 1;
347 } else {
321 cdrom_saw_media_change(drive); 348 cdrom_saw_media_change(drive);
322 return 0; 349
323 } else if (sense_key == ILLEGAL_REQUEST && 350 if (blk_fs_request(rq) && !quiet)
324 rq->cmd[0] == GPCMD_START_STOP_UNIT) { 351 printk(KERN_ERR PFX "%s: tray open\n",
325 /* 352 drive->name);
326 * Don't print error message for this condition--
327 * SFF8090i indicates that 5/24/00 is the correct
328 * response to a request to close the tray if the
329 * drive doesn't have that capability.
330 * cdrom_log_sense() knows this!
331 */
332 } else if (!(rq->cmd_flags & REQ_QUIET)) {
333 /* otherwise, print an error */
334 ide_dump_status(drive, "packet command error", stat);
335 } 353 }
354 do_end_request = 1;
355 break;
356 case UNIT_ATTENTION:
357 cdrom_saw_media_change(drive);
336 358
337 rq->cmd_flags |= REQ_FAILED; 359 if (blk_fs_request(rq) == 0)
360 return 0;
338 361
339 /* 362 /*
340 * instead of playing games with moving completions around, 363 * Arrange to retry the request but be sure to give up if we've
341 * remove failed request completely and end it when the 364 * retried too many times.
342 * request sense has completed
343 */ 365 */
344 goto end_request; 366 if (++rq->errors > ERROR_MAX)
345
346 } else if (blk_fs_request(rq)) {
347 int do_end_request = 0;
348
349 /* handle errors from READ and WRITE requests */
350
351 if (blk_noretry_request(rq))
352 do_end_request = 1; 367 do_end_request = 1;
353 368 break;
354 if (sense_key == NOT_READY) { 369 case ILLEGAL_REQUEST:
355 /* tray open */ 370 /*
356 if (rq_data_dir(rq) == READ) { 371 * Don't print error message for this condition -- SFF8090i
357 cdrom_saw_media_change(drive); 372 * indicates that 5/24/00 is the correct response to a request
358 373 * to close the tray if the drive doesn't have that capability.
359 /* fail the request */ 374 *
360 printk(KERN_ERR PFX "%s: tray open\n", 375 * cdrom_log_sense() knows this!
361 drive->name); 376 */
362 do_end_request = 1; 377 if (rq->cmd[0] == GPCMD_START_STOP_UNIT)
363 } else { 378 break;
364 struct cdrom_info *info = drive->driver_data; 379 /* fall-through */
365 380 case DATA_PROTECT:
366 /* 381 /*
367 * Allow the drive 5 seconds to recover, some 382 * No point in retrying after an illegal request or data
368 * devices will return this error while flushing 383 * protect error.
369 * data from cache. 384 */
370 */ 385 if (!quiet)
371 if (!rq->errors)
372 info->write_timeout = jiffies +
373 ATAPI_WAIT_WRITE_BUSY;
374 rq->errors = 1;
375 if (time_after(jiffies, info->write_timeout))
376 do_end_request = 1;
377 else {
378 struct request_queue *q = drive->queue;
379 unsigned long flags;
380
381 /*
382 * take a breather relying on the unplug
383 * timer to kick us again
384 */
385 spin_lock_irqsave(q->queue_lock, flags);
386 blk_plug_device(q);
387 spin_unlock_irqrestore(q->queue_lock, flags);
388
389 return 1;
390 }
391 }
392 } else if (sense_key == UNIT_ATTENTION) {
393 /* media change */
394 cdrom_saw_media_change(drive);
395
396 /*
397 * Arrange to retry the request but be sure to give up
398 * if we've retried too many times.
399 */
400 if (++rq->errors > ERROR_MAX)
401 do_end_request = 1;
402 } else if (sense_key == ILLEGAL_REQUEST ||
403 sense_key == DATA_PROTECT) {
404 /*
405 * No point in retrying after an illegal request or data
406 * protect error.
407 */
408 ide_dump_status(drive, "command error", stat); 386 ide_dump_status(drive, "command error", stat);
409 do_end_request = 1; 387 do_end_request = 1;
410 } else if (sense_key == MEDIUM_ERROR) { 388 break;
411 /* 389 case MEDIUM_ERROR:
412 * No point in re-trying a zillion times on a bad 390 /*
413 * sector. If we got here the error is not correctable. 391 * No point in re-trying a zillion times on a bad sector.
414 */ 392 * If we got here the error is not correctable.
415 ide_dump_status(drive, "media error (bad sector)", 393 */
394 if (!quiet)
395 ide_dump_status(drive, "media error "
396 "(bad sector)", stat);
397 do_end_request = 1;
398 break;
399 case BLANK_CHECK:
400 /* disk appears blank? */
401 if (!quiet)
402 ide_dump_status(drive, "media error (blank)",
416 stat); 403 stat);
417 do_end_request = 1; 404 do_end_request = 1;
418 } else if (sense_key == BLANK_CHECK) { 405 break;
419 /* disk appears blank ?? */ 406 default:
420 ide_dump_status(drive, "media error (blank)", stat); 407 if (blk_fs_request(rq) == 0)
421 do_end_request = 1; 408 break;
422 } else if ((err & ~ATA_ABORTED) != 0) { 409 if (err & ~ATA_ABORTED) {
423 /* go to the default handler for other errors */ 410 /* go to the default handler for other errors */
424 ide_error(drive, "cdrom_decode_status", stat); 411 ide_error(drive, "cdrom_decode_status", stat);
425 return 1; 412 return 1;
426 } else if ((++rq->errors > ERROR_MAX)) { 413 } else if (++rq->errors > ERROR_MAX)
427 /* we've racked up too many retries, abort */ 414 /* we've racked up too many retries, abort */
428 do_end_request = 1; 415 do_end_request = 1;
429 } 416 }
430
431 /*
432 * End a request through request sense analysis when we have
433 * sense data. We need this in order to perform end of media
434 * processing.
435 */
436 if (do_end_request)
437 goto end_request;
438 417
439 /* 418 if (blk_fs_request(rq) == 0) {
440 * If we got a CHECK_CONDITION status, queue 419 rq->cmd_flags |= REQ_FAILED;
441 * a request sense command. 420 do_end_request = 1;
442 */
443 if (stat & ATA_ERR)
444 cdrom_queue_request_sense(drive, NULL, NULL);
445 return 1;
446 } else {
447 blk_dump_rq_flags(rq, PFX "bad rq");
448 return 2;
449 } 421 }
450 422
423 /*
424 * End a request through request sense analysis when we have sense data.
425 * We need this in order to perform end of media processing.
426 */
427 if (do_end_request)
428 goto end_request;
429
430 /* if we got a CHECK_CONDITION status, queue a request sense command */
431 if (stat & ATA_ERR)
432 cdrom_queue_request_sense(drive, NULL, NULL);
433 return 1;
434
451end_request: 435end_request:
452 if (stat & ATA_ERR) { 436 if (stat & ATA_ERR) {
453 struct request_queue *q = drive->queue; 437 struct request_queue *q = drive->queue;
@@ -624,15 +608,14 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
624 struct ide_cmd *cmd = &hwif->cmd; 608 struct ide_cmd *cmd = &hwif->cmd;
625 struct request *rq = hwif->rq; 609 struct request *rq = hwif->rq;
626 ide_expiry_t *expiry = NULL; 610 ide_expiry_t *expiry = NULL;
627 int dma_error = 0, dma, stat, thislen, uptodate = 0; 611 int dma_error = 0, dma, thislen, uptodate = 0;
628 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc, nsectors; 612 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc, nsectors;
629 int sense = blk_sense_request(rq); 613 int sense = blk_sense_request(rq);
630 unsigned int timeout; 614 unsigned int timeout;
631 u16 len; 615 u16 len;
632 u8 ireason; 616 u8 ireason, stat;
633 617
634 ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x", 618 ide_debug_log(IDE_DBG_PC, "cmd: 0x%x, write: 0x%x", rq->cmd[0], write);
635 rq->cmd[0], write);
636 619
637 /* check for errors */ 620 /* check for errors */
638 dma = drive->dma; 621 dma = drive->dma;
@@ -648,11 +631,16 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
648 } 631 }
649 } 632 }
650 633
651 rc = cdrom_decode_status(drive, 0, &stat); 634 /* check status */
652 if (rc) { 635 stat = hwif->tp_ops->read_status(hwif);
653 if (rc == 2) 636
654 goto out_end; 637 if (!OK_STAT(stat, 0, BAD_R_STAT)) {
655 return ide_stopped; 638 rc = cdrom_decode_status(drive, stat);
639 if (rc) {
640 if (rc == 2)
641 goto out_end;
642 return ide_stopped;
643 }
656 } 644 }
657 645
658 /* using dma, transfer is complete now */ 646 /* using dma, transfer is complete now */
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index c998cf8e971a..a9fbe2c31210 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -97,35 +97,38 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
97 } 97 }
98 98
99 memset(&cmd, 0, sizeof(cmd)); 99 memset(&cmd, 0, sizeof(cmd));
100 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 100 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
101 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
101 102
102 if (drive->dev_flags & IDE_DFLAG_LBA) { 103 if (drive->dev_flags & IDE_DFLAG_LBA) {
103 if (lba48) { 104 if (lba48) {
104 pr_debug("%s: LBA=0x%012llx\n", drive->name, 105 pr_debug("%s: LBA=0x%012llx\n", drive->name,
105 (unsigned long long)block); 106 (unsigned long long)block);
106 107
107 tf->hob_nsect = (nsectors >> 8) & 0xff;
108 tf->hob_lbal = (u8)(block >> 24);
109 if (sizeof(block) != 4) {
110 tf->hob_lbam = (u8)((u64)block >> 32);
111 tf->hob_lbah = (u8)((u64)block >> 40);
112 }
113
114 tf->nsect = nsectors & 0xff; 108 tf->nsect = nsectors & 0xff;
115 tf->lbal = (u8) block; 109 tf->lbal = (u8) block;
116 tf->lbam = (u8)(block >> 8); 110 tf->lbam = (u8)(block >> 8);
117 tf->lbah = (u8)(block >> 16); 111 tf->lbah = (u8)(block >> 16);
112 tf->device = ATA_LBA;
118 113
119 cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); 114 tf = &cmd.hob;
115 tf->nsect = (nsectors >> 8) & 0xff;
116 tf->lbal = (u8)(block >> 24);
117 if (sizeof(block) != 4) {
118 tf->lbam = (u8)((u64)block >> 32);
119 tf->lbah = (u8)((u64)block >> 40);
120 }
121
122 cmd.valid.out.hob = IDE_VALID_OUT_HOB;
123 cmd.valid.in.hob = IDE_VALID_IN_HOB;
124 cmd.tf_flags |= IDE_TFLAG_LBA48;
120 } else { 125 } else {
121 tf->nsect = nsectors & 0xff; 126 tf->nsect = nsectors & 0xff;
122 tf->lbal = block; 127 tf->lbal = block;
123 tf->lbam = block >>= 8; 128 tf->lbam = block >>= 8;
124 tf->lbah = block >>= 8; 129 tf->lbah = block >>= 8;
125 tf->device = (block >> 8) & 0xf; 130 tf->device = ((block >> 8) & 0xf) | ATA_LBA;
126 } 131 }
127
128 tf->device |= ATA_LBA;
129 } else { 132 } else {
130 unsigned int sect, head, cyl, track; 133 unsigned int sect, head, cyl, track;
131 134
@@ -220,15 +223,19 @@ static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
220 tf->command = ATA_CMD_READ_NATIVE_MAX; 223 tf->command = ATA_CMD_READ_NATIVE_MAX;
221 tf->device = ATA_LBA; 224 tf->device = ATA_LBA;
222 225
223 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 226 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
224 if (lba48) 227 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
225 cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); 228 if (lba48) {
229 cmd.valid.out.hob = IDE_VALID_OUT_HOB;
230 cmd.valid.in.hob = IDE_VALID_IN_HOB;
231 cmd.tf_flags = IDE_TFLAG_LBA48;
232 }
226 233
227 ide_no_data_taskfile(drive, &cmd); 234 ide_no_data_taskfile(drive, &cmd);
228 235
229 /* if OK, compute maximum address value */ 236 /* if OK, compute maximum address value */
230 if (!(tf->status & ATA_ERR)) 237 if (!(tf->status & ATA_ERR))
231 addr = ide_get_lba_addr(tf, lba48) + 1; 238 addr = ide_get_lba_addr(&cmd, lba48) + 1;
232 239
233 return addr; 240 return addr;
234} 241}
@@ -250,9 +257,9 @@ static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
250 tf->lbam = (addr_req >>= 8) & 0xff; 257 tf->lbam = (addr_req >>= 8) & 0xff;
251 tf->lbah = (addr_req >>= 8) & 0xff; 258 tf->lbah = (addr_req >>= 8) & 0xff;
252 if (lba48) { 259 if (lba48) {
253 tf->hob_lbal = (addr_req >>= 8) & 0xff; 260 cmd.hob.lbal = (addr_req >>= 8) & 0xff;
254 tf->hob_lbam = (addr_req >>= 8) & 0xff; 261 cmd.hob.lbam = (addr_req >>= 8) & 0xff;
255 tf->hob_lbah = (addr_req >>= 8) & 0xff; 262 cmd.hob.lbah = (addr_req >>= 8) & 0xff;
256 tf->command = ATA_CMD_SET_MAX_EXT; 263 tf->command = ATA_CMD_SET_MAX_EXT;
257 } else { 264 } else {
258 tf->device = (addr_req >>= 8) & 0x0f; 265 tf->device = (addr_req >>= 8) & 0x0f;
@@ -260,15 +267,19 @@ static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
260 } 267 }
261 tf->device |= ATA_LBA; 268 tf->device |= ATA_LBA;
262 269
263 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 270 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
264 if (lba48) 271 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
265 cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); 272 if (lba48) {
273 cmd.valid.out.hob = IDE_VALID_OUT_HOB;
274 cmd.valid.in.hob = IDE_VALID_IN_HOB;
275 cmd.tf_flags = IDE_TFLAG_LBA48;
276 }
266 277
267 ide_no_data_taskfile(drive, &cmd); 278 ide_no_data_taskfile(drive, &cmd);
268 279
269 /* if OK, compute maximum address value */ 280 /* if OK, compute maximum address value */
270 if (!(tf->status & ATA_ERR)) 281 if (!(tf->status & ATA_ERR))
271 addr_set = ide_get_lba_addr(tf, lba48) + 1; 282 addr_set = ide_get_lba_addr(&cmd, lba48) + 1;
272 283
273 return addr_set; 284 return addr_set;
274} 285}
@@ -395,8 +406,8 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
395 cmd->tf.command = ATA_CMD_FLUSH_EXT; 406 cmd->tf.command = ATA_CMD_FLUSH_EXT;
396 else 407 else
397 cmd->tf.command = ATA_CMD_FLUSH; 408 cmd->tf.command = ATA_CMD_FLUSH;
398 cmd->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE | 409 cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
399 IDE_TFLAG_DYN; 410 cmd->tf_flags = IDE_TFLAG_DYN;
400 cmd->protocol = ATA_PROT_NODATA; 411 cmd->protocol = ATA_PROT_NODATA;
401 412
402 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 413 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
@@ -457,7 +468,8 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
457 cmd.tf.feature = feature; 468 cmd.tf.feature = feature;
458 cmd.tf.nsect = nsect; 469 cmd.tf.nsect = nsect;
459 cmd.tf.command = ATA_CMD_SET_FEATURES; 470 cmd.tf.command = ATA_CMD_SET_FEATURES;
460 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 471 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
472 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
461 473
462 return ide_no_data_taskfile(drive, &cmd); 474 return ide_no_data_taskfile(drive, &cmd);
463} 475}
@@ -533,7 +545,8 @@ static int do_idedisk_flushcache(ide_drive_t *drive)
533 cmd.tf.command = ATA_CMD_FLUSH_EXT; 545 cmd.tf.command = ATA_CMD_FLUSH_EXT;
534 else 546 else
535 cmd.tf.command = ATA_CMD_FLUSH; 547 cmd.tf.command = ATA_CMD_FLUSH;
536 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 548 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
549 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
537 550
538 return ide_no_data_taskfile(drive, &cmd); 551 return ide_no_data_taskfile(drive, &cmd);
539} 552}
@@ -715,7 +728,8 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
715 728
716 memset(&cmd, 0, sizeof(cmd)); 729 memset(&cmd, 0, sizeof(cmd));
717 cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; 730 cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
718 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 731 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
732 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
719 733
720 ret = ide_no_data_taskfile(drive, &cmd); 734 ret = ide_no_data_taskfile(drive, &cmd);
721 735
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c
index eaea3bef2073..19f263bf0a9e 100644
--- a/drivers/ide/ide-disk_proc.c
+++ b/drivers/ide/ide-disk_proc.c
@@ -13,7 +13,8 @@ static int smart_enable(ide_drive_t *drive)
13 tf->lbam = ATA_SMART_LBAM_PASS; 13 tf->lbam = ATA_SMART_LBAM_PASS;
14 tf->lbah = ATA_SMART_LBAH_PASS; 14 tf->lbah = ATA_SMART_LBAH_PASS;
15 tf->command = ATA_CMD_SMART; 15 tf->command = ATA_CMD_SMART;
16 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 16 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
17 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
17 18
18 return ide_no_data_taskfile(drive, &cmd); 19 return ide_no_data_taskfile(drive, &cmd);
19} 20}
@@ -29,7 +30,8 @@ static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd)
29 tf->lbam = ATA_SMART_LBAM_PASS; 30 tf->lbam = ATA_SMART_LBAM_PASS;
30 tf->lbah = ATA_SMART_LBAH_PASS; 31 tf->lbah = ATA_SMART_LBAH_PASS;
31 tf->command = ATA_CMD_SMART; 32 tf->command = ATA_CMD_SMART;
32 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 33 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
34 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
33 cmd.protocol = ATA_PROT_PIO; 35 cmd.protocol = ATA_PROT_PIO;
34 36
35 return ide_raw_taskfile(drive, &cmd, buf, 1); 37 return ide_raw_taskfile(drive, &cmd, buf, 1);
diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c
index 16fc46edc32d..e4cdf78cc3e9 100644
--- a/drivers/ide/ide-dma-sff.c
+++ b/drivers/ide/ide-dma-sff.c
@@ -277,8 +277,6 @@ void ide_dma_start(ide_drive_t *drive)
277 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); 277 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
278 outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); 278 outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
279 } 279 }
280
281 wmb();
282} 280}
283EXPORT_SYMBOL_GPL(ide_dma_start); 281EXPORT_SYMBOL_GPL(ide_dma_start);
284 282
@@ -286,7 +284,7 @@ EXPORT_SYMBOL_GPL(ide_dma_start);
286int ide_dma_end(ide_drive_t *drive) 284int ide_dma_end(ide_drive_t *drive)
287{ 285{
288 ide_hwif_t *hwif = drive->hwif; 286 ide_hwif_t *hwif = drive->hwif;
289 u8 dma_stat = 0, dma_cmd = 0, mask; 287 u8 dma_stat = 0, dma_cmd = 0;
290 288
291 /* stop DMA */ 289 /* stop DMA */
292 if (hwif->host_flags & IDE_HFLAG_MMIO) { 290 if (hwif->host_flags & IDE_HFLAG_MMIO) {
@@ -304,11 +302,10 @@ int ide_dma_end(ide_drive_t *drive)
304 /* clear INTR & ERROR bits */ 302 /* clear INTR & ERROR bits */
305 ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR); 303 ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);
306 304
307 wmb(); 305#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR)
308 306
309 /* verify good DMA status */ 307 /* verify good DMA status */
310 mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR; 308 if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR)
311 if ((dma_stat & mask) != ATA_DMA_INTR)
312 return 0x10 | dma_stat; 309 return 0x10 | dma_stat;
313 return 0; 310 return 0;
314} 311}
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c
index dac9a6d44963..c06ebdc4a130 100644
--- a/drivers/ide/ide-h8300.c
+++ b/drivers/ide/ide-h8300.c
@@ -22,103 +22,6 @@
22 (r); \ 22 (r); \
23}) 23})
24 24
25static void mm_outw(u16 d, unsigned long a)
26{
27 __asm__("mov.b %w0,r2h\n\t"
28 "mov.b %x0,r2l\n\t"
29 "mov.w r2,@%1"
30 :
31 :"r"(d),"r"(a)
32 :"er2");
33}
34
35static u16 mm_inw(unsigned long a)
36{
37 register u16 r __asm__("er0");
38 __asm__("mov.w @%1,r2\n\t"
39 "mov.b r2l,%x0\n\t"
40 "mov.b r2h,%w0"
41 :"=r"(r)
42 :"r"(a)
43 :"er2");
44 return r;
45}
46
47static void h8300_tf_load(ide_drive_t *drive, struct ide_cmd *cmd)
48{
49 ide_hwif_t *hwif = drive->hwif;
50 struct ide_io_ports *io_ports = &hwif->io_ports;
51 struct ide_taskfile *tf = &cmd->tf;
52 u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
53
54 if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
55 HIHI = 0xFF;
56
57 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
58 outb(tf->hob_feature, io_ports->feature_addr);
59 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
60 outb(tf->hob_nsect, io_ports->nsect_addr);
61 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
62 outb(tf->hob_lbal, io_ports->lbal_addr);
63 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
64 outb(tf->hob_lbam, io_ports->lbam_addr);
65 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
66 outb(tf->hob_lbah, io_ports->lbah_addr);
67
68 if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE)
69 outb(tf->feature, io_ports->feature_addr);
70 if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT)
71 outb(tf->nsect, io_ports->nsect_addr);
72 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL)
73 outb(tf->lbal, io_ports->lbal_addr);
74 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM)
75 outb(tf->lbam, io_ports->lbam_addr);
76 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH)
77 outb(tf->lbah, io_ports->lbah_addr);
78
79 if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE)
80 outb((tf->device & HIHI) | drive->select,
81 io_ports->device_addr);
82}
83
84static void h8300_tf_read(ide_drive_t *drive, struct ide_cmd *cmd)
85{
86 ide_hwif_t *hwif = drive->hwif;
87 struct ide_io_ports *io_ports = &hwif->io_ports;
88 struct ide_taskfile *tf = &cmd->tf;
89
90 /* be sure we're looking at the low order bits */
91 outb(ATA_DEVCTL_OBS, io_ports->ctl_addr);
92
93 if (cmd->tf_flags & IDE_TFLAG_IN_ERROR)
94 tf->error = inb(io_ports->feature_addr);
95 if (cmd->tf_flags & IDE_TFLAG_IN_NSECT)
96 tf->nsect = inb(io_ports->nsect_addr);
97 if (cmd->tf_flags & IDE_TFLAG_IN_LBAL)
98 tf->lbal = inb(io_ports->lbal_addr);
99 if (cmd->tf_flags & IDE_TFLAG_IN_LBAM)
100 tf->lbam = inb(io_ports->lbam_addr);
101 if (cmd->tf_flags & IDE_TFLAG_IN_LBAH)
102 tf->lbah = inb(io_ports->lbah_addr);
103 if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE)
104 tf->device = inb(io_ports->device_addr);
105
106 if (cmd->tf_flags & IDE_TFLAG_LBA48) {
107 outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr);
108
109 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR)
110 tf->hob_error = inb(io_ports->feature_addr);
111 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
112 tf->hob_nsect = inb(io_ports->nsect_addr);
113 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
114 tf->hob_lbal = inb(io_ports->lbal_addr);
115 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
116 tf->hob_lbam = inb(io_ports->lbam_addr);
117 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
118 tf->hob_lbah = inb(io_ports->lbah_addr);
119 }
120}
121
122static void mm_outsw(unsigned long addr, void *buf, u32 len) 25static void mm_outsw(unsigned long addr, void *buf, u32 len)
123{ 26{
124 unsigned short *bp = (unsigned short *)buf; 27 unsigned short *bp = (unsigned short *)buf;
@@ -152,8 +55,8 @@ static const struct ide_tp_ops h8300_tp_ops = {
152 .write_devctl = ide_write_devctl, 55 .write_devctl = ide_write_devctl,
153 56
154 .dev_select = ide_dev_select, 57 .dev_select = ide_dev_select,
155 .tf_load = h8300_tf_load, 58 .tf_load = ide_tf_load,
156 .tf_read = h8300_tf_read, 59 .tf_read = ide_tf_read,
157 60
158 .input_data = h8300_input_data, 61 .input_data = h8300_input_data,
159 .output_data = h8300_output_data, 62 .output_data = h8300_output_data,
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
index 9cac281d82c4..46721c454518 100644
--- a/drivers/ide/ide-io-std.c
+++ b/drivers/ide/ide-io-std.c
@@ -85,98 +85,57 @@ void ide_dev_select(ide_drive_t *drive)
85} 85}
86EXPORT_SYMBOL_GPL(ide_dev_select); 86EXPORT_SYMBOL_GPL(ide_dev_select);
87 87
88void ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) 88void ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
89{ 89{
90 ide_hwif_t *hwif = drive->hwif; 90 ide_hwif_t *hwif = drive->hwif;
91 struct ide_io_ports *io_ports = &hwif->io_ports; 91 struct ide_io_ports *io_ports = &hwif->io_ports;
92 struct ide_taskfile *tf = &cmd->tf;
93 void (*tf_outb)(u8 addr, unsigned long port); 92 void (*tf_outb)(u8 addr, unsigned long port);
94 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; 93 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
95 u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
96 94
97 if (mmio) 95 if (mmio)
98 tf_outb = ide_mm_outb; 96 tf_outb = ide_mm_outb;
99 else 97 else
100 tf_outb = ide_outb; 98 tf_outb = ide_outb;
101 99
102 if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) 100 if (valid & IDE_VALID_FEATURE)
103 HIHI = 0xFF;
104
105 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
106 tf_outb(tf->hob_feature, io_ports->feature_addr);
107 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
108 tf_outb(tf->hob_nsect, io_ports->nsect_addr);
109 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
110 tf_outb(tf->hob_lbal, io_ports->lbal_addr);
111 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
112 tf_outb(tf->hob_lbam, io_ports->lbam_addr);
113 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
114 tf_outb(tf->hob_lbah, io_ports->lbah_addr);
115
116 if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE)
117 tf_outb(tf->feature, io_ports->feature_addr); 101 tf_outb(tf->feature, io_ports->feature_addr);
118 if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) 102 if (valid & IDE_VALID_NSECT)
119 tf_outb(tf->nsect, io_ports->nsect_addr); 103 tf_outb(tf->nsect, io_ports->nsect_addr);
120 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) 104 if (valid & IDE_VALID_LBAL)
121 tf_outb(tf->lbal, io_ports->lbal_addr); 105 tf_outb(tf->lbal, io_ports->lbal_addr);
122 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) 106 if (valid & IDE_VALID_LBAM)
123 tf_outb(tf->lbam, io_ports->lbam_addr); 107 tf_outb(tf->lbam, io_ports->lbam_addr);
124 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) 108 if (valid & IDE_VALID_LBAH)
125 tf_outb(tf->lbah, io_ports->lbah_addr); 109 tf_outb(tf->lbah, io_ports->lbah_addr);
126 110 if (valid & IDE_VALID_DEVICE)
127 if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) 111 tf_outb(tf->device, io_ports->device_addr);
128 tf_outb((tf->device & HIHI) | drive->select,
129 io_ports->device_addr);
130} 112}
131EXPORT_SYMBOL_GPL(ide_tf_load); 113EXPORT_SYMBOL_GPL(ide_tf_load);
132 114
133void ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) 115void ide_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
134{ 116{
135 ide_hwif_t *hwif = drive->hwif; 117 ide_hwif_t *hwif = drive->hwif;
136 struct ide_io_ports *io_ports = &hwif->io_ports; 118 struct ide_io_ports *io_ports = &hwif->io_ports;
137 struct ide_taskfile *tf = &cmd->tf;
138 void (*tf_outb)(u8 addr, unsigned long port);
139 u8 (*tf_inb)(unsigned long port); 119 u8 (*tf_inb)(unsigned long port);
140 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; 120 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
141 121
142 if (mmio) { 122 if (mmio)
143 tf_outb = ide_mm_outb;
144 tf_inb = ide_mm_inb; 123 tf_inb = ide_mm_inb;
145 } else { 124 else
146 tf_outb = ide_outb;
147 tf_inb = ide_inb; 125 tf_inb = ide_inb;
148 }
149
150 /* be sure we're looking at the low order bits */
151 tf_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr);
152 126
153 if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) 127 if (valid & IDE_VALID_ERROR)
154 tf->error = tf_inb(io_ports->feature_addr); 128 tf->error = tf_inb(io_ports->feature_addr);
155 if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) 129 if (valid & IDE_VALID_NSECT)
156 tf->nsect = tf_inb(io_ports->nsect_addr); 130 tf->nsect = tf_inb(io_ports->nsect_addr);
157 if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) 131 if (valid & IDE_VALID_LBAL)
158 tf->lbal = tf_inb(io_ports->lbal_addr); 132 tf->lbal = tf_inb(io_ports->lbal_addr);
159 if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) 133 if (valid & IDE_VALID_LBAM)
160 tf->lbam = tf_inb(io_ports->lbam_addr); 134 tf->lbam = tf_inb(io_ports->lbam_addr);
161 if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) 135 if (valid & IDE_VALID_LBAH)
162 tf->lbah = tf_inb(io_ports->lbah_addr); 136 tf->lbah = tf_inb(io_ports->lbah_addr);
163 if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) 137 if (valid & IDE_VALID_DEVICE)
164 tf->device = tf_inb(io_ports->device_addr); 138 tf->device = tf_inb(io_ports->device_addr);
165
166 if (cmd->tf_flags & IDE_TFLAG_LBA48) {
167 tf_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr);
168
169 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR)
170 tf->hob_error = tf_inb(io_ports->feature_addr);
171 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
172 tf->hob_nsect = tf_inb(io_ports->nsect_addr);
173 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
174 tf->hob_lbal = tf_inb(io_ports->lbal_addr);
175 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
176 tf->hob_lbam = tf_inb(io_ports->lbam_addr);
177 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
178 tf->hob_lbah = tf_inb(io_ports->lbah_addr);
179 }
180} 139}
181EXPORT_SYMBOL_GPL(ide_tf_read); 140EXPORT_SYMBOL_GPL(ide_tf_read);
182 141
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1deb6d29b186..2ae02b8d7f8e 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -86,18 +86,18 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
86 86
87 tp_ops->input_data(drive, cmd, data, 2); 87 tp_ops->input_data(drive, cmd, data, 2);
88 88
89 tf->data = data[0]; 89 cmd->tf.data = data[0];
90 tf->hob_data = data[1]; 90 cmd->hob.data = data[1];
91 } 91 }
92 92
93 tp_ops->tf_read(drive, cmd); 93 ide_tf_readback(drive, cmd);
94 94
95 if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) && 95 if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
96 tf_cmd == ATA_CMD_IDLEIMMEDIATE) { 96 tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
97 if (tf->lbal != 0xc4) { 97 if (tf->lbal != 0xc4) {
98 printk(KERN_ERR "%s: head unload failed!\n", 98 printk(KERN_ERR "%s: head unload failed!\n",
99 drive->name); 99 drive->name);
100 ide_tf_dump(drive->name, tf); 100 ide_tf_dump(drive->name, cmd);
101 } else 101 } else
102 drive->dev_flags |= IDE_DFLAG_PARKED; 102 drive->dev_flags |= IDE_DFLAG_PARKED;
103 } 103 }
@@ -205,8 +205,9 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
205 return ide_stopped; 205 return ide_stopped;
206 } 206 }
207 207
208 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE | 208 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
209 IDE_TFLAG_CUSTOM_HANDLER; 209 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
210 cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;
210 211
211 do_rw_taskfile(drive, &cmd); 212 do_rw_taskfile(drive, &cmd);
212 213
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 770142767437..c1c25ebbaa1f 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -141,11 +141,12 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
141 tf->lbal = args[1]; 141 tf->lbal = args[1];
142 tf->lbam = 0x4f; 142 tf->lbam = 0x4f;
143 tf->lbah = 0xc2; 143 tf->lbah = 0xc2;
144 cmd.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT; 144 cmd.valid.out.tf = IDE_VALID_OUT_TF;
145 cmd.valid.in.tf = IDE_VALID_NSECT;
145 } else { 146 } else {
146 tf->nsect = args[1]; 147 tf->nsect = args[1];
147 cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT | 148 cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT;
148 IDE_TFLAG_IN_NSECT; 149 cmd.valid.in.tf = IDE_VALID_NSECT;
149 } 150 }
150 tf->command = args[0]; 151 tf->command = args[0];
151 cmd.protocol = args[3] ? ATA_PROT_PIO : ATA_PROT_NODATA; 152 cmd.protocol = args[3] ? ATA_PROT_PIO : ATA_PROT_NODATA;
@@ -205,14 +206,15 @@ static int ide_task_ioctl(ide_drive_t *drive, unsigned long arg)
205 return -EFAULT; 206 return -EFAULT;
206 207
207 memset(&cmd, 0, sizeof(cmd)); 208 memset(&cmd, 0, sizeof(cmd));
208 memcpy(&cmd.tf_array[7], &args[1], 6); 209 memcpy(&cmd.tf.feature, &args[1], 6);
209 cmd.tf.command = args[0]; 210 cmd.tf.command = args[0];
210 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 211 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
212 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
211 213
212 err = ide_no_data_taskfile(drive, &cmd); 214 err = ide_no_data_taskfile(drive, &cmd);
213 215
214 args[0] = cmd.tf.command; 216 args[0] = cmd.tf.command;
215 memcpy(&args[1], &cmd.tf_array[7], 6); 217 memcpy(&args[1], &cmd.tf.feature, 6);
216 218
217 if (copy_to_user(p, args, 7)) 219 if (copy_to_user(p, args, 7))
218 err = -EFAULT; 220 err = -EFAULT;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 27bb70ddd459..c19a221b1e18 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -37,14 +37,11 @@ void SELECT_MASK(ide_drive_t *drive, int mask)
37 37
38u8 ide_read_error(ide_drive_t *drive) 38u8 ide_read_error(ide_drive_t *drive)
39{ 39{
40 struct ide_cmd cmd; 40 struct ide_taskfile tf;
41 41
42 memset(&cmd, 0, sizeof(cmd)); 42 drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR);
43 cmd.tf_flags = IDE_TFLAG_IN_ERROR;
44 43
45 drive->hwif->tp_ops->tf_read(drive, &cmd); 44 return tf.error;
46
47 return cmd.tf.error;
48} 45}
49EXPORT_SYMBOL_GPL(ide_read_error); 46EXPORT_SYMBOL_GPL(ide_read_error);
50 47
@@ -312,10 +309,10 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
312{ 309{
313 ide_hwif_t *hwif = drive->hwif; 310 ide_hwif_t *hwif = drive->hwif;
314 const struct ide_tp_ops *tp_ops = hwif->tp_ops; 311 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
312 struct ide_taskfile tf;
315 u16 *id = drive->id, i; 313 u16 *id = drive->id, i;
316 int error = 0; 314 int error = 0;
317 u8 stat; 315 u8 stat;
318 struct ide_cmd cmd;
319 316
320#ifdef CONFIG_BLK_DEV_IDEDMA 317#ifdef CONFIG_BLK_DEV_IDEDMA
321 if (hwif->dma_ops) /* check if host supports DMA */ 318 if (hwif->dma_ops) /* check if host supports DMA */
@@ -347,12 +344,11 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
347 udelay(1); 344 udelay(1);
348 tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS); 345 tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);
349 346
350 memset(&cmd, 0, sizeof(cmd)); 347 memset(&tf, 0, sizeof(tf));
351 cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT; 348 tf.feature = SETFEATURES_XFER;
352 cmd.tf.feature = SETFEATURES_XFER; 349 tf.nsect = speed;
353 cmd.tf.nsect = speed;
354 350
355 tp_ops->tf_load(drive, &cmd); 351 tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);
356 352
357 tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES); 353 tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
358 354
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 217b7fdf2b17..56ff8c46c7d1 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -49,16 +49,17 @@ static void ide_dump_opcode(ide_drive_t *drive)
49 printk(KERN_CONT "0x%02x\n", cmd->tf.command); 49 printk(KERN_CONT "0x%02x\n", cmd->tf.command);
50} 50}
51 51
52u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48) 52u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48)
53{ 53{
54 struct ide_taskfile *tf = &cmd->tf;
54 u32 high, low; 55 u32 high, low;
55 56
56 if (lba48)
57 high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) |
58 tf->hob_lbal;
59 else
60 high = tf->device & 0xf;
61 low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; 57 low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
58 if (lba48) {
59 tf = &cmd->hob;
60 high = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
61 } else
62 high = tf->device & 0xf;
62 63
63 return ((u64)high << 24) | low; 64 return ((u64)high << 24) | low;
64} 65}
@@ -71,17 +72,18 @@ static void ide_dump_sector(ide_drive_t *drive)
71 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); 72 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
72 73
73 memset(&cmd, 0, sizeof(cmd)); 74 memset(&cmd, 0, sizeof(cmd));
74 if (lba48) 75 if (lba48) {
75 cmd.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_HOB_LBA | 76 cmd.valid.in.tf = IDE_VALID_LBA;
76 IDE_TFLAG_LBA48; 77 cmd.valid.in.hob = IDE_VALID_LBA;
77 else 78 cmd.tf_flags = IDE_TFLAG_LBA48;
78 cmd.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; 79 } else
80 cmd.valid.in.tf = IDE_VALID_LBA | IDE_VALID_DEVICE;
79 81
80 drive->hwif->tp_ops->tf_read(drive, &cmd); 82 ide_tf_readback(drive, &cmd);
81 83
82 if (lba48 || (tf->device & ATA_LBA)) 84 if (lba48 || (tf->device & ATA_LBA))
83 printk(KERN_CONT ", LBAsect=%llu", 85 printk(KERN_CONT ", LBAsect=%llu",
84 (unsigned long long)ide_get_lba_addr(tf, lba48)); 86 (unsigned long long)ide_get_lba_addr(&cmd, lba48));
85 else 87 else
86 printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, 88 printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
87 tf->device & 0xf, tf->lbal); 89 tf->device & 0xf, tf->lbal);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 9490b446519f..310d03f2b5b7 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -74,7 +74,8 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
74 tf->lbal = 0x4c; 74 tf->lbal = 0x4c;
75 tf->lbam = 0x4e; 75 tf->lbam = 0x4e;
76 tf->lbah = 0x55; 76 tf->lbah = 0x55;
77 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 77 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
78 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
78 } else /* cmd == REQ_UNPARK_HEADS */ 79 } else /* cmd == REQ_UNPARK_HEADS */
79 tf->command = ATA_CMD_CHK_POWER; 80 tf->command = ATA_CMD_CHK_POWER;
80 81
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index bb7858ebb7d1..0d8a151c0a01 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -163,7 +163,8 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
163 return ide_stopped; 163 return ide_stopped;
164 164
165out_do_tf: 165out_do_tf:
166 cmd->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 166 cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
167 cmd->valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
167 cmd->protocol = ATA_PROT_NODATA; 168 cmd->protocol = ATA_PROT_NODATA;
168 169
169 return do_rw_taskfile(drive, cmd); 170 return do_rw_taskfile(drive, cmd);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index d8c1c3e735bb..7f264ed1141b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -283,13 +283,11 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id)
283 * identify command to be sure of reply 283 * identify command to be sure of reply
284 */ 284 */
285 if (cmd == ATA_CMD_ID_ATAPI) { 285 if (cmd == ATA_CMD_ID_ATAPI) {
286 struct ide_cmd cmd; 286 struct ide_taskfile tf;
287 287
288 memset(&cmd, 0, sizeof(cmd)); 288 memset(&tf, 0, sizeof(tf));
289 /* disable DMA & overlap */ 289 /* disable DMA & overlap */
290 cmd.tf_flags = IDE_TFLAG_OUT_FEATURE; 290 tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE);
291
292 tp_ops->tf_load(drive, &cmd);
293 } 291 }
294 292
295 /* ask drive for ID */ 293 /* ask drive for ID */
@@ -337,14 +335,11 @@ int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
337 335
338static u8 ide_read_device(ide_drive_t *drive) 336static u8 ide_read_device(ide_drive_t *drive)
339{ 337{
340 struct ide_cmd cmd; 338 struct ide_taskfile tf;
341
342 memset(&cmd, 0, sizeof(cmd));
343 cmd.tf_flags = IDE_TFLAG_IN_DEVICE;
344 339
345 drive->hwif->tp_ops->tf_read(drive, &cmd); 340 drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_DEVICE);
346 341
347 return cmd.tf.device; 342 return tf.device;
348} 343}
349 344
350/** 345/**
@@ -1314,6 +1309,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
1314 host->get_lock = d->get_lock; 1309 host->get_lock = d->get_lock;
1315 host->release_lock = d->release_lock; 1310 host->release_lock = d->release_lock;
1316 host->host_flags = d->host_flags; 1311 host->host_flags = d->host_flags;
1312 host->irq_flags = d->irq_flags;
1317 } 1313 }
1318 1314
1319 return host; 1315 return host;
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 10a88bf3eefa..3242698832a4 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -204,8 +204,8 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
204 cmd.tf.command = ATA_CMD_SET_FEATURES; 204 cmd.tf.command = ATA_CMD_SET_FEATURES;
205 cmd.tf.feature = SETFEATURES_XFER; 205 cmd.tf.feature = SETFEATURES_XFER;
206 cmd.tf.nsect = (u8)arg; 206 cmd.tf.nsect = (u8)arg;
207 cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT | 207 cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT;
208 IDE_TFLAG_IN_NSECT; 208 cmd.valid.in.tf = IDE_VALID_NSECT;
209 209
210 err = ide_no_data_taskfile(drive, &cmd); 210 err = ide_no_data_taskfile(drive, &cmd);
211 211
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 243421ce40d0..4aa6223c11be 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -23,17 +23,33 @@
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <asm/io.h> 24#include <asm/io.h>
25 25
26void ide_tf_dump(const char *s, struct ide_taskfile *tf) 26void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd)
27{
28 ide_hwif_t *hwif = drive->hwif;
29 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
30
31 /* Be sure we're looking at the low order bytes */
32 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
33
34 tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf);
35
36 if (cmd->tf_flags & IDE_TFLAG_LBA48) {
37 tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS);
38
39 tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob);
40 }
41}
42
43void ide_tf_dump(const char *s, struct ide_cmd *cmd)
27{ 44{
28#ifdef DEBUG 45#ifdef DEBUG
29 printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x " 46 printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
30 "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", 47 "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
31 s, tf->feature, tf->nsect, tf->lbal, 48 s, cmd->tf.feature, cmd->tf.nsect,
32 tf->lbam, tf->lbah, tf->device, tf->command); 49 cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah,
33 printk("%s: hob: nsect 0x%02x lbal 0x%02x " 50 cmd->tf.device, cmd->tf.command);
34 "lbam 0x%02x lbah 0x%02x\n", 51 printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n",
35 s, tf->hob_nsect, tf->hob_lbal, 52 s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah);
36 tf->hob_lbam, tf->hob_lbah);
37#endif 53#endif
38} 54}
39 55
@@ -47,7 +63,8 @@ int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
47 cmd.tf.command = ATA_CMD_ID_ATA; 63 cmd.tf.command = ATA_CMD_ID_ATA;
48 else 64 else
49 cmd.tf.command = ATA_CMD_ID_ATAPI; 65 cmd.tf.command = ATA_CMD_ID_ATAPI;
50 cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 66 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
67 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
51 cmd.protocol = ATA_PROT_PIO; 68 cmd.protocol = ATA_PROT_PIO;
52 69
53 return ide_raw_taskfile(drive, &cmd, buf, 1); 70 return ide_raw_taskfile(drive, &cmd, buf, 1);
@@ -79,16 +96,27 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
79 memcpy(cmd, orig_cmd, sizeof(*cmd)); 96 memcpy(cmd, orig_cmd, sizeof(*cmd));
80 97
81 if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { 98 if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
82 ide_tf_dump(drive->name, tf); 99 ide_tf_dump(drive->name, cmd);
83 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); 100 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
84 SELECT_MASK(drive, 0); 101 SELECT_MASK(drive, 0);
85 102
86 if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { 103 if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
87 u8 data[2] = { tf->data, tf->hob_data }; 104 u8 data[2] = { cmd->tf.data, cmd->hob.data };
88 105
89 tp_ops->output_data(drive, cmd, data, 2); 106 tp_ops->output_data(drive, cmd, data, 2);
90 } 107 }
91 tp_ops->tf_load(drive, cmd); 108
109 if (cmd->valid.out.tf & IDE_VALID_DEVICE) {
110 u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ?
111 0xE0 : 0xEF;
112
113 if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED))
114 cmd->tf.device &= HIHI;
115 cmd->tf.device |= drive->select;
116 }
117
118 tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob);
119 tp_ops->tf_load(drive, &cmd->tf, cmd->valid.out.tf);
92 } 120 }
93 121
94 switch (cmd->protocol) { 122 switch (cmd->protocol) {
@@ -489,16 +517,17 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
489 517
490 memset(&cmd, 0, sizeof(cmd)); 518 memset(&cmd, 0, sizeof(cmd));
491 519
492 memcpy(&cmd.tf_array[0], req_task->hob_ports, 520 memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
493 HDIO_DRIVE_HOB_HDR_SIZE - 2); 521 memcpy(&cmd.tf, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
494 memcpy(&cmd.tf_array[6], req_task->io_ports,
495 HDIO_DRIVE_TASK_HDR_SIZE);
496 522
497 cmd.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE | 523 cmd.valid.out.tf = IDE_VALID_DEVICE;
498 IDE_TFLAG_IN_TF; 524 cmd.valid.in.tf = IDE_VALID_DEVICE | IDE_VALID_IN_TF;
525 cmd.tf_flags = IDE_TFLAG_IO_16BIT;
499 526
500 if (drive->dev_flags & IDE_DFLAG_LBA48) 527 if (drive->dev_flags & IDE_DFLAG_LBA48) {
501 cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB); 528 cmd.tf_flags |= IDE_TFLAG_LBA48;
529 cmd.valid.in.hob = IDE_VALID_IN_HOB;
530 }
502 531
503 if (req_task->out_flags.all) { 532 if (req_task->out_flags.all) {
504 cmd.ftf_flags |= IDE_FTFLAG_FLAGGED; 533 cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;
@@ -507,28 +536,28 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
507 cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA; 536 cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;
508 537
509 if (req_task->out_flags.b.nsector_hob) 538 if (req_task->out_flags.b.nsector_hob)
510 cmd.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT; 539 cmd.valid.out.hob |= IDE_VALID_NSECT;
511 if (req_task->out_flags.b.sector_hob) 540 if (req_task->out_flags.b.sector_hob)
512 cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL; 541 cmd.valid.out.hob |= IDE_VALID_LBAL;
513 if (req_task->out_flags.b.lcyl_hob) 542 if (req_task->out_flags.b.lcyl_hob)
514 cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM; 543 cmd.valid.out.hob |= IDE_VALID_LBAM;
515 if (req_task->out_flags.b.hcyl_hob) 544 if (req_task->out_flags.b.hcyl_hob)
516 cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH; 545 cmd.valid.out.hob |= IDE_VALID_LBAH;
517 546
518 if (req_task->out_flags.b.error_feature) 547 if (req_task->out_flags.b.error_feature)
519 cmd.tf_flags |= IDE_TFLAG_OUT_FEATURE; 548 cmd.valid.out.tf |= IDE_VALID_FEATURE;
520 if (req_task->out_flags.b.nsector) 549 if (req_task->out_flags.b.nsector)
521 cmd.tf_flags |= IDE_TFLAG_OUT_NSECT; 550 cmd.valid.out.tf |= IDE_VALID_NSECT;
522 if (req_task->out_flags.b.sector) 551 if (req_task->out_flags.b.sector)
523 cmd.tf_flags |= IDE_TFLAG_OUT_LBAL; 552 cmd.valid.out.tf |= IDE_VALID_LBAL;
524 if (req_task->out_flags.b.lcyl) 553 if (req_task->out_flags.b.lcyl)
525 cmd.tf_flags |= IDE_TFLAG_OUT_LBAM; 554 cmd.valid.out.tf |= IDE_VALID_LBAM;
526 if (req_task->out_flags.b.hcyl) 555 if (req_task->out_flags.b.hcyl)
527 cmd.tf_flags |= IDE_TFLAG_OUT_LBAH; 556 cmd.valid.out.tf |= IDE_VALID_LBAH;
528 } else { 557 } else {
529 cmd.tf_flags |= IDE_TFLAG_OUT_TF; 558 cmd.valid.out.tf |= IDE_VALID_OUT_TF;
530 if (cmd.tf_flags & IDE_TFLAG_LBA48) 559 if (cmd.tf_flags & IDE_TFLAG_LBA48)
531 cmd.tf_flags |= IDE_TFLAG_OUT_HOB; 560 cmd.valid.out.hob |= IDE_VALID_OUT_HOB;
532 } 561 }
533 562
534 if (req_task->in_flags.b.data) 563 if (req_task->in_flags.b.data)
@@ -594,7 +623,7 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
594 if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA) 623 if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
595 nsect = 0; 624 nsect = 0;
596 else if (!nsect) { 625 else if (!nsect) {
597 nsect = (cmd.tf.hob_nsect << 8) | cmd.tf.nsect; 626 nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect;
598 627
599 if (!nsect) { 628 if (!nsect) {
600 printk(KERN_ERR "%s: in/out command without data\n", 629 printk(KERN_ERR "%s: in/out command without data\n",
@@ -606,10 +635,8 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
606 635
607 err = ide_raw_taskfile(drive, &cmd, data_buf, nsect); 636 err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);
608 637
609 memcpy(req_task->hob_ports, &cmd.tf_array[0], 638 memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2);
610 HDIO_DRIVE_HOB_HDR_SIZE - 2); 639 memcpy(req_task->io_ports, &cmd.tf, HDIO_DRIVE_TASK_HDR_SIZE);
611 memcpy(req_task->io_ports, &cmd.tf_array[6],
612 HDIO_DRIVE_TASK_HDR_SIZE);
613 640
614 if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) && 641 if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
615 req_task->in_flags.all == 0) { 642 req_task->in_flags.all == 0) {
diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
index 71a39fb3856f..95327a2c2422 100644
--- a/drivers/ide/ns87415.c
+++ b/drivers/ide/ns87415.c
@@ -61,41 +61,23 @@ static u8 superio_dma_sff_read_status(ide_hwif_t *hwif)
61 return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS); 61 return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS);
62} 62}
63 63
64static void superio_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) 64static void superio_tf_read(ide_drive_t *drive, struct ide_taskfile *tf,
65 u8 valid)
65{ 66{
66 struct ide_io_ports *io_ports = &drive->hwif->io_ports; 67 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
67 struct ide_taskfile *tf = &cmd->tf;
68 68
69 /* be sure we're looking at the low order bits */ 69 if (valid & IDE_VALID_ERROR)
70 outb(ATA_DEVCTL_OBS, io_ports->ctl_addr);
71
72 if (cmd->tf_flags & IDE_TFLAG_IN_ERROR)
73 tf->error = inb(io_ports->feature_addr); 70 tf->error = inb(io_ports->feature_addr);
74 if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) 71 if (valid & IDE_VALID_NSECT)
75 tf->nsect = inb(io_ports->nsect_addr); 72 tf->nsect = inb(io_ports->nsect_addr);
76 if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) 73 if (valid & IDE_VALID_LBAL)
77 tf->lbal = inb(io_ports->lbal_addr); 74 tf->lbal = inb(io_ports->lbal_addr);
78 if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) 75 if (valid & IDE_VALID_LBAM)
79 tf->lbam = inb(io_ports->lbam_addr); 76 tf->lbam = inb(io_ports->lbam_addr);
80 if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) 77 if (valid & IDE_VALID_LBAH)
81 tf->lbah = inb(io_ports->lbah_addr); 78 tf->lbah = inb(io_ports->lbah_addr);
82 if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) 79 if (valid & IDE_VALID_DEVICE)
83 tf->device = superio_ide_inb(io_ports->device_addr); 80 tf->device = superio_ide_inb(io_ports->device_addr);
84
85 if (cmd->tf_flags & IDE_TFLAG_LBA48) {
86 outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr);
87
88 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR)
89 tf->hob_error = inb(io_ports->feature_addr);
90 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
91 tf->hob_nsect = inb(io_ports->nsect_addr);
92 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
93 tf->hob_lbal = inb(io_ports->lbal_addr);
94 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
95 tf->hob_lbam = inb(io_ports->lbam_addr);
96 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
97 tf->hob_lbah = inb(io_ports->lbah_addr);
98 }
99} 81}
100 82
101static void ns87415_dev_select(ide_drive_t *drive); 83static void ns87415_dev_select(ide_drive_t *drive);
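
The taskfile-hook hunks above (and the matching ones in scc_pata.c, tx4938ide.c and tx4939ide.c below) change ->tf_read/->tf_load from taking a whole struct ide_cmd plus IDE_TFLAG_IN_*/OUT_* bits to taking a bare struct ide_taskfile and a u8 valid mask; selecting the HOB register bank via the control register is removed from the driver hooks, so the common code can call the same hook separately for cmd->tf and cmd->hob. A minimal user-space sketch of the mask-driven read, with made-up register stubs and flag values (the real IDE_VALID_* definitions live in include/linux/ide.h):

#include <stdint.h>
#include <stdio.h>

/* illustrative flag values only; the kernel's IDE_VALID_* constants may differ */
#define IDE_VALID_ERROR  (1 << 1)
#define IDE_VALID_NSECT  (1 << 2)
#define IDE_VALID_LBAL   (1 << 3)
#define IDE_VALID_LBAM   (1 << 4)
#define IDE_VALID_LBAH   (1 << 5)
#define IDE_VALID_DEVICE (1 << 6)

struct ide_taskfile {
    uint8_t error, nsect, lbal, lbam, lbah, device;
};

/* stand-in for inb() on the port's register block */
static uint8_t fake_inb(int reg) { return (uint8_t)(0x10 + reg); }

/* read only the registers the caller marked valid, like the new superio_tf_read() */
static void tf_read(struct ide_taskfile *tf, uint8_t valid)
{
    if (valid & IDE_VALID_ERROR)  tf->error  = fake_inb(1);
    if (valid & IDE_VALID_NSECT)  tf->nsect  = fake_inb(2);
    if (valid & IDE_VALID_LBAL)   tf->lbal   = fake_inb(3);
    if (valid & IDE_VALID_LBAM)   tf->lbam   = fake_inb(4);
    if (valid & IDE_VALID_LBAH)   tf->lbah   = fake_inb(5);
    if (valid & IDE_VALID_DEVICE) tf->device = fake_inb(6);
}

int main(void)
{
    struct ide_taskfile tf = { 0 };

    /* for LBA48 the core would call this twice: once for tf, once for hob */
    tf_read(&tf, IDE_VALID_NSECT | IDE_VALID_LBAL | IDE_VALID_LBAM | IDE_VALID_LBAH);
    printf("nsect=%#x lbal=%#x lbam=%#x lbah=%#x\n",
           tf.nsect, tf.lbal, tf.lbam, tf.lbah);
    return 0;
}
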
diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c
index d007e7f66598..c79346679244 100644
--- a/drivers/ide/q40ide.c
+++ b/drivers/ide/q40ide.c
@@ -16,6 +16,8 @@
16#include <linux/blkdev.h> 16#include <linux/blkdev.h>
17#include <linux/ide.h> 17#include <linux/ide.h>
18 18
19#include <asm/ide.h>
20
19 /* 21 /*
20 * Bases of the IDE interfaces 22 * Bases of the IDE interfaces
21 */ 23 */
@@ -77,8 +79,10 @@ static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
77{ 79{
78 unsigned long data_addr = drive->hwif->io_ports.data_addr; 80 unsigned long data_addr = drive->hwif->io_ports.data_addr;
79 81
80 if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) 82 if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
81 return insw(data_addr, buf, (len + 1) / 2); 83 __ide_mm_insw(data_addr, buf, (len + 1) / 2);
84 return;
85 }
82 86
83 raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); 87 raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
84} 88}
@@ -88,8 +92,10 @@ static void q40ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
88{ 92{
89 unsigned long data_addr = drive->hwif->io_ports.data_addr; 93 unsigned long data_addr = drive->hwif->io_ports.data_addr;
90 94
91 if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) 95 if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
92 return outsw(data_addr, buf, (len + 1) / 2); 96 __ide_mm_outsw(data_addr, buf, (len + 1) / 2);
97 return;
98 }
93 99
94 raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); 100 raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
95} 101}
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 6d8dbd9c10bc..5be41f25204f 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -337,7 +337,6 @@ static void scc_dma_start(ide_drive_t *drive)
337 337
338 /* start DMA */ 338 /* start DMA */
339 scc_ide_outb(dma_cmd | 1, hwif->dma_base); 339 scc_ide_outb(dma_cmd | 1, hwif->dma_base);
340 wmb();
341} 340}
342 341
343static int __scc_dma_end(ide_drive_t *drive) 342static int __scc_dma_end(ide_drive_t *drive)
@@ -354,7 +353,6 @@ static int __scc_dma_end(ide_drive_t *drive)
354 /* clear the INTR & ERROR bits */ 353 /* clear the INTR & ERROR bits */
355 scc_ide_outb(dma_stat | 6, hwif->dma_base + 4); 354 scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
356 /* verify good DMA status */ 355 /* verify good DMA status */
357 wmb();
358 return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0; 356 return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
359} 357}
360 358
@@ -647,77 +645,40 @@ static int __devinit init_setup_scc(struct pci_dev *dev,
647 return rc; 645 return rc;
648} 646}
649 647
650static void scc_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) 648static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
651{ 649{
652 struct ide_io_ports *io_ports = &drive->hwif->io_ports; 650 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
653 struct ide_taskfile *tf = &cmd->tf; 651
654 u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; 652 if (valid & IDE_VALID_FEATURE)
655
656 if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
657 HIHI = 0xFF;
658
659 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
660 scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
661 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
662 scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
663 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
664 scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
665 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
666 scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
667 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
668 scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);
669
670 if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE)
671 scc_ide_outb(tf->feature, io_ports->feature_addr); 653 scc_ide_outb(tf->feature, io_ports->feature_addr);
672 if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) 654 if (valid & IDE_VALID_NSECT)
673 scc_ide_outb(tf->nsect, io_ports->nsect_addr); 655 scc_ide_outb(tf->nsect, io_ports->nsect_addr);
674 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) 656 if (valid & IDE_VALID_LBAL)
675 scc_ide_outb(tf->lbal, io_ports->lbal_addr); 657 scc_ide_outb(tf->lbal, io_ports->lbal_addr);
676 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) 658 if (valid & IDE_VALID_LBAM)
677 scc_ide_outb(tf->lbam, io_ports->lbam_addr); 659 scc_ide_outb(tf->lbam, io_ports->lbam_addr);
678 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) 660 if (valid & IDE_VALID_LBAH)
679 scc_ide_outb(tf->lbah, io_ports->lbah_addr); 661 scc_ide_outb(tf->lbah, io_ports->lbah_addr);
680 662 if (valid & IDE_VALID_DEVICE)
681 if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) 663 scc_ide_outb(tf->device, io_ports->device_addr);
682 scc_ide_outb((tf->device & HIHI) | drive->select,
683 io_ports->device_addr);
684} 664}
685 665
686static void scc_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) 666static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
687{ 667{
688 struct ide_io_ports *io_ports = &drive->hwif->io_ports; 668 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
689 struct ide_taskfile *tf = &cmd->tf;
690
691 /* be sure we're looking at the low order bits */
692 scc_ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr);
693 669
694 if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) 670 if (valid & IDE_VALID_ERROR)
695 tf->error = scc_ide_inb(io_ports->feature_addr); 671 tf->error = scc_ide_inb(io_ports->feature_addr);
696 if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) 672 if (valid & IDE_VALID_NSECT)
697 tf->nsect = scc_ide_inb(io_ports->nsect_addr); 673 tf->nsect = scc_ide_inb(io_ports->nsect_addr);
698 if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) 674 if (valid & IDE_VALID_LBAL)
699 tf->lbal = scc_ide_inb(io_ports->lbal_addr); 675 tf->lbal = scc_ide_inb(io_ports->lbal_addr);
700 if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) 676 if (valid & IDE_VALID_LBAM)
701 tf->lbam = scc_ide_inb(io_ports->lbam_addr); 677 tf->lbam = scc_ide_inb(io_ports->lbam_addr);
702 if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) 678 if (valid & IDE_VALID_LBAH)
703 tf->lbah = scc_ide_inb(io_ports->lbah_addr); 679 tf->lbah = scc_ide_inb(io_ports->lbah_addr);
704 if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) 680 if (valid & IDE_VALID_DEVICE)
705 tf->device = scc_ide_inb(io_ports->device_addr); 681 tf->device = scc_ide_inb(io_ports->device_addr);
706
707 if (cmd->tf_flags & IDE_TFLAG_LBA48) {
708 scc_ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr);
709
710 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR)
711 tf->hob_error = scc_ide_inb(io_ports->feature_addr);
712 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
713 tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
714 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
715 tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
716 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
717 tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
718 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
719 tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
720 }
721} 682}
722 683
723static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd, 684static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
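
The device-register write dropped from scc_tf_load() composed the byte as (tf->device & HIHI) | drive->select, with HIHI = 0xEF for 28-bit commands, 0xE0 for LBA48 and 0xFF for flagged taskfiles; with the new hooks that composition is left to the generic helpers. A small sketch of that masking, assuming the usual ATA DEVICE register layout (the bit interpretation below is our reading, not text from the patch):

#include <stdint.h>
#include <stdio.h>

static uint8_t compose_device(uint8_t tf_device, uint8_t drive_select,
                              int lba48, int flagged)
{
    /* 0xEF clears only the drive-select bit; 0xE0 also clears the head/LBA nibble,
       which LBA48 commands do not use */
    uint8_t mask = lba48 ? 0xE0 : 0xEF;

    if (flagged)
        mask = 0xFF;        /* flagged taskfile: pass the byte through unchanged */

    return (uint8_t)((tf_device & mask) | drive_select);
}

int main(void)
{
    /* 0xA0 as a typical drive->select value for the master device */
    printf("28-bit: %#x\n", (unsigned)compose_device(0x4F, 0xA0, 0, 0));
    printf("LBA48:  %#x\n", (unsigned)compose_device(0x4F, 0xA0, 1, 0));
    return 0;
}
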
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index 4cb79c4c2604..e33d764e2945 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -72,91 +72,6 @@ static void tx4938ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
72#ifdef __BIG_ENDIAN 72#ifdef __BIG_ENDIAN
73 73
74/* custom iops (independent from SWAP_IO_SPACE) */ 74/* custom iops (independent from SWAP_IO_SPACE) */
75static u8 tx4938ide_inb(unsigned long port)
76{
77 return __raw_readb((void __iomem *)port);
78}
79
80static void tx4938ide_outb(u8 value, unsigned long port)
81{
82 __raw_writeb(value, (void __iomem *)port);
83}
84
85static void tx4938ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd)
86{
87 ide_hwif_t *hwif = drive->hwif;
88 struct ide_io_ports *io_ports = &hwif->io_ports;
89 struct ide_taskfile *tf = &cmd->tf;
90 u8 HIHI = cmd->tf_flags & IDE_TFLAG_LBA48 ? 0xE0 : 0xEF;
91
92 if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
93 HIHI = 0xFF;
94
95 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
96 tx4938ide_outb(tf->hob_feature, io_ports->feature_addr);
97 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
98 tx4938ide_outb(tf->hob_nsect, io_ports->nsect_addr);
99 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
100 tx4938ide_outb(tf->hob_lbal, io_ports->lbal_addr);
101 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
102 tx4938ide_outb(tf->hob_lbam, io_ports->lbam_addr);
103 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
104 tx4938ide_outb(tf->hob_lbah, io_ports->lbah_addr);
105
106 if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE)
107 tx4938ide_outb(tf->feature, io_ports->feature_addr);
108 if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT)
109 tx4938ide_outb(tf->nsect, io_ports->nsect_addr);
110 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL)
111 tx4938ide_outb(tf->lbal, io_ports->lbal_addr);
112 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM)
113 tx4938ide_outb(tf->lbam, io_ports->lbam_addr);
114 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH)
115 tx4938ide_outb(tf->lbah, io_ports->lbah_addr);
116
117 if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE)
118 tx4938ide_outb((tf->device & HIHI) | drive->select,
119 io_ports->device_addr);
120}
121
122static void tx4938ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd)
123{
124 ide_hwif_t *hwif = drive->hwif;
125 struct ide_io_ports *io_ports = &hwif->io_ports;
126 struct ide_taskfile *tf = &cmd->tf;
127
128 /* be sure we're looking at the low order bits */
129 tx4938ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr);
130
131 if (cmd->tf_flags & IDE_TFLAG_IN_ERROR)
132 tf->error = tx4938ide_inb(io_ports->feature_addr);
133 if (cmd->tf_flags & IDE_TFLAG_IN_NSECT)
134 tf->nsect = tx4938ide_inb(io_ports->nsect_addr);
135 if (cmd->tf_flags & IDE_TFLAG_IN_LBAL)
136 tf->lbal = tx4938ide_inb(io_ports->lbal_addr);
137 if (cmd->tf_flags & IDE_TFLAG_IN_LBAM)
138 tf->lbam = tx4938ide_inb(io_ports->lbam_addr);
139 if (cmd->tf_flags & IDE_TFLAG_IN_LBAH)
140 tf->lbah = tx4938ide_inb(io_ports->lbah_addr);
141 if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE)
142 tf->device = tx4938ide_inb(io_ports->device_addr);
143
144 if (cmd->tf_flags & IDE_TFLAG_LBA48) {
145 tx4938ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr);
146
147 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR)
148 tf->hob_error = tx4938ide_inb(io_ports->feature_addr);
149 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
150 tf->hob_nsect = tx4938ide_inb(io_ports->nsect_addr);
151 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
152 tf->hob_lbal = tx4938ide_inb(io_ports->lbal_addr);
153 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
154 tf->hob_lbam = tx4938ide_inb(io_ports->lbam_addr);
155 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
156 tf->hob_lbah = tx4938ide_inb(io_ports->lbah_addr);
157 }
158}
159
160static void tx4938ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, 75static void tx4938ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
161 void *buf, unsigned int len) 76 void *buf, unsigned int len)
162{ 77{
@@ -190,8 +105,8 @@ static const struct ide_tp_ops tx4938ide_tp_ops = {
190 .write_devctl = ide_write_devctl, 105 .write_devctl = ide_write_devctl,
191 106
192 .dev_select = ide_dev_select, 107 .dev_select = ide_dev_select,
193 .tf_load = tx4938ide_tf_load, 108 .tf_load = ide_tf_load,
194 .tf_read = tx4938ide_tf_read, 109 .tf_read = ide_tf_read,
195 110
196 .input_data = tx4938ide_input_data_swap, 111 .input_data = tx4938ide_input_data_swap,
197 .output_data = tx4938ide_output_data_swap, 112 .output_data = tx4938ide_output_data_swap,
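
With the custom big-endian accessors removed, tx4938ide's ide_tp_ops now takes the library defaults (ide_tf_load/ide_tf_read, ide_exec_command, ...) and keeps only the byte-swapping data transfer hooks local. A tiny sketch of that "defaults plus selective override" ops-table shape, with hypothetical names:

#include <stdio.h>

/* hypothetical, trimmed-down ops table in the spirit of struct ide_tp_ops */
struct tp_ops {
    void (*tf_load)(const char *who);
    void (*input_data)(const char *who);
};

static void generic_tf_load(const char *who)    { printf("%s: generic tf_load\n", who); }
static void swapped_input_data(const char *who) { printf("%s: byte-swapping input_data\n", who); }

/* only the data path is board specific; taskfile I/O uses the library helper */
static const struct tp_ops board_tp_ops = {
    .tf_load    = generic_tf_load,
    .input_data = swapped_input_data,
};

int main(void)
{
    board_tp_ops.tf_load("tx4938-style board");
    board_tp_ops.input_data("tx4938-style board");
    return 0;
}
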
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 0040a9a3e26e..564422d23976 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -327,15 +327,15 @@ static int tx4939ide_dma_end(ide_drive_t *drive)
327 /* read and clear the INTR & ERROR bits */ 327 /* read and clear the INTR & ERROR bits */
328 dma_stat = tx4939ide_clear_dma_status(base); 328 dma_stat = tx4939ide_clear_dma_status(base);
329 329
330 wmb(); 330#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR)
331 331
332 /* verify good DMA status */ 332 /* verify good DMA status */
333 if ((dma_stat & (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE)) == 0 && 333 if ((dma_stat & CHECK_DMA_MASK) == 0 &&
334 (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) == 334 (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) ==
335 (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) 335 (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST))
336 /* INT_IDE lost... bug? */ 336 /* INT_IDE lost... bug? */
337 return 0; 337 return 0;
338 return ((dma_stat & (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE)) != 338 return ((dma_stat & CHECK_DMA_MASK) !=
339 ATA_DMA_INTR) ? 0x10 | dma_stat : 0; 339 ATA_DMA_INTR) ? 0x10 | dma_stat : 0;
340} 340}
341 341
@@ -434,97 +434,19 @@ static void tx4939ide_tf_load_fixup(ide_drive_t *drive)
434 tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl); 434 tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
435} 435}
436 436
437#ifdef __BIG_ENDIAN 437static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf,
438 438 u8 valid)
439/* custom iops (independent from SWAP_IO_SPACE) */
440static u8 tx4939ide_inb(unsigned long port)
441{ 439{
442 return __raw_readb((void __iomem *)port); 440 ide_tf_load(drive, tf, valid);
443}
444 441
445static void tx4939ide_outb(u8 value, unsigned long port) 442 if (valid & IDE_VALID_DEVICE)
446{
447 __raw_writeb(value, (void __iomem *)port);
448}
449
450static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd)
451{
452 ide_hwif_t *hwif = drive->hwif;
453 struct ide_io_ports *io_ports = &hwif->io_ports;
454 struct ide_taskfile *tf = &cmd->tf;
455 u8 HIHI = cmd->tf_flags & IDE_TFLAG_LBA48 ? 0xE0 : 0xEF;
456
457 if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
458 HIHI = 0xFF;
459
460 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
461 tx4939ide_outb(tf->hob_feature, io_ports->feature_addr);
462 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
463 tx4939ide_outb(tf->hob_nsect, io_ports->nsect_addr);
464 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
465 tx4939ide_outb(tf->hob_lbal, io_ports->lbal_addr);
466 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
467 tx4939ide_outb(tf->hob_lbam, io_ports->lbam_addr);
468 if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
469 tx4939ide_outb(tf->hob_lbah, io_ports->lbah_addr);
470
471 if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE)
472 tx4939ide_outb(tf->feature, io_ports->feature_addr);
473 if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT)
474 tx4939ide_outb(tf->nsect, io_ports->nsect_addr);
475 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL)
476 tx4939ide_outb(tf->lbal, io_ports->lbal_addr);
477 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM)
478 tx4939ide_outb(tf->lbam, io_ports->lbam_addr);
479 if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH)
480 tx4939ide_outb(tf->lbah, io_ports->lbah_addr);
481
482 if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) {
483 tx4939ide_outb((tf->device & HIHI) | drive->select,
484 io_ports->device_addr);
485 tx4939ide_tf_load_fixup(drive); 443 tx4939ide_tf_load_fixup(drive);
486 }
487} 444}
488 445
489static void tx4939ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) 446#ifdef __BIG_ENDIAN
490{
491 ide_hwif_t *hwif = drive->hwif;
492 struct ide_io_ports *io_ports = &hwif->io_ports;
493 struct ide_taskfile *tf = &cmd->tf;
494
495 /* be sure we're looking at the low order bits */
496 tx4939ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr);
497
498 if (cmd->tf_flags & IDE_TFLAG_IN_ERROR)
499 tf->error = tx4939ide_inb(io_ports->feature_addr);
500 if (cmd->tf_flags & IDE_TFLAG_IN_NSECT)
501 tf->nsect = tx4939ide_inb(io_ports->nsect_addr);
502 if (cmd->tf_flags & IDE_TFLAG_IN_LBAL)
503 tf->lbal = tx4939ide_inb(io_ports->lbal_addr);
504 if (cmd->tf_flags & IDE_TFLAG_IN_LBAM)
505 tf->lbam = tx4939ide_inb(io_ports->lbam_addr);
506 if (cmd->tf_flags & IDE_TFLAG_IN_LBAH)
507 tf->lbah = tx4939ide_inb(io_ports->lbah_addr);
508 if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE)
509 tf->device = tx4939ide_inb(io_ports->device_addr);
510
511 if (cmd->tf_flags & IDE_TFLAG_LBA48) {
512 tx4939ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr);
513
514 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR)
515 tf->hob_error = tx4939ide_inb(io_ports->feature_addr);
516 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
517 tf->hob_nsect = tx4939ide_inb(io_ports->nsect_addr);
518 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
519 tf->hob_lbal = tx4939ide_inb(io_ports->lbal_addr);
520 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
521 tf->hob_lbam = tx4939ide_inb(io_ports->lbam_addr);
522 if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
523 tf->hob_lbah = tx4939ide_inb(io_ports->lbah_addr);
524 }
525}
526 447
527static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq, 448/* custom iops (independent from SWAP_IO_SPACE) */
449static void tx4939ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
528 void *buf, unsigned int len) 450 void *buf, unsigned int len)
529{ 451{
530 unsigned long port = drive->hwif->io_ports.data_addr; 452 unsigned long port = drive->hwif->io_ports.data_addr;
@@ -536,7 +458,7 @@ static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq,
536 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2)); 458 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
537} 459}
538 460
539static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq, 461static void tx4939ide_output_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
540 void *buf, unsigned int len) 462 void *buf, unsigned int len)
541{ 463{
542 unsigned long port = drive->hwif->io_ports.data_addr; 464 unsigned long port = drive->hwif->io_ports.data_addr;
@@ -558,7 +480,7 @@ static const struct ide_tp_ops tx4939ide_tp_ops = {
558 480
559 .dev_select = ide_dev_select, 481 .dev_select = ide_dev_select,
560 .tf_load = tx4939ide_tf_load, 482 .tf_load = tx4939ide_tf_load,
561 .tf_read = tx4939ide_tf_read, 483 .tf_read = ide_tf_read,
562 484
563 .input_data = tx4939ide_input_data_swap, 485 .input_data = tx4939ide_input_data_swap,
564 .output_data = tx4939ide_output_data_swap, 486 .output_data = tx4939ide_output_data_swap,
@@ -566,14 +488,6 @@ static const struct ide_tp_ops tx4939ide_tp_ops = {
566 488
567#else /* __LITTLE_ENDIAN */ 489#else /* __LITTLE_ENDIAN */
568 490
569static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd)
570{
571 ide_tf_load(drive, cmd);
572
573 if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE)
574 tx4939ide_tf_load_fixup(drive);
575}
576
577static const struct ide_tp_ops tx4939ide_tp_ops = { 491static const struct ide_tp_ops tx4939ide_tp_ops = {
578 .exec_command = ide_exec_command, 492 .exec_command = ide_exec_command,
579 .read_status = ide_read_status, 493 .read_status = ide_read_status,
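
tx4939ide keeps only a thin tf_load wrapper: it defers to the generic ide_tf_load() and runs the controller-specific fixup solely when the DEVICE register is being written. A hedged sketch of that wrap-then-fixup shape (flag value and names illustrative):

#include <stdint.h>
#include <stdio.h>

#define IDE_VALID_DEVICE (1 << 6)   /* illustrative value only */

struct taskfile { uint8_t device; uint8_t nsect; };

static void generic_tf_load(struct taskfile *tf, uint8_t valid)
{
    printf("generic load, device=%#x valid=%#x\n", (unsigned)tf->device, (unsigned)valid);
}

static void controller_fixup(void)
{
    printf("re-program controller state after device select\n");
}

/* mirrors tx4939ide_tf_load(): call the core helper, then fix up if needed */
static void board_tf_load(struct taskfile *tf, uint8_t valid)
{
    generic_tf_load(tf, valid);

    if (valid & IDE_VALID_DEVICE)
        controller_fixup();
}

int main(void)
{
    struct taskfile tf = { .device = 0xA0, .nsect = 1 };

    board_tf_load(&tf, IDE_VALID_DEVICE);
    return 0;
}
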
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2a2e50871b40..851de83ff455 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -297,21 +297,25 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv)
297 id_priv->cma_dev = NULL; 297 id_priv->cma_dev = NULL;
298} 298}
299 299
300static int cma_set_qkey(struct ib_device *device, u8 port_num, 300static int cma_set_qkey(struct rdma_id_private *id_priv)
301 enum rdma_port_space ps,
302 struct rdma_dev_addr *dev_addr, u32 *qkey)
303{ 301{
304 struct ib_sa_mcmember_rec rec; 302 struct ib_sa_mcmember_rec rec;
305 int ret = 0; 303 int ret = 0;
306 304
307 switch (ps) { 305 if (id_priv->qkey)
306 return 0;
307
308 switch (id_priv->id.ps) {
308 case RDMA_PS_UDP: 309 case RDMA_PS_UDP:
309 *qkey = RDMA_UDP_QKEY; 310 id_priv->qkey = RDMA_UDP_QKEY;
310 break; 311 break;
311 case RDMA_PS_IPOIB: 312 case RDMA_PS_IPOIB:
312 ib_addr_get_mgid(dev_addr, &rec.mgid); 313 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
313 ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec); 314 ret = ib_sa_get_mcmember_rec(id_priv->id.device,
314 *qkey = be32_to_cpu(rec.qkey); 315 id_priv->id.port_num, &rec.mgid,
316 &rec);
317 if (!ret)
318 id_priv->qkey = be32_to_cpu(rec.qkey);
315 break; 319 break;
316 default: 320 default:
317 break; 321 break;
@@ -341,12 +345,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
341 ret = ib_find_cached_gid(cma_dev->device, &gid, 345 ret = ib_find_cached_gid(cma_dev->device, &gid,
342 &id_priv->id.port_num, NULL); 346 &id_priv->id.port_num, NULL);
343 if (!ret) { 347 if (!ret) {
344 ret = cma_set_qkey(cma_dev->device, 348 cma_attach_to_dev(id_priv, cma_dev);
345 id_priv->id.port_num,
346 id_priv->id.ps, dev_addr,
347 &id_priv->qkey);
348 if (!ret)
349 cma_attach_to_dev(id_priv, cma_dev);
350 break; 349 break;
351 } 350 }
352 } 351 }
@@ -578,6 +577,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
578 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 577 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
579 578
580 if (cma_is_ud_ps(id_priv->id.ps)) { 579 if (cma_is_ud_ps(id_priv->id.ps)) {
580 ret = cma_set_qkey(id_priv);
581 if (ret)
582 return ret;
583
581 qp_attr->qkey = id_priv->qkey; 584 qp_attr->qkey = id_priv->qkey;
582 *qp_attr_mask |= IB_QP_QKEY; 585 *qp_attr_mask |= IB_QP_QKEY;
583 } else { 586 } else {
@@ -2201,6 +2204,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2201 event.status = ib_event->param.sidr_rep_rcvd.status; 2204 event.status = ib_event->param.sidr_rep_rcvd.status;
2202 break; 2205 break;
2203 } 2206 }
2207 ret = cma_set_qkey(id_priv);
2208 if (ret) {
2209 event.event = RDMA_CM_EVENT_ADDR_ERROR;
2210 event.status = -EINVAL;
2211 break;
2212 }
2204 if (id_priv->qkey != rep->qkey) { 2213 if (id_priv->qkey != rep->qkey) {
2205 event.event = RDMA_CM_EVENT_UNREACHABLE; 2214 event.event = RDMA_CM_EVENT_UNREACHABLE;
2206 event.status = -EINVAL; 2215 event.status = -EINVAL;
@@ -2480,10 +2489,14 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2480 const void *private_data, int private_data_len) 2489 const void *private_data, int private_data_len)
2481{ 2490{
2482 struct ib_cm_sidr_rep_param rep; 2491 struct ib_cm_sidr_rep_param rep;
2492 int ret;
2483 2493
2484 memset(&rep, 0, sizeof rep); 2494 memset(&rep, 0, sizeof rep);
2485 rep.status = status; 2495 rep.status = status;
2486 if (status == IB_SIDR_SUCCESS) { 2496 if (status == IB_SIDR_SUCCESS) {
2497 ret = cma_set_qkey(id_priv);
2498 if (ret)
2499 return ret;
2487 rep.qp_num = id_priv->qp_num; 2500 rep.qp_num = id_priv->qp_num;
2488 rep.qkey = id_priv->qkey; 2501 rep.qkey = id_priv->qkey;
2489 } 2502 }
@@ -2713,6 +2726,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
2713 IB_SA_MCMEMBER_REC_FLOW_LABEL | 2726 IB_SA_MCMEMBER_REC_FLOW_LABEL |
2714 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 2727 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
2715 2728
2729 if (id_priv->id.ps == RDMA_PS_IPOIB)
2730 comp_mask |= IB_SA_MCMEMBER_REC_RATE |
2731 IB_SA_MCMEMBER_REC_RATE_SELECTOR;
2732
2716 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, 2733 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
2717 id_priv->id.port_num, &rec, 2734 id_priv->id.port_num, &rec,
2718 comp_mask, GFP_KERNEL, 2735 comp_mask, GFP_KERNEL,
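
cma_set_qkey() is now called lazily at the points where a QKey is actually consumed (cma_ib_init_qp_attr() and the SIDR reply paths) instead of once in cma_acquire_dev(); it returns immediately if a QKey is already cached and only performs the multicast-record lookup for the IPoIB port space. A compact user-space sketch of that memoized lookup, with placeholder constants standing in for RDMA_UDP_QKEY and the SA query:

#include <stdio.h>

#define FAKE_UDP_QKEY 0x01234567u
#define PS_UDP   1
#define PS_IPOIB 2

struct id_priv {
    int ps;
    unsigned int qkey;      /* 0 means "not resolved yet" */
};

static int sa_lookup_qkey(unsigned int *qkey)
{
    *qkey = 0x89abcdefu;    /* pretend the SA answered */
    return 0;
}

/* resolve the QKey on first use and cache it, like cma_set_qkey() */
static int set_qkey(struct id_priv *id)
{
    int ret = 0;

    if (id->qkey)
        return 0;           /* already resolved */

    switch (id->ps) {
    case PS_UDP:
        id->qkey = FAKE_UDP_QKEY;
        break;
    case PS_IPOIB:
        ret = sa_lookup_qkey(&id->qkey);
        break;
    default:
        break;
    }
    return ret;
}

int main(void)
{
    struct id_priv id = { .ps = PS_IPOIB, .qkey = 0 };

    if (!set_qkey(&id))
        printf("qkey = %#x\n", id.qkey);
    set_qkey(&id);  /* second call is a no-op thanks to the cache */
    return 0;
}
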
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index a4a82bff7100..8d71086f5a1c 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -152,7 +152,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
152 sge_cmd = qpid << 8 | 3; 152 sge_cmd = qpid << 8 | 3;
153 wqe->sge_cmd = cpu_to_be64(sge_cmd); 153 wqe->sge_cmd = cpu_to_be64(sge_cmd);
154 skb->priority = CPL_PRIORITY_CONTROL; 154 skb->priority = CPL_PRIORITY_CONTROL;
155 return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); 155 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
156} 156}
157 157
158int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) 158int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
@@ -571,7 +571,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
571 (unsigned long long) rdev_p->ctrl_qp.dma_addr, 571 (unsigned long long) rdev_p->ctrl_qp.dma_addr,
572 rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2); 572 rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
573 skb->priority = CPL_PRIORITY_CONTROL; 573 skb->priority = CPL_PRIORITY_CONTROL;
574 return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); 574 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
575err: 575err:
576 kfree_skb(skb); 576 kfree_skb(skb);
577 return err; 577 return err;
@@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
701 u32 stag_idx; 701 u32 stag_idx;
702 u32 wptr; 702 u32 wptr;
703 703
704 if (rdev_p->flags) 704 if (cxio_fatal_error(rdev_p))
705 return -EIO; 705 return -EIO;
706 706
707 stag_state = stag_state > 0; 707 stag_state = stag_state > 0;
@@ -858,7 +858,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
858 wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size); 858 wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
859 wqe->irs = cpu_to_be32(attr->irs); 859 wqe->irs = cpu_to_be32(attr->irs);
860 skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */ 860 skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
861 return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); 861 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
862} 862}
863 863
864void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb) 864void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
@@ -1041,9 +1041,9 @@ void cxio_rdev_close(struct cxio_rdev *rdev_p)
1041 cxio_hal_pblpool_destroy(rdev_p); 1041 cxio_hal_pblpool_destroy(rdev_p);
1042 cxio_hal_rqtpool_destroy(rdev_p); 1042 cxio_hal_rqtpool_destroy(rdev_p);
1043 list_del(&rdev_p->entry); 1043 list_del(&rdev_p->entry);
1044 rdev_p->t3cdev_p->ulp = NULL;
1045 cxio_hal_destroy_ctrl_qp(rdev_p); 1044 cxio_hal_destroy_ctrl_qp(rdev_p);
1046 cxio_hal_destroy_resource(rdev_p->rscp); 1045 cxio_hal_destroy_resource(rdev_p->rscp);
1046 rdev_p->t3cdev_p->ulp = NULL;
1047 } 1047 }
1048} 1048}
1049 1049
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 094a66d1480c..bfd03bf8be54 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -115,6 +115,11 @@ struct cxio_rdev {
115#define CXIO_ERROR_FATAL 1 115#define CXIO_ERROR_FATAL 1
116}; 116};
117 117
118static inline int cxio_fatal_error(struct cxio_rdev *rdev_p)
119{
120 return rdev_p->flags & CXIO_ERROR_FATAL;
121}
122
118static inline int cxio_num_stags(struct cxio_rdev *rdev_p) 123static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
119{ 124{
120 return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5)); 125 return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5));
@@ -188,6 +193,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
188void cxio_flush_hw_cq(struct t3_cq *cq); 193void cxio_flush_hw_cq(struct t3_cq *cq);
189int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, 194int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
190 u8 *cqe_flushed, u64 *cookie, u32 *credit); 195 u8 *cqe_flushed, u64 *cookie, u32 *credit);
196int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);
191 197
192#define MOD "iw_cxgb3: " 198#define MOD "iw_cxgb3: "
193#define PDBG(fmt, args...) pr_debug(MOD fmt, ## args) 199#define PDBG(fmt, args...) pr_debug(MOD fmt, ## args)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 37a4fc264a07..26fc0a4eaa74 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -165,12 +165,19 @@ static void close_rnic_dev(struct t3cdev *tdev)
165static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) 165static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
166{ 166{
167 struct cxio_rdev *rdev = tdev->ulp; 167 struct cxio_rdev *rdev = tdev->ulp;
168 struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
169 struct ib_event event;
168 170
169 if (status == OFFLOAD_STATUS_DOWN) 171 if (status == OFFLOAD_STATUS_DOWN) {
170 rdev->flags = CXIO_ERROR_FATAL; 172 rdev->flags = CXIO_ERROR_FATAL;
171 173
172 return; 174 event.device = &rnicp->ibdev;
175 event.event = IB_EVENT_DEVICE_FATAL;
176 event.element.port_num = 0;
177 ib_dispatch_event(&event);
178 }
173 179
180 return;
174} 181}
175 182
176static int __init iwch_init_module(void) 183static int __init iwch_init_module(void)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 3773453b2cf0..84735506333f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -117,6 +117,11 @@ static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
117 return container_of(ibdev, struct iwch_dev, ibdev); 117 return container_of(ibdev, struct iwch_dev, ibdev);
118} 118}
119 119
120static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
121{
122 return container_of(rdev, struct iwch_dev, rdev);
123}
124
120static inline int t3b_device(const struct iwch_dev *rhp) 125static inline int t3b_device(const struct iwch_dev *rhp)
121{ 126{
122 return rhp->rdev.t3cdev_p->type == T3B; 127 return rhp->rdev.t3cdev_p->type == T3B;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 8699947aaf6c..fef3f1ae7225 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -139,6 +139,38 @@ static void stop_ep_timer(struct iwch_ep *ep)
139 put_ep(&ep->com); 139 put_ep(&ep->com);
140} 140}
141 141
142int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
143{
144 int error = 0;
145 struct cxio_rdev *rdev;
146
147 rdev = (struct cxio_rdev *)tdev->ulp;
148 if (cxio_fatal_error(rdev)) {
149 kfree_skb(skb);
150 return -EIO;
151 }
152 error = l2t_send(tdev, skb, l2e);
153 if (error)
154 kfree_skb(skb);
155 return error;
156}
157
158int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
159{
160 int error = 0;
161 struct cxio_rdev *rdev;
162
163 rdev = (struct cxio_rdev *)tdev->ulp;
164 if (cxio_fatal_error(rdev)) {
165 kfree_skb(skb);
166 return -EIO;
167 }
168 error = cxgb3_ofld_send(tdev, skb);
169 if (error)
170 kfree_skb(skb);
171 return error;
172}
173
142static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) 174static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
143{ 175{
144 struct cpl_tid_release *req; 176 struct cpl_tid_release *req;
@@ -150,7 +182,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
150 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 182 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
151 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); 183 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
152 skb->priority = CPL_PRIORITY_SETUP; 184 skb->priority = CPL_PRIORITY_SETUP;
153 cxgb3_ofld_send(tdev, skb); 185 iwch_cxgb3_ofld_send(tdev, skb);
154 return; 186 return;
155} 187}
156 188
@@ -172,8 +204,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep)
172 req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE); 204 req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
173 205
174 skb->priority = CPL_PRIORITY_DATA; 206 skb->priority = CPL_PRIORITY_DATA;
175 cxgb3_ofld_send(ep->com.tdev, skb); 207 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
176 return 0;
177} 208}
178 209
179int iwch_resume_tid(struct iwch_ep *ep) 210int iwch_resume_tid(struct iwch_ep *ep)
@@ -194,8 +225,7 @@ int iwch_resume_tid(struct iwch_ep *ep)
194 req->val = 0; 225 req->val = 0;
195 226
196 skb->priority = CPL_PRIORITY_DATA; 227 skb->priority = CPL_PRIORITY_DATA;
197 cxgb3_ofld_send(ep->com.tdev, skb); 228 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
198 return 0;
199} 229}
200 230
201static void set_emss(struct iwch_ep *ep, u16 opt) 231static void set_emss(struct iwch_ep *ep, u16 opt)
@@ -252,18 +282,22 @@ static void *alloc_ep(int size, gfp_t gfp)
252 282
253void __free_ep(struct kref *kref) 283void __free_ep(struct kref *kref)
254{ 284{
255 struct iwch_ep_common *epc; 285 struct iwch_ep *ep;
256 epc = container_of(kref, struct iwch_ep_common, kref); 286 ep = container_of(container_of(kref, struct iwch_ep_common, kref),
257 PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]); 287 struct iwch_ep, com);
258 kfree(epc); 288 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
289 if (ep->com.flags & RELEASE_RESOURCES) {
290 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
291 dst_release(ep->dst);
292 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
293 }
294 kfree(ep);
259} 295}
260 296
261static void release_ep_resources(struct iwch_ep *ep) 297static void release_ep_resources(struct iwch_ep *ep)
262{ 298{
263 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); 299 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
264 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); 300 ep->com.flags |= RELEASE_RESOURCES;
265 dst_release(ep->dst);
266 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
267 put_ep(&ep->com); 301 put_ep(&ep->com);
268} 302}
269 303
@@ -382,7 +416,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
382 416
383 PDBG("%s t3cdev %p\n", __func__, dev); 417 PDBG("%s t3cdev %p\n", __func__, dev);
384 req->cmd = CPL_ABORT_NO_RST; 418 req->cmd = CPL_ABORT_NO_RST;
385 cxgb3_ofld_send(dev, skb); 419 iwch_cxgb3_ofld_send(dev, skb);
386} 420}
387 421
388static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) 422static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
@@ -402,8 +436,7 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
402 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); 436 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
403 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); 437 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
404 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); 438 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
405 l2t_send(ep->com.tdev, skb, ep->l2t); 439 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
406 return 0;
407} 440}
408 441
409static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) 442static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
@@ -424,8 +457,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
424 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); 457 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
425 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 458 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
426 req->cmd = CPL_ABORT_SEND_RST; 459 req->cmd = CPL_ABORT_SEND_RST;
427 l2t_send(ep->com.tdev, skb, ep->l2t); 460 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
428 return 0;
429} 461}
430 462
431static int send_connect(struct iwch_ep *ep) 463static int send_connect(struct iwch_ep *ep)
@@ -469,8 +501,7 @@ static int send_connect(struct iwch_ep *ep)
469 req->opt0l = htonl(opt0l); 501 req->opt0l = htonl(opt0l);
470 req->params = 0; 502 req->params = 0;
471 req->opt2 = htonl(opt2); 503 req->opt2 = htonl(opt2);
472 l2t_send(ep->com.tdev, skb, ep->l2t); 504 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
473 return 0;
474} 505}
475 506
476static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) 507static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
@@ -527,7 +558,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
527 req->sndseq = htonl(ep->snd_seq); 558 req->sndseq = htonl(ep->snd_seq);
528 BUG_ON(ep->mpa_skb); 559 BUG_ON(ep->mpa_skb);
529 ep->mpa_skb = skb; 560 ep->mpa_skb = skb;
530 l2t_send(ep->com.tdev, skb, ep->l2t); 561 iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
531 start_ep_timer(ep); 562 start_ep_timer(ep);
532 state_set(&ep->com, MPA_REQ_SENT); 563 state_set(&ep->com, MPA_REQ_SENT);
533 return; 564 return;
@@ -578,8 +609,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
578 req->sndseq = htonl(ep->snd_seq); 609 req->sndseq = htonl(ep->snd_seq);
579 BUG_ON(ep->mpa_skb); 610 BUG_ON(ep->mpa_skb);
580 ep->mpa_skb = skb; 611 ep->mpa_skb = skb;
581 l2t_send(ep->com.tdev, skb, ep->l2t); 612 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
582 return 0;
583} 613}
584 614
585static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) 615static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
@@ -630,8 +660,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
630 req->sndseq = htonl(ep->snd_seq); 660 req->sndseq = htonl(ep->snd_seq);
631 ep->mpa_skb = skb; 661 ep->mpa_skb = skb;
632 state_set(&ep->com, MPA_REP_SENT); 662 state_set(&ep->com, MPA_REP_SENT);
633 l2t_send(ep->com.tdev, skb, ep->l2t); 663 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
634 return 0;
635} 664}
636 665
637static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 666static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
@@ -795,7 +824,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
795 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); 824 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
796 req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1)); 825 req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
797 skb->priority = CPL_PRIORITY_ACK; 826 skb->priority = CPL_PRIORITY_ACK;
798 cxgb3_ofld_send(ep->com.tdev, skb); 827 iwch_cxgb3_ofld_send(ep->com.tdev, skb);
799 return credits; 828 return credits;
800} 829}
801 830
@@ -1127,8 +1156,8 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1127 * We get 2 abort replies from the HW. The first one must 1156 * We get 2 abort replies from the HW. The first one must
1128 * be ignored except for scribbling that we need one more. 1157 * be ignored except for scribbling that we need one more.
1129 */ 1158 */
1130 if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) { 1159 if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
1131 ep->flags |= ABORT_REQ_IN_PROGRESS; 1160 ep->com.flags |= ABORT_REQ_IN_PROGRESS;
1132 return CPL_RET_BUF_DONE; 1161 return CPL_RET_BUF_DONE;
1133 } 1162 }
1134 1163
@@ -1203,8 +1232,7 @@ static int listen_start(struct iwch_listen_ep *ep)
1203 req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK)); 1232 req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
1204 1233
1205 skb->priority = 1; 1234 skb->priority = 1;
1206 cxgb3_ofld_send(ep->com.tdev, skb); 1235 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
1207 return 0;
1208} 1236}
1209 1237
1210static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1238static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
@@ -1237,8 +1265,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
1237 req->cpu_idx = 0; 1265 req->cpu_idx = 0;
1238 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); 1266 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
1239 skb->priority = 1; 1267 skb->priority = 1;
1240 cxgb3_ofld_send(ep->com.tdev, skb); 1268 return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
1241 return 0;
1242} 1269}
1243 1270
1244static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb, 1271static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
@@ -1286,7 +1313,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1286 rpl->opt2 = htonl(opt2); 1313 rpl->opt2 = htonl(opt2);
1287 rpl->rsvd = rpl->opt2; /* workaround for HW bug */ 1314 rpl->rsvd = rpl->opt2; /* workaround for HW bug */
1288 skb->priority = CPL_PRIORITY_SETUP; 1315 skb->priority = CPL_PRIORITY_SETUP;
1289 l2t_send(ep->com.tdev, skb, ep->l2t); 1316 iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
1290 1317
1291 return; 1318 return;
1292} 1319}
@@ -1315,7 +1342,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
1315 rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT); 1342 rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
1316 rpl->opt2 = 0; 1343 rpl->opt2 = 0;
1317 rpl->rsvd = rpl->opt2; 1344 rpl->rsvd = rpl->opt2;
1318 cxgb3_ofld_send(tdev, skb); 1345 iwch_cxgb3_ofld_send(tdev, skb);
1319 } 1346 }
1320} 1347}
1321 1348
@@ -1534,8 +1561,8 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1534 * We get 2 peer aborts from the HW. The first one must 1561 * We get 2 peer aborts from the HW. The first one must
1535 * be ignored except for scribbling that we need one more. 1562 * be ignored except for scribbling that we need one more.
1536 */ 1563 */
1537 if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) { 1564 if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
1538 ep->flags |= PEER_ABORT_IN_PROGRESS; 1565 ep->com.flags |= PEER_ABORT_IN_PROGRESS;
1539 return CPL_RET_BUF_DONE; 1566 return CPL_RET_BUF_DONE;
1540 } 1567 }
1541 1568
@@ -1613,7 +1640,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1613 rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); 1640 rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
1614 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 1641 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1615 rpl->cmd = CPL_ABORT_NO_RST; 1642 rpl->cmd = CPL_ABORT_NO_RST;
1616 cxgb3_ofld_send(ep->com.tdev, rpl_skb); 1643 iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
1617out: 1644out:
1618 if (release) 1645 if (release)
1619 release_ep_resources(ep); 1646 release_ep_resources(ep);
@@ -2017,8 +2044,11 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id)
2017 ep->com.rpl_done = 0; 2044 ep->com.rpl_done = 0;
2018 ep->com.rpl_err = 0; 2045 ep->com.rpl_err = 0;
2019 err = listen_stop(ep); 2046 err = listen_stop(ep);
2047 if (err)
2048 goto done;
2020 wait_event(ep->com.waitq, ep->com.rpl_done); 2049 wait_event(ep->com.waitq, ep->com.rpl_done);
2021 cxgb3_free_stid(ep->com.tdev, ep->stid); 2050 cxgb3_free_stid(ep->com.tdev, ep->stid);
2051done:
2022 err = ep->com.rpl_err; 2052 err = ep->com.rpl_err;
2023 cm_id->rem_ref(cm_id); 2053 cm_id->rem_ref(cm_id);
2024 put_ep(&ep->com); 2054 put_ep(&ep->com);
@@ -2030,12 +2060,22 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
2030 int ret=0; 2060 int ret=0;
2031 unsigned long flags; 2061 unsigned long flags;
2032 int close = 0; 2062 int close = 0;
2063 int fatal = 0;
2064 struct t3cdev *tdev;
2065 struct cxio_rdev *rdev;
2033 2066
2034 spin_lock_irqsave(&ep->com.lock, flags); 2067 spin_lock_irqsave(&ep->com.lock, flags);
2035 2068
2036 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 2069 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2037 states[ep->com.state], abrupt); 2070 states[ep->com.state], abrupt);
2038 2071
2072 tdev = (struct t3cdev *)ep->com.tdev;
2073 rdev = (struct cxio_rdev *)tdev->ulp;
2074 if (cxio_fatal_error(rdev)) {
2075 fatal = 1;
2076 close_complete_upcall(ep);
2077 ep->com.state = DEAD;
2078 }
2039 switch (ep->com.state) { 2079 switch (ep->com.state) {
2040 case MPA_REQ_WAIT: 2080 case MPA_REQ_WAIT:
2041 case MPA_REQ_SENT: 2081 case MPA_REQ_SENT:
@@ -2075,7 +2115,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
2075 ret = send_abort(ep, NULL, gfp); 2115 ret = send_abort(ep, NULL, gfp);
2076 else 2116 else
2077 ret = send_halfclose(ep, gfp); 2117 ret = send_halfclose(ep, gfp);
2118 if (ret)
2119 fatal = 1;
2078 } 2120 }
2121 if (fatal)
2122 release_ep_resources(ep);
2079 return ret; 2123 return ret;
2080} 2124}
2081 2125
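
The iwch_cxgb3_ofld_send()/iwch_l2t_send() wrappers introduced above centralize two behaviours: they drop the skb and fail fast with -EIO once the rdev is flagged fatal, and they free the skb when the lower-level send itself fails, so callers such as listen_stop() and send_halfclose() can simply return the status, and iwch_ep_disconnect() can tear the endpoint down on error. A small guard-and-propagate sketch with stand-in types (not the driver's real structures):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-ins for struct sk_buff / struct cxio_rdev */
struct skb  { int len; };
struct rdev { int fatal; };

static int lowlevel_send(struct skb *skb) { (void)skb; return 0; }  /* pretend success */

/* guarded send in the spirit of iwch_cxgb3_ofld_send() */
static int guarded_send(struct rdev *rdev, struct skb *skb)
{
    int err;

    if (rdev->fatal) {          /* device already dead: do not queue anything */
        free(skb);
        return -EIO;
    }
    err = lowlevel_send(skb);
    if (err)
        free(skb);              /* ownership stays here on failure */
    return err;                 /* on success the lower layer notionally owns skb */
}

int main(void)
{
    struct rdev rdev = { .fatal = 0 };
    struct skb *skb = calloc(1, sizeof(*skb));

    /* the caller propagates the status instead of assuming the send worked */
    if (guarded_send(&rdev, skb))
        fprintf(stderr, "send failed, releasing endpoint resources\n");
    return 0;
}
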
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index d7c7e09f0996..43c0aea7eadc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -147,6 +147,7 @@ enum iwch_ep_state {
147enum iwch_ep_flags { 147enum iwch_ep_flags {
148 PEER_ABORT_IN_PROGRESS = (1 << 0), 148 PEER_ABORT_IN_PROGRESS = (1 << 0),
149 ABORT_REQ_IN_PROGRESS = (1 << 1), 149 ABORT_REQ_IN_PROGRESS = (1 << 1),
150 RELEASE_RESOURCES = (1 << 2),
150}; 151};
151 152
152struct iwch_ep_common { 153struct iwch_ep_common {
@@ -161,6 +162,7 @@ struct iwch_ep_common {
161 wait_queue_head_t waitq; 162 wait_queue_head_t waitq;
162 int rpl_done; 163 int rpl_done;
163 int rpl_err; 164 int rpl_err;
165 u32 flags;
164}; 166};
165 167
166struct iwch_listen_ep { 168struct iwch_listen_ep {
@@ -188,7 +190,6 @@ struct iwch_ep {
188 u16 plen; 190 u16 plen;
189 u32 ird; 191 u32 ird;
190 u32 ord; 192 u32 ord;
191 u32 flags;
192}; 193};
193 194
194static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) 195static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index c758fbd58478..2f546a625330 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -751,7 +751,7 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
751 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| 751 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
752 V_FW_RIWR_LEN(flit_cnt)); 752 V_FW_RIWR_LEN(flit_cnt));
753 skb->priority = CPL_PRIORITY_DATA; 753 skb->priority = CPL_PRIORITY_DATA;
754 return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); 754 return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
755} 755}
756 756
757/* 757/*
@@ -783,7 +783,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
783 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG)); 783 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
784 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)); 784 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
785 skb->priority = CPL_PRIORITY_DATA; 785 skb->priority = CPL_PRIORITY_DATA;
786 return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); 786 return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
787} 787}
788 788
789/* 789/*
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 2ccb9d31771f..ae3d7590346e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -394,8 +394,7 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
394 PAGE_SIZE, vma->vm_page_prot)) 394 PAGE_SIZE, vma->vm_page_prot))
395 return -EAGAIN; 395 return -EAGAIN;
396 } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) { 396 } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
397 /* FIXME want pgprot_writecombine() for BlueFlame pages */ 397 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
398 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
399 398
400 if (io_remap_pfn_range(vma, vma->vm_start, 399 if (io_remap_pfn_range(vma, vma->vm_start,
401 to_mucontext(context)->uar.pfn + 400 to_mucontext(context)->uar.pfn +
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 04b12ad23390..17621de54a9f 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -289,8 +289,8 @@ static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad)
289static inline void 289static inline void
290set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) 290set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
291{ 291{
292 wqe_words[index] = cpu_to_le32((u32) ((unsigned long)value)); 292 wqe_words[index] = cpu_to_le32((u32) value);
293 wqe_words[index + 1] = cpu_to_le32((u32)(upper_32_bits((unsigned long)value))); 293 wqe_words[index + 1] = cpu_to_le32(upper_32_bits(value));
294} 294}
295 295
296static inline void 296static inline void
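
The set_wqe_64bit_value() change matters on 32-bit builds, where unsigned long is 32 bits: casting the u64 to unsigned long before splitting truncates it, so the high word written into the WQE was always zero; splitting the original 64-bit value with upper_32_bits() preserves it. A standalone demonstration of the difference (upper_32_bits() re-implemented locally for the sketch):

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's upper_32_bits() helper */
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
    uint64_t value = 0x0000000123456789ULL;
    uint32_t lo = (uint32_t)value;

    /* old code: value was first narrowed, as (unsigned long) does on a 32-bit arch */
    uint32_t hi_old = upper_32_bits((uint32_t)value);   /* always 0 */
    /* new code: split the original 64-bit quantity */
    uint32_t hi_new = upper_32_bits(value);

    printf("low=%#x  hi(old, 32-bit)=%#x  hi(new)=%#x\n", lo, hi_old, hi_new);
    return 0;
}
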
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 52425154acd4..dbd9a75474e3 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -426,6 +426,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
426 if (type == NES_TIMER_TYPE_CLOSE) { 426 if (type == NES_TIMER_TYPE_CLOSE) {
427 new_send->timetosend += (HZ/10); 427 new_send->timetosend += (HZ/10);
428 if (cm_node->recv_entry) { 428 if (cm_node->recv_entry) {
429 kfree(new_send);
429 WARN_ON(1); 430 WARN_ON(1);
430 return -EINVAL; 431 return -EINVAL;
431 } 432 }
@@ -445,8 +446,8 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
445 if (ret != NETDEV_TX_OK) { 446 if (ret != NETDEV_TX_OK) {
446 nes_debug(NES_DBG_CM, "Error sending packet %p " 447 nes_debug(NES_DBG_CM, "Error sending packet %p "
447 "(jiffies = %lu)\n", new_send, jiffies); 448 "(jiffies = %lu)\n", new_send, jiffies);
448 atomic_dec(&new_send->skb->users);
449 new_send->timetosend = jiffies; 449 new_send->timetosend = jiffies;
450 ret = NETDEV_TX_OK;
450 } else { 451 } else {
451 cm_packets_sent++; 452 cm_packets_sent++;
452 if (!send_retrans) { 453 if (!send_retrans) {
@@ -630,7 +631,6 @@ static void nes_cm_timer_tick(unsigned long pass)
630 nes_debug(NES_DBG_CM, "rexmit failed for " 631 nes_debug(NES_DBG_CM, "rexmit failed for "
631 "node=%p\n", cm_node); 632 "node=%p\n", cm_node);
632 cm_packets_bounced++; 633 cm_packets_bounced++;
633 atomic_dec(&send_entry->skb->users);
634 send_entry->retrycount--; 634 send_entry->retrycount--;
635 nexttimeout = jiffies + NES_SHORT_TIME; 635 nexttimeout = jiffies + NES_SHORT_TIME;
636 settimer = 1; 636 settimer = 1;
@@ -666,11 +666,6 @@ static void nes_cm_timer_tick(unsigned long pass)
666 666
667 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); 667 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
668 rem_ref_cm_node(cm_node->cm_core, cm_node); 668 rem_ref_cm_node(cm_node->cm_core, cm_node);
669 if (ret != NETDEV_TX_OK) {
670 nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n",
671 cm_node);
672 break;
673 }
674 } 669 }
675 670
676 if (settimer) { 671 if (settimer) {
@@ -1262,7 +1257,6 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
1262 cm_node->nesqp = NULL; 1257 cm_node->nesqp = NULL;
1263 } 1258 }
1264 1259
1265 cm_node->freed = 1;
1266 kfree(cm_node); 1260 kfree(cm_node);
1267 return 0; 1261 return 0;
1268} 1262}
@@ -1999,13 +1993,17 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1999 if (loopbackremotelistener == NULL) { 1993 if (loopbackremotelistener == NULL) {
2000 create_event(cm_node, NES_CM_EVENT_ABORTED); 1994 create_event(cm_node, NES_CM_EVENT_ABORTED);
2001 } else { 1995 } else {
2002 atomic_inc(&cm_loopbacks);
2003 loopback_cm_info = *cm_info; 1996 loopback_cm_info = *cm_info;
2004 loopback_cm_info.loc_port = cm_info->rem_port; 1997 loopback_cm_info.loc_port = cm_info->rem_port;
2005 loopback_cm_info.rem_port = cm_info->loc_port; 1998 loopback_cm_info.rem_port = cm_info->loc_port;
2006 loopback_cm_info.cm_id = loopbackremotelistener->cm_id; 1999 loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
2007 loopbackremotenode = make_cm_node(cm_core, nesvnic, 2000 loopbackremotenode = make_cm_node(cm_core, nesvnic,
2008 &loopback_cm_info, loopbackremotelistener); 2001 &loopback_cm_info, loopbackremotelistener);
2002 if (!loopbackremotenode) {
2003 rem_ref_cm_node(cm_node->cm_core, cm_node);
2004 return NULL;
2005 }
2006 atomic_inc(&cm_loopbacks);
2009 loopbackremotenode->loopbackpartner = cm_node; 2007 loopbackremotenode->loopbackpartner = cm_node;
2010 loopbackremotenode->tcp_cntxt.rcv_wscale = 2008 loopbackremotenode->tcp_cntxt.rcv_wscale =
2011 NES_CM_DEFAULT_RCV_WND_SCALE; 2009 NES_CM_DEFAULT_RCV_WND_SCALE;
@@ -2690,6 +2688,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2690 struct ib_mr *ibmr = NULL; 2688 struct ib_mr *ibmr = NULL;
2691 struct ib_phys_buf ibphysbuf; 2689 struct ib_phys_buf ibphysbuf;
2692 struct nes_pd *nespd; 2690 struct nes_pd *nespd;
2691 u64 tagged_offset;
2693 2692
2694 2693
2695 2694
@@ -2755,10 +2754,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2755 ibphysbuf.addr = nesqp->ietf_frame_pbase; 2754 ibphysbuf.addr = nesqp->ietf_frame_pbase;
2756 ibphysbuf.size = conn_param->private_data_len + 2755 ibphysbuf.size = conn_param->private_data_len +
2757 sizeof(struct ietf_mpa_frame); 2756 sizeof(struct ietf_mpa_frame);
2757 tagged_offset = (u64)(unsigned long)nesqp->ietf_frame;
2758 ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, 2758 ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd,
2759 &ibphysbuf, 1, 2759 &ibphysbuf, 1,
2760 IB_ACCESS_LOCAL_WRITE, 2760 IB_ACCESS_LOCAL_WRITE,
2761 (u64 *)&nesqp->ietf_frame); 2761 &tagged_offset);
2762 if (!ibmr) { 2762 if (!ibmr) {
2763 nes_debug(NES_DBG_CM, "Unable to register memory region" 2763 nes_debug(NES_DBG_CM, "Unable to register memory region"
2764 "for lSMM for cm_node = %p \n", 2764 "for lSMM for cm_node = %p \n",
@@ -2782,7 +2782,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2782 sizeof(struct ietf_mpa_frame)); 2782 sizeof(struct ietf_mpa_frame));
2783 set_wqe_64bit_value(wqe->wqe_words, 2783 set_wqe_64bit_value(wqe->wqe_words,
2784 NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, 2784 NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
2785 (u64)nesqp->ietf_frame); 2785 (u64)(unsigned long)nesqp->ietf_frame);
2786 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 2786 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
2787 cpu_to_le32(conn_param->private_data_len + 2787 cpu_to_le32(conn_param->private_data_len +
2788 sizeof(struct ietf_mpa_frame)); 2788 sizeof(struct ietf_mpa_frame));
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index d5f778202eb7..80bba1892571 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -298,7 +298,6 @@ struct nes_cm_node {
298 struct nes_vnic *nesvnic; 298 struct nes_vnic *nesvnic;
299 int apbvt_set; 299 int apbvt_set;
300 int accept_pend; 300 int accept_pend;
301 int freed;
302 struct list_head timer_entry; 301 struct list_head timer_entry;
303 struct list_head reset_entry; 302 struct list_head reset_entry;
304 struct nes_qp *nesqp; 303 struct nes_qp *nesqp;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 52e734042b8e..d6fc9ae44062 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -46,6 +46,10 @@ static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
46module_param(nes_lro_max_aggr, uint, 0444); 46module_param(nes_lro_max_aggr, uint, 0444);
47MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation"); 47MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
48 48
49static int wide_ppm_offset;
50module_param(wide_ppm_offset, int, 0644);
51MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
52
49static u32 crit_err_count; 53static u32 crit_err_count;
50u32 int_mod_timer_init; 54u32 int_mod_timer_init;
51u32 int_mod_cq_depth_256; 55u32 int_mod_cq_depth_256;
@@ -546,8 +550,11 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
546 msleep(1); 550 msleep(1);
547 } 551 }
548 if (int_cnt > 1) { 552 if (int_cnt > 1) {
553 u32 sds;
549 spin_lock_irqsave(&nesadapter->phy_lock, flags); 554 spin_lock_irqsave(&nesadapter->phy_lock, flags);
550 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); 555 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
556 sds |= 0x00000040;
557 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
551 mh_detected++; 558 mh_detected++;
552 reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); 559 reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
553 reset_value |= 0x0000003d; 560 reset_value |= 0x0000003d;
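
Instead of overwriting ETH_SERDES_COMMON_CONTROL1 with the literal 0x0000F088, the new code above reads the register, sets bit 6, and writes the result back. A generic sketch of that read-modify-write pattern; reg_read()/reg_write() are placeholders standing in for nes_read_indexed()/nes_write_indexed(), not real driver calls:

#include <linux/types.h>

u32 reg_read(u32 index);		/* placeholder for nes_read_indexed()  */
void reg_write(u32 index, u32 val);	/* placeholder for nes_write_indexed() */

/* Touch only the bits this code path owns; whatever firmware or earlier
 * initialisation left in the other bits survives the update. */
static void reg_set_bits(u32 index, u32 bits)
{
	reg_write(index, reg_read(index) | bits);
}

static void reg_clear_bits(u32 index, u32 bits)
{
	reg_write(index, reg_read(index) & ~bits);
}
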
@@ -736,39 +743,49 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
736{ 743{
737 int i; 744 int i;
738 u32 u32temp; 745 u32 u32temp;
739 u32 serdes_common_control; 746 u32 sds;
740 747
741 if (hw_rev != NE020_REV) { 748 if (hw_rev != NE020_REV) {
742 /* init serdes 0 */ 749 /* init serdes 0 */
750 if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4))
751 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
752 else
753 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
743 754
744 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
745 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { 755 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
746 serdes_common_control = nes_read_indexed(nesdev, 756 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
747 NES_IDX_ETH_SERDES_COMMON_CONTROL0); 757 sds |= 0x00000100;
748 serdes_common_control |= 0x000000100; 758 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds);
749 nes_write_indexed(nesdev,
750 NES_IDX_ETH_SERDES_COMMON_CONTROL0,
751 serdes_common_control);
752 } else if (!OneG_Mode) {
753 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
754 } 759 }
755 if (((port_count > 1) && 760 if (!OneG_Mode)
756 (nesadapter->phy_type[0] != NES_PHY_TYPE_PUMA_1G)) || 761 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
757 ((port_count > 2) && 762
758 (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G))) { 763 if (port_count < 2)
759 /* init serdes 1 */ 764 return 0;
760 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); 765
761 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { 766 /* init serdes 1 */
762 serdes_common_control = nes_read_indexed(nesdev, 767 switch (nesadapter->phy_type[1]) {
763 NES_IDX_ETH_SERDES_COMMON_CONTROL1); 768 case NES_PHY_TYPE_ARGUS:
764 serdes_common_control |= 0x000000100; 769 case NES_PHY_TYPE_SFP_D:
765 nes_write_indexed(nesdev, 770 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
766 NES_IDX_ETH_SERDES_COMMON_CONTROL1, 771 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
767 serdes_common_control); 772 break;
768 } else if (!OneG_Mode) { 773 case NES_PHY_TYPE_CX4:
769 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000); 774 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
770 } 775 sds &= 0xFFFFFFBF;
776 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
777 if (wide_ppm_offset)
778 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
779 else
780 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
781 break;
782 case NES_PHY_TYPE_PUMA_1G:
783 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
784 sds |= 0x000000100;
785 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
771 } 786 }
787 if (!OneG_Mode)
788 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
772 } else { 789 } else {
773 /* init serdes 0 */ 790 /* init serdes 0 */
774 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); 791 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
@@ -1259,203 +1276,162 @@ int nes_init_phy(struct nes_device *nesdev)
1259{ 1276{
1260 struct nes_adapter *nesadapter = nesdev->nesadapter; 1277 struct nes_adapter *nesadapter = nesdev->nesadapter;
1261 u32 counter = 0; 1278 u32 counter = 0;
1262 u32 sds_common_control0; 1279 u32 sds;
1263 u32 mac_index = nesdev->mac_index; 1280 u32 mac_index = nesdev->mac_index;
1264 u32 tx_config = 0; 1281 u32 tx_config = 0;
1265 u16 phy_data; 1282 u16 phy_data;
1266 u32 temp_phy_data = 0; 1283 u32 temp_phy_data = 0;
1267 u32 temp_phy_data2 = 0; 1284 u32 temp_phy_data2 = 0;
1268 u32 i = 0; 1285 u8 phy_type = nesadapter->phy_type[mac_index];
1286 u8 phy_index = nesadapter->phy_index[mac_index];
1269 1287
1270 if ((nesadapter->OneG_Mode) && 1288 if ((nesadapter->OneG_Mode) &&
1271 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { 1289 (phy_type != NES_PHY_TYPE_PUMA_1G)) {
1272 nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index); 1290 nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
1273 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) { 1291 if (phy_type == NES_PHY_TYPE_1G) {
1274 printk(PFX "%s: Programming mdc config for 1G\n", __func__);
1275 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); 1292 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1276 tx_config &= 0xFFFFFFE3; 1293 tx_config &= 0xFFFFFFE3;
1277 tx_config |= 0x04; 1294 tx_config |= 0x04;
1278 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); 1295 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1279 } 1296 }
1280 1297
1281 nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data); 1298 nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
1282 nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n", 1299 nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
1283 nesadapter->phy_index[mac_index], phy_data);
1284 nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000);
1285 1300
1286 /* Reset the PHY */ 1301 /* Reset the PHY */
1287 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000); 1302 nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
1288 udelay(100); 1303 udelay(100);
1289 counter = 0; 1304 counter = 0;
1290 do { 1305 do {
1291 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); 1306 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1292 nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data); 1307 if (counter++ > 100)
1293 if (counter++ > 100) break; 1308 break;
1294 } while (phy_data & 0x8000); 1309 } while (phy_data & 0x8000);
1295 1310
1296 /* Setting no phy loopback */ 1311 /* Setting no phy loopback */
1297 phy_data &= 0xbfff; 1312 phy_data &= 0xbfff;
1298 phy_data |= 0x1140; 1313 phy_data |= 0x1140;
1299 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data); 1314 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
1300 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); 1315 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1301 nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data); 1316 nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
1302 1317 nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
1303 nes_read_1G_phy_reg(nesdev, 0x17, nesadapter->phy_index[mac_index], &phy_data);
1304 nes_debug(NES_DBG_PHY, "Phy data from register 0x17 = 0x%X.\n", phy_data);
1305
1306 nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data);
1307 nes_debug(NES_DBG_PHY, "Phy data from register 0x1e = 0x%X.\n", phy_data);
1308 1318
1309 /* Setting the interrupt mask */ 1319 /* Setting the interrupt mask */
1310 nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data); 1320 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
1311 nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data); 1321 nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
1312 nes_write_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], 0xffee); 1322 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
1313
1314 nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
1315 nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
1316 1323
1317 /* turning on flow control */ 1324 /* turning on flow control */
1318 nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data); 1325 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1319 nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data); 1326 nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
1320 nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], 1327 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1321 (phy_data & ~(0x03E0)) | 0xc00);
1322 /* nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
1323 phy_data | 0xc00); */
1324 nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
1325 nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
1326
1327 nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
1328 nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
1329 /* Clear Half duplex */
1330 nes_write_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index],
1331 phy_data & ~(0x0100));
1332 nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
1333 nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
1334
1335 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
1336 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300);
1337 } else {
1338 if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) ||
1339 (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
1340 /* setup 10G MDIO operation */
1341 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1342 tx_config &= 0xFFFFFFE3;
1343 tx_config |= 0x15;
1344 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1345 }
1346 if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
1347 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
1348 1328
1349 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1329 /* Clear Half duplex */
1350 mdelay(10); 1330 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
1351 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); 1331 nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
1352 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1332 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
1353 1333
1354 /* 1334 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1355 * if firmware is already running (like from a 1335 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
1356 * driver un-load/load, don't do anything.
1357 */
1358 if (temp_phy_data == temp_phy_data2) {
1359 /* configure QT2505 AMCC PHY */
1360 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0x0000, 0x8000);
1361 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0000);
1362 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc302, 0x0044);
1363 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc318, 0x0052);
1364 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008);
1365 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098);
1366 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00);
1367 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0001);
1368 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528);
1369 1336
1370 /* 1337 return 0;
1371 * remove micro from reset; chip boots from ROM, 1338 }
1372 * uploads EEPROM f/w image, uC executes f/w
1373 */
1374 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0002);
1375 1339
1376 /* 1340 if ((phy_type == NES_PHY_TYPE_IRIS) ||
1377 * wait for heart beat to start to 1341 (phy_type == NES_PHY_TYPE_ARGUS) ||
1378 * know loading is done 1342 (phy_type == NES_PHY_TYPE_SFP_D)) {
1379 */ 1343 /* setup 10G MDIO operation */
1380 counter = 0; 1344 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1381 do { 1345 tx_config &= 0xFFFFFFE3;
1382 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); 1346 tx_config |= 0x15;
1383 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1347 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1384 if (counter++ > 1000) { 1348 }
1385 nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from heartbeat check <this is bad!!!> \n"); 1349 if ((phy_type == NES_PHY_TYPE_ARGUS) ||
1386 break; 1350 (phy_type == NES_PHY_TYPE_SFP_D)) {
1387 } 1351 /* Check firmware heartbeat */
1388 mdelay(100); 1352 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1389 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); 1353 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1390 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1354 udelay(1500);
1391 } while ((temp_phy_data2 == temp_phy_data)); 1355 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1356 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1357
1358 if (temp_phy_data != temp_phy_data2)
1359 return 0;
1392 1360
1393 /* 1361 /* no heartbeat, configure the PHY */
1394 * wait for tracking to start to know 1362 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
1395 * f/w is good to go 1363 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
1396 */ 1364 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1397 counter = 0; 1365 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1398 do { 1366 if (phy_type == NES_PHY_TYPE_ARGUS) {
1399 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7fd); 1367 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
1400 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1368 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
1401 if (counter++ > 1000) { 1369 } else {
1402 nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from status check <this is bad!!!> \n"); 1370 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
1403 break; 1371 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
1404 } 1372 }
1405 mdelay(1000); 1373 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
1406 /* 1374 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
1407 * nes_debug(NES_DBG_PHY, "AMCC PHY- phy_status not ready yet = 0x%02X\n", 1375 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
1408 * temp_phy_data);
1409 */
1410 } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
1411
1412 /* set LOS Control invert RXLOSB_I_PADINV */
1413 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd003, 0x0000);
1414 /* set LOS Control to mask of RXLOSB_I */
1415 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc314, 0x0042);
1416 /* set LED1 to input mode (LED1 and LED2 share same LED) */
1417 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd006, 0x0007);
1418 /* set LED2 to RX link_status and activity */
1419 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd007, 0x000A);
1420 /* set LED3 to RX link_status */
1421 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd008, 0x0009);
1422 1376
1423 /* 1377 /* setup LEDs */
1424 * reset the res-calibration on t2 1378 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
1425 * serdes; ensures it is stable after 1379 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
1426 * the amcc phy is stable 1380 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
1427 */
1428 1381
1429 sds_common_control0 = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); 1382 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
1430 sds_common_control0 |= 0x1;
1431 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
1432 1383
1433 /* release the res-calibration reset */ 1384 /* Bring PHY out of reset */
1434 sds_common_control0 &= 0xfffffffe; 1385 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
1435 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
1436 1386
1437 i = 0; 1387 /* Check for heartbeat */
1438 while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) 1388 counter = 0;
1439 && (i++ < 5000)) { 1389 mdelay(690);
1440 /* mdelay(1); */ 1390 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1441 } 1391 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1392 do {
1393 if (counter++ > 150) {
1394 nes_debug(NES_DBG_PHY, "No PHY heartbeat\n");
1395 break;
1396 }
1397 mdelay(1);
1398 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1399 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1400 } while ((temp_phy_data2 == temp_phy_data));
1442 1401
1443 /* 1402 /* wait for tracking */
1444 * wait for link train done before moving on, 1403 counter = 0;
1445 * or will get an interupt storm 1404 do {
1446 */ 1405 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
1447 counter = 0; 1406 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1448 do { 1407 if (counter++ > 300) {
1449 temp_phy_data = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1408 nes_debug(NES_DBG_PHY, "PHY did not track\n");
1450 (0x200 * (nesdev->mac_index & 1))); 1409 break;
1451 if (counter++ > 1000) {
1452 nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from link train wait <this is bad, link didnt train!!!>\n");
1453 break;
1454 }
1455 mdelay(1);
1456 } while (((temp_phy_data & 0x0f1f0000) != 0x0f0f0000));
1457 } 1410 }
1458 } 1411 mdelay(10);
1412 } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
1413
1414 /* setup signal integrity */
1415 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
1416 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
1417 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
1418 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
1419 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
1420
1421 /* reset serdes */
1422 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
1423 mac_index * 0x200);
1424 sds |= 0x1;
1425 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
1426 mac_index * 0x200, sds);
1427 sds &= 0xfffffffe;
1428 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
1429 mac_index * 0x200, sds);
1430
1431 counter = 0;
1432 while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
1433 && (counter++ < 5000))
1434 ;
1459 } 1435 }
1460 return 0; 1436 return 0;
1461} 1437}
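
The rewritten nes_init_phy() above decides whether the PHY firmware is already running by sampling a heartbeat counter register twice and comparing the two reads; only when the value has not moved does it run the one-time programming sequence. A compact sketch of that detection step; read_heartbeat() is a placeholder for the MDIO read of register 3.0xd7ee followed by the NES_IDX_MAC_MDIO_CONTROL read-back, not a real driver function:

#include <linux/types.h>
#include <linux/delay.h>

u16 read_heartbeat(void);	/* placeholder for the driver's MDIO read pair */

/* A moving counter means firmware already came up (for example after a
 * driver unload/reload), so the lengthy re-initialisation can be skipped. */
static bool phy_firmware_running(void)
{
	u16 first = read_heartbeat();

	udelay(1500);		/* same settling delay the new code uses */
	return read_heartbeat() != first;
}
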
@@ -2359,6 +2335,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2359 u16 temp_phy_data; 2335 u16 temp_phy_data;
2360 u32 pcs_val = 0x0f0f0000; 2336 u32 pcs_val = 0x0f0f0000;
2361 u32 pcs_mask = 0x0f1f0000; 2337 u32 pcs_mask = 0x0f1f0000;
2338 u32 cdr_ctrl;
2362 2339
2363 spin_lock_irqsave(&nesadapter->phy_lock, flags); 2340 spin_lock_irqsave(&nesadapter->phy_lock, flags);
2364 if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) { 2341 if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) {
@@ -2473,6 +2450,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2473 break; 2450 break;
2474 2451
2475 case NES_PHY_TYPE_ARGUS: 2452 case NES_PHY_TYPE_ARGUS:
2453 case NES_PHY_TYPE_SFP_D:
2476 /* clear the alarms */ 2454 /* clear the alarms */
2477 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008); 2455 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
2478 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001); 2456 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
@@ -2483,19 +2461,18 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2483 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004); 2461 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004);
2484 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005); 2462 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005);
2485 /* check link status */ 2463 /* check link status */
2486 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1); 2464 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
2487 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 2465 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2488 u32temp = 100;
2489 do {
2490 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2491 2466
2492 phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 2467 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
2493 if ((phy_data == temp_phy_data) || (!(--u32temp))) 2468 nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2494 break; 2469 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
2495 temp_phy_data = phy_data; 2470 phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2496 } while (1); 2471
2472 phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
2473
2497 nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", 2474 nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
2498 __func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP"); 2475 __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
2499 break; 2476 break;
2500 2477
2501 case NES_PHY_TYPE_PUMA_1G: 2478 case NES_PHY_TYPE_PUMA_1G:
@@ -2511,6 +2488,17 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2511 } 2488 }
2512 2489
2513 if (phy_data & 0x0004) { 2490 if (phy_data & 0x0004) {
2491 if (wide_ppm_offset &&
2492 (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) &&
2493 (nesadapter->hw_rev != NE020_REV)) {
2494 cdr_ctrl = nes_read_indexed(nesdev,
2495 NES_IDX_ETH_SERDES_CDR_CONTROL0 +
2496 mac_index * 0x200);
2497 nes_write_indexed(nesdev,
2498 NES_IDX_ETH_SERDES_CDR_CONTROL0 +
2499 mac_index * 0x200,
2500 cdr_ctrl | 0x000F0000);
2501 }
2514 nesadapter->mac_link_down[mac_index] = 0; 2502 nesadapter->mac_link_down[mac_index] = 0;
2515 list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { 2503 list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
2516 nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n", 2504 nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n",
@@ -2525,6 +2513,17 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2525 } 2513 }
2526 } 2514 }
2527 } else { 2515 } else {
2516 if (wide_ppm_offset &&
2517 (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) &&
2518 (nesadapter->hw_rev != NE020_REV)) {
2519 cdr_ctrl = nes_read_indexed(nesdev,
2520 NES_IDX_ETH_SERDES_CDR_CONTROL0 +
2521 mac_index * 0x200);
2522 nes_write_indexed(nesdev,
2523 NES_IDX_ETH_SERDES_CDR_CONTROL0 +
2524 mac_index * 0x200,
2525 cdr_ctrl & 0xFFF0FFFF);
2526 }
2528 nesadapter->mac_link_down[mac_index] = 1; 2527 nesadapter->mac_link_down[mac_index] = 1;
2529 list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { 2528 list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
2530 nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n", 2529 nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n",
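
With wide_ppm_offset enabled on a CX4 port, the interrupt handler above widens the CDR window while the link is up (OR in 0x000F0000) and restores it when the link drops (AND with 0xFFF0FFFF). A sketch of that paired update, again using placeholder register helpers rather than the driver's real API:

#include <linux/types.h>

u32 reg_read(u32 index);		/* placeholders, as in the earlier sketch */
void reg_write(u32 index, u32 val);

#define CDR_PPM_FIELD	0x000F0000	/* the nibble toggled by the hunks above */

static void cdr_widen_ppm(u32 cdr_index)
{
	reg_write(cdr_index, reg_read(cdr_index) | CDR_PPM_FIELD);
}

static void cdr_restore_ppm(u32 cdr_index)
{
	reg_write(cdr_index, reg_read(cdr_index) & ~CDR_PPM_FIELD);	/* & 0xFFF0FFFF */
}
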
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index f41a8710d2a8..c3654c6383fe 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -35,12 +35,14 @@
35 35
36#include <linux/inet_lro.h> 36#include <linux/inet_lro.h>
37 37
38#define NES_PHY_TYPE_CX4 1
38#define NES_PHY_TYPE_1G 2 39#define NES_PHY_TYPE_1G 2
39#define NES_PHY_TYPE_IRIS 3 40#define NES_PHY_TYPE_IRIS 3
40#define NES_PHY_TYPE_ARGUS 4 41#define NES_PHY_TYPE_ARGUS 4
41#define NES_PHY_TYPE_PUMA_1G 5 42#define NES_PHY_TYPE_PUMA_1G 5
42#define NES_PHY_TYPE_PUMA_10G 6 43#define NES_PHY_TYPE_PUMA_10G 6
43#define NES_PHY_TYPE_GLADIUS 7 44#define NES_PHY_TYPE_GLADIUS 7
45#define NES_PHY_TYPE_SFP_D 8
44 46
45#define NES_MULTICAST_PF_MAX 8 47#define NES_MULTICAST_PF_MAX 8
46 48
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index ecb1f6fd6276..c6e6611d3016 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1426,49 +1426,55 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1426 struct nes_vnic *nesvnic = netdev_priv(netdev); 1426 struct nes_vnic *nesvnic = netdev_priv(netdev);
1427 struct nes_device *nesdev = nesvnic->nesdev; 1427 struct nes_device *nesdev = nesvnic->nesdev;
1428 struct nes_adapter *nesadapter = nesdev->nesadapter; 1428 struct nes_adapter *nesadapter = nesdev->nesadapter;
1429 u32 mac_index = nesdev->mac_index;
1430 u8 phy_type = nesadapter->phy_type[mac_index];
1431 u8 phy_index = nesadapter->phy_index[mac_index];
1429 u16 phy_data; 1432 u16 phy_data;
1430 1433
1431 et_cmd->duplex = DUPLEX_FULL; 1434 et_cmd->duplex = DUPLEX_FULL;
1432 et_cmd->port = PORT_MII; 1435 et_cmd->port = PORT_MII;
1436 et_cmd->maxtxpkt = 511;
1437 et_cmd->maxrxpkt = 511;
1433 1438
1434 if (nesadapter->OneG_Mode) { 1439 if (nesadapter->OneG_Mode) {
1435 et_cmd->speed = SPEED_1000; 1440 et_cmd->speed = SPEED_1000;
1436 if (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) { 1441 if (phy_type == NES_PHY_TYPE_PUMA_1G) {
1437 et_cmd->supported = SUPPORTED_1000baseT_Full; 1442 et_cmd->supported = SUPPORTED_1000baseT_Full;
1438 et_cmd->advertising = ADVERTISED_1000baseT_Full; 1443 et_cmd->advertising = ADVERTISED_1000baseT_Full;
1439 et_cmd->autoneg = AUTONEG_DISABLE; 1444 et_cmd->autoneg = AUTONEG_DISABLE;
1440 et_cmd->transceiver = XCVR_INTERNAL; 1445 et_cmd->transceiver = XCVR_INTERNAL;
1441 et_cmd->phy_address = nesdev->mac_index; 1446 et_cmd->phy_address = mac_index;
1442 } else { 1447 } else {
1443 et_cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg; 1448 et_cmd->supported = SUPPORTED_1000baseT_Full
1444 et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg; 1449 | SUPPORTED_Autoneg;
1445 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], &phy_data); 1450 et_cmd->advertising = ADVERTISED_1000baseT_Full
1451 | ADVERTISED_Autoneg;
1452 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1446 if (phy_data & 0x1000) 1453 if (phy_data & 0x1000)
1447 et_cmd->autoneg = AUTONEG_ENABLE; 1454 et_cmd->autoneg = AUTONEG_ENABLE;
1448 else 1455 else
1449 et_cmd->autoneg = AUTONEG_DISABLE; 1456 et_cmd->autoneg = AUTONEG_DISABLE;
1450 et_cmd->transceiver = XCVR_EXTERNAL; 1457 et_cmd->transceiver = XCVR_EXTERNAL;
1451 et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index]; 1458 et_cmd->phy_address = phy_index;
1452 } 1459 }
1460 return 0;
1461 }
1462 if ((phy_type == NES_PHY_TYPE_IRIS) ||
1463 (phy_type == NES_PHY_TYPE_ARGUS) ||
1464 (phy_type == NES_PHY_TYPE_SFP_D)) {
1465 et_cmd->transceiver = XCVR_EXTERNAL;
1466 et_cmd->port = PORT_FIBRE;
1467 et_cmd->supported = SUPPORTED_FIBRE;
1468 et_cmd->advertising = ADVERTISED_FIBRE;
1469 et_cmd->phy_address = phy_index;
1453 } else { 1470 } else {
1454 if ((nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) || 1471 et_cmd->transceiver = XCVR_INTERNAL;
1455 (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_ARGUS)) { 1472 et_cmd->supported = SUPPORTED_10000baseT_Full;
1456 et_cmd->transceiver = XCVR_EXTERNAL; 1473 et_cmd->advertising = ADVERTISED_10000baseT_Full;
1457 et_cmd->port = PORT_FIBRE; 1474 et_cmd->phy_address = mac_index;
1458 et_cmd->supported = SUPPORTED_FIBRE;
1459 et_cmd->advertising = ADVERTISED_FIBRE;
1460 et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
1461 } else {
1462 et_cmd->transceiver = XCVR_INTERNAL;
1463 et_cmd->supported = SUPPORTED_10000baseT_Full;
1464 et_cmd->advertising = ADVERTISED_10000baseT_Full;
1465 et_cmd->phy_address = nesdev->mac_index;
1466 }
1467 et_cmd->speed = SPEED_10000;
1468 et_cmd->autoneg = AUTONEG_DISABLE;
1469 } 1475 }
1470 et_cmd->maxtxpkt = 511; 1476 et_cmd->speed = SPEED_10000;
1471 et_cmd->maxrxpkt = 511; 1477 et_cmd->autoneg = AUTONEG_DISABLE;
1472 return 0; 1478 return 0;
1473} 1479}
1474 1480
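
The reworked nes_netdev_get_settings() above hoists the fields every path shares (full duplex and the 511 maxtxpkt/maxrxpkt values), returns early for 1G parts, and lets all 10G flavours fall through to a common fixed-speed, autoneg-off tail. A trimmed sketch of that shape; the field values come from the diff, but the function itself is illustrative rather than a drop-in replacement:

#include <linux/types.h>
#include <linux/ethtool.h>

static int example_get_settings(struct ethtool_cmd *et_cmd,
				bool one_g_mode, bool fibre_phy)
{
	/* common fields first */
	et_cmd->duplex   = DUPLEX_FULL;
	et_cmd->port     = PORT_MII;
	et_cmd->maxtxpkt = 511;
	et_cmd->maxrxpkt = 511;

	if (one_g_mode) {
		et_cmd->speed = SPEED_1000;
		/* per-PHY supported/advertising/autoneg as in the diff */
		return 0;
	}

	if (fibre_phy) {			/* IRIS, ARGUS, SFP_D */
		et_cmd->port        = PORT_FIBRE;
		et_cmd->supported   = SUPPORTED_FIBRE;
		et_cmd->advertising = ADVERTISED_FIBRE;
	} else {				/* CX4 and the rest */
		et_cmd->supported   = SUPPORTED_10000baseT_Full;
		et_cmd->advertising = ADVERTISED_10000baseT_Full;
	}
	et_cmd->speed   = SPEED_10000;
	et_cmd->autoneg = AUTONEG_DISABLE;
	return 0;
}
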
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 5a76a5510350..4c57f329dd50 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -70,12 +70,14 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
70 */ 70 */
71 if (ppriv->pkey == pkey) { 71 if (ppriv->pkey == pkey) {
72 result = -ENOTUNIQ; 72 result = -ENOTUNIQ;
73 priv = NULL;
73 goto err; 74 goto err;
74 } 75 }
75 76
76 list_for_each_entry(priv, &ppriv->child_intfs, list) { 77 list_for_each_entry(priv, &ppriv->child_intfs, list) {
77 if (priv->pkey == pkey) { 78 if (priv->pkey == pkey) {
78 result = -ENOTUNIQ; 79 result = -ENOTUNIQ;
80 priv = NULL;
79 goto err; 81 goto err;
80 } 82 }
81 } 83 }
@@ -96,7 +98,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
96 98
97 result = ipoib_set_dev_features(priv, ppriv->ca); 99 result = ipoib_set_dev_features(priv, ppriv->ca);
98 if (result) 100 if (result)
99 goto device_init_failed; 101 goto err;
100 102
101 priv->pkey = pkey; 103 priv->pkey = pkey;
102 104
@@ -109,7 +111,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
109 ipoib_warn(ppriv, "failed to initialize subinterface: " 111 ipoib_warn(ppriv, "failed to initialize subinterface: "
110 "device %s, port %d", 112 "device %s, port %d",
111 ppriv->ca->name, ppriv->port); 113 ppriv->ca->name, ppriv->port);
112 goto device_init_failed; 114 goto err;
113 } 115 }
114 116
115 result = register_netdevice(priv->dev); 117 result = register_netdevice(priv->dev);
@@ -146,19 +148,19 @@ sysfs_failed:
146register_failed: 148register_failed:
147 ipoib_dev_cleanup(priv->dev); 149 ipoib_dev_cleanup(priv->dev);
148 150
149device_init_failed:
150 free_netdev(priv->dev);
151
152err: 151err:
153 mutex_unlock(&ppriv->vlan_mutex); 152 mutex_unlock(&ppriv->vlan_mutex);
154 rtnl_unlock(); 153 rtnl_unlock();
154 if (priv)
155 free_netdev(priv->dev);
156
155 return result; 157 return result;
156} 158}
157 159
158int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) 160int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
159{ 161{
160 struct ipoib_dev_priv *ppriv, *priv, *tpriv; 162 struct ipoib_dev_priv *ppriv, *priv, *tpriv;
161 int ret = -ENOENT; 163 struct net_device *dev = NULL;
162 164
163 if (!capable(CAP_NET_ADMIN)) 165 if (!capable(CAP_NET_ADMIN))
164 return -EPERM; 166 return -EPERM;
@@ -172,14 +174,17 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
172 unregister_netdevice(priv->dev); 174 unregister_netdevice(priv->dev);
173 ipoib_dev_cleanup(priv->dev); 175 ipoib_dev_cleanup(priv->dev);
174 list_del(&priv->list); 176 list_del(&priv->list);
175 free_netdev(priv->dev); 177 dev = priv->dev;
176
177 ret = 0;
178 break; 178 break;
179 } 179 }
180 } 180 }
181 mutex_unlock(&ppriv->vlan_mutex); 181 mutex_unlock(&ppriv->vlan_mutex);
182 rtnl_unlock(); 182 rtnl_unlock();
183 183
184 return ret; 184 if (dev) {
185 free_netdev(dev);
186 return 0;
187 }
188
189 return -ENODEV;
185} 190}
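
Both ipoib_vlan functions above now defer free_netdev() until the vlan_mutex and the RTNL lock have been released: the add path funnels every failure to a single err: label that frees the half-built device after unlocking, and the delete path remembers the victim in a local pointer for the same reason. A condensed sketch of that teardown ordering with generic names (struct child, child_list and list_lock are placeholders, not IPoIB types):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct child {
	struct list_head   list;
	unsigned short     pkey;
	struct net_device *dev;
};

/* Unregister under the locks, but call free_netdev() only after both the
 * private mutex and the RTNL lock are dropped. */
static int delete_child(struct list_head *child_list, struct mutex *list_lock,
			unsigned short pkey)
{
	struct child *c, *tmp;
	struct net_device *dev = NULL;

	rtnl_lock();
	mutex_lock(list_lock);
	list_for_each_entry_safe(c, tmp, child_list, list) {
		if (c->pkey == pkey) {
			unregister_netdevice(c->dev);
			list_del(&c->list);
			dev = c->dev;
			break;
		}
	}
	mutex_unlock(list_lock);
	rtnl_unlock();

	if (dev) {
		free_netdev(dev);
		return 0;
	}
	return -ENODEV;
}
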
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ec3db3ade118..d44065d2e662 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -132,6 +132,11 @@ static void input_start_autorepeat(struct input_dev *dev, int code)
132 } 132 }
133} 133}
134 134
135static void input_stop_autorepeat(struct input_dev *dev)
136{
137 del_timer(&dev->timer);
138}
139
135#define INPUT_IGNORE_EVENT 0 140#define INPUT_IGNORE_EVENT 0
136#define INPUT_PASS_TO_HANDLERS 1 141#define INPUT_PASS_TO_HANDLERS 1
137#define INPUT_PASS_TO_DEVICE 2 142#define INPUT_PASS_TO_DEVICE 2
@@ -167,6 +172,8 @@ static void input_handle_event(struct input_dev *dev,
167 __change_bit(code, dev->key); 172 __change_bit(code, dev->key);
168 if (value) 173 if (value)
169 input_start_autorepeat(dev, code); 174 input_start_autorepeat(dev, code);
175 else
176 input_stop_autorepeat(dev);
170 } 177 }
171 178
172 disposition = INPUT_PASS_TO_HANDLERS; 179 disposition = INPUT_PASS_TO_HANDLERS;
@@ -737,11 +744,11 @@ static inline void input_wakeup_procfs_readers(void)
737 744
738static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) 745static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
739{ 746{
740 int state = input_devices_state;
741
742 poll_wait(file, &input_devices_poll_wait, wait); 747 poll_wait(file, &input_devices_poll_wait, wait);
743 if (state != input_devices_state) 748 if (file->f_version != input_devices_state) {
749 file->f_version = input_devices_state;
744 return POLLIN | POLLRDNORM; 750 return POLLIN | POLLRDNORM;
751 }
745 752
746 return 0; 753 return 0;
747} 754}
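
input_proc_devices_poll() above stops comparing against a snapshot taken at the top of the call (which could only report changes that raced with the call itself) and instead records the last generation each reader saw in file->f_version, giving every open file its own state. A generic sketch of that poll pattern; events_wait and events_generation are placeholders:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(events_wait);
static u64 events_generation;	/* bumped whenever there is something new to read */

static unsigned int example_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &events_wait, wait);

	/* Per-file bookkeeping: report readable once per generation per reader. */
	if (file->f_version != events_generation) {
		file->f_version = events_generation;
		return POLLIN | POLLRDNORM;
	}
	return 0;
}
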
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 45470f18d7e9..f999dc60c3b8 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -229,7 +229,8 @@ struct atkbd {
229/* 229/*
230 * System-specific ketymap fixup routine 230 * System-specific ketymap fixup routine
231 */ 231 */
232static void (*atkbd_platform_fixup)(struct atkbd *); 232static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
233static void *atkbd_platform_fixup_data;
233 234
234static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, 235static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
235 ssize_t (*handler)(struct atkbd *, char *)); 236 ssize_t (*handler)(struct atkbd *, char *));
@@ -834,87 +835,64 @@ static void atkbd_disconnect(struct serio *serio)
834} 835}
835 836
836/* 837/*
837 * Most special keys (Fn+F?) on Dell laptops do not generate release 838 * generate release events for the keycodes given in data
838 * events so we have to do it ourselves.
839 */ 839 */
840static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd) 840static void atkbd_apply_forced_release_keylist(struct atkbd* atkbd,
841 const void *data)
841{ 842{
842 static const unsigned int forced_release_keys[] = { 843 const unsigned int *keys = data;
843 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, 844 unsigned int i;
844 };
845 int i;
846 845
847 if (atkbd->set == 2) 846 if (atkbd->set == 2)
848 for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) 847 for (i = 0; keys[i] != -1U; i++)
849 __set_bit(forced_release_keys[i], 848 __set_bit(keys[i], atkbd->force_release_mask);
850 atkbd->force_release_mask);
851} 849}
852 850
853/* 851/*
852 * Most special keys (Fn+F?) on Dell laptops do not generate release
853 * events so we have to do it ourselves.
854 */
855static unsigned int atkbd_dell_laptop_forced_release_keys[] = {
856 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, -1U
857};
858
859/*
854 * Perform fixup for HP system that doesn't generate release 860 * Perform fixup for HP system that doesn't generate release
855 * for its video switch 861 * for its video switch
856 */ 862 */
857static void atkbd_hp_keymap_fixup(struct atkbd *atkbd) 863static unsigned int atkbd_hp_forced_release_keys[] = {
858{ 864 0x94, -1U
859 static const unsigned int forced_release_keys[] = { 865};
860 0x94,
861 };
862 int i;
863
864 if (atkbd->set == 2)
865 for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
866 __set_bit(forced_release_keys[i],
867 atkbd->force_release_mask);
868}
869 866
870/* 867/*
871 * Inventec system with broken key release on volume keys 868 * Inventec system with broken key release on volume keys
872 */ 869 */
873static void atkbd_inventec_keymap_fixup(struct atkbd *atkbd) 870static unsigned int atkbd_inventec_forced_release_keys[] = {
874{ 871 0xae, 0xb0, -1U
875 const unsigned int forced_release_keys[] = { 872};
876 0xae, 0xb0,
877 };
878 int i;
879
880 if (atkbd->set == 2)
881 for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
882 __set_bit(forced_release_keys[i],
883 atkbd->force_release_mask);
884}
885 873
886/* 874/*
887 * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release 875 * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release
888 * for its volume buttons 876 * for its volume buttons
889 */ 877 */
890static void atkbd_hp_zv6100_keymap_fixup(struct atkbd *atkbd) 878static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
891{ 879 0xae, 0xb0, -1U
892 const unsigned int forced_release_keys[] = { 880};
893 0xae, 0xb0,
894 };
895 int i;
896
897 if (atkbd->set == 2)
898 for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
899 __set_bit(forced_release_keys[i],
900 atkbd->force_release_mask);
901}
902 881
903/* 882/*
904 * Samsung NC10 with Fn+F? key release not working 883 * Samsung NC10 with Fn+F? key release not working
905 */ 884 */
906static void atkbd_samsung_keymap_fixup(struct atkbd *atkbd) 885static unsigned int atkbd_samsung_forced_release_keys[] = {
907{ 886 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9, -1U
908 const unsigned int forced_release_keys[] = { 887};
909 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9,
910 };
911 int i;
912 888
913 if (atkbd->set == 2) 889/*
914 for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) 890 * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop
915 __set_bit(forced_release_keys[i], 891 * do not generate release events so we have to do it ourselves.
916 atkbd->force_release_mask); 892 */
917} 893static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = {
894 0xb0, 0xae, -1U
895};
918 896
919/* 897/*
920 * atkbd_set_keycode_table() initializes keyboard's keycode table 898 * atkbd_set_keycode_table() initializes keyboard's keycode table
@@ -967,7 +945,7 @@ static void atkbd_set_keycode_table(struct atkbd *atkbd)
967 * Perform additional fixups 945 * Perform additional fixups
968 */ 946 */
969 if (atkbd_platform_fixup) 947 if (atkbd_platform_fixup)
970 atkbd_platform_fixup(atkbd); 948 atkbd_platform_fixup(atkbd, atkbd_platform_fixup_data);
971} 949}
972 950
973/* 951/*
@@ -1492,9 +1470,11 @@ static ssize_t atkbd_show_err_count(struct atkbd *atkbd, char *buf)
1492 return sprintf(buf, "%lu\n", atkbd->err_count); 1470 return sprintf(buf, "%lu\n", atkbd->err_count);
1493} 1471}
1494 1472
1495static int __init atkbd_setup_fixup(const struct dmi_system_id *id) 1473static int __init atkbd_setup_forced_release(const struct dmi_system_id *id)
1496{ 1474{
1497 atkbd_platform_fixup = id->driver_data; 1475 atkbd_platform_fixup = atkbd_apply_forced_release_keylist;
1476 atkbd_platform_fixup_data = id->driver_data;
1477
1498 return 0; 1478 return 0;
1499} 1479}
1500 1480
@@ -1505,8 +1485,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1505 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1485 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1506 DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ 1486 DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
1507 }, 1487 },
1508 .callback = atkbd_setup_fixup, 1488 .callback = atkbd_setup_forced_release,
1509 .driver_data = atkbd_dell_laptop_keymap_fixup, 1489 .driver_data = atkbd_dell_laptop_forced_release_keys,
1510 }, 1490 },
1511 { 1491 {
1512 .ident = "Dell Laptop", 1492 .ident = "Dell Laptop",
@@ -1514,8 +1494,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1514 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), 1494 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
1515 DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ 1495 DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
1516 }, 1496 },
1517 .callback = atkbd_setup_fixup, 1497 .callback = atkbd_setup_forced_release,
1518 .driver_data = atkbd_dell_laptop_keymap_fixup, 1498 .driver_data = atkbd_dell_laptop_forced_release_keys,
1519 }, 1499 },
1520 { 1500 {
1521 .ident = "HP 2133", 1501 .ident = "HP 2133",
@@ -1523,8 +1503,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1523 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1503 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1524 DMI_MATCH(DMI_PRODUCT_NAME, "HP 2133"), 1504 DMI_MATCH(DMI_PRODUCT_NAME, "HP 2133"),
1525 }, 1505 },
1526 .callback = atkbd_setup_fixup, 1506 .callback = atkbd_setup_forced_release,
1527 .driver_data = atkbd_hp_keymap_fixup, 1507 .driver_data = atkbd_hp_forced_release_keys,
1528 }, 1508 },
1529 { 1509 {
1530 .ident = "HP Pavilion ZV6100", 1510 .ident = "HP Pavilion ZV6100",
@@ -1532,8 +1512,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1532 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1512 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1533 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"), 1513 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"),
1534 }, 1514 },
1535 .callback = atkbd_setup_fixup, 1515 .callback = atkbd_setup_forced_release,
1536 .driver_data = atkbd_hp_zv6100_keymap_fixup, 1516 .driver_data = atkbd_hp_zv6100_forced_release_keys,
1537 }, 1517 },
1538 { 1518 {
1539 .ident = "Inventec Symphony", 1519 .ident = "Inventec Symphony",
@@ -1541,8 +1521,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1541 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), 1521 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
1542 DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"), 1522 DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"),
1543 }, 1523 },
1544 .callback = atkbd_setup_fixup, 1524 .callback = atkbd_setup_forced_release,
1545 .driver_data = atkbd_inventec_keymap_fixup, 1525 .driver_data = atkbd_inventec_forced_release_keys,
1546 }, 1526 },
1547 { 1527 {
1548 .ident = "Samsung NC10", 1528 .ident = "Samsung NC10",
@@ -1550,8 +1530,17 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1550 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), 1530 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
1551 DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), 1531 DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
1552 }, 1532 },
1553 .callback = atkbd_setup_fixup, 1533 .callback = atkbd_setup_forced_release,
1554 .driver_data = atkbd_samsung_keymap_fixup, 1534 .driver_data = atkbd_samsung_forced_release_keys,
1535 },
1536 {
1537 .ident = "Fujitsu Amilo PA 1510",
1538 .matches = {
1539 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1540 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"),
1541 },
1542 .callback = atkbd_setup_forced_release,
1543 .driver_data = atkbd_amilo_pa1510_forced_release_keys,
1555 }, 1544 },
1556 { } 1545 { }
1557}; 1546};
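
The atkbd changes above collapse five nearly identical fixup functions into one atkbd_apply_forced_release_keylist() that walks a -1U-terminated array passed through the DMI table's driver_data, so a new machine quirk now needs only a key list and a table entry. The sketch below shows the shape of such an addition; the vendor and product strings are invented placeholders, not a real machine:

#include <linux/dmi.h>

/* Scancodes the (hypothetical) machine never sends release events for;
 * the shared fixup stops at the -1U sentinel. */
static unsigned int example_forced_release_keys[] = {
	0x94, 0xae, -1U
};

static struct dmi_system_id example_quirk_table[] __initdata = {
	{
		.ident = "Example Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
		.callback = atkbd_setup_forced_release,
		.driver_data = example_forced_release_keys,
	},
	{ }
};
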
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index ee855c5202e8..e94b7d735aca 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -211,8 +211,8 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev)
211 211
212 if (!pdata->debounce_time || pdata->debounce_time > MAX_MULT || 212 if (!pdata->debounce_time || pdata->debounce_time > MAX_MULT ||
213 !pdata->coldrive_time || pdata->coldrive_time > MAX_MULT) { 213 !pdata->coldrive_time || pdata->coldrive_time > MAX_MULT) {
214 printk(KERN_ERR DRV_NAME 214 printk(KERN_WARNING DRV_NAME
215 ": Invalid Debounce/Columdrive Time from pdata\n"); 215 ": Invalid Debounce/Columndrive Time in platform data\n");
216 bfin_write_KPAD_MSEL(0xFF0); /* Default MSEL */ 216 bfin_write_KPAD_MSEL(0xFF0); /* Default MSEL */
217 } else { 217 } else {
218 bfin_write_KPAD_MSEL( 218 bfin_write_KPAD_MSEL(
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index aacf71f3cd44..e9d639ec283d 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -198,45 +198,28 @@ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len)
198} 198}
199 199
200 200
201/* initialise HIL */ 201/* initialize HIL */
202static int __init 202static int __devinit hil_keyb_init(void)
203hil_keyb_init(void)
204{ 203{
205 unsigned char c; 204 unsigned char c;
206 unsigned int i, kbid; 205 unsigned int i, kbid;
207 wait_queue_head_t hil_wait; 206 wait_queue_head_t hil_wait;
208 int err; 207 int err;
209 208
210 if (hil_dev.dev) { 209 if (hil_dev.dev)
211 return -ENODEV; /* already initialized */ 210 return -ENODEV; /* already initialized */
212 }
213 211
212 init_waitqueue_head(&hil_wait);
214 spin_lock_init(&hil_dev.lock); 213 spin_lock_init(&hil_dev.lock);
214
215 hil_dev.dev = input_allocate_device(); 215 hil_dev.dev = input_allocate_device();
216 if (!hil_dev.dev) 216 if (!hil_dev.dev)
217 return -ENOMEM; 217 return -ENOMEM;
218 218
219#if defined(CONFIG_HP300)
220 if (!MACH_IS_HP300) {
221 err = -ENODEV;
222 goto err1;
223 }
224 if (!hwreg_present((void *)(HILBASE + HIL_DATA))) {
225 printk(KERN_ERR "HIL: hardware register was not found\n");
226 err = -ENODEV;
227 goto err1;
228 }
229 if (!request_region(HILBASE + HIL_DATA, 2, "hil")) {
230 printk(KERN_ERR "HIL: IOPORT region already used\n");
231 err = -EIO;
232 goto err1;
233 }
234#endif
235
236 err = request_irq(HIL_IRQ, hil_interrupt, 0, "hil", hil_dev.dev_id); 219 err = request_irq(HIL_IRQ, hil_interrupt, 0, "hil", hil_dev.dev_id);
237 if (err) { 220 if (err) {
238 printk(KERN_ERR "HIL: Can't get IRQ\n"); 221 printk(KERN_ERR "HIL: Can't get IRQ\n");
239 goto err2; 222 goto err1;
240 } 223 }
241 224
242 /* Turn on interrupts */ 225 /* Turn on interrupts */
@@ -246,11 +229,9 @@ hil_keyb_init(void)
246 hil_dev.valid = 0; /* clear any pending data */ 229 hil_dev.valid = 0; /* clear any pending data */
247 hil_do(HIL_READKBDSADR, NULL, 0); 230 hil_do(HIL_READKBDSADR, NULL, 0);
248 231
249 init_waitqueue_head(&hil_wait); 232 wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3 * HZ);
250 wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3*HZ); 233 if (!hil_dev.valid)
251 if (!hil_dev.valid) {
252 printk(KERN_WARNING "HIL: timed out, assuming no keyboard present\n"); 234 printk(KERN_WARNING "HIL: timed out, assuming no keyboard present\n");
253 }
254 235
255 c = hil_dev.c; 236 c = hil_dev.c;
256 hil_dev.valid = 0; 237 hil_dev.valid = 0;
@@ -268,7 +249,7 @@ hil_keyb_init(void)
268 249
269 for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++) 250 for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++)
270 if (hphilkeyb_keycode[i] != KEY_RESERVED) 251 if (hphilkeyb_keycode[i] != KEY_RESERVED)
271 set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit); 252 __set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit);
272 253
273 hil_dev.dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 254 hil_dev.dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
274 hil_dev.dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | 255 hil_dev.dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) |
@@ -287,34 +268,45 @@ hil_keyb_init(void)
287 err = input_register_device(hil_dev.dev); 268 err = input_register_device(hil_dev.dev);
288 if (err) { 269 if (err) {
289 printk(KERN_ERR "HIL: Can't register device\n"); 270 printk(KERN_ERR "HIL: Can't register device\n");
290 goto err3; 271 goto err2;
291 } 272 }
273
292 printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n", 274 printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n",
293 hil_dev.dev->name, kbid, HILBASE, HIL_IRQ); 275 hil_dev.dev->name, kbid, HILBASE, HIL_IRQ);
294 276
295 return 0; 277 return 0;
296 278
297err3: 279err2:
298 hil_do(HIL_INTOFF, NULL, 0); 280 hil_do(HIL_INTOFF, NULL, 0);
299 disable_irq(HIL_IRQ);
300 free_irq(HIL_IRQ, hil_dev.dev_id); 281 free_irq(HIL_IRQ, hil_dev.dev_id);
301err2:
302#if defined(CONFIG_HP300)
303 release_region(HILBASE + HIL_DATA, 2);
304err1: 282err1:
305#endif
306 input_free_device(hil_dev.dev); 283 input_free_device(hil_dev.dev);
307 hil_dev.dev = NULL; 284 hil_dev.dev = NULL;
308 return err; 285 return err;
309} 286}
310 287
288static void __devexit hil_keyb_exit(void)
289{
290 if (HIL_IRQ)
291 free_irq(HIL_IRQ, hil_dev.dev_id);
292
293 /* Turn off interrupts */
294 hil_do(HIL_INTOFF, NULL, 0);
295
296 input_unregister_device(hil_dev.dev);
297 hil_dev.dev = NULL;
298}
311 299
312#if defined(CONFIG_PARISC) 300#if defined(CONFIG_PARISC)
313static int __init 301static int __devinit hil_probe_chip(struct parisc_device *dev)
314hil_init_chip(struct parisc_device *dev)
315{ 302{
303 /* Only allow one HIL keyboard */
304 if (hil_dev.dev)
305 return -ENODEV;
306
316 if (!dev->irq) { 307 if (!dev->irq) {
317 printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%08lx\n", dev->hpa.start); 308 printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%p\n",
309 (void *)dev->hpa.start);
318 return -ENODEV; 310 return -ENODEV;
319 } 311 }
320 312
@@ -327,51 +319,79 @@ hil_init_chip(struct parisc_device *dev)
327 return hil_keyb_init(); 319 return hil_keyb_init();
328} 320}
329 321
322static int __devexit hil_remove_chip(struct parisc_device *dev)
323{
324 hil_keyb_exit();
325
326 return 0;
327}
328
330static struct parisc_device_id hil_tbl[] = { 329static struct parisc_device_id hil_tbl[] = {
331 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 }, 330 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 },
332 { 0, } 331 { 0, }
333}; 332};
334 333
334#if 0
335/* Disabled to avoid conflicts with the HP SDC HIL drivers */
335MODULE_DEVICE_TABLE(parisc, hil_tbl); 336MODULE_DEVICE_TABLE(parisc, hil_tbl);
337#endif
336 338
337static struct parisc_driver hil_driver = { 339static struct parisc_driver hil_driver = {
338 .name = "hil", 340 .name = "hil",
339 .id_table = hil_tbl, 341 .id_table = hil_tbl,
340 .probe = hil_init_chip, 342 .probe = hil_probe_chip,
343 .remove = __devexit_p(hil_remove_chip),
341}; 344};
342#endif /* CONFIG_PARISC */
343
344 345
345static int __init hil_init(void) 346static int __init hil_init(void)
346{ 347{
347#if defined(CONFIG_PARISC)
348 return register_parisc_driver(&hil_driver); 348 return register_parisc_driver(&hil_driver);
349#else
350 return hil_keyb_init();
351#endif
352} 349}
353 350
354
355static void __exit hil_exit(void) 351static void __exit hil_exit(void)
356{ 352{
357 if (HIL_IRQ) { 353 unregister_parisc_driver(&hil_driver);
358 disable_irq(HIL_IRQ); 354}
359 free_irq(HIL_IRQ, hil_dev.dev_id); 355
356#else /* !CONFIG_PARISC */
357
358static int __init hil_init(void)
359{
360 int error;
361
362 /* Only allow one HIL keyboard */
363 if (hil_dev.dev)
364 return -EBUSY;
365
366 if (!MACH_IS_HP300)
367 return -ENODEV;
368
369 if (!hwreg_present((void *)(HILBASE + HIL_DATA))) {
370 printk(KERN_ERR "HIL: hardware register was not found\n");
371 return -ENODEV;
360 } 372 }
361 373
362 /* Turn off interrupts */ 374 if (!request_region(HILBASE + HIL_DATA, 2, "hil")) {
363 hil_do(HIL_INTOFF, NULL, 0); 375 printk(KERN_ERR "HIL: IOPORT region already used\n");
376 return -EIO;
377 }
364 378
365 input_unregister_device(hil_dev.dev); 379 error = hil_keyb_init();
380 if (error) {
381 release_region(HILBASE + HIL_DATA, 2);
382 return error;
383 }
366 384
367 hil_dev.dev = NULL; 385 return 0;
386}
368 387
369#if defined(CONFIG_PARISC) 388static void __exit hil_exit(void)
370 unregister_parisc_driver(&hil_driver); 389{
371#else 390 hil_keyb_exit();
372 release_region(HILBASE+HIL_DATA, 2); 391 release_region(HILBASE + HIL_DATA, 2);
373#endif
374} 392}
375 393
394#endif /* CONFIG_PARISC */
395
376module_init(hil_init); 396module_init(hil_init);
377module_exit(hil_exit); 397module_exit(hil_exit);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 67e5553f699a..203abac1e23e 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -227,4 +227,27 @@ config INPUT_PCF50633_PMU
227 Say Y to include support for delivering PMU events via input 227 Say Y to include support for delivering PMU events via input
228 layer on NXP PCF50633. 228 layer on NXP PCF50633.
229 229
230config INPUT_GPIO_ROTARY_ENCODER
231 tristate "Rotary encoders connected to GPIO pins"
232 depends on GPIOLIB && GENERIC_GPIO
233 help
234 Say Y here to add support for rotary encoders connected to GPIO lines.
235	  Check file:Documentation/input/rotary_encoder.txt for more
236 information.
237
238 To compile this driver as a module, choose M here: the
239 module will be called rotary_encoder.
240
241config INPUT_RB532_BUTTON
242 tristate "Mikrotik Routerboard 532 button interface"
243 depends on MIKROTIK_RB532
244 depends on GPIOLIB && GENERIC_GPIO
245 select INPUT_POLLDEV
246 help
247 Say Y here if you want support for the S1 button built into
248 Mikrotik's Routerboard 532.
249
250 To compile this driver as a module, choose M here: the
251 module will be called rb532_button.
252
230endif 253endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index bb62e6efacf3..eb3f407baedf 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -4,21 +4,23 @@
4 4
5# Each configuration option enables a list of files. 5# Each configuration option enables a list of files.
6 6
7obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o 7obj-$(CONFIG_INPUT_APANEL) += apanel.o
8obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
9obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
10obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
11obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
12obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
13obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
14obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o 8obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
15obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o 9obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
16obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o 10obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
17obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
18obj-$(CONFIG_INPUT_YEALINK) += yealink.o
19obj-$(CONFIG_INPUT_CM109) += cm109.o 11obj-$(CONFIG_INPUT_CM109) += cm109.o
12obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
20obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o 13obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
21obj-$(CONFIG_INPUT_UINPUT) += uinput.o 14obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
22obj-$(CONFIG_INPUT_APANEL) += apanel.o 15obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
23obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o 16obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
24obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o 17obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
18obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
19obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
20obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
21obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
22obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
23obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
24obj-$(CONFIG_INPUT_UINPUT) += uinput.o
25obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
26obj-$(CONFIG_INPUT_YEALINK) += yealink.o
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 3c9988dc0e9f..922c05141585 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -31,12 +31,73 @@ MODULE_LICENSE("GPL");
31 * newly configured "channel". 31 * newly configured "channel".
32 */ 32 */
33 33
34static unsigned int channel_mask = 0xFFFF; 34enum {
35module_param(channel_mask, uint, 0644); 35 ATI_REMOTE2_MAX_CHANNEL_MASK = 0xFFFF,
36 ATI_REMOTE2_MAX_MODE_MASK = 0x1F,
37};
38
39static int ati_remote2_set_mask(const char *val,
40 struct kernel_param *kp, unsigned int max)
41{
42 unsigned long mask;
43 int ret;
44
45 if (!val)
46 return -EINVAL;
47
48 ret = strict_strtoul(val, 0, &mask);
49 if (ret)
50 return ret;
51
52 if (mask & ~max)
53 return -EINVAL;
54
55 *(unsigned int *)kp->arg = mask;
56
57 return 0;
58}
59
60static int ati_remote2_set_channel_mask(const char *val,
61 struct kernel_param *kp)
62{
63 pr_debug("%s()\n", __func__);
64
65 return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_CHANNEL_MASK);
66}
67
68static int ati_remote2_get_channel_mask(char *buffer, struct kernel_param *kp)
69{
70 pr_debug("%s()\n", __func__);
71
72 return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
73}
74
75static int ati_remote2_set_mode_mask(const char *val, struct kernel_param *kp)
76{
77 pr_debug("%s()\n", __func__);
78
79 return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_MODE_MASK);
80}
81
82static int ati_remote2_get_mode_mask(char *buffer, struct kernel_param *kp)
83{
84 pr_debug("%s()\n", __func__);
85
86 return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg);
87}
88
89static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
90#define param_check_channel_mask(name, p) __param_check(name, p, unsigned int)
91#define param_set_channel_mask ati_remote2_set_channel_mask
92#define param_get_channel_mask ati_remote2_get_channel_mask
93module_param(channel_mask, channel_mask, 0644);
36MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>"); 94MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>");
37 95
(removed, old lines 38-39)
38 static unsigned int mode_mask = 0x1F;
39 module_param(mode_mask, uint, 0644);
(added, new lines 96-97)
96 static unsigned int mode_mask = ATI_REMOTE2_MAX_MODE_MASK;
97 #define param_check_mode_mask(name, p) __param_check(name, p, unsigned int)
98#define param_set_mode_mask ati_remote2_set_mode_mask
99#define param_get_mode_mask ati_remote2_get_mode_mask
100module_param(mode_mask, mode_mask, 0644);
40MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); 101MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>");
41 102
42static struct usb_device_id ati_remote2_id_table[] = { 103static struct usb_device_id ati_remote2_id_table[] = {
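A note on the module_param() change above: module_param(name, type, perm) resolves its handlers by token pasting, expanding to param_check_##type, param_set_##type and param_get_##type. Defining those three names for "channel_mask" (and "mode_mask") lets each parameter keep its plain unsigned int storage while writes go through the range-checked ati_remote2_set_mask() and reads come back hex-formatted. A minimal userspace sketch of the same token-pasting dispatch (every name below is illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>

static unsigned int channel_mask = 0xFFFF;

static int set_channel_mask(const char *val, void *arg)
{
	unsigned long mask = strtoul(val, NULL, 0);

	if (mask & ~0xFFFFUL)		/* only 16 channels exist */
		return -1;
	*(unsigned int *)arg = mask;
	return 0;
}

static int get_channel_mask(char *buf, void *arg)
{
	return sprintf(buf, "0x%04x", *(unsigned int *)arg);
}

static void show_param(const char *name, void *arg,
		       int (*set)(const char *, void *),
		       int (*get)(char *, void *))
{
	char buf[16];

	set("0x00ff", arg);
	get(buf, arg);
	printf("%s = %s\n", name, buf);
}

/* the "type" argument only selects which handler pair gets pasted in */
#define my_param(name, type) \
	show_param(#name, &name, set_##type, get_##type)

int main(void)
{
	my_param(channel_mask, channel_mask);
	return 0;
}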
@@ -133,12 +194,18 @@ struct ati_remote2 {
133 u16 keycode[ATI_REMOTE2_MODES][ARRAY_SIZE(ati_remote2_key_table)]; 194 u16 keycode[ATI_REMOTE2_MODES][ARRAY_SIZE(ati_remote2_key_table)];
134 195
135 unsigned int flags; 196 unsigned int flags;
197
198 unsigned int channel_mask;
199 unsigned int mode_mask;
136}; 200};
137 201
138static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id); 202static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id);
139static void ati_remote2_disconnect(struct usb_interface *interface); 203static void ati_remote2_disconnect(struct usb_interface *interface);
140static int ati_remote2_suspend(struct usb_interface *interface, pm_message_t message); 204static int ati_remote2_suspend(struct usb_interface *interface, pm_message_t message);
141static int ati_remote2_resume(struct usb_interface *interface); 205static int ati_remote2_resume(struct usb_interface *interface);
206static int ati_remote2_reset_resume(struct usb_interface *interface);
207static int ati_remote2_pre_reset(struct usb_interface *interface);
208static int ati_remote2_post_reset(struct usb_interface *interface);
142 209
143static struct usb_driver ati_remote2_driver = { 210static struct usb_driver ati_remote2_driver = {
144 .name = "ati_remote2", 211 .name = "ati_remote2",
@@ -147,6 +214,9 @@ static struct usb_driver ati_remote2_driver = {
147 .id_table = ati_remote2_id_table, 214 .id_table = ati_remote2_id_table,
148 .suspend = ati_remote2_suspend, 215 .suspend = ati_remote2_suspend,
149 .resume = ati_remote2_resume, 216 .resume = ati_remote2_resume,
217 .reset_resume = ati_remote2_reset_resume,
218 .pre_reset = ati_remote2_pre_reset,
219 .post_reset = ati_remote2_post_reset,
150 .supports_autosuspend = 1, 220 .supports_autosuspend = 1,
151}; 221};
152 222
@@ -238,7 +308,7 @@ static void ati_remote2_input_mouse(struct ati_remote2 *ar2)
238 308
239 channel = data[0] >> 4; 309 channel = data[0] >> 4;
240 310
241 if (!((1 << channel) & channel_mask)) 311 if (!((1 << channel) & ar2->channel_mask))
242 return; 312 return;
243 313
244 mode = data[0] & 0x0F; 314 mode = data[0] & 0x0F;
@@ -250,7 +320,7 @@ static void ati_remote2_input_mouse(struct ati_remote2 *ar2)
250 return; 320 return;
251 } 321 }
252 322
253 if (!((1 << mode) & mode_mask)) 323 if (!((1 << mode) & ar2->mode_mask))
254 return; 324 return;
255 325
256 input_event(idev, EV_REL, REL_X, (s8) data[1]); 326 input_event(idev, EV_REL, REL_X, (s8) data[1]);
@@ -277,7 +347,7 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2)
277 347
278 channel = data[0] >> 4; 348 channel = data[0] >> 4;
279 349
280 if (!((1 << channel) & channel_mask)) 350 if (!((1 << channel) & ar2->channel_mask))
281 return; 351 return;
282 352
283 mode = data[0] & 0x0F; 353 mode = data[0] & 0x0F;
@@ -305,7 +375,7 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2)
305 ar2->mode = mode; 375 ar2->mode = mode;
306 } 376 }
307 377
308 if (!((1 << mode) & mode_mask)) 378 if (!((1 << mode) & ar2->mode_mask))
309 return; 379 return;
310 380
311 index = ati_remote2_lookup(hw_code); 381 index = ati_remote2_lookup(hw_code);
@@ -410,7 +480,7 @@ static int ati_remote2_getkeycode(struct input_dev *idev,
410 int index, mode; 480 int index, mode;
411 481
412 mode = scancode >> 8; 482 mode = scancode >> 8;
413 if (mode > ATI_REMOTE2_PC || !((1 << mode) & mode_mask)) 483 if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask))
414 return -EINVAL; 484 return -EINVAL;
415 485
416 index = ati_remote2_lookup(scancode & 0xFF); 486 index = ati_remote2_lookup(scancode & 0xFF);
@@ -427,7 +497,7 @@ static int ati_remote2_setkeycode(struct input_dev *idev, int scancode, int keyc
427 int index, mode, old_keycode; 497 int index, mode, old_keycode;
428 498
429 mode = scancode >> 8; 499 mode = scancode >> 8;
430 if (mode > ATI_REMOTE2_PC || !((1 << mode) & mode_mask)) 500 if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask))
431 return -EINVAL; 501 return -EINVAL;
432 502
433 index = ati_remote2_lookup(scancode & 0xFF); 503 index = ati_remote2_lookup(scancode & 0xFF);
@@ -550,7 +620,7 @@ static void ati_remote2_urb_cleanup(struct ati_remote2 *ar2)
550 } 620 }
551} 621}
552 622
553static int ati_remote2_setup(struct ati_remote2 *ar2) 623static int ati_remote2_setup(struct ati_remote2 *ar2, unsigned int ch_mask)
554{ 624{
555 int r, i, channel; 625 int r, i, channel;
556 626
@@ -565,8 +635,8 @@ static int ati_remote2_setup(struct ati_remote2 *ar2)
565 635
566 channel = 0; 636 channel = 0;
567 for (i = 0; i < 16; i++) { 637 for (i = 0; i < 16; i++) {
568 if ((1 << i) & channel_mask) { 638 if ((1 << i) & ch_mask) {
569 if (!(~(1 << i) & 0xFFFF & channel_mask)) 639 if (!(~(1 << i) & ch_mask))
570 channel = i + 1; 640 channel = i + 1;
571 break; 641 break;
572 } 642 }
@@ -585,6 +655,99 @@ static int ati_remote2_setup(struct ati_remote2 *ar2)
585 return 0; 655 return 0;
586} 656}
587 657
658static ssize_t ati_remote2_show_channel_mask(struct device *dev,
659 struct device_attribute *attr,
660 char *buf)
661{
662 struct usb_device *udev = to_usb_device(dev);
663 struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
664 struct ati_remote2 *ar2 = usb_get_intfdata(intf);
665
666 return sprintf(buf, "0x%04x\n", ar2->channel_mask);
667}
668
669static ssize_t ati_remote2_store_channel_mask(struct device *dev,
670 struct device_attribute *attr,
671 const char *buf, size_t count)
672{
673 struct usb_device *udev = to_usb_device(dev);
674 struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
675 struct ati_remote2 *ar2 = usb_get_intfdata(intf);
676 unsigned long mask;
677 int r;
678
679 if (strict_strtoul(buf, 0, &mask))
680 return -EINVAL;
681
682 if (mask & ~ATI_REMOTE2_MAX_CHANNEL_MASK)
683 return -EINVAL;
684
685 r = usb_autopm_get_interface(ar2->intf[0]);
686 if (r) {
687 dev_err(&ar2->intf[0]->dev,
688 "%s(): usb_autopm_get_interface() = %d\n", __func__, r);
689 return r;
690 }
691
692 mutex_lock(&ati_remote2_mutex);
693
694 if (mask != ar2->channel_mask && !ati_remote2_setup(ar2, mask))
695 ar2->channel_mask = mask;
696
697 mutex_unlock(&ati_remote2_mutex);
698
699 usb_autopm_put_interface(ar2->intf[0]);
700
701 return count;
702}
703
704static ssize_t ati_remote2_show_mode_mask(struct device *dev,
705 struct device_attribute *attr,
706 char *buf)
707{
708 struct usb_device *udev = to_usb_device(dev);
709 struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
710 struct ati_remote2 *ar2 = usb_get_intfdata(intf);
711
712 return sprintf(buf, "0x%02x\n", ar2->mode_mask);
713}
714
715static ssize_t ati_remote2_store_mode_mask(struct device *dev,
716 struct device_attribute *attr,
717 const char *buf, size_t count)
718{
719 struct usb_device *udev = to_usb_device(dev);
720 struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
721 struct ati_remote2 *ar2 = usb_get_intfdata(intf);
722 unsigned long mask;
723
724 if (strict_strtoul(buf, 0, &mask))
725 return -EINVAL;
726
727 if (mask & ~ATI_REMOTE2_MAX_MODE_MASK)
728 return -EINVAL;
729
730 ar2->mode_mask = mask;
731
732 return count;
733}
734
735static DEVICE_ATTR(channel_mask, 0644, ati_remote2_show_channel_mask,
736 ati_remote2_store_channel_mask);
737
738static DEVICE_ATTR(mode_mask, 0644, ati_remote2_show_mode_mask,
739 ati_remote2_store_mode_mask);
740
741static struct attribute *ati_remote2_attrs[] = {
742 &dev_attr_channel_mask.attr,
743 &dev_attr_mode_mask.attr,
744 NULL,
745};
746
747static struct attribute_group ati_remote2_attr_group = {
748 .attrs = ati_remote2_attrs,
749};
750
588static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id) 751static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id)
589{ 752{
590 struct usb_device *udev = interface_to_usbdev(interface); 753 struct usb_device *udev = interface_to_usbdev(interface);
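A note on the sysfs block above: DEVICE_ATTR(channel_mask, 0644, ...) generates a struct device_attribute called dev_attr_channel_mask, and registering ati_remote2_attr_group with sysfs_create_group() in probe exposes channel_mask and mode_mask as files on the receiver's USB device, so the masks become per-device and adjustable at runtime instead of module-wide only. A small userspace sketch of writing the new attribute (the sysfs path is an assumption; it depends on where the receiver enumerates):

#include <stdio.h>

int main(void)
{
	/* hypothetical path -- look the device up under /sys/bus/usb/devices */
	const char *path = "/sys/bus/usb/devices/1-1/channel_mask";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("0x0001", f);	/* accept channel 1 only */
	fclose(f);
	return 0;
}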
@@ -615,7 +778,10 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
615 if (r) 778 if (r)
616 goto fail2; 779 goto fail2;
617 780
(removed, old line 618)
618 	r = ati_remote2_setup(ar2);
(added, new line 781)
781 	ar2->channel_mask = channel_mask;
782 ar2->mode_mask = mode_mask;
783
784 r = ati_remote2_setup(ar2, ar2->channel_mask);
619 if (r) 785 if (r)
620 goto fail2; 786 goto fail2;
621 787
@@ -624,19 +790,24 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
624 790
625 strlcat(ar2->name, "ATI Remote Wonder II", sizeof(ar2->name)); 791 strlcat(ar2->name, "ATI Remote Wonder II", sizeof(ar2->name));
626 792
(removed, old line 627)
627 	r = ati_remote2_input_init(ar2);
(added, new line 793)
793 	r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
628 if (r) 794 if (r)
629 goto fail2; 795 goto fail2;
630 796
797 r = ati_remote2_input_init(ar2);
798 if (r)
799 goto fail3;
800
631 usb_set_intfdata(interface, ar2); 801 usb_set_intfdata(interface, ar2);
632 802
633 interface->needs_remote_wakeup = 1; 803 interface->needs_remote_wakeup = 1;
634 804
635 return 0; 805 return 0;
636 806
807 fail3:
808 sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
637 fail2: 809 fail2:
638 ati_remote2_urb_cleanup(ar2); 810 ati_remote2_urb_cleanup(ar2);
639
640 usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); 811 usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
641 fail1: 812 fail1:
642 kfree(ar2); 813 kfree(ar2);
@@ -657,6 +828,8 @@ static void ati_remote2_disconnect(struct usb_interface *interface)
657 828
658 input_unregister_device(ar2->idev); 829 input_unregister_device(ar2->idev);
659 830
831 sysfs_remove_group(&ar2->udev->dev.kobj, &ati_remote2_attr_group);
832
660 ati_remote2_urb_cleanup(ar2); 833 ati_remote2_urb_cleanup(ar2);
661 834
662 usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); 835 usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
@@ -715,6 +888,78 @@ static int ati_remote2_resume(struct usb_interface *interface)
715 return r; 888 return r;
716} 889}
717 890
891static int ati_remote2_reset_resume(struct usb_interface *interface)
892{
893 struct ati_remote2 *ar2;
894 struct usb_host_interface *alt = interface->cur_altsetting;
895 int r = 0;
896
897 if (alt->desc.bInterfaceNumber)
898 return 0;
899
900 ar2 = usb_get_intfdata(interface);
901
902 dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__);
903
904 mutex_lock(&ati_remote2_mutex);
905
906 r = ati_remote2_setup(ar2, ar2->channel_mask);
907 if (r)
908 goto out;
909
910 if (ar2->flags & ATI_REMOTE2_OPENED)
911 r = ati_remote2_submit_urbs(ar2);
912
913 if (!r)
914 ar2->flags &= ~ATI_REMOTE2_SUSPENDED;
915
916 out:
917 mutex_unlock(&ati_remote2_mutex);
918
919 return r;
920}
921
922static int ati_remote2_pre_reset(struct usb_interface *interface)
923{
924 struct ati_remote2 *ar2;
925 struct usb_host_interface *alt = interface->cur_altsetting;
926
927 if (alt->desc.bInterfaceNumber)
928 return 0;
929
930 ar2 = usb_get_intfdata(interface);
931
932 dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__);
933
934 mutex_lock(&ati_remote2_mutex);
935
936 if (ar2->flags == ATI_REMOTE2_OPENED)
937 ati_remote2_kill_urbs(ar2);
938
939 return 0;
940}
941
942static int ati_remote2_post_reset(struct usb_interface *interface)
943{
944 struct ati_remote2 *ar2;
945 struct usb_host_interface *alt = interface->cur_altsetting;
946 int r = 0;
947
948 if (alt->desc.bInterfaceNumber)
949 return 0;
950
951 ar2 = usb_get_intfdata(interface);
952
953 dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__);
954
955 if (ar2->flags == ATI_REMOTE2_OPENED)
956 r = ati_remote2_submit_urbs(ar2);
957
958 mutex_unlock(&ati_remote2_mutex);
959
960 return r;
961}
962
718static int __init ati_remote2_init(void) 963static int __init ati_remote2_init(void)
719{ 964{
720 int r; 965 int r;
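A note on the new reset hooks: pre_reset() takes ati_remote2_mutex and kills the URBs if the device is open, and the matching post_reset() resubmits them and only then releases the mutex, so the lock stays held across the whole USB reset and no open/close or mask change can race with it; reset_resume() additionally re-runs ati_remote2_setup() because the remote loses its channel configuration across a reset. A generic, purely illustrative sketch of that pre/post lock pairing:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int io_running = 1;

static int pre_reset(void)
{
	pthread_mutex_lock(&dev_lock);	/* intentionally not released here */
	io_running = 0;			/* stand-in for killing the URBs */
	return 0;
}

static int post_reset(void)
{
	io_running = 1;			/* stand-in for resubmitting the URBs */
	pthread_mutex_unlock(&dev_lock);
	return 0;
}

int main(void)
{
	pre_reset();
	/* the device would be reset here; nothing else can take dev_lock */
	post_reset();
	printf("io_running = %d\n", io_running);
	return 0;
}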
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
new file mode 100644
index 000000000000..e2c7f622a0b5
--- /dev/null
+++ b/drivers/input/misc/rb532_button.c
@@ -0,0 +1,120 @@
1/*
2 * Support for the S1 button on Routerboard 532
3 *
4 * Copyright (C) 2009 Phil Sutter <n0-1@freewrt.org>
5 */
6
7#include <linux/input-polldev.h>
8#include <linux/module.h>
9#include <linux/platform_device.h>
10
11#include <asm/mach-rc32434/gpio.h>
12#include <asm/mach-rc32434/rb.h>
13
14#define DRV_NAME "rb532-button"
15
16#define RB532_BTN_RATE 100 /* msec */
17#define RB532_BTN_KSYM BTN_0
18
19/* The S1 button state is provided by GPIO pin 1. But as this
20 * pin is also used for uart input as alternate function, the
21 * operational modes must be switched first:
22 * 1) disable uart using set_latch_u5()
23 * 2) turn off alternate function implicitly through
24 * gpio_direction_input()
25 * 3) read the GPIO's current value
26 * 4) undo step 2 by enabling alternate function (in this
27 * mode the GPIO direction is fixed, so no change needed)
28 * 5) turn on uart again
 29 * The GPIO value happens to be inverted, so a high pin means
 30 * the button is not pressed.
31 */
32static bool rb532_button_pressed(void)
33{
34 int val;
35
36 set_latch_u5(0, LO_FOFF);
37 gpio_direction_input(GPIO_BTN_S1);
38
39 val = gpio_get_value(GPIO_BTN_S1);
40
41 rb532_gpio_set_func(GPIO_BTN_S1);
42 set_latch_u5(LO_FOFF, 0);
43
44 return !val;
45}
46
47static void rb532_button_poll(struct input_polled_dev *poll_dev)
48{
49 input_report_key(poll_dev->input, RB532_BTN_KSYM,
50 rb532_button_pressed());
51 input_sync(poll_dev->input);
52}
53
54static int __devinit rb532_button_probe(struct platform_device *pdev)
55{
56 struct input_polled_dev *poll_dev;
57 int error;
58
59 poll_dev = input_allocate_polled_device();
60 if (!poll_dev)
61 return -ENOMEM;
62
63 poll_dev->poll = rb532_button_poll;
64 poll_dev->poll_interval = RB532_BTN_RATE;
65
66 poll_dev->input->name = "rb532 button";
67 poll_dev->input->phys = "rb532/button0";
68 poll_dev->input->id.bustype = BUS_HOST;
69 poll_dev->input->dev.parent = &pdev->dev;
70
71 dev_set_drvdata(&pdev->dev, poll_dev);
72
73 input_set_capability(poll_dev->input, EV_KEY, RB532_BTN_KSYM);
74
75 error = input_register_polled_device(poll_dev);
76 if (error) {
77 input_free_polled_device(poll_dev);
78 return error;
79 }
80
81 return 0;
82}
83
84static int __devexit rb532_button_remove(struct platform_device *pdev)
85{
86 struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev);
87
88 input_unregister_polled_device(poll_dev);
89 input_free_polled_device(poll_dev);
90 dev_set_drvdata(&pdev->dev, NULL);
91
92 return 0;
93}
94
95static struct platform_driver rb532_button_driver = {
96 .probe = rb532_button_probe,
97 .remove = __devexit_p(rb532_button_remove),
98 .driver = {
99 .name = DRV_NAME,
100 .owner = THIS_MODULE,
101 },
102};
103
104static int __init rb532_button_init(void)
105{
106 return platform_driver_register(&rb532_button_driver);
107}
108
109static void __exit rb532_button_exit(void)
110{
111 platform_driver_unregister(&rb532_button_driver);
112}
113
114module_init(rb532_button_init);
115module_exit(rb532_button_exit);
116
117MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
118MODULE_LICENSE("GPL");
119MODULE_DESCRIPTION("Support for S1 button on Routerboard 532");
120MODULE_ALIAS("platform:" DRV_NAME);
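A note on the driver above: the S1 line has no dedicated interrupt (the pin doubles as a UART input), so the driver uses the input-polldev helper -- input_allocate_polled_device() plus a .poll callback and a 100 ms .poll_interval makes the input core sample the button periodically and report it as BTN_0. A minimal userspace reader for the resulting events (the event node below is a guess; check /proc/bus/input/devices for the real one):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event2", O_RDONLY);	/* hypothetical node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		if (ev.type == EV_KEY && ev.code == BTN_0)
			printf("S1 %s\n", ev.value ? "pressed" : "released");
	close(fd);
	return 0;
}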
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
new file mode 100644
index 000000000000..5bb3ab51b8c6
--- /dev/null
+++ b/drivers/input/misc/rotary_encoder.c
@@ -0,0 +1,221 @@
1/*
2 * rotary_encoder.c
3 *
4 * (c) 2009 Daniel Mack <daniel@caiaq.de>
5 *
6 * state machine code inspired by code from Tim Ruetz
7 *
8 * A generic driver for rotary encoders connected to GPIO lines.
9 * See file:Documentation/input/rotary_encoder.txt for more information
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/input.h>
21#include <linux/device.h>
22#include <linux/platform_device.h>
23#include <linux/gpio.h>
24#include <linux/rotary_encoder.h>
25
26#define DRV_NAME "rotary-encoder"
27
28struct rotary_encoder {
29 unsigned int irq_a;
30 unsigned int irq_b;
31 unsigned int pos;
32 unsigned int armed;
33 unsigned int dir;
34 struct input_dev *input;
35 struct rotary_encoder_platform_data *pdata;
36};
37
38static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
39{
40 struct rotary_encoder *encoder = dev_id;
41 struct rotary_encoder_platform_data *pdata = encoder->pdata;
42 int a = !!gpio_get_value(pdata->gpio_a);
43 int b = !!gpio_get_value(pdata->gpio_b);
44 int state;
45
46 a ^= pdata->inverted_a;
47 b ^= pdata->inverted_b;
48 state = (a << 1) | b;
49
50 switch (state) {
51
52 case 0x0:
53 if (!encoder->armed)
54 break;
55
56 if (encoder->dir) {
57 /* turning counter-clockwise */
58 encoder->pos += pdata->steps;
59 encoder->pos--;
60 encoder->pos %= pdata->steps;
61 } else {
62 /* turning clockwise */
63 encoder->pos++;
64 encoder->pos %= pdata->steps;
65 }
66
67 input_report_abs(encoder->input, pdata->axis, encoder->pos);
68 input_sync(encoder->input);
69
70 encoder->armed = 0;
71 break;
72
73 case 0x1:
74 case 0x2:
75 if (encoder->armed)
76 encoder->dir = state - 1;
77 break;
78
79 case 0x3:
80 encoder->armed = 1;
81 break;
82 }
83
84 return IRQ_HANDLED;
85}
86
87static int __devinit rotary_encoder_probe(struct platform_device *pdev)
88{
89 struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data;
90 struct rotary_encoder *encoder;
91 struct input_dev *input;
92 int err;
93
94 if (!pdata || !pdata->steps) {
95 dev_err(&pdev->dev, "invalid platform data\n");
96 return -ENOENT;
97 }
98
99 encoder = kzalloc(sizeof(struct rotary_encoder), GFP_KERNEL);
100 input = input_allocate_device();
101 if (!encoder || !input) {
102 dev_err(&pdev->dev, "failed to allocate memory for device\n");
103 err = -ENOMEM;
104 goto exit_free_mem;
105 }
106
107 encoder->input = input;
108 encoder->pdata = pdata;
109 encoder->irq_a = gpio_to_irq(pdata->gpio_a);
110 encoder->irq_b = gpio_to_irq(pdata->gpio_b);
111
112 /* create and register the input driver */
113 input->name = pdev->name;
114 input->id.bustype = BUS_HOST;
115 input->dev.parent = &pdev->dev;
116 input->evbit[0] = BIT_MASK(EV_ABS);
117 input_set_abs_params(encoder->input,
118 pdata->axis, 0, pdata->steps, 0, 1);
119
120 err = input_register_device(input);
121 if (err) {
122 dev_err(&pdev->dev, "failed to register input device\n");
123 goto exit_free_mem;
124 }
125
126 /* request the GPIOs */
127 err = gpio_request(pdata->gpio_a, DRV_NAME);
128 if (err) {
129 dev_err(&pdev->dev, "unable to request GPIO %d\n",
130 pdata->gpio_a);
131 goto exit_unregister_input;
132 }
133
134 err = gpio_request(pdata->gpio_b, DRV_NAME);
135 if (err) {
136 dev_err(&pdev->dev, "unable to request GPIO %d\n",
137 pdata->gpio_b);
138 goto exit_free_gpio_a;
139 }
140
141 /* request the IRQs */
142 err = request_irq(encoder->irq_a, &rotary_encoder_irq,
143 IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
144 DRV_NAME, encoder);
145 if (err) {
146 dev_err(&pdev->dev, "unable to request IRQ %d\n",
147 encoder->irq_a);
148 goto exit_free_gpio_b;
149 }
150
151 err = request_irq(encoder->irq_b, &rotary_encoder_irq,
152 IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
153 DRV_NAME, encoder);
154 if (err) {
155 dev_err(&pdev->dev, "unable to request IRQ %d\n",
156 encoder->irq_b);
157 goto exit_free_irq_a;
158 }
159
160 platform_set_drvdata(pdev, encoder);
161
162 return 0;
163
164exit_free_irq_a:
165 free_irq(encoder->irq_a, encoder);
166exit_free_gpio_b:
167 gpio_free(pdata->gpio_b);
168exit_free_gpio_a:
169 gpio_free(pdata->gpio_a);
170exit_unregister_input:
171 input_unregister_device(input);
172 input = NULL; /* so we don't try to free it */
173exit_free_mem:
174 input_free_device(input);
175 kfree(encoder);
176 return err;
177}
178
179static int __devexit rotary_encoder_remove(struct platform_device *pdev)
180{
181 struct rotary_encoder *encoder = platform_get_drvdata(pdev);
182 struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data;
183
184 free_irq(encoder->irq_a, encoder);
185 free_irq(encoder->irq_b, encoder);
186 gpio_free(pdata->gpio_a);
187 gpio_free(pdata->gpio_b);
188 input_unregister_device(encoder->input);
189 platform_set_drvdata(pdev, NULL);
190 kfree(encoder);
191
192 return 0;
193}
194
195static struct platform_driver rotary_encoder_driver = {
196 .probe = rotary_encoder_probe,
197 .remove = __devexit_p(rotary_encoder_remove),
198 .driver = {
199 .name = DRV_NAME,
200 .owner = THIS_MODULE,
201 }
202};
203
204static int __init rotary_encoder_init(void)
205{
206 return platform_driver_register(&rotary_encoder_driver);
207}
208
209static void __exit rotary_encoder_exit(void)
210{
211 platform_driver_unregister(&rotary_encoder_driver);
212}
213
214module_init(rotary_encoder_init);
215module_exit(rotary_encoder_exit);
216
217MODULE_ALIAS("platform:" DRV_NAME);
218MODULE_DESCRIPTION("GPIO rotary encoder driver");
219MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
220MODULE_LICENSE("GPL v2");
221
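A note on the state machine in rotary_encoder_irq(): the two GPIO levels are packed into a two-bit state; 0x3 (both high, between detents) arms the machine, a falling edge on one line (state 0x1 or 0x2) records the direction, and 0x0 (both low, on the detent) commits exactly one step and reports the new absolute position. After the commit the machine disarms, so contact bounce around the detent cannot add extra steps until both lines have gone high again. A host-side sketch of the same decode over a canned sample sequence (the 24-step count is an assumption):

#include <stdio.h>

int main(void)
{
	/* one clockwise click, with a bounce back to 0x3 in the middle */
	static const int seq[][2] = {
		{1, 1}, {0, 1}, {1, 1}, {0, 1}, {0, 0}, {1, 1},
	};
	const unsigned int steps = 24;
	unsigned int pos = 0, armed = 0, dir = 0;
	size_t i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++) {
		int state = (seq[i][0] << 1) | seq[i][1];

		switch (state) {
		case 0x0:		/* on the detent: commit one step */
			if (!armed)
				break;
			if (dir)	/* counter-clockwise */
				pos = (pos + steps - 1) % steps;
			else		/* clockwise */
				pos = (pos + 1) % steps;
			printf("pos = %u\n", pos);
			armed = 0;
			break;
		case 0x1:
		case 0x2:		/* first edge decides the direction */
			if (armed)
				dir = state - 1;
			break;
		case 0x3:		/* between detents: arm */
			armed = 1;
			break;
		}
	}
	return 0;
}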
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 4f38e6f7dfdd..c66cc3d08c2f 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -292,4 +292,15 @@ config MOUSE_PXA930_TRKBALL
292 help 292 help
293 Say Y here to support PXA930 Trackball mouse. 293 Say Y here to support PXA930 Trackball mouse.
294 294
295config MOUSE_MAPLE
296 tristate "Maple mouse (for the Dreamcast)"
297 depends on MAPLE
298 help
299 This driver supports the Maple mouse on the SEGA Dreamcast.
300
 301 Most Dreamcast users who have a mouse will say Y here.
302
303 To compile this driver as a module choose M here: the module will be
304 called maplemouse.
305
295endif 306endif
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 8c8a1f236e28..472189468d67 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -6,18 +6,19 @@
6 6
7obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o 7obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
8obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o 8obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
(old drivers/input/mouse/Makefile, lines 9-20)
 9 obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
10 obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
11 obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
12 obj-$(CONFIG_MOUSE_INPORT) += inport.o
13 obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
14 obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o
15 obj-$(CONFIG_MOUSE_PS2) += psmouse.o
16 obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o
17 obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o
18 obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o
19 obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o
20 obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
(new drivers/input/mouse/Makefile, lines 9-21 -- list sorted alphabetically, maplemouse.o added)
 9 obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
10 obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
11 obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
12 obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o
13 obj-$(CONFIG_MOUSE_INPORT) += inport.o
14 obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
15 obj-$(CONFIG_MOUSE_MAPLE) += maplemouse.o
16 obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o
17 obj-$(CONFIG_MOUSE_PS2) += psmouse.o
18 obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o
19 obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
20 obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o
21 obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o
21 22
22psmouse-objs := psmouse-base.o synaptics.o 23psmouse-objs := psmouse-base.o synaptics.o
23 24
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 55cd0fa68339..a1ad2f1a7bb3 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -472,7 +472,7 @@ static enum hgpk_model_t hgpk_get_model(struct psmouse *psmouse)
472 return -EIO; 472 return -EIO;
473 } 473 }
474 474
475 hgpk_dbg(psmouse, "ID: %02x %02x %02x", param[0], param[1], param[2]); 475 hgpk_dbg(psmouse, "ID: %02x %02x %02x\n", param[0], param[1], param[2]);
476 476
477 /* HGPK signature: 0x67, 0x00, 0x<model> */ 477 /* HGPK signature: 0x67, 0x00, 0x<model> */
478 if (param[0] != 0x67 || param[1] != 0x00) 478 if (param[0] != 0x67 || param[1] != 0x00)
diff --git a/drivers/input/mouse/maplemouse.c b/drivers/input/mouse/maplemouse.c
new file mode 100644
index 000000000000..d196abfb68bc
--- /dev/null
+++ b/drivers/input/mouse/maplemouse.c
@@ -0,0 +1,147 @@
1/*
2 * SEGA Dreamcast mouse driver
3 * Based on drivers/usb/usbmouse.c
4 *
5 * Copyright Yaegashi Takeshi, 2001
6 * Adrian McMenamin, 2008
7 */
8
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <linux/input.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/timer.h>
15#include <linux/maple.h>
16
17MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
18MODULE_DESCRIPTION("SEGA Dreamcast mouse driver");
19MODULE_LICENSE("GPL");
20
21struct dc_mouse {
22 struct input_dev *dev;
23 struct maple_device *mdev;
24};
25
26static void dc_mouse_callback(struct mapleq *mq)
27{
28 int buttons, relx, rely, relz;
29 struct maple_device *mapledev = mq->dev;
30 struct dc_mouse *mse = maple_get_drvdata(mapledev);
31 struct input_dev *dev = mse->dev;
32 unsigned char *res = mq->recvbuf;
33
34 buttons = ~res[8];
35 relx = *(unsigned short *)(res + 12) - 512;
36 rely = *(unsigned short *)(res + 14) - 512;
37 relz = *(unsigned short *)(res + 16) - 512;
38
39 input_report_key(dev, BTN_LEFT, buttons & 4);
40 input_report_key(dev, BTN_MIDDLE, buttons & 9);
41 input_report_key(dev, BTN_RIGHT, buttons & 2);
42 input_report_rel(dev, REL_X, relx);
43 input_report_rel(dev, REL_Y, rely);
44 input_report_rel(dev, REL_WHEEL, relz);
45 input_sync(dev);
46}
47
48static int dc_mouse_open(struct input_dev *dev)
49{
50 struct dc_mouse *mse = dev->dev.platform_data;
51
52 maple_getcond_callback(mse->mdev, dc_mouse_callback, HZ/50,
53 MAPLE_FUNC_MOUSE);
54
55 return 0;
56}
57
58static void dc_mouse_close(struct input_dev *dev)
59{
60 struct dc_mouse *mse = dev->dev.platform_data;
61
62 maple_getcond_callback(mse->mdev, dc_mouse_callback, 0,
63 MAPLE_FUNC_MOUSE);
64}
65
66
67static int __devinit probe_maple_mouse(struct device *dev)
68{
69 struct maple_device *mdev = to_maple_dev(dev);
70 struct maple_driver *mdrv = to_maple_driver(dev->driver);
71 struct input_dev *input_dev;
72 struct dc_mouse *mse;
73 int error;
74
75 mse = kzalloc(sizeof(struct dc_mouse), GFP_KERNEL);
76 input_dev = input_allocate_device();
77
78 if (!mse || !input_dev) {
79 error = -ENOMEM;
80 goto fail;
81 }
82
83 mse->dev = input_dev;
84 mse->mdev = mdev;
85
86 input_set_drvdata(input_dev, mse);
87 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
88 input_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
89 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
90 input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) |
91 BIT_MASK(REL_WHEEL);
92 input_dev->name = mdev->product_name;
93 input_dev->id.bustype = BUS_HOST;
94 input_dev->open = dc_mouse_open;
95 input_dev->close = dc_mouse_close;
96
97 mdev->driver = mdrv;
98 maple_set_drvdata(mdev, mse);
99
100 error = input_register_device(input_dev);
101 if (error)
102 goto fail;
103
104 return 0;
105
106fail:
107 input_free_device(input_dev);
108 maple_set_drvdata(mdev, NULL);
109 kfree(mse);
110 mdev->driver = NULL;
111 return error;
112}
113
114static int __devexit remove_maple_mouse(struct device *dev)
115{
116 struct maple_device *mdev = to_maple_dev(dev);
117 struct dc_mouse *mse = maple_get_drvdata(mdev);
118
119 mdev->callback = NULL;
120 input_unregister_device(mse->dev);
121 maple_set_drvdata(mdev, NULL);
122 kfree(mse);
123
124 return 0;
125}
126
127static struct maple_driver dc_mouse_driver = {
128 .function = MAPLE_FUNC_MOUSE,
129 .drv = {
130 .name = "Dreamcast_mouse",
131 .probe = probe_maple_mouse,
132 .remove = __devexit_p(remove_maple_mouse),
133 },
134};
135
136static int __init dc_mouse_init(void)
137{
138 return maple_driver_register(&dc_mouse_driver);
139}
140
141static void __exit dc_mouse_exit(void)
142{
143 maple_driver_unregister(&dc_mouse_driver);
144}
145
146module_init(dc_mouse_init);
147module_exit(dc_mouse_exit);
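A note on dc_mouse_callback() above: the Maple condition packet reports the axes as unsigned samples centred on 512, so subtracting 512 yields the signed relative motion sent as REL_X/REL_Y/REL_WHEEL, and the button byte is active-low, hence the ~res[8] before masking. A tiny sketch of the centre-offset decode (the raw values are made-up examples):

#include <stdio.h>

int main(void)
{
	unsigned short raw_x = 515, raw_y = 508, raw_wheel = 512;

	/* 512 is the rest position, so the deltas are +3, -4 and 0 */
	printf("dx=%d dy=%d dwheel=%d\n",
	       raw_x - 512, raw_y - 512, raw_wheel - 512);
	return 0;
}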
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index fd09c8df81f2..f63995f854ff 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -111,11 +111,8 @@ static int __init pc110pad_init(void)
111 struct pci_dev *dev; 111 struct pci_dev *dev;
112 int err; 112 int err;
113 113
(removed, old lines 114-118)
114 	dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
115 	if (dev) {
116 		pci_dev_put(dev);
117 		return -ENODEV;
118 	}
(added, new lines 114-115)
114 	if (!no_pci_devices())
115 		return -ENODEV;
119 116
120 if (!request_region(pc110pad_io, 4, "pc110pad")) { 117 if (!request_region(pc110pad_io, 4, "pc110pad")) {
121 printk(KERN_ERR "pc110pad: I/O area %#x-%#x in use.\n", 118 printk(KERN_ERR "pc110pad: I/O area %#x-%#x in use.\n",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 6fa2deff7446..83ed2d56b924 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -151,6 +151,14 @@ static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = {
151 DMI_MATCH(DMI_PRODUCT_VERSION, "01"), 151 DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
152 }, 152 },
153 }, 153 },
154 {
155 .ident = "HP DV9700",
156 .matches = {
157 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
158 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
159 DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
160 },
161 },
154 { } 162 { }
155}; 163};
156 164
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index bb6486a8c070..b01fd61dadcc 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -29,6 +29,51 @@ config TOUCHSCREEN_ADS7846
29 To compile this driver as a module, choose M here: the 29 To compile this driver as a module, choose M here: the
30 module will be called ads7846. 30 module will be called ads7846.
31 31
32config TOUCHSCREEN_AD7877
33 tristate "AD7877 based touchscreens"
34 depends on SPI_MASTER
35 help
36 Say Y here if you have a touchscreen interface using the
37 AD7877 controller, and your board-specific initialization
38 code includes that in its table of SPI devices.
39
40 If unsure, say N (but it's safe to say "Y").
41
42 To compile this driver as a module, choose M here: the
43 module will be called ad7877.
44
45config TOUCHSCREEN_AD7879_I2C
46 tristate "AD7879 based touchscreens: AD7879-1 I2C Interface"
47 depends on I2C
48 select TOUCHSCREEN_AD7879
49 help
50 Say Y here if you have a touchscreen interface using the
51 AD7879-1 controller, and your board-specific initialization
52 code includes that in its table of I2C devices.
53
54 If unsure, say N (but it's safe to say "Y").
55
56 To compile this driver as a module, choose M here: the
57 module will be called ad7879.
58
59config TOUCHSCREEN_AD7879_SPI
60 tristate "AD7879 based touchscreens: AD7879 SPI Interface"
61 depends on SPI_MASTER && TOUCHSCREEN_AD7879_I2C = n
62 select TOUCHSCREEN_AD7879
63 help
64 Say Y here if you have a touchscreen interface using the
65 AD7879 controller, and your board-specific initialization
66 code includes that in its table of SPI devices.
67
68 If unsure, say N (but it's safe to say "Y").
69
70 To compile this driver as a module, choose M here: the
71 module will be called ad7879.
72
73config TOUCHSCREEN_AD7879
74 tristate
75 default n
76
32config TOUCHSCREEN_BITSY 77config TOUCHSCREEN_BITSY
33 tristate "Compaq iPAQ H3600 (Bitsy) touchscreen" 78 tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
34 depends on SA1100_BITSY 79 depends on SA1100_BITSY
@@ -308,6 +353,19 @@ config TOUCHSCREEN_WM97XX_MAINSTONE
308 To compile this driver as a module, choose M here: the 353 To compile this driver as a module, choose M here: the
309 module will be called mainstone-wm97xx. 354 module will be called mainstone-wm97xx.
310 355
356config TOUCHSCREEN_WM97XX_ZYLONITE
357 tristate "Zylonite accelerated touch"
358 depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE
359 select TOUCHSCREEN_WM9713
360 help
361 Say Y here for support for streaming mode with the touchscreen
362 on Zylonite systems.
363
364 If unsure, say N.
365
366 To compile this driver as a module, choose M here: the
367 module will be called zylonite-wm97xx.
368
311config TOUCHSCREEN_USB_COMPOSITE 369config TOUCHSCREEN_USB_COMPOSITE
312 tristate "USB Touchscreen Driver" 370 tristate "USB Touchscreen Driver"
313 depends on USB_ARCH_HAS_HCD 371 depends on USB_ARCH_HAS_HCD
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index d3375aff46fe..6700f7b9d165 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,6 +6,8 @@
6 6
7wm97xx-ts-y := wm97xx-core.o 7wm97xx-ts-y := wm97xx-core.o
8 8
9obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
10obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
9obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o 11obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
10obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o 12obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
11obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o 13obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
@@ -34,3 +36,4 @@ wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
34wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o 36wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o
35wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o 37wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o
36obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o 38obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
39obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
new file mode 100644
index 000000000000..e4728a28f492
--- /dev/null
+++ b/drivers/input/touchscreen/ad7877.c
@@ -0,0 +1,844 @@
1/*
2 * Copyright (C) 2006-2008 Michael Hennerich, Analog Devices Inc.
3 *
4 * Description: AD7877 based touchscreen, sensor (ADCs), DAC and GPIO driver
5 * Based on: ads7846.c
6 *
7 * Bugs: Enter bugs at http://blackfin.uclinux.org/
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, see the file COPYING, or write
21 * to the Free Software Foundation, Inc.,
22 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 *
24 * History:
25 * Copyright (c) 2005 David Brownell
26 * Copyright (c) 2006 Nokia Corporation
27 * Various changes: Imre Deak <imre.deak@nokia.com>
28 *
29 * Using code from:
30 * - corgi_ts.c
31 * Copyright (C) 2004-2005 Richard Purdie
32 * - omap_ts.[hc], ads7846.h, ts_osk.c
33 * Copyright (C) 2002 MontaVista Software
34 * Copyright (C) 2004 Texas Instruments
35 * Copyright (C) 2005 Dirk Behme
36 */
37
38
39#include <linux/device.h>
40#include <linux/init.h>
41#include <linux/delay.h>
42#include <linux/input.h>
43#include <linux/interrupt.h>
44#include <linux/slab.h>
45#include <linux/spi/spi.h>
46#include <linux/spi/ad7877.h>
47#include <asm/irq.h>
48
49#define TS_PEN_UP_TIMEOUT msecs_to_jiffies(50)
50
51#define MAX_SPI_FREQ_HZ 20000000
52#define MAX_12BIT ((1<<12)-1)
53
54#define AD7877_REG_ZEROS 0
55#define AD7877_REG_CTRL1 1
56#define AD7877_REG_CTRL2 2
57#define AD7877_REG_ALERT 3
58#define AD7877_REG_AUX1HIGH 4
59#define AD7877_REG_AUX1LOW 5
60#define AD7877_REG_BAT1HIGH 6
61#define AD7877_REG_BAT1LOW 7
62#define AD7877_REG_BAT2HIGH 8
63#define AD7877_REG_BAT2LOW 9
64#define AD7877_REG_TEMP1HIGH 10
65#define AD7877_REG_TEMP1LOW 11
66#define AD7877_REG_SEQ0 12
67#define AD7877_REG_SEQ1 13
68#define AD7877_REG_DAC 14
69#define AD7877_REG_NONE1 15
70#define AD7877_REG_EXTWRITE 15
71#define AD7877_REG_XPLUS 16
72#define AD7877_REG_YPLUS 17
73#define AD7877_REG_Z2 18
74#define AD7877_REG_aux1 19
75#define AD7877_REG_aux2 20
76#define AD7877_REG_aux3 21
77#define AD7877_REG_bat1 22
78#define AD7877_REG_bat2 23
79#define AD7877_REG_temp1 24
80#define AD7877_REG_temp2 25
81#define AD7877_REG_Z1 26
82#define AD7877_REG_GPIOCTRL1 27
83#define AD7877_REG_GPIOCTRL2 28
84#define AD7877_REG_GPIODATA 29
85#define AD7877_REG_NONE2 30
86#define AD7877_REG_NONE3 31
87
88#define AD7877_SEQ_YPLUS_BIT (1<<11)
89#define AD7877_SEQ_XPLUS_BIT (1<<10)
90#define AD7877_SEQ_Z2_BIT (1<<9)
91#define AD7877_SEQ_AUX1_BIT (1<<8)
92#define AD7877_SEQ_AUX2_BIT (1<<7)
93#define AD7877_SEQ_AUX3_BIT (1<<6)
94#define AD7877_SEQ_BAT1_BIT (1<<5)
95#define AD7877_SEQ_BAT2_BIT (1<<4)
96#define AD7877_SEQ_TEMP1_BIT (1<<3)
97#define AD7877_SEQ_TEMP2_BIT (1<<2)
98#define AD7877_SEQ_Z1_BIT (1<<1)
99
100enum {
101 AD7877_SEQ_YPOS = 0,
102 AD7877_SEQ_XPOS = 1,
103 AD7877_SEQ_Z2 = 2,
104 AD7877_SEQ_AUX1 = 3,
105 AD7877_SEQ_AUX2 = 4,
106 AD7877_SEQ_AUX3 = 5,
107 AD7877_SEQ_BAT1 = 6,
108 AD7877_SEQ_BAT2 = 7,
109 AD7877_SEQ_TEMP1 = 8,
110 AD7877_SEQ_TEMP2 = 9,
111 AD7877_SEQ_Z1 = 10,
112 AD7877_NR_SENSE = 11,
113};
114
 115/* DAC Register Default RANGE 0 to Vcc, Voltage Mode, DAC On */
116#define AD7877_DAC_CONF 0x1
117
118/* If gpio3 is set AUX3/GPIO3 acts as GPIO Output */
119#define AD7877_EXTW_GPIO_3_CONF 0x1C4
120#define AD7877_EXTW_GPIO_DATA 0x200
121
122/* Control REG 2 */
123#define AD7877_TMR(x) ((x & 0x3) << 0)
124#define AD7877_REF(x) ((x & 0x1) << 2)
125#define AD7877_POL(x) ((x & 0x1) << 3)
126#define AD7877_FCD(x) ((x & 0x3) << 4)
127#define AD7877_PM(x) ((x & 0x3) << 6)
128#define AD7877_ACQ(x) ((x & 0x3) << 8)
129#define AD7877_AVG(x) ((x & 0x3) << 10)
130
131/* Control REG 1 */
132#define AD7877_SER (1 << 11) /* non-differential */
133#define AD7877_DFR (0 << 11) /* differential */
134
135#define AD7877_MODE_NOC (0) /* Do not convert */
136#define AD7877_MODE_SCC (1) /* Single channel conversion */
137#define AD7877_MODE_SEQ0 (2) /* Sequence 0 in Slave Mode */
138#define AD7877_MODE_SEQ1 (3) /* Sequence 1 in Master Mode */
139
140#define AD7877_CHANADD(x) ((x&0xF)<<7)
141#define AD7877_READADD(x) ((x)<<2)
142#define AD7877_WRITEADD(x) ((x)<<12)
143
144#define AD7877_READ_CHAN(x) (AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_SER | \
145 AD7877_MODE_SCC | AD7877_CHANADD(AD7877_REG_ ## x) | \
146 AD7877_READADD(AD7877_REG_ ## x))
147
148#define AD7877_MM_SEQUENCE (AD7877_SEQ_YPLUS_BIT | AD7877_SEQ_XPLUS_BIT | \
149 AD7877_SEQ_Z2_BIT | AD7877_SEQ_Z1_BIT)
150
151/*
152 * Non-touchscreen sensors only use single-ended conversions.
153 */
154
155struct ser_req {
156 u16 reset;
157 u16 ref_on;
158 u16 command;
159 u16 sample;
160 struct spi_message msg;
161 struct spi_transfer xfer[6];
162};
163
164struct ad7877 {
165 struct input_dev *input;
166 char phys[32];
167
168 struct spi_device *spi;
169 u16 model;
170 u16 vref_delay_usecs;
171 u16 x_plate_ohms;
172 u16 pressure_max;
173
174 u16 cmd_crtl1;
175 u16 cmd_crtl2;
176 u16 cmd_dummy;
177 u16 dac;
178
179 u8 stopacq_polarity;
180 u8 first_conversion_delay;
181 u8 acquisition_time;
182 u8 averaging;
183 u8 pen_down_acc_interval;
184
185 u16 conversion_data[AD7877_NR_SENSE];
186
187 struct spi_transfer xfer[AD7877_NR_SENSE + 2];
188 struct spi_message msg;
189
190 struct mutex mutex;
191 unsigned disabled:1; /* P: mutex */
192 unsigned gpio3:1; /* P: mutex */
193 unsigned gpio4:1; /* P: mutex */
194
195 spinlock_t lock;
196 struct timer_list timer; /* P: lock */
197 unsigned pending:1; /* P: lock */
198};
199
200static int gpio3;
201module_param(gpio3, int, 0);
202MODULE_PARM_DESC(gpio3, "If gpio3 is set to 1 AUX3 acts as GPIO3");
203
204/*
205 * ad7877_read/write are only used for initial setup and for sysfs controls.
206 * The main traffic is done using spi_async() in the interrupt handler.
207 */
208
209static int ad7877_read(struct spi_device *spi, u16 reg)
210{
211 struct ser_req *req;
212 int status, ret;
213
214 req = kzalloc(sizeof *req, GFP_KERNEL);
215 if (!req)
216 return -ENOMEM;
217
218 spi_message_init(&req->msg);
219
220 req->command = (u16) (AD7877_WRITEADD(AD7877_REG_CTRL1) |
221 AD7877_READADD(reg));
222 req->xfer[0].tx_buf = &req->command;
223 req->xfer[0].len = 2;
224
225 req->xfer[1].rx_buf = &req->sample;
226 req->xfer[1].len = 2;
227
228 spi_message_add_tail(&req->xfer[0], &req->msg);
229 spi_message_add_tail(&req->xfer[1], &req->msg);
230
231 status = spi_sync(spi, &req->msg);
232 ret = status ? : req->sample;
233
234 kfree(req);
235
236 return ret;
237}
238
239static int ad7877_write(struct spi_device *spi, u16 reg, u16 val)
240{
241 struct ser_req *req;
242 int status;
243
244 req = kzalloc(sizeof *req, GFP_KERNEL);
245 if (!req)
246 return -ENOMEM;
247
248 spi_message_init(&req->msg);
249
250 req->command = (u16) (AD7877_WRITEADD(reg) | (val & MAX_12BIT));
251 req->xfer[0].tx_buf = &req->command;
252 req->xfer[0].len = 2;
253
254 spi_message_add_tail(&req->xfer[0], &req->msg);
255
256 status = spi_sync(spi, &req->msg);
257
258 kfree(req);
259
260 return status;
261}
262
263static int ad7877_read_adc(struct spi_device *spi, unsigned command)
264{
265 struct ad7877 *ts = dev_get_drvdata(&spi->dev);
266 struct ser_req *req;
267 int status;
268 int sample;
269 int i;
270
271 req = kzalloc(sizeof *req, GFP_KERNEL);
272 if (!req)
273 return -ENOMEM;
274
275 spi_message_init(&req->msg);
276
277 /* activate reference, so it has time to settle; */
278 req->ref_on = AD7877_WRITEADD(AD7877_REG_CTRL2) |
279 AD7877_POL(ts->stopacq_polarity) |
280 AD7877_AVG(0) | AD7877_PM(2) | AD7877_TMR(0) |
281 AD7877_ACQ(ts->acquisition_time) | AD7877_FCD(0);
282
283 req->reset = AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_MODE_NOC;
284
285 req->command = (u16) command;
286
287 req->xfer[0].tx_buf = &req->reset;
288 req->xfer[0].len = 2;
289
290 req->xfer[1].tx_buf = &req->ref_on;
291 req->xfer[1].len = 2;
292 req->xfer[1].delay_usecs = ts->vref_delay_usecs;
293
294 req->xfer[2].tx_buf = &req->command;
295 req->xfer[2].len = 2;
296 req->xfer[2].delay_usecs = ts->vref_delay_usecs;
297
298 req->xfer[3].rx_buf = &req->sample;
299 req->xfer[3].len = 2;
300
301 req->xfer[4].tx_buf = &ts->cmd_crtl2; /*REF OFF*/
302 req->xfer[4].len = 2;
303
304 req->xfer[5].tx_buf = &ts->cmd_crtl1; /*DEFAULT*/
305 req->xfer[5].len = 2;
306
307 /* group all the transfers together, so we can't interfere with
308 * reading touchscreen state; disable penirq while sampling
309 */
310 for (i = 0; i < 6; i++)
311 spi_message_add_tail(&req->xfer[i], &req->msg);
312
313 status = spi_sync(spi, &req->msg);
314 sample = req->sample;
315
316 kfree(req);
317
318 return status ? : sample;
319}
320
321static void ad7877_rx(struct ad7877 *ts)
322{
323 struct input_dev *input_dev = ts->input;
324 unsigned Rt;
325 u16 x, y, z1, z2;
326
327 x = ts->conversion_data[AD7877_SEQ_XPOS] & MAX_12BIT;
328 y = ts->conversion_data[AD7877_SEQ_YPOS] & MAX_12BIT;
329 z1 = ts->conversion_data[AD7877_SEQ_Z1] & MAX_12BIT;
330 z2 = ts->conversion_data[AD7877_SEQ_Z2] & MAX_12BIT;
331
332 /*
333 * The samples processed here are already preprocessed by the AD7877.
334 * The preprocessing function consists of an averaging filter.
335 * The combination of 'first conversion delay' and averaging provides a robust solution,
336 * discarding the spurious noise in the signal and keeping only the data of interest.
337 * The size of the averaging filter is programmable. (dev.platform_data, see linux/spi/ad7877.h)
338 * Other user-programmable conversion controls include variable acquisition time,
339 * and first conversion delay. Up to 16 averages can be taken per conversion.
340 */
341
342 if (likely(x && z1)) {
343 /* compute touch pressure resistance using equation #1 */
344 Rt = (z2 - z1) * x * ts->x_plate_ohms;
345 Rt /= z1;
346 Rt = (Rt + 2047) >> 12;
347
348 input_report_abs(input_dev, ABS_X, x);
349 input_report_abs(input_dev, ABS_Y, y);
350 input_report_abs(input_dev, ABS_PRESSURE, Rt);
351 input_sync(input_dev);
352 }
353}
354
355static inline void ad7877_ts_event_release(struct ad7877 *ts)
356{
357 struct input_dev *input_dev = ts->input;
358
359 input_report_abs(input_dev, ABS_PRESSURE, 0);
360 input_sync(input_dev);
361}
362
363static void ad7877_timer(unsigned long handle)
364{
365 struct ad7877 *ts = (void *)handle;
366
367 ad7877_ts_event_release(ts);
368}
369
370static irqreturn_t ad7877_irq(int irq, void *handle)
371{
372 struct ad7877 *ts = handle;
373 unsigned long flags;
374 int status;
375
376 /*
377 * The repeated conversion sequencer controlled by TMR kicked off
 378	 * too fast. We ignore this trigger and process the sample sequence
 379	 * already in the queue; it can't be older than 9.4 ms. The pending
 380	 * flag also ensures ts->msg is not issued twice while it is in flight.
381 */
382
383 spin_lock_irqsave(&ts->lock, flags);
384 if (!ts->pending) {
385 ts->pending = 1;
386
387 status = spi_async(ts->spi, &ts->msg);
388 if (status)
389 dev_err(&ts->spi->dev, "spi_sync --> %d\n", status);
390 }
391 spin_unlock_irqrestore(&ts->lock, flags);
392
393 return IRQ_HANDLED;
394}
395
396static void ad7877_callback(void *_ts)
397{
398 struct ad7877 *ts = _ts;
399
400 spin_lock_irq(&ts->lock);
401
402 ad7877_rx(ts);
403 ts->pending = 0;
404 mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT);
405
406 spin_unlock_irq(&ts->lock);
407}
408
409static void ad7877_disable(struct ad7877 *ts)
410{
411 mutex_lock(&ts->mutex);
412
413 if (!ts->disabled) {
414 ts->disabled = 1;
415 disable_irq(ts->spi->irq);
416
417 /* Wait for spi_async callback */
418 while (ts->pending)
419 msleep(1);
420
421 if (del_timer_sync(&ts->timer))
422 ad7877_ts_event_release(ts);
423 }
424
425 /* we know the chip's in lowpower mode since we always
426 * leave it that way after every request
427 */
428
429 mutex_unlock(&ts->mutex);
430}
431
432static void ad7877_enable(struct ad7877 *ts)
433{
434 mutex_lock(&ts->mutex);
435
436 if (ts->disabled) {
437 ts->disabled = 0;
438 enable_irq(ts->spi->irq);
439 }
440
441 mutex_unlock(&ts->mutex);
442}
443
444#define SHOW(name) static ssize_t \
445name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
446{ \
447 struct ad7877 *ts = dev_get_drvdata(dev); \
448 ssize_t v = ad7877_read_adc(ts->spi, \
449 AD7877_READ_CHAN(name)); \
450 if (v < 0) \
451 return v; \
452 return sprintf(buf, "%u\n", (unsigned) v); \
453} \
454static DEVICE_ATTR(name, S_IRUGO, name ## _show, NULL);
455
456SHOW(aux1)
457SHOW(aux2)
458SHOW(aux3)
459SHOW(bat1)
460SHOW(bat2)
461SHOW(temp1)
462SHOW(temp2)
463
464static ssize_t ad7877_disable_show(struct device *dev,
465 struct device_attribute *attr, char *buf)
466{
467 struct ad7877 *ts = dev_get_drvdata(dev);
468
469 return sprintf(buf, "%u\n", ts->disabled);
470}
471
472static ssize_t ad7877_disable_store(struct device *dev,
473 struct device_attribute *attr,
474 const char *buf, size_t count)
475{
476 struct ad7877 *ts = dev_get_drvdata(dev);
477 unsigned long val;
478 int error;
479
480 error = strict_strtoul(buf, 10, &val);
481 if (error)
482 return error;
483
484 if (val)
485 ad7877_disable(ts);
486 else
487 ad7877_enable(ts);
488
489 return count;
490}
491
492static DEVICE_ATTR(disable, 0664, ad7877_disable_show, ad7877_disable_store);
493
494static ssize_t ad7877_dac_show(struct device *dev,
495 struct device_attribute *attr, char *buf)
496{
497 struct ad7877 *ts = dev_get_drvdata(dev);
498
499 return sprintf(buf, "%u\n", ts->dac);
500}
501
502static ssize_t ad7877_dac_store(struct device *dev,
503 struct device_attribute *attr,
504 const char *buf, size_t count)
505{
506 struct ad7877 *ts = dev_get_drvdata(dev);
507 unsigned long val;
508 int error;
509
510 error = strict_strtoul(buf, 10, &val);
511 if (error)
512 return error;
513
514 mutex_lock(&ts->mutex);
515 ts->dac = val & 0xFF;
516 ad7877_write(ts->spi, AD7877_REG_DAC, (ts->dac << 4) | AD7877_DAC_CONF);
517 mutex_unlock(&ts->mutex);
518
519 return count;
520}
521
522static DEVICE_ATTR(dac, 0664, ad7877_dac_show, ad7877_dac_store);
523
524static ssize_t ad7877_gpio3_show(struct device *dev,
525 struct device_attribute *attr, char *buf)
526{
527 struct ad7877 *ts = dev_get_drvdata(dev);
528
529 return sprintf(buf, "%u\n", ts->gpio3);
530}
531
532static ssize_t ad7877_gpio3_store(struct device *dev,
533 struct device_attribute *attr,
534 const char *buf, size_t count)
535{
536 struct ad7877 *ts = dev_get_drvdata(dev);
537 unsigned long val;
538 int error;
539
540 error = strict_strtoul(buf, 10, &val);
541 if (error)
542 return error;
543
544 mutex_lock(&ts->mutex);
545 ts->gpio3 = !!val;
546 ad7877_write(ts->spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_DATA |
547 (ts->gpio4 << 4) | (ts->gpio3 << 5));
548 mutex_unlock(&ts->mutex);
549
550 return count;
551}
552
553static DEVICE_ATTR(gpio3, 0664, ad7877_gpio3_show, ad7877_gpio3_store);
554
555static ssize_t ad7877_gpio4_show(struct device *dev,
556 struct device_attribute *attr, char *buf)
557{
558 struct ad7877 *ts = dev_get_drvdata(dev);
559
560 return sprintf(buf, "%u\n", ts->gpio4);
561}
562
563static ssize_t ad7877_gpio4_store(struct device *dev,
564 struct device_attribute *attr,
565 const char *buf, size_t count)
566{
567 struct ad7877 *ts = dev_get_drvdata(dev);
568 unsigned long val;
569 int error;
570
571 error = strict_strtoul(buf, 10, &val);
572 if (error)
573 return error;
574
575 mutex_lock(&ts->mutex);
576 ts->gpio4 = !!val;
577 ad7877_write(ts->spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_DATA |
578 (ts->gpio4 << 4) | (ts->gpio3 << 5));
579 mutex_unlock(&ts->mutex);
580
581 return count;
582}
583
584static DEVICE_ATTR(gpio4, 0664, ad7877_gpio4_show, ad7877_gpio4_store);
585
586static struct attribute *ad7877_attributes[] = {
587 &dev_attr_temp1.attr,
588 &dev_attr_temp2.attr,
589 &dev_attr_aux1.attr,
590 &dev_attr_aux2.attr,
591 &dev_attr_bat1.attr,
592 &dev_attr_bat2.attr,
593 &dev_attr_disable.attr,
594 &dev_attr_dac.attr,
595 &dev_attr_gpio4.attr,
596 NULL
597};
598
599static const struct attribute_group ad7877_attr_group = {
600 .attrs = ad7877_attributes,
601};
602
603static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
604{
605 struct spi_message *m;
606 int i;
607
608 ts->cmd_crtl2 = AD7877_WRITEADD(AD7877_REG_CTRL2) |
609 AD7877_POL(ts->stopacq_polarity) |
610 AD7877_AVG(ts->averaging) | AD7877_PM(1) |
611 AD7877_TMR(ts->pen_down_acc_interval) |
612 AD7877_ACQ(ts->acquisition_time) |
613 AD7877_FCD(ts->first_conversion_delay);
614
615 ad7877_write(spi, AD7877_REG_CTRL2, ts->cmd_crtl2);
616
617 ts->cmd_crtl1 = AD7877_WRITEADD(AD7877_REG_CTRL1) |
618 AD7877_READADD(AD7877_REG_XPLUS-1) |
619 AD7877_MODE_SEQ1 | AD7877_DFR;
620
621 ad7877_write(spi, AD7877_REG_CTRL1, ts->cmd_crtl1);
622
623 ts->cmd_dummy = 0;
624
625 m = &ts->msg;
626
627 spi_message_init(m);
628
629 m->complete = ad7877_callback;
630 m->context = ts;
631
632 ts->xfer[0].tx_buf = &ts->cmd_crtl1;
633 ts->xfer[0].len = 2;
634
635 spi_message_add_tail(&ts->xfer[0], m);
636
637 ts->xfer[1].tx_buf = &ts->cmd_dummy; /* Send ZERO */
638 ts->xfer[1].len = 2;
639
640 spi_message_add_tail(&ts->xfer[1], m);
641
642 for (i = 0; i < 11; i++) {
643 ts->xfer[i + 2].rx_buf = &ts->conversion_data[AD7877_SEQ_YPOS + i];
644 ts->xfer[i + 2].len = 2;
645 spi_message_add_tail(&ts->xfer[i + 2], m);
646 }
647}
648
649static int __devinit ad7877_probe(struct spi_device *spi)
650{
651 struct ad7877 *ts;
652 struct input_dev *input_dev;
653 struct ad7877_platform_data *pdata = spi->dev.platform_data;
654 int err;
655 u16 verify;
656
657 if (!spi->irq) {
658 dev_dbg(&spi->dev, "no IRQ?\n");
659 return -ENODEV;
660 }
661
662 if (!pdata) {
663 dev_dbg(&spi->dev, "no platform data?\n");
664 return -ENODEV;
665 }
666
667 /* don't exceed max specified SPI CLK frequency */
668 if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) {
669 dev_dbg(&spi->dev, "SPI CLK %d Hz?\n",spi->max_speed_hz);
670 return -EINVAL;
671 }
672
673 ts = kzalloc(sizeof(struct ad7877), GFP_KERNEL);
674 input_dev = input_allocate_device();
675 if (!ts || !input_dev) {
676 err = -ENOMEM;
677 goto err_free_mem;
678 }
679
680 dev_set_drvdata(&spi->dev, ts);
681 ts->spi = spi;
682 ts->input = input_dev;
683
684 setup_timer(&ts->timer, ad7877_timer, (unsigned long) ts);
685 mutex_init(&ts->mutex);
686 spin_lock_init(&ts->lock);
687
688 ts->model = pdata->model ? : 7877;
689 ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
690 ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
691 ts->pressure_max = pdata->pressure_max ? : ~0;
692
693 ts->stopacq_polarity = pdata->stopacq_polarity;
694 ts->first_conversion_delay = pdata->first_conversion_delay;
695 ts->acquisition_time = pdata->acquisition_time;
696 ts->averaging = pdata->averaging;
697 ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
698
699 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev));
700
701 input_dev->name = "AD7877 Touchscreen";
702 input_dev->phys = ts->phys;
703 input_dev->dev.parent = &spi->dev;
704
705 __set_bit(EV_ABS, input_dev->evbit);
706 __set_bit(ABS_X, input_dev->absbit);
707 __set_bit(ABS_Y, input_dev->absbit);
708 __set_bit(ABS_PRESSURE, input_dev->absbit);
709
710 input_set_abs_params(input_dev, ABS_X,
711 pdata->x_min ? : 0,
712 pdata->x_max ? : MAX_12BIT,
713 0, 0);
714 input_set_abs_params(input_dev, ABS_Y,
715 pdata->y_min ? : 0,
716 pdata->y_max ? : MAX_12BIT,
717 0, 0);
718 input_set_abs_params(input_dev, ABS_PRESSURE,
719 pdata->pressure_min, pdata->pressure_max, 0, 0);
720
721 ad7877_write(spi, AD7877_REG_SEQ1, AD7877_MM_SEQUENCE);
722
723 verify = ad7877_read(spi, AD7877_REG_SEQ1);
724
725 if (verify != AD7877_MM_SEQUENCE){
726 dev_err(&spi->dev, "%s: Failed to probe %s\n",
727 dev_name(&spi->dev), input_dev->name);
728 err = -ENODEV;
729 goto err_free_mem;
730 }
731
732 if (gpio3)
733 ad7877_write(spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_3_CONF);
734
735 ad7877_setup_ts_def_msg(spi, ts);
736
737 /* Request AD7877 /DAV GPIO interrupt */
738
739 err = request_irq(spi->irq, ad7877_irq, IRQF_TRIGGER_FALLING |
740 IRQF_SAMPLE_RANDOM, spi->dev.driver->name, ts);
741 if (err) {
742 dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
743 goto err_free_mem;
744 }
745
746 err = sysfs_create_group(&spi->dev.kobj, &ad7877_attr_group);
747 if (err)
748 goto err_free_irq;
749
750 err = device_create_file(&spi->dev,
751 gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
752 if (err)
753 goto err_remove_attr_group;
754
755 err = input_register_device(input_dev);
756 if (err)
757 goto err_remove_attr;
758
759 return 0;
760
761err_remove_attr:
762 device_remove_file(&spi->dev,
763 gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
764err_remove_attr_group:
765 sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group);
766err_free_irq:
767 free_irq(spi->irq, ts);
768err_free_mem:
769 input_free_device(input_dev);
770 kfree(ts);
771 dev_set_drvdata(&spi->dev, NULL);
772 return err;
773}
774
775static int __devexit ad7877_remove(struct spi_device *spi)
776{
777 struct ad7877 *ts = dev_get_drvdata(&spi->dev);
778
779 sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group);
780 device_remove_file(&spi->dev,
781 gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3);
782
783 ad7877_disable(ts);
784 free_irq(ts->spi->irq, ts);
785
786 input_unregister_device(ts->input);
787 kfree(ts);
788
789 dev_dbg(&spi->dev, "unregistered touchscreen\n");
790 dev_set_drvdata(&spi->dev, NULL);
791
792 return 0;
793}
794
795#ifdef CONFIG_PM
796static int ad7877_suspend(struct spi_device *spi, pm_message_t message)
797{
798 struct ad7877 *ts = dev_get_drvdata(&spi->dev);
799
800 ad7877_disable(ts);
801
802 return 0;
803}
804
805static int ad7877_resume(struct spi_device *spi)
806{
807 struct ad7877 *ts = dev_get_drvdata(&spi->dev);
808
809 ad7877_enable(ts);
810
811 return 0;
812}
813#else
814#define ad7877_suspend NULL
815#define ad7877_resume NULL
816#endif
817
818static struct spi_driver ad7877_driver = {
819 .driver = {
820 .name = "ad7877",
821 .bus = &spi_bus_type,
822 .owner = THIS_MODULE,
823 },
824 .probe = ad7877_probe,
825 .remove = __devexit_p(ad7877_remove),
826 .suspend = ad7877_suspend,
827 .resume = ad7877_resume,
828};
829
830static int __init ad7877_init(void)
831{
832 return spi_register_driver(&ad7877_driver);
833}
834module_init(ad7877_init);
835
836static void __exit ad7877_exit(void)
837{
838 spi_unregister_driver(&ad7877_driver);
839}
840module_exit(ad7877_exit);
841
842MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
843MODULE_DESCRIPTION("AD7877 touchscreen driver");
844MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
new file mode 100644
index 000000000000..ea4c61d68683
--- /dev/null
+++ b/drivers/input/touchscreen/ad7879.c
@@ -0,0 +1,782 @@
1/*
2 * Copyright (C) 2008 Michael Hennerich, Analog Devices Inc.
3 *
4 * Description: AD7879-based touchscreen and GPIO driver (I2C/SPI interface)
5 *
6 * Bugs: Enter bugs at http://blackfin.uclinux.org/
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see the file COPYING, or write
20 * to the Free Software Foundation, Inc.,
21 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 * History:
24 * Copyright (c) 2005 David Brownell
25 * Copyright (c) 2006 Nokia Corporation
26 * Various changes: Imre Deak <imre.deak@nokia.com>
27 *
28 * Using code from:
29 * - corgi_ts.c
30 * Copyright (C) 2004-2005 Richard Purdie
31 * - omap_ts.[hc], ads7846.h, ts_osk.c
32 * Copyright (C) 2002 MontaVista Software
33 * Copyright (C) 2004 Texas Instruments
34 * Copyright (C) 2005 Dirk Behme
35 * - ad7877.c
36 * Copyright (C) 2006-2008 Analog Devices Inc.
37 */
38
39#include <linux/device.h>
40#include <linux/init.h>
41#include <linux/delay.h>
42#include <linux/input.h>
43#include <linux/interrupt.h>
44#include <linux/irq.h>
45#include <linux/slab.h>
46#include <linux/workqueue.h>
47#include <linux/spi/spi.h>
48#include <linux/i2c.h>
49
50#include <linux/spi/ad7879.h>
51
52#define AD7879_REG_ZEROS 0
53#define AD7879_REG_CTRL1 1
54#define AD7879_REG_CTRL2 2
55#define AD7879_REG_CTRL3 3
56#define AD7879_REG_AUX1HIGH 4
57#define AD7879_REG_AUX1LOW 5
58#define AD7879_REG_TEMP1HIGH 6
59#define AD7879_REG_TEMP1LOW 7
60#define AD7879_REG_XPLUS 8
61#define AD7879_REG_YPLUS 9
62#define AD7879_REG_Z1 10
63#define AD7879_REG_Z2 11
64#define AD7879_REG_AUXVBAT 12
65#define AD7879_REG_TEMP 13
66#define AD7879_REG_REVID 14
67
68/* Control REG 1 */
69#define AD7879_TMR(x) ((x & 0xFF) << 0)
70#define AD7879_ACQ(x) ((x & 0x3) << 8)
71#define AD7879_MODE_NOC (0 << 10) /* Do not convert */
72#define AD7879_MODE_SCC (1 << 10) /* Single channel conversion */
73#define AD7879_MODE_SEQ0 (2 << 10) /* Sequence 0 in Slave Mode */
74#define AD7879_MODE_SEQ1 (3 << 10) /* Sequence 1 in Master Mode */
75#define AD7879_MODE_INT (1 << 15) /* PENIRQ disabled INT enabled */
76
77/* Control REG 2 */
78#define AD7879_FCD(x) ((x & 0x3) << 0)
79#define AD7879_RESET (1 << 4)
80#define AD7879_MFS(x) ((x & 0x3) << 5)
81#define AD7879_AVG(x) ((x & 0x3) << 7)
82#define AD7879_SER (1 << 9) /* non-differential */
83#define AD7879_DFR (0 << 9) /* differential */
84#define AD7879_GPIOPOL (1 << 10)
85#define AD7879_GPIODIR (1 << 11)
86#define AD7879_GPIO_DATA (1 << 12)
87#define AD7879_GPIO_EN (1 << 13)
88#define AD7879_PM(x) ((x & 0x3) << 14)
89#define AD7879_PM_SHUTDOWN (0)
90#define AD7879_PM_DYN (1)
91#define AD7879_PM_FULLON (2)
92
93/* Control REG 3 */
94#define AD7879_TEMPMASK_BIT (1<<15)
95#define AD7879_AUXVBATMASK_BIT (1<<14)
96#define AD7879_INTMODE_BIT (1<<13)
97#define AD7879_GPIOALERTMASK_BIT (1<<12)
98#define AD7879_AUXLOW_BIT (1<<11)
99#define AD7879_AUXHIGH_BIT (1<<10)
100#define AD7879_TEMPLOW_BIT (1<<9)
101#define AD7879_TEMPHIGH_BIT (1<<8)
102#define AD7879_YPLUS_BIT (1<<7)
103#define AD7879_XPLUS_BIT (1<<6)
104#define AD7879_Z1_BIT (1<<5)
105#define AD7879_Z2_BIT (1<<4)
106#define AD7879_AUX_BIT (1<<3)
107#define AD7879_VBAT_BIT (1<<2)
108#define AD7879_TEMP_BIT (1<<1)
109
110enum {
111 AD7879_SEQ_XPOS = 0,
112 AD7879_SEQ_YPOS = 1,
113 AD7879_SEQ_Z1 = 2,
114 AD7879_SEQ_Z2 = 3,
115 AD7879_NR_SENSE = 4,
116};
117
118#define MAX_12BIT ((1<<12)-1)
119#define TS_PEN_UP_TIMEOUT msecs_to_jiffies(50)
120
121#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
122#define AD7879_DEVID 0x7A
123typedef struct spi_device bus_device;
124#elif defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
125#define AD7879_DEVID 0x79
126typedef struct i2c_client bus_device;
127#endif
128
129struct ad7879 {
130 bus_device *bus;
131 struct input_dev *input;
132 struct work_struct work;
133 struct timer_list timer;
134
135 struct mutex mutex;
136 unsigned disabled:1; /* P: mutex */
137
138#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
139 struct spi_message msg;
140 struct spi_transfer xfer[AD7879_NR_SENSE + 1];
141 u16 cmd;
142#endif
143 u16 conversion_data[AD7879_NR_SENSE];
144 char phys[32];
145 u8 first_conversion_delay;
146 u8 acquisition_time;
147 u8 averaging;
148 u8 pen_down_acc_interval;
149 u8 median;
150 u16 x_plate_ohms;
151 u16 pressure_max;
152 u16 gpio_init;
153 u16 cmd_crtl1;
154 u16 cmd_crtl2;
155 u16 cmd_crtl3;
156 unsigned gpio:1;
157};
158
159static int ad7879_read(bus_device *, u8);
160static int ad7879_write(bus_device *, u8, u16);
161static void ad7879_collect(struct ad7879 *);
162
163static void ad7879_report(struct ad7879 *ts)
164{
165 struct input_dev *input_dev = ts->input;
166 unsigned Rt;
167 u16 x, y, z1, z2;
168
169 x = ts->conversion_data[AD7879_SEQ_XPOS] & MAX_12BIT;
170 y = ts->conversion_data[AD7879_SEQ_YPOS] & MAX_12BIT;
171 z1 = ts->conversion_data[AD7879_SEQ_Z1] & MAX_12BIT;
172 z2 = ts->conversion_data[AD7879_SEQ_Z2] & MAX_12BIT;
173
174	/*
175	 * The samples processed here have already been preprocessed by the AD7879:
176	 * a median filter followed by an averaging filter discards spurious noise
177	 * while keeping only the data of interest. The size of both filters is
178	 * programmable via dev.platform_data (see linux/spi/ad7879.h). Other
179	 * user-programmable conversion controls include the acquisition time and
180	 * the first conversion delay. Up to 16 averages can be taken per
181	 * conversion.
182	 */
183
184 if (likely(x && z1)) {
185 /* compute touch pressure resistance using equation #1 */
186 Rt = (z2 - z1) * x * ts->x_plate_ohms;
187 Rt /= z1;
188 Rt = (Rt + 2047) >> 12;
189
190 input_report_abs(input_dev, ABS_X, x);
191 input_report_abs(input_dev, ABS_Y, y);
192 input_report_abs(input_dev, ABS_PRESSURE, Rt);
193 input_sync(input_dev);
194 }
195}
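Written out, the arithmetic above is the usual four-wire resistive touchscreen pressure estimate ("equation #1" presumably refers to the numbering used in the AD7879 data sheet). With x, z1 and z2 the 12-bit conversion results and the final (Rt + 2047) >> 12 acting as a rounded division by 4096, it computes

$$R_{touch} = R_{x\text{-}plate}\cdot\frac{x}{4096}\cdot\frac{z_2 - z_1}{z_1}$$

and the resulting resistance value is reported directly as ABS_PRESSURE.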
196
197static void ad7879_work(struct work_struct *work)
198{
199 struct ad7879 *ts = container_of(work, struct ad7879, work);
200
201 /* use keventd context to read the result registers */
202 ad7879_collect(ts);
203 ad7879_report(ts);
204 mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT);
205}
206
207static void ad7879_ts_event_release(struct ad7879 *ts)
208{
209 struct input_dev *input_dev = ts->input;
210
211 input_report_abs(input_dev, ABS_PRESSURE, 0);
212 input_sync(input_dev);
213}
214
215static void ad7879_timer(unsigned long handle)
216{
217 struct ad7879 *ts = (void *)handle;
218
219 ad7879_ts_event_release(ts);
220}
221
222static irqreturn_t ad7879_irq(int irq, void *handle)
223{
224 struct ad7879 *ts = handle;
225
226	/* The repeated conversion sequencer controlled by TMR kicked off too fast:
227	 * ignore this event and process the sample sequence already queued.
228	 * It can't be older than 9.4 ms.
229	 */
230
231 if (!work_pending(&ts->work))
232 schedule_work(&ts->work);
233
234 return IRQ_HANDLED;
235}
236
237static void ad7879_setup(struct ad7879 *ts)
238{
239 ts->cmd_crtl3 = AD7879_YPLUS_BIT |
240 AD7879_XPLUS_BIT |
241 AD7879_Z2_BIT |
242 AD7879_Z1_BIT |
243 AD7879_TEMPMASK_BIT |
244 AD7879_AUXVBATMASK_BIT |
245 AD7879_GPIOALERTMASK_BIT;
246
247 ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR |
248 AD7879_AVG(ts->averaging) |
249 AD7879_MFS(ts->median) |
250 AD7879_FCD(ts->first_conversion_delay) |
251 ts->gpio_init;
252
253 ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 |
254 AD7879_ACQ(ts->acquisition_time) |
255 AD7879_TMR(ts->pen_down_acc_interval);
256
257 ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
258 ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3);
259 ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1);
260}
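As a worked example of how these control words compose, using the bit-field macros defined near the top of the file (the parameter values are picked purely for illustration):

	/* Illustration only: CTRL1 for acquisition_time = 2 and
	 * pen_down_acc_interval = 255.
	 */
	u16 ctrl1 = AD7879_MODE_INT  |	/* 1 << 15        = 0x8000 */
		    AD7879_MODE_SEQ1 |	/* 3 << 10        = 0x0C00 */
		    AD7879_ACQ(2)    |	/* (2 & 0x3) << 8 = 0x0200 */
		    AD7879_TMR(255);	/* 255 & 0xFF     = 0x00FF */
					/* ctrl1 == 0x8EFF          */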
261
262static void ad7879_disable(struct ad7879 *ts)
263{
264 mutex_lock(&ts->mutex);
265
266 if (!ts->disabled) {
267
268 ts->disabled = 1;
269 disable_irq(ts->bus->irq);
270
271 cancel_work_sync(&ts->work);
272
273 if (del_timer_sync(&ts->timer))
274 ad7879_ts_event_release(ts);
275
276 ad7879_write(ts->bus, AD7879_REG_CTRL2,
277 AD7879_PM(AD7879_PM_SHUTDOWN));
278 }
279
280 mutex_unlock(&ts->mutex);
281}
282
283static void ad7879_enable(struct ad7879 *ts)
284{
285 mutex_lock(&ts->mutex);
286
287 if (ts->disabled) {
288 ad7879_setup(ts);
289 ts->disabled = 0;
290 enable_irq(ts->bus->irq);
291 }
292
293 mutex_unlock(&ts->mutex);
294}
295
296static ssize_t ad7879_disable_show(struct device *dev,
297 struct device_attribute *attr, char *buf)
298{
299 struct ad7879 *ts = dev_get_drvdata(dev);
300
301 return sprintf(buf, "%u\n", ts->disabled);
302}
303
304static ssize_t ad7879_disable_store(struct device *dev,
305 struct device_attribute *attr,
306 const char *buf, size_t count)
307{
308 struct ad7879 *ts = dev_get_drvdata(dev);
309 unsigned long val;
310 int error;
311
312 error = strict_strtoul(buf, 10, &val);
313 if (error)
314 return error;
315
316 if (val)
317 ad7879_disable(ts);
318 else
319 ad7879_enable(ts);
320
321 return count;
322}
323
324static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store);
325
326static ssize_t ad7879_gpio_show(struct device *dev,
327 struct device_attribute *attr, char *buf)
328{
329 struct ad7879 *ts = dev_get_drvdata(dev);
330
331 return sprintf(buf, "%u\n", ts->gpio);
332}
333
334static ssize_t ad7879_gpio_store(struct device *dev,
335 struct device_attribute *attr,
336 const char *buf, size_t count)
337{
338 struct ad7879 *ts = dev_get_drvdata(dev);
339 unsigned long val;
340 int error;
341
342 error = strict_strtoul(buf, 10, &val);
343 if (error)
344 return error;
345
346 mutex_lock(&ts->mutex);
347 ts->gpio = !!val;
348 error = ad7879_write(ts->bus, AD7879_REG_CTRL2,
349 ts->gpio ?
350 ts->cmd_crtl2 & ~AD7879_GPIO_DATA :
351 ts->cmd_crtl2 | AD7879_GPIO_DATA);
352 mutex_unlock(&ts->mutex);
353
354 return error ? : count;
355}
356
357static DEVICE_ATTR(gpio, 0664, ad7879_gpio_show, ad7879_gpio_store);
358
359static struct attribute *ad7879_attributes[] = {
360 &dev_attr_disable.attr,
361 &dev_attr_gpio.attr,
362 NULL
363};
364
365static const struct attribute_group ad7879_attr_group = {
366 .attrs = ad7879_attributes,
367};
368
369static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
370{
371 struct input_dev *input_dev;
372 struct ad7879_platform_data *pdata = bus->dev.platform_data;
373 int err;
374 u16 revid;
375
376 if (!bus->irq) {
377 dev_err(&bus->dev, "no IRQ?\n");
378 return -ENODEV;
379 }
380
381 if (!pdata) {
382 dev_err(&bus->dev, "no platform data?\n");
383 return -ENODEV;
384 }
385
386 input_dev = input_allocate_device();
387 if (!input_dev)
388 return -ENOMEM;
389
390 ts->input = input_dev;
391
392 setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts);
393 INIT_WORK(&ts->work, ad7879_work);
394 mutex_init(&ts->mutex);
395
396 ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
397 ts->pressure_max = pdata->pressure_max ? : ~0;
398
399 ts->first_conversion_delay = pdata->first_conversion_delay;
400 ts->acquisition_time = pdata->acquisition_time;
401 ts->averaging = pdata->averaging;
402 ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
403 ts->median = pdata->median;
404
405 if (pdata->gpio_output)
406 ts->gpio_init = AD7879_GPIO_EN |
407 (pdata->gpio_default ? 0 : AD7879_GPIO_DATA);
408 else
409 ts->gpio_init = AD7879_GPIO_EN | AD7879_GPIODIR;
410
411 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev));
412
413 input_dev->name = "AD7879 Touchscreen";
414 input_dev->phys = ts->phys;
415 input_dev->dev.parent = &bus->dev;
416
417 __set_bit(EV_ABS, input_dev->evbit);
418 __set_bit(ABS_X, input_dev->absbit);
419 __set_bit(ABS_Y, input_dev->absbit);
420 __set_bit(ABS_PRESSURE, input_dev->absbit);
421
422 input_set_abs_params(input_dev, ABS_X,
423 pdata->x_min ? : 0,
424 pdata->x_max ? : MAX_12BIT,
425 0, 0);
426 input_set_abs_params(input_dev, ABS_Y,
427 pdata->y_min ? : 0,
428 pdata->y_max ? : MAX_12BIT,
429 0, 0);
430 input_set_abs_params(input_dev, ABS_PRESSURE,
431 pdata->pressure_min, pdata->pressure_max, 0, 0);
432
433 err = ad7879_write(bus, AD7879_REG_CTRL2, AD7879_RESET);
434
435 if (err < 0) {
436 dev_err(&bus->dev, "Failed to write %s\n", input_dev->name);
437 goto err_free_mem;
438 }
439
440 revid = ad7879_read(bus, AD7879_REG_REVID);
441
442 if ((revid & 0xFF) != AD7879_DEVID) {
443 dev_err(&bus->dev, "Failed to probe %s\n", input_dev->name);
444 err = -ENODEV;
445 goto err_free_mem;
446 }
447
448 ad7879_setup(ts);
449
450 err = request_irq(bus->irq, ad7879_irq,
451 IRQF_TRIGGER_FALLING | IRQF_SAMPLE_RANDOM,
452 bus->dev.driver->name, ts);
453
454 if (err) {
455 dev_err(&bus->dev, "irq %d busy?\n", bus->irq);
456 goto err_free_mem;
457 }
458
459 err = sysfs_create_group(&bus->dev.kobj, &ad7879_attr_group);
460 if (err)
461 goto err_free_irq;
462
463 err = input_register_device(input_dev);
464 if (err)
465 goto err_remove_attr;
466
467 dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n",
468 revid >> 8, bus->irq);
469
470 return 0;
471
472err_remove_attr:
473 sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group);
474err_free_irq:
475 free_irq(bus->irq, ts);
476err_free_mem:
477 input_free_device(input_dev);
478
479 return err;
480}
481
482static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts)
483{
484 ad7879_disable(ts);
485 sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group);
486 free_irq(ts->bus->irq, ts);
487 input_unregister_device(ts->input);
488 dev_dbg(&bus->dev, "unregistered touchscreen\n");
489
490 return 0;
491}
492
493#ifdef CONFIG_PM
494static int ad7879_suspend(bus_device *bus, pm_message_t message)
495{
496 struct ad7879 *ts = dev_get_drvdata(&bus->dev);
497
498 ad7879_disable(ts);
499
500 return 0;
501}
502
503static int ad7879_resume(bus_device *bus)
504{
505 struct ad7879 *ts = dev_get_drvdata(&bus->dev);
506
507 ad7879_enable(ts);
508
509 return 0;
510}
511#else
512#define ad7879_suspend NULL
513#define ad7879_resume NULL
514#endif
515
516#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
517#define MAX_SPI_FREQ_HZ 5000000
518#define AD7879_CMD_MAGIC 0xE000
519#define AD7879_CMD_READ (1 << 10)
520#define AD7879_WRITECMD(reg) (AD7879_CMD_MAGIC | (reg & 0xF))
521#define AD7879_READCMD(reg) (AD7879_CMD_MAGIC | AD7879_CMD_READ | (reg & 0xF))
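Concretely, the command words these macros produce for the transfers used below work out to:

	AD7879_READCMD(AD7879_REG_XPLUS)  = 0xE000 | (1 << 10) | 0x8 = 0xE408
	AD7879_WRITECMD(AD7879_REG_CTRL2) = 0xE000 | 0x2              = 0xE002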
522
523struct ser_req {
524 u16 command;
525 u16 data;
526 struct spi_message msg;
527 struct spi_transfer xfer[2];
528};
529
530/*
531 * ad7879_read/write are only used for initial setup and for sysfs controls.
532 * The main traffic is done in ad7879_collect().
533 */
534
535static int ad7879_read(struct spi_device *spi, u8 reg)
536{
537 struct ser_req *req;
538 int status, ret;
539
540 req = kzalloc(sizeof *req, GFP_KERNEL);
541 if (!req)
542 return -ENOMEM;
543
544 spi_message_init(&req->msg);
545
546 req->command = (u16) AD7879_READCMD(reg);
547 req->xfer[0].tx_buf = &req->command;
548 req->xfer[0].len = 2;
549
550 req->xfer[1].rx_buf = &req->data;
551 req->xfer[1].len = 2;
552
553 spi_message_add_tail(&req->xfer[0], &req->msg);
554 spi_message_add_tail(&req->xfer[1], &req->msg);
555
556 status = spi_sync(spi, &req->msg);
557 ret = status ? : req->data;
558
559 kfree(req);
560
561 return ret;
562}
563
564static int ad7879_write(struct spi_device *spi, u8 reg, u16 val)
565{
566 struct ser_req *req;
567 int status;
568
569 req = kzalloc(sizeof *req, GFP_KERNEL);
570 if (!req)
571 return -ENOMEM;
572
573 spi_message_init(&req->msg);
574
575 req->command = (u16) AD7879_WRITECMD(reg);
576 req->xfer[0].tx_buf = &req->command;
577 req->xfer[0].len = 2;
578
579 req->data = val;
580 req->xfer[1].tx_buf = &req->data;
581 req->xfer[1].len = 2;
582
583 spi_message_add_tail(&req->xfer[0], &req->msg);
584 spi_message_add_tail(&req->xfer[1], &req->msg);
585
586 status = spi_sync(spi, &req->msg);
587
588 kfree(req);
589
590 return status;
591}
592
593static void ad7879_collect(struct ad7879 *ts)
594{
595 int status = spi_sync(ts->bus, &ts->msg);
596
597 if (status)
598 dev_err(&ts->bus->dev, "spi_sync --> %d\n", status);
599}
600
601static void ad7879_setup_ts_def_msg(struct ad7879 *ts)
602{
603 struct spi_message *m;
604 int i;
605
606 ts->cmd = (u16) AD7879_READCMD(AD7879_REG_XPLUS);
607
608 m = &ts->msg;
609 spi_message_init(m);
610 ts->xfer[0].tx_buf = &ts->cmd;
611 ts->xfer[0].len = 2;
612
613 spi_message_add_tail(&ts->xfer[0], m);
614
615 for (i = 0; i < AD7879_NR_SENSE; i++) {
616 ts->xfer[i + 1].rx_buf = &ts->conversion_data[i];
617 ts->xfer[i + 1].len = 2;
618 spi_message_add_tail(&ts->xfer[i + 1], m);
619 }
620}
621
622static int __devinit ad7879_probe(struct spi_device *spi)
623{
624 struct ad7879 *ts;
625 int error;
626
627 /* don't exceed max specified SPI CLK frequency */
628 if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) {
629 dev_err(&spi->dev, "SPI CLK %d Hz?\n", spi->max_speed_hz);
630 return -EINVAL;
631 }
632
633 ts = kzalloc(sizeof(struct ad7879), GFP_KERNEL);
634 if (!ts)
635 return -ENOMEM;
636
637 dev_set_drvdata(&spi->dev, ts);
638 ts->bus = spi;
639
640 ad7879_setup_ts_def_msg(ts);
641
642 error = ad7879_construct(spi, ts);
643 if (error) {
644 dev_set_drvdata(&spi->dev, NULL);
645 kfree(ts);
646 }
647
648	return error;	/* 0 on success */
649}
650
651static int __devexit ad7879_remove(struct spi_device *spi)
652{
653 struct ad7879 *ts = dev_get_drvdata(&spi->dev);
654
655 ad7879_destroy(spi, ts);
656 dev_set_drvdata(&spi->dev, NULL);
657 kfree(ts);
658
659 return 0;
660}
661
662static struct spi_driver ad7879_driver = {
663 .driver = {
664 .name = "ad7879",
665 .bus = &spi_bus_type,
666 .owner = THIS_MODULE,
667 },
668 .probe = ad7879_probe,
669 .remove = __devexit_p(ad7879_remove),
670 .suspend = ad7879_suspend,
671 .resume = ad7879_resume,
672};
673
674static int __init ad7879_init(void)
675{
676 return spi_register_driver(&ad7879_driver);
677}
678module_init(ad7879_init);
679
680static void __exit ad7879_exit(void)
681{
682 spi_unregister_driver(&ad7879_driver);
683}
684module_exit(ad7879_exit);
685
686#elif defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
687
688/* All registers are word-sized.
689 * AD7879 uses a high-byte first convention.
690 */
691static int ad7879_read(struct i2c_client *client, u8 reg)
692{
693 return swab16(i2c_smbus_read_word_data(client, reg));
694}
695
696static int ad7879_write(struct i2c_client *client, u8 reg, u16 val)
697{
698 return i2c_smbus_write_word_data(client, reg, swab16(val));
699}
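The swab16() calls bridge a byte-order mismatch: i2c_smbus_write_word_data() transmits the low byte of its 16-bit argument first (SMBus convention), while the AD7879 expects the register's high byte first, as noted in the comment above. For example (the register value is chosen arbitrarily):

	ad7879_write(client, AD7879_REG_CTRL1, 0x8EFF);
	/* swab16(0x8EFF) == 0xFF8E, so the bytes on the wire after the
	 * register address are 0x8E then 0xFF, i.e. high byte first. */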
700
701static void ad7879_collect(struct ad7879 *ts)
702{
703 int i;
704
705 for (i = 0; i < AD7879_NR_SENSE; i++)
706 ts->conversion_data[i] = ad7879_read(ts->bus,
707 AD7879_REG_XPLUS + i);
708}
709
710static int __devinit ad7879_probe(struct i2c_client *client,
711 const struct i2c_device_id *id)
712{
713 struct ad7879 *ts;
714 int error;
715
716 if (!i2c_check_functionality(client->adapter,
717 I2C_FUNC_SMBUS_WORD_DATA)) {
718		dev_err(&client->dev, "SMBus word data not supported\n");
719 return -EIO;
720 }
721
722 ts = kzalloc(sizeof(struct ad7879), GFP_KERNEL);
723 if (!ts)
724 return -ENOMEM;
725
726 i2c_set_clientdata(client, ts);
727 ts->bus = client;
728
729 error = ad7879_construct(client, ts);
730 if (error) {
731 i2c_set_clientdata(client, NULL);
732 kfree(ts);
733 }
734
735	return error;	/* 0 on success */
736}
737
738static int __devexit ad7879_remove(struct i2c_client *client)
739{
740 struct ad7879 *ts = dev_get_drvdata(&client->dev);
741
742 ad7879_destroy(client, ts);
743 i2c_set_clientdata(client, NULL);
744 kfree(ts);
745
746 return 0;
747}
748
749static const struct i2c_device_id ad7879_id[] = {
750 { "ad7879", 0 },
751 { }
752};
753MODULE_DEVICE_TABLE(i2c, ad7879_id);
754
755static struct i2c_driver ad7879_driver = {
756 .driver = {
757 .name = "ad7879",
758 .owner = THIS_MODULE,
759 },
760 .probe = ad7879_probe,
761 .remove = __devexit_p(ad7879_remove),
762 .suspend = ad7879_suspend,
763 .resume = ad7879_resume,
764 .id_table = ad7879_id,
765};
766
767static int __init ad7879_init(void)
768{
769 return i2c_add_driver(&ad7879_driver);
770}
771module_init(ad7879_init);
772
773static void __exit ad7879_exit(void)
774{
775 i2c_del_driver(&ad7879_driver);
776}
777module_exit(ad7879_exit);
778#endif
779
780MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
781MODULE_DESCRIPTION("AD7879(-1) touchscreen driver");
782MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 1d11e2be9ef8..dfa6a84ab50a 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -162,6 +162,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
162 input_report_abs(wm->input_dev, ABS_X, x & 0xfff); 162 input_report_abs(wm->input_dev, ABS_X, x & 0xfff);
163 input_report_abs(wm->input_dev, ABS_Y, y & 0xfff); 163 input_report_abs(wm->input_dev, ABS_Y, y & 0xfff);
164 input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff); 164 input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
165 input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
165 input_sync(wm->input_dev); 166 input_sync(wm->input_dev);
166 reads++; 167 reads++;
167 } while (reads < cinfo[sp_idx].reads); 168 } while (reads < cinfo[sp_idx].reads);
@@ -245,7 +246,7 @@ static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
245 if (enable) 246 if (enable)
246 enable_irq(wm->pen_irq); 247 enable_irq(wm->pen_irq);
247 else 248 else
248 disable_irq(wm->pen_irq); 249 disable_irq_nosync(wm->pen_irq);
249} 250}
250 251
251static struct wm97xx_mach_ops mainstone_mach_ops = { 252static struct wm97xx_mach_ops mainstone_mach_ops = {
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 54986627def0..e868264fe799 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -151,12 +151,14 @@ static void ucb1400_ts_evt_add(struct input_dev *idev, u16 pressure, u16 x, u16
151 input_report_abs(idev, ABS_X, x); 151 input_report_abs(idev, ABS_X, x);
152 input_report_abs(idev, ABS_Y, y); 152 input_report_abs(idev, ABS_Y, y);
153 input_report_abs(idev, ABS_PRESSURE, pressure); 153 input_report_abs(idev, ABS_PRESSURE, pressure);
154 input_report_key(idev, BTN_TOUCH, 1);
154 input_sync(idev); 155 input_sync(idev);
155} 156}
156 157
157static void ucb1400_ts_event_release(struct input_dev *idev) 158static void ucb1400_ts_event_release(struct input_dev *idev)
158{ 159{
159 input_report_abs(idev, ABS_PRESSURE, 0); 160 input_report_abs(idev, ABS_PRESSURE, 0);
161 input_report_key(idev, BTN_TOUCH, 0);
160 input_sync(idev); 162 input_sync(idev);
161} 163}
162 164
@@ -377,7 +379,8 @@ static int ucb1400_ts_probe(struct platform_device *dev)
377 ucb->ts_idev->id.product = ucb->id; 379 ucb->ts_idev->id.product = ucb->id;
378 ucb->ts_idev->open = ucb1400_ts_open; 380 ucb->ts_idev->open = ucb1400_ts_open;
379 ucb->ts_idev->close = ucb1400_ts_close; 381 ucb->ts_idev->close = ucb1400_ts_close;
380 ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS); 382 ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
383 ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
381 384
382 ucb1400_adc_enable(ucb->ac97); 385 ucb1400_adc_enable(ucb->ac97);
383 x_res = ucb1400_ts_read_xres(ucb); 386 x_res = ucb1400_ts_read_xres(ucb);
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index d15aa11d7056..cec480bffe38 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -409,6 +409,7 @@ static int wm97xx_read_samples(struct wm97xx *wm)
409 wm->pen_is_down = 0; 409 wm->pen_is_down = 0;
410 dev_dbg(wm->dev, "pen up\n"); 410 dev_dbg(wm->dev, "pen up\n");
411 input_report_abs(wm->input_dev, ABS_PRESSURE, 0); 411 input_report_abs(wm->input_dev, ABS_PRESSURE, 0);
412 input_report_key(wm->input_dev, BTN_TOUCH, 0);
412 input_sync(wm->input_dev); 413 input_sync(wm->input_dev);
413 } else if (!(rc & RC_AGAIN)) { 414 } else if (!(rc & RC_AGAIN)) {
414 /* We need high frequency updates only while 415 /* We need high frequency updates only while
@@ -433,6 +434,7 @@ static int wm97xx_read_samples(struct wm97xx *wm)
433 input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff); 434 input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff);
434 input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff); 435 input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff);
435 input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff); 436 input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff);
437 input_report_key(wm->input_dev, BTN_TOUCH, 1);
436 input_sync(wm->input_dev); 438 input_sync(wm->input_dev);
437 wm->pen_is_down = 1; 439 wm->pen_is_down = 1;
438 wm->ts_reader_interval = wm->ts_reader_min_interval; 440 wm->ts_reader_interval = wm->ts_reader_min_interval;
@@ -628,18 +630,21 @@ static int wm97xx_probe(struct device *dev)
628 wm->input_dev->phys = "wm97xx"; 630 wm->input_dev->phys = "wm97xx";
629 wm->input_dev->open = wm97xx_ts_input_open; 631 wm->input_dev->open = wm97xx_ts_input_open;
630 wm->input_dev->close = wm97xx_ts_input_close; 632 wm->input_dev->close = wm97xx_ts_input_close;
631 set_bit(EV_ABS, wm->input_dev->evbit); 633
632 set_bit(ABS_X, wm->input_dev->absbit); 634 __set_bit(EV_ABS, wm->input_dev->evbit);
633 set_bit(ABS_Y, wm->input_dev->absbit); 635 __set_bit(EV_KEY, wm->input_dev->evbit);
634 set_bit(ABS_PRESSURE, wm->input_dev->absbit); 636 __set_bit(BTN_TOUCH, wm->input_dev->keybit);
637
635 input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1], 638 input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
636 abs_x[2], 0); 639 abs_x[2], 0);
637 input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1], 640 input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
638 abs_y[2], 0); 641 abs_y[2], 0);
639 input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1], 642 input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1],
640 abs_p[2], 0); 643 abs_p[2], 0);
644
641 input_set_drvdata(wm->input_dev, wm); 645 input_set_drvdata(wm->input_dev, wm);
642 wm->input_dev->dev.parent = dev; 646 wm->input_dev->dev.parent = dev;
647
643 ret = input_register_device(wm->input_dev); 648 ret = input_register_device(wm->input_dev);
644 if (ret < 0) 649 if (ret < 0)
645 goto dev_alloc_err; 650 goto dev_alloc_err;
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
new file mode 100644
index 000000000000..41e4359c277c
--- /dev/null
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -0,0 +1,240 @@
1/*
2 * zylonite-wm97xx.c -- Zylonite Continuous Touch screen driver
3 *
4 * Copyright 2004, 2007, 2008 Wolfson Microelectronics PLC.
5 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
6 * Parts Copyright : Ian Molton <spyro@f2s.com>
7 * Andrew Zabolotny <zap@homelink.ru>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * Notes:
15 * This is a wm97xx extended touch driver supporting interrupt-driven
16 * and continuous operation on Marvell Zylonite development systems
17 * (which have a WM9713 on board).
18 */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/irq.h>
26#include <linux/interrupt.h>
27#include <linux/io.h>
28#include <linux/wm97xx.h>
29
30#include <mach/hardware.h>
31#include <mach/mfp.h>
32#include <mach/regs-ac97.h>
33
34struct continuous {
35 u16 id; /* codec id */
36 u8 code; /* continuous code */
37 u8 reads; /* number of coord reads per read cycle */
38 u32 speed; /* number of coords per second */
39};
40
41#define WM_READS(sp) ((sp / HZ) + 1)
42
43static const struct continuous cinfo[] = {
44 { WM9713_ID2, 0, WM_READS(94), 94 },
45 { WM9713_ID2, 1, WM_READS(120), 120 },
46 { WM9713_ID2, 2, WM_READS(154), 154 },
47 { WM9713_ID2, 3, WM_READS(188), 188 },
48};
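WM_READS() sizes the coordinate burst drained on each timer tick from the requested sample rate. With integer division, and assuming HZ = 100 (HZ is a kernel configuration value, so the exact figures vary):

	WM_READS(94)  = 94 / 100  + 1 = 1 coordinate set per tick
	WM_READS(188) = 188 / 100 + 1 = 2 coordinate sets per tick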
49
50/* continuous speed index */
51static int sp_idx;
52
53/*
54 * Pen sampling frequency (Hz) in continuous mode.
55 */
56static int cont_rate = 200;
57module_param(cont_rate, int, 0);
58MODULE_PARM_DESC(cont_rate, "Sampling rate in continuous mode (Hz)");
59
60/*
61 * Pressure readback.
62 *
63 * Set to 1 to read back pen down pressure
64 */
65static int pressure;
66module_param(pressure, int, 0);
67MODULE_PARM_DESC(pressure, "Pressure readback (1 = pressure, 0 = no pressure)");
68
69/*
70 * AC97 touch data slot.
71 *
72 * AC97 slot used for touchscreen readback data
73 */
74static int ac97_touch_slot = 5;
75module_param(ac97_touch_slot, int, 0);
76MODULE_PARM_DESC(ac97_touch_slot, "AC97 slot number used for touchscreen data");
77
78
79/* flush AC97 slot 5 FIFO machines */
80static void wm97xx_acc_pen_up(struct wm97xx *wm)
81{
82 int i;
83
84 msleep(1);
85
86 for (i = 0; i < 16; i++)
87 MODR;
88}
89
90static int wm97xx_acc_pen_down(struct wm97xx *wm)
91{
92 u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
93 int reads = 0;
94 static u16 last, tries;
95
96	/* When the AC97 queue has been drained we need to allow time
97	 * to buffer up samples, otherwise we end up spinning and polling
98	 * for samples. The controller can't have a suitably low
99	 * threshold set to use the notifications it gives.
100 */
101 msleep(1);
102
103 if (tries > 5) {
104 tries = 0;
105 return RC_PENUP;
106 }
107
108 x = MODR;
109 if (x == last) {
110 tries++;
111 return RC_AGAIN;
112 }
113 last = x;
114 do {
115 if (reads)
116 x = MODR;
117 y = MODR;
118 if (pressure)
119 p = MODR;
120
121 /* are samples valid */
122 if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X ||
123 (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y ||
124 (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES)
125 goto up;
126
127 /* coordinate is good */
128 tries = 0;
129 input_report_abs(wm->input_dev, ABS_X, x & 0xfff);
130 input_report_abs(wm->input_dev, ABS_Y, y & 0xfff);
131 input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
132 input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
133 input_sync(wm->input_dev);
134 reads++;
135 } while (reads < cinfo[sp_idx].reads);
136up:
137 return RC_PENDOWN | RC_AGAIN;
138}
139
140static int wm97xx_acc_startup(struct wm97xx *wm)
141{
142 int idx;
143
144 /* check we have a codec */
145 if (wm->ac97 == NULL)
146 return -ENODEV;
147
148 /* Go you big red fire engine */
149 for (idx = 0; idx < ARRAY_SIZE(cinfo); idx++) {
150 if (wm->id != cinfo[idx].id)
151 continue;
152 sp_idx = idx;
153 if (cont_rate <= cinfo[idx].speed)
154 break;
155 }
156 wm->acc_rate = cinfo[sp_idx].code;
157 wm->acc_slot = ac97_touch_slot;
158 dev_info(wm->dev,
159 "zylonite accelerated touchscreen driver, %d samples/sec\n",
160 cinfo[sp_idx].speed);
161
162 return 0;
163}
164
165static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
166{
167 if (enable)
168 enable_irq(wm->pen_irq);
169 else
170 disable_irq_nosync(wm->pen_irq);
171}
172
173static struct wm97xx_mach_ops zylonite_mach_ops = {
174 .acc_enabled = 1,
175 .acc_pen_up = wm97xx_acc_pen_up,
176 .acc_pen_down = wm97xx_acc_pen_down,
177 .acc_startup = wm97xx_acc_startup,
178 .irq_enable = wm97xx_irq_enable,
179 .irq_gpio = WM97XX_GPIO_2,
180};
181
182static int zylonite_wm97xx_probe(struct platform_device *pdev)
183{
184 struct wm97xx *wm = platform_get_drvdata(pdev);
185 int gpio_touch_irq;
186
187 if (cpu_is_pxa320())
188 gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO15);
189 else
190 gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
191
192 wm->pen_irq = IRQ_GPIO(gpio_touch_irq);
193 set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
194
195 wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
196 WM97XX_GPIO_POL_HIGH,
197 WM97XX_GPIO_STICKY,
198 WM97XX_GPIO_WAKE);
199 wm97xx_config_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_OUT,
200 WM97XX_GPIO_POL_HIGH,
201 WM97XX_GPIO_NOTSTICKY,
202 WM97XX_GPIO_NOWAKE);
203
204 return wm97xx_register_mach_ops(wm, &zylonite_mach_ops);
205}
206
207static int zylonite_wm97xx_remove(struct platform_device *pdev)
208{
209 struct wm97xx *wm = platform_get_drvdata(pdev);
210
211 wm97xx_unregister_mach_ops(wm);
212
213 return 0;
214}
215
216static struct platform_driver zylonite_wm97xx_driver = {
217 .probe = zylonite_wm97xx_probe,
218 .remove = zylonite_wm97xx_remove,
219 .driver = {
220 .name = "wm97xx-touch",
221 },
222};
223
224static int __init zylonite_wm97xx_init(void)
225{
226 return platform_driver_register(&zylonite_wm97xx_driver);
227}
228
229static void __exit zylonite_wm97xx_exit(void)
230{
231 platform_driver_unregister(&zylonite_wm97xx_driver);
232}
233
234module_init(zylonite_wm97xx_init);
235module_exit(zylonite_wm97xx_exit);
236
237/* Module information */
238MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
239MODULE_DESCRIPTION("wm97xx continuous touch driver for Zylonite");
240MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 102ef4a14c5f..d2109054de85 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -82,7 +82,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
82 if (!gpio_is_valid(template->gpio)) { 82 if (!gpio_is_valid(template->gpio)) {
83 printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n", 83 printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
84 template->gpio, template->name); 84 template->gpio, template->name);
85 return; 85 return 0;
86 } 86 }
87 87
88 ret = gpio_request(template->gpio, template->name); 88 ret = gpio_request(template->gpio, template->name);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index f01096549a93..823ceba6efa8 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1047,6 +1047,19 @@ static int populate_table(struct dm_table *table,
1047 return dm_table_complete(table); 1047 return dm_table_complete(table);
1048} 1048}
1049 1049
1050static int table_prealloc_integrity(struct dm_table *t,
1051 struct mapped_device *md)
1052{
1053 struct list_head *devices = dm_table_get_devices(t);
1054 struct dm_dev_internal *dd;
1055
1056 list_for_each_entry(dd, devices, list)
1057 if (bdev_get_integrity(dd->dm_dev.bdev))
1058 return blk_integrity_register(dm_disk(md), NULL);
1059
1060 return 0;
1061}
1062
1050static int table_load(struct dm_ioctl *param, size_t param_size) 1063static int table_load(struct dm_ioctl *param, size_t param_size)
1051{ 1064{
1052 int r; 1065 int r;
@@ -1068,6 +1081,14 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
1068 goto out; 1081 goto out;
1069 } 1082 }
1070 1083
1084 r = table_prealloc_integrity(t, md);
1085 if (r) {
1086 DMERR("%s: could not register integrity profile.",
1087 dm_device_name(md));
1088 dm_table_destroy(t);
1089 goto out;
1090 }
1091
1071 down_write(&_hash_lock); 1092 down_write(&_hash_lock);
1072 hc = dm_get_mdptr(md); 1093 hc = dm_get_mdptr(md);
1073 if (!hc || hc->md != md) { 1094 if (!hc || hc->md != md) {
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 0a225da21272..3e3fc06cb861 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -297,7 +297,8 @@ static int run_complete_job(struct kcopyd_job *job)
297 dm_kcopyd_notify_fn fn = job->fn; 297 dm_kcopyd_notify_fn fn = job->fn;
298 struct dm_kcopyd_client *kc = job->kc; 298 struct dm_kcopyd_client *kc = job->kc;
299 299
300 kcopyd_put_pages(kc, job->pages); 300 if (job->pages)
301 kcopyd_put_pages(kc, job->pages);
301 mempool_free(job, kc->job_pool); 302 mempool_free(job, kc->job_pool);
302 fn(read_err, write_err, context); 303 fn(read_err, write_err, context);
303 304
@@ -461,6 +462,7 @@ static void segment_complete(int read_err, unsigned long write_err,
461 sector_t progress = 0; 462 sector_t progress = 0;
462 sector_t count = 0; 463 sector_t count = 0;
463 struct kcopyd_job *job = (struct kcopyd_job *) context; 464 struct kcopyd_job *job = (struct kcopyd_job *) context;
465 struct dm_kcopyd_client *kc = job->kc;
464 466
465 mutex_lock(&job->lock); 467 mutex_lock(&job->lock);
466 468
@@ -490,7 +492,7 @@ static void segment_complete(int read_err, unsigned long write_err,
490 492
491 if (count) { 493 if (count) {
492 int i; 494 int i;
493 struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool, 495 struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
494 GFP_NOIO); 496 GFP_NOIO);
495 497
496 *sub_job = *job; 498 *sub_job = *job;
@@ -509,13 +511,16 @@ static void segment_complete(int read_err, unsigned long write_err,
509 } else if (atomic_dec_and_test(&job->sub_jobs)) { 511 } else if (atomic_dec_and_test(&job->sub_jobs)) {
510 512
511 /* 513 /*
512 * To avoid a race we must keep the job around 514 * Queue the completion callback to the kcopyd thread.
513 * until after the notify function has completed. 515 *
514 * Otherwise the client may try and stop the job 516 * Some callers assume that all the completions are called
515 * after we've completed. 517 * from a single thread and don't race with each other.
518 *
519 * We must not call the callback directly here because this
520 * code may not be executing in the thread.
516 */ 521 */
517 job->fn(read_err, write_err, job->context); 522 push(&kc->complete_jobs, job);
518 mempool_free(job, job->kc->job_pool); 523 wake(kc);
519 } 524 }
520} 525}
521 526
@@ -528,6 +533,8 @@ static void split_job(struct kcopyd_job *job)
528{ 533{
529 int i; 534 int i;
530 535
536 atomic_inc(&job->kc->nr_jobs);
537
531 atomic_set(&job->sub_jobs, SPLIT_COUNT); 538 atomic_set(&job->sub_jobs, SPLIT_COUNT);
532 for (i = 0; i < SPLIT_COUNT; i++) 539 for (i = 0; i < SPLIT_COUNT; i++)
533 segment_complete(0, 0u, job); 540 segment_complete(0, 0u, job);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index bfa107f59d96..79fb53e51c70 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -142,7 +142,6 @@ static struct target_type linear_target = {
142 .status = linear_status, 142 .status = linear_status,
143 .ioctl = linear_ioctl, 143 .ioctl = linear_ioctl,
144 .merge = linear_merge, 144 .merge = linear_merge,
145 .features = DM_TARGET_SUPPORTS_BARRIERS,
146}; 145};
147 146
148int __init dm_linear_init(void) 147int __init dm_linear_init(void)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e8361b191b9b..429b50b975d5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -52,8 +52,6 @@ struct dm_table {
52 sector_t *highs; 52 sector_t *highs;
53 struct dm_target *targets; 53 struct dm_target *targets;
54 54
55 unsigned barriers_supported:1;
56
57 /* 55 /*
58 * Indicates the rw permissions for the new logical 56 * Indicates the rw permissions for the new logical
59 * device. This should be a combination of FMODE_READ 57 * device. This should be a combination of FMODE_READ
@@ -243,7 +241,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
243 241
244 INIT_LIST_HEAD(&t->devices); 242 INIT_LIST_HEAD(&t->devices);
245 atomic_set(&t->holders, 0); 243 atomic_set(&t->holders, 0);
246 t->barriers_supported = 1;
247 244
248 if (!num_targets) 245 if (!num_targets)
249 num_targets = KEYS_PER_NODE; 246 num_targets = KEYS_PER_NODE;
@@ -751,10 +748,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
751 /* FIXME: the plan is to combine high here and then have 748 /* FIXME: the plan is to combine high here and then have
752 * the merge fn apply the target level restrictions. */ 749 * the merge fn apply the target level restrictions. */
753 combine_restrictions_low(&t->limits, &tgt->limits); 750 combine_restrictions_low(&t->limits, &tgt->limits);
754
755 if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS))
756 t->barriers_supported = 0;
757
758 return 0; 751 return 0;
759 752
760 bad: 753 bad:
@@ -799,12 +792,6 @@ int dm_table_complete(struct dm_table *t)
799 792
800 check_for_valid_limits(&t->limits); 793 check_for_valid_limits(&t->limits);
801 794
802 /*
803 * We only support barriers if there is exactly one underlying device.
804 */
805 if (!list_is_singular(&t->devices))
806 t->barriers_supported = 0;
807
808 /* how many indexes will the btree have ? */ 795 /* how many indexes will the btree have ? */
809 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); 796 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
810 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); 797 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
@@ -879,6 +866,45 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
879 return &t->targets[(KEYS_PER_NODE * n) + k]; 866 return &t->targets[(KEYS_PER_NODE * n) + k];
880} 867}
881 868
869/*
870 * Set the integrity profile for this device if all devices used have
871 * matching profiles.
872 */
873static void dm_table_set_integrity(struct dm_table *t)
874{
875 struct list_head *devices = dm_table_get_devices(t);
876 struct dm_dev_internal *prev = NULL, *dd = NULL;
877
878 if (!blk_get_integrity(dm_disk(t->md)))
879 return;
880
881 list_for_each_entry(dd, devices, list) {
882 if (prev &&
883 blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
884 dd->dm_dev.bdev->bd_disk) < 0) {
885 DMWARN("%s: integrity not set: %s and %s mismatch",
886 dm_device_name(t->md),
887 prev->dm_dev.bdev->bd_disk->disk_name,
888 dd->dm_dev.bdev->bd_disk->disk_name);
889 goto no_integrity;
890 }
891 prev = dd;
892 }
893
894 if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
895 goto no_integrity;
896
897 blk_integrity_register(dm_disk(t->md),
898 bdev_get_integrity(prev->dm_dev.bdev));
899
900 return;
901
902no_integrity:
903 blk_integrity_register(dm_disk(t->md), NULL);
904
905 return;
906}
907
882void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) 908void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
883{ 909{
884 /* 910 /*
@@ -899,6 +925,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
899 else 925 else
900 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); 926 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
901 927
928 dm_table_set_integrity(t);
902} 929}
903 930
904unsigned int dm_table_get_num_targets(struct dm_table *t) 931unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -1019,12 +1046,6 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
1019 return t->md; 1046 return t->md;
1020} 1047}
1021 1048
1022int dm_table_barrier_ok(struct dm_table *t)
1023{
1024 return t->barriers_supported;
1025}
1026EXPORT_SYMBOL(dm_table_barrier_ok);
1027
1028EXPORT_SYMBOL(dm_vcalloc); 1049EXPORT_SYMBOL(dm_vcalloc);
1029EXPORT_SYMBOL(dm_get_device); 1050EXPORT_SYMBOL(dm_get_device);
1030EXPORT_SYMBOL(dm_put_device); 1051EXPORT_SYMBOL(dm_put_device);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 788ba96a6256..8a994be035ba 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -89,12 +89,13 @@ union map_info *dm_get_mapinfo(struct bio *bio)
89/* 89/*
90 * Bits for the md->flags field. 90 * Bits for the md->flags field.
91 */ 91 */
92#define DMF_BLOCK_IO 0 92#define DMF_BLOCK_IO_FOR_SUSPEND 0
93#define DMF_SUSPENDED 1 93#define DMF_SUSPENDED 1
94#define DMF_FROZEN 2 94#define DMF_FROZEN 2
95#define DMF_FREEING 3 95#define DMF_FREEING 3
96#define DMF_DELETING 4 96#define DMF_DELETING 4
97#define DMF_NOFLUSH_SUSPENDING 5 97#define DMF_NOFLUSH_SUSPENDING 5
98#define DMF_QUEUE_IO_TO_THREAD 6
98 99
99/* 100/*
100 * Work processed by per-device workqueue. 101 * Work processed by per-device workqueue.
@@ -124,6 +125,11 @@ struct mapped_device {
124 spinlock_t deferred_lock; 125 spinlock_t deferred_lock;
125 126
126 /* 127 /*
128 * An error from the barrier request currently being processed.
129 */
130 int barrier_error;
131
132 /*
127 * Processing queue (flush/barriers) 133 * Processing queue (flush/barriers)
128 */ 134 */
129 struct workqueue_struct *wq; 135 struct workqueue_struct *wq;
@@ -424,6 +430,10 @@ static void end_io_acct(struct dm_io *io)
424 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); 430 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
425 part_stat_unlock(); 431 part_stat_unlock();
426 432
433 /*
434 * After this is decremented the bio must not be touched if it is
435 * a barrier.
436 */
427 dm_disk(md)->part0.in_flight = pending = 437 dm_disk(md)->part0.in_flight = pending =
428 atomic_dec_return(&md->pending); 438 atomic_dec_return(&md->pending);
429 439
@@ -435,21 +445,18 @@ static void end_io_acct(struct dm_io *io)
435/* 445/*
436 * Add the bio to the list of deferred io. 446 * Add the bio to the list of deferred io.
437 */ 447 */
438static int queue_io(struct mapped_device *md, struct bio *bio) 448static void queue_io(struct mapped_device *md, struct bio *bio)
439{ 449{
440 down_write(&md->io_lock); 450 down_write(&md->io_lock);
441 451
442 if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
443 up_write(&md->io_lock);
444 return 1;
445 }
446
447 spin_lock_irq(&md->deferred_lock); 452 spin_lock_irq(&md->deferred_lock);
448 bio_list_add(&md->deferred, bio); 453 bio_list_add(&md->deferred, bio);
449 spin_unlock_irq(&md->deferred_lock); 454 spin_unlock_irq(&md->deferred_lock);
450 455
456 if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
457 queue_work(md->wq, &md->work);
458
451 up_write(&md->io_lock); 459 up_write(&md->io_lock);
452 return 0; /* deferred successfully */
453} 460}
454 461
455/* 462/*
@@ -533,25 +540,35 @@ static void dec_pending(struct dm_io *io, int error)
533 */ 540 */
534 spin_lock_irqsave(&md->deferred_lock, flags); 541 spin_lock_irqsave(&md->deferred_lock, flags);
535 if (__noflush_suspending(md)) 542 if (__noflush_suspending(md))
536 bio_list_add(&md->deferred, io->bio); 543 bio_list_add_head(&md->deferred, io->bio);
537 else 544 else
538 /* noflush suspend was interrupted. */ 545 /* noflush suspend was interrupted. */
539 io->error = -EIO; 546 io->error = -EIO;
540 spin_unlock_irqrestore(&md->deferred_lock, flags); 547 spin_unlock_irqrestore(&md->deferred_lock, flags);
541 } 548 }
542 549
543 end_io_acct(io);
544
545 io_error = io->error; 550 io_error = io->error;
546 bio = io->bio; 551 bio = io->bio;
547 552
548 free_io(md, io); 553 if (bio_barrier(bio)) {
554 /*
555 * There can be just one barrier request so we use
556 * a per-device variable for error reporting.
557 * Note that you can't touch the bio after end_io_acct
558 */
559 md->barrier_error = io_error;
560 end_io_acct(io);
561 } else {
562 end_io_acct(io);
549 563
550 if (io_error != DM_ENDIO_REQUEUE) { 564 if (io_error != DM_ENDIO_REQUEUE) {
551 trace_block_bio_complete(md->queue, bio); 565 trace_block_bio_complete(md->queue, bio);
552 566
553 bio_endio(bio, io_error); 567 bio_endio(bio, io_error);
568 }
554 } 569 }
570
571 free_io(md, io);
555 } 572 }
556} 573}
557 574
@@ -693,13 +710,19 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
693 710
694 clone->bi_sector = sector; 711 clone->bi_sector = sector;
695 clone->bi_bdev = bio->bi_bdev; 712 clone->bi_bdev = bio->bi_bdev;
696 clone->bi_rw = bio->bi_rw; 713 clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
697 clone->bi_vcnt = 1; 714 clone->bi_vcnt = 1;
698 clone->bi_size = to_bytes(len); 715 clone->bi_size = to_bytes(len);
699 clone->bi_io_vec->bv_offset = offset; 716 clone->bi_io_vec->bv_offset = offset;
700 clone->bi_io_vec->bv_len = clone->bi_size; 717 clone->bi_io_vec->bv_len = clone->bi_size;
701 clone->bi_flags |= 1 << BIO_CLONED; 718 clone->bi_flags |= 1 << BIO_CLONED;
702 719
720 if (bio_integrity(bio)) {
721 bio_integrity_clone(clone, bio, GFP_NOIO);
722 bio_integrity_trim(clone,
723 bio_sector_offset(bio, idx, offset), len);
724 }
725
703 return clone; 726 return clone;
704} 727}
705 728
@@ -714,6 +737,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
714 737
715 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); 738 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
716 __bio_clone(clone, bio); 739 __bio_clone(clone, bio);
740 clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
717 clone->bi_destructor = dm_bio_destructor; 741 clone->bi_destructor = dm_bio_destructor;
718 clone->bi_sector = sector; 742 clone->bi_sector = sector;
719 clone->bi_idx = idx; 743 clone->bi_idx = idx;
@@ -721,6 +745,14 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
721 clone->bi_size = to_bytes(len); 745 clone->bi_size = to_bytes(len);
722 clone->bi_flags &= ~(1 << BIO_SEG_VALID); 746 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
723 747
748 if (bio_integrity(bio)) {
749 bio_integrity_clone(clone, bio, GFP_NOIO);
750
751 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
752 bio_integrity_trim(clone,
753 bio_sector_offset(bio, idx, 0), len);
754 }
755
724 return clone; 756 return clone;
725} 757}
726 758
@@ -834,14 +866,13 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
834 866
835 ci.map = dm_get_table(md); 867 ci.map = dm_get_table(md);
836 if (unlikely(!ci.map)) { 868 if (unlikely(!ci.map)) {
837 bio_io_error(bio); 869 if (!bio_barrier(bio))
838 return; 870 bio_io_error(bio);
839 } 871 else
840 if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) { 872 md->barrier_error = -EIO;
841 dm_table_put(ci.map);
842 bio_endio(bio, -EOPNOTSUPP);
843 return; 873 return;
844 } 874 }
875
845 ci.md = md; 876 ci.md = md;
846 ci.bio = bio; 877 ci.bio = bio;
847 ci.io = alloc_io(md); 878 ci.io = alloc_io(md);
@@ -918,7 +949,6 @@ out:
918 */ 949 */
919static int dm_request(struct request_queue *q, struct bio *bio) 950static int dm_request(struct request_queue *q, struct bio *bio)
920{ 951{
921 int r = -EIO;
922 int rw = bio_data_dir(bio); 952 int rw = bio_data_dir(bio);
923 struct mapped_device *md = q->queuedata; 953 struct mapped_device *md = q->queuedata;
924 int cpu; 954 int cpu;
@@ -931,34 +961,27 @@ static int dm_request(struct request_queue *q, struct bio *bio)
931 part_stat_unlock(); 961 part_stat_unlock();
932 962
933 /* 963 /*
934 * If we're suspended we have to queue 964 * If we're suspended or the thread is processing barriers
935 * this io for later. 965 * we have to queue this io for later.
936 */ 966 */
937 while (test_bit(DMF_BLOCK_IO, &md->flags)) { 967 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
968 unlikely(bio_barrier(bio))) {
938 up_read(&md->io_lock); 969 up_read(&md->io_lock);
939 970
940 if (bio_rw(bio) != READA) 971 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
941 r = queue_io(md, bio); 972 bio_rw(bio) == READA) {
973 bio_io_error(bio);
974 return 0;
975 }
942 976
943 if (r <= 0) 977 queue_io(md, bio);
944 goto out_req;
945 978
946 /* 979 return 0;
947 * We're in a while loop, because someone could suspend
948 * before we get to the following read lock.
949 */
950 down_read(&md->io_lock);
951 } 980 }
952 981
953 __split_and_process_bio(md, bio); 982 __split_and_process_bio(md, bio);
954 up_read(&md->io_lock); 983 up_read(&md->io_lock);
955 return 0; 984 return 0;
956
957out_req:
958 if (r < 0)
959 bio_io_error(bio);
960
961 return 0;
962} 985}
963 986
964static void dm_unplug_all(struct request_queue *q) 987static void dm_unplug_all(struct request_queue *q)
@@ -978,7 +1001,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
978 struct mapped_device *md = congested_data; 1001 struct mapped_device *md = congested_data;
979 struct dm_table *map; 1002 struct dm_table *map;
980 1003
981 if (!test_bit(DMF_BLOCK_IO, &md->flags)) { 1004 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
982 map = dm_get_table(md); 1005 map = dm_get_table(md);
983 if (map) { 1006 if (map) {
984 r = dm_table_any_congested(map, bdi_bits); 1007 r = dm_table_any_congested(map, bdi_bits);
@@ -1193,6 +1216,7 @@ static void free_dev(struct mapped_device *md)
1193 mempool_destroy(md->tio_pool); 1216 mempool_destroy(md->tio_pool);
1194 mempool_destroy(md->io_pool); 1217 mempool_destroy(md->io_pool);
1195 bioset_free(md->bs); 1218 bioset_free(md->bs);
1219 blk_integrity_unregister(md->disk);
1196 del_gendisk(md->disk); 1220 del_gendisk(md->disk);
1197 free_minor(minor); 1221 free_minor(minor);
1198 1222
@@ -1406,6 +1430,36 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
1406 return r; 1430 return r;
1407} 1431}
1408 1432
1433static int dm_flush(struct mapped_device *md)
1434{
1435 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
1436 return 0;
1437}
1438
1439static void process_barrier(struct mapped_device *md, struct bio *bio)
1440{
1441 int error = dm_flush(md);
1442
1443 if (unlikely(error)) {
1444 bio_endio(bio, error);
1445 return;
1446 }
1447 if (bio_empty_barrier(bio)) {
1448 bio_endio(bio, 0);
1449 return;
1450 }
1451
1452 __split_and_process_bio(md, bio);
1453
1454 error = dm_flush(md);
1455
1456 if (!error && md->barrier_error)
1457 error = md->barrier_error;
1458
1459 if (md->barrier_error != DM_ENDIO_REQUEUE)
1460 bio_endio(bio, error);
1461}
1462
1409/* 1463/*
1410 * Process the deferred bios 1464 * Process the deferred bios
1411 */ 1465 */
@@ -1417,25 +1471,34 @@ static void dm_wq_work(struct work_struct *work)
1417 1471
1418 down_write(&md->io_lock); 1472 down_write(&md->io_lock);
1419 1473
1420next_bio: 1474 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1421 spin_lock_irq(&md->deferred_lock); 1475 spin_lock_irq(&md->deferred_lock);
1422 c = bio_list_pop(&md->deferred); 1476 c = bio_list_pop(&md->deferred);
1423 spin_unlock_irq(&md->deferred_lock); 1477 spin_unlock_irq(&md->deferred_lock);
1424 1478
1425 if (c) { 1479 if (!c) {
1426 __split_and_process_bio(md, c); 1480 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
1427 goto next_bio; 1481 break;
1428 } 1482 }
1429 1483
1430 clear_bit(DMF_BLOCK_IO, &md->flags); 1484 up_write(&md->io_lock);
1485
1486 if (bio_barrier(c))
1487 process_barrier(md, c);
1488 else
1489 __split_and_process_bio(md, c);
1490
1491 down_write(&md->io_lock);
1492 }
1431 1493
1432 up_write(&md->io_lock); 1494 up_write(&md->io_lock);
1433} 1495}
1434 1496
1435static void dm_queue_flush(struct mapped_device *md) 1497static void dm_queue_flush(struct mapped_device *md)
1436{ 1498{
1499 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
1500 smp_mb__after_clear_bit();
1437 queue_work(md->wq, &md->work); 1501 queue_work(md->wq, &md->work);
1438 flush_workqueue(md->wq);
1439} 1502}
1440 1503
1441/* 1504/*
@@ -1553,20 +1616,36 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1553 } 1616 }
1554 1617
1555 /* 1618 /*
1556 * First we set the BLOCK_IO flag so no more ios will be mapped. 1619 * Here we must make sure that no processes are submitting requests
1620 * to target drivers i.e. no one may be executing
1621 * __split_and_process_bio. This is called from dm_request and
1622 * dm_wq_work.
1623 *
1624 * To get all processes out of __split_and_process_bio in dm_request,
1625 * we take the write lock. To prevent any process from reentering
1626 * __split_and_process_bio from dm_request, we set
1627 * DMF_QUEUE_IO_TO_THREAD.
1628 *
1629 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
1630 * and call flush_workqueue(md->wq). flush_workqueue will wait until
1631 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
1632 * further calls to __split_and_process_bio from dm_wq_work.
1557 */ 1633 */
1558 down_write(&md->io_lock); 1634 down_write(&md->io_lock);
1559 set_bit(DMF_BLOCK_IO, &md->flags); 1635 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
1560 1636 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
1561 up_write(&md->io_lock); 1637 up_write(&md->io_lock);
1562 1638
1639 flush_workqueue(md->wq);
1640
1563 /* 1641 /*
1564 * Wait for the already-mapped ios to complete. 1642 * At this point no more requests are entering target request routines.
1643 * We call dm_wait_for_completion to wait for all existing requests
1644 * to finish.
1565 */ 1645 */
1566 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); 1646 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
1567 1647
1568 down_write(&md->io_lock); 1648 down_write(&md->io_lock);
1569
1570 if (noflush) 1649 if (noflush)
1571 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 1650 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1572 up_write(&md->io_lock); 1651 up_write(&md->io_lock);
@@ -1579,6 +1658,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1579 goto out; /* pushback list is already flushed, so skip flush */ 1658 goto out; /* pushback list is already flushed, so skip flush */
1580 } 1659 }
1581 1660
1661 /*
1662 * If dm_wait_for_completion returned 0, the device is completely
1663 * quiescent now. There is no request-processing activity. All new
1664 * requests are being added to md->deferred list.
1665 */
1666
1582 dm_table_postsuspend_targets(map); 1667 dm_table_postsuspend_targets(map);
1583 1668
1584 set_bit(DMF_SUSPENDED, &md->flags); 1669 set_bit(DMF_SUSPENDED, &md->flags);
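
The dm.c hunks above rework how device-mapper quiesces I/O: dm_wq_work() now pops md->deferred one bio at a time and drops io_lock around each bio, while dm_suspend() sets DMF_QUEUE_IO_TO_THREAD and DMF_BLOCK_IO_FOR_SUSPEND before flushing the workqueue. A minimal userspace sketch of that hand-off, assuming a single mutex can stand in for both md->io_lock and md->deferred_lock; the names below (struct mapped, block_for_suspend, work) are invented for the sketch and nothing in it is device-mapper code:

/* Illustrative userspace analogue only -- not device-mapper code.
 * A worker drains a deferred queue, dropping the lock while it
 * "processes" each item; a suspend path raises a flag and then waits
 * for the worker to drain, mirroring the flush_workqueue() step. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct mapped {
	pthread_mutex_t lock;   /* stands in for md->deferred_lock and md->io_lock */
	int deferred;           /* queued "bios" still to process */
	int block_for_suspend;  /* stands in for DMF_BLOCK_IO_FOR_SUSPEND */
};

static void *work(void *arg)    /* compare with the reworked dm_wq_work() loop */
{
	struct mapped *md = arg;

	for (;;) {
		pthread_mutex_lock(&md->lock);
		if (md->block_for_suspend || md->deferred == 0) {
			pthread_mutex_unlock(&md->lock);
			break;
		}
		md->deferred--;
		pthread_mutex_unlock(&md->lock);

		usleep(1000);   /* the __split_and_process_bio() work happens unlocked */
	}
	return NULL;
}

int main(void)
{
	struct mapped md = { PTHREAD_MUTEX_INITIALIZER, 32, 0 };
	pthread_t t;

	pthread_create(&t, NULL, work, &md);

	/* Suspend path: raise the flag first so no new work is picked up,
	 * then wait for the worker to finish -- the flush_workqueue() analogue. */
	pthread_mutex_lock(&md.lock);
	md.block_for_suspend = 1;
	pthread_mutex_unlock(&md.lock);
	pthread_join(t, NULL);

	printf("quiesced with %d bios still deferred\n", md.deferred);
	return 0;
}

The ordering matters the same way it does in the patch: the flag is raised before the join/flush, so the worker cannot pick up new items once the suspend path has started waiting.
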
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index b48397c0abbd..a31506d93e91 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -52,7 +52,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
52 * To check the return value from dm_table_find_target(). 52 * To check the return value from dm_table_find_target().
53 */ 53 */
54#define dm_target_is_valid(t) ((t)->table) 54#define dm_target_is_valid(t) ((t)->table)
55int dm_table_barrier_ok(struct dm_table *t);
56 55
57/*----------------------------------------------------------------- 56/*-----------------------------------------------------------------
58 * A registry of target types. 57 * A registry of target types.
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 772990415f99..68eb4493f991 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -28,25 +28,12 @@ config DVB_AV7110
28 download/extract it, and then copy it to /usr/lib/hotplug/firmware 28 download/extract it, and then copy it to /usr/lib/hotplug/firmware
29 or /lib/firmware (depending on configuration of firmware hotplug). 29 or /lib/firmware (depending on configuration of firmware hotplug).
30 30
31 Say Y if you own such a card and want to use it. 31 Alternatively, you can download the file and use the kernel's
32 32 EXTRA_FIRMWARE configuration option to build it into your
33config DVB_AV7110_FIRMWARE 33 kernel image by adding the filename to the EXTRA_FIRMWARE
34 bool "Compile AV7110 firmware into the driver" 34 configuration option string.
35 depends on DVB_AV7110 && !STANDALONE
36 default y if DVB_AV7110=y
37 help
38 The AV7110 firmware is normally loaded by the firmware hotplug manager.
39 If you want to compile the firmware into the driver you need to say
40 Y here and provide the correct path of the firmware. You need this
41 option if you want to compile the whole driver statically into the
42 kernel.
43 35
44 All other people say N. 36 Say Y if you own such a card and want to use it.
45
46config DVB_AV7110_FIRMWARE_FILE
47 string "Full pathname of av7110 firmware file"
48 depends on DVB_AV7110_FIRMWARE
49 default "/usr/lib/hotplug/firmware/dvb-ttpci-01.fw"
50 37
51config DVB_AV7110_OSD 38config DVB_AV7110_OSD
52 bool "AV7110 OSD support" 39 bool "AV7110 OSD support"
diff --git a/drivers/media/dvb/ttpci/Makefile b/drivers/media/dvb/ttpci/Makefile
index 71451237294c..8a4d5bb20a5b 100644
--- a/drivers/media/dvb/ttpci/Makefile
+++ b/drivers/media/dvb/ttpci/Makefile
@@ -19,12 +19,3 @@ obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o
19 19
20EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/ 20EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
21EXTRA_CFLAGS += -Idrivers/media/common/tuners 21EXTRA_CFLAGS += -Idrivers/media/common/tuners
22
23hostprogs-y := fdump
24
25ifeq ($(CONFIG_DVB_AV7110_FIRMWARE),y)
26$(obj)/av7110.o: $(obj)/av7110_firm.h
27
28$(obj)/av7110_firm.h: $(obj)/fdump
29 $(obj)/fdump $(CONFIG_DVB_AV7110_FIRMWARE_FILE) dvb_ttpci_fw $@
30endif
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 4624cee93e74..d1d959ed37b7 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -1518,20 +1518,6 @@ static int check_firmware(struct av7110* av7110)
1518 return 0; 1518 return 0;
1519} 1519}
1520 1520
1521#ifdef CONFIG_DVB_AV7110_FIRMWARE_FILE
1522#include "av7110_firm.h"
1523static void put_firmware(struct av7110* av7110)
1524{
1525 av7110->bin_fw = NULL;
1526}
1527
1528static inline int get_firmware(struct av7110* av7110)
1529{
1530 av7110->bin_fw = dvb_ttpci_fw;
1531 av7110->size_fw = sizeof(dvb_ttpci_fw);
1532 return check_firmware(av7110);
1533}
1534#else
1535static void put_firmware(struct av7110* av7110) 1521static void put_firmware(struct av7110* av7110)
1536{ 1522{
1537 vfree(av7110->bin_fw); 1523 vfree(av7110->bin_fw);
@@ -1580,8 +1566,6 @@ static int get_firmware(struct av7110* av7110)
1580 release_firmware(fw); 1566 release_firmware(fw);
1581 return ret; 1567 return ret;
1582} 1568}
1583#endif
1584
1585 1569
1586static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params) 1570static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
1587{ 1571{
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c
index 3a3f5279e927..5e3f88911a1d 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.c
+++ b/drivers/media/dvb/ttpci/av7110_hw.c
@@ -198,29 +198,10 @@ static int load_dram(struct av7110 *av7110, u32 *data, int len)
198 198
199/* we cannot write av7110 DRAM directly, so load a bootloader into 199/* we cannot write av7110 DRAM directly, so load a bootloader into
200 * the DPRAM which implements a simple boot protocol */ 200 * the DPRAM which implements a simple boot protocol */
201static u8 bootcode[] = {
202 0xea, 0x00, 0x00, 0x0e, 0xe1, 0xb0, 0xf0, 0x0e, 0xe2, 0x5e, 0xf0, 0x04,
203 0xe2, 0x5e, 0xf0, 0x04, 0xe2, 0x5e, 0xf0, 0x08, 0xe2, 0x5e, 0xf0, 0x04,
204 0xe2, 0x5e, 0xf0, 0x04, 0xe2, 0x5e, 0xf0, 0x04, 0x2c, 0x00, 0x00, 0x24,
205 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x34,
206 0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0x5a, 0x5a, 0x00, 0x1f, 0x15, 0x55,
207 0x00, 0x00, 0x00, 0x09, 0xe5, 0x9f, 0xd0, 0x7c, 0xe5, 0x9f, 0x40, 0x74,
208 0xe3, 0xa0, 0x00, 0x00, 0xe5, 0x84, 0x00, 0x00, 0xe5, 0x84, 0x00, 0x04,
209 0xe5, 0x9f, 0x10, 0x70, 0xe5, 0x9f, 0x20, 0x70, 0xe5, 0x9f, 0x30, 0x64,
210 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0, 0xe1, 0x51, 0x00, 0x02,
211 0xda, 0xff, 0xff, 0xfb, 0xe5, 0x9f, 0xf0, 0x50, 0xe1, 0xd4, 0x10, 0xb0,
212 0xe3, 0x51, 0x00, 0x00, 0x0a, 0xff, 0xff, 0xfc, 0xe1, 0xa0, 0x10, 0x0d,
213 0xe5, 0x94, 0x30, 0x04, 0xe1, 0xd4, 0x20, 0xb2, 0xe2, 0x82, 0x20, 0x3f,
214 0xe1, 0xb0, 0x23, 0x22, 0x03, 0xa0, 0x00, 0x02, 0xe1, 0xc4, 0x00, 0xb0,
215 0x0a, 0xff, 0xff, 0xf4, 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0,
216 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0, 0xe2, 0x52, 0x20, 0x01,
217 0x1a, 0xff, 0xff, 0xf9, 0xe2, 0x2d, 0xdb, 0x05, 0xea, 0xff, 0xff, 0xec,
218 0x2c, 0x00, 0x03, 0xf8, 0x2c, 0x00, 0x04, 0x00, 0x9e, 0x00, 0x08, 0x00,
219 0x2c, 0x00, 0x00, 0x74, 0x2c, 0x00, 0x00, 0xc0
220};
221
222int av7110_bootarm(struct av7110 *av7110) 201int av7110_bootarm(struct av7110 *av7110)
223{ 202{
203 const struct firmware *fw;
204 const char *fw_name = "av7110/bootcode.bin";
224 struct saa7146_dev *dev = av7110->dev; 205 struct saa7146_dev *dev = av7110->dev;
225 u32 ret; 206 u32 ret;
226 int i; 207 int i;
@@ -261,7 +242,15 @@ int av7110_bootarm(struct av7110 *av7110)
261 //saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT); 242 //saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT);
262 //saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT); 243 //saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT);
263 244
264 mwdebi(av7110, DEBISWAB, DPRAM_BASE, bootcode, sizeof(bootcode)); 245 ret = request_firmware(&fw, fw_name, &dev->pci->dev);
246 if (ret) {
247 printk(KERN_ERR "dvb-ttpci: Failed to load firmware \"%s\"\n",
248 fw_name);
249 return ret;
250 }
251
252 mwdebi(av7110, DEBISWAB, DPRAM_BASE, fw->data, fw->size);
253 release_firmware(fw);
265 iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2); 254 iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
266 255
267 if (saa7146_wait_for_debi_done(av7110->dev, 1)) { 256 if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
@@ -302,7 +291,7 @@ int av7110_bootarm(struct av7110 *av7110)
302 av7110->arm_ready = 1; 291 av7110->arm_ready = 1;
303 return 0; 292 return 0;
304} 293}
305 294MODULE_FIRMWARE("av7110/bootcode.bin");
306 295
307/**************************************************************************** 296/****************************************************************************
308 * DEBI command polling 297 * DEBI command polling
diff --git a/drivers/media/dvb/ttpci/av7110_hw.h b/drivers/media/dvb/ttpci/av7110_hw.h
index ca99e5c1fc8a..1634aba5cb84 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.h
+++ b/drivers/media/dvb/ttpci/av7110_hw.h
@@ -390,7 +390,8 @@ static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val,
390} 390}
391 391
392/* buffer writes */ 392/* buffer writes */
393static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, u8 *val, int count) 393static inline void mwdebi(struct av7110 *av7110, u32 config, int addr,
394 const u8 *val, int count)
394{ 395{
395 memcpy(av7110->debi_virt, val, count); 396 memcpy(av7110->debi_virt, val, count);
396 av7110_debiwrite(av7110, config, addr, 0, count); 397 av7110_debiwrite(av7110, config, addr, 0, count);
diff --git a/drivers/media/dvb/ttpci/fdump.c b/drivers/media/dvb/ttpci/fdump.c
deleted file mode 100644
index c90001d35e7d..000000000000
--- a/drivers/media/dvb/ttpci/fdump.c
+++ /dev/null
@@ -1,44 +0,0 @@
1#include <stdio.h>
2#include <sys/types.h>
3#include <sys/stat.h>
4#include <fcntl.h>
5#include <unistd.h>
6
7int main(int argc, char **argv)
8{
9 unsigned char buf[8];
10 unsigned int i, count, bytes = 0;
11 FILE *fd_in, *fd_out;
12
13 if (argc != 4) {
14 fprintf(stderr, "\n\tusage: %s <ucode.bin> <array_name> <output_name>\n\n", argv[0]);
15 return -1;
16 }
17
18 fd_in = fopen(argv[1], "rb");
19 if (fd_in == NULL) {
20 fprintf(stderr, "firmware file '%s' not found\n", argv[1]);
21 return -1;
22 }
23
24 fd_out = fopen(argv[3], "w+");
25 if (fd_out == NULL) {
26 fprintf(stderr, "cannot create output file '%s'\n", argv[3]);
27 return -1;
28 }
29
30 fprintf(fd_out, "\n#include <asm/types.h>\n\nu8 %s [] = {", argv[2]);
31
32 while ((count = fread(buf, 1, 8, fd_in)) > 0) {
33 fprintf(fd_out, "\n\t");
34 for (i = 0; i < count; i++, bytes++)
35 fprintf(fd_out, "0x%02x, ", buf[i]);
36 }
37
38 fprintf(fd_out, "\n};\n\n");
39
40 fclose(fd_in);
41 fclose(fd_out);
42
43 return 0;
44}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c232d11a7ed4..06084dbf1277 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -208,7 +208,7 @@ static int mmc_read_ext_csd(struct mmc_card *card)
208 } 208 }
209 209
210 ext_csd_struct = ext_csd[EXT_CSD_REV]; 210 ext_csd_struct = ext_csd[EXT_CSD_REV];
211 if (ext_csd_struct > 2) { 211 if (ext_csd_struct > 3) {
212 printk(KERN_ERR "%s: unrecognised EXT_CSD structure " 212 printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
213 "version %d\n", mmc_hostname(card->host), 213 "version %d\n", mmc_hostname(card->host),
214 ext_csd_struct); 214 ext_csd_struct);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 26fc098d77cd..cd81c395e164 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -363,15 +363,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
363 goto err; 363 goto err;
364 364
365 /* 365 /*
366 * For SPI, enable CRC as appropriate.
367 */
368 if (mmc_host_is_spi(host)) {
369 err = mmc_spi_set_crc(host, use_spi_crc);
370 if (err)
371 goto err;
372 }
373
374 /*
375 * Fetch CID from card. 366 * Fetch CID from card.
376 */ 367 */
377 if (mmc_host_is_spi(host)) 368 if (mmc_host_is_spi(host))
@@ -458,6 +449,18 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
458 } 449 }
459 450
460 /* 451 /*
452 * For SPI, enable CRC as appropriate.
453 * This CRC enable is located AFTER the reading of the
454 * card registers because some SDHC cards are not able
455 * to provide valid CRCs for non-512-byte blocks.
456 */
457 if (mmc_host_is_spi(host)) {
458 err = mmc_spi_set_crc(host, use_spi_crc);
459 if (err)
460 goto free_card;
461 }
462
463 /*
461 * Attempt to change to high-speed (if supported) 464 * Attempt to change to high-speed (if supported)
462 */ 465 */
463 err = mmc_switch_hs(card); 466 err = mmc_switch_hs(card);
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index eb29b1d933ac..e0be21a4a696 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -307,13 +307,6 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
307 307
308 wmb(); 308 wmb();
309 309
310 if (host->actual_bus_width == MMC_BUS_WIDTH_4)
311 BLR(host->dma) = 0; /* burst 64 byte read / 64 bytes write */
312 else
313 BLR(host->dma) = 16; /* burst 16 byte read / 16 bytes write */
314
315 RSSR(host->dma) = DMA_REQ_SDHC;
316
317 set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); 310 set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
318 clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); 311 clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
319 312
@@ -818,9 +811,11 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
818 if (ios->bus_width == MMC_BUS_WIDTH_4) { 811 if (ios->bus_width == MMC_BUS_WIDTH_4) {
819 host->actual_bus_width = MMC_BUS_WIDTH_4; 812 host->actual_bus_width = MMC_BUS_WIDTH_4;
820 imx_gpio_mode(PB11_PF_SD_DAT3); 813 imx_gpio_mode(PB11_PF_SD_DAT3);
814 BLR(host->dma) = 0; /* burst 64 byte read/write */
821 } else { 815 } else {
822 host->actual_bus_width = MMC_BUS_WIDTH_1; 816 host->actual_bus_width = MMC_BUS_WIDTH_1;
823 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); 817 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
818 BLR(host->dma) = 16; /* burst 16 byte read/write */
824 } 819 }
825 820
826 if (host->power_mode != ios->power_mode) { 821 if (host->power_mode != ios->power_mode) {
@@ -938,7 +933,7 @@ static void imxmci_check_status(unsigned long data)
938 mod_timer(&host->timer, jiffies + (HZ>>1)); 933 mod_timer(&host->timer, jiffies + (HZ>>1));
939} 934}
940 935
941static int imxmci_probe(struct platform_device *pdev) 936static int __init imxmci_probe(struct platform_device *pdev)
942{ 937{
943 struct mmc_host *mmc; 938 struct mmc_host *mmc;
944 struct imxmci_host *host = NULL; 939 struct imxmci_host *host = NULL;
@@ -1034,6 +1029,7 @@ static int imxmci_probe(struct platform_device *pdev)
1034 } 1029 }
1035 host->dma_allocated = 1; 1030 host->dma_allocated = 1;
1036 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); 1031 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
1032 RSSR(host->dma) = DMA_REQ_SDHC;
1037 1033
1038 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); 1034 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
1039 host->status_reg=0; 1035 host->status_reg=0;
@@ -1079,7 +1075,7 @@ out:
1079 return ret; 1075 return ret;
1080} 1076}
1081 1077
1082static int imxmci_remove(struct platform_device *pdev) 1078static int __exit imxmci_remove(struct platform_device *pdev)
1083{ 1079{
1084 struct mmc_host *mmc = platform_get_drvdata(pdev); 1080 struct mmc_host *mmc = platform_get_drvdata(pdev);
1085 1081
@@ -1145,8 +1141,7 @@ static int imxmci_resume(struct platform_device *dev)
1145#endif /* CONFIG_PM */ 1141#endif /* CONFIG_PM */
1146 1142
1147static struct platform_driver imxmci_driver = { 1143static struct platform_driver imxmci_driver = {
1148 .probe = imxmci_probe, 1144 .remove = __exit_p(imxmci_remove),
1149 .remove = imxmci_remove,
1150 .suspend = imxmci_suspend, 1145 .suspend = imxmci_suspend,
1151 .resume = imxmci_resume, 1146 .resume = imxmci_resume,
1152 .driver = { 1147 .driver = {
@@ -1157,7 +1152,7 @@ static struct platform_driver imxmci_driver = {
1157 1152
1158static int __init imxmci_init(void) 1153static int __init imxmci_init(void)
1159{ 1154{
1160 return platform_driver_register(&imxmci_driver); 1155 return platform_driver_probe(&imxmci_driver, imxmci_probe);
1161} 1156}
1162 1157
1163static void __exit imxmci_exit(void) 1158static void __exit imxmci_exit(void)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 72f8bde4877a..f48349d18c92 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -24,7 +24,7 @@
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */ 26 */
27#include <linux/hrtimer.h> 27#include <linux/sched.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/bio.h> 29#include <linux/bio.h>
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
@@ -95,7 +95,7 @@
95 * reads which takes nowhere near that long. Older cards may be able to use 95 * reads which takes nowhere near that long. Older cards may be able to use
96 * shorter timeouts ... but why bother? 96 * shorter timeouts ... but why bother?
97 */ 97 */
98#define r1b_timeout ktime_set(3, 0) 98#define r1b_timeout (HZ * 3)
99 99
100 100
101/****************************************************************************/ 101/****************************************************************************/
@@ -183,12 +183,11 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
183 return status; 183 return status;
184} 184}
185 185
186static int 186static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
187mmc_spi_skip(struct mmc_spi_host *host, ktime_t timeout, unsigned n, u8 byte) 187 unsigned n, u8 byte)
188{ 188{
189 u8 *cp = host->data->status; 189 u8 *cp = host->data->status;
190 190 unsigned long start = jiffies;
191 timeout = ktime_add(timeout, ktime_get());
192 191
193 while (1) { 192 while (1) {
194 int status; 193 int status;
@@ -203,22 +202,26 @@ mmc_spi_skip(struct mmc_spi_host *host, ktime_t timeout, unsigned n, u8 byte)
203 return cp[i]; 202 return cp[i];
204 } 203 }
205 204
206 /* REVISIT investigate msleep() to avoid busy-wait I/O 205 if (time_is_before_jiffies(start + timeout))
207 * in at least some cases.
208 */
209 if (ktime_to_ns(ktime_sub(ktime_get(), timeout)) > 0)
210 break; 206 break;
207
208 /* If we need long timeouts, we may release the CPU.
209 * We use jiffies here because we want to have a relation
210 * between elapsed time and the blocking of the scheduler.
211 */
212 if (time_is_before_jiffies(start+1))
213 schedule();
211 } 214 }
212 return -ETIMEDOUT; 215 return -ETIMEDOUT;
213} 216}
214 217
215static inline int 218static inline int
216mmc_spi_wait_unbusy(struct mmc_spi_host *host, ktime_t timeout) 219mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
217{ 220{
218 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); 221 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
219} 222}
220 223
221static int mmc_spi_readtoken(struct mmc_spi_host *host, ktime_t timeout) 224static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
222{ 225{
223 return mmc_spi_skip(host, timeout, 1, 0xff); 226 return mmc_spi_skip(host, timeout, 1, 0xff);
224} 227}
@@ -251,6 +254,10 @@ static int mmc_spi_response_get(struct mmc_spi_host *host,
251 u8 *cp = host->data->status; 254 u8 *cp = host->data->status;
252 u8 *end = cp + host->t.len; 255 u8 *end = cp + host->t.len;
253 int value = 0; 256 int value = 0;
257 int bitshift;
258 u8 leftover = 0;
259 unsigned short rotator;
260 int i;
254 char tag[32]; 261 char tag[32];
255 262
256 snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s", 263 snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
@@ -268,9 +275,8 @@ static int mmc_spi_response_get(struct mmc_spi_host *host,
268 275
269 /* Data block reads (R1 response types) may need more data... */ 276 /* Data block reads (R1 response types) may need more data... */
270 if (cp == end) { 277 if (cp == end) {
271 unsigned i;
272
273 cp = host->data->status; 278 cp = host->data->status;
279 end = cp+1;
274 280
275 /* Card sends N(CR) (== 1..8) bytes of all-ones then one 281 /* Card sends N(CR) (== 1..8) bytes of all-ones then one
276 * status byte ... and we already scanned 2 bytes. 282 * status byte ... and we already scanned 2 bytes.
@@ -295,20 +301,34 @@ static int mmc_spi_response_get(struct mmc_spi_host *host,
295 } 301 }
296 302
297checkstatus: 303checkstatus:
298 if (*cp & 0x80) { 304 bitshift = 0;
299 dev_dbg(&host->spi->dev, "%s: INVALID RESPONSE, %02x\n", 305 if (*cp & 0x80) {
300 tag, *cp); 306 /* Houston, we have an ugly card with a bit-shifted response */
301 value = -EBADR; 307 rotator = *cp++ << 8;
302 goto done; 308 /* read the next byte */
309 if (cp == end) {
310 value = mmc_spi_readbytes(host, 1);
311 if (value < 0)
312 goto done;
313 cp = host->data->status;
314 end = cp+1;
315 }
316 rotator |= *cp++;
317 while (rotator & 0x8000) {
318 bitshift++;
319 rotator <<= 1;
320 }
321 cmd->resp[0] = rotator >> 8;
322 leftover = rotator;
323 } else {
324 cmd->resp[0] = *cp++;
303 } 325 }
304
305 cmd->resp[0] = *cp++;
306 cmd->error = 0; 326 cmd->error = 0;
307 327
308 /* Status byte: the entire seven-bit R1 response. */ 328 /* Status byte: the entire seven-bit R1 response. */
309 if (cmd->resp[0] != 0) { 329 if (cmd->resp[0] != 0) {
310 if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS 330 if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS
311 | R1_SPI_ILLEGAL_COMMAND) 331 | R1_SPI_ILLEGAL_COMMAND)
312 & cmd->resp[0]) 332 & cmd->resp[0])
313 value = -EINVAL; 333 value = -EINVAL;
314 else if (R1_SPI_COM_CRC & cmd->resp[0]) 334 else if (R1_SPI_COM_CRC & cmd->resp[0])
@@ -336,12 +356,45 @@ checkstatus:
336 * SPI R5 == R1 + data byte; IO_RW_DIRECT 356 * SPI R5 == R1 + data byte; IO_RW_DIRECT
337 */ 357 */
338 case MMC_RSP_SPI_R2: 358 case MMC_RSP_SPI_R2:
339 cmd->resp[0] |= *cp << 8; 359 /* read the next byte */
360 if (cp == end) {
361 value = mmc_spi_readbytes(host, 1);
362 if (value < 0)
363 goto done;
364 cp = host->data->status;
365 end = cp+1;
366 }
367 if (bitshift) {
368 rotator = leftover << 8;
369 rotator |= *cp << bitshift;
370 cmd->resp[0] |= (rotator & 0xFF00);
371 } else {
372 cmd->resp[0] |= *cp << 8;
373 }
340 break; 374 break;
341 375
342 /* SPI R3, R4, or R7 == R1 + 4 bytes */ 376 /* SPI R3, R4, or R7 == R1 + 4 bytes */
343 case MMC_RSP_SPI_R3: 377 case MMC_RSP_SPI_R3:
344 cmd->resp[1] = get_unaligned_be32(cp); 378 rotator = leftover << 8;
379 cmd->resp[1] = 0;
380 for (i = 0; i < 4; i++) {
381 cmd->resp[1] <<= 8;
382 /* read the next byte */
383 if (cp == end) {
384 value = mmc_spi_readbytes(host, 1);
385 if (value < 0)
386 goto done;
387 cp = host->data->status;
388 end = cp+1;
389 }
390 if (bitshift) {
391 rotator |= *cp++ << bitshift;
392 cmd->resp[1] |= (rotator >> 8);
393 rotator <<= 8;
394 } else {
395 cmd->resp[1] |= *cp++;
396 }
397 }
345 break; 398 break;
346 399
347 /* SPI R1 == just one status byte */ 400 /* SPI R1 == just one status byte */
@@ -607,7 +660,7 @@ mmc_spi_setup_data_message(
607 */ 660 */
608static int 661static int
609mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, 662mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
610 ktime_t timeout) 663 unsigned long timeout)
611{ 664{
612 struct spi_device *spi = host->spi; 665 struct spi_device *spi = host->spi;
613 int status, i; 666 int status, i;
@@ -717,11 +770,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
717 */ 770 */
718static int 771static int
719mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, 772mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
720 ktime_t timeout) 773 unsigned long timeout)
721{ 774{
722 struct spi_device *spi = host->spi; 775 struct spi_device *spi = host->spi;
723 int status; 776 int status;
724 struct scratch *scratch = host->data; 777 struct scratch *scratch = host->data;
778 unsigned int bitshift;
779 u8 leftover;
725 780
726 /* At least one SD card sends an all-zeroes byte when N(CX) 781 /* At least one SD card sends an all-zeroes byte when N(CX)
727 * applies, before the all-ones bytes ... just cope with that. 782 * applies, before the all-ones bytes ... just cope with that.
@@ -733,38 +788,60 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
733 if (status == 0xff || status == 0) 788 if (status == 0xff || status == 0)
734 status = mmc_spi_readtoken(host, timeout); 789 status = mmc_spi_readtoken(host, timeout);
735 790
736 if (status == SPI_TOKEN_SINGLE) { 791 if (status < 0) {
737 if (host->dma_dev) { 792 dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
738 dma_sync_single_for_device(host->dma_dev, 793 return status;
739 host->data_dma, sizeof(*scratch), 794 }
740 DMA_BIDIRECTIONAL);
741 dma_sync_single_for_device(host->dma_dev,
742 t->rx_dma, t->len,
743 DMA_FROM_DEVICE);
744 }
745 795
746 status = spi_sync(spi, &host->m); 796 /* The token may be bit-shifted...
797 * the first 0-bit precedes the data stream.
798 */
799 bitshift = 7;
800 while (status & 0x80) {
801 status <<= 1;
802 bitshift--;
803 }
804 leftover = status << 1;
747 805
748 if (host->dma_dev) { 806 if (host->dma_dev) {
749 dma_sync_single_for_cpu(host->dma_dev, 807 dma_sync_single_for_device(host->dma_dev,
750 host->data_dma, sizeof(*scratch), 808 host->data_dma, sizeof(*scratch),
751 DMA_BIDIRECTIONAL); 809 DMA_BIDIRECTIONAL);
752 dma_sync_single_for_cpu(host->dma_dev, 810 dma_sync_single_for_device(host->dma_dev,
753 t->rx_dma, t->len, 811 t->rx_dma, t->len,
754 DMA_FROM_DEVICE); 812 DMA_FROM_DEVICE);
755 } 813 }
756 814
757 } else { 815 status = spi_sync(spi, &host->m);
758 dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
759 816
760 /* we've read extra garbage, timed out, etc */ 817 if (host->dma_dev) {
761 if (status < 0) 818 dma_sync_single_for_cpu(host->dma_dev,
762 return status; 819 host->data_dma, sizeof(*scratch),
820 DMA_BIDIRECTIONAL);
821 dma_sync_single_for_cpu(host->dma_dev,
822 t->rx_dma, t->len,
823 DMA_FROM_DEVICE);
824 }
763 825
764 /* low four bits are an R2 subset, fifth seems to be 826 if (bitshift) {
765 * vendor specific ... map them all to generic error.. 827 /* Walk through the data and the crc and do
828 * all the magic to get byte-aligned data.
766 */ 829 */
767 return -EIO; 830 u8 *cp = t->rx_buf;
831 unsigned int len;
832 unsigned int bitright = 8 - bitshift;
833 u8 temp;
834 for (len = t->len; len; len--) {
835 temp = *cp;
836 *cp++ = leftover | (temp >> bitshift);
837 leftover = temp << bitright;
838 }
839 cp = (u8 *) &scratch->crc_val;
840 temp = *cp;
841 *cp++ = leftover | (temp >> bitshift);
842 leftover = temp << bitright;
843 temp = *cp;
844 *cp = leftover | (temp >> bitshift);
768 } 845 }
769 846
770 if (host->mmc->use_spi_crc) { 847 if (host->mmc->use_spi_crc) {
@@ -803,7 +880,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
803 unsigned n_sg; 880 unsigned n_sg;
804 int multiple = (data->blocks > 1); 881 int multiple = (data->blocks > 1);
805 u32 clock_rate; 882 u32 clock_rate;
806 ktime_t timeout; 883 unsigned long timeout;
807 884
808 if (data->flags & MMC_DATA_READ) 885 if (data->flags & MMC_DATA_READ)
809 direction = DMA_FROM_DEVICE; 886 direction = DMA_FROM_DEVICE;
@@ -817,8 +894,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
817 else 894 else
818 clock_rate = spi->max_speed_hz; 895 clock_rate = spi->max_speed_hz;
819 896
820 timeout = ktime_add_ns(ktime_set(0, 0), data->timeout_ns + 897 timeout = data->timeout_ns +
821 data->timeout_clks * 1000000 / clock_rate); 898 data->timeout_clks * 1000000 / clock_rate;
899 timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;
822 900
823 /* Handle scatterlist segments one at a time, with synch for 901 /* Handle scatterlist segments one at a time, with synch for
824 * each 512-byte block 902 * each 512-byte block
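
Much of the mmc_spi.c delta above handles cards that answer off a byte boundary: the token/response scan records how far the stream is shifted plus a leftover byte, and each payload byte is then rebuilt from the tail of one raw byte and the head of the next. A self-contained sketch of that realignment loop, with made-up sample data (0xA5 0x5A preceded by three pad bits); the bitshift/bitright/leftover names mirror the patch, but this is illustration, not driver code:

/* Standalone illustration -- sample data and names are made up.
 * Rebuild a byte-aligned payload from a stream that arrived `pad` bits
 * late, using the same rolling-leftover loop the patch adds to
 * mmc_spi_readblock(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 0xA5 0x5A behind three pad bits on the wire:
	 * 000 10100101 01011010 xxxxx  ->  raw bytes 0x14 0xAB 0x40 */
	const uint8_t raw[] = { 0x14, 0xAB, 0x40 };
	uint8_t out[2];
	unsigned pad = 3;
	unsigned bitshift = 8 - pad;       /* payload bits taken from the previous raw byte */
	unsigned bitright = 8 - bitshift;  /* payload bits taken from the current raw byte */
	uint8_t leftover = raw[0] << bitright;   /* payload bits hiding in raw[0] */
	unsigned i;

	for (i = 0; i < sizeof(out); i++) {
		uint8_t temp = raw[i + 1];

		out[i] = leftover | (temp >> bitshift);
		leftover = temp << bitright;
	}

	printf("%02x %02x\n", out[0], out[1]);   /* prints "a5 5a" */
	return 0;
}

In the patch itself the shift is inferred from where the first 0 bit of the data token or R1 response lands in the byte; once known, the same rolling-leftover loop fixes up the data and the CRC bytes.
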
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index d183be6f2a5f..e62a22a7f00c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -298,7 +298,6 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
298 struct mmc_request *mrq = host->mrq; 298 struct mmc_request *mrq = host->mrq;
299 299
300 host->mrq = NULL; 300 host->mrq = NULL;
301 mmc_omap_fclk_lazy_disable(host);
302 mmc_request_done(host->mmc, mrq); 301 mmc_request_done(host->mmc, mrq);
303 return; 302 return;
304 } 303 }
@@ -434,6 +433,8 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
434 if (host->mrq == NULL) { 433 if (host->mrq == NULL) {
435 OMAP_HSMMC_WRITE(host->base, STAT, 434 OMAP_HSMMC_WRITE(host->base, STAT,
436 OMAP_HSMMC_READ(host->base, STAT)); 435 OMAP_HSMMC_READ(host->base, STAT));
436 /* Flush posted write */
437 OMAP_HSMMC_READ(host->base, STAT);
437 return IRQ_HANDLED; 438 return IRQ_HANDLED;
438 } 439 }
439 440
@@ -489,8 +490,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
489 } 490 }
490 491
491 OMAP_HSMMC_WRITE(host->base, STAT, status); 492 OMAP_HSMMC_WRITE(host->base, STAT, status);
493 /* Flush posted write */
494 OMAP_HSMMC_READ(host->base, STAT);
492 495
493 if (end_cmd || (status & CC)) 496 if (end_cmd || ((status & CC) && host->cmd))
494 mmc_omap_cmd_done(host, host->cmd); 497 mmc_omap_cmd_done(host, host->cmd);
495 if (end_trans || (status & TC)) 498 if (end_trans || (status & TC))
496 mmc_omap_xfer_done(host, data); 499 mmc_omap_xfer_done(host, data);
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index c5b316e22371..cd37962ec44f 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -729,6 +729,6 @@ static void __exit sdhci_drv_exit(void)
729module_init(sdhci_drv_init); 729module_init(sdhci_drv_init);
730module_exit(sdhci_drv_exit); 730module_exit(sdhci_drv_exit);
731 731
732MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 732MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
733MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); 733MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
734MODULE_LICENSE("GPL"); 734MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 30d8e3d4e6fd..9234be2226e7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1935,7 +1935,7 @@ module_exit(sdhci_drv_exit);
1935 1935
1936module_param(debug_quirks, uint, 0444); 1936module_param(debug_quirks, uint, 0444);
1937 1937
1938MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 1938MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
1939MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 1939MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
1940MODULE_LICENSE("GPL"); 1940MODULE_LICENSE("GPL");
1941 1941
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index adda37952032..89bf8cd25cac 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -2036,7 +2036,7 @@ module_param_named(irq, param_irq, uint, 0444);
2036module_param_named(dma, param_dma, int, 0444); 2036module_param_named(dma, param_dma, int, 0444);
2037 2037
2038MODULE_LICENSE("GPL"); 2038MODULE_LICENSE("GPL");
2039MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 2039MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
2040MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver"); 2040MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
2041 2041
2042#ifdef CONFIG_PNP 2042#ifdef CONFIG_PNP
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9e7baec45720..9e921544ba20 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -977,6 +977,8 @@ config ETHOC
977 depends on NET_ETHERNET && HAS_IOMEM 977 depends on NET_ETHERNET && HAS_IOMEM
978 select MII 978 select MII
979 select PHYLIB 979 select PHYLIB
980 select CRC32
981 select BITREVERSE
980 help 982 help
981 Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. 983 Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
982 984
@@ -2056,6 +2058,27 @@ config IGB_DCA
2056 driver. DCA is a method for warming the CPU cache before data 2058 driver. DCA is a method for warming the CPU cache before data
2057 is used, with the intent of lessening the impact of cache misses. 2059 is used, with the intent of lessening the impact of cache misses.
2058 2060
2061config IGBVF
2062 tristate "Intel(R) 82576 Virtual Function Ethernet support"
2063 depends on PCI
2064 ---help---
2065 This driver supports Intel(R) 82576 virtual functions. For more
2066 information on how to identify your adapter, go to the Adapter &
2067 Driver ID Guide at:
2068
2069 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
2070
2071 For general information and support, go to the Intel support
2072 website at:
2073
2074 <http://support.intel.com>
2075
2076 More specific information on configuring the driver is in
2077 <file:Documentation/networking/e1000.txt>.
2078
2079 To compile this driver as a module, choose M here. The module
2080 will be called igbvf.
2081
2059source "drivers/net/ixp2000/Kconfig" 2082source "drivers/net/ixp2000/Kconfig"
2060 2083
2061config MYRI_SBUS 2084config MYRI_SBUS
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index edc9a0d6171d..1fc4602a6ff2 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
6obj-$(CONFIG_E1000E) += e1000e/ 6obj-$(CONFIG_E1000E) += e1000e/
7obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/ 7obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
8obj-$(CONFIG_IGB) += igb/ 8obj-$(CONFIG_IGB) += igb/
9obj-$(CONFIG_IGBVF) += igbvf/
9obj-$(CONFIG_IXGBE) += ixgbe/ 10obj-$(CONFIG_IXGBE) += ixgbe/
10obj-$(CONFIG_IXGB) += ixgb/ 11obj-$(CONFIG_IXGB) += ixgb/
11obj-$(CONFIG_IP1000) += ipg.o 12obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index d0d0c2fee054..02f64d578641 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -692,6 +692,17 @@ static struct zorro_driver a2065_driver = {
692 .remove = __devexit_p(a2065_remove_one), 692 .remove = __devexit_p(a2065_remove_one),
693}; 693};
694 694
695static const struct net_device_ops lance_netdev_ops = {
696 .ndo_open = lance_open,
697 .ndo_stop = lance_close,
698 .ndo_start_xmit = lance_start_xmit,
699 .ndo_tx_timeout = lance_tx_timeout,
700 .ndo_set_multicast_list = lance_set_multicast,
701 .ndo_validate_addr = eth_validate_addr,
702 .ndo_change_mtu = eth_change_mtu,
703 .ndo_set_mac_address = eth_mac_addr,
704};
705
695static int __devinit a2065_init_one(struct zorro_dev *z, 706static int __devinit a2065_init_one(struct zorro_dev *z,
696 const struct zorro_device_id *ent) 707 const struct zorro_device_id *ent)
697{ 708{
@@ -753,12 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
753 priv->rx_ring_mod_mask = RX_RING_MOD_MASK; 764 priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
754 priv->tx_ring_mod_mask = TX_RING_MOD_MASK; 765 priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
755 766
756 dev->open = &lance_open; 767 dev->netdev_ops = &lance_netdev_ops;
757 dev->stop = &lance_close;
758 dev->hard_start_xmit = &lance_start_xmit;
759 dev->tx_timeout = &lance_tx_timeout;
760 dev->watchdog_timeo = 5*HZ; 768 dev->watchdog_timeo = 5*HZ;
761 dev->set_multicast_list = &lance_set_multicast;
762 dev->dma = 0; 769 dev->dma = 0;
763 770
764 init_timer(&priv->multicast_timer); 771 init_timer(&priv->multicast_timer);
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index e1d72e06f3e1..58e8d522e5bc 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -155,6 +155,18 @@ static struct zorro_driver ariadne_driver = {
155 .remove = __devexit_p(ariadne_remove_one), 155 .remove = __devexit_p(ariadne_remove_one),
156}; 156};
157 157
158static const struct net_device_ops ariadne_netdev_ops = {
159 .ndo_open = ariadne_open,
160 .ndo_stop = ariadne_close,
161 .ndo_start_xmit = ariadne_start_xmit,
162 .ndo_tx_timeout = ariadne_tx_timeout,
163 .ndo_get_stats = ariadne_get_stats,
164 .ndo_set_multicast_list = set_multicast_list,
165 .ndo_validate_addr = eth_validate_addr,
166 .ndo_change_mtu = eth_change_mtu,
167 .ndo_set_mac_address = eth_mac_addr,
168};
169
158static int __devinit ariadne_init_one(struct zorro_dev *z, 170static int __devinit ariadne_init_one(struct zorro_dev *z,
159 const struct zorro_device_id *ent) 171 const struct zorro_device_id *ent)
160{ 172{
@@ -197,13 +209,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
197 dev->mem_start = ZTWO_VADDR(mem_start); 209 dev->mem_start = ZTWO_VADDR(mem_start);
198 dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE; 210 dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE;
199 211
200 dev->open = &ariadne_open; 212 dev->netdev_ops = &ariadne_netdev_ops;
201 dev->stop = &ariadne_close;
202 dev->hard_start_xmit = &ariadne_start_xmit;
203 dev->tx_timeout = &ariadne_tx_timeout;
204 dev->watchdog_timeo = 5*HZ; 213 dev->watchdog_timeo = 5*HZ;
205 dev->get_stats = &ariadne_get_stats;
206 dev->set_multicast_list = &set_multicast_list;
207 214
208 err = register_netdev(dev); 215 err = register_netdev(dev);
209 if (err) { 216 if (err) {
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 4bc6901b3819..627bc75da17d 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -665,6 +665,20 @@ static void __init am79c961_banner(void)
665 if (net_debug && version_printed++ == 0) 665 if (net_debug && version_printed++ == 0)
666 printk(KERN_INFO "%s", version); 666 printk(KERN_INFO "%s", version);
667} 667}
668static const struct net_device_ops am79c961_netdev_ops = {
669 .ndo_open = am79c961_open,
670 .ndo_stop = am79c961_close,
671 .ndo_start_xmit = am79c961_sendpacket,
672 .ndo_get_stats = am79c961_getstats,
673 .ndo_set_multicast_list = am79c961_setmulticastlist,
674 .ndo_tx_timeout = am79c961_timeout,
675 .ndo_validate_addr = eth_validate_addr,
676 .ndo_change_mtu = eth_change_mtu,
677 .ndo_set_mac_address = eth_mac_addr,
678#ifdef CONFIG_NET_POLL_CONTROLLER
679 .ndo_poll_controller = am79c961_poll_controller,
680#endif
681};
668 682
669static int __init am79c961_probe(struct platform_device *pdev) 683static int __init am79c961_probe(struct platform_device *pdev)
670{ 684{
@@ -732,15 +746,7 @@ static int __init am79c961_probe(struct platform_device *pdev)
732 if (am79c961_hw_init(dev)) 746 if (am79c961_hw_init(dev))
733 goto release; 747 goto release;
734 748
735 dev->open = am79c961_open; 749 dev->netdev_ops = &am79c961_netdev_ops;
736 dev->stop = am79c961_close;
737 dev->hard_start_xmit = am79c961_sendpacket;
738 dev->get_stats = am79c961_getstats;
739 dev->set_multicast_list = am79c961_setmulticastlist;
740 dev->tx_timeout = am79c961_timeout;
741#ifdef CONFIG_NET_POLL_CONTROLLER
742 dev->poll_controller = am79c961_poll_controller;
743#endif
744 750
745 ret = register_netdev(dev); 751 ret = register_netdev(dev);
746 if (ret == 0) { 752 if (ret == 0) {
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 442938d50380..7f4bc8ae5462 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -577,7 +577,7 @@ static void at91ether_sethashtable(struct net_device *dev)
577/* 577/*
578 * Enable/Disable promiscuous and multicast modes. 578 * Enable/Disable promiscuous and multicast modes.
579 */ 579 */
580static void at91ether_set_rx_mode(struct net_device *dev) 580static void at91ether_set_multicast_list(struct net_device *dev)
581{ 581{
582 unsigned long cfg; 582 unsigned long cfg;
583 583
@@ -808,7 +808,7 @@ static int at91ether_close(struct net_device *dev)
808/* 808/*
809 * Transmit packet. 809 * Transmit packet.
810 */ 810 */
811static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) 811static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
812{ 812{
813 struct at91_private *lp = netdev_priv(dev); 813 struct at91_private *lp = netdev_priv(dev);
814 814
@@ -828,7 +828,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
828 828
829 dev->trans_start = jiffies; 829 dev->trans_start = jiffies;
830 } else { 830 } else {
831 printk(KERN_ERR "at91_ether.c: at91ether_tx() called, but device is busy!\n"); 831 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
832 return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) 832 return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
833 on this skb, he also reports -ENETDOWN and printk's, so either 833 on this skb, he also reports -ENETDOWN and printk's, so either
834 we free and return(0) or don't free and return 1 */ 834 we free and return(0) or don't free and return 1 */
@@ -965,6 +965,21 @@ static void at91ether_poll_controller(struct net_device *dev)
965} 965}
966#endif 966#endif
967 967
968static const struct net_device_ops at91ether_netdev_ops = {
969 .ndo_open = at91ether_open,
970 .ndo_stop = at91ether_close,
971 .ndo_start_xmit = at91ether_start_xmit,
972 .ndo_get_stats = at91ether_stats,
973 .ndo_set_multicast_list = at91ether_set_multicast_list,
974 .ndo_set_mac_address = set_mac_address,
975 .ndo_do_ioctl = at91ether_ioctl,
976 .ndo_validate_addr = eth_validate_addr,
977 .ndo_change_mtu = eth_change_mtu,
978#ifdef CONFIG_NET_POLL_CONTROLLER
979 .ndo_poll_controller = at91ether_poll_controller,
980#endif
981};
982
968/* 983/*
969 * Initialize the ethernet interface 984 * Initialize the ethernet interface
970 */ 985 */
@@ -1005,17 +1020,8 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
1005 spin_lock_init(&lp->lock); 1020 spin_lock_init(&lp->lock);
1006 1021
1007 ether_setup(dev); 1022 ether_setup(dev);
1008 dev->open = at91ether_open; 1023 dev->netdev_ops = &at91ether_netdev_ops;
1009 dev->stop = at91ether_close;
1010 dev->hard_start_xmit = at91ether_tx;
1011 dev->get_stats = at91ether_stats;
1012 dev->set_multicast_list = at91ether_set_rx_mode;
1013 dev->set_mac_address = set_mac_address;
1014 dev->ethtool_ops = &at91ether_ethtool_ops; 1024 dev->ethtool_ops = &at91ether_ethtool_ops;
1015 dev->do_ioctl = at91ether_ioctl;
1016#ifdef CONFIG_NET_POLL_CONTROLLER
1017 dev->poll_controller = at91ether_poll_controller;
1018#endif
1019 1025
1020 SET_NETDEV_DEV(dev, &pdev->dev); 1026 SET_NETDEV_DEV(dev, &pdev->dev);
1021 1027
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index cc7708775da0..41736772c1dd 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -770,7 +770,18 @@ static struct ethtool_ops ep93xx_ethtool_ops = {
770 .get_link = ep93xx_get_link, 770 .get_link = ep93xx_get_link,
771}; 771};
772 772
773struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) 773static const struct net_device_ops ep93xx_netdev_ops = {
774 .ndo_open = ep93xx_open,
775 .ndo_stop = ep93xx_close,
776 .ndo_start_xmit = ep93xx_xmit,
777 .ndo_get_stats = ep93xx_get_stats,
778 .ndo_do_ioctl = ep93xx_ioctl,
779 .ndo_validate_addr = eth_validate_addr,
780 .ndo_change_mtu = eth_change_mtu,
781 .ndo_set_mac_address = eth_mac_addr,
782};
783
784static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
774{ 785{
775 struct net_device *dev; 786 struct net_device *dev;
776 787
@@ -780,12 +791,8 @@ struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
780 791
781 memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); 792 memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
782 793
783 dev->get_stats = ep93xx_get_stats;
784 dev->ethtool_ops = &ep93xx_ethtool_ops; 794 dev->ethtool_ops = &ep93xx_ethtool_ops;
785 dev->hard_start_xmit = ep93xx_xmit; 795 dev->netdev_ops = &ep93xx_netdev_ops;
786 dev->open = ep93xx_open;
787 dev->stop = ep93xx_close;
788 dev->do_ioctl = ep93xx_ioctl;
789 796
790 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 797 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
791 798
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index e380de454463..edf770f639fa 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -991,6 +991,18 @@ static void __devinit ether1_banner(void)
991 printk(KERN_INFO "%s", version); 991 printk(KERN_INFO "%s", version);
992} 992}
993 993
994static const struct net_device_ops ether1_netdev_ops = {
995 .ndo_open = ether1_open,
996 .ndo_stop = ether1_close,
997 .ndo_start_xmit = ether1_sendpacket,
998 .ndo_get_stats = ether1_getstats,
999 .ndo_set_multicast_list = ether1_setmulticastlist,
1000 .ndo_tx_timeout = ether1_timeout,
1001 .ndo_validate_addr = eth_validate_addr,
1002 .ndo_change_mtu = eth_change_mtu,
1003 .ndo_set_mac_address = eth_mac_addr,
1004};
1005
994static int __devinit 1006static int __devinit
995ether1_probe(struct expansion_card *ec, const struct ecard_id *id) 1007ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
996{ 1008{
@@ -1031,12 +1043,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
1031 goto free; 1043 goto free;
1032 } 1044 }
1033 1045
1034 dev->open = ether1_open; 1046 dev->netdev_ops = &ether1_netdev_ops;
1035 dev->stop = ether1_close;
1036 dev->hard_start_xmit = ether1_sendpacket;
1037 dev->get_stats = ether1_getstats;
1038 dev->set_multicast_list = ether1_setmulticastlist;
1039 dev->tx_timeout = ether1_timeout;
1040 dev->watchdog_timeo = 5 * HZ / 100; 1047 dev->watchdog_timeo = 5 * HZ / 100;
1041 1048
1042 ret = register_netdev(dev); 1049 ret = register_netdev(dev);
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 21a7bef12d3b..ec8a1ae1e887 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -770,6 +770,18 @@ static void __devinit ether3_banner(void)
770 printk(KERN_INFO "%s", version); 770 printk(KERN_INFO "%s", version);
771} 771}
772 772
773static const struct net_device_ops ether3_netdev_ops = {
774 .ndo_open = ether3_open,
775 .ndo_stop = ether3_close,
776 .ndo_start_xmit = ether3_sendpacket,
777 .ndo_get_stats = ether3_getstats,
778 .ndo_set_multicast_list = ether3_setmulticastlist,
779 .ndo_tx_timeout = ether3_timeout,
780 .ndo_validate_addr = eth_validate_addr,
781 .ndo_change_mtu = eth_change_mtu,
782 .ndo_set_mac_address = eth_mac_addr,
783};
784
773static int __devinit 785static int __devinit
774ether3_probe(struct expansion_card *ec, const struct ecard_id *id) 786ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
775{ 787{
@@ -846,12 +858,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
846 goto free; 858 goto free;
847 } 859 }
848 860
849 dev->open = ether3_open; 861 dev->netdev_ops = &ether3_netdev_ops;
850 dev->stop = ether3_close;
851 dev->hard_start_xmit = ether3_sendpacket;
852 dev->get_stats = ether3_getstats;
853 dev->set_multicast_list = ether3_setmulticastlist;
854 dev->tx_timeout = ether3_timeout;
855 dev->watchdog_timeo = 5 * HZ / 100; 862 dev->watchdog_timeo = 5 * HZ / 100;
856 863
857 ret = register_netdev(dev); 864 ret = register_netdev(dev);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 2d81f6afcb58..5425ab0c38c0 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -453,6 +453,16 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
453 return( ret ); 453 return( ret );
454} 454}
455 455
456static const struct net_device_ops lance_netdev_ops = {
457 .ndo_open = lance_open,
458 .ndo_stop = lance_close,
459 .ndo_start_xmit = lance_start_xmit,
460 .ndo_set_multicast_list = set_multicast_list,
461 .ndo_set_mac_address = lance_set_mac_address,
462 .ndo_tx_timeout = lance_tx_timeout,
463 .ndo_validate_addr = eth_validate_addr,
464 .ndo_change_mtu = eth_change_mtu,
465};
456 466
457static unsigned long __init lance_probe1( struct net_device *dev, 467static unsigned long __init lance_probe1( struct net_device *dev,
458 struct lance_addr *init_rec ) 468 struct lance_addr *init_rec )
@@ -623,15 +633,9 @@ static unsigned long __init lance_probe1( struct net_device *dev,
623 if (did_version++ == 0) 633 if (did_version++ == 0)
624 DPRINTK( 1, ( version )); 634 DPRINTK( 1, ( version ));
625 635
626 /* The LANCE-specific entries in the device structure. */ 636 dev->netdev_ops = &lance_netdev_ops;
627 dev->open = &lance_open;
628 dev->hard_start_xmit = &lance_start_xmit;
629 dev->stop = &lance_close;
630 dev->set_multicast_list = &set_multicast_list;
631 dev->set_mac_address = &lance_set_mac_address;
632 637
633 /* XXX MSch */ 638 /* XXX MSch */
634 dev->tx_timeout = lance_tx_timeout;
635 dev->watchdog_timeo = TX_TIMEOUT; 639 dev->watchdog_timeo = TX_TIMEOUT;
636 640
637 return( 1 ); 641 return( 1 );
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4274e4ac963b..d58c105fc779 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1004,12 +1004,12 @@ static void au1000_tx_timeout(struct net_device *dev)
1004 netif_wake_queue(dev); 1004 netif_wake_queue(dev);
1005} 1005}
1006 1006
1007static void set_rx_mode(struct net_device *dev) 1007static void au1000_multicast_list(struct net_device *dev)
1008{ 1008{
1009 struct au1000_private *aup = netdev_priv(dev); 1009 struct au1000_private *aup = netdev_priv(dev);
1010 1010
1011 if (au1000_debug > 4) 1011 if (au1000_debug > 4)
1012 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags); 1012 printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags);
1013 1013
1014 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1014 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1015 aup->mac->control |= MAC_PROMISCUOUS; 1015 aup->mac->control |= MAC_PROMISCUOUS;
@@ -1047,6 +1047,18 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1047 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); 1047 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
1048} 1048}
1049 1049
1050static const struct net_device_ops au1000_netdev_ops = {
1051 .ndo_open = au1000_open,
1052 .ndo_stop = au1000_close,
1053 .ndo_start_xmit = au1000_tx,
1054 .ndo_set_multicast_list = au1000_multicast_list,
1055 .ndo_do_ioctl = au1000_ioctl,
1056 .ndo_tx_timeout = au1000_tx_timeout,
1057 .ndo_set_mac_address = eth_mac_addr,
1058 .ndo_validate_addr = eth_validate_addr,
1059 .ndo_change_mtu = eth_change_mtu,
1060};
1061
1050static struct net_device * au1000_probe(int port_num) 1062static struct net_device * au1000_probe(int port_num)
1051{ 1063{
1052 static unsigned version_printed = 0; 1064 static unsigned version_printed = 0;
@@ -1197,13 +1209,8 @@ static struct net_device * au1000_probe(int port_num)
1197 1209
1198 dev->base_addr = base; 1210 dev->base_addr = base;
1199 dev->irq = irq; 1211 dev->irq = irq;
1200 dev->open = au1000_open; 1212 dev->netdev_ops = &au1000_netdev_ops;
1201 dev->hard_start_xmit = au1000_tx;
1202 dev->stop = au1000_close;
1203 dev->set_multicast_list = &set_rx_mode;
1204 dev->do_ioctl = &au1000_ioctl;
1205 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); 1213 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1206 dev->tx_timeout = au1000_tx_timeout;
1207 dev->watchdog_timeo = ETH_TX_TIMEOUT; 1214 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1208 1215
1209 /* 1216 /*
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 04f4b73fa8d8..9592f22e4c8c 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -319,7 +319,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
319 319
320 be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause, 320 be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause,
321 &ecmd->rx_pause); 321 &ecmd->rx_pause);
322 ecmd->autoneg = AUTONEG_ENABLE; 322 ecmd->autoneg = 0;
323} 323}
324 324
325static int 325static int
@@ -328,7 +328,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
328 struct be_adapter *adapter = netdev_priv(netdev); 328 struct be_adapter *adapter = netdev_priv(netdev);
329 int status; 329 int status;
330 330
331 if (ecmd->autoneg != AUTONEG_ENABLE) 331 if (ecmd->autoneg != 0)
332 return -EINVAL; 332 return -EINVAL;
333 333
334 status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause, 334 status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause,
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 9afe8092dfc4..9f971ed6b58d 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -979,6 +979,20 @@ static int bfin_mac_open(struct net_device *dev)
979 return 0; 979 return 0;
980} 980}
981 981
982static const struct net_device_ops bfin_mac_netdev_ops = {
983 .ndo_open = bfin_mac_open,
984 .ndo_stop = bfin_mac_close,
985 .ndo_start_xmit = bfin_mac_hard_start_xmit,
986 .ndo_set_mac_address = bfin_mac_set_mac_address,
987 .ndo_tx_timeout = bfin_mac_timeout,
988 .ndo_set_multicast_list = bfin_mac_set_multicast_list,
989 .ndo_validate_addr = eth_validate_addr,
990 .ndo_change_mtu = eth_change_mtu,
991#ifdef CONFIG_NET_POLL_CONTROLLER
992 .ndo_poll_controller = bfin_mac_poll,
993#endif
994};
995
982/* 996/*
983 * 997 *
984 * this makes the board clean up everything that it can 998 * this makes the board clean up everything that it can
@@ -1086,15 +1100,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1086 /* Fill in the fields of the device structure with ethernet values. */ 1100 /* Fill in the fields of the device structure with ethernet values. */
1087 ether_setup(ndev); 1101 ether_setup(ndev);
1088 1102
1089 ndev->open = bfin_mac_open; 1103 ndev->netdev_ops = &bfin_mac_netdev_ops;
1090 ndev->stop = bfin_mac_close;
1091 ndev->hard_start_xmit = bfin_mac_hard_start_xmit;
1092 ndev->set_mac_address = bfin_mac_set_mac_address;
1093 ndev->tx_timeout = bfin_mac_timeout;
1094 ndev->set_multicast_list = bfin_mac_set_multicast_list;
1095#ifdef CONFIG_NET_POLL_CONTROLLER
1096 ndev->poll_controller = bfin_mac_poll;
1097#endif
1098 ndev->ethtool_ops = &bfin_mac_ethtool_ops; 1104 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
1099 1105
1100 spin_lock_init(&lp->lock); 1106 spin_lock_init(&lp->lock);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 9d268be0b670..d47839184a06 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3427,8 +3427,8 @@ static int __devinit
3427bnx2_request_firmware(struct bnx2 *bp) 3427bnx2_request_firmware(struct bnx2 *bp)
3428{ 3428{
3429 const char *mips_fw_file, *rv2p_fw_file; 3429 const char *mips_fw_file, *rv2p_fw_file;
3430 const struct bnx2_mips_fw_file *mips; 3430 const struct bnx2_mips_fw_file *mips_fw;
3431 const struct bnx2_rv2p_fw_file *rv2p; 3431 const struct bnx2_rv2p_fw_file *rv2p_fw;
3432 int rc; 3432 int rc;
3433 3433
3434 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 3434 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
@@ -3452,21 +3452,21 @@ bnx2_request_firmware(struct bnx2 *bp)
3452 rv2p_fw_file); 3452 rv2p_fw_file);
3453 return rc; 3453 return rc;
3454 } 3454 }
3455 mips = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; 3455 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3456 rv2p = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; 3456 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3457 if (bp->mips_firmware->size < sizeof(*mips) || 3457 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3458 check_mips_fw_entry(bp->mips_firmware, &mips->com) || 3458 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3459 check_mips_fw_entry(bp->mips_firmware, &mips->cp) || 3459 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3460 check_mips_fw_entry(bp->mips_firmware, &mips->rxp) || 3460 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3461 check_mips_fw_entry(bp->mips_firmware, &mips->tpat) || 3461 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3462 check_mips_fw_entry(bp->mips_firmware, &mips->txp)) { 3462 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3463 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", 3463 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3464 mips_fw_file); 3464 mips_fw_file);
3465 return -EINVAL; 3465 return -EINVAL;
3466 } 3466 }
3467 if (bp->rv2p_firmware->size < sizeof(*rv2p) || 3467 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3468 check_fw_section(bp->rv2p_firmware, &rv2p->proc1.rv2p, 8, true) || 3468 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3469 check_fw_section(bp->rv2p_firmware, &rv2p->proc2.rv2p, 8, true)) { 3469 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3470 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", 3470 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3471 rv2p_fw_file); 3471 rv2p_fw_file);
3472 return -EINVAL; 3472 return -EINVAL;
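[editor's note] The bnx2 hunks only rename the firmware locals (mips/rv2p to mips_fw/rv2p_fw), presumably to avoid clashing with predefined identifiers on some builds; the surrounding logic, which checks the request_firmware() blob size before casting its data to a header struct, is the reusable part. A small user-space sketch of that size check, under an invented header layout:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative firmware header; not the real bnx2_mips_fw_file layout */
    struct fw_file_hdr {
            uint32_t magic;
            uint32_t len;
    };

    /* reject blobs that are too small before dereferencing the header */
    static int fw_valid(const uint8_t *data, size_t size)
    {
            const struct fw_file_hdr *hdr;

            if (size < sizeof(*hdr))
                    return 0;
            hdr = (const struct fw_file_hdr *)data;
            return hdr->magic == 0x08000110u && hdr->len <= size;
    }

    int main(void)
    {
            union {
                    struct fw_file_hdr hdr;
                    uint8_t raw[sizeof(struct fw_file_hdr)];
            } blob = { .hdr = { .magic = 0x08000110u, .len = sizeof(struct fw_file_hdr) } };

            printf("firmware valid: %d\n", fw_valid(blob.raw, sizeof(blob.raw)));
            return 0;
    }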
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 8dc6fbb9a41e..553a89919778 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -370,8 +370,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
370 370
371 if (arp->op_code == htons(ARPOP_REPLY)) { 371 if (arp->op_code == htons(ARPOP_REPLY)) {
372 /* update rx hash table for this ARP */ 372 /* update rx hash table for this ARP */
373 printk("rar: update orig %s bond_dev %s\n", orig_dev->name,
374 bond_dev->name);
375 bond = netdev_priv(bond_dev); 373 bond = netdev_priv(bond_dev);
376 rlb_update_entry_from_arp(bond, arp); 374 rlb_update_entry_from_arp(bond, arp);
377 pr_debug("Server received an ARP Reply from client\n"); 375 pr_debug("Server received an ARP Reply from client\n");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 99610f358c40..63369b6b14d4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2570,7 +2570,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2570 2570
2571 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 2571 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
2572 if (!targets[i]) 2572 if (!targets[i])
2573 continue; 2573 break;
2574 pr_debug("basa: target %x\n", targets[i]); 2574 pr_debug("basa: target %x\n", targets[i]);
2575 if (list_empty(&bond->vlan_list)) { 2575 if (list_empty(&bond->vlan_list)) {
2576 pr_debug("basa: empty vlan: arp_send\n"); 2576 pr_debug("basa: empty vlan: arp_send\n");
@@ -2677,7 +2677,6 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2677 int i; 2677 int i;
2678 __be32 *targets = bond->params.arp_targets; 2678 __be32 *targets = bond->params.arp_targets;
2679 2679
2680 targets = bond->params.arp_targets;
2681 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { 2680 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
2682 pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", 2681 pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
2683 &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); 2682 &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip));
@@ -3303,7 +3302,7 @@ static void bond_info_show_master(struct seq_file *seq)
3303 3302
3304 for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) { 3303 for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) {
3305 if (!bond->params.arp_targets[i]) 3304 if (!bond->params.arp_targets[i])
3306 continue; 3305 break;
3307 if (printed) 3306 if (printed)
3308 seq_printf(seq, ","); 3307 seq_printf(seq, ",");
3309 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); 3308 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 18cf4787874c..d28731535226 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -684,17 +684,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
684 goto out; 684 goto out;
685 } 685 }
686 /* look for an empty slot to put the target in, and check for dupes */ 686 /* look for an empty slot to put the target in, and check for dupes */
687 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 687 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
688 if (targets[i] == newtarget) { /* duplicate */ 688 if (targets[i] == newtarget) { /* duplicate */
689 printk(KERN_ERR DRV_NAME 689 printk(KERN_ERR DRV_NAME
690 ": %s: ARP target %pI4 is already present\n", 690 ": %s: ARP target %pI4 is already present\n",
691 bond->dev->name, &newtarget); 691 bond->dev->name, &newtarget);
692 if (done)
693 targets[i] = 0;
694 ret = -EINVAL; 692 ret = -EINVAL;
695 goto out; 693 goto out;
696 } 694 }
697 if (targets[i] == 0 && !done) { 695 if (targets[i] == 0) {
698 printk(KERN_INFO DRV_NAME 696 printk(KERN_INFO DRV_NAME
699 ": %s: adding ARP target %pI4.\n", 697 ": %s: adding ARP target %pI4.\n",
700 bond->dev->name, &newtarget); 698 bond->dev->name, &newtarget);
@@ -720,12 +718,16 @@ static ssize_t bonding_store_arp_targets(struct device *d,
720 goto out; 718 goto out;
721 } 719 }
722 720
723 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 721 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
724 if (targets[i] == newtarget) { 722 if (targets[i] == newtarget) {
723 int j;
725 printk(KERN_INFO DRV_NAME 724 printk(KERN_INFO DRV_NAME
726 ": %s: removing ARP target %pI4.\n", 725 ": %s: removing ARP target %pI4.\n",
727 bond->dev->name, &newtarget); 726 bond->dev->name, &newtarget);
728 targets[i] = 0; 727 for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
728 targets[j] = targets[j+1];
729
730 targets[j] = 0;
729 done = 1; 731 done = 1;
730 } 732 }
731 } 733 }
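[editor's note] Taken together, the bonding hunks change the ARP-target array from a sparse table (skip empty slots with continue) to a packed one: removal now shifts the remaining entries down, which is what lets the scan loops above stop at the first zero slot. A compact user-space sketch of the remove-and-compact step; the names and sizes are illustrative, not the bonding code itself:

    #include <stdio.h>

    #define MAX_TARGETS 16   /* stand-in for BOND_MAX_ARP_TARGETS */

    /* remove one address and keep the array packed, mirroring the shift
     * added to bonding_store_arp_targets(); returns 1 if found */
    static int remove_target(unsigned int *targets, unsigned int victim)
    {
            int i, j;

            for (i = 0; i < MAX_TARGETS && targets[i]; i++) {
                    if (targets[i] != victim)
                            continue;
                    for (j = i; j < MAX_TARGETS - 1 && targets[j + 1]; j++)
                            targets[j] = targets[j + 1];
                    targets[j] = 0;
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned int targets[MAX_TARGETS] = { 0x0a000001, 0x0a000002, 0x0a000003 };
            int i;

            remove_target(targets, 0x0a000002);

            /* with the array packed, readers can break at the first zero */
            for (i = 0; i < MAX_TARGETS && targets[i]; i++)
                    printf("target[%d] = %#x\n", i, targets[i]);
            return 0;
    }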
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index c9806c58b2fd..7a18dc7e5c7f 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -257,6 +257,23 @@ struct transceiver_ops transceivers[] =
257 257
258struct transceiver_ops* transceiver = &transceivers[0]; 258struct transceiver_ops* transceiver = &transceivers[0];
259 259
260static const struct net_device_ops e100_netdev_ops = {
261 .ndo_open = e100_open,
262 .ndo_stop = e100_close,
263 .ndo_start_xmit = e100_send_packet,
264 .ndo_tx_timeout = e100_tx_timeout,
265 .ndo_get_stats = e100_get_stats,
266 .ndo_set_multicast_list = set_multicast_list,
267 .ndo_do_ioctl = e100_ioctl,
268 .ndo_set_mac_address = e100_set_mac_address,
269 .ndo_validate_addr = eth_validate_addr,
270 .ndo_change_mtu = eth_change_mtu,
271 .ndo_set_config = e100_set_config,
272#ifdef CONFIG_NET_POLL_CONTROLLER
273 .ndo_poll_controller = e100_netpoll,
274#endif
275};
276
260#define tx_done(dev) (*R_DMA_CH0_CMD == 0) 277#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
261 278
262/* 279/*
@@ -300,19 +317,8 @@ etrax_ethernet_init(void)
300 317
301 /* fill in our handlers so the network layer can talk to us in the future */ 318 /* fill in our handlers so the network layer can talk to us in the future */
302 319
303 dev->open = e100_open;
304 dev->hard_start_xmit = e100_send_packet;
305 dev->stop = e100_close;
306 dev->get_stats = e100_get_stats;
307 dev->set_multicast_list = set_multicast_list;
308 dev->set_mac_address = e100_set_mac_address;
309 dev->ethtool_ops = &e100_ethtool_ops; 320 dev->ethtool_ops = &e100_ethtool_ops;
310 dev->do_ioctl = e100_ioctl; 321 dev->netdev_ops = &e100_netdev_ops;
311 dev->set_config = e100_set_config;
312 dev->tx_timeout = e100_tx_timeout;
313#ifdef CONFIG_NET_POLL_CONTROLLER
314 dev->poll_controller = e100_netpoll;
315#endif
316 322
317 spin_lock_init(&np->lock); 323 spin_lock_init(&np->lock);
318 spin_lock_init(&np->led_lock); 324 spin_lock_init(&np->led_lock);
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 861c867fca87..b62405a69180 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -1010,6 +1010,17 @@ static void lance_set_multicast_retry(unsigned long _opaque)
1010 lance_set_multicast(dev); 1010 lance_set_multicast(dev);
1011} 1011}
1012 1012
1013static const struct net_device_ops lance_netdev_ops = {
1014 .ndo_open = lance_open,
1015 .ndo_stop = lance_close,
1016 .ndo_start_xmit = lance_start_xmit,
1017 .ndo_tx_timeout = lance_tx_timeout,
1018 .ndo_set_multicast_list = lance_set_multicast,
1019 .ndo_change_mtu = eth_change_mtu,
1020 .ndo_validate_addr = eth_validate_addr,
1021 .ndo_set_mac_address = eth_mac_addr,
1022};
1023
1013static int __init dec_lance_probe(struct device *bdev, const int type) 1024static int __init dec_lance_probe(struct device *bdev, const int type)
1014{ 1025{
1015 static unsigned version_printed; 1026 static unsigned version_printed;
@@ -1223,12 +1234,8 @@ static int __init dec_lance_probe(struct device *bdev, const int type)
1223 1234
1224 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); 1235 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1225 1236
1226 dev->open = &lance_open; 1237 dev->netdev_ops = &lance_netdev_ops;
1227 dev->stop = &lance_close;
1228 dev->hard_start_xmit = &lance_start_xmit;
1229 dev->tx_timeout = &lance_tx_timeout;
1230 dev->watchdog_timeo = 5*HZ; 1238 dev->watchdog_timeo = 5*HZ;
1231 dev->set_multicast_list = &lance_set_multicast;
1232 1239
1233 /* lp->ll is the location of the registers for lance card */ 1240 /* lp->ll is the location of the registers for lance card */
1234 lp->ll = ll; 1241 lp->ll = ll;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ddc5c533e89c..ef12931d302a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -156,8 +156,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
156static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 156static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
157static void e1000_restore_vlan(struct e1000_adapter *adapter); 157static void e1000_restore_vlan(struct e1000_adapter *adapter);
158 158
159static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
160#ifdef CONFIG_PM 159#ifdef CONFIG_PM
160static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
161static int e1000_resume(struct pci_dev *pdev); 161static int e1000_resume(struct pci_dev *pdev);
162#endif 162#endif
163static void e1000_shutdown(struct pci_dev *pdev); 163static void e1000_shutdown(struct pci_dev *pdev);
@@ -3834,7 +3834,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3834 struct e1000_buffer *buffer_info; 3834 struct e1000_buffer *buffer_info;
3835 unsigned int i, eop; 3835 unsigned int i, eop;
3836 unsigned int count = 0; 3836 unsigned int count = 0;
3837 bool cleaned; 3837 bool cleaned = false;
3838 unsigned int total_tx_bytes=0, total_tx_packets=0; 3838 unsigned int total_tx_bytes=0, total_tx_packets=0;
3839 3839
3840 i = tx_ring->next_to_clean; 3840 i = tx_ring->next_to_clean;
@@ -4601,7 +4601,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4601 return 0; 4601 return 0;
4602} 4602}
4603 4603
4604static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4604static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4605{ 4605{
4606 struct net_device *netdev = pci_get_drvdata(pdev); 4606 struct net_device *netdev = pci_get_drvdata(pdev);
4607 struct e1000_adapter *adapter = netdev_priv(netdev); 4607 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4664,22 +4664,18 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4664 4664
4665 ew32(WUC, E1000_WUC_PME_EN); 4665 ew32(WUC, E1000_WUC_PME_EN);
4666 ew32(WUFC, wufc); 4666 ew32(WUFC, wufc);
4667 pci_enable_wake(pdev, PCI_D3hot, 1);
4668 pci_enable_wake(pdev, PCI_D3cold, 1);
4669 } else { 4667 } else {
4670 ew32(WUC, 0); 4668 ew32(WUC, 0);
4671 ew32(WUFC, 0); 4669 ew32(WUFC, 0);
4672 pci_enable_wake(pdev, PCI_D3hot, 0);
4673 pci_enable_wake(pdev, PCI_D3cold, 0);
4674 } 4670 }
4675 4671
4676 e1000_release_manageability(adapter); 4672 e1000_release_manageability(adapter);
4677 4673
4674 *enable_wake = !!wufc;
4675
4678 /* make sure adapter isn't asleep if manageability is enabled */ 4676 /* make sure adapter isn't asleep if manageability is enabled */
4679 if (adapter->en_mng_pt) { 4677 if (adapter->en_mng_pt)
4680 pci_enable_wake(pdev, PCI_D3hot, 1); 4678 *enable_wake = true;
4681 pci_enable_wake(pdev, PCI_D3cold, 1);
4682 }
4683 4679
4684 if (hw->phy_type == e1000_phy_igp_3) 4680 if (hw->phy_type == e1000_phy_igp_3)
4685 e1000_phy_powerdown_workaround(hw); 4681 e1000_phy_powerdown_workaround(hw);
@@ -4693,12 +4689,29 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4693 4689
4694 pci_disable_device(pdev); 4690 pci_disable_device(pdev);
4695 4691
4696 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4697
4698 return 0; 4692 return 0;
4699} 4693}
4700 4694
4701#ifdef CONFIG_PM 4695#ifdef CONFIG_PM
4696static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4697{
4698 int retval;
4699 bool wake;
4700
4701 retval = __e1000_shutdown(pdev, &wake);
4702 if (retval)
4703 return retval;
4704
4705 if (wake) {
4706 pci_prepare_to_sleep(pdev);
4707 } else {
4708 pci_wake_from_d3(pdev, false);
4709 pci_set_power_state(pdev, PCI_D3hot);
4710 }
4711
4712 return 0;
4713}
4714
4702static int e1000_resume(struct pci_dev *pdev) 4715static int e1000_resume(struct pci_dev *pdev)
4703{ 4716{
4704 struct net_device *netdev = pci_get_drvdata(pdev); 4717 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4753,7 +4766,14 @@ static int e1000_resume(struct pci_dev *pdev)
4753 4766
4754static void e1000_shutdown(struct pci_dev *pdev) 4767static void e1000_shutdown(struct pci_dev *pdev)
4755{ 4768{
4756 e1000_suspend(pdev, PMSG_SUSPEND); 4769 bool wake;
4770
4771 __e1000_shutdown(pdev, &wake);
4772
4773 if (system_state == SYSTEM_POWER_OFF) {
4774 pci_wake_from_d3(pdev, wake);
4775 pci_set_power_state(pdev, PCI_D3hot);
4776 }
4757} 4777}
4758 4778
4759#ifdef CONFIG_NET_POLL_CONTROLLER 4779#ifdef CONFIG_NET_POLL_CONTROLLER
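[editor's note] The e1000 rework splits the old suspend path into __e1000_shutdown(), which quiesces the NIC and reports through *enable_wake whether any wake-up filters were armed, and thin suspend/shutdown wrappers that choose the power state from that flag (the e1000e hunks below follow the same shape). A user-space model of the resulting control flow; the pci_* helpers here are printf stubs standing in for the PCI core calls, not the real API:

    #include <stdbool.h>
    #include <stdio.h>

    /* stubs standing in for the PCI power-management helpers */
    static void pci_prepare_to_sleep_stub(const char *dev)       { printf("%s: prepare_to_sleep (WoL armed)\n", dev); }
    static void pci_wake_from_d3_stub(const char *dev, bool on)  { printf("%s: wake_from_d3 = %d\n", dev, on); }
    static void pci_set_d3hot_stub(const char *dev)               { printf("%s: -> D3hot\n", dev); }

    /* analogue of __e1000_shutdown(): quiesce the device and report
     * whether any wake-up filters (WUFC) were programmed */
    static int device_shutdown(const char *dev, bool *enable_wake)
    {
            bool wufc_nonzero = true;   /* assume some WoL filter was requested */

            printf("%s: stop DMA, free IRQs\n", dev);
            *enable_wake = wufc_nonzero;
            return 0;
    }

    /* analogue of e1000_suspend(): the wrapper decides the power state */
    static int device_suspend(const char *dev)
    {
            bool wake;
            int err = device_shutdown(dev, &wake);

            if (err)
                    return err;
            if (wake) {
                    pci_prepare_to_sleep_stub(dev);
            } else {
                    pci_wake_from_d3_stub(dev, false);
                    pci_set_d3hot_stub(dev);
            }
            return 0;
    }

    int main(void)
    {
            return device_suspend("eth0");
    }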
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 409b58cad0e5..1693ed116b16 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -621,7 +621,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
621 struct e1000_buffer *buffer_info; 621 struct e1000_buffer *buffer_info;
622 unsigned int i, eop; 622 unsigned int i, eop;
623 unsigned int count = 0; 623 unsigned int count = 0;
624 bool cleaned; 624 bool cleaned = false;
625 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 625 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
626 626
627 i = tx_ring->next_to_clean; 627 i = tx_ring->next_to_clean;
@@ -4346,7 +4346,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4346 } 4346 }
4347} 4347}
4348 4348
4349static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4349static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4350{ 4350{
4351 struct net_device *netdev = pci_get_drvdata(pdev); 4351 struct net_device *netdev = pci_get_drvdata(pdev);
4352 struct e1000_adapter *adapter = netdev_priv(netdev); 4352 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4409,20 +4409,16 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4409 4409
4410 ew32(WUC, E1000_WUC_PME_EN); 4410 ew32(WUC, E1000_WUC_PME_EN);
4411 ew32(WUFC, wufc); 4411 ew32(WUFC, wufc);
4412 pci_enable_wake(pdev, PCI_D3hot, 1);
4413 pci_enable_wake(pdev, PCI_D3cold, 1);
4414 } else { 4412 } else {
4415 ew32(WUC, 0); 4413 ew32(WUC, 0);
4416 ew32(WUFC, 0); 4414 ew32(WUFC, 0);
4417 pci_enable_wake(pdev, PCI_D3hot, 0);
4418 pci_enable_wake(pdev, PCI_D3cold, 0);
4419 } 4415 }
4420 4416
4417 *enable_wake = !!wufc;
4418
4421 /* make sure adapter isn't asleep if manageability is enabled */ 4419 /* make sure adapter isn't asleep if manageability is enabled */
4422 if (adapter->flags & FLAG_MNG_PT_ENABLED) { 4420 if (adapter->flags & FLAG_MNG_PT_ENABLED)
4423 pci_enable_wake(pdev, PCI_D3hot, 1); 4421 *enable_wake = true;
4424 pci_enable_wake(pdev, PCI_D3cold, 1);
4425 }
4426 4422
4427 if (adapter->hw.phy.type == e1000_phy_igp_3) 4423 if (adapter->hw.phy.type == e1000_phy_igp_3)
4428 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 4424 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
@@ -4435,6 +4431,26 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4435 4431
4436 pci_disable_device(pdev); 4432 pci_disable_device(pdev);
4437 4433
4434 return 0;
4435}
4436
4437static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
4438{
4439 if (sleep && wake) {
4440 pci_prepare_to_sleep(pdev);
4441 return;
4442 }
4443
4444 pci_wake_from_d3(pdev, wake);
4445 pci_set_power_state(pdev, PCI_D3hot);
4446}
4447
4448static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
4449 bool wake)
4450{
4451 struct net_device *netdev = pci_get_drvdata(pdev);
4452 struct e1000_adapter *adapter = netdev_priv(netdev);
4453
4438 /* 4454 /*
4439 * The pci-e switch on some quad port adapters will report a 4455 * The pci-e switch on some quad port adapters will report a
4440 * correctable error when the MAC transitions from D0 to D3. To 4456 * correctable error when the MAC transitions from D0 to D3. To
@@ -4450,14 +4466,12 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4450 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, 4466 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
4451 (devctl & ~PCI_EXP_DEVCTL_CERE)); 4467 (devctl & ~PCI_EXP_DEVCTL_CERE));
4452 4468
4453 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4469 e1000_power_off(pdev, sleep, wake);
4454 4470
4455 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); 4471 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
4456 } else { 4472 } else {
4457 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4473 e1000_power_off(pdev, sleep, wake);
4458 } 4474 }
4459
4460 return 0;
4461} 4475}
4462 4476
4463static void e1000e_disable_l1aspm(struct pci_dev *pdev) 4477static void e1000e_disable_l1aspm(struct pci_dev *pdev)
@@ -4486,6 +4500,18 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev)
4486} 4500}
4487 4501
4488#ifdef CONFIG_PM 4502#ifdef CONFIG_PM
4503static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4504{
4505 int retval;
4506 bool wake;
4507
4508 retval = __e1000_shutdown(pdev, &wake);
4509 if (!retval)
4510 e1000_complete_shutdown(pdev, true, wake);
4511
4512 return retval;
4513}
4514
4489static int e1000_resume(struct pci_dev *pdev) 4515static int e1000_resume(struct pci_dev *pdev)
4490{ 4516{
4491 struct net_device *netdev = pci_get_drvdata(pdev); 4517 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4549,7 +4575,12 @@ static int e1000_resume(struct pci_dev *pdev)
4549 4575
4550static void e1000_shutdown(struct pci_dev *pdev) 4576static void e1000_shutdown(struct pci_dev *pdev)
4551{ 4577{
4552 e1000_suspend(pdev, PMSG_SUSPEND); 4578 bool wake = false;
4579
4580 __e1000_shutdown(pdev, &wake);
4581
4582 if (system_state == SYSTEM_POWER_OFF)
4583 e1000_complete_shutdown(pdev, false, wake);
4553} 4584}
4554 4585
4555#ifdef CONFIG_NET_POLL_CONTROLLER 4586#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index ac0c5b438e0a..604c844d0769 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -3080,7 +3080,8 @@ static const struct net_device_ops ehea_netdev_ops = {
3080 .ndo_change_mtu = ehea_change_mtu, 3080 .ndo_change_mtu = ehea_change_mtu,
3081 .ndo_vlan_rx_register = ehea_vlan_rx_register, 3081 .ndo_vlan_rx_register = ehea_vlan_rx_register,
3082 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, 3082 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
3083 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid 3083 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
3084 .ndo_tx_timeout = ehea_tx_watchdog,
3084}; 3085};
3085 3086
3086struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, 3087struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
@@ -3142,7 +3143,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3142 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX 3143 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3143 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3144 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3144 | NETIF_F_LLTX; 3145 | NETIF_F_LLTX;
3145 dev->tx_timeout = &ehea_tx_watchdog;
3146 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3146 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3147 3147
3148 INIT_WORK(&port->reset_task, ehea_reset_port); 3148 INIT_WORK(&port->reset_task, ehea_reset_port);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 51ead7941f83..5210bb1027cc 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -542,6 +542,8 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
542 } 542 }
543 spin_unlock_bh(&eql->queue.lock); 543 spin_unlock_bh(&eql->queue.lock);
544 544
545 dev_put(slave_dev);
546
545 return ret; 547 return ret;
546} 548}
547 549
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 63eaf5de2300..28db6919c526 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1155,6 +1155,7 @@ static void __inline__ fec_phy_ack_intr(void)
1155 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); 1155 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
1156 *icrp = 0x0d000000; 1156 *icrp = 0x0d000000;
1157} 1157}
1158#endif
1158 1159
1159#ifdef CONFIG_M5272 1160#ifdef CONFIG_M5272
1160static void __inline__ fec_get_mac(struct net_device *dev) 1161static void __inline__ fec_get_mac(struct net_device *dev)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d37465020bcc..11d5db16ed9c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3745,14 +3745,14 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3745 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3745 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3746 } 3746 }
3747 spin_unlock_irqrestore(&np->lock, flags); 3747 spin_unlock_irqrestore(&np->lock, flags);
3748 __napi_complete(napi); 3748 napi_complete(napi);
3749 return rx_work; 3749 return rx_work;
3750 } 3750 }
3751 3751
3752 if (rx_work < budget) { 3752 if (rx_work < budget) {
3753 /* re-enable interrupts 3753 /* re-enable interrupts
3754 (msix not enabled in napi) */ 3754 (msix not enabled in napi) */
3755 __napi_complete(napi); 3755 napi_complete(napi);
3756 3756
3757 writel(np->irqmask, base + NvRegIrqMask); 3757 writel(np->irqmask, base + NvRegIrqMask);
3758 } 3758 }
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index b037ce9857bf..a9cbc3191a2a 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1019,6 +1019,22 @@ out_put_phy:
1019#define IS_FEC(match) 0 1019#define IS_FEC(match) 0
1020#endif 1020#endif
1021 1021
1022static const struct net_device_ops fs_enet_netdev_ops = {
1023 .ndo_open = fs_enet_open,
1024 .ndo_stop = fs_enet_close,
1025 .ndo_get_stats = fs_enet_get_stats,
1026 .ndo_start_xmit = fs_enet_start_xmit,
1027 .ndo_tx_timeout = fs_timeout,
1028 .ndo_set_multicast_list = fs_set_multicast_list,
1029 .ndo_do_ioctl = fs_ioctl,
1030 .ndo_validate_addr = eth_validate_addr,
1031 .ndo_set_mac_address = eth_mac_addr,
1032 .ndo_change_mtu = eth_change_mtu,
1033#ifdef CONFIG_NET_POLL_CONTROLLER
1034 .ndo_poll_controller = fs_enet_netpoll,
1035#endif
1036};
1037
1022static int __devinit fs_enet_probe(struct of_device *ofdev, 1038static int __devinit fs_enet_probe(struct of_device *ofdev,
1023 const struct of_device_id *match) 1039 const struct of_device_id *match)
1024{ 1040{
@@ -1093,22 +1109,13 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1093 fep->tx_ring = fpi->tx_ring; 1109 fep->tx_ring = fpi->tx_ring;
1094 fep->rx_ring = fpi->rx_ring; 1110 fep->rx_ring = fpi->rx_ring;
1095 1111
1096 ndev->open = fs_enet_open; 1112 ndev->netdev_ops = &fs_enet_netdev_ops;
1097 ndev->hard_start_xmit = fs_enet_start_xmit;
1098 ndev->tx_timeout = fs_timeout;
1099 ndev->watchdog_timeo = 2 * HZ; 1113 ndev->watchdog_timeo = 2 * HZ;
1100 ndev->stop = fs_enet_close;
1101 ndev->get_stats = fs_enet_get_stats;
1102 ndev->set_multicast_list = fs_set_multicast_list;
1103#ifdef CONFIG_NET_POLL_CONTROLLER
1104 ndev->poll_controller = fs_enet_netpoll;
1105#endif
1106 if (fpi->use_napi) 1114 if (fpi->use_napi)
1107 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, 1115 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
1108 fpi->napi_weight); 1116 fpi->napi_weight);
1109 1117
1110 ndev->ethtool_ops = &fs_ethtool_ops; 1118 ndev->ethtool_ops = &fs_ethtool_ops;
1111 ndev->do_ioctl = fs_ioctl;
1112 1119
1113 init_timer(&fep->phy_timer_list); 1120 init_timer(&fep->phy_timer_list);
1114 1121
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 65f55877be95..b2c49679bba7 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1583,8 +1583,10 @@ static void gfar_reset_task(struct work_struct *work)
1583 struct net_device *dev = priv->ndev; 1583 struct net_device *dev = priv->ndev;
1584 1584
1585 if (dev->flags & IFF_UP) { 1585 if (dev->flags & IFF_UP) {
1586 netif_stop_queue(dev);
1586 stop_gfar(dev); 1587 stop_gfar(dev);
1587 startup_gfar(dev); 1588 startup_gfar(dev);
1589 netif_start_queue(dev);
1588 } 1590 }
1589 1591
1590 netif_tx_schedule_all(dev); 1592 netif_tx_schedule_all(dev);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 77e4b5b52fc8..806533c831c7 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2686,6 +2686,32 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2686 return 0; 2686 return 0;
2687} 2687}
2688 2688
2689static const struct net_device_ops emac_netdev_ops = {
2690 .ndo_open = emac_open,
2691 .ndo_stop = emac_close,
2692 .ndo_get_stats = emac_stats,
2693 .ndo_set_multicast_list = emac_set_multicast_list,
2694 .ndo_do_ioctl = emac_ioctl,
2695 .ndo_tx_timeout = emac_tx_timeout,
2696 .ndo_validate_addr = eth_validate_addr,
2697 .ndo_set_mac_address = eth_mac_addr,
2698 .ndo_start_xmit = emac_start_xmit,
2699 .ndo_change_mtu = eth_change_mtu,
2700};
2701
2702static const struct net_device_ops emac_gige_netdev_ops = {
2703 .ndo_open = emac_open,
2704 .ndo_stop = emac_close,
2705 .ndo_get_stats = emac_stats,
2706 .ndo_set_multicast_list = emac_set_multicast_list,
2707 .ndo_do_ioctl = emac_ioctl,
2708 .ndo_tx_timeout = emac_tx_timeout,
2709 .ndo_validate_addr = eth_validate_addr,
2710 .ndo_set_mac_address = eth_mac_addr,
2711 .ndo_start_xmit = emac_start_xmit_sg,
2712 .ndo_change_mtu = emac_change_mtu,
2713};
2714
2689static int __devinit emac_probe(struct of_device *ofdev, 2715static int __devinit emac_probe(struct of_device *ofdev,
2690 const struct of_device_id *match) 2716 const struct of_device_id *match)
2691{ 2717{
@@ -2827,23 +2853,14 @@ static int __devinit emac_probe(struct of_device *ofdev,
2827 if (err != 0) 2853 if (err != 0)
2828 goto err_detach_tah; 2854 goto err_detach_tah;
2829 2855
2830 /* Fill in the driver function table */
2831 ndev->open = &emac_open;
2832 if (dev->tah_dev) 2856 if (dev->tah_dev)
2833 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 2857 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2834 ndev->tx_timeout = &emac_tx_timeout;
2835 ndev->watchdog_timeo = 5 * HZ; 2858 ndev->watchdog_timeo = 5 * HZ;
2836 ndev->stop = &emac_close;
2837 ndev->get_stats = &emac_stats;
2838 ndev->set_multicast_list = &emac_set_multicast_list;
2839 ndev->do_ioctl = &emac_ioctl;
2840 if (emac_phy_supports_gige(dev->phy_mode)) { 2859 if (emac_phy_supports_gige(dev->phy_mode)) {
2841 ndev->hard_start_xmit = &emac_start_xmit_sg; 2860 ndev->netdev_ops = &emac_gige_netdev_ops;
2842 ndev->change_mtu = &emac_change_mtu;
2843 dev->commac.ops = &emac_commac_sg_ops; 2861 dev->commac.ops = &emac_commac_sg_ops;
2844 } else { 2862 } else
2845 ndev->hard_start_xmit = &emac_start_xmit; 2863 ndev->netdev_ops = &emac_netdev_ops;
2846 }
2847 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); 2864 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2848 2865
2849 netif_carrier_off(ndev); 2866 netif_carrier_off(ndev);
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index f4c315b5a900..472f3f124840 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -111,7 +111,7 @@ void igb_clear_vfta(struct e1000_hw *hw)
111 * Writes value at the given offset in the register array which stores 111 * Writes value at the given offset in the register array which stores
112 * the VLAN filter table. 112 * the VLAN filter table.
113 **/ 113 **/
114void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) 114static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
115{ 115{
116 array_wr32(E1000_VFTA, offset, value); 116 array_wr32(E1000_VFTA, offset, value);
117 wrfl(); 117 wrfl();
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index a34de5269637..1d690b4c9ae4 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -66,7 +66,6 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
66s32 igb_check_alt_mac_addr(struct e1000_hw *hw); 66s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
67void igb_reset_adaptive(struct e1000_hw *hw); 67void igb_reset_adaptive(struct e1000_hw *hw);
68void igb_update_adaptive(struct e1000_hw *hw); 68void igb_update_adaptive(struct e1000_hw *hw);
69void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
70 69
71bool igb_enable_mng_pass_thru(struct e1000_hw *hw); 70bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
72 71
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index fe71c7ddaa05..840782fb5736 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -188,7 +188,7 @@ out:
188 * returns SUCCESS if it successfully received a message notification and 188 * returns SUCCESS if it successfully received a message notification and
189 * copied it into the receive buffer. 189 * copied it into the receive buffer.
190 **/ 190 **/
191s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 191static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
192{ 192{
193 struct e1000_mbx_info *mbx = &hw->mbx; 193 struct e1000_mbx_info *mbx = &hw->mbx;
194 s32 ret_val = -E1000_ERR_MBX; 194 s32 ret_val = -E1000_ERR_MBX;
@@ -214,7 +214,7 @@ out:
214 * returns SUCCESS if it successfully copied message into the buffer and 214 * returns SUCCESS if it successfully copied message into the buffer and
215 * received an ack to that message within delay * timeout period 215 * received an ack to that message within delay * timeout period
216 **/ 216 **/
217s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 217static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
218{ 218{
219 struct e1000_mbx_info *mbx = &hw->mbx; 219 struct e1000_mbx_info *mbx = &hw->mbx;
220 s32 ret_val = 0; 220 s32 ret_val = 0;
@@ -232,19 +232,6 @@ out:
232 return ret_val; 232 return ret_val;
233} 233}
234 234
235/**
236 * e1000_init_mbx_ops_generic - Initialize NVM function pointers
237 * @hw: pointer to the HW structure
238 *
239 * Setups up the function pointers to no-op functions
240 **/
241void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
242{
243 struct e1000_mbx_info *mbx = &hw->mbx;
244 mbx->ops.read_posted = igb_read_posted_mbx;
245 mbx->ops.write_posted = igb_write_posted_mbx;
246}
247
248static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) 235static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
249{ 236{
250 u32 mbvficr = rd32(E1000_MBVFICR); 237 u32 mbvficr = rd32(E1000_MBVFICR);
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index 6ec9890a8f7a..ebc02ea3f198 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -67,8 +67,6 @@
67 67
68s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); 68s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
69s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); 69s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
70s32 igb_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
71s32 igb_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
72s32 igb_check_for_msg(struct e1000_hw *, u16); 70s32 igb_check_for_msg(struct e1000_hw *, u16);
73s32 igb_check_for_ack(struct e1000_hw *, u16); 71s32 igb_check_for_ack(struct e1000_hw *, u16);
74s32 igb_check_for_rst(struct e1000_hw *, u16); 72s32 igb_check_for_rst(struct e1000_hw *, u16);
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 6b0697c565b9..08c801490c72 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -152,14 +152,13 @@ static struct notifier_block dca_notifier = {
152/* for netdump / net console */ 152/* for netdump / net console */
153static void igb_netpoll(struct net_device *); 153static void igb_netpoll(struct net_device *);
154#endif 154#endif
155
156#ifdef CONFIG_PCI_IOV 155#ifdef CONFIG_PCI_IOV
157static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *, 156static unsigned int max_vfs = 0;
158 const char *, size_t); 157module_param(max_vfs, uint, 0);
159static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *, 158MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
160 char *); 159 "per physical function");
161DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs); 160#endif /* CONFIG_PCI_IOV */
162#endif 161
163static pci_ers_result_t igb_io_error_detected(struct pci_dev *, 162static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
164 pci_channel_state_t); 163 pci_channel_state_t);
165static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); 164static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
@@ -671,6 +670,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
671 670
672 /* If we can't do MSI-X, try MSI */ 671 /* If we can't do MSI-X, try MSI */
673msi_only: 672msi_only:
673#ifdef CONFIG_PCI_IOV
674 /* disable SR-IOV for non MSI-X configurations */
675 if (adapter->vf_data) {
676 struct e1000_hw *hw = &adapter->hw;
677 /* disable iov and allow time for transactions to clear */
678 pci_disable_sriov(adapter->pdev);
679 msleep(500);
680
681 kfree(adapter->vf_data);
682 adapter->vf_data = NULL;
683 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
684 msleep(100);
685 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
686 }
687#endif
674 adapter->num_rx_queues = 1; 688 adapter->num_rx_queues = 1;
675 adapter->num_tx_queues = 1; 689 adapter->num_tx_queues = 1;
676 if (!pci_enable_msi(adapter->pdev)) 690 if (!pci_enable_msi(adapter->pdev))
@@ -1238,6 +1252,46 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1238 if (err) 1252 if (err)
1239 goto err_sw_init; 1253 goto err_sw_init;
1240 1254
1255#ifdef CONFIG_PCI_IOV
1256 /* since iov functionality isn't critical to base device function we
1257 * can accept failure. If it fails we don't allow iov to be enabled */
1258 if (hw->mac.type == e1000_82576) {
1259 /* 82576 supports a maximum of 7 VFs in addition to the PF */
1260 unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
1261 int i;
1262 unsigned char mac_addr[ETH_ALEN];
1263
1264 if (num_vfs) {
1265 adapter->vf_data = kcalloc(num_vfs,
1266 sizeof(struct vf_data_storage),
1267 GFP_KERNEL);
1268 if (!adapter->vf_data) {
1269 dev_err(&pdev->dev,
1270 "Could not allocate VF private data - "
1271 "IOV enable failed\n");
1272 } else {
1273 err = pci_enable_sriov(pdev, num_vfs);
1274 if (!err) {
1275 adapter->vfs_allocated_count = num_vfs;
1276 dev_info(&pdev->dev,
1277 "%d vfs allocated\n",
1278 num_vfs);
1279 for (i = 0;
1280 i < adapter->vfs_allocated_count;
1281 i++) {
1282 random_ether_addr(mac_addr);
1283 igb_set_vf_mac(adapter, i,
1284 mac_addr);
1285 }
1286 } else {
1287 kfree(adapter->vf_data);
1288 adapter->vf_data = NULL;
1289 }
1290 }
1291 }
1292 }
1293
1294#endif
1241 /* setup the private structure */ 1295 /* setup the private structure */
1242 err = igb_sw_init(adapter); 1296 err = igb_sw_init(adapter);
1243 if (err) 1297 if (err)
@@ -1397,19 +1451,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1397 if (err) 1451 if (err)
1398 goto err_register; 1452 goto err_register;
1399 1453
1400#ifdef CONFIG_PCI_IOV
1401 /* since iov functionality isn't critical to base device function we
1402 * can accept failure. If it fails we don't allow iov to be enabled */
1403 if (hw->mac.type == e1000_82576) {
1404 err = pci_enable_sriov(pdev, 0);
1405 if (!err)
1406 err = device_create_file(&netdev->dev,
1407 &dev_attr_num_vfs);
1408 if (err)
1409 dev_err(&pdev->dev, "Failed to initialize IOV\n");
1410 }
1411
1412#endif
1413#ifdef CONFIG_IGB_DCA 1454#ifdef CONFIG_IGB_DCA
1414 if (dca_add_requester(&pdev->dev) == 0) { 1455 if (dca_add_requester(&pdev->dev) == 0) {
1415 adapter->flags |= IGB_FLAG_DCA_ENABLED; 1456 adapter->flags |= IGB_FLAG_DCA_ENABLED;
@@ -5422,89 +5463,4 @@ static void igb_vmm_control(struct igb_adapter *adapter)
5422 igb_vmdq_set_replication_pf(hw, true); 5463 igb_vmdq_set_replication_pf(hw, true);
5423} 5464}
5424 5465
5425#ifdef CONFIG_PCI_IOV
5426static ssize_t igb_show_num_vfs(struct device *dev,
5427 struct device_attribute *attr, char *buf)
5428{
5429 struct igb_adapter *adapter = netdev_priv(to_net_dev(dev));
5430
5431 return sprintf(buf, "%d\n", adapter->vfs_allocated_count);
5432}
5433
5434static ssize_t igb_set_num_vfs(struct device *dev,
5435 struct device_attribute *attr,
5436 const char *buf, size_t count)
5437{
5438 struct net_device *netdev = to_net_dev(dev);
5439 struct igb_adapter *adapter = netdev_priv(netdev);
5440 struct e1000_hw *hw = &adapter->hw;
5441 struct pci_dev *pdev = adapter->pdev;
5442 unsigned int num_vfs, i;
5443 unsigned char mac_addr[ETH_ALEN];
5444 int err;
5445
5446 sscanf(buf, "%u", &num_vfs);
5447
5448 if (num_vfs > 7)
5449 num_vfs = 7;
5450
5451 /* value unchanged do nothing */
5452 if (num_vfs == adapter->vfs_allocated_count)
5453 return count;
5454
5455 if (netdev->flags & IFF_UP)
5456 igb_close(netdev);
5457
5458 igb_reset_interrupt_capability(adapter);
5459 igb_free_queues(adapter);
5460 adapter->tx_ring = NULL;
5461 adapter->rx_ring = NULL;
5462 adapter->vfs_allocated_count = 0;
5463
5464 /* reclaim resources allocated to VFs since we are changing count */
5465 if (adapter->vf_data) {
5466 /* disable iov and allow time for transactions to clear */
5467 pci_disable_sriov(pdev);
5468 msleep(500);
5469
5470 kfree(adapter->vf_data);
5471 adapter->vf_data = NULL;
5472 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
5473 msleep(100);
5474 dev_info(&pdev->dev, "IOV Disabled\n");
5475 }
5476
5477 if (num_vfs) {
5478 adapter->vf_data = kcalloc(num_vfs,
5479 sizeof(struct vf_data_storage),
5480 GFP_KERNEL);
5481 if (!adapter->vf_data) {
5482 dev_err(&pdev->dev, "Could not allocate VF private "
5483 "data - IOV enable failed\n");
5484 } else {
5485 err = pci_enable_sriov(pdev, num_vfs);
5486 if (!err) {
5487 adapter->vfs_allocated_count = num_vfs;
5488 dev_info(&pdev->dev, "%d vfs allocated\n", num_vfs);
5489 for (i = 0; i < adapter->vfs_allocated_count; i++) {
5490 random_ether_addr(mac_addr);
5491 igb_set_vf_mac(adapter, i, mac_addr);
5492 }
5493 } else {
5494 kfree(adapter->vf_data);
5495 adapter->vf_data = NULL;
5496 }
5497 }
5498 }
5499
5500 igb_set_interrupt_capability(adapter);
5501 igb_alloc_queues(adapter);
5502 igb_reset(adapter);
5503
5504 if (netdev->flags & IFF_UP)
5505 igb_open(netdev);
5506
5507 return count;
5508}
5509#endif /* CONFIG_PCI_IOV */
5510/* igb_main.c */ 5466/* igb_main.c */
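[editor's note] With this series igb enables SR-IOV at probe time, gated by a max_vfs module parameter (clamped to 7 VFs on 82576), instead of through the removed num_vfs sysfs attribute; failure to set up VFs is treated as non-fatal and the PF continues without them. A user-space sketch of just the clamp-and-allocate step; setup_vfs() and its types are invented for illustration, not the driver's functions:

    #include <stdio.h>
    #include <stdlib.h>

    struct vf_data { unsigned char mac[6]; };   /* per-VF state placeholder */

    /* clamp the requested count to the hardware limit, allocate per-VF
     * state, and treat allocation failure as "run without IOV" */
    static int setup_vfs(unsigned int requested, struct vf_data **out, unsigned int *count)
    {
            unsigned int num_vfs = requested > 7 ? 7 : requested;
            struct vf_data *vfs;

            *out = NULL;
            *count = 0;
            if (!num_vfs)
                    return 0;

            vfs = calloc(num_vfs, sizeof(*vfs));
            if (!vfs)
                    return -1;      /* IOV is optional: probe carries on */

            *out = vfs;
            *count = num_vfs;
            return 0;
    }

    int main(void)
    {
            struct vf_data *vfs;
            unsigned int n;

            setup_vfs(12, &vfs, &n);        /* request above the hardware limit */
            printf("allocated state for %u VFs\n", n);
            free(vfs);
            return 0;
    }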
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
new file mode 100644
index 000000000000..c2f150d8f2d9
--- /dev/null
+++ b/drivers/net/igbvf/Makefile
@@ -0,0 +1,38 @@
1################################################################################
2#
3# Intel(R) 82576 Virtual Function Linux driver
4# Copyright(c) 2009 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25#
26################################################################################
27
28#
29# Makefile for the Intel(R) 82576 VF ethernet driver
30#
31
32obj-$(CONFIG_IGBVF) += igbvf.o
33
34igbvf-objs := vf.o \
35 mbx.o \
36 ethtool.o \
37 netdev.o
38
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
new file mode 100644
index 000000000000..88a47537518a
--- /dev/null
+++ b/drivers/net/igbvf/defines.h
@@ -0,0 +1,125 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_DEFINES_H_
29#define _E1000_DEFINES_H_
30
31/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
32#define REQ_TX_DESCRIPTOR_MULTIPLE 8
33#define REQ_RX_DESCRIPTOR_MULTIPLE 8
34
35/* IVAR valid bit */
36#define E1000_IVAR_VALID 0x80
37
38/* Receive Descriptor bit definitions */
39#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
40#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
41#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
42#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
43#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
44#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
45#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
46#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
47#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
48
49#define E1000_RXDEXT_STATERR_CE 0x01000000
50#define E1000_RXDEXT_STATERR_SE 0x02000000
51#define E1000_RXDEXT_STATERR_SEQ 0x04000000
52#define E1000_RXDEXT_STATERR_CXE 0x10000000
53#define E1000_RXDEXT_STATERR_TCPE 0x20000000
54#define E1000_RXDEXT_STATERR_IPE 0x40000000
55#define E1000_RXDEXT_STATERR_RXE 0x80000000
56
57
58/* Same mask, but for extended and packet split descriptors */
59#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
60 E1000_RXDEXT_STATERR_CE | \
61 E1000_RXDEXT_STATERR_SE | \
62 E1000_RXDEXT_STATERR_SEQ | \
63 E1000_RXDEXT_STATERR_CXE | \
64 E1000_RXDEXT_STATERR_RXE)
65
66/* Device Control */
67#define E1000_CTRL_RST 0x04000000 /* Global reset */
68
69/* Device Status */
70#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
71#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
72#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
73#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
74#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
75#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
76
77#define SPEED_10 10
78#define SPEED_100 100
79#define SPEED_1000 1000
80#define HALF_DUPLEX 1
81#define FULL_DUPLEX 2
82
83/* Transmit Descriptor bit definitions */
84#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
85#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
86#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
87#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
88
89#define MAX_JUMBO_FRAME_SIZE 0x3F00
90
91/* 802.1q VLAN Packet Size */
92#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
93
94/* Error Codes */
95#define E1000_SUCCESS 0
96#define E1000_ERR_CONFIG 3
97#define E1000_ERR_MAC_INIT 5
98#define E1000_ERR_MBX 15
99
100#ifndef ETH_ADDR_LEN
101#define ETH_ADDR_LEN 6
102#endif
103
104/* SRRCTL bit definitions */
105#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
106#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
107#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
108#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
109#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
110#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
111#define E1000_SRRCTL_DROP_EN 0x80000000
112
113#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
114#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
115
116/* Additional Descriptor Control definitions */
117#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
118#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
119
120/* Direct Cache Access (DCA) definitions */
121#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
122
123#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
124
125#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
new file mode 100644
index 000000000000..1dcaa6905312
--- /dev/null
+++ b/drivers/net/igbvf/ethtool.c
@@ -0,0 +1,540 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for igbvf */
29
30#include <linux/netdevice.h>
31#include <linux/ethtool.h>
32#include <linux/pci.h>
33#include <linux/vmalloc.h>
34#include <linux/delay.h>
35
36#include "igbvf.h"
37#include <linux/if_vlan.h>
38
39
40struct igbvf_stats {
41 char stat_string[ETH_GSTRING_LEN];
42 int sizeof_stat;
43 int stat_offset;
44 int base_stat_offset;
45};
46
47#define IGBVF_STAT(current, base) \
48 sizeof(((struct igbvf_adapter *)0)->current), \
49 offsetof(struct igbvf_adapter, current), \
50 offsetof(struct igbvf_adapter, base)
51
52static const struct igbvf_stats igbvf_gstrings_stats[] = {
53 { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) },
54 { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) },
55 { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
56 { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
57 { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
58 { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
59 { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
60 { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
61 { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
62 { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
63 { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
64 { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
65 { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) },
66};
67
68#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats)
69
70static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
71 "Link test (on/offline)"
72};
73
74#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
75
76static int igbvf_get_settings(struct net_device *netdev,
77 struct ethtool_cmd *ecmd)
78{
79 struct igbvf_adapter *adapter = netdev_priv(netdev);
80 struct e1000_hw *hw = &adapter->hw;
81 u32 status;
82
83 ecmd->supported = SUPPORTED_1000baseT_Full;
84
85 ecmd->advertising = ADVERTISED_1000baseT_Full;
86
87 ecmd->port = -1;
88 ecmd->transceiver = XCVR_DUMMY1;
89
90 status = er32(STATUS);
91 if (status & E1000_STATUS_LU) {
92 if (status & E1000_STATUS_SPEED_1000)
93 ecmd->speed = 1000;
94 else if (status & E1000_STATUS_SPEED_100)
95 ecmd->speed = 100;
96 else
97 ecmd->speed = 10;
98
99 if (status & E1000_STATUS_FD)
100 ecmd->duplex = DUPLEX_FULL;
101 else
102 ecmd->duplex = DUPLEX_HALF;
103 } else {
104 ecmd->speed = -1;
105 ecmd->duplex = -1;
106 }
107
108 ecmd->autoneg = AUTONEG_DISABLE;
109
110 return 0;
111}
112
113static u32 igbvf_get_link(struct net_device *netdev)
114{
115 return netif_carrier_ok(netdev);
116}
117
118static int igbvf_set_settings(struct net_device *netdev,
119 struct ethtool_cmd *ecmd)
120{
121 return -EOPNOTSUPP;
122}
123
124static void igbvf_get_pauseparam(struct net_device *netdev,
125 struct ethtool_pauseparam *pause)
126{
127 return;
128}
129
130static int igbvf_set_pauseparam(struct net_device *netdev,
131 struct ethtool_pauseparam *pause)
132{
133 return -EOPNOTSUPP;
134}
135
136static u32 igbvf_get_tx_csum(struct net_device *netdev)
137{
138 return ((netdev->features & NETIF_F_IP_CSUM) != 0);
139}
140
141static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
142{
143 if (data)
144 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
145 else
146 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
147 return 0;
148}
149
150static int igbvf_set_tso(struct net_device *netdev, u32 data)
151{
152 struct igbvf_adapter *adapter = netdev_priv(netdev);
153 int i;
154 struct net_device *v_netdev;
155
156 if (data) {
157 netdev->features |= NETIF_F_TSO;
158 netdev->features |= NETIF_F_TSO6;
159 } else {
160 netdev->features &= ~NETIF_F_TSO;
161 netdev->features &= ~NETIF_F_TSO6;
162 /* disable TSO on all VLANs if they're present */
163 if (!adapter->vlgrp)
164 goto tso_out;
165 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
166 v_netdev = vlan_group_get_device(adapter->vlgrp, i);
167 if (!v_netdev)
168 continue;
169
170 v_netdev->features &= ~NETIF_F_TSO;
171 v_netdev->features &= ~NETIF_F_TSO6;
172 vlan_group_set_device(adapter->vlgrp, i, v_netdev);
173 }
174 }
175
176tso_out:
177 dev_info(&adapter->pdev->dev, "TSO is %s\n",
178 data ? "Enabled" : "Disabled");
179 adapter->flags |= FLAG_TSO_FORCE;
180 return 0;
181}
182
183static u32 igbvf_get_msglevel(struct net_device *netdev)
184{
185 struct igbvf_adapter *adapter = netdev_priv(netdev);
186 return adapter->msg_enable;
187}
188
189static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
190{
191 struct igbvf_adapter *adapter = netdev_priv(netdev);
192 adapter->msg_enable = data;
193}
194
195static int igbvf_get_regs_len(struct net_device *netdev)
196{
197#define IGBVF_REGS_LEN 8
198 return IGBVF_REGS_LEN * sizeof(u32);
199}
200
201static void igbvf_get_regs(struct net_device *netdev,
202 struct ethtool_regs *regs, void *p)
203{
204 struct igbvf_adapter *adapter = netdev_priv(netdev);
205 struct e1000_hw *hw = &adapter->hw;
206 u32 *regs_buff = p;
207 u8 revision_id;
208
209 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
210
211 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
212
213 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
214
215 regs_buff[0] = er32(CTRL);
216 regs_buff[1] = er32(STATUS);
217
218 regs_buff[2] = er32(RDLEN(0));
219 regs_buff[3] = er32(RDH(0));
220 regs_buff[4] = er32(RDT(0));
221
222 regs_buff[5] = er32(TDLEN(0));
223 regs_buff[6] = er32(TDH(0));
224 regs_buff[7] = er32(TDT(0));
225}
226
227static int igbvf_get_eeprom_len(struct net_device *netdev)
228{
229 return 0;
230}
231
232static int igbvf_get_eeprom(struct net_device *netdev,
233 struct ethtool_eeprom *eeprom, u8 *bytes)
234{
235 return -EOPNOTSUPP;
236}
237
238static int igbvf_set_eeprom(struct net_device *netdev,
239 struct ethtool_eeprom *eeprom, u8 *bytes)
240{
241 return -EOPNOTSUPP;
242}
243
244static void igbvf_get_drvinfo(struct net_device *netdev,
245 struct ethtool_drvinfo *drvinfo)
246{
247 struct igbvf_adapter *adapter = netdev_priv(netdev);
248 char firmware_version[32] = "N/A";
249
250 strncpy(drvinfo->driver, igbvf_driver_name, 32);
251 strncpy(drvinfo->version, igbvf_driver_version, 32);
252 strncpy(drvinfo->fw_version, firmware_version, 32);
253 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
254 drvinfo->regdump_len = igbvf_get_regs_len(netdev);
255 drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
256}
257
258static void igbvf_get_ringparam(struct net_device *netdev,
259 struct ethtool_ringparam *ring)
260{
261 struct igbvf_adapter *adapter = netdev_priv(netdev);
262 struct igbvf_ring *tx_ring = adapter->tx_ring;
263 struct igbvf_ring *rx_ring = adapter->rx_ring;
264
265 ring->rx_max_pending = IGBVF_MAX_RXD;
266 ring->tx_max_pending = IGBVF_MAX_TXD;
267 ring->rx_mini_max_pending = 0;
268 ring->rx_jumbo_max_pending = 0;
269 ring->rx_pending = rx_ring->count;
270 ring->tx_pending = tx_ring->count;
271 ring->rx_mini_pending = 0;
272 ring->rx_jumbo_pending = 0;
273}
274
275static int igbvf_set_ringparam(struct net_device *netdev,
276 struct ethtool_ringparam *ring)
277{
278 struct igbvf_adapter *adapter = netdev_priv(netdev);
279 struct igbvf_ring *temp_ring;
280 int err;
281 u32 new_rx_count, new_tx_count;
282
283 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
284 return -EINVAL;
285
286 new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
287 new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
288 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
289
290 new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
291 new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
292 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
293
294 if ((new_tx_count == adapter->tx_ring->count) &&
295 (new_rx_count == adapter->rx_ring->count)) {
296 /* nothing to do */
297 return 0;
298 }
299
300 temp_ring = vmalloc(sizeof(struct igbvf_ring));
301 if (!temp_ring)
302 return -ENOMEM;
303
304 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
305 msleep(1);
306
307 if (netif_running(adapter->netdev))
308 igbvf_down(adapter);
309
310 /*
311 * We can't just free everything and then setup again,
312 * because the ISRs in MSI-X mode get passed pointers
313 * to the tx and rx ring structs.
314 */
315 if (new_tx_count != adapter->tx_ring->count) {
316 memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
317
318 temp_ring->count = new_tx_count;
319 err = igbvf_setup_tx_resources(adapter, temp_ring);
320 if (err)
321 goto err_setup;
322
323 igbvf_free_tx_resources(adapter->tx_ring);
324
325 memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
326 }
327
328 if (new_rx_count != adapter->rx_ring->count) {
329 memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));
330
331 temp_ring->count = new_rx_count;
332 err = igbvf_setup_rx_resources(adapter, temp_ring);
333 if (err)
334 goto err_setup;
335
336 igbvf_free_rx_resources(adapter->rx_ring);
337
338 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
339 }
340
341 err = 0;
342err_setup:
343 if (netif_running(adapter->netdev))
344 igbvf_up(adapter);
345
346 clear_bit(__IGBVF_RESETTING, &adapter->state);
347 vfree(temp_ring);
348 return err;
349}
350
351static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
352{
353 struct e1000_hw *hw = &adapter->hw;
354 *data = 0;
355
356 hw->mac.ops.check_for_link(hw);
357
358 if (!(er32(STATUS) & E1000_STATUS_LU))
359 *data = 1;
360
361 return *data;
362}
363
364static int igbvf_get_self_test_count(struct net_device *netdev)
365{
366 return IGBVF_TEST_LEN;
367}
368
369static int igbvf_get_stats_count(struct net_device *netdev)
370{
371 return IGBVF_GLOBAL_STATS_LEN;
372}
373
374static void igbvf_diag_test(struct net_device *netdev,
375 struct ethtool_test *eth_test, u64 *data)
376{
377 struct igbvf_adapter *adapter = netdev_priv(netdev);
378
379 set_bit(__IGBVF_TESTING, &adapter->state);
380
381 /*
382 * Link test performed before hardware reset so autoneg doesn't
383 * interfere with test result
384 */
385 if (igbvf_link_test(adapter, &data[0]))
386 eth_test->flags |= ETH_TEST_FL_FAILED;
387
388 clear_bit(__IGBVF_TESTING, &adapter->state);
389 msleep_interruptible(4 * 1000);
390}
391
392static void igbvf_get_wol(struct net_device *netdev,
393 struct ethtool_wolinfo *wol)
394{
395 wol->supported = 0;
396 wol->wolopts = 0;
397
398 return;
399}
400
401static int igbvf_set_wol(struct net_device *netdev,
402 struct ethtool_wolinfo *wol)
403{
404 return -EOPNOTSUPP;
405}
406
407static int igbvf_phys_id(struct net_device *netdev, u32 data)
408{
409 return 0;
410}
411
412static int igbvf_get_coalesce(struct net_device *netdev,
413 struct ethtool_coalesce *ec)
414{
415 struct igbvf_adapter *adapter = netdev_priv(netdev);
416
417 if (adapter->itr_setting <= 3)
418 ec->rx_coalesce_usecs = adapter->itr_setting;
419 else
420 ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
421
422 return 0;
423}
424
425static int igbvf_set_coalesce(struct net_device *netdev,
426 struct ethtool_coalesce *ec)
427{
428 struct igbvf_adapter *adapter = netdev_priv(netdev);
429 struct e1000_hw *hw = &adapter->hw;
430
431 if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
432 ((ec->rx_coalesce_usecs > 3) &&
433 (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
434 (ec->rx_coalesce_usecs == 2))
435 return -EINVAL;
436
437 /* convert to rate of irqs per second */
438 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
439 adapter->itr = IGBVF_START_ITR;
440 adapter->itr_setting = ec->rx_coalesce_usecs;
441 } else {
442 adapter->itr = ec->rx_coalesce_usecs << 2;
443 adapter->itr_setting = adapter->itr;
444 }
445
446 writel(adapter->itr,
447 hw->hw_addr + adapter->rx_ring[0].itr_register);
448
449 return 0;
450}
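
A quick check of the conversion above: rx_coalesce_usecs values 1-3 are treated as mode selectors rather than times, while anything larger is taken as microseconds and stored shifted left by two, so the value programmed into the ring's ITR register is four times the interval in usecs. IGBVF_START_ITR (648, defined in igbvf.h later in this patch) therefore corresponds to 648 / 4 = 162 usec between interrupts, i.e. about 1,000,000 / 162 ≈ 6170 interrupts per second, consistent with the "~6000 ints/sec" annotation on that define.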
451
452static int igbvf_nway_reset(struct net_device *netdev)
453{
454 struct igbvf_adapter *adapter = netdev_priv(netdev);
455 if (netif_running(netdev))
456 igbvf_reinit_locked(adapter);
457 return 0;
458}
459
460
461static void igbvf_get_ethtool_stats(struct net_device *netdev,
462 struct ethtool_stats *stats,
463 u64 *data)
464{
465 struct igbvf_adapter *adapter = netdev_priv(netdev);
466 int i;
467
468 igbvf_update_stats(adapter);
469 for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
470 char *p = (char *)adapter +
471 igbvf_gstrings_stats[i].stat_offset;
472 char *b = (char *)adapter +
473 igbvf_gstrings_stats[i].base_stat_offset;
474 data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
475 sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
476 (*(u32 *)p - *(u32 *)b));
477 }
478
479}
480
481static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
482 u8 *data)
483{
484 u8 *p = data;
485 int i;
486
487 switch (stringset) {
488 case ETH_SS_TEST:
489 memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test));
490 break;
491 case ETH_SS_STATS:
492 for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
493 memcpy(p, igbvf_gstrings_stats[i].stat_string,
494 ETH_GSTRING_LEN);
495 p += ETH_GSTRING_LEN;
496 }
497 break;
498 }
499}
500
501static const struct ethtool_ops igbvf_ethtool_ops = {
502 .get_settings = igbvf_get_settings,
503 .set_settings = igbvf_set_settings,
504 .get_drvinfo = igbvf_get_drvinfo,
505 .get_regs_len = igbvf_get_regs_len,
506 .get_regs = igbvf_get_regs,
507 .get_wol = igbvf_get_wol,
508 .set_wol = igbvf_set_wol,
509 .get_msglevel = igbvf_get_msglevel,
510 .set_msglevel = igbvf_set_msglevel,
511 .nway_reset = igbvf_nway_reset,
512 .get_link = igbvf_get_link,
513 .get_eeprom_len = igbvf_get_eeprom_len,
514 .get_eeprom = igbvf_get_eeprom,
515 .set_eeprom = igbvf_set_eeprom,
516 .get_ringparam = igbvf_get_ringparam,
517 .set_ringparam = igbvf_set_ringparam,
518 .get_pauseparam = igbvf_get_pauseparam,
519 .set_pauseparam = igbvf_set_pauseparam,
520 .get_tx_csum = igbvf_get_tx_csum,
521 .set_tx_csum = igbvf_set_tx_csum,
522 .get_sg = ethtool_op_get_sg,
523 .set_sg = ethtool_op_set_sg,
524 .get_tso = ethtool_op_get_tso,
525 .set_tso = igbvf_set_tso,
526 .self_test = igbvf_diag_test,
527 .get_strings = igbvf_get_strings,
528 .phys_id = igbvf_phys_id,
529 .get_ethtool_stats = igbvf_get_ethtool_stats,
530 .self_test_count = igbvf_get_self_test_count,
531 .get_stats_count = igbvf_get_stats_count,
532 .get_coalesce = igbvf_get_coalesce,
533 .set_coalesce = igbvf_set_coalesce,
534};
535
536void igbvf_set_ethtool_ops(struct net_device *netdev)
537{
538 /* have to "undeclare" const on this struct to remove warnings */
539 SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
540}
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
new file mode 100644
index 000000000000..4bff35e46871
--- /dev/null
+++ b/drivers/net/igbvf/igbvf.h
@@ -0,0 +1,332 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* Linux PRO/1000 Ethernet Driver main header file */
29
30#ifndef _IGBVF_H_
31#define _IGBVF_H_
32
33#include <linux/types.h>
34#include <linux/timer.h>
35#include <linux/io.h>
36#include <linux/netdevice.h>
37
38
39#include "vf.h"
40
41/* Forward declarations */
42struct igbvf_info;
43struct igbvf_adapter;
44
45/* Interrupt defines */
46#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
47
48/* Interrupt modes, as used by the IntMode parameter */
49#define IGBVF_INT_MODE_LEGACY 0
50#define IGBVF_INT_MODE_MSI 1
51#define IGBVF_INT_MODE_MSIX 2
52
53/* Tx/Rx descriptor defines */
54#define IGBVF_DEFAULT_TXD 256
55#define IGBVF_MAX_TXD 4096
56#define IGBVF_MIN_TXD 80
57
58#define IGBVF_DEFAULT_RXD 256
59#define IGBVF_MAX_RXD 4096
60#define IGBVF_MIN_RXD 80
61
62#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
63#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
64
65/* RX descriptor control thresholds.
66 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
67 * descriptors available in its onboard memory.
68 * Setting this to 0 disables RX descriptor prefetch.
69 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
70 * available in host memory.
71 * If PTHRESH is 0, this should also be 0.
72 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
73 * descriptors until either it has this many to write back, or the
74 * ITR timer expires.
75 */
76#define IGBVF_RX_PTHRESH 16
77#define IGBVF_RX_HTHRESH 8
78#define IGBVF_RX_WTHRESH 1
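
These three defines only hold the threshold values; the Rx configuration path in netdev.c (added later in this patch) is what packs them into the queue's RXDCTL register. A rough sketch of that packing, with the field offsets (bits 0, 8 and 16) being assumptions carried over from the igb family rather than something this hunk shows:

	u32 rxdctl = er32(RXDCTL(0));		/* sketch only, not from this patch */

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl |= IGBVF_RX_PTHRESH;		/* assumed bits  4:0  */
	rxdctl |= IGBVF_RX_HTHRESH << 8;	/* assumed bits 12:8  */
	rxdctl |= IGBVF_RX_WTHRESH << 16;	/* assumed bits 20:16 */
	ew32(RXDCTL(0), rxdctl);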
79
80/* this is the size past which hardware will drop packets when setting LPE=0 */
81#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
82
83#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */
84
85/* How many Tx Descriptors do we need to call netif_wake_queue ? */
86#define IGBVF_TX_QUEUE_WAKE 32
87/* How many Rx Buffers do we bundle into one write to the hardware ? */
88#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
89
90#define AUTO_ALL_MODES 0
91#define IGBVF_EEPROM_APME 0x0400
92
93#define IGBVF_MNG_VLAN_NONE (-1)
94
95/* Number of packet split data buffers (not including the header buffer) */
96#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
97
98enum igbvf_boards {
99 board_vf,
100};
101
102struct igbvf_queue_stats {
103 u64 packets;
104 u64 bytes;
105};
106
107/*
108 * wrappers around a pointer to a socket buffer,
109 * so a DMA handle can be stored along with the buffer
110 */
111struct igbvf_buffer {
112 dma_addr_t dma;
113 struct sk_buff *skb;
114 union {
115 /* Tx */
116 struct {
117 unsigned long time_stamp;
118 u16 length;
119 u16 next_to_watch;
120 };
121 /* Rx */
122 struct {
123 struct page *page;
124 u64 page_dma;
125 unsigned int page_offset;
126 };
127 };
128 struct page *page;
129};
130
131union igbvf_desc {
132 union e1000_adv_rx_desc rx_desc;
133 union e1000_adv_tx_desc tx_desc;
134 struct e1000_adv_tx_context_desc tx_context_desc;
135};
136
137struct igbvf_ring {
138 struct igbvf_adapter *adapter; /* backlink */
139 union igbvf_desc *desc; /* pointer to ring memory */
140 dma_addr_t dma; /* phys address of ring */
141 unsigned int size; /* length of ring in bytes */
142 unsigned int count; /* number of desc. in ring */
143
144 u16 next_to_use;
145 u16 next_to_clean;
146
147 u16 head;
148 u16 tail;
149
150 /* array of buffer information structs */
151 struct igbvf_buffer *buffer_info;
152 struct napi_struct napi;
153
154 char name[IFNAMSIZ + 5];
155 u32 eims_value;
156 u32 itr_val;
157 u16 itr_register;
158 int set_itr;
159
160 struct sk_buff *rx_skb_top;
161
162 struct igbvf_queue_stats stats;
163};
164
165/* board specific private data structure */
166struct igbvf_adapter {
167 struct timer_list watchdog_timer;
168 struct timer_list blink_timer;
169
170 struct work_struct reset_task;
171 struct work_struct watchdog_task;
172
173 const struct igbvf_info *ei;
174
175 struct vlan_group *vlgrp;
176 u32 bd_number;
177 u32 rx_buffer_len;
178 u32 polling_interval;
179 u16 mng_vlan_id;
180 u16 link_speed;
181 u16 link_duplex;
182
183 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
184
185 /* track device up/down/testing state */
186 unsigned long state;
187
188 /* Interrupt Throttle Rate */
189 u32 itr;
190 u32 itr_setting;
191 u16 tx_itr;
192 u16 rx_itr;
193
194 /*
195 * Tx
196 */
197 struct igbvf_ring *tx_ring /* One per active queue */
198 ____cacheline_aligned_in_smp;
199
200 unsigned long tx_queue_len;
201 unsigned int restart_queue;
202 u32 txd_cmd;
203
204 bool detect_tx_hung;
205 u8 tx_timeout_factor;
206
207 u32 tx_int_delay;
208 u32 tx_abs_int_delay;
209
210 unsigned int total_tx_bytes;
211 unsigned int total_tx_packets;
212 unsigned int total_rx_bytes;
213 unsigned int total_rx_packets;
214
215 /* Tx stats */
216 u32 tx_timeout_count;
217 u32 tx_fifo_head;
218 u32 tx_head_addr;
219 u32 tx_fifo_size;
220 u32 tx_dma_failed;
221
222 /*
223 * Rx
224 */
225 struct igbvf_ring *rx_ring;
226
227 u32 rx_int_delay;
228 u32 rx_abs_int_delay;
229
230 /* Rx stats */
231 u64 hw_csum_err;
232 u64 hw_csum_good;
233 u64 rx_hdr_split;
234 u32 alloc_rx_buff_failed;
235 u32 rx_dma_failed;
236
237 unsigned int rx_ps_hdr_size;
238 u32 max_frame_size;
239 u32 min_frame_size;
240
241 /* OS defined structs */
242 struct net_device *netdev;
243 struct pci_dev *pdev;
244 struct net_device_stats net_stats;
245 spinlock_t stats_lock; /* prevent concurrent stats updates */
246
247 /* structs defined in e1000_hw.h */
248 struct e1000_hw hw;
249
250 /* The VF counters don't clear on read so we have to get a base
251 * count on driver start up and always subtract that base
252 * on the first update, thus the flag.
253 */
254 struct e1000_vf_stats stats;
255 u64 zero_base;
256
257 struct igbvf_ring test_tx_ring;
258 struct igbvf_ring test_rx_ring;
259 u32 test_icr;
260
261 u32 msg_enable;
262 struct msix_entry *msix_entries;
263 int int_mode;
264 u32 eims_enable_mask;
265 u32 eims_other;
266 u32 int_counter0;
267 u32 int_counter1;
268
269 u32 eeprom_wol;
270 u32 wol;
271 u32 pba;
272
273 bool fc_autoneg;
274
275 unsigned long led_status;
276
277 unsigned int flags;
278};
279
280struct igbvf_info {
281 enum e1000_mac_type mac;
282 unsigned int flags;
283 u32 pba;
284 void (*init_ops)(struct e1000_hw *);
285 s32 (*get_variants)(struct igbvf_adapter *);
286};
287
288/* hardware capability, feature, and workaround flags */
289#define FLAG_HAS_HW_VLAN_FILTER (1 << 0)
290#define FLAG_HAS_JUMBO_FRAMES (1 << 1)
291#define FLAG_MSI_ENABLED (1 << 2)
292#define FLAG_RX_CSUM_ENABLED (1 << 3)
293#define FLAG_TSO_FORCE (1 << 4)
294
295#define IGBVF_RX_DESC_ADV(R, i) \
296 (&((((R).desc))[i].rx_desc))
297#define IGBVF_TX_DESC_ADV(R, i) \
298 (&((((R).desc))[i].tx_desc))
299#define IGBVF_TX_CTXTDESC_ADV(R, i) \
300 (&((((R).desc))[i].tx_context_desc))
301
302enum igbvf_state_t {
303 __IGBVF_TESTING,
304 __IGBVF_RESETTING,
305 __IGBVF_DOWN
306};
307
308enum latency_range {
309 lowest_latency = 0,
310 low_latency = 1,
311 bulk_latency = 2,
312 latency_invalid = 255
313};
314
315extern char igbvf_driver_name[];
316extern const char igbvf_driver_version[];
317
318extern void igbvf_check_options(struct igbvf_adapter *);
319extern void igbvf_set_ethtool_ops(struct net_device *);
320
321extern int igbvf_up(struct igbvf_adapter *);
322extern void igbvf_down(struct igbvf_adapter *);
323extern void igbvf_reinit_locked(struct igbvf_adapter *);
324extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
325extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
326extern void igbvf_free_rx_resources(struct igbvf_ring *);
327extern void igbvf_free_tx_resources(struct igbvf_ring *);
328extern void igbvf_update_stats(struct igbvf_adapter *);
329
330extern unsigned int copybreak;
331
332#endif /* _IGBVF_H_ */
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
new file mode 100644
index 000000000000..819a8ec901dc
--- /dev/null
+++ b/drivers/net/igbvf/mbx.c
@@ -0,0 +1,350 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "mbx.h"
29
30/**
31 * e1000_poll_for_msg - Wait for message notification
32 * @hw: pointer to the HW structure
33 *
34 * returns SUCCESS if it successfully received a message notification
35 **/
36static s32 e1000_poll_for_msg(struct e1000_hw *hw)
37{
38 struct e1000_mbx_info *mbx = &hw->mbx;
39 int countdown = mbx->timeout;
40
41 if (!mbx->ops.check_for_msg)
42 goto out;
43
44 while (countdown && mbx->ops.check_for_msg(hw)) {
45 countdown--;
46 udelay(mbx->usec_delay);
47 }
48
49 /* if we failed, all future posted messages fail until reset */
50 if (!countdown)
51 mbx->timeout = 0;
52out:
53 return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
54}
55
56/**
57 * e1000_poll_for_ack - Wait for message acknowledgement
58 * @hw: pointer to the HW structure
59 *
60 * returns SUCCESS if it successfully received a message acknowledgement
61 **/
62static s32 e1000_poll_for_ack(struct e1000_hw *hw)
63{
64 struct e1000_mbx_info *mbx = &hw->mbx;
65 int countdown = mbx->timeout;
66
67 if (!mbx->ops.check_for_ack)
68 goto out;
69
70 while (countdown && mbx->ops.check_for_ack(hw)) {
71 countdown--;
72 udelay(mbx->usec_delay);
73 }
74
75 /* if we failed, all future posted messages fail until reset */
76 if (!countdown)
77 mbx->timeout = 0;
78out:
79 return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
80}
81
82/**
83 * e1000_read_posted_mbx - Wait for message notification and receive message
84 * @hw: pointer to the HW structure
85 * @msg: The message buffer
86 * @size: Length of buffer
87 *
88 * returns SUCCESS if it successfully received a message notification and
89 * copied it into the receive buffer.
90 **/
91static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
92{
93 struct e1000_mbx_info *mbx = &hw->mbx;
94 s32 ret_val = -E1000_ERR_MBX;
95
96 if (!mbx->ops.read)
97 goto out;
98
99 ret_val = e1000_poll_for_msg(hw);
100
101 /* if a message was received, read it; otherwise we timed out */
102 if (!ret_val)
103 ret_val = mbx->ops.read(hw, msg, size);
104out:
105 return ret_val;
106}
107
108/**
109 * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
110 * @hw: pointer to the HW structure
111 * @msg: The message buffer
112 * @size: Length of buffer
113 *
114 * returns SUCCESS if it successfully copied message into the buffer and
115 * received an ack to that message within delay * timeout period
116 **/
117static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
118{
119 struct e1000_mbx_info *mbx = &hw->mbx;
120 s32 ret_val = -E1000_ERR_MBX;
121
122 /* exit if we either can't write or there isn't a defined timeout */
123 if (!mbx->ops.write || !mbx->timeout)
124 goto out;
125
126 /* send msg*/
127 ret_val = mbx->ops.write(hw, msg, size);
128
129 /* if msg sent wait until we receive an ack */
130 if (!ret_val)
131 ret_val = e1000_poll_for_ack(hw);
132out:
133 return ret_val;
134}
135
136/**
137 * e1000_read_v2p_mailbox - read v2p mailbox
138 * @hw: pointer to the HW structure
139 *
140 * This function is used to read the v2p mailbox without losing the read to
141 * clear status bits.
142 **/
143static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
144{
145 u32 v2p_mailbox = er32(V2PMAILBOX(0));
146
147 v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
148 hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
149
150 return v2p_mailbox;
151}
152
153/**
154 * e1000_check_for_bit_vf - Determine if a status bit was set
155 * @hw: pointer to the HW structure
156 * @mask: bitmask for bits to be tested and cleared
157 *
158 * This function is used to check for the read to clear bits within
159 * the V2P mailbox.
160 **/
161static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
162{
163 u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
164 s32 ret_val = -E1000_ERR_MBX;
165
166 if (v2p_mailbox & mask)
167 ret_val = E1000_SUCCESS;
168
169 hw->dev_spec.vf.v2p_mailbox &= ~mask;
170
171 return ret_val;
172}
173
174/**
175 * e1000_check_for_msg_vf - checks to see if the PF has sent mail
176 * @hw: pointer to the HW structure
177 *
178 * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
179 **/
180static s32 e1000_check_for_msg_vf(struct e1000_hw *hw)
181{
182 s32 ret_val = -E1000_ERR_MBX;
183
184 if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
185 ret_val = E1000_SUCCESS;
186 hw->mbx.stats.reqs++;
187 }
188
189 return ret_val;
190}
191
192/**
193 * e1000_check_for_ack_vf - checks to see if the PF has ACK'd
194 * @hw: pointer to the HW structure
195 *
196 * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
197 **/
198static s32 e1000_check_for_ack_vf(struct e1000_hw *hw)
199{
200 s32 ret_val = -E1000_ERR_MBX;
201
202 if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
203 ret_val = E1000_SUCCESS;
204 hw->mbx.stats.acks++;
205 }
206
207 return ret_val;
208}
209
210/**
211 * e1000_check_for_rst_vf - checks to see if the PF has reset
212 * @hw: pointer to the HW structure
213 *
214 * returns SUCCESS if the PF has set the reset bits or else ERR_MBX
215 **/
216static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
217{
218 s32 ret_val = -E1000_ERR_MBX;
219
220 if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
221 E1000_V2PMAILBOX_RSTI))) {
222 ret_val = E1000_SUCCESS;
223 hw->mbx.stats.rsts++;
224 }
225
226 return ret_val;
227}
228
229/**
230 * e1000_obtain_mbx_lock_vf - obtain mailbox lock
231 * @hw: pointer to the HW structure
232 *
233 * return SUCCESS if we obtained the mailbox lock
234 **/
235static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
236{
237 s32 ret_val = -E1000_ERR_MBX;
238
239 /* Take ownership of the buffer */
240 ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
241
242 /* reserve mailbox for vf use */
243 if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
244 ret_val = E1000_SUCCESS;
245
246 return ret_val;
247}
248
249/**
250 * e1000_write_mbx_vf - Write a message to the mailbox
251 * @hw: pointer to the HW structure
252 * @msg: The message buffer
253 * @size: Length of buffer
254 *
255 * returns SUCCESS if it successfully copied message into the buffer
256 **/
257static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
258{
259 s32 err;
260 u16 i;
261
262 /* lock the mailbox to prevent pf/vf race condition */
263 err = e1000_obtain_mbx_lock_vf(hw);
264 if (err)
265 goto out_no_write;
266
267 /* flush any ack or msg as we are going to overwrite mailbox */
268 e1000_check_for_ack_vf(hw);
269 e1000_check_for_msg_vf(hw);
270
271 /* copy the caller specified message to the mailbox memory buffer */
272 for (i = 0; i < size; i++)
273 array_ew32(VMBMEM(0), i, msg[i]);
274
275 /* update stats */
276 hw->mbx.stats.msgs_tx++;
277
278 /* Drop VFU and interrupt the PF to tell it a message has been sent */
279 ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
280
281out_no_write:
282 return err;
283}
284
285/**
286 * e1000_read_mbx_vf - Reads a message from the inbox intended for vf
287 * @hw: pointer to the HW structure
288 * @msg: The message buffer
289 * @size: Length of buffer
290 *
291 * returns SUCCESS if it successfully read the message from the buffer
292 **/
293static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
294{
295 s32 err;
296 u16 i;
297
298 /* lock the mailbox to prevent pf/vf race condition */
299 err = e1000_obtain_mbx_lock_vf(hw);
300 if (err)
301 goto out_no_read;
302
303 /* copy the message from the mailbox memory buffer */
304 for (i = 0; i < size; i++)
305 msg[i] = array_er32(VMBMEM(0), i);
306
307 /* Acknowledge receipt and release mailbox, then we're done */
308 ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
309
310 /* update stats */
311 hw->mbx.stats.msgs_rx++;
312
313out_no_read:
314 return err;
315}
316
317/**
318 * e1000_init_mbx_params_vf - set initial values for vf mailbox
319 * @hw: pointer to the HW structure
320 *
321 * Initializes the hw->mbx struct to correct values for vf mailbox
322 */
323s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
324{
325 struct e1000_mbx_info *mbx = &hw->mbx;
326
327 /* start mailbox as timed out and let the reset_hw call set the timeout
328 * value to begin communications */
329 mbx->timeout = 0;
330 mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
331
332 mbx->size = E1000_VFMAILBOX_SIZE;
333
334 mbx->ops.read = e1000_read_mbx_vf;
335 mbx->ops.write = e1000_write_mbx_vf;
336 mbx->ops.read_posted = e1000_read_posted_mbx;
337 mbx->ops.write_posted = e1000_write_posted_mbx;
338 mbx->ops.check_for_msg = e1000_check_for_msg_vf;
339 mbx->ops.check_for_ack = e1000_check_for_ack_vf;
340 mbx->ops.check_for_rst = e1000_check_for_rst_vf;
341
342 mbx->stats.msgs_tx = 0;
343 mbx->stats.msgs_rx = 0;
344 mbx->stats.reqs = 0;
345 mbx->stats.acks = 0;
346 mbx->stats.rsts = 0;
347
348 return E1000_SUCCESS;
349}
350
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
new file mode 100644
index 000000000000..4938609dbfb5
--- /dev/null
+++ b/drivers/net/igbvf/mbx.h
@@ -0,0 +1,75 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_MBX_H_
29#define _E1000_MBX_H_
30
31#include "vf.h"
32
33#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
34#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
35#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
36#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
37#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
38#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
39#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
40#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
41#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
42
43#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
44
45/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
46 * PF. The reverse is true if it is E1000_PF_*.
47 * Message ACKs are the value OR'd with 0xF0000000
48 */
49#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
50 * this are the ACK */
51#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
52 * this are the NACK */
53#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
54 * clear to send requests */
55
56/* We have a total wait time of 1s for vf mailbox posted messages */
57#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */
58#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
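
The arithmetic behind the 1 s figure: the poll loops in mbx.c retry up to mbx->timeout times with udelay(mbx->usec_delay) between attempts, and with the timeout eventually set from E1000_VF_MBX_INIT_TIMEOUT (it starts at 0 here and is raised elsewhere, per the comment in e1000_init_mbx_params_vf()), that is 2000 retries * 500 usec = 1,000,000 usec = 1 second of total wait.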
59
60#define E1000_VT_MSGINFO_SHIFT 16
61/* bits 23:16 are used for extra info for certain messages */
62#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
63
64#define E1000_VF_RESET 0x01 /* VF requests reset */
65#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
66#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
67#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
68#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
69
70#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
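
Combining the request IDs above with the ACK/NACK bits, a VF-side request/reply exchange over the posted mailbox ops installed in mbx.c would look roughly like the sketch below; msgbuf, addr and the empty branches are illustrative and not taken from this patch:

	u32 msgbuf[3];

	msgbuf[0] = E1000_VF_SET_MAC_ADDR;
	memcpy(&msgbuf[1], addr, 6);	/* addr: hypothetical u8[6] MAC address */

	if (!hw->mbx.ops.write_posted(hw, msgbuf, 3) &&
	    !hw->mbx.ops.read_posted(hw, msgbuf, 3)) {
		if (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_ACK))
			;	/* PF accepted the request */
		else if (msgbuf[0] & E1000_VT_MSGTYPE_NACK)
			;	/* PF rejected the request */
	}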
71
72void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
73s32 e1000_init_mbx_params_vf(struct e1000_hw *);
74
75#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
new file mode 100644
index 000000000000..b774666ad3cf
--- /dev/null
+++ b/drivers/net/igbvf/netdev.c
@@ -0,0 +1,2922 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/vmalloc.h>
33#include <linux/pagemap.h>
34#include <linux/delay.h>
35#include <linux/netdevice.h>
36#include <linux/tcp.h>
37#include <linux/ipv6.h>
38#include <net/checksum.h>
39#include <net/ip6_checksum.h>
40#include <linux/mii.h>
41#include <linux/ethtool.h>
42#include <linux/if_vlan.h>
43#include <linux/pm_qos_params.h>
44
45#include "igbvf.h"
46
47#define DRV_VERSION "1.0.0-k0"
48char igbvf_driver_name[] = "igbvf";
49const char igbvf_driver_version[] = DRV_VERSION;
50static const char igbvf_driver_string[] =
51 "Intel(R) Virtual Function Network Driver";
52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
53
54static int igbvf_poll(struct napi_struct *napi, int budget);
55static void igbvf_reset(struct igbvf_adapter *);
56static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
57static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
58
59static struct igbvf_info igbvf_vf_info = {
60 .mac = e1000_vfadapt,
61 .flags = FLAG_HAS_JUMBO_FRAMES
62 | FLAG_RX_CSUM_ENABLED,
63 .pba = 10,
64 .init_ops = e1000_init_function_pointers_vf,
65};
66
67static const struct igbvf_info *igbvf_info_tbl[] = {
68 [board_vf] = &igbvf_vf_info,
69};
70
71/**
72 * igbvf_desc_unused - calculate if we have unused descriptors
73 **/
74static int igbvf_desc_unused(struct igbvf_ring *ring)
75{
76 if (ring->next_to_clean > ring->next_to_use)
77 return ring->next_to_clean - ring->next_to_use - 1;
78
79 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
80}
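
A concrete reading of the helper above: with ring->count = 256, next_to_clean = 10 and next_to_use = 250, the wrap-around branch yields 256 + 10 - 250 - 1 = 15 unused descriptors; the trailing -1 reflects the one slot the driver always leaves empty so that next_to_use == next_to_clean unambiguously means an empty ring rather than a full one.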
81
82/**
83 * igbvf_receive_skb - helper function to handle Rx indications
84 * @adapter: board private structure
85 * @status: descriptor status field as written by hardware
86 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
87 * @skb: pointer to sk_buff to be indicated to stack
88 **/
89static void igbvf_receive_skb(struct igbvf_adapter *adapter,
90 struct net_device *netdev,
91 struct sk_buff *skb,
92 u32 status, u16 vlan)
93{
94 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
95 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
96 le16_to_cpu(vlan) &
97 E1000_RXD_SPC_VLAN_MASK);
98 else
99 netif_receive_skb(skb);
100
101 netdev->last_rx = jiffies;
102}
103
104static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
105 u32 status_err, struct sk_buff *skb)
106{
107 skb->ip_summed = CHECKSUM_NONE;
108
109 /* The Ignore Checksum bit is set or checksum is disabled through ethtool */
110 if ((status_err & E1000_RXD_STAT_IXSM))
111 return;
112 /* TCP/UDP checksum error bit is set */
113 if (status_err &
114 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
115 /* let the stack verify checksum errors */
116 adapter->hw_csum_err++;
117 return;
118 }
119 /* It must be a TCP or UDP packet with a valid checksum */
120 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
121 skb->ip_summed = CHECKSUM_UNNECESSARY;
122
123 adapter->hw_csum_good++;
124}
125
126/**
127 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
128 * @rx_ring: address of ring structure to repopulate
129 * @cleaned_count: number of buffers to repopulate
130 **/
131static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
132 int cleaned_count)
133{
134 struct igbvf_adapter *adapter = rx_ring->adapter;
135 struct net_device *netdev = adapter->netdev;
136 struct pci_dev *pdev = adapter->pdev;
137 union e1000_adv_rx_desc *rx_desc;
138 struct igbvf_buffer *buffer_info;
139 struct sk_buff *skb;
140 unsigned int i;
141 int bufsz;
142
143 i = rx_ring->next_to_use;
144 buffer_info = &rx_ring->buffer_info[i];
145
146 if (adapter->rx_ps_hdr_size)
147 bufsz = adapter->rx_ps_hdr_size;
148 else
149 bufsz = adapter->rx_buffer_len;
150 bufsz += NET_IP_ALIGN;
151
152 while (cleaned_count--) {
153 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
154
155 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
156 if (!buffer_info->page) {
157 buffer_info->page = alloc_page(GFP_ATOMIC);
158 if (!buffer_info->page) {
159 adapter->alloc_rx_buff_failed++;
160 goto no_buffers;
161 }
162 buffer_info->page_offset = 0;
163 } else {
164 buffer_info->page_offset ^= PAGE_SIZE / 2;
165 }
166 buffer_info->page_dma =
167 pci_map_page(pdev, buffer_info->page,
168 buffer_info->page_offset,
169 PAGE_SIZE / 2,
170 PCI_DMA_FROMDEVICE);
171 }
172
173 if (!buffer_info->skb) {
174 skb = netdev_alloc_skb(netdev, bufsz);
175 if (!skb) {
176 adapter->alloc_rx_buff_failed++;
177 goto no_buffers;
178 }
179
180 /* Make buffer alignment 2 beyond a 16 byte boundary;
181 * this will result in a 16 byte aligned IP header after
182 * the 14 byte MAC header is removed
183 */
184 skb_reserve(skb, NET_IP_ALIGN);
185
186 buffer_info->skb = skb;
187 buffer_info->dma = pci_map_single(pdev, skb->data,
188 bufsz,
189 PCI_DMA_FROMDEVICE);
190 }
191 /* Refresh the desc even if buffer_addrs didn't change because
192 * each write-back erases this info. */
193 if (adapter->rx_ps_hdr_size) {
194 rx_desc->read.pkt_addr =
195 cpu_to_le64(buffer_info->page_dma);
196 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
197 } else {
198 rx_desc->read.pkt_addr =
199 cpu_to_le64(buffer_info->dma);
200 rx_desc->read.hdr_addr = 0;
201 }
202
203 i++;
204 if (i == rx_ring->count)
205 i = 0;
206 buffer_info = &rx_ring->buffer_info[i];
207 }
208
209no_buffers:
210 if (rx_ring->next_to_use != i) {
211 rx_ring->next_to_use = i;
212 if (i == 0)
213 i = (rx_ring->count - 1);
214 else
215 i--;
216
217 /* Force memory writes to complete before letting h/w
218 * know there are new descriptors to fetch. (Only
219 * applicable for weak-ordered memory model archs,
220 * such as IA-64). */
221 wmb();
222 writel(i, adapter->hw.hw_addr + rx_ring->tail);
223 }
224}
225
226/**
227 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
228 * @adapter: board private structure
229 *
230 * the return value indicates whether actual cleaning was done; there
231 * is no guarantee that everything was cleaned
232 **/
233static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
234 int *work_done, int work_to_do)
235{
236 struct igbvf_ring *rx_ring = adapter->rx_ring;
237 struct net_device *netdev = adapter->netdev;
238 struct pci_dev *pdev = adapter->pdev;
239 union e1000_adv_rx_desc *rx_desc, *next_rxd;
240 struct igbvf_buffer *buffer_info, *next_buffer;
241 struct sk_buff *skb;
242 bool cleaned = false;
243 int cleaned_count = 0;
244 unsigned int total_bytes = 0, total_packets = 0;
245 unsigned int i;
246 u32 length, hlen, staterr;
247
248 i = rx_ring->next_to_clean;
249 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
250 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
251
252 while (staterr & E1000_RXD_STAT_DD) {
253 if (*work_done >= work_to_do)
254 break;
255 (*work_done)++;
256
257 buffer_info = &rx_ring->buffer_info[i];
258
259 /* HW will not DMA in data larger than the given buffer, even
260 * if it parses the (NFS, of course) header to be larger. In
261 * that case, it fills the header buffer and spills the rest
262 * into the page.
263 */
264 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
265 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
266 if (hlen > adapter->rx_ps_hdr_size)
267 hlen = adapter->rx_ps_hdr_size;
268
269 length = le16_to_cpu(rx_desc->wb.upper.length);
270 cleaned = true;
271 cleaned_count++;
272
273 skb = buffer_info->skb;
274 prefetch(skb->data - NET_IP_ALIGN);
275 buffer_info->skb = NULL;
276 if (!adapter->rx_ps_hdr_size) {
277 pci_unmap_single(pdev, buffer_info->dma,
278 adapter->rx_buffer_len,
279 PCI_DMA_FROMDEVICE);
280 buffer_info->dma = 0;
281 skb_put(skb, length);
282 goto send_up;
283 }
284
285 if (!skb_shinfo(skb)->nr_frags) {
286 pci_unmap_single(pdev, buffer_info->dma,
287 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
288 PCI_DMA_FROMDEVICE);
289 skb_put(skb, hlen);
290 }
291
292 if (length) {
293 pci_unmap_page(pdev, buffer_info->page_dma,
294 PAGE_SIZE / 2,
295 PCI_DMA_FROMDEVICE);
296 buffer_info->page_dma = 0;
297
298 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
299 buffer_info->page,
300 buffer_info->page_offset,
301 length);
302
303 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
304 (page_count(buffer_info->page) != 1))
305 buffer_info->page = NULL;
306 else
307 get_page(buffer_info->page);
308
309 skb->len += length;
310 skb->data_len += length;
311 skb->truesize += length;
312 }
313send_up:
314 i++;
315 if (i == rx_ring->count)
316 i = 0;
317 next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
318 prefetch(next_rxd);
319 next_buffer = &rx_ring->buffer_info[i];
320
321 if (!(staterr & E1000_RXD_STAT_EOP)) {
322 buffer_info->skb = next_buffer->skb;
323 buffer_info->dma = next_buffer->dma;
324 next_buffer->skb = skb;
325 next_buffer->dma = 0;
326 goto next_desc;
327 }
328
329 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
330 dev_kfree_skb_irq(skb);
331 goto next_desc;
332 }
333
334 total_bytes += skb->len;
335 total_packets++;
336
337 igbvf_rx_checksum_adv(adapter, staterr, skb);
338
339 skb->protocol = eth_type_trans(skb, netdev);
340
341 igbvf_receive_skb(adapter, netdev, skb, staterr,
342 rx_desc->wb.upper.vlan);
343
344 netdev->last_rx = jiffies;
345
346next_desc:
347 rx_desc->wb.upper.status_error = 0;
348
349 /* return some buffers to hardware, one at a time is too slow */
350 if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
351 igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
352 cleaned_count = 0;
353 }
354
355 /* use prefetched values */
356 rx_desc = next_rxd;
357 buffer_info = next_buffer;
358
359 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
360 }
361
362 rx_ring->next_to_clean = i;
363 cleaned_count = igbvf_desc_unused(rx_ring);
364
365 if (cleaned_count)
366 igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
367
368 adapter->total_rx_packets += total_packets;
369 adapter->total_rx_bytes += total_bytes;
370 adapter->net_stats.rx_bytes += total_bytes;
371 adapter->net_stats.rx_packets += total_packets;
372 return cleaned;
373}
374
375static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
376 struct igbvf_buffer *buffer_info)
377{
378 buffer_info->dma = 0;
379 if (buffer_info->skb) {
380 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
381 DMA_TO_DEVICE);
382 dev_kfree_skb_any(buffer_info->skb);
383 buffer_info->skb = NULL;
384 }
385 buffer_info->time_stamp = 0;
386}
387
388static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
389{
390 struct igbvf_ring *tx_ring = adapter->tx_ring;
391 unsigned int i = tx_ring->next_to_clean;
392 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
393 union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
394
395 /* detected Tx unit hang */
396 dev_err(&adapter->pdev->dev,
397 "Detected Tx Unit Hang:\n"
398 " TDH <%x>\n"
399 " TDT <%x>\n"
400 " next_to_use <%x>\n"
401 " next_to_clean <%x>\n"
402 "buffer_info[next_to_clean]:\n"
403 " time_stamp <%lx>\n"
404 " next_to_watch <%x>\n"
405 " jiffies <%lx>\n"
406 " next_to_watch.status <%x>\n",
407 readl(adapter->hw.hw_addr + tx_ring->head),
408 readl(adapter->hw.hw_addr + tx_ring->tail),
409 tx_ring->next_to_use,
410 tx_ring->next_to_clean,
411 tx_ring->buffer_info[eop].time_stamp,
412 eop,
413 jiffies,
414 eop_desc->wb.status);
415}
416
417/**
418 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
419 * @adapter: board private structure
420 *
421 * Return 0 on success, negative on failure
422 **/
423int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
424 struct igbvf_ring *tx_ring)
425{
426 struct pci_dev *pdev = adapter->pdev;
427 int size;
428
429 size = sizeof(struct igbvf_buffer) * tx_ring->count;
430 tx_ring->buffer_info = vmalloc(size);
431 if (!tx_ring->buffer_info)
432 goto err;
433 memset(tx_ring->buffer_info, 0, size);
434
435 /* round up to nearest 4K */
436 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
437 tx_ring->size = ALIGN(tx_ring->size, 4096);
438
439 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
440 &tx_ring->dma);
441
442 if (!tx_ring->desc)
443 goto err;
444
445 tx_ring->adapter = adapter;
446 tx_ring->next_to_use = 0;
447 tx_ring->next_to_clean = 0;
448
449 return 0;
450err:
451 vfree(tx_ring->buffer_info);
452 dev_err(&adapter->pdev->dev,
453 "Unable to allocate memory for the transmit descriptor ring\n");
454 return -ENOMEM;
455}
456
457/**
458 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
459 * @adapter: board private structure
460 *
461 * Returns 0 on success, negative on failure
462 **/
463int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
464 struct igbvf_ring *rx_ring)
465{
466 struct pci_dev *pdev = adapter->pdev;
467 int size, desc_len;
468
469 size = sizeof(struct igbvf_buffer) * rx_ring->count;
470 rx_ring->buffer_info = vmalloc(size);
471 if (!rx_ring->buffer_info)
472 goto err;
473 memset(rx_ring->buffer_info, 0, size);
474
475 desc_len = sizeof(union e1000_adv_rx_desc);
476
477 /* Round up to nearest 4K */
478 rx_ring->size = rx_ring->count * desc_len;
479 rx_ring->size = ALIGN(rx_ring->size, 4096);
480
481 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
482 &rx_ring->dma);
483
484 if (!rx_ring->desc)
485 goto err;
486
487 rx_ring->next_to_clean = 0;
488 rx_ring->next_to_use = 0;
489
490 rx_ring->adapter = adapter;
491
492 return 0;
493
494err:
495 vfree(rx_ring->buffer_info);
496 rx_ring->buffer_info = NULL;
497 dev_err(&adapter->pdev->dev,
498 "Unable to allocate memory for the receive descriptor ring\n");
499 return -ENOMEM;
500}
501
502/**
503 * igbvf_clean_tx_ring - Free Tx Buffers
504 * @tx_ring: ring to be cleaned
505 **/
506static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
507{
508 struct igbvf_adapter *adapter = tx_ring->adapter;
509 struct igbvf_buffer *buffer_info;
510 unsigned long size;
511 unsigned int i;
512
513 if (!tx_ring->buffer_info)
514 return;
515
516 /* Free all the Tx ring sk_buffs */
517 for (i = 0; i < tx_ring->count; i++) {
518 buffer_info = &tx_ring->buffer_info[i];
519 igbvf_put_txbuf(adapter, buffer_info);
520 }
521
522 size = sizeof(struct igbvf_buffer) * tx_ring->count;
523 memset(tx_ring->buffer_info, 0, size);
524
525 /* Zero out the descriptor ring */
526 memset(tx_ring->desc, 0, tx_ring->size);
527
528 tx_ring->next_to_use = 0;
529 tx_ring->next_to_clean = 0;
530
531 writel(0, adapter->hw.hw_addr + tx_ring->head);
532 writel(0, adapter->hw.hw_addr + tx_ring->tail);
533}
534
535/**
536 * igbvf_free_tx_resources - Free Tx Resources per Queue
537 * @tx_ring: ring to free resources from
538 *
539 * Free all transmit software resources
540 **/
541void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
542{
543 struct pci_dev *pdev = tx_ring->adapter->pdev;
544
545 igbvf_clean_tx_ring(tx_ring);
546
547 vfree(tx_ring->buffer_info);
548 tx_ring->buffer_info = NULL;
549
550 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
551
552 tx_ring->desc = NULL;
553}
554
555/**
556 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
557 * @rx_ring: ring structure pointer to free receive buffers from
558 **/
559static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
560{
561 struct igbvf_adapter *adapter = rx_ring->adapter;
562 struct igbvf_buffer *buffer_info;
563 struct pci_dev *pdev = adapter->pdev;
564 unsigned long size;
565 unsigned int i;
566
567 if (!rx_ring->buffer_info)
568 return;
569
570 /* Free all the Rx ring sk_buffs */
571 for (i = 0; i < rx_ring->count; i++) {
572 buffer_info = &rx_ring->buffer_info[i];
573 if (buffer_info->dma) {
574 if (adapter->rx_ps_hdr_size){
575 pci_unmap_single(pdev, buffer_info->dma,
576 adapter->rx_ps_hdr_size,
577 PCI_DMA_FROMDEVICE);
578 } else {
579 pci_unmap_single(pdev, buffer_info->dma,
580 adapter->rx_buffer_len,
581 PCI_DMA_FROMDEVICE);
582 }
583 buffer_info->dma = 0;
584 }
585
586 if (buffer_info->skb) {
587 dev_kfree_skb(buffer_info->skb);
588 buffer_info->skb = NULL;
589 }
590
591 if (buffer_info->page) {
592 if (buffer_info->page_dma)
593 pci_unmap_page(pdev, buffer_info->page_dma,
594 PAGE_SIZE / 2,
595 PCI_DMA_FROMDEVICE);
596 put_page(buffer_info->page);
597 buffer_info->page = NULL;
598 buffer_info->page_dma = 0;
599 buffer_info->page_offset = 0;
600 }
601 }
602
603 size = sizeof(struct igbvf_buffer) * rx_ring->count;
604 memset(rx_ring->buffer_info, 0, size);
605
606 /* Zero out the descriptor ring */
607 memset(rx_ring->desc, 0, rx_ring->size);
608
609 rx_ring->next_to_clean = 0;
610 rx_ring->next_to_use = 0;
611
612 writel(0, adapter->hw.hw_addr + rx_ring->head);
613 writel(0, adapter->hw.hw_addr + rx_ring->tail);
614}
615
616/**
617 * igbvf_free_rx_resources - Free Rx Resources
618 * @rx_ring: ring to clean the resources from
619 *
620 * Free all receive software resources
621 **/
622
623void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
624{
625 struct pci_dev *pdev = rx_ring->adapter->pdev;
626
627 igbvf_clean_rx_ring(rx_ring);
628
629 vfree(rx_ring->buffer_info);
630 rx_ring->buffer_info = NULL;
631
632 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
633 rx_ring->dma);
634 rx_ring->desc = NULL;
635}
636
637/**
638 * igbvf_update_itr - update the dynamic ITR value based on statistics
639 * @adapter: pointer to adapter
640 * @itr_setting: current adapter->itr
641 * @packets: the number of packets during this measurement interval
642 * @bytes: the number of bytes during this measurement interval
643 *
644 * Stores a new ITR value based on packets and byte
645 * counts during the last interrupt. The advantage of per interrupt
646 * computation is faster updates and more accurate ITR for the current
647 * traffic pattern. Constants in this function were computed
648 * based on theoretical maximum wire speed and thresholds were set based
649 * on testing data as well as attempting to minimize response time
650 * while increasing bulk throughput. This functionality is controlled
651 * by the InterruptThrottleRate module parameter.
652 **/
653static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
654 u16 itr_setting, int packets,
655 int bytes)
656{
657 unsigned int retval = itr_setting;
658
659 if (packets == 0)
660 goto update_itr_done;
661
662 switch (itr_setting) {
663 case lowest_latency:
664 /* handle TSO and jumbo frames */
665 if (bytes/packets > 8000)
666 retval = bulk_latency;
667 else if ((packets < 5) && (bytes > 512))
668 retval = low_latency;
669 break;
670 case low_latency: /* 50 usec aka 20000 ints/s */
671 if (bytes > 10000) {
672 /* this if handles the TSO accounting */
673 if (bytes/packets > 8000)
674 retval = bulk_latency;
675 else if ((packets < 10) || ((bytes/packets) > 1200))
676 retval = bulk_latency;
677 else if ((packets > 35))
678 retval = lowest_latency;
679 } else if (bytes/packets > 2000) {
680 retval = bulk_latency;
681 } else if (packets <= 2 && bytes < 512) {
682 retval = lowest_latency;
683 }
684 break;
685 case bulk_latency: /* 250 usec aka 4000 ints/s */
686 if (bytes > 25000) {
687 if (packets > 35)
688 retval = low_latency;
689 } else if (bytes < 6000) {
690 retval = low_latency;
691 }
692 break;
693 }
694
695update_itr_done:
696 return retval;
697}
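
Two short traces of the classifier above: from low_latency, an interval with 40 packets totalling 30,000 bytes has bytes > 10000, 750 bytes per packet (neither > 8000 nor > 1200) and more than 35 packets, so it moves down to lowest_latency; from the same state, 4 packets totalling 36,000 bytes (9,000 bytes per packet, typical of TSO or jumbo traffic) trips the bytes/packets > 8000 test and moves up to bulk_latency.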
698
699static void igbvf_set_itr(struct igbvf_adapter *adapter)
700{
701 struct e1000_hw *hw = &adapter->hw;
702 u16 current_itr;
703 u32 new_itr = adapter->itr;
704
705 adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
706 adapter->total_tx_packets,
707 adapter->total_tx_bytes);
708 /* conservative mode (itr 3) eliminates the lowest_latency setting */
709 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
710 adapter->tx_itr = low_latency;
711
712 adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
713 adapter->total_rx_packets,
714 adapter->total_rx_bytes);
715 /* conservative mode (itr 3) eliminates the lowest_latency setting */
716 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
717 adapter->rx_itr = low_latency;
718
719 current_itr = max(adapter->rx_itr, adapter->tx_itr);
720
721 switch (current_itr) {
722 /* counts and packets in update_itr are dependent on these numbers */
723 case lowest_latency:
724 new_itr = 70000;
725 break;
726 case low_latency:
727 new_itr = 20000; /* aka hwitr = ~200 */
728 break;
729 case bulk_latency:
730 new_itr = 4000;
731 break;
732 default:
733 break;
734 }
735
736 if (new_itr != adapter->itr) {
737 /*
738 * this attempts to bias the interrupt rate towards Bulk
739 * by adding intermediate steps when interrupt rate is
740 * increasing
741 */
742 new_itr = new_itr > adapter->itr ?
743 min(adapter->itr + (new_itr >> 2), new_itr) :
744 new_itr;
745 adapter->itr = new_itr;
746 adapter->rx_ring->itr_val = 1952;
747
748 if (adapter->msix_entries)
749 adapter->rx_ring->set_itr = 1;
750 else
751 ew32(ITR, 1952);
752 }
753}
754
755/**
756 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
757 * @tx_ring: ring structure whose descriptors are to be cleaned
758 * returns true if ring is completely cleaned
759 **/
760static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
761{
762 struct igbvf_adapter *adapter = tx_ring->adapter;
763 struct e1000_hw *hw = &adapter->hw;
764 struct net_device *netdev = adapter->netdev;
765 struct igbvf_buffer *buffer_info;
766 struct sk_buff *skb;
767 union e1000_adv_tx_desc *tx_desc, *eop_desc;
768 unsigned int total_bytes = 0, total_packets = 0;
769 unsigned int i, eop, count = 0;
770 bool cleaned = false;
771
772 i = tx_ring->next_to_clean;
773 eop = tx_ring->buffer_info[i].next_to_watch;
774 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
775
776 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
777 (count < tx_ring->count)) {
778 for (cleaned = false; !cleaned; count++) {
779 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
780 buffer_info = &tx_ring->buffer_info[i];
781 cleaned = (i == eop);
782 skb = buffer_info->skb;
783
784 if (skb) {
785 unsigned int segs, bytecount;
786
787 /* gso_segs is currently only valid for tcp */
788 segs = skb_shinfo(skb)->gso_segs ?: 1;
789 /* multiply data chunks by size of headers */
790 bytecount = ((segs - 1) * skb_headlen(skb)) +
791 skb->len;
792 total_packets += segs;
793 total_bytes += bytecount;
794 }
795
796 igbvf_put_txbuf(adapter, buffer_info);
797 tx_desc->wb.status = 0;
798
799 i++;
800 if (i == tx_ring->count)
801 i = 0;
802 }
803 eop = tx_ring->buffer_info[i].next_to_watch;
804 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
805 }
806
807 tx_ring->next_to_clean = i;
808
809 if (unlikely(count &&
810 netif_carrier_ok(netdev) &&
811 igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
812 /* Make sure that anybody stopping the queue after this
813 * sees the new next_to_clean.
814 */
815 smp_mb();
816 if (netif_queue_stopped(netdev) &&
817 !(test_bit(__IGBVF_DOWN, &adapter->state))) {
818 netif_wake_queue(netdev);
819 ++adapter->restart_queue;
820 }
821 }
822
823 if (adapter->detect_tx_hung) {
824 /* Detect a transmit hang in hardware; this serializes the
825 * check with the clearing of time_stamp and movement of i */
826 adapter->detect_tx_hung = false;
827 if (tx_ring->buffer_info[i].time_stamp &&
828 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
829 (adapter->tx_timeout_factor * HZ))
830 && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
831
832 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
833 /* detected Tx unit hang */
834 igbvf_print_tx_hang(adapter);
835
836 netif_stop_queue(netdev);
837 }
838 }
839 adapter->net_stats.tx_bytes += total_bytes;
840 adapter->net_stats.tx_packets += total_packets;
841 return (count < tx_ring->count);
842}
843
844static irqreturn_t igbvf_msix_other(int irq, void *data)
845{
846 struct net_device *netdev = data;
847 struct igbvf_adapter *adapter = netdev_priv(netdev);
848 struct e1000_hw *hw = &adapter->hw;
849
850 adapter->int_counter1++;
851
852 netif_carrier_off(netdev);
853 hw->mac.get_link_status = 1;
854 if (!test_bit(__IGBVF_DOWN, &adapter->state))
855 mod_timer(&adapter->watchdog_timer, jiffies + 1);
856
857 ew32(EIMS, adapter->eims_other);
858
859 return IRQ_HANDLED;
860}
861
862static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
863{
864 struct net_device *netdev = data;
865 struct igbvf_adapter *adapter = netdev_priv(netdev);
866 struct e1000_hw *hw = &adapter->hw;
867 struct igbvf_ring *tx_ring = adapter->tx_ring;
868
869
870 adapter->total_tx_bytes = 0;
871 adapter->total_tx_packets = 0;
872
873 /* auto mask will automatically reenable the interrupt when we write
874 * EICS */
875 if (!igbvf_clean_tx_irq(tx_ring))
876 /* Ring was not completely cleaned, so fire another interrupt */
877 ew32(EICS, tx_ring->eims_value);
878 else
879 ew32(EIMS, tx_ring->eims_value);
880
881 return IRQ_HANDLED;
882}
883
884static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
885{
886 struct net_device *netdev = data;
887 struct igbvf_adapter *adapter = netdev_priv(netdev);
888
889 adapter->int_counter0++;
890
891 /* Write the ITR value calculated at the end of the
892 * previous interrupt.
893 */
894 if (adapter->rx_ring->set_itr) {
895 writel(adapter->rx_ring->itr_val,
896 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
897 adapter->rx_ring->set_itr = 0;
898 }
899
900 if (napi_schedule_prep(&adapter->rx_ring->napi)) {
901 adapter->total_rx_bytes = 0;
902 adapter->total_rx_packets = 0;
903 __napi_schedule(&adapter->rx_ring->napi);
904 }
905
906 return IRQ_HANDLED;
907}
908
909#define IGBVF_NO_QUEUE -1
910
911static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
912 int tx_queue, int msix_vector)
913{
914 struct e1000_hw *hw = &adapter->hw;
915 u32 ivar, index;
916
917 /* 82576 uses a table-based method for assigning vectors.
918 Each queue has a single entry in the table to which we write
919 a vector number along with a "valid" bit. Sadly, the layout
920 of the table is somewhat counterintuitive. */
921 if (rx_queue > IGBVF_NO_QUEUE) {
922 index = (rx_queue >> 1);
923 ivar = array_er32(IVAR0, index);
924 if (rx_queue & 0x1) {
925 /* vector goes into third byte of register */
926 ivar = ivar & 0xFF00FFFF;
927 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
928 } else {
929 /* vector goes into low byte of register */
930 ivar = ivar & 0xFFFFFF00;
931 ivar |= msix_vector | E1000_IVAR_VALID;
932 }
933 adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
934 array_ew32(IVAR0, index, ivar);
935 }
936 if (tx_queue > IGBVF_NO_QUEUE) {
937 index = (tx_queue >> 1);
938 ivar = array_er32(IVAR0, index);
939 if (tx_queue & 0x1) {
940 /* vector goes into high byte of register */
941 ivar = ivar & 0x00FFFFFF;
942 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
943 } else {
944 /* vector goes into second byte of register */
945 ivar = ivar & 0xFFFF00FF;
946 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
947 }
948 adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
949 array_ew32(IVAR0, index, ivar);
950 }
951}
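
Concretely, for the single queue pair this driver uses (see igbvf_configure_msix() below): tx queue 0 lands in IVAR0 entry 0, second byte (bits 15:8), and rx queue 0 in the same entry's low byte (bits 7:0), each written as the MSI-X vector number OR'd with E1000_IVAR_VALID; an odd-numbered rx or tx queue would instead use the third (bits 23:16) or high (bits 31:24) byte of entry queue >> 1.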
952
953/**
954 * igbvf_configure_msix - Configure MSI-X hardware
955 *
956 * igbvf_configure_msix sets up the hardware to properly
957 * generate MSI-X interrupts.
958 **/
959static void igbvf_configure_msix(struct igbvf_adapter *adapter)
960{
961 u32 tmp;
962 struct e1000_hw *hw = &adapter->hw;
963 struct igbvf_ring *tx_ring = adapter->tx_ring;
964 struct igbvf_ring *rx_ring = adapter->rx_ring;
965 int vector = 0;
966
967 adapter->eims_enable_mask = 0;
968
969 igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
970 adapter->eims_enable_mask |= tx_ring->eims_value;
971 if (tx_ring->itr_val)
972 writel(tx_ring->itr_val,
973 hw->hw_addr + tx_ring->itr_register);
974 else
975 writel(1952, hw->hw_addr + tx_ring->itr_register);
976
977 igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
978 adapter->eims_enable_mask |= rx_ring->eims_value;
979 if (rx_ring->itr_val)
980 writel(rx_ring->itr_val,
981 hw->hw_addr + rx_ring->itr_register);
982 else
983 writel(1952, hw->hw_addr + rx_ring->itr_register);
984
985 /* set vector for other causes, i.e. link changes */
986
987 tmp = (vector++ | E1000_IVAR_VALID);
988
989 ew32(IVAR_MISC, tmp);
990
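	/* all three vectors (Tx, Rx, other) are now assigned; the highest
	 * bit in the mask corresponds to the link/mailbox vector */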
991 adapter->eims_enable_mask = (1 << (vector)) - 1;
992 adapter->eims_other = 1 << (vector - 1);
993 e1e_flush();
994}
995
996static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
997{
998 if (adapter->msix_entries) {
999 pci_disable_msix(adapter->pdev);
1000 kfree(adapter->msix_entries);
1001 adapter->msix_entries = NULL;
1002 }
1003}
1004
1005/**
1006 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
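 * @adapter: board private structure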
1007 *
1008 * Attempt to configure interrupts using the best available
1009 * capabilities of the hardware and kernel.
1010 **/
1011static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
1012{
1013 int err = -ENOMEM;
1014 int i;
1015
1016	/* we allocate 3 vectors: 1 for Tx, 1 for Rx, and 1 for PF messages */
1017 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
1018 GFP_KERNEL);
1019 if (adapter->msix_entries) {
1020 for (i = 0; i < 3; i++)
1021 adapter->msix_entries[i].entry = i;
1022
1023 err = pci_enable_msix(adapter->pdev,
1024 adapter->msix_entries, 3);
1025 }
1026
1027 if (err) {
1028 /* MSI-X failed */
1029 dev_err(&adapter->pdev->dev,
1030 "Failed to initialize MSI-X interrupts.\n");
1031 igbvf_reset_interrupt_capability(adapter);
1032 }
1033}
1034
1035/**
1036 * igbvf_request_msix - Initialize MSI-X interrupts
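 * @adapter: board private structure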
1037 *
1038 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1039 * kernel.
1040 **/
1041static int igbvf_request_msix(struct igbvf_adapter *adapter)
1042{
1043 struct net_device *netdev = adapter->netdev;
1044 int err = 0, vector = 0;
1045
1046 if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
1047 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1048 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1049 } else {
1050 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1051 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1052 }
1053
1054 err = request_irq(adapter->msix_entries[vector].vector,
1055 &igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1056 netdev);
1057 if (err)
1058 goto out;
1059
1060 adapter->tx_ring->itr_register = E1000_EITR(vector);
1061 adapter->tx_ring->itr_val = 1952;
1062 vector++;
1063
1064 err = request_irq(adapter->msix_entries[vector].vector,
1065 &igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1066 netdev);
1067 if (err)
1068 goto out;
1069
1070 adapter->rx_ring->itr_register = E1000_EITR(vector);
1071 adapter->rx_ring->itr_val = 1952;
1072 vector++;
1073
1074 err = request_irq(adapter->msix_entries[vector].vector,
1075 &igbvf_msix_other, 0, netdev->name, netdev);
1076 if (err)
1077 goto out;
1078
1079 igbvf_configure_msix(adapter);
1080 return 0;
1081out:
1082 return err;
1083}
1084
1085/**
1086 * igbvf_alloc_queues - Allocate memory for all rings
1087 * @adapter: board private structure to initialize
1088 **/
1089static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
1090{
1091 struct net_device *netdev = adapter->netdev;
1092
1093 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1094 if (!adapter->tx_ring)
1095 return -ENOMEM;
1096
1097 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1098 if (!adapter->rx_ring) {
1099 kfree(adapter->tx_ring);
1100 return -ENOMEM;
1101 }
1102
1103 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
1104
1105 return 0;
1106}
1107
1108/**
1109 * igbvf_request_irq - initialize interrupts
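 * @adapter: board private structure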
1110 *
1111 * Attempts to configure interrupts using the best available
1112 * capabilities of the hardware and kernel.
1113 **/
1114static int igbvf_request_irq(struct igbvf_adapter *adapter)
1115{
1116 int err = -1;
1117
1118 /* igbvf supports msi-x only */
1119 if (adapter->msix_entries)
1120 err = igbvf_request_msix(adapter);
1121
1122 if (!err)
1123 return err;
1124
1125 dev_err(&adapter->pdev->dev,
1126 "Unable to allocate interrupt, Error: %d\n", err);
1127
1128 return err;
1129}
1130
1131static void igbvf_free_irq(struct igbvf_adapter *adapter)
1132{
1133 struct net_device *netdev = adapter->netdev;
1134 int vector;
1135
1136 if (adapter->msix_entries) {
1137 for (vector = 0; vector < 3; vector++)
1138 free_irq(adapter->msix_entries[vector].vector, netdev);
1139 }
1140}
1141
1142/**
1143 * igbvf_irq_disable - Mask off interrupt generation on the NIC
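 * @adapter: board private structure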
1144 **/
1145static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1146{
1147 struct e1000_hw *hw = &adapter->hw;
1148
1149 ew32(EIMC, ~0);
1150
1151 if (adapter->msix_entries)
1152 ew32(EIAC, 0);
1153}
1154
1155/**
1156 * igbvf_irq_enable - Enable default interrupt generation settings
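 * @adapter: board private structure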
1157 **/
1158static void igbvf_irq_enable(struct igbvf_adapter *adapter)
1159{
1160 struct e1000_hw *hw = &adapter->hw;
1161
1162 ew32(EIAC, adapter->eims_enable_mask);
1163 ew32(EIAM, adapter->eims_enable_mask);
1164 ew32(EIMS, adapter->eims_enable_mask);
1165}
1166
1167/**
1168 * igbvf_poll - NAPI Rx polling callback
1169 * @napi: struct associated with this polling callback
1170 * @budget: number of packets the driver is allowed to process this poll
1171 **/
1172static int igbvf_poll(struct napi_struct *napi, int budget)
1173{
1174 struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
1175 struct igbvf_adapter *adapter = rx_ring->adapter;
1176 struct e1000_hw *hw = &adapter->hw;
1177 int work_done = 0;
1178
1179 igbvf_clean_rx_irq(adapter, &work_done, budget);
1180
1181 /* If not enough Rx work done, exit the polling mode */
1182 if (work_done < budget) {
1183 napi_complete(napi);
1184
1185 if (adapter->itr_setting & 3)
1186 igbvf_set_itr(adapter);
1187
1188 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1189 ew32(EIMS, adapter->rx_ring->eims_value);
1190 }
1191
1192 return work_done;
1193}
1194
1195/**
1196 * igbvf_set_rlpml - set receive large packet maximum length
1197 * @adapter: board private structure
1198 *
1199 * Configure the maximum size of packets that will be received
1200 */
1201static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1202{
1203 int max_frame_size = adapter->max_frame_size;
1204 struct e1000_hw *hw = &adapter->hw;
1205
1206 if (adapter->vlgrp)
1207 max_frame_size += VLAN_TAG_SIZE;
1208
1209 e1000_rlpml_set_vf(hw, max_frame_size);
1210}
1211
1212static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1213{
1214 struct igbvf_adapter *adapter = netdev_priv(netdev);
1215 struct e1000_hw *hw = &adapter->hw;
1216
1217 if (hw->mac.ops.set_vfta(hw, vid, true))
1218 dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
1219}
1220
1221static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1222{
1223 struct igbvf_adapter *adapter = netdev_priv(netdev);
1224 struct e1000_hw *hw = &adapter->hw;
1225
1226 igbvf_irq_disable(adapter);
1227 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1228
1229 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1230 igbvf_irq_enable(adapter);
1231
1232 if (hw->mac.ops.set_vfta(hw, vid, false))
1233 dev_err(&adapter->pdev->dev,
1234 "Failed to remove vlan id %d\n", vid);
1235}
1236
1237static void igbvf_vlan_rx_register(struct net_device *netdev,
1238 struct vlan_group *grp)
1239{
1240 struct igbvf_adapter *adapter = netdev_priv(netdev);
1241
1242 adapter->vlgrp = grp;
1243}
1244
1245static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1246{
1247 u16 vid;
1248
1249 if (!adapter->vlgrp)
1250 return;
1251
1252 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1253 if (!vlan_group_get_device(adapter->vlgrp, vid))
1254 continue;
1255 igbvf_vlan_rx_add_vid(adapter->netdev, vid);
1256 }
1257
1258 igbvf_set_rlpml(adapter);
1259}
1260
1261/**
1262 * igbvf_configure_tx - Configure Transmit Unit after Reset
1263 * @adapter: board private structure
1264 *
1265 * Configure the Tx unit of the MAC after a reset.
1266 **/
1267static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1268{
1269 struct e1000_hw *hw = &adapter->hw;
1270 struct igbvf_ring *tx_ring = adapter->tx_ring;
1271 u64 tdba;
1272 u32 txdctl, dca_txctrl;
1273
1274 /* disable transmits */
1275 txdctl = er32(TXDCTL(0));
1276 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1277 msleep(10);
1278
1279 /* Setup the HW Tx Head and Tail descriptor pointers */
1280 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
1281 tdba = tx_ring->dma;
1282 ew32(TDBAL(0), (tdba & DMA_32BIT_MASK));
1283 ew32(TDBAH(0), (tdba >> 32));
1284 ew32(TDH(0), 0);
1285 ew32(TDT(0), 0);
1286 tx_ring->head = E1000_TDH(0);
1287 tx_ring->tail = E1000_TDT(0);
1288
1289 /* Turn off Relaxed Ordering on head write-backs. The writebacks
1290 * MUST be delivered in order or it will completely screw up
1291	 * our bookkeeping.
1292 */
1293 dca_txctrl = er32(DCA_TXCTRL(0));
1294 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1295 ew32(DCA_TXCTRL(0), dca_txctrl);
1296
1297 /* enable transmits */
1298 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1299 ew32(TXDCTL(0), txdctl);
1300
1301 /* Setup Transmit Descriptor Settings for eop descriptor */
1302 adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
1303
1304 /* enable Report Status bit */
1305 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1306
1307 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
1308}
1309
1310/**
1311 * igbvf_setup_srrctl - configure the receive control registers
1312 * @adapter: Board private structure
1313 **/
1314static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1315{
1316 struct e1000_hw *hw = &adapter->hw;
1317 u32 srrctl = 0;
1318
1319 srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
1320 E1000_SRRCTL_BSIZEHDR_MASK |
1321 E1000_SRRCTL_BSIZEPKT_MASK);
1322
1323 /* Enable queue drop to avoid head of line blocking */
1324 srrctl |= E1000_SRRCTL_DROP_EN;
1325
1326 /* Setup buffer sizes */
1327 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
1328 E1000_SRRCTL_BSIZEPKT_SHIFT;
1329
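	/* buffers smaller than 2KB use the one-buffer descriptor format;
	 * larger ones enable header split with a 128-byte header buffer */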
1330 if (adapter->rx_buffer_len < 2048) {
1331 adapter->rx_ps_hdr_size = 0;
1332 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1333 } else {
1334 adapter->rx_ps_hdr_size = 128;
1335 srrctl |= adapter->rx_ps_hdr_size <<
1336 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1337 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1338 }
1339
1340 ew32(SRRCTL(0), srrctl);
1341}
1342
1343/**
1344 * igbvf_configure_rx - Configure Receive Unit after Reset
1345 * @adapter: board private structure
1346 *
1347 * Configure the Rx unit of the MAC after a reset.
1348 **/
1349static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1350{
1351 struct e1000_hw *hw = &adapter->hw;
1352 struct igbvf_ring *rx_ring = adapter->rx_ring;
1353 u64 rdba;
1354 u32 rdlen, rxdctl;
1355
1356 /* disable receives */
1357 rxdctl = er32(RXDCTL(0));
1358 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1359 msleep(10);
1360
1361 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1362
1363 /*
1364 * Setup the HW Rx Head and Tail Descriptor Pointers and
1365 * the Base and Length of the Rx Descriptor Ring
1366 */
1367 rdba = rx_ring->dma;
1368 ew32(RDBAL(0), (rdba & DMA_32BIT_MASK));
1369 ew32(RDBAH(0), (rdba >> 32));
1370 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1371 rx_ring->head = E1000_RDH(0);
1372 rx_ring->tail = E1000_RDT(0);
1373 ew32(RDH(0), 0);
1374 ew32(RDT(0), 0);
1375
1376 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1377 rxdctl &= 0xFFF00000;
1378 rxdctl |= IGBVF_RX_PTHRESH;
1379 rxdctl |= IGBVF_RX_HTHRESH << 8;
1380 rxdctl |= IGBVF_RX_WTHRESH << 16;
1381
1382 igbvf_set_rlpml(adapter);
1383
1384 /* enable receives */
1385 ew32(RXDCTL(0), rxdctl);
1386}
1387
1388/**
1389 * igbvf_set_multi - Multicast and Promiscuous mode set
1390 * @netdev: network interface device structure
1391 *
1392 * The set_multi entry point is called whenever the multicast address
1393 * list or the network interface flags are updated. This routine is
1394 * responsible for configuring the hardware for proper multicast,
1395 * promiscuous mode, and all-multi behavior.
1396 **/
1397static void igbvf_set_multi(struct net_device *netdev)
1398{
1399 struct igbvf_adapter *adapter = netdev_priv(netdev);
1400 struct e1000_hw *hw = &adapter->hw;
1401 struct dev_mc_list *mc_ptr;
1402 u8 *mta_list = NULL;
1403 int i;
1404
1405 if (netdev->mc_count) {
1406		mta_list = kmalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
1407 if (!mta_list) {
1408 dev_err(&adapter->pdev->dev,
1409 "failed to allocate multicast filter list\n");
1410 return;
1411 }
1412 }
1413
1414 /* prepare a packed array of only addresses. */
1415 mc_ptr = netdev->mc_list;
1416
1417 for (i = 0; i < netdev->mc_count; i++) {
1418 if (!mc_ptr)
1419 break;
1420 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
1421 ETH_ALEN);
1422 mc_ptr = mc_ptr->next;
1423 }
1424
1425 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1426 kfree(mta_list);
1427}
1428
1429/**
1430 * igbvf_configure - configure the hardware for Rx and Tx
1431 * @adapter: private board structure
1432 **/
1433static void igbvf_configure(struct igbvf_adapter *adapter)
1434{
1435 igbvf_set_multi(adapter->netdev);
1436
1437 igbvf_restore_vlan(adapter);
1438
1439 igbvf_configure_tx(adapter);
1440 igbvf_setup_srrctl(adapter);
1441 igbvf_configure_rx(adapter);
1442 igbvf_alloc_rx_buffers(adapter->rx_ring,
1443 igbvf_desc_unused(adapter->rx_ring));
1444}
1445
1446/* igbvf_reset - bring the hardware into a known good state
1447 *
1448 * This function boots the hardware and enables some settings that
1449 * require a configuration cycle of the hardware - those cannot be
1450 * set/changed during runtime. After reset the device needs to be
1451 * properly configured for Rx, Tx etc.
1452 */
1453static void igbvf_reset(struct igbvf_adapter *adapter)
1454{
1455 struct e1000_mac_info *mac = &adapter->hw.mac;
1456 struct net_device *netdev = adapter->netdev;
1457 struct e1000_hw *hw = &adapter->hw;
1458
1459 /* Allow time for pending master requests to run */
1460 if (mac->ops.reset_hw(hw))
1461 dev_err(&adapter->pdev->dev, "PF still resetting\n");
1462
1463 mac->ops.init_hw(hw);
1464
1465 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1466 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1467 netdev->addr_len);
1468 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1469 netdev->addr_len);
1470 }
1471}
1472
1473int igbvf_up(struct igbvf_adapter *adapter)
1474{
1475 struct e1000_hw *hw = &adapter->hw;
1476
1477 /* hardware has been reset, we need to reload some things */
1478 igbvf_configure(adapter);
1479
1480 clear_bit(__IGBVF_DOWN, &adapter->state);
1481
1482 napi_enable(&adapter->rx_ring->napi);
1483 if (adapter->msix_entries)
1484 igbvf_configure_msix(adapter);
1485
1486 /* Clear any pending interrupts. */
1487 er32(EICR);
1488 igbvf_irq_enable(adapter);
1489
1490 /* start the watchdog */
1491 hw->mac.get_link_status = 1;
1492 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1493
1494
1495 return 0;
1496}
1497
1498void igbvf_down(struct igbvf_adapter *adapter)
1499{
1500 struct net_device *netdev = adapter->netdev;
1501 struct e1000_hw *hw = &adapter->hw;
1502 u32 rxdctl, txdctl;
1503
1504 /*
1505 * signal that we're down so the interrupt handler does not
1506 * reschedule our watchdog timer
1507 */
1508 set_bit(__IGBVF_DOWN, &adapter->state);
1509
1510 /* disable receives in the hardware */
1511 rxdctl = er32(RXDCTL(0));
1512 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1513
1514 netif_stop_queue(netdev);
1515
1516 /* disable transmits in the hardware */
1517 txdctl = er32(TXDCTL(0));
1518 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1519
1520 /* flush both disables and wait for them to finish */
1521 e1e_flush();
1522 msleep(10);
1523
1524 napi_disable(&adapter->rx_ring->napi);
1525
1526 igbvf_irq_disable(adapter);
1527
1528 del_timer_sync(&adapter->watchdog_timer);
1529
1530 netdev->tx_queue_len = adapter->tx_queue_len;
1531 netif_carrier_off(netdev);
1532
1533	/* record the stats before reset */
1534 igbvf_update_stats(adapter);
1535
1536 adapter->link_speed = 0;
1537 adapter->link_duplex = 0;
1538
1539 igbvf_reset(adapter);
1540 igbvf_clean_tx_ring(adapter->tx_ring);
1541 igbvf_clean_rx_ring(adapter->rx_ring);
1542}
1543
1544void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1545{
1546 might_sleep();
1547 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1548 msleep(1);
1549 igbvf_down(adapter);
1550 igbvf_up(adapter);
1551 clear_bit(__IGBVF_RESETTING, &adapter->state);
1552}
1553
1554/**
1555 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1556 * @adapter: board private structure to initialize
1557 *
1558 * igbvf_sw_init initializes the Adapter private data structure.
1559 * Fields are initialized based on PCI device information and
1560 * OS network device settings (MTU size).
1561 **/
1562static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
1563{
1564 struct net_device *netdev = adapter->netdev;
1565 s32 rc;
1566
1567 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
1568 adapter->rx_ps_hdr_size = 0;
1569 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1570 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1571
1572 adapter->tx_int_delay = 8;
1573 adapter->tx_abs_int_delay = 32;
1574 adapter->rx_int_delay = 0;
1575 adapter->rx_abs_int_delay = 8;
1576 adapter->itr_setting = 3;
1577 adapter->itr = 20000;
1578
1579 /* Set various function pointers */
1580 adapter->ei->init_ops(&adapter->hw);
1581
1582 rc = adapter->hw.mac.ops.init_params(&adapter->hw);
1583 if (rc)
1584 return rc;
1585
1586 rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
1587 if (rc)
1588 return rc;
1589
1590 igbvf_set_interrupt_capability(adapter);
1591
1592 if (igbvf_alloc_queues(adapter))
1593 return -ENOMEM;
1594
1595 spin_lock_init(&adapter->tx_queue_lock);
1596
1597 /* Explicitly disable IRQ since the NIC can be in any state. */
1598 igbvf_irq_disable(adapter);
1599
1600 spin_lock_init(&adapter->stats_lock);
1601
1602 set_bit(__IGBVF_DOWN, &adapter->state);
1603 return 0;
1604}
1605
1606static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
1607{
1608 struct e1000_hw *hw = &adapter->hw;
1609
1610 adapter->stats.last_gprc = er32(VFGPRC);
1611 adapter->stats.last_gorc = er32(VFGORC);
1612 adapter->stats.last_gptc = er32(VFGPTC);
1613 adapter->stats.last_gotc = er32(VFGOTC);
1614 adapter->stats.last_mprc = er32(VFMPRC);
1615 adapter->stats.last_gotlbc = er32(VFGOTLBC);
1616 adapter->stats.last_gptlbc = er32(VFGPTLBC);
1617 adapter->stats.last_gorlbc = er32(VFGORLBC);
1618 adapter->stats.last_gprlbc = er32(VFGPRLBC);
1619
1620 adapter->stats.base_gprc = er32(VFGPRC);
1621 adapter->stats.base_gorc = er32(VFGORC);
1622 adapter->stats.base_gptc = er32(VFGPTC);
1623 adapter->stats.base_gotc = er32(VFGOTC);
1624 adapter->stats.base_mprc = er32(VFMPRC);
1625 adapter->stats.base_gotlbc = er32(VFGOTLBC);
1626 adapter->stats.base_gptlbc = er32(VFGPTLBC);
1627 adapter->stats.base_gorlbc = er32(VFGORLBC);
1628 adapter->stats.base_gprlbc = er32(VFGPRLBC);
1629}
1630
1631/**
1632 * igbvf_open - Called when a network interface is made active
1633 * @netdev: network interface device structure
1634 *
1635 * Returns 0 on success, negative value on failure
1636 *
1637 * The open entry point is called when a network interface is made
1638 * active by the system (IFF_UP). At this point all resources needed
1639 * for transmit and receive operations are allocated, the interrupt
1640 * handler is registered with the OS, the watchdog timer is started,
1641 * and the stack is notified that the interface is ready.
1642 **/
1643static int igbvf_open(struct net_device *netdev)
1644{
1645 struct igbvf_adapter *adapter = netdev_priv(netdev);
1646 struct e1000_hw *hw = &adapter->hw;
1647 int err;
1648
1649 /* disallow open during test */
1650 if (test_bit(__IGBVF_TESTING, &adapter->state))
1651 return -EBUSY;
1652
1653 /* allocate transmit descriptors */
1654 err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
1655 if (err)
1656 goto err_setup_tx;
1657
1658 /* allocate receive descriptors */
1659 err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
1660 if (err)
1661 goto err_setup_rx;
1662
1663 /*
1664 * before we allocate an interrupt, we must be ready to handle it.
1665 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1666	 * as soon as we call pci_request_irq, so we have to set up our
1667 * clean_rx handler before we do so.
1668 */
1669 igbvf_configure(adapter);
1670
1671 err = igbvf_request_irq(adapter);
1672 if (err)
1673 goto err_req_irq;
1674
1675 /* From here on the code is the same as igbvf_up() */
1676 clear_bit(__IGBVF_DOWN, &adapter->state);
1677
1678 napi_enable(&adapter->rx_ring->napi);
1679
1680 /* clear any pending interrupts */
1681 er32(EICR);
1682
1683 igbvf_irq_enable(adapter);
1684
1685 /* start the watchdog */
1686 hw->mac.get_link_status = 1;
1687 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1688
1689 return 0;
1690
1691err_req_irq:
1692 igbvf_free_rx_resources(adapter->rx_ring);
1693err_setup_rx:
1694 igbvf_free_tx_resources(adapter->tx_ring);
1695err_setup_tx:
1696 igbvf_reset(adapter);
1697
1698 return err;
1699}
1700
1701/**
1702 * igbvf_close - Disables a network interface
1703 * @netdev: network interface device structure
1704 *
1705 * Returns 0, this is not allowed to fail
1706 *
1707 * The close entry point is called when an interface is de-activated
1708 * by the OS. The hardware is still under the drivers control, but
1709 * needs to be disabled. A global MAC reset is issued to stop the
1710 * hardware, and all transmit and receive resources are freed.
1711 **/
1712static int igbvf_close(struct net_device *netdev)
1713{
1714 struct igbvf_adapter *adapter = netdev_priv(netdev);
1715
1716 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1717 igbvf_down(adapter);
1718
1719 igbvf_free_irq(adapter);
1720
1721 igbvf_free_tx_resources(adapter->tx_ring);
1722 igbvf_free_rx_resources(adapter->rx_ring);
1723
1724 return 0;
1725}
1726/**
1727 * igbvf_set_mac - Change the Ethernet Address of the NIC
1728 * @netdev: network interface device structure
1729 * @p: pointer to an address structure
1730 *
1731 * Returns 0 on success, negative on failure
1732 **/
1733static int igbvf_set_mac(struct net_device *netdev, void *p)
1734{
1735 struct igbvf_adapter *adapter = netdev_priv(netdev);
1736 struct e1000_hw *hw = &adapter->hw;
1737 struct sockaddr *addr = p;
1738
1739 if (!is_valid_ether_addr(addr->sa_data))
1740 return -EADDRNOTAVAIL;
1741
1742 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1743
1744 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1745
1746 if (memcmp(addr->sa_data, hw->mac.addr, 6))
1747 return -EADDRNOTAVAIL;
1748
1749 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1750
1751 return 0;
1752}
1753
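/* Fold a 32-bit hardware statistics register into a 64-bit software counter:
 * bump the upper 32 bits when the raw value wraps, then splice in the
 * latest low 32 bits. */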
1754#define UPDATE_VF_COUNTER(reg, name) \
1755 { \
1756 u32 current_counter = er32(reg); \
1757 if (current_counter < adapter->stats.last_##name) \
1758 adapter->stats.name += 0x100000000LL; \
1759 adapter->stats.last_##name = current_counter; \
1760 adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1761 adapter->stats.name |= current_counter; \
1762 }
1763
1764/**
1765 * igbvf_update_stats - Update the board statistics counters
1766 * @adapter: board private structure
1767**/
1768void igbvf_update_stats(struct igbvf_adapter *adapter)
1769{
1770 struct e1000_hw *hw = &adapter->hw;
1771 struct pci_dev *pdev = adapter->pdev;
1772
1773 /*
1774 * Prevent stats update while adapter is being reset, link is down
1775 * or if the pci connection is down.
1776 */
1777 if (adapter->link_speed == 0)
1778 return;
1779
1780 if (test_bit(__IGBVF_RESETTING, &adapter->state))
1781 return;
1782
1783 if (pci_channel_offline(pdev))
1784 return;
1785
1786 UPDATE_VF_COUNTER(VFGPRC, gprc);
1787 UPDATE_VF_COUNTER(VFGORC, gorc);
1788 UPDATE_VF_COUNTER(VFGPTC, gptc);
1789 UPDATE_VF_COUNTER(VFGOTC, gotc);
1790 UPDATE_VF_COUNTER(VFMPRC, mprc);
1791 UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1792 UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1793 UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1794 UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1795
1796 /* Fill out the OS statistics structure */
1797 adapter->net_stats.multicast = adapter->stats.mprc;
1798}
1799
1800static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1801{
1802 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
1803 adapter->link_speed,
1804 ((adapter->link_duplex == FULL_DUPLEX) ?
1805 "Full Duplex" : "Half Duplex"));
1806}
1807
1808static bool igbvf_has_link(struct igbvf_adapter *adapter)
1809{
1810 struct e1000_hw *hw = &adapter->hw;
1811 s32 ret_val = E1000_SUCCESS;
1812 bool link_active;
1813
1814 ret_val = hw->mac.ops.check_for_link(hw);
1815 link_active = !hw->mac.get_link_status;
1816
1817 /* if check for link returns error we will need to reset */
1818 if (ret_val)
1819 schedule_work(&adapter->reset_task);
1820
1821 return link_active;
1822}
1823
1824/**
1825 * igbvf_watchdog - Timer Call-back
1826 * @data: pointer to adapter cast into an unsigned long
1827 **/
1828static void igbvf_watchdog(unsigned long data)
1829{
1830 struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
1831
1832 /* Do the rest outside of interrupt context */
1833 schedule_work(&adapter->watchdog_task);
1834}
1835
1836static void igbvf_watchdog_task(struct work_struct *work)
1837{
1838 struct igbvf_adapter *adapter = container_of(work,
1839 struct igbvf_adapter,
1840 watchdog_task);
1841 struct net_device *netdev = adapter->netdev;
1842 struct e1000_mac_info *mac = &adapter->hw.mac;
1843 struct igbvf_ring *tx_ring = adapter->tx_ring;
1844 struct e1000_hw *hw = &adapter->hw;
1845 u32 link;
1846 int tx_pending = 0;
1847
1848 link = igbvf_has_link(adapter);
1849
1850 if (link) {
1851 if (!netif_carrier_ok(netdev)) {
1852 bool txb2b = 1;
1853
1854 mac->ops.get_link_up_info(&adapter->hw,
1855 &adapter->link_speed,
1856 &adapter->link_duplex);
1857 igbvf_print_link_info(adapter);
1858
1859 /*
1860 * tweak tx_queue_len according to speed/duplex
1861 * and adjust the timeout factor
1862 */
1863 netdev->tx_queue_len = adapter->tx_queue_len;
1864 adapter->tx_timeout_factor = 1;
1865 switch (adapter->link_speed) {
1866 case SPEED_10:
1867 txb2b = 0;
1868 netdev->tx_queue_len = 10;
1869 adapter->tx_timeout_factor = 16;
1870 break;
1871 case SPEED_100:
1872 txb2b = 0;
1873 netdev->tx_queue_len = 100;
1874 /* maybe add some timeout factor ? */
1875 break;
1876 }
1877
1878 netif_carrier_on(netdev);
1879 netif_wake_queue(netdev);
1880 }
1881 } else {
1882 if (netif_carrier_ok(netdev)) {
1883 adapter->link_speed = 0;
1884 adapter->link_duplex = 0;
1885 dev_info(&adapter->pdev->dev, "Link is Down\n");
1886 netif_carrier_off(netdev);
1887 netif_stop_queue(netdev);
1888 }
1889 }
1890
1891 if (netif_carrier_ok(netdev)) {
1892 igbvf_update_stats(adapter);
1893 } else {
1894 tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1895 tx_ring->count);
1896 if (tx_pending) {
1897 /*
1898 * We've lost link, so the controller stops DMA,
1899 * but we've got queued Tx work that's never going
1900 * to get done, so reset controller to flush Tx.
1901 * (Do the reset outside of interrupt context).
1902 */
1903 adapter->tx_timeout_count++;
1904 schedule_work(&adapter->reset_task);
1905 }
1906 }
1907
1908 /* Cause software interrupt to ensure Rx ring is cleaned */
1909 ew32(EICS, adapter->rx_ring->eims_value);
1910
1911 /* Force detection of hung controller every watchdog period */
1912 adapter->detect_tx_hung = 1;
1913
1914 /* Reset the timer */
1915 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1916 mod_timer(&adapter->watchdog_timer,
1917 round_jiffies(jiffies + (2 * HZ)));
1918}
1919
1920#define IGBVF_TX_FLAGS_CSUM 0x00000001
1921#define IGBVF_TX_FLAGS_VLAN 0x00000002
1922#define IGBVF_TX_FLAGS_TSO 0x00000004
1923#define IGBVF_TX_FLAGS_IPV4 0x00000008
1924#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
1925#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
1926
1927static int igbvf_tso(struct igbvf_adapter *adapter,
1928 struct igbvf_ring *tx_ring,
1929 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
1930{
1931 struct e1000_adv_tx_context_desc *context_desc;
1932 unsigned int i;
1933 int err;
1934 struct igbvf_buffer *buffer_info;
1935 u32 info = 0, tu_cmd = 0;
1936 u32 mss_l4len_idx, l4len;
1937 *hdr_len = 0;
1938
1939 if (skb_header_cloned(skb)) {
1940 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1941 if (err) {
1942 dev_err(&adapter->pdev->dev,
1943 "igbvf_tso returning an error\n");
1944 return err;
1945 }
1946 }
1947
1948 l4len = tcp_hdrlen(skb);
1949 *hdr_len += l4len;
1950
1951 if (skb->protocol == htons(ETH_P_IP)) {
1952 struct iphdr *iph = ip_hdr(skb);
1953 iph->tot_len = 0;
1954 iph->check = 0;
1955 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1956 iph->daddr, 0,
1957 IPPROTO_TCP,
1958 0);
1959 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
1960 ipv6_hdr(skb)->payload_len = 0;
1961 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1962 &ipv6_hdr(skb)->daddr,
1963 0, IPPROTO_TCP, 0);
1964 }
1965
1966 i = tx_ring->next_to_use;
1967
1968 buffer_info = &tx_ring->buffer_info[i];
1969 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1970 /* VLAN MACLEN IPLEN */
1971 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
1972 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
1973 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
1974 *hdr_len += skb_network_offset(skb);
1975 info |= (skb_transport_header(skb) - skb_network_header(skb));
1976 *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
1977 context_desc->vlan_macip_lens = cpu_to_le32(info);
1978
1979 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1980 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1981
1982 if (skb->protocol == htons(ETH_P_IP))
1983 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1984 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1985
1986 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
1987
1988 /* MSS L4LEN IDX */
1989 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
1990 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
1991
1992 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1993 context_desc->seqnum_seed = 0;
1994
1995 buffer_info->time_stamp = jiffies;
1996 buffer_info->next_to_watch = i;
1997 buffer_info->dma = 0;
1998 i++;
1999 if (i == tx_ring->count)
2000 i = 0;
2001
2002 tx_ring->next_to_use = i;
2003
2004 return true;
2005}
2006
2007static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2008 struct igbvf_ring *tx_ring,
2009 struct sk_buff *skb, u32 tx_flags)
2010{
2011 struct e1000_adv_tx_context_desc *context_desc;
2012 unsigned int i;
2013 struct igbvf_buffer *buffer_info;
2014 u32 info = 0, tu_cmd = 0;
2015
2016 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
2017 (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
2018 i = tx_ring->next_to_use;
2019 buffer_info = &tx_ring->buffer_info[i];
2020 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
2021
2022 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2023 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
2024
2025 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2026 if (skb->ip_summed == CHECKSUM_PARTIAL)
2027 info |= (skb_transport_header(skb) -
2028 skb_network_header(skb));
2029
2030
2031 context_desc->vlan_macip_lens = cpu_to_le32(info);
2032
2033 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2034
2035 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2036 switch (skb->protocol) {
2037 case __constant_htons(ETH_P_IP):
2038 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2039 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2040 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2041 break;
2042 case __constant_htons(ETH_P_IPV6):
2043 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2044 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2045 break;
2046 default:
2047 break;
2048 }
2049 }
2050
2051 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2052 context_desc->seqnum_seed = 0;
2053 context_desc->mss_l4len_idx = 0;
2054
2055 buffer_info->time_stamp = jiffies;
2056 buffer_info->next_to_watch = i;
2057 buffer_info->dma = 0;
2058 i++;
2059 if (i == tx_ring->count)
2060 i = 0;
2061 tx_ring->next_to_use = i;
2062
2063 return true;
2064 }
2065
2066 return false;
2067}
2068
2069static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2070{
2071 struct igbvf_adapter *adapter = netdev_priv(netdev);
2072
2073	/* if there are enough free descriptors then we don't need to worry */
2074 if (igbvf_desc_unused(adapter->tx_ring) >= size)
2075 return 0;
2076
2077 netif_stop_queue(netdev);
2078
2079 smp_mb();
2080
2081 /* We need to check again just in case room has been made available */
2082 if (igbvf_desc_unused(adapter->tx_ring) < size)
2083 return -EBUSY;
2084
2085 netif_wake_queue(netdev);
2086
2087 ++adapter->restart_queue;
2088 return 0;
2089}
2090
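/* a single Tx data descriptor can map at most 2^16 bytes of buffer */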
2091#define IGBVF_MAX_TXD_PWR 16
2092#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
2093
2094static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2095 struct igbvf_ring *tx_ring,
2096 struct sk_buff *skb,
2097 unsigned int first)
2098{
2099 struct igbvf_buffer *buffer_info;
2100 unsigned int len = skb_headlen(skb);
2101 unsigned int count = 0, i;
2102 unsigned int f;
2103 dma_addr_t *map;
2104
2105 i = tx_ring->next_to_use;
2106
2107 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
2108 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
2109 return 0;
2110 }
2111
2112 map = skb_shinfo(skb)->dma_maps;
2113
2114 buffer_info = &tx_ring->buffer_info[i];
2115 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2116 buffer_info->length = len;
2117 /* set time_stamp *before* dma to help avoid a possible race */
2118 buffer_info->time_stamp = jiffies;
2119 buffer_info->next_to_watch = i;
2120 buffer_info->dma = map[count];
2121 count++;
2122
2123 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2124 struct skb_frag_struct *frag;
2125
2126 i++;
2127 if (i == tx_ring->count)
2128 i = 0;
2129
2130 frag = &skb_shinfo(skb)->frags[f];
2131 len = frag->size;
2132
2133 buffer_info = &tx_ring->buffer_info[i];
2134 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2135 buffer_info->length = len;
2136 buffer_info->time_stamp = jiffies;
2137 buffer_info->next_to_watch = i;
2138 buffer_info->dma = map[count];
2139 count++;
2140 }
2141
2142 tx_ring->buffer_info[i].skb = skb;
2143 tx_ring->buffer_info[first].next_to_watch = i;
2144
2145 return count;
2146}
2147
2148static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2149 struct igbvf_ring *tx_ring,
2150 int tx_flags, int count, u32 paylen,
2151 u8 hdr_len)
2152{
2153 union e1000_adv_tx_desc *tx_desc = NULL;
2154 struct igbvf_buffer *buffer_info;
2155 u32 olinfo_status = 0, cmd_type_len;
2156 unsigned int i;
2157
2158 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2159 E1000_ADVTXD_DCMD_DEXT);
2160
2161 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2162 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2163
2164 if (tx_flags & IGBVF_TX_FLAGS_TSO) {
2165 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2166
2167 /* insert tcp checksum */
2168 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2169
2170 /* insert ip checksum */
2171 if (tx_flags & IGBVF_TX_FLAGS_IPV4)
2172 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2173
2174 } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
2175 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2176 }
2177
2178 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2179
2180 i = tx_ring->next_to_use;
2181 while (count--) {
2182 buffer_info = &tx_ring->buffer_info[i];
2183 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
2184 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2185 tx_desc->read.cmd_type_len =
2186 cpu_to_le32(cmd_type_len | buffer_info->length);
2187 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2188 i++;
2189 if (i == tx_ring->count)
2190 i = 0;
2191 }
2192
2193 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2194 /* Force memory writes to complete before letting h/w
2195 * know there are new descriptors to fetch. (Only
2196 * applicable for weak-ordered memory model archs,
2197 * such as IA-64). */
2198 wmb();
2199
2200 tx_ring->next_to_use = i;
2201 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2202 /* we need this if more than one processor can write to our tail
2203	 * at a time, it synchronizes IO on IA64/Altix systems */
2204 mmiowb();
2205}
2206
2207static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2208 struct net_device *netdev,
2209 struct igbvf_ring *tx_ring)
2210{
2211 struct igbvf_adapter *adapter = netdev_priv(netdev);
2212 unsigned int first, tx_flags = 0;
2213 u8 hdr_len = 0;
2214 int count = 0;
2215 int tso = 0;
2216
2217 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2218 dev_kfree_skb_any(skb);
2219 return NETDEV_TX_OK;
2220 }
2221
2222 if (skb->len <= 0) {
2223 dev_kfree_skb_any(skb);
2224 return NETDEV_TX_OK;
2225 }
2226
2227	/*
2228	 * need: 1 desc per fragment (nr_frags),
2229	 *       + 2 desc gap to keep tail from touching head,
2230	 *       + 1 desc for skb->data,
2231	 *       + 1 desc for context descriptor,
2232	 * otherwise try next time
2233	 */
2234 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
2235 /* this is a hard error */
2236 return NETDEV_TX_BUSY;
2237 }
2238
2239 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2240 tx_flags |= IGBVF_TX_FLAGS_VLAN;
2241 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2242 }
2243
2244 if (skb->protocol == htons(ETH_P_IP))
2245 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2246
2247 first = tx_ring->next_to_use;
2248
2249 tso = skb_is_gso(skb) ?
2250 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
2251 if (unlikely(tso < 0)) {
2252 dev_kfree_skb_any(skb);
2253 return NETDEV_TX_OK;
2254 }
2255
2256 if (tso)
2257 tx_flags |= IGBVF_TX_FLAGS_TSO;
2258 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
2259 (skb->ip_summed == CHECKSUM_PARTIAL))
2260 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2261
2262 /*
2263 * count reflects descriptors mapped, if 0 then mapping error
2264	 * has occurred and we need to rewind the descriptor queue
2265 */
2266 count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
2267
2268 if (count) {
2269 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2270 skb->len, hdr_len);
2271 netdev->trans_start = jiffies;
2272 /* Make sure there is space in the ring for the next send. */
2273 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2274 } else {
2275 dev_kfree_skb_any(skb);
2276 tx_ring->buffer_info[first].time_stamp = 0;
2277 tx_ring->next_to_use = first;
2278 }
2279
2280 return NETDEV_TX_OK;
2281}
2282
2283static int igbvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2284{
2285 struct igbvf_adapter *adapter = netdev_priv(netdev);
2286 struct igbvf_ring *tx_ring;
2287 int retval;
2288
2289 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2290 dev_kfree_skb_any(skb);
2291 return NETDEV_TX_OK;
2292 }
2293
2294 tx_ring = &adapter->tx_ring[0];
2295
2296 retval = igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
2297
2298 return retval;
2299}
2300
2301/**
2302 * igbvf_tx_timeout - Respond to a Tx Hang
2303 * @netdev: network interface device structure
2304 **/
2305static void igbvf_tx_timeout(struct net_device *netdev)
2306{
2307 struct igbvf_adapter *adapter = netdev_priv(netdev);
2308
2309 /* Do the reset outside of interrupt context */
2310 adapter->tx_timeout_count++;
2311 schedule_work(&adapter->reset_task);
2312}
2313
2314static void igbvf_reset_task(struct work_struct *work)
2315{
2316 struct igbvf_adapter *adapter;
2317 adapter = container_of(work, struct igbvf_adapter, reset_task);
2318
2319 igbvf_reinit_locked(adapter);
2320}
2321
2322/**
2323 * igbvf_get_stats - Get System Network Statistics
2324 * @netdev: network interface device structure
2325 *
2326 * Returns the address of the device statistics structure.
2327 * The statistics are actually updated from the timer callback.
2328 **/
2329static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
2330{
2331 struct igbvf_adapter *adapter = netdev_priv(netdev);
2332
2333 /* only return the current stats */
2334 return &adapter->net_stats;
2335}
2336
2337/**
2338 * igbvf_change_mtu - Change the Maximum Transfer Unit
2339 * @netdev: network interface device structure
2340 * @new_mtu: new value for maximum frame size
2341 *
2342 * Returns 0 on success, negative on failure
2343 **/
2344static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2345{
2346 struct igbvf_adapter *adapter = netdev_priv(netdev);
2347 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2348
2349 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2350 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
2351 return -EINVAL;
2352 }
2353
2354 /* Jumbo frame size limits */
2355 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
2356 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
2357 dev_err(&adapter->pdev->dev,
2358 "Jumbo Frames not supported.\n");
2359 return -EINVAL;
2360 }
2361 }
2362
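/* 9234 = 9216-byte jumbo MTU + 14-byte Ethernet header + 4-byte FCS */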
2363#define MAX_STD_JUMBO_FRAME_SIZE 9234
2364 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2365 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2366 return -EINVAL;
2367 }
2368
2369 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2370 msleep(1);
2371 /* igbvf_down has a dependency on max_frame_size */
2372 adapter->max_frame_size = max_frame;
2373 if (netif_running(netdev))
2374 igbvf_down(adapter);
2375
2376 /*
2377 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2378 * means we reserve 2 more, this pushes us to allocate from the next
2379 * larger slab size.
2380 * i.e. RXBUFFER_2048 --> size-4096 slab
2381 * However with the new *_jumbo_rx* routines, jumbo receives will use
2382 * fragmented skbs
2383 */
2384
2385 if (max_frame <= 1024)
2386 adapter->rx_buffer_len = 1024;
2387 else if (max_frame <= 2048)
2388 adapter->rx_buffer_len = 2048;
2389 else
2390#if (PAGE_SIZE / 2) > 16384
2391 adapter->rx_buffer_len = 16384;
2392#else
2393 adapter->rx_buffer_len = PAGE_SIZE / 2;
2394#endif
2395
2396
2397 /* adjust allocation if LPE protects us, and we aren't using SBP */
2398 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2399 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2400 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2401 ETH_FCS_LEN;
2402
2403 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2404 netdev->mtu, new_mtu);
2405 netdev->mtu = new_mtu;
2406
2407 if (netif_running(netdev))
2408 igbvf_up(adapter);
2409 else
2410 igbvf_reset(adapter);
2411
2412 clear_bit(__IGBVF_RESETTING, &adapter->state);
2413
2414 return 0;
2415}
2416
2417static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2418{
2419 switch (cmd) {
2420 default:
2421 return -EOPNOTSUPP;
2422 }
2423}
2424
2425static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
2426{
2427 struct net_device *netdev = pci_get_drvdata(pdev);
2428 struct igbvf_adapter *adapter = netdev_priv(netdev);
2429#ifdef CONFIG_PM
2430 int retval = 0;
2431#endif
2432
2433 netif_device_detach(netdev);
2434
2435 if (netif_running(netdev)) {
2436 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
2437 igbvf_down(adapter);
2438 igbvf_free_irq(adapter);
2439 }
2440
2441#ifdef CONFIG_PM
2442 retval = pci_save_state(pdev);
2443 if (retval)
2444 return retval;
2445#endif
2446
2447 pci_disable_device(pdev);
2448
2449 return 0;
2450}
2451
2452#ifdef CONFIG_PM
2453static int igbvf_resume(struct pci_dev *pdev)
2454{
2455 struct net_device *netdev = pci_get_drvdata(pdev);
2456 struct igbvf_adapter *adapter = netdev_priv(netdev);
2457 u32 err;
2458
2459 pci_restore_state(pdev);
2460 err = pci_enable_device_mem(pdev);
2461 if (err) {
2462 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2463 return err;
2464 }
2465
2466 pci_set_master(pdev);
2467
2468 if (netif_running(netdev)) {
2469 err = igbvf_request_irq(adapter);
2470 if (err)
2471 return err;
2472 }
2473
2474 igbvf_reset(adapter);
2475
2476 if (netif_running(netdev))
2477 igbvf_up(adapter);
2478
2479 netif_device_attach(netdev);
2480
2481 return 0;
2482}
2483#endif
2484
2485static void igbvf_shutdown(struct pci_dev *pdev)
2486{
2487 igbvf_suspend(pdev, PMSG_SUSPEND);
2488}
2489
2490#ifdef CONFIG_NET_POLL_CONTROLLER
2491/*
2492 * Polling 'interrupt' - used by things like netconsole to send skbs
2493 * without having to re-enable interrupts. It's not called while
2494 * the interrupt routine is executing.
2495 */
2496static void igbvf_netpoll(struct net_device *netdev)
2497{
2498 struct igbvf_adapter *adapter = netdev_priv(netdev);
2499
2500 disable_irq(adapter->pdev->irq);
2501
2502 igbvf_clean_tx_irq(adapter->tx_ring);
2503
2504 enable_irq(adapter->pdev->irq);
2505}
2506#endif
2507
2508/**
2509 * igbvf_io_error_detected - called when PCI error is detected
2510 * @pdev: Pointer to PCI device
2511 * @state: The current pci connection state
2512 *
2513 * This function is called after a PCI bus error affecting
2514 * this device has been detected.
2515 */
2516static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2517 pci_channel_state_t state)
2518{
2519 struct net_device *netdev = pci_get_drvdata(pdev);
2520 struct igbvf_adapter *adapter = netdev_priv(netdev);
2521
2522 netif_device_detach(netdev);
2523
2524 if (netif_running(netdev))
2525 igbvf_down(adapter);
2526 pci_disable_device(pdev);
2527
2528	/* Request a slot reset. */
2529 return PCI_ERS_RESULT_NEED_RESET;
2530}
2531
2532/**
2533 * igbvf_io_slot_reset - called after the pci bus has been reset.
2534 * @pdev: Pointer to PCI device
2535 *
2536 * Restart the card from scratch, as if from a cold-boot. Implementation
2537 * resembles the first-half of the igbvf_resume routine.
2538 */
2539static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
2540{
2541 struct net_device *netdev = pci_get_drvdata(pdev);
2542 struct igbvf_adapter *adapter = netdev_priv(netdev);
2543
2544 if (pci_enable_device_mem(pdev)) {
2545 dev_err(&pdev->dev,
2546 "Cannot re-enable PCI device after reset.\n");
2547 return PCI_ERS_RESULT_DISCONNECT;
2548 }
2549 pci_set_master(pdev);
2550
2551 igbvf_reset(adapter);
2552
2553 return PCI_ERS_RESULT_RECOVERED;
2554}
2555
2556/**
2557 * igbvf_io_resume - called when traffic can start flowing again.
2558 * @pdev: Pointer to PCI device
2559 *
2560 * This callback is called when the error recovery driver tells us that
2561 * it's OK to resume normal operation. Implementation resembles the
2562 * second-half of the igbvf_resume routine.
2563 */
2564static void igbvf_io_resume(struct pci_dev *pdev)
2565{
2566 struct net_device *netdev = pci_get_drvdata(pdev);
2567 struct igbvf_adapter *adapter = netdev_priv(netdev);
2568
2569 if (netif_running(netdev)) {
2570 if (igbvf_up(adapter)) {
2571 dev_err(&pdev->dev,
2572 "can't bring device back up after reset\n");
2573 return;
2574 }
2575 }
2576
2577 netif_device_attach(netdev);
2578}
2579
2580static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2581{
2582 struct e1000_hw *hw = &adapter->hw;
2583 struct net_device *netdev = adapter->netdev;
2584 struct pci_dev *pdev = adapter->pdev;
2585
2586 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2587 dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
2588 /* MAC address */
2589 netdev->dev_addr[0], netdev->dev_addr[1],
2590 netdev->dev_addr[2], netdev->dev_addr[3],
2591 netdev->dev_addr[4], netdev->dev_addr[5]);
2592 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
2593}
2594
2595static const struct net_device_ops igbvf_netdev_ops = {
2596 .ndo_open = igbvf_open,
2597 .ndo_stop = igbvf_close,
2598 .ndo_start_xmit = igbvf_xmit_frame,
2599 .ndo_get_stats = igbvf_get_stats,
2600 .ndo_set_multicast_list = igbvf_set_multi,
2601 .ndo_set_mac_address = igbvf_set_mac,
2602 .ndo_change_mtu = igbvf_change_mtu,
2603 .ndo_do_ioctl = igbvf_ioctl,
2604 .ndo_tx_timeout = igbvf_tx_timeout,
2605 .ndo_vlan_rx_register = igbvf_vlan_rx_register,
2606 .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
2607 .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
2608#ifdef CONFIG_NET_POLL_CONTROLLER
2609 .ndo_poll_controller = igbvf_netpoll,
2610#endif
2611};
2612
2613/**
2614 * igbvf_probe - Device Initialization Routine
2615 * @pdev: PCI device information struct
2616 * @ent: entry in igbvf_pci_tbl
2617 *
2618 * Returns 0 on success, negative on failure
2619 *
2620 * igbvf_probe initializes an adapter identified by a pci_dev structure.
2621 * The OS initialization, configuring of the adapter private structure,
2622 * and a hardware reset occur.
2623 **/
2624static int __devinit igbvf_probe(struct pci_dev *pdev,
2625 const struct pci_device_id *ent)
2626{
2627 struct net_device *netdev;
2628 struct igbvf_adapter *adapter;
2629 struct e1000_hw *hw;
2630 const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
2631
2632 static int cards_found;
2633 int err, pci_using_dac;
2634
2635 err = pci_enable_device_mem(pdev);
2636 if (err)
2637 return err;
2638
2639 pci_using_dac = 0;
2640 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
2641 if (!err) {
2642 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2643 if (!err)
2644 pci_using_dac = 1;
2645 } else {
2646 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2647 if (err) {
2648 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2649 if (err) {
2650 dev_err(&pdev->dev, "No usable DMA "
2651 "configuration, aborting\n");
2652 goto err_dma;
2653 }
2654 }
2655 }
2656
2657 err = pci_request_regions(pdev, igbvf_driver_name);
2658 if (err)
2659 goto err_pci_reg;
2660
2661 pci_set_master(pdev);
2662
2663 err = -ENOMEM;
2664 netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
2665 if (!netdev)
2666 goto err_alloc_etherdev;
2667
2668 SET_NETDEV_DEV(netdev, &pdev->dev);
2669
2670 pci_set_drvdata(pdev, netdev);
2671 adapter = netdev_priv(netdev);
2672 hw = &adapter->hw;
2673 adapter->netdev = netdev;
2674 adapter->pdev = pdev;
2675 adapter->ei = ei;
2676 adapter->pba = ei->pba;
2677 adapter->flags = ei->flags;
2678 adapter->hw.back = adapter;
2679 adapter->hw.mac.type = ei->mac;
2680 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
2681
2682 /* PCI config space info */
2683
2684 hw->vendor_id = pdev->vendor;
2685 hw->device_id = pdev->device;
2686 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2687 hw->subsystem_device_id = pdev->subsystem_device;
2688
2689 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2690
2691 err = -EIO;
2692 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2693 pci_resource_len(pdev, 0));
2694
2695 if (!adapter->hw.hw_addr)
2696 goto err_ioremap;
2697
2698 if (ei->get_variants) {
2699 err = ei->get_variants(adapter);
2700 if (err)
2701 goto err_ioremap;
2702 }
2703
2704 /* setup adapter struct */
2705 err = igbvf_sw_init(adapter);
2706 if (err)
2707 goto err_sw_init;
2708
2709 /* construct the net_device struct */
2710 netdev->netdev_ops = &igbvf_netdev_ops;
2711
2712 igbvf_set_ethtool_ops(netdev);
2713 netdev->watchdog_timeo = 5 * HZ;
2714 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2715
2716 adapter->bd_number = cards_found++;
2717
2718 netdev->features = NETIF_F_SG |
2719 NETIF_F_IP_CSUM |
2720 NETIF_F_HW_VLAN_TX |
2721 NETIF_F_HW_VLAN_RX |
2722 NETIF_F_HW_VLAN_FILTER;
2723
2724 netdev->features |= NETIF_F_IPV6_CSUM;
2725 netdev->features |= NETIF_F_TSO;
2726 netdev->features |= NETIF_F_TSO6;
2727
2728 if (pci_using_dac)
2729 netdev->features |= NETIF_F_HIGHDMA;
2730
2731 netdev->vlan_features |= NETIF_F_TSO;
2732 netdev->vlan_features |= NETIF_F_TSO6;
2733 netdev->vlan_features |= NETIF_F_IP_CSUM;
2734 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
2735 netdev->vlan_features |= NETIF_F_SG;
2736
2737	/* reset the controller to put the device in a known good state */
2738 err = hw->mac.ops.reset_hw(hw);
2739 if (err) {
2740 dev_info(&pdev->dev,
2741 "PF still in reset state, assigning new address\n");
2742 random_ether_addr(hw->mac.addr);
2743 } else {
2744 err = hw->mac.ops.read_mac_addr(hw);
2745 if (err) {
2746 dev_err(&pdev->dev, "Error reading MAC address\n");
2747 goto err_hw_init;
2748 }
2749 }
2750
2751 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2752 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
2753
2754 if (!is_valid_ether_addr(netdev->perm_addr)) {
2755 dev_err(&pdev->dev, "Invalid MAC Address: "
2756 "%02x:%02x:%02x:%02x:%02x:%02x\n",
2757 netdev->dev_addr[0], netdev->dev_addr[1],
2758 netdev->dev_addr[2], netdev->dev_addr[3],
2759 netdev->dev_addr[4], netdev->dev_addr[5]);
2760 err = -EIO;
2761 goto err_hw_init;
2762 }
2763
2764 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2765 (unsigned long) adapter);
2766
2767 INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2768 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
2769
2770 /* ring size defaults */
2771 adapter->rx_ring->count = 1024;
2772 adapter->tx_ring->count = 1024;
2773
2774 /* reset the hardware with the new settings */
2775 igbvf_reset(adapter);
2776
2777 /* tell the stack to leave us alone until igbvf_open() is called */
2778 netif_carrier_off(netdev);
2779 netif_stop_queue(netdev);
2780
2781 strcpy(netdev->name, "eth%d");
2782 err = register_netdev(netdev);
2783 if (err)
2784 goto err_hw_init;
2785
2786 igbvf_print_device_info(adapter);
2787
2788 igbvf_initialize_last_counter_stats(adapter);
2789
2790 return 0;
2791
2792err_hw_init:
2793 kfree(adapter->tx_ring);
2794 kfree(adapter->rx_ring);
2795err_sw_init:
2796 igbvf_reset_interrupt_capability(adapter);
2797 iounmap(adapter->hw.hw_addr);
2798err_ioremap:
2799 free_netdev(netdev);
2800err_alloc_etherdev:
2801 pci_release_regions(pdev);
2802err_pci_reg:
2803err_dma:
2804 pci_disable_device(pdev);
2805 return err;
2806}
2807
2808/**
2809 * igbvf_remove - Device Removal Routine
2810 * @pdev: PCI device information struct
2811 *
2812 * igbvf_remove is called by the PCI subsystem to alert the driver
2813 * that it should release a PCI device. This could be caused by a
2814 * Hot-Plug event, or because the driver is going to be removed from
2815 * memory.
2816 **/
2817static void __devexit igbvf_remove(struct pci_dev *pdev)
2818{
2819 struct net_device *netdev = pci_get_drvdata(pdev);
2820 struct igbvf_adapter *adapter = netdev_priv(netdev);
2821 struct e1000_hw *hw = &adapter->hw;
2822
2823 /*
2824	 * flush_scheduled_work() may reschedule our watchdog task, so
2825	 * explicitly disable the watchdog task from being rescheduled
2826 */
2827 set_bit(__IGBVF_DOWN, &adapter->state);
2828 del_timer_sync(&adapter->watchdog_timer);
2829
2830 flush_scheduled_work();
2831
2832 unregister_netdev(netdev);
2833
2834 igbvf_reset_interrupt_capability(adapter);
2835
2836 /*
2837 * it is important to delete the napi struct prior to freeing the
2838 * rx ring so that you do not end up with null pointer refs
2839 */
2840 netif_napi_del(&adapter->rx_ring->napi);
2841 kfree(adapter->tx_ring);
2842 kfree(adapter->rx_ring);
2843
2844 iounmap(hw->hw_addr);
2845 if (hw->flash_address)
2846 iounmap(hw->flash_address);
2847 pci_release_regions(pdev);
2848
2849 free_netdev(netdev);
2850
2851 pci_disable_device(pdev);
2852}
2853
2854/* PCI Error Recovery (ERS) */
2855static struct pci_error_handlers igbvf_err_handler = {
2856 .error_detected = igbvf_io_error_detected,
2857 .slot_reset = igbvf_io_slot_reset,
2858 .resume = igbvf_io_resume,
2859};
2860
2861static struct pci_device_id igbvf_pci_tbl[] = {
2862 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2863 { } /* terminate list */
2864};
2865MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
2866
2867/* PCI Device API Driver */
2868static struct pci_driver igbvf_driver = {
2869 .name = igbvf_driver_name,
2870 .id_table = igbvf_pci_tbl,
2871 .probe = igbvf_probe,
2872 .remove = __devexit_p(igbvf_remove),
2873#ifdef CONFIG_PM
2874 /* Power Management Hooks */
2875 .suspend = igbvf_suspend,
2876 .resume = igbvf_resume,
2877#endif
2878 .shutdown = igbvf_shutdown,
2879 .err_handler = &igbvf_err_handler
2880};
2881
2882/**
2883 * igbvf_init_module - Driver Registration Routine
2884 *
2885 * igbvf_init_module is the first routine called when the driver is
2886 * loaded. All it does is register with the PCI subsystem.
2887 **/
2888static int __init igbvf_init_module(void)
2889{
2890 int ret;
2891 printk(KERN_INFO "%s - version %s\n",
2892 igbvf_driver_string, igbvf_driver_version);
2893 printk(KERN_INFO "%s\n", igbvf_copyright);
2894
2895 ret = pci_register_driver(&igbvf_driver);
2896 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name,
2897 PM_QOS_DEFAULT_VALUE);
2898
2899 return ret;
2900}
2901module_init(igbvf_init_module);
2902
2903/**
2904 * igbvf_exit_module - Driver Exit Cleanup Routine
2905 *
2906 * igbvf_exit_module is called just before the driver is removed
2907 * from memory.
2908 **/
2909static void __exit igbvf_exit_module(void)
2910{
2911 pci_unregister_driver(&igbvf_driver);
2912 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name);
2913}
2914module_exit(igbvf_exit_module);
2915
2916
2917MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
2918MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
2919MODULE_LICENSE("GPL");
2920MODULE_VERSION(DRV_VERSION);
2921
2922/* netdev.c */
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
new file mode 100644
index 000000000000..b9e24ed70d0a
--- /dev/null
+++ b/drivers/net/igbvf/regs.h
@@ -0,0 +1,108 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_REGS_H_
29#define _E1000_REGS_H_
30
31#define E1000_CTRL 0x00000 /* Device Control - RW */
32#define E1000_STATUS 0x00008 /* Device Status - RO */
33#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
34#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
35#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
36#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
37#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
38#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
39#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
40#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
41#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
42#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
43/*
44 * Convenience macros
45 *
46 * Note: "_n" is the queue number of the register to be written to.
47 *
48 * Example usage:
49 * E1000_RDBAL(current_rx_queue)
50 */
51#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
52 (0x0C000 + ((_n) * 0x40)))
53#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
54 (0x0C004 + ((_n) * 0x40)))
55#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
56 (0x0C008 + ((_n) * 0x40)))
57#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
58 (0x0C00C + ((_n) * 0x40)))
59#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
60 (0x0C010 + ((_n) * 0x40)))
61#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
62 (0x0C018 + ((_n) * 0x40)))
63#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
64 (0x0C028 + ((_n) * 0x40)))
65#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
66 (0x0E000 + ((_n) * 0x40)))
67#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
68 (0x0E004 + ((_n) * 0x40)))
69#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
70 (0x0E008 + ((_n) * 0x40)))
71#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
72 (0x0E010 + ((_n) * 0x40)))
73#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
74 (0x0E018 + ((_n) * 0x40)))
75#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
76 (0x0E028 + ((_n) * 0x40)))
77#define E1000_DCA_TXCTRL(_n) (0x03814 + ((_n) << 8))
78#define E1000_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
79#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
80 (0x054E0 + (((_i) - 16) * 8)))
81#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
82 (0x054E4 + (((_i) - 16) * 8)))
83
84/* Statistics registers */
85#define E1000_VFGPRC 0x00F10
86#define E1000_VFGORC 0x00F18
87#define E1000_VFMPRC 0x00F3C
88#define E1000_VFGPTC 0x00F14
89#define E1000_VFGOTC 0x00F34
90#define E1000_VFGOTLBC 0x00F50
91#define E1000_VFGPTLBC 0x00F44
92#define E1000_VFGORLBC 0x00F48
93#define E1000_VFGPRLBC 0x00F40
94
95/* These act per VF, so an array-friendly macro is used */
96#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
97#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
98
99/* Define macros for handling registers */
100#define er32(reg) readl(hw->hw_addr + E1000_##reg)
101#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg)
102#define array_er32(reg, offset) \
103 readl(hw->hw_addr + E1000_##reg + (offset << 2))
104#define array_ew32(reg, offset, val) \
105 writel((val), hw->hw_addr + E1000_##reg + (offset << 2))
106#define e1e_flush() er32(STATUS)
107
108#endif
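
A note on the queue-indexed macros above: queues 0-3 resolve into the legacy register blocks (0x02800/0x03800 base, 0x100 stride), while queues 4 and up resolve into the extended blocks (0x0C000/0x0E000 base, 0x40 stride); the driver then reaches the registers through er32()/ew32() against hw->hw_addr. The standalone sketch below re-declares two of the macros purely for illustration (an assumption, not part of the driver sources) and prints the offsets they produce.

/* Standalone illustration only -- macros copied from regs.h above. */
#include <stdio.h>

#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
                                    (0x0C000 + ((_n) * 0x40)))
#define E1000_TDT(_n)   ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
                                    (0x0E018 + ((_n) * 0x40)))

int main(void)
{
        /* Queues 0-3 land in the legacy block with a 0x100 stride. */
        printf("RDBAL(0)=0x%05x RDBAL(2)=0x%05x\n",
               E1000_RDBAL(0), E1000_RDBAL(2));  /* 0x02800, 0x02a00 */
        /* Queue 5 lands in the extended block with a 0x40 stride. */
        printf("RDBAL(5)=0x%05x TDT(5)=0x%05x\n",
               E1000_RDBAL(5), E1000_TDT(5));    /* 0x0c140, 0x0e158 */
        return 0;
}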
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
new file mode 100644
index 000000000000..2a4faf9ade69
--- /dev/null
+++ b/drivers/net/igbvf/vf.c
@@ -0,0 +1,398 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29#include "vf.h"
30
31static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
32static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
33 u16 *duplex);
34static s32 e1000_init_hw_vf(struct e1000_hw *hw);
35static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
36
37static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
38 u32, u32, u32);
39static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
40static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
41static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
42
43/**
44 * e1000_init_mac_params_vf - Inits MAC params
45 * @hw: pointer to the HW structure
46 **/
47static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
48{
49 struct e1000_mac_info *mac = &hw->mac;
50
51 /* VFs have no MTA Registers - PF feature only */
52 mac->mta_reg_count = 128;
53 /* VFs have no access to RAR entries */
54 mac->rar_entry_count = 1;
55
56 /* Function pointers */
57 /* reset */
58 mac->ops.reset_hw = e1000_reset_hw_vf;
59 /* hw initialization */
60 mac->ops.init_hw = e1000_init_hw_vf;
61 /* check for link */
62 mac->ops.check_for_link = e1000_check_for_link_vf;
63 /* link info */
64 mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
65 /* multicast address update */
66 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
67 /* set mac address */
68 mac->ops.rar_set = e1000_rar_set_vf;
69 /* read mac address */
70 mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
71 /* set vlan filter table array */
72 mac->ops.set_vfta = e1000_set_vfta_vf;
73
74 return E1000_SUCCESS;
75}
76
77/**
78 * e1000_init_function_pointers_vf - Inits function pointers
79 * @hw: pointer to the HW structure
80 **/
81void e1000_init_function_pointers_vf(struct e1000_hw *hw)
82{
83 hw->mac.ops.init_params = e1000_init_mac_params_vf;
84 hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
85}
86
87/**
88 * e1000_get_link_up_info_vf - Gets link info.
89 * @hw: pointer to the HW structure
90 * @speed: pointer to 16 bit value to store link speed.
91 * @duplex: pointer to 16 bit value to store duplex.
92 *
93 * Since we cannot read the PHY and get accurate link info, we must rely upon
94 * the status register's data which is often stale and inaccurate.
95 **/
96static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
97 u16 *duplex)
98{
99 s32 status;
100
101 status = er32(STATUS);
102 if (status & E1000_STATUS_SPEED_1000)
103 *speed = SPEED_1000;
104 else if (status & E1000_STATUS_SPEED_100)
105 *speed = SPEED_100;
106 else
107 *speed = SPEED_10;
108
109 if (status & E1000_STATUS_FD)
110 *duplex = FULL_DUPLEX;
111 else
112 *duplex = HALF_DUPLEX;
113
114 return E1000_SUCCESS;
115}
116
117/**
118 * e1000_reset_hw_vf - Resets the HW
119 * @hw: pointer to the HW structure
120 *
121 * VFs provide a function-level reset. This is done using bit 26 of ctrl_reg.
122 * This is all the reset we can perform on a VF.
123 **/
124static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
125{
126 struct e1000_mbx_info *mbx = &hw->mbx;
127 u32 timeout = E1000_VF_INIT_TIMEOUT;
128 s32 ret_val = -E1000_ERR_MAC_INIT;
129 u32 msgbuf[3];
130 u8 *addr = (u8 *)(&msgbuf[1]);
131 u32 ctrl;
132
133 /* assert vf queue/interrupt reset */
134 ctrl = er32(CTRL);
135 ew32(CTRL, ctrl | E1000_CTRL_RST);
136
137 /* we cannot initialize while the RSTI / RSTD bits are asserted */
138 while (!mbx->ops.check_for_rst(hw) && timeout) {
139 timeout--;
140 udelay(5);
141 }
142
143 if (timeout) {
144 /* mailbox timeout can now become active */
145 mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
146
147 /* notify pf of vf reset completion */
148 msgbuf[0] = E1000_VF_RESET;
149 mbx->ops.write_posted(hw, msgbuf, 1);
150
151 msleep(10);
152
153 /* set our "perm_addr" based on info provided by PF */
154 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
155 if (!ret_val) {
156 if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
157 memcpy(hw->mac.perm_addr, addr, 6);
158 else
159 ret_val = -E1000_ERR_MAC_INIT;
160 }
161 }
162
163 return ret_val;
164}
165
166/**
167 * e1000_init_hw_vf - Inits the HW
168 * @hw: pointer to the HW structure
169 *
170 * Not much to do here except clear the PF Reset indication if there is one.
171 **/
172static s32 e1000_init_hw_vf(struct e1000_hw *hw)
173{
174 /* attempt to set and restore our mac address */
175 e1000_rar_set_vf(hw, hw->mac.addr, 0);
176
177 return E1000_SUCCESS;
178}
179
180/**
181 * e1000_hash_mc_addr_vf - Generate a multicast hash value
182 * @hw: pointer to the HW structure
183 * @mc_addr: pointer to a multicast address
184 *
185 * Generates a multicast address hash value which is used to determine
186 * the multicast filter table array address and new table value. See
187 * e1000_mta_set_generic()
188 **/
189static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
190{
191 u32 hash_value, hash_mask;
192 u8 bit_shift = 0;
193
194 /* Register count multiplied by bits per register */
195 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
196
197 /*
198 * The bit_shift is the number of left-shifts
199 * where 0xFF would still fall within the hash mask.
200 */
201 while (hash_mask >> bit_shift != 0xFF)
202 bit_shift++;
203
204 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
205 (((u16) mc_addr[5]) << bit_shift)));
206
207 return hash_value;
208}
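
A worked example of the hash above, using the mta_reg_count of 128 set in e1000_init_mac_params_vf(): hash_mask = 128 * 32 - 1 = 0xFFF, bit_shift settles at 4 (since 0xFFF >> 4 == 0xFF), so the hash is the low 12 bits built from the top nibble of mc_addr[4] and all eight bits of mc_addr[5]. The standalone sketch below only mirrors that arithmetic; the multicast address is a made-up example.

/* Standalone sketch of e1000_hash_mc_addr_vf() with mta_reg_count = 128. */
#include <stdio.h>

int main(void)
{
        unsigned char mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x12, 0x34 };
        unsigned int hash_mask = (128 * 32) - 1;        /* 0xfff */
        unsigned int bit_shift = 0;
        unsigned int hash_value;

        while (hash_mask >> bit_shift != 0xFF)
                bit_shift++;                            /* ends at 4 */

        hash_value = hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
                                  ((unsigned short)mc_addr[5] << bit_shift));

        /* 0x12 >> 4 = 0x1, 0x34 << 4 = 0x340, OR = 0x341 */
        printf("bit_shift=%u hash=0x%03x\n", bit_shift, hash_value);
        return 0;
}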
209
210/**
211 * e1000_update_mc_addr_list_vf - Update Multicast addresses
212 * @hw: pointer to the HW structure
213 * @mc_addr_list: array of multicast addresses to program
214 * @mc_addr_count: number of multicast addresses to program
215 * @rar_used_count: the first RAR register free to program
216 * @rar_count: total number of supported Receive Address Registers
217 *
218 * Updates the Receive Address Registers and Multicast Table Array.
219 * The caller must have a packed mc_addr_list of multicast addresses.
220 * The parameter rar_count will usually be hw->mac.rar_entry_count
221 * unless there are workarounds that change this.
222 **/
223static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
224 u8 *mc_addr_list, u32 mc_addr_count,
225 u32 rar_used_count, u32 rar_count)
226{
227 struct e1000_mbx_info *mbx = &hw->mbx;
228 u32 msgbuf[E1000_VFMAILBOX_SIZE];
229 u16 *hash_list = (u16 *)&msgbuf[1];
230 u32 hash_value;
231 u32 cnt, i;
232
233 /* Each entry in the list uses one 16-bit word. We have 30
234 * 16-bit words available in our HW msg buffer (minus 1 for the
235 * msg type). That's 30 hash values if we pack them tightly. If
236 * there are more than 30 MC addresses to add, punt the extras
237 * for now and add code to handle more than 30 later. It would
238 * be unusual for a server to request that many multicast
239 * addresses except in large enterprise network environments.
240 */
241
242 cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
243 msgbuf[0] = E1000_VF_SET_MULTICAST;
244 msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT;
245
246 for (i = 0; i < cnt; i++) {
247 hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
248 hash_list[i] = hash_value & 0x0FFFF;
249 mc_addr_list += ETH_ADDR_LEN;
250 }
251
252 mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE);
253}
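
To make the packing above concrete: word 0 of the mailbox message carries E1000_VF_SET_MULTICAST plus the entry count in the MSGINFO field, and the u16 overlay starting at msgbuf[1] packs two hash values per 32-bit word, 30 in total. The standalone sketch below imitates that layout on a little-endian host; the opcode value, the MSGINFO shift of 16, the 16-dword buffer size (E1000_VFMAILBOX_SIZE from mbx.h) and the hash values are assumptions for illustration only.

/* Standalone sketch of the mailbox message layout built above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned int msgbuf[16];                        /* assumed mailbox size */
        unsigned short *hash_list = (unsigned short *)&msgbuf[1];
        unsigned int cnt = 3, i;

        memset(msgbuf, 0, sizeof(msgbuf));
        msgbuf[0] = 0x03 /* hypothetical E1000_VF_SET_MULTICAST */ |
                    (cnt << 16 /* hypothetical E1000_VT_MSGINFO_SHIFT */);

        for (i = 0; i < cnt; i++)
                hash_list[i] = 0x341 + i;               /* stand-in hash values */

        /* hashes 0 and 1 share msgbuf[1]; hash 2 sits in the low half of msgbuf[2] */
        printf("msgbuf[0]=0x%08x msgbuf[1]=0x%08x msgbuf[2]=0x%08x\n",
               msgbuf[0], msgbuf[1], msgbuf[2]);
        return 0;
}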
254
255/**
256 * e1000_set_vfta_vf - Set/Unset vlan filter table address
257 * @hw: pointer to the HW structure
258 * @vid: determines the vfta register and bit to set/unset
259 * @set: if true then set bit, else clear bit
260 **/
261static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
262{
263 struct e1000_mbx_info *mbx = &hw->mbx;
264 u32 msgbuf[2];
265 s32 err;
266
267 msgbuf[0] = E1000_VF_SET_VLAN;
268 msgbuf[1] = vid;
269 /* Setting the 8-bit MSG INFO field to 1 indicates "add" */
270 if (set)
271 msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
272
273 mbx->ops.write_posted(hw, msgbuf, 2);
274
275 err = mbx->ops.read_posted(hw, msgbuf, 2);
276
277 /* if NACKed, the VLAN was rejected */
278 if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
279 err = -E1000_ERR_MAC_INIT;
280
281 return err;
282}
283
284/** e1000_rlpml_set_vf - Set the maximum receive packet length
285 * @hw: pointer to the HW structure
286 * @max_size: value to assign to max frame size
287 **/
288void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
289{
290 struct e1000_mbx_info *mbx = &hw->mbx;
291 u32 msgbuf[2];
292
293 msgbuf[0] = E1000_VF_SET_LPE;
294 msgbuf[1] = max_size;
295
296 mbx->ops.write_posted(hw, msgbuf, 2);
297}
298
299/**
300 * e1000_rar_set_vf - set device MAC address
301 * @hw: pointer to the HW structure
302 * @addr: pointer to the receive address
303 * @index: receive address array register
304 **/
305static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
306{
307 struct e1000_mbx_info *mbx = &hw->mbx;
308 u32 msgbuf[3];
309 u8 *msg_addr = (u8 *)(&msgbuf[1]);
310 s32 ret_val;
311
312 memset(msgbuf, 0, 12);
313 msgbuf[0] = E1000_VF_SET_MAC_ADDR;
314 memcpy(msg_addr, addr, 6);
315 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
316
317 if (!ret_val)
318 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
319
320 /* if NACKed, the address was rejected; use "perm_addr" */
321 if (!ret_val &&
322 (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
323 e1000_read_mac_addr_vf(hw);
324}
325
326/**
327 * e1000_read_mac_addr_vf - Read device MAC address
328 * @hw: pointer to the HW structure
329 **/
330static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
331{
332 int i;
333
334 for (i = 0; i < ETH_ADDR_LEN; i++)
335 hw->mac.addr[i] = hw->mac.perm_addr[i];
336
337 return E1000_SUCCESS;
338}
339
340/**
341 * e1000_check_for_link_vf - Check for link for a virtual interface
342 * @hw: pointer to the HW structure
343 *
344 * Checks to see if the underlying PF is still talking to the VF and
345 * if it is then it reports the link state to the hardware, otherwise
346 * it reports link down and returns an error.
347 **/
348static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
349{
350 struct e1000_mbx_info *mbx = &hw->mbx;
351 struct e1000_mac_info *mac = &hw->mac;
352 s32 ret_val = E1000_SUCCESS;
353 u32 in_msg = 0;
354
355 /*
356 * We only want to run this if a reset (rst) has been asserted;
357 * in that case it could mean a link change, a device reset,
358 * or a virtual function reset.
359 */
360
361 /* If we were hit with a reset, drop the link */
362 if (!mbx->ops.check_for_rst(hw))
363 mac->get_link_status = true;
364
365 if (!mac->get_link_status)
366 goto out;
367
368 /* if link status is down no point in checking to see if pf is up */
369 if (!(er32(STATUS) & E1000_STATUS_LU))
370 goto out;
371
372 /* if the read failed it could just be a mailbox collision, best wait
373 * until we are called again and don't report an error */
374 if (mbx->ops.read(hw, &in_msg, 1))
375 goto out;
376
377 /* if incoming message isn't clear to send we are waiting on response */
378 if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
379 /* the message is not CTS but is a NACK, so we must have lost CTS status */
380 if (in_msg & E1000_VT_MSGTYPE_NACK)
381 ret_val = -E1000_ERR_MAC_INIT;
382 goto out;
383 }
384
385 /* the PF is talking; if we timed out in the past, reinit */
386 if (!mbx->timeout) {
387 ret_val = -E1000_ERR_MAC_INIT;
388 goto out;
389 }
390
391 /* if we passed all the tests above then the link is up and we no
392 * longer need to check for link */
393 mac->get_link_status = false;
394
395out:
396 return ret_val;
397}
398
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
new file mode 100644
index 000000000000..1e8ce3741a67
--- /dev/null
+++ b/drivers/net/igbvf/vf.h
@@ -0,0 +1,264 @@
1/*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _E1000_VF_H_
29#define _E1000_VF_H_
30
31#include <linux/pci.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/if_ether.h>
35
36#include "regs.h"
37#include "defines.h"
38
39struct e1000_hw;
40
41#define E1000_DEV_ID_82576_VF 0x10CA
42#define E1000_REVISION_0 0
43#define E1000_REVISION_1 1
44#define E1000_REVISION_2 2
45#define E1000_REVISION_3 3
46#define E1000_REVISION_4 4
47
48#define E1000_FUNC_0 0
49#define E1000_FUNC_1 1
50
51/*
52 * Receive Address Register Count
53 * Number of high/low register pairs in the RAR. The RAR (Receive Address
54 * Registers) holds the directed and multicast addresses that we monitor.
55 * These entries are also used for MAC-based filtering.
56 */
57#define E1000_RAR_ENTRIES_VF 1
58
59/* Receive Descriptor - Advanced */
60union e1000_adv_rx_desc {
61 struct {
62 u64 pkt_addr; /* Packet buffer address */
63 u64 hdr_addr; /* Header buffer address */
64 } read;
65 struct {
66 struct {
67 union {
68 u32 data;
69 struct {
70 u16 pkt_info; /* RSS/Packet type */
71 u16 hdr_info; /* Split Header,
72 * hdr buffer length */
73 } hs_rss;
74 } lo_dword;
75 union {
76 u32 rss; /* RSS Hash */
77 struct {
78 u16 ip_id; /* IP id */
79 u16 csum; /* Packet Checksum */
80 } csum_ip;
81 } hi_dword;
82 } lower;
83 struct {
84 u32 status_error; /* ext status/error */
85 u16 length; /* Packet length */
86 u16 vlan; /* VLAN tag */
87 } upper;
88 } wb; /* writeback */
89};
90
91#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
92#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
93
94/* Transmit Descriptor - Advanced */
95union e1000_adv_tx_desc {
96 struct {
97 u64 buffer_addr; /* Address of descriptor's data buf */
98 u32 cmd_type_len;
99 u32 olinfo_status;
100 } read;
101 struct {
102 u64 rsvd; /* Reserved */
103 u32 nxtseq_seed;
104 u32 status;
105 } wb;
106};
107
108/* Adv Transmit Descriptor Config Masks */
109#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
110#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
111#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
112#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
113#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
114#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
115#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
116#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
117#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
118
119/* Context descriptors */
120struct e1000_adv_tx_context_desc {
121 u32 vlan_macip_lens;
122 u32 seqnum_seed;
123 u32 type_tucmd_mlhl;
124 u32 mss_l4len_idx;
125};
126
127#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
128#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
129#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
130#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
131#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
132
133enum e1000_mac_type {
134 e1000_undefined = 0,
135 e1000_vfadapt,
136 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
137};
138
139struct e1000_vf_stats {
140 u64 base_gprc;
141 u64 base_gptc;
142 u64 base_gorc;
143 u64 base_gotc;
144 u64 base_mprc;
145 u64 base_gotlbc;
146 u64 base_gptlbc;
147 u64 base_gorlbc;
148 u64 base_gprlbc;
149
150 u32 last_gprc;
151 u32 last_gptc;
152 u32 last_gorc;
153 u32 last_gotc;
154 u32 last_mprc;
155 u32 last_gotlbc;
156 u32 last_gptlbc;
157 u32 last_gorlbc;
158 u32 last_gprlbc;
159
160 u64 gprc;
161 u64 gptc;
162 u64 gorc;
163 u64 gotc;
164 u64 mprc;
165 u64 gotlbc;
166 u64 gptlbc;
167 u64 gorlbc;
168 u64 gprlbc;
169};
170
171#include "mbx.h"
172
173struct e1000_mac_operations {
174 /* Function pointers for the MAC. */
175 s32 (*init_params)(struct e1000_hw *);
176 s32 (*check_for_link)(struct e1000_hw *);
177 void (*clear_vfta)(struct e1000_hw *);
178 s32 (*get_bus_info)(struct e1000_hw *);
179 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
180 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
181 s32 (*reset_hw)(struct e1000_hw *);
182 s32 (*init_hw)(struct e1000_hw *);
183 s32 (*setup_link)(struct e1000_hw *);
184 void (*write_vfta)(struct e1000_hw *, u32, u32);
185 void (*mta_set)(struct e1000_hw *, u32);
186 void (*rar_set)(struct e1000_hw *, u8*, u32);
187 s32 (*read_mac_addr)(struct e1000_hw *);
188 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
189};
190
191struct e1000_mac_info {
192 struct e1000_mac_operations ops;
193 u8 addr[6];
194 u8 perm_addr[6];
195
196 enum e1000_mac_type type;
197
198 u16 mta_reg_count;
199 u16 rar_entry_count;
200
201 bool get_link_status;
202};
203
204struct e1000_mbx_operations {
205 s32 (*init_params)(struct e1000_hw *hw);
206 s32 (*read)(struct e1000_hw *, u32 *, u16);
207 s32 (*write)(struct e1000_hw *, u32 *, u16);
208 s32 (*read_posted)(struct e1000_hw *, u32 *, u16);
209 s32 (*write_posted)(struct e1000_hw *, u32 *, u16);
210 s32 (*check_for_msg)(struct e1000_hw *);
211 s32 (*check_for_ack)(struct e1000_hw *);
212 s32 (*check_for_rst)(struct e1000_hw *);
213};
214
215struct e1000_mbx_stats {
216 u32 msgs_tx;
217 u32 msgs_rx;
218
219 u32 acks;
220 u32 reqs;
221 u32 rsts;
222};
223
224struct e1000_mbx_info {
225 struct e1000_mbx_operations ops;
226 struct e1000_mbx_stats stats;
227 u32 timeout;
228 u32 usec_delay;
229 u16 size;
230};
231
232struct e1000_dev_spec_vf {
233 u32 vf_number;
234 u32 v2p_mailbox;
235};
236
237struct e1000_hw {
238 void *back;
239
240 u8 __iomem *hw_addr;
241 u8 __iomem *flash_address;
242 unsigned long io_base;
243
244 struct e1000_mac_info mac;
245 struct e1000_mbx_info mbx;
246
247 union {
248 struct e1000_dev_spec_vf vf;
249 } dev_spec;
250
251 u16 device_id;
252 u16 subsystem_vendor_id;
253 u16 subsystem_device_id;
254 u16 vendor_id;
255
256 u8 revision_id;
257};
258
259/* These functions must be implemented by drivers */
260void e1000_rlpml_set_vf(struct e1000_hw *, u16);
261void e1000_init_function_pointers_vf(struct e1000_hw *hw);
262
263
264#endif /* _E1000_VF_H_ */
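
The ops tables declared above are consumed indirectly: the driver calls e1000_init_function_pointers_vf(), invokes the init_params hooks to fill in the rest of each table, and from then on goes through hw->mac.ops and hw->mbx.ops. A minimal hedged sketch of that call pattern follows (kernel context assumed; the surrounding function name and error handling are illustrative, not part of the driver).

/* Illustrative only -- not part of the driver sources above. */
static int example_vf_hw_init(struct e1000_hw *hw)
{
        s32 err;

        e1000_init_function_pointers_vf(hw);    /* installs the init_params hooks */

        err = hw->mac.ops.init_params(hw);      /* e1000_init_mac_params_vf() */
        if (err)
                return err;
        err = hw->mbx.ops.init_params(hw);      /* e1000_init_mbx_params_vf() */
        if (err)
                return err;

        err = hw->mac.ops.reset_hw(hw);         /* VF function-level reset via mailbox */
        if (err)
                return err;

        return hw->mac.ops.init_hw(hw);         /* restores the MAC address */
}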
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index cbc63ff13add..c5593f4665a4 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1214,6 +1214,19 @@ static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
1214} 1214}
1215#endif 1215#endif
1216 1216
1217static const struct net_device_ops ioc3_netdev_ops = {
1218 .ndo_open = ioc3_open,
1219 .ndo_stop = ioc3_close,
1220 .ndo_start_xmit = ioc3_start_xmit,
1221 .ndo_tx_timeout = ioc3_timeout,
1222 .ndo_get_stats = ioc3_get_stats,
1223 .ndo_set_multicast_list = ioc3_set_multicast_list,
1224 .ndo_do_ioctl = ioc3_ioctl,
1225 .ndo_validate_addr = eth_validate_addr,
1226 .ndo_set_mac_address = ioc3_set_mac_address,
1227 .ndo_change_mtu = eth_change_mtu,
1228};
1229
1217static int __devinit ioc3_probe(struct pci_dev *pdev, 1230static int __devinit ioc3_probe(struct pci_dev *pdev,
1218 const struct pci_device_id *ent) 1231 const struct pci_device_id *ent)
1219{ 1232{
@@ -1310,15 +1323,8 @@ static int __devinit ioc3_probe(struct pci_dev *pdev,
1310 ioc3_get_eaddr(ip); 1323 ioc3_get_eaddr(ip);
1311 1324
1312 /* The IOC3-specific entries in the device structure. */ 1325 /* The IOC3-specific entries in the device structure. */
1313 dev->open = ioc3_open;
1314 dev->hard_start_xmit = ioc3_start_xmit;
1315 dev->tx_timeout = ioc3_timeout;
1316 dev->watchdog_timeo = 5 * HZ; 1326 dev->watchdog_timeo = 5 * HZ;
1317 dev->stop = ioc3_close; 1327 dev->netdev_ops = &ioc3_netdev_ops;
1318 dev->get_stats = ioc3_get_stats;
1319 dev->do_ioctl = ioc3_ioctl;
1320 dev->set_multicast_list = ioc3_set_multicast_list;
1321 dev->set_mac_address = ioc3_set_mac_address;
1322 dev->ethtool_ops = &ioc3_ethtool_ops; 1328 dev->ethtool_ops = &ioc3_ethtool_ops;
1323 dev->features = NETIF_F_IP_CSUM; 1329 dev->features = NETIF_F_IP_CSUM;
1324 1330
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
index 3126678bdd3c..73585fd8f29f 100644
--- a/drivers/net/isa-skeleton.c
+++ b/drivers/net/isa-skeleton.c
@@ -181,6 +181,18 @@ out:
181} 181}
182#endif 182#endif
183 183
184static const struct net_device_ops netcard_netdev_ops = {
185 .ndo_open = net_open,
186 .ndo_stop = net_close,
187 .ndo_start_xmit = net_send_packet,
188 .ndo_get_stats = net_get_stats,
189 .ndo_set_multicast_list = set_multicast_list,
190 .ndo_tx_timeout = net_tx_timeout,
191 .ndo_validate_addr = eth_validate_addr,
192 .ndo_set_mac_address = eth_mac_addr,
193 .ndo_change_mtu = eth_change_mtu,
194};
195
184/* 196/*
185 * This is the real probe routine. Linux has a history of friendly device 197 * This is the real probe routine. Linux has a history of friendly device
185 * probes on the ISA bus. A good device probe avoids doing writes, and 197
@@ -303,13 +315,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr)
303 np = netdev_priv(dev); 315 np = netdev_priv(dev);
304 spin_lock_init(&np->lock); 316 spin_lock_init(&np->lock);
305 317
306 dev->open = net_open; 318 dev->netdev_ops = &netcard_netdev_ops;
307 dev->stop = net_close;
308 dev->hard_start_xmit = net_send_packet;
309 dev->get_stats = net_get_stats;
310 dev->set_multicast_list = &set_multicast_list;
311
312 dev->tx_timeout = &net_tx_timeout;
313 dev->watchdog_timeo = MY_TX_TIMEOUT; 319 dev->watchdog_timeo = MY_TX_TIMEOUT;
314 320
315 err = register_netdev(dev); 321 err = register_netdev(dev);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index a7ae4d45b53d..03eb54f4f1cc 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -921,61 +921,6 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
921} 921}
922 922
923/** 923/**
924 * ixgbe_blink_led_start_82598 - Blink LED based on index.
925 * @hw: pointer to hardware structure
926 * @index: led number to blink
927 **/
928static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
929{
930 ixgbe_link_speed speed = 0;
931 bool link_up = 0;
932 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
933 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
934
935 /*
936 * Link must be up to auto-blink the LEDs on the 82598EB MAC;
937 * force it if link is down.
938 */
939 hw->mac.ops.check_link(hw, &speed, &link_up, false);
940
941 if (!link_up) {
942 autoc_reg |= IXGBE_AUTOC_FLU;
943 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
944 msleep(10);
945 }
946
947 led_reg &= ~IXGBE_LED_MODE_MASK(index);
948 led_reg |= IXGBE_LED_BLINK(index);
949 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
950 IXGBE_WRITE_FLUSH(hw);
951
952 return 0;
953}
954
955/**
956 * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
957 * @hw: pointer to hardware structure
958 * @index: led number to stop blinking
959 **/
960static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
961{
962 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
963 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
964
965 autoc_reg &= ~IXGBE_AUTOC_FLU;
966 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
967 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
968
969 led_reg &= ~IXGBE_LED_MODE_MASK(index);
970 led_reg &= ~IXGBE_LED_BLINK(index);
971 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
972 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
973 IXGBE_WRITE_FLUSH(hw);
974
975 return 0;
976}
977
978/**
979 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register 924 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
980 * @hw: pointer to hardware structure 925 * @hw: pointer to hardware structure
981 * @reg: analog register to read 926 * @reg: analog register to read
@@ -1197,8 +1142,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1197 .get_link_capabilities = &ixgbe_get_link_capabilities_82598, 1142 .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
1198 .led_on = &ixgbe_led_on_generic, 1143 .led_on = &ixgbe_led_on_generic,
1199 .led_off = &ixgbe_led_off_generic, 1144 .led_off = &ixgbe_led_off_generic,
1200 .blink_led_start = &ixgbe_blink_led_start_82598, 1145 .blink_led_start = &ixgbe_blink_led_start_generic,
1201 .blink_led_stop = &ixgbe_blink_led_stop_82598, 1146 .blink_led_stop = &ixgbe_blink_led_stop_generic,
1202 .set_rar = &ixgbe_set_rar_generic, 1147 .set_rar = &ixgbe_set_rar_generic,
1203 .clear_rar = &ixgbe_clear_rar_generic, 1148 .clear_rar = &ixgbe_clear_rar_generic,
1204 .set_vmdq = &ixgbe_set_vmdq_82598, 1149 .set_vmdq = &ixgbe_set_vmdq_82598,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b3f4e96a018c..9e824b450416 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -68,8 +68,6 @@ s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
68s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, 68s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan,
69 u32 vind, bool vlan_on); 69 u32 vind, bool vlan_on);
70s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw); 70s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
71s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index);
72s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index);
73s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); 71s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
74s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); 72s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
75s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); 73s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
@@ -1039,40 +1037,6 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
1039} 1037}
1040 1038
1041/** 1039/**
1042 * ixgbe_blink_led_start_82599 - Blink LED based on index.
1043 * @hw: pointer to hardware structure
1044 * @index: led number to blink
1045 **/
1046s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index)
1047{
1048 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1049
1050 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1051 led_reg |= IXGBE_LED_BLINK(index);
1052 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1053 IXGBE_WRITE_FLUSH(hw);
1054
1055 return 0;
1056}
1057
1058/**
1059 * ixgbe_blink_led_stop_82599 - Stop blinking LED based on index.
1060 * @hw: pointer to hardware structure
1061 * @index: led number to stop blinking
1062 **/
1063s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index)
1064{
1065 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1066
1067 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1068 led_reg &= ~IXGBE_LED_BLINK(index);
1069 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1070 IXGBE_WRITE_FLUSH(hw);
1071
1072 return 0;
1073}
1074
1075/**
1076 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array 1040 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
1077 * @hw: pointer to hardware structure 1041 * @hw: pointer to hardware structure
1078 **/ 1042 **/
@@ -1353,8 +1317,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
1353 .get_link_capabilities = &ixgbe_get_link_capabilities_82599, 1317 .get_link_capabilities = &ixgbe_get_link_capabilities_82599,
1354 .led_on = &ixgbe_led_on_generic, 1318 .led_on = &ixgbe_led_on_generic,
1355 .led_off = &ixgbe_led_off_generic, 1319 .led_off = &ixgbe_led_off_generic,
1356 .blink_led_start = &ixgbe_blink_led_start_82599, 1320 .blink_led_start = &ixgbe_blink_led_start_generic,
1357 .blink_led_stop = &ixgbe_blink_led_stop_82599, 1321 .blink_led_stop = &ixgbe_blink_led_stop_generic,
1358 .set_rar = &ixgbe_set_rar_generic, 1322 .set_rar = &ixgbe_set_rar_generic,
1359 .clear_rar = &ixgbe_clear_rar_generic, 1323 .clear_rar = &ixgbe_clear_rar_generic,
1360 .set_vmdq = &ixgbe_set_vmdq_82599, 1324 .set_vmdq = &ixgbe_set_vmdq_82599,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 2d4af5d2d3f7..5f2ee34e9d1d 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -2074,3 +2074,58 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2074 2074
2075 return 0; 2075 return 0;
2076} 2076}
2077
2078/**
2079 * ixgbe_blink_led_start_generic - Blink LED based on index.
2080 * @hw: pointer to hardware structure
2081 * @index: led number to blink
2082 **/
2083s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2084{
2085 ixgbe_link_speed speed = 0;
2086 bool link_up = 0;
2087 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2088 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2089
2090 /*
2091 * Link must be up to auto-blink the LEDs;
2092 * Force it if link is down.
2093 */
2094 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2095
2096 if (!link_up) {
2097 autoc_reg |= IXGBE_AUTOC_FLU;
2098 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2099 msleep(10);
2100 }
2101
2102 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2103 led_reg |= IXGBE_LED_BLINK(index);
2104 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2105 IXGBE_WRITE_FLUSH(hw);
2106
2107 return 0;
2108}
2109
2110/**
2111 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2112 * @hw: pointer to hardware structure
2113 * @index: led number to stop blinking
2114 **/
2115s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2116{
2117 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2118 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2119
2120 autoc_reg &= ~IXGBE_AUTOC_FLU;
2121 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2122 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2123
2124 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2125 led_reg &= ~IXGBE_LED_BLINK(index);
2126 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2127 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2128 IXGBE_WRITE_FLUSH(hw);
2129
2130 return 0;
2131}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 24f73e719c3f..dd260890ad0a 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -76,6 +76,9 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
76s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); 76s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
77s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); 77s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
78 78
79s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
80s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
81
79#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 82#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
80 83
81#ifndef writeq 84#ifndef writeq
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index aafc120f164e..f0a20facc650 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -943,6 +943,24 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
943} 943}
944 944
945 945
946static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
947 struct ethtool_wolinfo *wol)
948{
949 struct ixgbe_hw *hw = &adapter->hw;
950 int retval = 1;
951
952 switch(hw->device_id) {
953 case IXGBE_DEV_ID_82599_KX4:
954 retval = 0;
955 break;
956 default:
957 wol->supported = 0;
958 retval = 0;
959 }
960
961 return retval;
962}
963
946static void ixgbe_get_wol(struct net_device *netdev, 964static void ixgbe_get_wol(struct net_device *netdev,
947 struct ethtool_wolinfo *wol) 965 struct ethtool_wolinfo *wol)
948{ 966{
@@ -952,7 +970,8 @@ static void ixgbe_get_wol(struct net_device *netdev,
952 WAKE_BCAST | WAKE_MAGIC; 970 WAKE_BCAST | WAKE_MAGIC;
953 wol->wolopts = 0; 971 wol->wolopts = 0;
954 972
955 if (!device_can_wakeup(&adapter->pdev->dev)) 973 if (ixgbe_wol_exclusion(adapter, wol) ||
974 !device_can_wakeup(&adapter->pdev->dev))
956 return; 975 return;
957 976
958 if (adapter->wol & IXGBE_WUFC_EX) 977 if (adapter->wol & IXGBE_WUFC_EX)
@@ -974,6 +993,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
974 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 993 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
975 return -EOPNOTSUPP; 994 return -EOPNOTSUPP;
976 995
996 if (ixgbe_wol_exclusion(adapter, wol))
997 return wol->wolopts ? -EOPNOTSUPP : 0;
998
977 adapter->wol = 0; 999 adapter->wol = 0;
978 1000
979 if (wol->wolopts & WAKE_UCAST) 1001 if (wol->wolopts & WAKE_UCAST)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 813c5bc1a8fa..0d9a3ac043a6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2723,17 +2723,21 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
2723 **/ 2723 **/
2724static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 2724static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2725{ 2725{
2726 /* Start with base case */
2727 adapter->num_rx_queues = 1;
2728 adapter->num_tx_queues = 1;
2729
2730#ifdef CONFIG_IXGBE_DCB 2726#ifdef CONFIG_IXGBE_DCB
2731 if (ixgbe_set_dcb_queues(adapter)) 2727 if (ixgbe_set_dcb_queues(adapter))
2732 return; 2728 goto done;
2733 2729
2734#endif 2730#endif
2735 if (ixgbe_set_rss_queues(adapter)) 2731 if (ixgbe_set_rss_queues(adapter))
2736 return; 2732 goto done;
2733
2734 /* fallback to base case */
2735 adapter->num_rx_queues = 1;
2736 adapter->num_tx_queues = 1;
2737
2738done:
2739 /* Notify the stack of the (possibly) reduced Tx Queue count. */
2740 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
2737} 2741}
2738 2742
2739static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 2743static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -2992,9 +2996,6 @@ try_msi:
2992 } 2996 }
2993 2997
2994out: 2998out:
2995 /* Notify the stack of the (possibly) reduced Tx Queue count. */
2996 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
2997
2998 return err; 2999 return err;
2999} 3000}
3000 3001
@@ -3611,9 +3612,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
3611 3612
3612 return 0; 3613 return 0;
3613} 3614}
3614
3615#endif /* CONFIG_PM */ 3615#endif /* CONFIG_PM */
3616static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) 3616
3617static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
3617{ 3618{
3618 struct net_device *netdev = pci_get_drvdata(pdev); 3619 struct net_device *netdev = pci_get_drvdata(pdev);
3619 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3620 struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -3672,18 +3673,46 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3672 pci_enable_wake(pdev, PCI_D3cold, 0); 3673 pci_enable_wake(pdev, PCI_D3cold, 0);
3673 } 3674 }
3674 3675
3676 *enable_wake = !!wufc;
3677
3675 ixgbe_release_hw_control(adapter); 3678 ixgbe_release_hw_control(adapter);
3676 3679
3677 pci_disable_device(pdev); 3680 pci_disable_device(pdev);
3678 3681
3679 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3682 return 0;
3683}
3684
3685#ifdef CONFIG_PM
3686static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3687{
3688 int retval;
3689 bool wake;
3690
3691 retval = __ixgbe_shutdown(pdev, &wake);
3692 if (retval)
3693 return retval;
3694
3695 if (wake) {
3696 pci_prepare_to_sleep(pdev);
3697 } else {
3698 pci_wake_from_d3(pdev, false);
3699 pci_set_power_state(pdev, PCI_D3hot);
3700 }
3680 3701
3681 return 0; 3702 return 0;
3682} 3703}
3704#endif /* CONFIG_PM */
3683 3705
3684static void ixgbe_shutdown(struct pci_dev *pdev) 3706static void ixgbe_shutdown(struct pci_dev *pdev)
3685{ 3707{
3686 ixgbe_suspend(pdev, PMSG_SUSPEND); 3708 bool wake;
3709
3710 __ixgbe_shutdown(pdev, &wake);
3711
3712 if (system_state == SYSTEM_POWER_OFF) {
3713 pci_wake_from_d3(pdev, wake);
3714 pci_set_power_state(pdev, PCI_D3hot);
3715 }
3687} 3716}
3688 3717
3689/** 3718/**
@@ -4342,7 +4371,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4342 int count = 0; 4371 int count = 0;
4343 unsigned int f; 4372 unsigned int f;
4344 4373
4345 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; 4374 r_idx = skb->queue_mapping;
4346 tx_ring = &adapter->tx_ring[r_idx]; 4375 tx_ring = &adapter->tx_ring[r_idx];
4347 4376
4348 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 4377 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 380a1a54d530..384e072de2e7 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -168,6 +168,17 @@ writereg(struct net_device *dev, int portno, int value)
168 nubus_writew(swab16(value), dev->mem_start + portno); 168 nubus_writew(swab16(value), dev->mem_start + portno);
169} 169}
170 170
171static const struct net_device_ops mac89x0_netdev_ops = {
172 .ndo_open = net_open,
173 .ndo_stop = net_close,
174 .ndo_start_xmit = net_send_packet,
175 .ndo_get_stats = net_get_stats,
176 .ndo_set_multicast_list = set_multicast_list,
177 .ndo_set_mac_address = set_mac_address,
178 .ndo_validate_addr = eth_validate_addr,
179 .ndo_change_mtu = eth_change_mtu,
180};
181
171/* Probe for the CS8900 card in slot E. We won't bother looking 182/* Probe for the CS8900 card in slot E. We won't bother looking
172 anywhere else until we have a really good reason to do so. */ 183 anywhere else until we have a really good reason to do so. */
173struct net_device * __init mac89x0_probe(int unit) 184struct net_device * __init mac89x0_probe(int unit)
@@ -280,12 +291,7 @@ struct net_device * __init mac89x0_probe(int unit)
280 291
281 printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr); 292 printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr);
282 293
283 dev->open = net_open; 294 dev->netdev_ops = &mac89x0_netdev_ops;
284 dev->stop = net_close;
285 dev->hard_start_xmit = net_send_packet;
286 dev->get_stats = net_get_stats;
287 dev->set_multicast_list = &set_multicast_list;
288 dev->set_mac_address = &set_mac_address;
289 295
290 err = register_netdev(dev); 296 err = register_netdev(dev);
291 if (err) 297 if (err)
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f50501013b1c..46073de290cf 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1100,6 +1100,18 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1100 return phy_mii_ioctl(phydev, if_mii(rq), cmd); 1100 return phy_mii_ioctl(phydev, if_mii(rq), cmd);
1101} 1101}
1102 1102
1103static const struct net_device_ops macb_netdev_ops = {
1104 .ndo_open = macb_open,
1105 .ndo_stop = macb_close,
1106 .ndo_start_xmit = macb_start_xmit,
1107 .ndo_set_multicast_list = macb_set_rx_mode,
1108 .ndo_get_stats = macb_get_stats,
1109 .ndo_do_ioctl = macb_ioctl,
1110 .ndo_validate_addr = eth_validate_addr,
1111 .ndo_change_mtu = eth_change_mtu,
1112 .ndo_set_mac_address = eth_mac_addr,
1113};
1114
1103static int __init macb_probe(struct platform_device *pdev) 1115static int __init macb_probe(struct platform_device *pdev)
1104{ 1116{
1105 struct eth_platform_data *pdata; 1117 struct eth_platform_data *pdata;
@@ -1175,12 +1187,7 @@ static int __init macb_probe(struct platform_device *pdev)
1175 goto err_out_iounmap; 1187 goto err_out_iounmap;
1176 } 1188 }
1177 1189
1178 dev->open = macb_open; 1190 dev->netdev_ops = &macb_netdev_ops;
1179 dev->stop = macb_close;
1180 dev->hard_start_xmit = macb_start_xmit;
1181 dev->get_stats = macb_get_stats;
1182 dev->set_multicast_list = macb_set_rx_mode;
1183 dev->do_ioctl = macb_ioctl;
1184 netif_napi_add(dev, &bp->napi, macb_poll, 64); 1191 netif_napi_add(dev, &bp->napi, macb_poll, 64);
1185 dev->ethtool_ops = &macb_ethtool_ops; 1192 dev->ethtool_ops = &macb_ethtool_ops;
1186 1193
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 527166e35d56..acd143da161d 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -167,6 +167,18 @@ static int macsonic_close(struct net_device* dev)
167 return err; 167 return err;
168} 168}
169 169
170static const struct net_device_ops macsonic_netdev_ops = {
171 .ndo_open = macsonic_open,
172 .ndo_stop = macsonic_close,
173 .ndo_start_xmit = sonic_send_packet,
174 .ndo_set_multicast_list = sonic_multicast_list,
175 .ndo_tx_timeout = sonic_tx_timeout,
176 .ndo_get_stats = sonic_get_stats,
177 .ndo_validate_addr = eth_validate_addr,
178 .ndo_change_mtu = eth_change_mtu,
179 .ndo_set_mac_address = eth_mac_addr,
180};
181
170static int __init macsonic_init(struct net_device *dev) 182static int __init macsonic_init(struct net_device *dev)
171{ 183{
172 struct sonic_local* lp = netdev_priv(dev); 184 struct sonic_local* lp = netdev_priv(dev);
@@ -198,12 +210,7 @@ static int __init macsonic_init(struct net_device *dev)
198 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS 210 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
199 * SONIC_BUS_SCALE(lp->dma_bitmode)); 211 * SONIC_BUS_SCALE(lp->dma_bitmode));
200 212
201 dev->open = macsonic_open; 213 dev->netdev_ops = &macsonic_netdev_ops;
202 dev->stop = macsonic_close;
203 dev->hard_start_xmit = sonic_send_packet;
204 dev->get_stats = sonic_get_stats;
205 dev->set_multicast_list = &sonic_multicast_list;
206 dev->tx_timeout = sonic_tx_timeout;
207 dev->watchdog_timeo = TX_TIMEOUT; 214 dev->watchdog_timeo = TX_TIMEOUT;
208 215
209 /* 216 /*
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 7cce3342ef8c..606aa58afdea 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -299,13 +299,14 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
299 struct mlx4_cmd_mailbox *mailbox; 299 struct mlx4_cmd_mailbox *mailbox;
300 int err; 300 int err;
301 301
302 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
303 return 0;
304
302 mailbox = mlx4_alloc_cmd_mailbox(dev); 305 mailbox = mlx4_alloc_cmd_mailbox(dev);
303 if (IS_ERR(mailbox)) 306 if (IS_ERR(mailbox))
304 return PTR_ERR(mailbox); 307 return PTR_ERR(mailbox);
305 308
306 memset(mailbox->buf, 0, 256); 309 memset(mailbox->buf, 0, 256);
307 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
308 return 0;
309 310
310 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; 311 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
311 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, 312 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a56d9d2df73f..b3185bf2c158 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2274,8 +2274,6 @@ static void port_start(struct mv643xx_eth_private *mp)
2274 pscr |= FORCE_LINK_PASS; 2274 pscr |= FORCE_LINK_PASS;
2275 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2275 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2276 2276
2277 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
2278
2279 /* 2277 /*
2280 * Configure TX path and queues. 2278 * Configure TX path and queues.
2281 */ 2279 */
@@ -2957,6 +2955,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2957 2955
2958 netif_carrier_off(dev); 2956 netif_carrier_off(dev);
2959 2957
2958 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
2959
2960 set_rx_coal(mp, 250); 2960 set_rx_coal(mp, 250);
2961 set_tx_coal(mp, 0); 2961 set_tx_coal(mp, 0);
2962 2962
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 9eed126a82f0..f2c4a665e93f 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2447,6 +2447,7 @@ static int myri10ge_open(struct net_device *dev)
2447 lro_mgr->lro_arr = ss->rx_done.lro_desc; 2447 lro_mgr->lro_arr = ss->rx_done.lro_desc;
2448 lro_mgr->get_frag_header = myri10ge_get_frag_header; 2448 lro_mgr->get_frag_header = myri10ge_get_frag_header;
2449 lro_mgr->max_aggr = myri10ge_lro_max_pkts; 2449 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
2450 lro_mgr->frag_align_pad = 2;
2450 if (lro_mgr->max_aggr > MAX_SKB_FRAGS) 2451 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2451 lro_mgr->max_aggr = MAX_SKB_FRAGS; 2452 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2452 2453
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 73cac6c78cb6..2b1745328cf7 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4834,6 +4834,7 @@ static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4834{ 4834{
4835 u64 val = 0; 4835 u64 val = 0;
4836 4836
4837 *ret = 0;
4837 switch (rp->rbr_block_size) { 4838 switch (rp->rbr_block_size) {
4838 case 4 * 1024: 4839 case 4 * 1024:
4839 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); 4840 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
@@ -9542,7 +9543,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
9542 9543
9543 plat_dev = platform_device_register_simple("niu", niu_parent_index, 9544 plat_dev = platform_device_register_simple("niu", niu_parent_index,
9544 NULL, 0); 9545 NULL, 0);
9545 if (!plat_dev) 9546 if (IS_ERR(plat_dev))
9546 return NULL; 9547 return NULL;
9547 9548
9548 for (i = 0; attr_name(niu_parent_attributes[i]); i++) { 9549 for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index cf24cc34debe..e7070515d2e3 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -19,6 +19,7 @@
19#include <linux/mii.h> 19#include <linux/mii.h>
20#include <linux/phy.h> 20#include <linux/phy.h>
21#include <linux/phy_fixed.h> 21#include <linux/phy_fixed.h>
22#include <linux/err.h>
22 23
23#define MII_REGS_NUM 29 24#define MII_REGS_NUM 29
24 25
@@ -207,8 +208,8 @@ static int __init fixed_mdio_bus_init(void)
207 int ret; 208 int ret;
208 209
209 pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); 210 pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
210 if (!pdev) { 211 if (IS_ERR(pdev)) {
211 ret = -ENOMEM; 212 ret = PTR_ERR(pdev);
212 goto err_pdev; 213 goto err_pdev;
213 } 214 }
214 215
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index eb6411c4694f..7a3ec9d39a9a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -69,6 +69,11 @@
69#define MII_M1111_COPPER 0 69#define MII_M1111_COPPER 0
70#define MII_M1111_FIBER 1 70#define MII_M1111_FIBER 1
71 71
72#define MII_88E1121_PHY_LED_CTRL 16
73#define MII_88E1121_PHY_LED_PAGE 3
74#define MII_88E1121_PHY_LED_DEF 0x0030
75#define MII_88E1121_PHY_PAGE 22
76
72#define MII_M1011_PHY_STATUS 0x11 77#define MII_M1011_PHY_STATUS 0x11
73#define MII_M1011_PHY_STATUS_1000 0x8000 78#define MII_M1011_PHY_STATUS_1000 0x8000
74#define MII_M1011_PHY_STATUS_100 0x4000 79#define MII_M1011_PHY_STATUS_100 0x4000
@@ -154,6 +159,30 @@ static int marvell_config_aneg(struct phy_device *phydev)
154 return err; 159 return err;
155} 160}
156 161
162static int m88e1121_config_aneg(struct phy_device *phydev)
163{
164 int err, temp;
165
166 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
167 if (err < 0)
168 return err;
169
170 err = phy_write(phydev, MII_M1011_PHY_SCR,
171 MII_M1011_PHY_SCR_AUTO_CROSS);
172 if (err < 0)
173 return err;
174
175 temp = phy_read(phydev, MII_88E1121_PHY_PAGE);
176
177 phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
178 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
179 phy_write(phydev, MII_88E1121_PHY_PAGE, temp);
180
181 err = genphy_config_aneg(phydev);
182
183 return err;
184}
185
157static int m88e1111_config_init(struct phy_device *phydev) 186static int m88e1111_config_init(struct phy_device *phydev)
158{ 187{
159 int err; 188 int err;
@@ -429,6 +458,18 @@ static int marvell_read_status(struct phy_device *phydev)
429 return 0; 458 return 0;
430} 459}
431 460
461static int m88e1121_did_interrupt(struct phy_device *phydev)
462{
463 int imask;
464
465 imask = phy_read(phydev, MII_M1011_IEVENT);
466
467 if (imask & MII_M1011_IMASK_INIT)
468 return 1;
469
470 return 0;
471}
472
432static struct phy_driver marvell_drivers[] = { 473static struct phy_driver marvell_drivers[] = {
433 { 474 {
434 .phy_id = 0x01410c60, 475 .phy_id = 0x01410c60,
@@ -482,6 +523,19 @@ static struct phy_driver marvell_drivers[] = {
482 .driver = {.owner = THIS_MODULE,}, 523 .driver = {.owner = THIS_MODULE,},
483 }, 524 },
484 { 525 {
526 .phy_id = 0x01410cb0,
527 .phy_id_mask = 0xfffffff0,
528 .name = "Marvell 88E1121R",
529 .features = PHY_GBIT_FEATURES,
530 .flags = PHY_HAS_INTERRUPT,
531 .config_aneg = &m88e1121_config_aneg,
532 .read_status = &marvell_read_status,
533 .ack_interrupt = &marvell_ack_interrupt,
534 .config_intr = &marvell_config_intr,
535 .did_interrupt = &m88e1121_did_interrupt,
536 .driver = { .owner = THIS_MODULE },
537 },
538 {
485 .phy_id = 0x01410cd0, 539 .phy_id = 0x01410cd0,
486 .phy_id_mask = 0xfffffff0, 540 .phy_id_mask = 0xfffffff0,
487 .name = "Marvell 88E1145", 541 .name = "Marvell 88E1145",
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3ff1f425f1bb..61755cbd978e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -434,7 +434,7 @@ void phy_start_machine(struct phy_device *phydev,
434 phydev->adjust_state = handler; 434 phydev->adjust_state = handler;
435 435
436 INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine); 436 INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
437 schedule_delayed_work(&phydev->state_queue, jiffies + HZ); 437 schedule_delayed_work(&phydev->state_queue, HZ);
438} 438}
439 439
440/** 440/**
@@ -655,6 +655,10 @@ static void phy_change(struct work_struct *work)
655 struct phy_device *phydev = 655 struct phy_device *phydev =
656 container_of(work, struct phy_device, phy_queue); 656 container_of(work, struct phy_device, phy_queue);
657 657
658 if (phydev->drv->did_interrupt &&
659 !phydev->drv->did_interrupt(phydev))
660 goto ignore;
661
658 err = phy_disable_interrupts(phydev); 662 err = phy_disable_interrupts(phydev);
659 663
660 if (err) 664 if (err)
@@ -681,6 +685,11 @@ static void phy_change(struct work_struct *work)
681 685
682 return; 686 return;
683 687
688ignore:
689 atomic_dec(&phydev->irq_disable);
690 enable_irq(phydev->irq);
691 return;
692
684irq_enable_err: 693irq_enable_err:
685 disable_irq(phydev->irq); 694 disable_irq(phydev->irq);
686 atomic_inc(&phydev->irq_disable); 695 atomic_inc(&phydev->irq_disable);
@@ -937,6 +946,5 @@ static void phy_state_machine(struct work_struct *work)
937 if (err < 0) 946 if (err < 0)
938 phy_error(phydev); 947 phy_error(phydev);
939 948
940 schedule_delayed_work(&phydev->state_queue, 949 schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
941 jiffies + PHY_STATE_TIME * HZ);
942} 950}
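Two separate issues are addressed in the drivers/net/phy changes above. First, schedule_delayed_work() takes a relative delay in jiffies, so the old calls that passed jiffies + HZ and jiffies + PHY_STATE_TIME * HZ delayed the state machine by roughly the current uptime instead of the intended interval; the replacements pass the plain tick counts. Second, phy_change() now consults an optional did_interrupt() callback (implemented for the 88E1121R in marvell.c above) so a PHY sharing an interrupt line can bail out early when the interrupt was not its own. A stand-alone sketch of that second idea, with made-up names rather than the kernel API:

#include <stdio.h>

struct phy_dev {
        const char *name;
        int pending;                            /* pretend interrupt-status register */
        int (*did_interrupt)(struct phy_dev *);
};

static int fake_did_interrupt(struct phy_dev *phy)
{
        return phy->pending;                    /* 1 only if this PHY raised the IRQ */
}

static void shared_irq(struct phy_dev *phys, int n)
{
        for (int i = 0; i < n; i++) {
                if (phys[i].did_interrupt && !phys[i].did_interrupt(&phys[i]))
                        continue;               /* not ours: skip the expensive path */
                printf("%s: handling interrupt\n", phys[i].name);
                phys[i].pending = 0;
        }
}

int main(void)
{
        struct phy_dev phys[] = {
                { "phy0", 0, fake_did_interrupt },
                { "phy1", 1, fake_did_interrupt },
        };

        shared_irq(phys, 2);                    /* only phy1 is handled */
        return 0;
}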
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 5e8540b6ffa1..6f97b47d74a6 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -160,6 +160,7 @@ MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
160 "Florian Fainelli <florian@openwrt.org>"); 160 "Florian Fainelli <florian@openwrt.org>");
161MODULE_LICENSE("GPL"); 161MODULE_LICENSE("GPL");
162MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); 162MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
163MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
163 164
164/* RX and TX interrupts that we handle */ 165/* RX and TX interrupts that we handle */
165#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH) 166#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index dee23b159df2..7269a426051c 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -448,9 +448,6 @@ static void efx_init_channels(struct efx_nic *efx)
448 448
449 WARN_ON(channel->rx_pkt != NULL); 449 WARN_ON(channel->rx_pkt != NULL);
450 efx_rx_strategy(channel); 450 efx_rx_strategy(channel);
451
452 netif_napi_add(channel->napi_dev, &channel->napi_str,
453 efx_poll, napi_weight);
454 } 451 }
455} 452}
456 453
@@ -1321,6 +1318,8 @@ static int efx_init_napi(struct efx_nic *efx)
1321 1318
1322 efx_for_each_channel(channel, efx) { 1319 efx_for_each_channel(channel, efx) {
1323 channel->napi_dev = efx->net_dev; 1320 channel->napi_dev = efx->net_dev;
1321 netif_napi_add(channel->napi_dev, &channel->napi_str,
1322 efx_poll, napi_weight);
1324 } 1323 }
1325 return 0; 1324 return 0;
1326} 1325}
@@ -1330,6 +1329,8 @@ static void efx_fini_napi(struct efx_nic *efx)
1330 struct efx_channel *channel; 1329 struct efx_channel *channel;
1331 1330
1332 efx_for_each_channel(channel, efx) { 1331 efx_for_each_channel(channel, efx) {
1332 if (channel->napi_dev)
1333 netif_napi_del(&channel->napi_str);
1333 channel->napi_dev = NULL; 1334 channel->napi_dev = NULL;
1334 } 1335 }
1335} 1336}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d4629ab2c614..466a8abb0053 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1176,9 +1176,9 @@ void falcon_sim_phy_event(struct efx_nic *efx)
1176 1176
1177 EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); 1177 EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
1178 if (EFX_IS10G(efx)) 1178 if (EFX_IS10G(efx))
1179 EFX_SET_OWORD_FIELD(phy_event, XG_PHY_INTR, 1); 1179 EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
1180 else 1180 else
1181 EFX_SET_OWORD_FIELD(phy_event, G_PHY0_INTR, 1); 1181 EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);
1182 1182
1183 falcon_generate_event(&efx->channel[0], &phy_event); 1183 falcon_generate_event(&efx->channel[0], &phy_event);
1184} 1184}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 7b1882765a0c..3ab28bb00c12 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1188,6 +1188,19 @@ out:
1188 return ret; 1188 return ret;
1189} 1189}
1190 1190
1191static const struct net_device_ops sh_eth_netdev_ops = {
1192 .ndo_open = sh_eth_open,
1193 .ndo_stop = sh_eth_close,
1194 .ndo_start_xmit = sh_eth_start_xmit,
1195 .ndo_get_stats = sh_eth_get_stats,
1196 .ndo_set_multicast_list = sh_eth_set_multicast_list,
1197 .ndo_tx_timeout = sh_eth_tx_timeout,
1198 .ndo_do_ioctl = sh_eth_do_ioctl,
1199 .ndo_validate_addr = eth_validate_addr,
1200 .ndo_set_mac_address = eth_mac_addr,
1201 .ndo_change_mtu = eth_change_mtu,
1202};
1203
1191static int sh_eth_drv_probe(struct platform_device *pdev) 1204static int sh_eth_drv_probe(struct platform_device *pdev)
1192{ 1205{
1193 int ret, i, devno = 0; 1206 int ret, i, devno = 0;
@@ -1240,13 +1253,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1240 mdp->edmac_endian = pd->edmac_endian; 1253 mdp->edmac_endian = pd->edmac_endian;
1241 1254
1242 /* set function */ 1255 /* set function */
1243 ndev->open = sh_eth_open; 1256 ndev->netdev_ops = &sh_eth_netdev_ops;
1244 ndev->hard_start_xmit = sh_eth_start_xmit;
1245 ndev->stop = sh_eth_close;
1246 ndev->get_stats = sh_eth_get_stats;
1247 ndev->set_multicast_list = sh_eth_set_multicast_list;
1248 ndev->do_ioctl = sh_eth_do_ioctl;
1249 ndev->tx_timeout = sh_eth_tx_timeout;
1250 ndev->watchdog_timeo = TX_TIMEOUT; 1257 ndev->watchdog_timeo = TX_TIMEOUT;
1251 1258
1252 mdp->post_rx = POST_RX >> (devno << 1); 1259 mdp->post_rx = POST_RX >> (devno << 1);
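The sh_eth change above, like the sun3_82586, tc35815, tsi108_eth and xtsonic changes further down, performs the same conversion: the per-callback assignments on struct net_device are replaced by a single const struct net_device_ops that ndev->netdev_ops points at. A user-space illustration of the underlying design, showing how one shared read-only table of function pointers replaces per-device callback fields (the struct below is a stand-in, not the real net_device_ops):

#include <stdio.h>

struct net_device;                              /* opaque for this sketch */

struct sketch_ops {                             /* stand-in for net_device_ops */
        int (*ndo_open)(struct net_device *);
        int (*ndo_stop)(struct net_device *);
};

static int demo_open(struct net_device *dev) { (void)dev; puts("open");  return 0; }
static int demo_stop(struct net_device *dev) { (void)dev; puts("close"); return 0; }

static const struct sketch_ops demo_ops = {     /* one table shared by all devices */
        .ndo_open = demo_open,
        .ndo_stop = demo_stop,
};

struct net_device {
        const struct sketch_ops *netdev_ops;
};

int main(void)
{
        struct net_device dev = { .netdev_ops = &demo_ops };

        dev.netdev_ops->ndo_open(&dev);         /* core code calls through the table */
        dev.netdev_ops->ndo_stop(&dev);
        return 0;
}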
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b8978d4af1b7..c11cdd08ec57 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2674,7 +2674,7 @@ static int skge_down(struct net_device *dev)
2674 if (netif_msg_ifdown(skge)) 2674 if (netif_msg_ifdown(skge))
2675 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 2675 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
2676 2676
2677 netif_stop_queue(dev); 2677 netif_tx_disable(dev);
2678 2678
2679 if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) 2679 if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
2680 del_timer_sync(&skge->link_timer); 2680 del_timer_sync(&skge->link_timer);
@@ -2881,7 +2881,6 @@ static void skge_tx_clean(struct net_device *dev)
2881 } 2881 }
2882 2882
2883 skge->tx_ring.to_clean = e; 2883 skge->tx_ring.to_clean = e;
2884 netif_wake_queue(dev);
2885} 2884}
2886 2885
2887static void skge_tx_timeout(struct net_device *dev) 2886static void skge_tx_timeout(struct net_device *dev)
@@ -2893,6 +2892,7 @@ static void skge_tx_timeout(struct net_device *dev)
2893 2892
2894 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); 2893 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2895 skge_tx_clean(dev); 2894 skge_tx_clean(dev);
2895 netif_wake_queue(dev);
2896} 2896}
2897 2897
2898static int skge_change_mtu(struct net_device *dev, int new_mtu) 2898static int skge_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 912308eec865..329f890e2903 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -369,7 +369,7 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
369 * MN10300/AM33 configuration 369 * MN10300/AM33 configuration
370 */ 370 */
371 371
372#include <asm/unit/smc91111.h> 372#include <unit/smc91111.h>
373 373
374#else 374#else
375 375
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 6da678129828..eb7db032a780 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -317,7 +317,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
317 goto out; 317 goto out;
318 } 318 }
319 319
320 SMSC_WARNING(HW, "Timed out waiting for MII write to finish"); 320 SMSC_WARNING(HW, "Timed out waiting for MII read to finish");
321 reg = -EIO; 321 reg = -EIO;
322 322
323out: 323out:
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index e0d84772771c..a39c0b9ba8b6 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -331,6 +331,18 @@ out:
331 return ERR_PTR(err); 331 return ERR_PTR(err);
332} 332}
333 333
334static const struct net_device_ops sun3_82586_netdev_ops = {
335 .ndo_open = sun3_82586_open,
336 .ndo_stop = sun3_82586_close,
337 .ndo_start_xmit = sun3_82586_send_packet,
338 .ndo_set_multicast_list = set_multicast_list,
339 .ndo_tx_timeout = sun3_82586_timeout,
340 .ndo_get_stats = sun3_82586_get_stats,
341 .ndo_validate_addr = eth_validate_addr,
342 .ndo_set_mac_address = eth_mac_addr,
343 .ndo_change_mtu = eth_change_mtu,
344};
345
334static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) 346static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
335{ 347{
336 int i, size, retval; 348 int i, size, retval;
@@ -381,13 +393,8 @@ static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
381 393
382 printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); 394 printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq);
383 395
384 dev->open = sun3_82586_open; 396 dev->netdev_ops = &sun3_82586_netdev_ops;
385 dev->stop = sun3_82586_close;
386 dev->get_stats = sun3_82586_get_stats;
387 dev->tx_timeout = sun3_82586_timeout;
388 dev->watchdog_timeo = HZ/20; 397 dev->watchdog_timeo = HZ/20;
389 dev->hard_start_xmit = sun3_82586_send_packet;
390 dev->set_multicast_list = set_multicast_list;
391 398
392 dev->if_port = 0; 399 dev->if_port = 0;
393 return 0; 400 return 0;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d91e95b237b7..0ce2db6ce2bf 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -862,6 +862,22 @@ static int __devinit tc35815_init_dev_addr(struct net_device *dev)
862 return 0; 862 return 0;
863} 863}
864 864
865static const struct net_device_ops tc35815_netdev_ops = {
866 .ndo_open = tc35815_open,
867 .ndo_stop = tc35815_close,
868 .ndo_start_xmit = tc35815_send_packet,
869 .ndo_get_stats = tc35815_get_stats,
870 .ndo_set_multicast_list = tc35815_set_multicast_list,
871 .ndo_tx_timeout = tc35815_tx_timeout,
872 .ndo_do_ioctl = tc35815_ioctl,
873 .ndo_validate_addr = eth_validate_addr,
874 .ndo_change_mtu = eth_change_mtu,
875 .ndo_set_mac_address = eth_mac_addr,
876#ifdef CONFIG_NET_POLL_CONTROLLER
877 .ndo_poll_controller = tc35815_poll_controller,
878#endif
879};
880
865static int __devinit tc35815_init_one(struct pci_dev *pdev, 881static int __devinit tc35815_init_one(struct pci_dev *pdev,
866 const struct pci_device_id *ent) 882 const struct pci_device_id *ent)
867{ 883{
@@ -904,21 +920,12 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
904 ioaddr = pcim_iomap_table(pdev)[1]; 920 ioaddr = pcim_iomap_table(pdev)[1];
905 921
906 /* Initialize the device structure. */ 922 /* Initialize the device structure. */
907 dev->open = tc35815_open; 923 dev->netdev_ops = &tc35815_netdev_ops;
908 dev->hard_start_xmit = tc35815_send_packet;
909 dev->stop = tc35815_close;
910 dev->get_stats = tc35815_get_stats;
911 dev->set_multicast_list = tc35815_set_multicast_list;
912 dev->do_ioctl = tc35815_ioctl;
913 dev->ethtool_ops = &tc35815_ethtool_ops; 924 dev->ethtool_ops = &tc35815_ethtool_ops;
914 dev->tx_timeout = tc35815_tx_timeout;
915 dev->watchdog_timeo = TC35815_TX_TIMEOUT; 925 dev->watchdog_timeo = TC35815_TX_TIMEOUT;
916#ifdef TC35815_NAPI 926#ifdef TC35815_NAPI
917 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); 927 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
918#endif 928#endif
919#ifdef CONFIG_NET_POLL_CONTROLLER
920 dev->poll_controller = tc35815_poll_controller;
921#endif
922 929
923 dev->irq = pdev->irq; 930 dev->irq = pdev->irq;
924 dev->base_addr = (unsigned long)ioaddr; 931 dev->base_addr = (unsigned long)ioaddr;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6a736dda3ee2..7a837c465960 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -12443,8 +12443,13 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
12443 /* Next, try NVRAM. */ 12443 /* Next, try NVRAM. */
12444 if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 12444 if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
12445 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 12445 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
12446 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); 12446 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12447 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); 12447 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12448 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12449 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12450 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12451 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12452
12448 } 12453 }
12449 /* Finally just fetch it out of the MAC control regs. */ 12454 /* Finally just fetch it out of the MAC control regs. */
12450 else { 12455 else {
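The tg3 hunk above stops memcpy()ing the raw bytes of the two NVRAM words into dev_addr and instead extracts each octet with shifts and masks. One reason to prefer the shift form is that it names exactly which octet of the 32-bit value lands in which MAC byte, independent of how the word is laid out in host memory; a stand-alone illustration of that difference (the values are made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        uint32_t lo = 0x66554433;               /* hypothetical NVRAM word */
        uint8_t by_shift[4], by_copy[4];

        for (int i = 0; i < 4; i++)
                by_shift[i] = (lo >> (8 * i)) & 0xff;   /* same on every host */
        memcpy(by_copy, &lo, 4);                        /* depends on host endianness */

        printf("shift: %02x %02x %02x %02x\n",
               by_shift[0], by_shift[1], by_shift[2], by_shift[3]);
        printf("copy : %02x %02x %02x %02x\n",
               by_copy[0], by_copy[1], by_copy[2], by_copy[3]);
        return 0;
}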
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index bb43e7fb2a50..0f78f99f9b20 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -1561,6 +1561,18 @@ static const struct ethtool_ops tsi108_ethtool_ops = {
1561 .set_settings = tsi108_set_settings, 1561 .set_settings = tsi108_set_settings,
1562}; 1562};
1563 1563
1564static const struct net_device_ops tsi108_netdev_ops = {
1565 .ndo_open = tsi108_open,
1566 .ndo_stop = tsi108_close,
1567 .ndo_start_xmit = tsi108_send_packet,
1568 .ndo_set_multicast_list = tsi108_set_rx_mode,
1569 .ndo_get_stats = tsi108_get_stats,
1570 .ndo_do_ioctl = tsi108_do_ioctl,
1571 .ndo_set_mac_address = tsi108_set_mac,
1572 .ndo_validate_addr = eth_validate_addr,
1573 .ndo_change_mtu = eth_change_mtu,
1574};
1575
1564static int 1576static int
1565tsi108_init_one(struct platform_device *pdev) 1577tsi108_init_one(struct platform_device *pdev)
1566{ 1578{
@@ -1616,14 +1628,8 @@ tsi108_init_one(struct platform_device *pdev)
1616 data->phy_type = einfo->phy_type; 1628 data->phy_type = einfo->phy_type;
1617 data->irq_num = einfo->irq_num; 1629 data->irq_num = einfo->irq_num;
1618 data->id = pdev->id; 1630 data->id = pdev->id;
1619 dev->open = tsi108_open;
1620 dev->stop = tsi108_close;
1621 dev->hard_start_xmit = tsi108_send_packet;
1622 dev->set_mac_address = tsi108_set_mac;
1623 dev->set_multicast_list = tsi108_set_rx_mode;
1624 dev->get_stats = tsi108_get_stats;
1625 netif_napi_add(dev, &data->napi, tsi108_poll, 64); 1631 netif_napi_add(dev, &data->napi, tsi108_poll, 64);
1626 dev->do_ioctl = tsi108_do_ioctl; 1632 dev->netdev_ops = &tsi108_netdev_ops;
1627 dev->ethtool_ops = &tsi108_ethtool_ops; 1633 dev->ethtool_ops = &tsi108_ethtool_ops;
1628 1634
1629 /* Apparently, the Linux networking code won't use scatter-gather 1635 /* Apparently, the Linux networking code won't use scatter-gather
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a1b0697340ba..16716aef184c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -518,7 +518,7 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
518 int err; 518 int err;
519 519
520 /* Under a page? Don't bother with paged skb. */ 520 /* Under a page? Don't bother with paged skb. */
521 if (prepad + len < PAGE_SIZE) 521 if (prepad + len < PAGE_SIZE || !linear)
522 linear = len; 522 linear = len;
523 523
524 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 524 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
@@ -565,7 +565,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
565 565
566 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { 566 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
567 align = NET_IP_ALIGN; 567 align = NET_IP_ALIGN;
568 if (unlikely(len < ETH_HLEN)) 568 if (unlikely(len < ETH_HLEN ||
569 (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
569 return -EINVAL; 570 return -EINVAL;
570 } 571 }
571 572
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index fb53ef872df3..754a4b182c1d 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -377,7 +377,7 @@ static void velocity_print_info(struct velocity_info *vptr);
377static int velocity_open(struct net_device *dev); 377static int velocity_open(struct net_device *dev);
378static int velocity_change_mtu(struct net_device *dev, int mtu); 378static int velocity_change_mtu(struct net_device *dev, int mtu);
379static int velocity_xmit(struct sk_buff *skb, struct net_device *dev); 379static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
380static int velocity_intr(int irq, void *dev_instance); 380static irqreturn_t velocity_intr(int irq, void *dev_instance);
381static void velocity_set_multi(struct net_device *dev); 381static void velocity_set_multi(struct net_device *dev);
382static struct net_device_stats *velocity_get_stats(struct net_device *dev); 382static struct net_device_stats *velocity_get_stats(struct net_device *dev);
383static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 383static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -2215,7 +2215,7 @@ out:
2215 * efficiently as possible. 2215 * efficiently as possible.
2216 */ 2216 */
2217 2217
2218static int velocity_intr(int irq, void *dev_instance) 2218static irqreturn_t velocity_intr(int irq, void *dev_instance)
2219{ 2219{
2220 struct net_device *dev = dev_instance; 2220 struct net_device *dev = dev_instance;
2221 struct velocity_info *vptr = netdev_priv(dev); 2221 struct velocity_info *vptr = netdev_priv(dev);
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
index a12a7211c982..5a4ad156f63e 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/xtsonic.c
@@ -108,6 +108,18 @@ static int xtsonic_close(struct net_device *dev)
108 return err; 108 return err;
109} 109}
110 110
111static const struct net_device_ops xtsonic_netdev_ops = {
112 .ndo_open = xtsonic_open,
113 .ndo_stop = xtsonic_close,
114 .ndo_start_xmit = sonic_send_packet,
115 .ndo_get_stats = sonic_get_stats,
116 .ndo_set_multicast_list = sonic_multicast_list,
117 .ndo_tx_timeout = sonic_tx_timeout,
118 .ndo_validate_addr = eth_validate_addr,
119 .ndo_change_mtu = eth_change_mtu,
120 .ndo_set_mac_address = eth_mac_addr,
121};
122
111static int __init sonic_probe1(struct net_device *dev) 123static int __init sonic_probe1(struct net_device *dev)
112{ 124{
113 static unsigned version_printed = 0; 125 static unsigned version_printed = 0;
@@ -205,12 +217,7 @@ static int __init sonic_probe1(struct net_device *dev)
205 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS 217 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
206 * SONIC_BUS_SCALE(lp->dma_bitmode)); 218 * SONIC_BUS_SCALE(lp->dma_bitmode));
207 219
208 dev->open = xtsonic_open; 220 dev->netdev_ops = &xtsonic_netdev_ops;
209 dev->stop = xtsonic_close;
210 dev->hard_start_xmit = sonic_send_packet;
211 dev->get_stats = sonic_get_stats;
212 dev->set_multicast_list = &sonic_multicast_list;
213 dev->tx_timeout = sonic_tx_timeout;
214 dev->watchdog_timeo = TX_TIMEOUT; 221 dev->watchdog_timeo = TX_TIMEOUT;
215 222
216 /* 223 /*
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 25a00ce4f24d..fa3a11365ec3 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -173,12 +173,21 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
173 struct dmar_drhd_unit *dmaru; 173 struct dmar_drhd_unit *dmaru;
174 int ret = 0; 174 int ret = 0;
175 175
176 drhd = (struct acpi_dmar_hardware_unit *)header;
177 if (!drhd->address) {
178 /* Promote an attitude of violence to a BIOS engineer today */
179 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
180 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
181 dmi_get_system_info(DMI_BIOS_VENDOR),
182 dmi_get_system_info(DMI_BIOS_VERSION),
183 dmi_get_system_info(DMI_PRODUCT_VERSION));
184 return -ENODEV;
185 }
176 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); 186 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
177 if (!dmaru) 187 if (!dmaru)
178 return -ENOMEM; 188 return -ENOMEM;
179 189
180 dmaru->hdr = header; 190 dmaru->hdr = header;
181 drhd = (struct acpi_dmar_hardware_unit *)header;
182 dmaru->reg_base_addr = drhd->address; 191 dmaru->reg_base_addr = drhd->address;
183 dmaru->segment = drhd->segment; 192 dmaru->segment = drhd->segment;
184 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ 193 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index fb3a3f3fca7a..001b328adf80 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -733,8 +733,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
733 start &= (((u64)1) << addr_width) - 1; 733 start &= (((u64)1) << addr_width) - 1;
734 end &= (((u64)1) << addr_width) - 1; 734 end &= (((u64)1) << addr_width) - 1;
735 /* in case it's partial page */ 735 /* in case it's partial page */
736 start = PAGE_ALIGN(start); 736 start &= PAGE_MASK;
737 end &= PAGE_MASK; 737 end = PAGE_ALIGN(end);
738 npages = (end - start) / VTD_PAGE_SIZE; 738 npages = (end - start) / VTD_PAGE_SIZE;
739 739
740 /* we don't need lock here, nobody else touches the iova range */ 740 /* we don't need lock here, nobody else touches the iova range */
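The intel-iommu hunk above flips the rounding direction in dma_pte_clear_range(): the old code rounded start up (PAGE_ALIGN) and end down (PAGE_MASK), shrinking the range inward so partial pages at either end were skipped; rounding start down and end up grows the range outward so every page the range touches is cleared. A stand-alone illustration of the difference:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE       4096u
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        uint32_t start = 0x1200, end = 0x2e00;  /* touches parts of two pages */

        uint32_t old_s = PAGE_ALIGN(start), old_e = end & PAGE_MASK;
        uint32_t new_s = start & PAGE_MASK,  new_e = PAGE_ALIGN(end);

        printf("inward : %u pages\n", (old_e - old_s) / PAGE_SIZE);  /* 0 - both missed  */
        printf("outward: %u pages\n", (new_e - new_s) / PAGE_SIZE);  /* 2 - both cleared */
        return 0;
}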
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 45940f31fe9e..218b9a16ac3f 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -174,8 +174,7 @@ struct fujitsu_hotkey_t {
174 174
175static struct fujitsu_hotkey_t *fujitsu_hotkey; 175static struct fujitsu_hotkey_t *fujitsu_hotkey;
176 176
177static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, 177static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
178 void *data);
179 178
180#ifdef CONFIG_LEDS_CLASS 179#ifdef CONFIG_LEDS_CLASS
181static enum led_brightness logolamp_get(struct led_classdev *cdev); 180static enum led_brightness logolamp_get(struct led_classdev *cdev);
@@ -203,7 +202,7 @@ struct led_classdev kblamps_led = {
203static u32 dbg_level = 0x03; 202static u32 dbg_level = 0x03;
204#endif 203#endif
205 204
206static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); 205static void acpi_fujitsu_notify(struct acpi_device *device, u32 event);
207 206
208/* Fujitsu ACPI interface function */ 207/* Fujitsu ACPI interface function */
209 208
@@ -658,7 +657,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
658 657
659static int acpi_fujitsu_add(struct acpi_device *device) 658static int acpi_fujitsu_add(struct acpi_device *device)
660{ 659{
661 acpi_status status;
662 acpi_handle handle; 660 acpi_handle handle;
663 int result = 0; 661 int result = 0;
664 int state = 0; 662 int state = 0;
@@ -673,20 +671,10 @@ static int acpi_fujitsu_add(struct acpi_device *device)
673 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 671 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
674 device->driver_data = fujitsu; 672 device->driver_data = fujitsu;
675 673
676 status = acpi_install_notify_handler(device->handle,
677 ACPI_DEVICE_NOTIFY,
678 acpi_fujitsu_notify, fujitsu);
679
680 if (ACPI_FAILURE(status)) {
681 printk(KERN_ERR "Error installing notify handler\n");
682 error = -ENODEV;
683 goto err_stop;
684 }
685
686 fujitsu->input = input = input_allocate_device(); 674 fujitsu->input = input = input_allocate_device();
687 if (!input) { 675 if (!input) {
688 error = -ENOMEM; 676 error = -ENOMEM;
689 goto err_uninstall_notify; 677 goto err_stop;
690 } 678 }
691 679
692 snprintf(fujitsu->phys, sizeof(fujitsu->phys), 680 snprintf(fujitsu->phys, sizeof(fujitsu->phys),
@@ -743,9 +731,6 @@ static int acpi_fujitsu_add(struct acpi_device *device)
743end: 731end:
744err_free_input_dev: 732err_free_input_dev:
745 input_free_device(input); 733 input_free_device(input);
746err_uninstall_notify:
747 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
748 acpi_fujitsu_notify);
749err_stop: 734err_stop:
750 735
751 return result; 736 return result;
@@ -753,7 +738,6 @@ err_stop:
753 738
754static int acpi_fujitsu_remove(struct acpi_device *device, int type) 739static int acpi_fujitsu_remove(struct acpi_device *device, int type)
755{ 740{
756 acpi_status status;
757 struct fujitsu_t *fujitsu = NULL; 741 struct fujitsu_t *fujitsu = NULL;
758 742
759 if (!device || !acpi_driver_data(device)) 743 if (!device || !acpi_driver_data(device))
@@ -761,10 +745,6 @@ static int acpi_fujitsu_remove(struct acpi_device *device, int type)
761 745
762 fujitsu = acpi_driver_data(device); 746 fujitsu = acpi_driver_data(device);
763 747
764 status = acpi_remove_notify_handler(fujitsu->acpi_handle,
765 ACPI_DEVICE_NOTIFY,
766 acpi_fujitsu_notify);
767
768 if (!device || !acpi_driver_data(device)) 748 if (!device || !acpi_driver_data(device))
769 return -EINVAL; 749 return -EINVAL;
770 750
@@ -775,7 +755,7 @@ static int acpi_fujitsu_remove(struct acpi_device *device, int type)
775 755
776/* Brightness notify */ 756/* Brightness notify */
777 757
778static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) 758static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
779{ 759{
780 struct input_dev *input; 760 struct input_dev *input;
781 int keycode; 761 int keycode;
@@ -829,15 +809,12 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
829 input_report_key(input, keycode, 0); 809 input_report_key(input, keycode, 0);
830 input_sync(input); 810 input_sync(input);
831 } 811 }
832
833 return;
834} 812}
835 813
836/* ACPI device for hotkey handling */ 814/* ACPI device for hotkey handling */
837 815
838static int acpi_fujitsu_hotkey_add(struct acpi_device *device) 816static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
839{ 817{
840 acpi_status status;
841 acpi_handle handle; 818 acpi_handle handle;
842 int result = 0; 819 int result = 0;
843 int state = 0; 820 int state = 0;
@@ -854,17 +831,6 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
854 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 831 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
855 device->driver_data = fujitsu_hotkey; 832 device->driver_data = fujitsu_hotkey;
856 833
857 status = acpi_install_notify_handler(device->handle,
858 ACPI_DEVICE_NOTIFY,
859 acpi_fujitsu_hotkey_notify,
860 fujitsu_hotkey);
861
862 if (ACPI_FAILURE(status)) {
863 printk(KERN_ERR "Error installing notify handler\n");
864 error = -ENODEV;
865 goto err_stop;
866 }
867
868 /* kfifo */ 834 /* kfifo */
869 spin_lock_init(&fujitsu_hotkey->fifo_lock); 835 spin_lock_init(&fujitsu_hotkey->fifo_lock);
870 fujitsu_hotkey->fifo = 836 fujitsu_hotkey->fifo =
@@ -879,7 +845,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
879 fujitsu_hotkey->input = input = input_allocate_device(); 845 fujitsu_hotkey->input = input = input_allocate_device();
880 if (!input) { 846 if (!input) {
881 error = -ENOMEM; 847 error = -ENOMEM;
882 goto err_uninstall_notify; 848 goto err_free_fifo;
883 } 849 }
884 850
885 snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), 851 snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),
@@ -975,9 +941,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
975end: 941end:
976err_free_input_dev: 942err_free_input_dev:
977 input_free_device(input); 943 input_free_device(input);
978err_uninstall_notify: 944err_free_fifo:
979 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
980 acpi_fujitsu_hotkey_notify);
981 kfifo_free(fujitsu_hotkey->fifo); 945 kfifo_free(fujitsu_hotkey->fifo);
982err_stop: 946err_stop:
983 947
@@ -986,7 +950,6 @@ err_stop:
986 950
987static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) 951static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
988{ 952{
989 acpi_status status;
990 struct fujitsu_hotkey_t *fujitsu_hotkey = NULL; 953 struct fujitsu_hotkey_t *fujitsu_hotkey = NULL;
991 954
992 if (!device || !acpi_driver_data(device)) 955 if (!device || !acpi_driver_data(device))
@@ -994,10 +957,6 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
994 957
995 fujitsu_hotkey = acpi_driver_data(device); 958 fujitsu_hotkey = acpi_driver_data(device);
996 959
997 status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle,
998 ACPI_DEVICE_NOTIFY,
999 acpi_fujitsu_hotkey_notify);
1000
1001 fujitsu_hotkey->acpi_handle = NULL; 960 fujitsu_hotkey->acpi_handle = NULL;
1002 961
1003 kfifo_free(fujitsu_hotkey->fifo); 962 kfifo_free(fujitsu_hotkey->fifo);
@@ -1005,8 +964,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
1005 return 0; 964 return 0;
1006} 965}
1007 966
1008static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, 967static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1009 void *data)
1010{ 968{
1011 struct input_dev *input; 969 struct input_dev *input;
1012 int keycode, keycode_r; 970 int keycode, keycode_r;
@@ -1089,8 +1047,6 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
1089 input_sync(input); 1047 input_sync(input);
1090 break; 1048 break;
1091 } 1049 }
1092
1093 return;
1094} 1050}
1095 1051
1096/* Initialization */ 1052/* Initialization */
@@ -1107,6 +1063,7 @@ static struct acpi_driver acpi_fujitsu_driver = {
1107 .ops = { 1063 .ops = {
1108 .add = acpi_fujitsu_add, 1064 .add = acpi_fujitsu_add,
1109 .remove = acpi_fujitsu_remove, 1065 .remove = acpi_fujitsu_remove,
1066 .notify = acpi_fujitsu_notify,
1110 }, 1067 },
1111}; 1068};
1112 1069
@@ -1122,6 +1079,7 @@ static struct acpi_driver acpi_fujitsu_hotkey_driver = {
1122 .ops = { 1079 .ops = {
1123 .add = acpi_fujitsu_hotkey_add, 1080 .add = acpi_fujitsu_hotkey_add,
1124 .remove = acpi_fujitsu_hotkey_remove, 1081 .remove = acpi_fujitsu_hotkey_remove,
1082 .notify = acpi_fujitsu_hotkey_notify,
1125 }, 1083 },
1126}; 1084};
1127 1085
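The fujitsu-laptop conversion above, and the panasonic-laptop, sony-laptop and wmi conversions that follow, all move from installing a notify handler by hand (acpi_install_notify_handler() in .add, acpi_remove_notify_handler() in .remove and on every error path) to simply setting .notify in the acpi_driver ops and letting the ACPI core dispatch device notifications; the handler signature changes from (acpi_handle, u32, void *) to (struct acpi_device *, u32). A rough skeleton of the converted shape, assembled from the hunks above; this is a sketch only, not a complete or buildable driver, and all names and IDs are placeholders:

static const struct acpi_device_id example_device_ids[] = {
        {"XXXX0000", 0},                        /* placeholder ID */
        {"", 0},
};

static void example_notify(struct acpi_device *device, u32 event)
{
        void *data = acpi_driver_data(device);  /* stored by .add below */

        /* decode 'event' and act on it (report a key, update state, ...) */
        (void)data;
}

static int example_add(struct acpi_device *device)
{
        /* allocate state, set device->driver_data, register inputs, ...
         * no acpi_install_notify_handler() call and no matching error path
         */
        return 0;
}

static int example_remove(struct acpi_device *device, int type)
{
        /* tear down state; no acpi_remove_notify_handler() needed */
        return 0;
}

static struct acpi_driver example_driver = {
        .name   = "example",
        .class  = "example",
        .ids    = example_device_ids,
        .ops    = {
                .add    = example_add,
                .remove = example_remove,
                .notify = example_notify,
        },
};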
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index a5ce4bc202e3..fe7cf0188acc 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -176,6 +176,7 @@ enum SINF_BITS { SINF_NUM_BATTERIES = 0,
176static int acpi_pcc_hotkey_add(struct acpi_device *device); 176static int acpi_pcc_hotkey_add(struct acpi_device *device);
177static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type); 177static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type);
178static int acpi_pcc_hotkey_resume(struct acpi_device *device); 178static int acpi_pcc_hotkey_resume(struct acpi_device *device);
179static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event);
179 180
180static const struct acpi_device_id pcc_device_ids[] = { 181static const struct acpi_device_id pcc_device_ids[] = {
181 { "MAT0012", 0}, 182 { "MAT0012", 0},
@@ -194,6 +195,7 @@ static struct acpi_driver acpi_pcc_driver = {
194 .add = acpi_pcc_hotkey_add, 195 .add = acpi_pcc_hotkey_add,
195 .remove = acpi_pcc_hotkey_remove, 196 .remove = acpi_pcc_hotkey_remove,
196 .resume = acpi_pcc_hotkey_resume, 197 .resume = acpi_pcc_hotkey_resume,
198 .notify = acpi_pcc_hotkey_notify,
197 }, 199 },
198}; 200};
199 201
@@ -271,7 +273,7 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
271 union acpi_object *hkey = NULL; 273 union acpi_object *hkey = NULL;
272 int i; 274 int i;
273 275
274 status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0, 276 status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, NULL,
275 &buffer); 277 &buffer);
276 if (ACPI_FAILURE(status)) { 278 if (ACPI_FAILURE(status)) {
277 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 279 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
@@ -527,9 +529,9 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
527 return; 529 return;
528} 530}
529 531
530static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data) 532static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event)
531{ 533{
532 struct pcc_acpi *pcc = (struct pcc_acpi *) data; 534 struct pcc_acpi *pcc = acpi_driver_data(device);
533 535
534 switch (event) { 536 switch (event) {
535 case HKEY_NOTIFY: 537 case HKEY_NOTIFY:
@@ -599,7 +601,6 @@ static int acpi_pcc_hotkey_resume(struct acpi_device *device)
599 601
600static int acpi_pcc_hotkey_add(struct acpi_device *device) 602static int acpi_pcc_hotkey_add(struct acpi_device *device)
601{ 603{
602 acpi_status status;
603 struct pcc_acpi *pcc; 604 struct pcc_acpi *pcc;
604 int num_sifr, result; 605 int num_sifr, result;
605 606
@@ -640,22 +641,11 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
640 goto out_sinf; 641 goto out_sinf;
641 } 642 }
642 643
643 /* initialize hotkey input device */
644 status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
645 acpi_pcc_hotkey_notify, pcc);
646
647 if (ACPI_FAILURE(status)) {
648 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
649 "Error installing notify handler\n"));
650 result = -ENODEV;
651 goto out_input;
652 }
653
654 /* initialize backlight */ 644 /* initialize backlight */
655 pcc->backlight = backlight_device_register("panasonic", NULL, pcc, 645 pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
656 &pcc_backlight_ops); 646 &pcc_backlight_ops);
657 if (IS_ERR(pcc->backlight)) 647 if (IS_ERR(pcc->backlight))
658 goto out_notify; 648 goto out_input;
659 649
660 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) { 650 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) {
661 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 651 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
@@ -680,9 +670,6 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
680 670
681out_backlight: 671out_backlight:
682 backlight_device_unregister(pcc->backlight); 672 backlight_device_unregister(pcc->backlight);
683out_notify:
684 acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
685 acpi_pcc_hotkey_notify);
686out_input: 673out_input:
687 input_unregister_device(pcc->input_dev); 674 input_unregister_device(pcc->input_dev);
688 /* no need to input_free_device() since core input API refcount and 675 /* no need to input_free_device() since core input API refcount and
@@ -723,9 +710,6 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
723 710
724 backlight_device_unregister(pcc->backlight); 711 backlight_device_unregister(pcc->backlight);
725 712
726 acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
727 acpi_pcc_hotkey_notify);
728
729 input_unregister_device(pcc->input_dev); 713 input_unregister_device(pcc->input_dev);
730 /* no need to input_free_device() since core input API refcount and 714 /* no need to input_free_device() since core input API refcount and
731 * free()s the device */ 715 * free()s the device */
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index a90ec5cb2f20..d3c92d777bde 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -914,7 +914,7 @@ static struct sony_nc_event sony_127_events[] = {
914/* 914/*
915 * ACPI callbacks 915 * ACPI callbacks
916 */ 916 */
917static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) 917static void sony_nc_notify(struct acpi_device *device, u32 event)
918{ 918{
919 u32 ev = event; 919 u32 ev = event;
920 920
@@ -933,7 +933,7 @@ static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
933 struct sony_nc_event *key_event; 933 struct sony_nc_event *key_event;
934 934
935 if (sony_call_snc_handle(key_handle, 0x200, &result)) { 935 if (sony_call_snc_handle(key_handle, 0x200, &result)) {
936 dprintk("sony_acpi_notify, unable to decode" 936 dprintk("sony_nc_notify, unable to decode"
937 " event 0x%.2x 0x%.2x\n", key_handle, 937 " event 0x%.2x 0x%.2x\n", key_handle,
938 ev); 938 ev);
939 /* restore the original event */ 939 /* restore the original event */
@@ -968,7 +968,7 @@ static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
968 } else 968 } else
969 sony_laptop_report_input_event(ev); 969 sony_laptop_report_input_event(ev);
970 970
971 dprintk("sony_acpi_notify, event: 0x%.2x\n", ev); 971 dprintk("sony_nc_notify, event: 0x%.2x\n", ev);
972 acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); 972 acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
973} 973}
974 974
@@ -1276,15 +1276,6 @@ static int sony_nc_add(struct acpi_device *device)
1276 goto outwalk; 1276 goto outwalk;
1277 } 1277 }
1278 1278
1279 status = acpi_install_notify_handler(sony_nc_acpi_handle,
1280 ACPI_DEVICE_NOTIFY,
1281 sony_acpi_notify, NULL);
1282 if (ACPI_FAILURE(status)) {
1283 printk(KERN_WARNING DRV_PFX "unable to install notify handler (%u)\n", status);
1284 result = -ENODEV;
1285 goto outinput;
1286 }
1287
1288 if (acpi_video_backlight_support()) { 1279 if (acpi_video_backlight_support()) {
1289 printk(KERN_INFO DRV_PFX "brightness ignored, must be " 1280 printk(KERN_INFO DRV_PFX "brightness ignored, must be "
1290 "controlled by ACPI video driver\n"); 1281 "controlled by ACPI video driver\n");
@@ -1362,13 +1353,6 @@ static int sony_nc_add(struct acpi_device *device)
1362 if (sony_backlight_device) 1353 if (sony_backlight_device)
1363 backlight_device_unregister(sony_backlight_device); 1354 backlight_device_unregister(sony_backlight_device);
1364 1355
1365 status = acpi_remove_notify_handler(sony_nc_acpi_handle,
1366 ACPI_DEVICE_NOTIFY,
1367 sony_acpi_notify);
1368 if (ACPI_FAILURE(status))
1369 printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n");
1370
1371 outinput:
1372 sony_laptop_remove_input(); 1356 sony_laptop_remove_input();
1373 1357
1374 outwalk: 1358 outwalk:
@@ -1378,7 +1362,6 @@ static int sony_nc_add(struct acpi_device *device)
1378 1362
1379static int sony_nc_remove(struct acpi_device *device, int type) 1363static int sony_nc_remove(struct acpi_device *device, int type)
1380{ 1364{
1381 acpi_status status;
1382 struct sony_nc_value *item; 1365 struct sony_nc_value *item;
1383 1366
1384 if (sony_backlight_device) 1367 if (sony_backlight_device)
@@ -1386,12 +1369,6 @@ static int sony_nc_remove(struct acpi_device *device, int type)
1386 1369
1387 sony_nc_acpi_device = NULL; 1370 sony_nc_acpi_device = NULL;
1388 1371
1389 status = acpi_remove_notify_handler(sony_nc_acpi_handle,
1390 ACPI_DEVICE_NOTIFY,
1391 sony_acpi_notify);
1392 if (ACPI_FAILURE(status))
1393 printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n");
1394
1395 for (item = sony_nc_values; item->name; ++item) { 1372 for (item = sony_nc_values; item->name; ++item) {
1396 device_remove_file(&sony_pf_device->dev, &item->devattr); 1373 device_remove_file(&sony_pf_device->dev, &item->devattr);
1397 } 1374 }
@@ -1425,6 +1402,7 @@ static struct acpi_driver sony_nc_driver = {
1425 .add = sony_nc_add, 1402 .add = sony_nc_add,
1426 .remove = sony_nc_remove, 1403 .remove = sony_nc_remove,
1427 .resume = sony_nc_resume, 1404 .resume = sony_nc_resume,
1405 .notify = sony_nc_notify,
1428 }, 1406 },
1429}; 1407};
1430 1408
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 2f269e117b8f..043b208d971d 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -81,6 +81,7 @@ static struct wmi_block wmi_blocks;
81 81
82static int acpi_wmi_remove(struct acpi_device *device, int type); 82static int acpi_wmi_remove(struct acpi_device *device, int type);
83static int acpi_wmi_add(struct acpi_device *device); 83static int acpi_wmi_add(struct acpi_device *device);
84static void acpi_wmi_notify(struct acpi_device *device, u32 event);
84 85
85static const struct acpi_device_id wmi_device_ids[] = { 86static const struct acpi_device_id wmi_device_ids[] = {
86 {"PNP0C14", 0}, 87 {"PNP0C14", 0},
@@ -96,6 +97,7 @@ static struct acpi_driver acpi_wmi_driver = {
96 .ops = { 97 .ops = {
97 .add = acpi_wmi_add, 98 .add = acpi_wmi_add,
98 .remove = acpi_wmi_remove, 99 .remove = acpi_wmi_remove,
100 .notify = acpi_wmi_notify,
99 }, 101 },
100}; 102};
101 103
@@ -643,12 +645,11 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
643 } 645 }
644} 646}
645 647
646static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data) 648static void acpi_wmi_notify(struct acpi_device *device, u32 event)
647{ 649{
648 struct guid_block *block; 650 struct guid_block *block;
649 struct wmi_block *wblock; 651 struct wmi_block *wblock;
650 struct list_head *p; 652 struct list_head *p;
651 struct acpi_device *device = data;
652 653
653 list_for_each(p, &wmi_blocks.list) { 654 list_for_each(p, &wmi_blocks.list) {
654 wblock = list_entry(p, struct wmi_block, list); 655 wblock = list_entry(p, struct wmi_block, list);
@@ -669,9 +670,6 @@ static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data)
669 670
670static int acpi_wmi_remove(struct acpi_device *device, int type) 671static int acpi_wmi_remove(struct acpi_device *device, int type)
671{ 672{
672 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
673 acpi_wmi_notify);
674
675 acpi_remove_address_space_handler(device->handle, 673 acpi_remove_address_space_handler(device->handle,
676 ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler); 674 ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
677 675
@@ -683,13 +681,6 @@ static int __init acpi_wmi_add(struct acpi_device *device)
683 acpi_status status; 681 acpi_status status;
684 int result = 0; 682 int result = 0;
685 683
686 status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
687 acpi_wmi_notify, device);
688 if (ACPI_FAILURE(status)) {
689 printk(KERN_ERR PREFIX "Error installing notify handler\n");
690 return -ENODEV;
691 }
692
693 status = acpi_install_address_space_handler(device->handle, 684 status = acpi_install_address_space_handler(device->handle,
694 ACPI_ADR_SPACE_EC, 685 ACPI_ADR_SPACE_EC,
695 &acpi_wmi_ec_space_handler, 686 &acpi_wmi_ec_space_handler,
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index 41aec2acbb91..e8b278f71781 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -36,6 +36,8 @@ struct pcf50633_mbc {
36 36
37 struct power_supply usb; 37 struct power_supply usb;
38 struct power_supply adapter; 38 struct power_supply adapter;
39
40 struct delayed_work charging_restart_work;
39}; 41};
40 42
41int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) 43int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
@@ -43,6 +45,8 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
43 struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); 45 struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
44 int ret = 0; 46 int ret = 0;
45 u8 bits; 47 u8 bits;
48 int charging_start = 1;
49 u8 mbcs2, chgmod;
46 50
47 if (ma >= 1000) 51 if (ma >= 1000)
48 bits = PCF50633_MBCC7_USB_1000mA; 52 bits = PCF50633_MBCC7_USB_1000mA;
@@ -50,8 +54,10 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
50 bits = PCF50633_MBCC7_USB_500mA; 54 bits = PCF50633_MBCC7_USB_500mA;
51 else if (ma >= 100) 55 else if (ma >= 100)
52 bits = PCF50633_MBCC7_USB_100mA; 56 bits = PCF50633_MBCC7_USB_100mA;
53 else 57 else {
54 bits = PCF50633_MBCC7_USB_SUSPEND; 58 bits = PCF50633_MBCC7_USB_SUSPEND;
59 charging_start = 0;
60 }
55 61
56 ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, 62 ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7,
57 PCF50633_MBCC7_USB_MASK, bits); 63 PCF50633_MBCC7_USB_MASK, bits);
@@ -60,6 +66,22 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
60 else 66 else
61 dev_info(pcf->dev, "usb curlim to %d mA\n", ma); 67 dev_info(pcf->dev, "usb curlim to %d mA\n", ma);
62 68
69 /* Manual charging start */
70 mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
71 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
72
73 /* If chgmod == BATFULL, setting chgena has no effect.
74 * We need to set resume instead.
75 */
76 if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
77 pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
78 PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
79 else
80 pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
81 PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME);
82
83 mbc->usb_active = charging_start;
84
63 power_supply_changed(&mbc->usb); 85 power_supply_changed(&mbc->usb);
64 86
65 return ret; 87 return ret;
@@ -84,21 +106,6 @@ int pcf50633_mbc_get_status(struct pcf50633 *pcf)
84} 106}
85EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); 107EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status);
86 108
87void pcf50633_mbc_set_status(struct pcf50633 *pcf, int what, int status)
88{
89 struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
90
91 if (what & PCF50633_MBC_USB_ONLINE)
92 mbc->usb_online = !!status;
93 if (what & PCF50633_MBC_USB_ACTIVE)
94 mbc->usb_active = !!status;
95 if (what & PCF50633_MBC_ADAPTER_ONLINE)
96 mbc->adapter_online = !!status;
97 if (what & PCF50633_MBC_ADAPTER_ACTIVE)
98 mbc->adapter_active = !!status;
99}
100EXPORT_SYMBOL_GPL(pcf50633_mbc_set_status);
101
102static ssize_t 109static ssize_t
103show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) 110show_chgmode(struct device *dev, struct device_attribute *attr, char *buf)
104{ 111{
@@ -160,10 +167,44 @@ static struct attribute_group mbc_attr_group = {
160 .attrs = pcf50633_mbc_sysfs_entries, 167 .attrs = pcf50633_mbc_sysfs_entries,
161}; 168};
162 169
170/* MBC state machine switches into charging mode when the battery voltage
171 * falls below 96% of a battery float voltage. But the voltage drop in Li-ion
172 * batteries is marginal (1~2%) until about 80% of capacity - which means that,
173 * after a BATFULL, charging won't be restarted until the charge falls to about 80%.
174 *
175 * This work_struct function restarts charging at regular intervals to make
176 * sure we don't discharge too much.
177 */
178
179static void pcf50633_mbc_charging_restart(struct work_struct *work)
180{
181 struct pcf50633_mbc *mbc;
182 u8 mbcs2, chgmod;
183
184 mbc = container_of(work, struct pcf50633_mbc,
185 charging_restart_work.work);
186
187 mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
188 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
189
190 if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
191 return;
192
193 /* Restart charging */
194 pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1,
195 PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME);
196 mbc->usb_active = 1;
197 power_supply_changed(&mbc->usb);
198
199 dev_info(mbc->pcf->dev, "Charging restarted\n");
200}
201
163static void 202static void
164pcf50633_mbc_irq_handler(int irq, void *data) 203pcf50633_mbc_irq_handler(int irq, void *data)
165{ 204{
166 struct pcf50633_mbc *mbc = data; 205 struct pcf50633_mbc *mbc = data;
206 int chg_restart_interval =
207 mbc->pcf->pdata->charging_restart_interval;
167 208
168 /* USB */ 209 /* USB */
169 if (irq == PCF50633_IRQ_USBINS) { 210 if (irq == PCF50633_IRQ_USBINS) {
@@ -172,6 +213,7 @@ pcf50633_mbc_irq_handler(int irq, void *data)
172 mbc->usb_online = 0; 213 mbc->usb_online = 0;
173 mbc->usb_active = 0; 214 mbc->usb_active = 0;
174 pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); 215 pcf50633_mbc_usb_curlim_set(mbc->pcf, 0);
216 cancel_delayed_work_sync(&mbc->charging_restart_work);
175 } 217 }
176 218
177 /* Adapter */ 219 /* Adapter */
@@ -186,7 +228,14 @@ pcf50633_mbc_irq_handler(int irq, void *data)
186 if (irq == PCF50633_IRQ_BATFULL) { 228 if (irq == PCF50633_IRQ_BATFULL) {
187 mbc->usb_active = 0; 229 mbc->usb_active = 0;
188 mbc->adapter_active = 0; 230 mbc->adapter_active = 0;
189 } 231
232 if (chg_restart_interval > 0)
233 schedule_delayed_work(&mbc->charging_restart_work,
234 chg_restart_interval);
235 } else if (irq == PCF50633_IRQ_USBLIMON)
236 mbc->usb_active = 0;
237 else if (irq == PCF50633_IRQ_USBLIMOFF)
238 mbc->usb_active = 1;
190 239
191 power_supply_changed(&mbc->usb); 240 power_supply_changed(&mbc->usb);
192 power_supply_changed(&mbc->adapter); 241 power_supply_changed(&mbc->adapter);
@@ -303,6 +352,9 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
303 return ret; 352 return ret;
304 } 353 }
305 354
355 INIT_DELAYED_WORK(&mbc->charging_restart_work,
356 pcf50633_mbc_charging_restart);
357
306 ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); 358 ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group);
307 if (ret) 359 if (ret)
308 dev_err(mbc->pcf->dev, "failed to create sysfs entries\n"); 360 dev_err(mbc->pcf->dev, "failed to create sysfs entries\n");
@@ -328,6 +380,8 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev)
328 power_supply_unregister(&mbc->usb); 380 power_supply_unregister(&mbc->usb);
329 power_supply_unregister(&mbc->adapter); 381 power_supply_unregister(&mbc->adapter);
330 382
383 cancel_delayed_work_sync(&mbc->charging_restart_work);
384
331 kfree(mbc); 385 kfree(mbc);
332 386
333 return 0; 387 return 0;
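The charger changes above do their register manipulation through pcf50633_reg_set_bit_mask(pcf, reg, mask, val), setting CHGENA or RESUME in MBCC1 without disturbing the other bits. Helpers of this kind are typically a read-modify-write that touches only the bits selected by the mask; a stand-alone sketch of that pattern (the register file, address and bit names below are illustrative, not the pcf50633 API):

#include <stdio.h>
#include <stdint.h>

static uint8_t regs[256];                       /* pretend register file */

static void reg_set_bit_mask(uint8_t reg, uint8_t mask, uint8_t val)
{
        regs[reg] = (regs[reg] & ~mask) | (val & mask);
}

int main(void)
{
        enum { REG_MBCC1 = 0x43 };              /* made-up address */
        enum { CHGENA = 1 << 0, RESUME = 1 << 1 };

        regs[REG_MBCC1] = 0xf0;                 /* unrelated bits already set */
        reg_set_bit_mask(REG_MBCC1, RESUME, RESUME);    /* restart charging */

        printf("MBCC1 = 0x%02x\n", regs[REG_MBCC1]);    /* 0xf2: other bits intact */
        return 0;
}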
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index b56a704409d2..a232de6a5703 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -12,11 +12,14 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/err.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
16#include <linux/power_supply.h> 17#include <linux/power_supply.h>
17#include <linux/pda_power.h> 18#include <linux/pda_power.h>
19#include <linux/regulator/consumer.h>
18#include <linux/timer.h> 20#include <linux/timer.h>
19#include <linux/jiffies.h> 21#include <linux/jiffies.h>
22#include <linux/usb/otg.h>
20 23
21static inline unsigned int get_irq_flags(struct resource *res) 24static inline unsigned int get_irq_flags(struct resource *res)
22{ 25{
@@ -35,6 +38,11 @@ static struct timer_list supply_timer;
35static struct timer_list polling_timer; 38static struct timer_list polling_timer;
36static int polling; 39static int polling;
37 40
41#ifdef CONFIG_USB_OTG_UTILS
42static struct otg_transceiver *transceiver;
43#endif
44static struct regulator *ac_draw;
45
38enum { 46enum {
39 PDA_PSY_OFFLINE = 0, 47 PDA_PSY_OFFLINE = 0,
40 PDA_PSY_ONLINE = 1, 48 PDA_PSY_ONLINE = 1,
@@ -104,18 +112,35 @@ static void update_status(void)
104 112
105static void update_charger(void) 113static void update_charger(void)
106{ 114{
107 if (!pdata->set_charge) 115 static int regulator_enabled;
108 return; 116 int max_uA = pdata->ac_max_uA;
109 117
110 if (new_ac_status > 0) { 118 if (pdata->set_charge) {
111 dev_dbg(dev, "charger on (AC)\n"); 119 if (new_ac_status > 0) {
112 pdata->set_charge(PDA_POWER_CHARGE_AC); 120 dev_dbg(dev, "charger on (AC)\n");
113 } else if (new_usb_status > 0) { 121 pdata->set_charge(PDA_POWER_CHARGE_AC);
114 dev_dbg(dev, "charger on (USB)\n"); 122 } else if (new_usb_status > 0) {
115 pdata->set_charge(PDA_POWER_CHARGE_USB); 123 dev_dbg(dev, "charger on (USB)\n");
116 } else { 124 pdata->set_charge(PDA_POWER_CHARGE_USB);
117 dev_dbg(dev, "charger off\n"); 125 } else {
118 pdata->set_charge(0); 126 dev_dbg(dev, "charger off\n");
127 pdata->set_charge(0);
128 }
129 } else if (ac_draw) {
130 if (new_ac_status > 0) {
131 regulator_set_current_limit(ac_draw, max_uA, max_uA);
132 if (!regulator_enabled) {
133 dev_dbg(dev, "charger on (AC)\n");
134 regulator_enable(ac_draw);
135 regulator_enabled = 1;
136 }
137 } else {
138 if (regulator_enabled) {
139 dev_dbg(dev, "charger off\n");
140 regulator_disable(ac_draw);
141 regulator_enabled = 0;
142 }
143 }
119 } 144 }
120} 145}
121 146
@@ -194,6 +219,13 @@ static void polling_timer_func(unsigned long unused)
194 jiffies + msecs_to_jiffies(pdata->polling_interval)); 219 jiffies + msecs_to_jiffies(pdata->polling_interval));
195} 220}
196 221
222#ifdef CONFIG_USB_OTG_UTILS
223static int otg_is_usb_online(void)
224{
225 return (transceiver->state == OTG_STATE_B_PERIPHERAL);
226}
227#endif
228
197static int pda_power_probe(struct platform_device *pdev) 229static int pda_power_probe(struct platform_device *pdev)
198{ 230{
199 int ret = 0; 231 int ret = 0;
@@ -227,6 +259,9 @@ static int pda_power_probe(struct platform_device *pdev)
227 if (!pdata->polling_interval) 259 if (!pdata->polling_interval)
228 pdata->polling_interval = 2000; 260 pdata->polling_interval = 2000;
229 261
262 if (!pdata->ac_max_uA)
263 pdata->ac_max_uA = 500000;
264
230 setup_timer(&charger_timer, charger_timer_func, 0); 265 setup_timer(&charger_timer, charger_timer_func, 0);
231 setup_timer(&supply_timer, supply_timer_func, 0); 266 setup_timer(&supply_timer, supply_timer_func, 0);
232 267
@@ -240,6 +275,13 @@ static int pda_power_probe(struct platform_device *pdev)
240 pda_psy_usb.num_supplicants = pdata->num_supplicants; 275 pda_psy_usb.num_supplicants = pdata->num_supplicants;
241 } 276 }
242 277
278 ac_draw = regulator_get(dev, "ac_draw");
279 if (IS_ERR(ac_draw)) {
280 dev_dbg(dev, "couldn't get ac_draw regulator\n");
281 ac_draw = NULL;
282 ret = PTR_ERR(ac_draw);
283 }
284
243 if (pdata->is_ac_online) { 285 if (pdata->is_ac_online) {
244 ret = power_supply_register(&pdev->dev, &pda_psy_ac); 286 ret = power_supply_register(&pdev->dev, &pda_psy_ac);
245 if (ret) { 287 if (ret) {
@@ -261,6 +303,13 @@ static int pda_power_probe(struct platform_device *pdev)
261 } 303 }
262 } 304 }
263 305
306#ifdef CONFIG_USB_OTG_UTILS
307 transceiver = otg_get_transceiver();
308 if (transceiver && !pdata->is_usb_online) {
309 pdata->is_usb_online = otg_is_usb_online;
310 }
311#endif
312
264 if (pdata->is_usb_online) { 313 if (pdata->is_usb_online) {
265 ret = power_supply_register(&pdev->dev, &pda_psy_usb); 314 ret = power_supply_register(&pdev->dev, &pda_psy_usb);
266 if (ret) { 315 if (ret) {
@@ -300,10 +349,18 @@ usb_irq_failed:
300usb_supply_failed: 349usb_supply_failed:
301 if (pdata->is_ac_online && ac_irq) 350 if (pdata->is_ac_online && ac_irq)
302 free_irq(ac_irq->start, &pda_psy_ac); 351 free_irq(ac_irq->start, &pda_psy_ac);
352#ifdef CONFIG_USB_OTG_UTILS
353 if (transceiver)
354 otg_put_transceiver(transceiver);
355#endif
303ac_irq_failed: 356ac_irq_failed:
304 if (pdata->is_ac_online) 357 if (pdata->is_ac_online)
305 power_supply_unregister(&pda_psy_ac); 358 power_supply_unregister(&pda_psy_ac);
306ac_supply_failed: 359ac_supply_failed:
360 if (ac_draw) {
361 regulator_put(ac_draw);
362 ac_draw = NULL;
363 }
307 if (pdata->exit) 364 if (pdata->exit)
308 pdata->exit(dev); 365 pdata->exit(dev);
309init_failed: 366init_failed:
@@ -327,6 +384,14 @@ static int pda_power_remove(struct platform_device *pdev)
327 power_supply_unregister(&pda_psy_usb); 384 power_supply_unregister(&pda_psy_usb);
328 if (pdata->is_ac_online) 385 if (pdata->is_ac_online)
329 power_supply_unregister(&pda_psy_ac); 386 power_supply_unregister(&pda_psy_ac);
387#ifdef CONFIG_USB_OTG_UTILS
388 if (transceiver)
389 otg_put_transceiver(transceiver);
390#endif
391 if (ac_draw) {
392 regulator_put(ac_draw);
393 ac_draw = NULL;
394 }
330 if (pdata->exit) 395 if (pdata->exit)
331 pdata->exit(dev); 396 pdata->exit(dev);
332 397
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 687dcf2d0154..5defe5ea5eda 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1663,7 +1663,7 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1663 1663
1664 /* Load RISC code. */ 1664 /* Load RISC code. */
1665 risc_address = ha->fwstart; 1665 risc_address = ha->fwstart;
1666 fw_data = (const __le16 *)&fw->data[4]; 1666 fw_data = (const __le16 *)&fw->data[6];
1667 risc_code_size = (fw->size - 6) / 2; 1667 risc_code_size = (fw->size - 6) / 2;
1668 1668
1669 for (i = 0; i < risc_code_size; i++) { 1669 for (i = 0; i < risc_code_size; i++) {
@@ -1722,7 +1722,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1722 1722
1723 /* Load RISC code. */ 1723 /* Load RISC code. */
1724 risc_address = ha->fwstart; 1724 risc_address = ha->fwstart;
1725 fw_data = (const __le16 *)&fw->data[4]; 1725 fw_data = (const __le16 *)&fw->data[6];
1726 risc_code_size = (fw->size - 6) / 2; 1726 risc_code_size = (fw->size - 6) / 2;
1727 1727
1728 dprintk(1, "%s: DMA RISC code (%i) words\n", 1728 dprintk(1, "%s: DMA RISC code (%i) words\n",
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e1850904ff73..fbc83bebdd8e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -38,9 +38,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
38 { }; 38 { };
39#endif 39#endif
40 40
41/* scsi_scan.c */
42int scsi_complete_async_scans(void);
43
44/* scsi_devinfo.c */ 41/* scsi_devinfo.c */
45extern int scsi_get_device_flags(struct scsi_device *sdev, 42extern int scsi_get_device_flags(struct scsi_device *sdev,
46 const unsigned char *vendor, 43 const unsigned char *vendor,
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 8a636103083d..2f21af21269a 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -11,7 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include "scsi_priv.h" 14#include <scsi/scsi_scan.h>
15 15
16static int __init wait_scan_init(void) 16static int __init wait_scan_init(void)
17{ 17{
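The include swap above matches the scsi_priv.h hunk earlier in this patch: the scsi_complete_async_scans() declaration leaves the private header, so the wait_scan module now picks it up from the public <scsi/scsi_scan.h> instead. For reference, the prototype that moved is simply the line below; any !CONFIG_SCSI stub the new header may provide is not shown in this diff.

/* declaration relocated from drivers/scsi/scsi_priv.h to <scsi/scsi_scan.h> */
int scsi_complete_async_scans(void);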
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 0328fd4006e5..343e3a35b6a3 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -854,7 +854,7 @@ config SERIAL_IMX_CONSOLE
854 854
855config SERIAL_UARTLITE 855config SERIAL_UARTLITE
856 tristate "Xilinx uartlite serial port support" 856 tristate "Xilinx uartlite serial port support"
857 depends on PPC32 857 depends on PPC32 || MICROBLAZE
858 select SERIAL_CORE 858 select SERIAL_CORE
859 help 859 help
860 Say Y here if you want to use the Xilinx uartlite serial controller. 860 Say Y here if you want to use the Xilinx uartlite serial controller.
@@ -1340,7 +1340,7 @@ config SERIAL_NETX_CONSOLE
1340 1340
1341config SERIAL_OF_PLATFORM 1341config SERIAL_OF_PLATFORM
1342 tristate "Serial port on Open Firmware platform bus" 1342 tristate "Serial port on Open Firmware platform bus"
1343 depends on PPC_OF 1343 depends on PPC_OF || MICROBLAZE
1344 depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL 1344 depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL
1345 help 1345 help
1346 If you have a PowerPC based system that has serial ports 1346 If you have a PowerPC based system that has serial ports
diff --git a/drivers/serial/max3100.c b/drivers/serial/max3100.c
new file mode 100644
index 000000000000..9fd33e5622bd
--- /dev/null
+++ b/drivers/serial/max3100.c
@@ -0,0 +1,927 @@
1/*
2 *
3 * Copyright (C) 2008 Christian Pellegrin <chripell@evolware.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 *
11 * Notes: the MAX3100 doesn't provide an interrupt on CTS, so we have
12 * to use polling for flow control. The TX-empty IRQ is unusable, since
13 * writing the configuration clears the FIFO buffer and we cannot have
14 * this interrupt always asking us for attention.
15 *
16 * Example platform data:
17
18 static struct plat_max3100 max3100_plat_data = {
19 .loopback = 0,
20 .crystal = 0,
21 .poll_time = 100,
22 };
23
24 static struct spi_board_info spi_board_info[] = {
25 {
26 .modalias = "max3100",
27 .platform_data = &max3100_plat_data,
28 .irq = IRQ_EINT12,
29 .max_speed_hz = 5*1000*1000,
30 .chip_select = 0,
31 },
32 };
33
34 * The initial minor number is 209 in the low-density serial port range:
35 * mknod /dev/ttyMAX0 c 204 209
36 */
37
38#define MAX3100_MAJOR 204
39#define MAX3100_MINOR 209
40/* 4 MAX3100s should be enough for everyone */
41#define MAX_MAX3100 4
42
43#include <linux/delay.h>
44#include <linux/device.h>
45#include <linux/serial_core.h>
46#include <linux/serial.h>
47#include <linux/spi/spi.h>
48#include <linux/freezer.h>
49
50#include <linux/serial_max3100.h>
51
52#define MAX3100_C (1<<14)
53#define MAX3100_D (0<<14)
54#define MAX3100_W (1<<15)
55#define MAX3100_RX (0<<15)
56
57#define MAX3100_WC (MAX3100_W | MAX3100_C)
58#define MAX3100_RC (MAX3100_RX | MAX3100_C)
59#define MAX3100_WD (MAX3100_W | MAX3100_D)
60#define MAX3100_RD (MAX3100_RX | MAX3100_D)
61#define MAX3100_CMD (3 << 14)
62
63#define MAX3100_T (1<<14)
64#define MAX3100_R (1<<15)
65
66#define MAX3100_FEN (1<<13)
67#define MAX3100_SHDN (1<<12)
68#define MAX3100_TM (1<<11)
69#define MAX3100_RM (1<<10)
70#define MAX3100_PM (1<<9)
71#define MAX3100_RAM (1<<8)
72#define MAX3100_IR (1<<7)
73#define MAX3100_ST (1<<6)
74#define MAX3100_PE (1<<5)
75#define MAX3100_L (1<<4)
76#define MAX3100_BAUD (0xf)
77
78#define MAX3100_TE (1<<10)
79#define MAX3100_RAFE (1<<10)
80#define MAX3100_RTS (1<<9)
81#define MAX3100_CTS (1<<9)
82#define MAX3100_PT (1<<8)
83#define MAX3100_DATA (0xff)
84
85#define MAX3100_RT (MAX3100_R | MAX3100_T)
86#define MAX3100_RTC (MAX3100_RT | MAX3100_CTS | MAX3100_RAFE)
87
88/* the following simulate a status reg for ignore_status_mask */
89#define MAX3100_STATUS_PE 1
90#define MAX3100_STATUS_FE 2
91#define MAX3100_STATUS_OE 4
92
93struct max3100_port {
94 struct uart_port port;
95 struct spi_device *spi;
96
97 int cts; /* last CTS received for flow ctrl */
98 int tx_empty; /* last TX empty bit */
99
100 spinlock_t conf_lock; /* shared data */
101 int conf_commit; /* need to make changes */
102 int conf; /* configuration for the MAX3100
103 * (bits 0-7, bits 8-11 are irqs) */
104 int rts_commit; /* need to change rts */
105 int rts; /* rts status */
106 int baud; /* current baud rate */
107
108 int parity; /* keeps track if we should send parity */
109#define MAX3100_PARITY_ON 1
110#define MAX3100_PARITY_ODD 2
111#define MAX3100_7BIT 4
112 int rx_enabled; /* if we should rx chars */
113
114 int irq; /* irq assigned to the max3100 */
115
116 int minor; /* minor number */
117 int crystal; /* 1 if 3.6864 MHz crystal, 0 for 1.8432 MHz */
118 int loopback; /* 1 if we are in loopback mode */
119
120 /* for handling irqs: need workqueue since we do spi_sync */
121 struct workqueue_struct *workqueue;
122 struct work_struct work;
123 /* set to 1 to make the workhandler exit as soon as possible */
124 int force_end_work;
125 /* need to know we are suspending to avoid deadlock on workqueue */
126 int suspending;
127
128 /* hook for suspending MAX3100 via dedicated pin */
129 void (*max3100_hw_suspend) (int suspend);
130
131 /* poll time (in ms) for ctrl lines */
132 int poll_time;
133 /* and its timer */
134 struct timer_list timer;
135};
136
137static struct max3100_port *max3100s[MAX_MAX3100]; /* the chips */
138static DEFINE_MUTEX(max3100s_lock); /* race on probe */
139
140static int max3100_do_parity(struct max3100_port *s, u16 c)
141{
142 int parity;
143
144 if (s->parity & MAX3100_PARITY_ODD)
145 parity = 1;
146 else
147 parity = 0;
148
149 if (s->parity & MAX3100_7BIT)
150 c &= 0x7f;
151 else
152 c &= 0xff;
153
154 parity = parity ^ (hweight8(c) & 1);
155 return parity;
156}
157
158static int max3100_check_parity(struct max3100_port *s, u16 c)
159{
160 return max3100_do_parity(s, c) == ((c >> 8) & 1);
161}
162
163static void max3100_calc_parity(struct max3100_port *s, u16 *c)
164{
165 if (s->parity & MAX3100_7BIT)
166 *c &= 0x7f;
167 else
168 *c &= 0xff;
169
170 if (s->parity & MAX3100_PARITY_ON)
171 *c |= max3100_do_parity(s, *c) << 8;
172}
173
174static void max3100_work(struct work_struct *w);
175
176static void max3100_dowork(struct max3100_port *s)
177{
178 if (!s->force_end_work && !work_pending(&s->work) &&
179 !freezing(current) && !s->suspending)
180 queue_work(s->workqueue, &s->work);
181}
182
183static void max3100_timeout(unsigned long data)
184{
185 struct max3100_port *s = (struct max3100_port *)data;
186
187 if (s->port.info) {
188 max3100_dowork(s);
189 mod_timer(&s->timer, jiffies + s->poll_time);
190 }
191}
192
193static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
194{
195 struct spi_message message;
196 u16 etx, erx;
197 int status;
198 struct spi_transfer tran = {
199 .tx_buf = &etx,
200 .rx_buf = &erx,
201 .len = 2,
202 };
203
204 etx = cpu_to_be16(tx);
205 spi_message_init(&message);
206 spi_message_add_tail(&tran, &message);
207 status = spi_sync(s->spi, &message);
208 if (status) {
209 dev_warn(&s->spi->dev, "error while calling spi_sync\n");
210 return -EIO;
211 }
212 *rx = be16_to_cpu(erx);
213 s->tx_empty = (*rx & MAX3100_T) > 0;
214 dev_dbg(&s->spi->dev, "%04x - %04x\n", tx, *rx);
215 return 0;
216}
217
218static int max3100_handlerx(struct max3100_port *s, u16 rx)
219{
220 unsigned int ch, flg, status = 0;
221 int ret = 0, cts;
222
223 if (rx & MAX3100_R && s->rx_enabled) {
224 dev_dbg(&s->spi->dev, "%s\n", __func__);
225 ch = rx & (s->parity & MAX3100_7BIT ? 0x7f : 0xff);
226 if (rx & MAX3100_RAFE) {
227 s->port.icount.frame++;
228 flg = TTY_FRAME;
229 status |= MAX3100_STATUS_FE;
230 } else {
231 if (s->parity & MAX3100_PARITY_ON) {
232 if (max3100_check_parity(s, rx)) {
233 s->port.icount.rx++;
234 flg = TTY_NORMAL;
235 } else {
236 s->port.icount.parity++;
237 flg = TTY_PARITY;
238 status |= MAX3100_STATUS_PE;
239 }
240 } else {
241 s->port.icount.rx++;
242 flg = TTY_NORMAL;
243 }
244 }
245 uart_insert_char(&s->port, status, MAX3100_STATUS_OE, ch, flg);
246 ret = 1;
247 }
248
249 cts = (rx & MAX3100_CTS) > 0;
250 if (s->cts != cts) {
251 s->cts = cts;
252 uart_handle_cts_change(&s->port, cts ? TIOCM_CTS : 0);
253 }
254
255 return ret;
256}
257
258static void max3100_work(struct work_struct *w)
259{
260 struct max3100_port *s = container_of(w, struct max3100_port, work);
261 int rxchars;
262 u16 tx, rx;
263 int conf, cconf, rts, crts;
264 struct circ_buf *xmit = &s->port.info->xmit;
265
266 dev_dbg(&s->spi->dev, "%s\n", __func__);
267
268 rxchars = 0;
269 do {
270 spin_lock(&s->conf_lock);
271 conf = s->conf;
272 cconf = s->conf_commit;
273 s->conf_commit = 0;
274 rts = s->rts;
275 crts = s->rts_commit;
276 s->rts_commit = 0;
277 spin_unlock(&s->conf_lock);
278 if (cconf)
279 max3100_sr(s, MAX3100_WC | conf, &rx);
280 if (crts) {
281 max3100_sr(s, MAX3100_WD | MAX3100_TE |
282 (s->rts ? MAX3100_RTS : 0), &rx);
283 rxchars += max3100_handlerx(s, rx);
284 }
285
286 max3100_sr(s, MAX3100_RD, &rx);
287 rxchars += max3100_handlerx(s, rx);
288
289 if (rx & MAX3100_T) {
290 tx = 0xffff;
291 if (s->port.x_char) {
292 tx = s->port.x_char;
293 s->port.icount.tx++;
294 s->port.x_char = 0;
295 } else if (!uart_circ_empty(xmit) &&
296 !uart_tx_stopped(&s->port)) {
297 tx = xmit->buf[xmit->tail];
298 xmit->tail = (xmit->tail + 1) &
299 (UART_XMIT_SIZE - 1);
300 s->port.icount.tx++;
301 }
302 if (tx != 0xffff) {
303 max3100_calc_parity(s, &tx);
304 tx |= MAX3100_WD | (s->rts ? MAX3100_RTS : 0);
305 max3100_sr(s, tx, &rx);
306 rxchars += max3100_handlerx(s, rx);
307 }
308 }
309
310 if (rxchars > 16 && s->port.info->port.tty != NULL) {
311 tty_flip_buffer_push(s->port.info->port.tty);
312 rxchars = 0;
313 }
314 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
315 uart_write_wakeup(&s->port);
316
317 } while (!s->force_end_work &&
318 !freezing(current) &&
319 ((rx & MAX3100_R) ||
320 (!uart_circ_empty(xmit) &&
321 !uart_tx_stopped(&s->port))));
322
323 if (rxchars > 0 && s->port.info->port.tty != NULL)
324 tty_flip_buffer_push(s->port.info->port.tty);
325}
326
327static irqreturn_t max3100_irq(int irqno, void *dev_id)
328{
329 struct max3100_port *s = dev_id;
330
331 dev_dbg(&s->spi->dev, "%s\n", __func__);
332
333 max3100_dowork(s);
334 return IRQ_HANDLED;
335}
336
337static void max3100_enable_ms(struct uart_port *port)
338{
339 struct max3100_port *s = container_of(port,
340 struct max3100_port,
341 port);
342
343 if (s->poll_time > 0)
344 mod_timer(&s->timer, jiffies);
345 dev_dbg(&s->spi->dev, "%s\n", __func__);
346}
347
348static void max3100_start_tx(struct uart_port *port)
349{
350 struct max3100_port *s = container_of(port,
351 struct max3100_port,
352 port);
353
354 dev_dbg(&s->spi->dev, "%s\n", __func__);
355
356 max3100_dowork(s);
357}
358
359static void max3100_stop_rx(struct uart_port *port)
360{
361 struct max3100_port *s = container_of(port,
362 struct max3100_port,
363 port);
364
365 dev_dbg(&s->spi->dev, "%s\n", __func__);
366
367 s->rx_enabled = 0;
368 spin_lock(&s->conf_lock);
369 s->conf &= ~MAX3100_RM;
370 s->conf_commit = 1;
371 spin_unlock(&s->conf_lock);
372 max3100_dowork(s);
373}
374
375static unsigned int max3100_tx_empty(struct uart_port *port)
376{
377 struct max3100_port *s = container_of(port,
378 struct max3100_port,
379 port);
380
381 dev_dbg(&s->spi->dev, "%s\n", __func__);
382
383 /* may not be truly up-to-date */
384 max3100_dowork(s);
385 return s->tx_empty;
386}
387
388static unsigned int max3100_get_mctrl(struct uart_port *port)
389{
390 struct max3100_port *s = container_of(port,
391 struct max3100_port,
392 port);
393
394 dev_dbg(&s->spi->dev, "%s\n", __func__);
395
396 /* may not be truly up-to-date */
397 max3100_dowork(s);
398 /* always assert DCD and DSR since these lines are not wired */
399 return (s->cts ? TIOCM_CTS : 0) | TIOCM_DSR | TIOCM_CAR;
400}
401
402static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl)
403{
404 struct max3100_port *s = container_of(port,
405 struct max3100_port,
406 port);
407 int rts;
408
409 dev_dbg(&s->spi->dev, "%s\n", __func__);
410
411 rts = (mctrl & TIOCM_RTS) > 0;
412
413 spin_lock(&s->conf_lock);
414 if (s->rts != rts) {
415 s->rts = rts;
416 s->rts_commit = 1;
417 max3100_dowork(s);
418 }
419 spin_unlock(&s->conf_lock);
420}
421
422static void
423max3100_set_termios(struct uart_port *port, struct ktermios *termios,
424 struct ktermios *old)
425{
426 struct max3100_port *s = container_of(port,
427 struct max3100_port,
428 port);
429 int baud = 0;
430 unsigned cflag;
431 u32 param_new, param_mask, parity = 0;
432 struct tty_struct *tty = s->port.info->port.tty;
433
434 dev_dbg(&s->spi->dev, "%s\n", __func__);
435 if (!tty)
436 return;
437
438 cflag = termios->c_cflag;
439 param_new = 0;
440 param_mask = 0;
441
442 baud = tty_get_baud_rate(tty);
443 param_new = s->conf & MAX3100_BAUD;
444 switch (baud) {
445 case 300:
446 if (s->crystal)
447 baud = s->baud;
448 else
449 param_new = 15;
450 break;
451 case 600:
452 param_new = 14 + s->crystal;
453 break;
454 case 1200:
455 param_new = 13 + s->crystal;
456 break;
457 case 2400:
458 param_new = 12 + s->crystal;
459 break;
460 case 4800:
461 param_new = 11 + s->crystal;
462 break;
463 case 9600:
464 param_new = 10 + s->crystal;
465 break;
466 case 19200:
467 param_new = 9 + s->crystal;
468 break;
469 case 38400:
470 param_new = 8 + s->crystal;
471 break;
472 case 57600:
473 param_new = 1 + s->crystal;
474 break;
475 case 115200:
476 param_new = 0 + s->crystal;
477 break;
478 case 230400:
479 if (s->crystal)
480 param_new = 0;
481 else
482 baud = s->baud;
483 break;
484 default:
485 baud = s->baud;
486 }
487 tty_encode_baud_rate(tty, baud, baud);
488 s->baud = baud;
489 param_mask |= MAX3100_BAUD;
490
491 if ((cflag & CSIZE) == CS8) {
492 param_new &= ~MAX3100_L;
493 parity &= ~MAX3100_7BIT;
494 } else {
495 param_new |= MAX3100_L;
496 parity |= MAX3100_7BIT;
497 cflag = (cflag & ~CSIZE) | CS7;
498 }
499 param_mask |= MAX3100_L;
500
501 if (cflag & CSTOPB)
502 param_new |= MAX3100_ST;
503 else
504 param_new &= ~MAX3100_ST;
505 param_mask |= MAX3100_ST;
506
507 if (cflag & PARENB) {
508 param_new |= MAX3100_PE;
509 parity |= MAX3100_PARITY_ON;
510 } else {
511 param_new &= ~MAX3100_PE;
512 parity &= ~MAX3100_PARITY_ON;
513 }
514 param_mask |= MAX3100_PE;
515
516 if (cflag & PARODD)
517 parity |= MAX3100_PARITY_ODD;
518 else
519 parity &= ~MAX3100_PARITY_ODD;
520
521 /* mask termios capabilities we don't support */
522 cflag &= ~CMSPAR;
523 termios->c_cflag = cflag;
524
525 s->port.ignore_status_mask = 0;
526 if (termios->c_iflag & IGNPAR)
527 s->port.ignore_status_mask |=
528 MAX3100_STATUS_PE | MAX3100_STATUS_FE |
529 MAX3100_STATUS_OE;
530
531 /* we are sending chars from a workqueue, so enable low_latency */
532 s->port.info->port.tty->low_latency = 1;
533
534 if (s->poll_time > 0)
535 del_timer_sync(&s->timer);
536
537 uart_update_timeout(port, termios->c_cflag, baud);
538
539 spin_lock(&s->conf_lock);
540 s->conf = (s->conf & ~param_mask) | (param_new & param_mask);
541 s->conf_commit = 1;
542 s->parity = parity;
543 spin_unlock(&s->conf_lock);
544 max3100_dowork(s);
545
546 if (UART_ENABLE_MS(&s->port, termios->c_cflag))
547 max3100_enable_ms(&s->port);
548}
549
550static void max3100_shutdown(struct uart_port *port)
551{
552 struct max3100_port *s = container_of(port,
553 struct max3100_port,
554 port);
555
556 dev_dbg(&s->spi->dev, "%s\n", __func__);
557
558 if (s->suspending)
559 return;
560
561 s->force_end_work = 1;
562
563 if (s->poll_time > 0)
564 del_timer_sync(&s->timer);
565
566 if (s->workqueue) {
567 flush_workqueue(s->workqueue);
568 destroy_workqueue(s->workqueue);
569 s->workqueue = NULL;
570 }
571 if (s->irq)
572 free_irq(s->irq, s);
573
574 /* set shutdown mode to save power */
575 if (s->max3100_hw_suspend)
576 s->max3100_hw_suspend(1);
577 else {
578 u16 tx, rx;
579
580 tx = MAX3100_WC | MAX3100_SHDN;
581 max3100_sr(s, tx, &rx);
582 }
583}
584
585static int max3100_startup(struct uart_port *port)
586{
587 struct max3100_port *s = container_of(port,
588 struct max3100_port,
589 port);
590 char b[12];
591
592 dev_dbg(&s->spi->dev, "%s\n", __func__);
593
594 s->conf = MAX3100_RM;
595 s->baud = s->crystal ? 230400 : 115200;
596 s->rx_enabled = 1;
597
598 if (s->suspending)
599 return 0;
600
601 s->force_end_work = 0;
602 s->parity = 0;
603 s->rts = 0;
604
605 sprintf(b, "max3100-%d", s->minor);
606 s->workqueue = create_freezeable_workqueue(b);
607 if (!s->workqueue) {
608 dev_warn(&s->spi->dev, "cannot create workqueue\n");
609 return -EBUSY;
610 }
611 INIT_WORK(&s->work, max3100_work);
612
613 if (request_irq(s->irq, max3100_irq,
614 IRQF_TRIGGER_FALLING, "max3100", s) < 0) {
615 dev_warn(&s->spi->dev, "cannot allocate irq %d\n", s->irq);
616 s->irq = 0;
617 destroy_workqueue(s->workqueue);
618 s->workqueue = NULL;
619 return -EBUSY;
620 }
621
622 if (s->loopback) {
623 u16 tx, rx;
624 tx = 0x4001;
625 max3100_sr(s, tx, &rx);
626 }
627
628 if (s->max3100_hw_suspend)
629 s->max3100_hw_suspend(0);
630 s->conf_commit = 1;
631 max3100_dowork(s);
632 /* wait for clock to settle */
633 msleep(50);
634
635 max3100_enable_ms(&s->port);
636
637 return 0;
638}
639
640static const char *max3100_type(struct uart_port *port)
641{
642 struct max3100_port *s = container_of(port,
643 struct max3100_port,
644 port);
645
646 dev_dbg(&s->spi->dev, "%s\n", __func__);
647
648 return s->port.type == PORT_MAX3100 ? "MAX3100" : NULL;
649}
650
651static void max3100_release_port(struct uart_port *port)
652{
653 struct max3100_port *s = container_of(port,
654 struct max3100_port,
655 port);
656
657 dev_dbg(&s->spi->dev, "%s\n", __func__);
658}
659
660static void max3100_config_port(struct uart_port *port, int flags)
661{
662 struct max3100_port *s = container_of(port,
663 struct max3100_port,
664 port);
665
666 dev_dbg(&s->spi->dev, "%s\n", __func__);
667
668 if (flags & UART_CONFIG_TYPE)
669 s->port.type = PORT_MAX3100;
670}
671
672static int max3100_verify_port(struct uart_port *port,
673 struct serial_struct *ser)
674{
675 struct max3100_port *s = container_of(port,
676 struct max3100_port,
677 port);
678 int ret = -EINVAL;
679
680 dev_dbg(&s->spi->dev, "%s\n", __func__);
681
682 if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3100)
683 ret = 0;
684 return ret;
685}
686
687static void max3100_stop_tx(struct uart_port *port)
688{
689 struct max3100_port *s = container_of(port,
690 struct max3100_port,
691 port);
692
693 dev_dbg(&s->spi->dev, "%s\n", __func__);
694}
695
696static int max3100_request_port(struct uart_port *port)
697{
698 struct max3100_port *s = container_of(port,
699 struct max3100_port,
700 port);
701
702 dev_dbg(&s->spi->dev, "%s\n", __func__);
703 return 0;
704}
705
706static void max3100_break_ctl(struct uart_port *port, int break_state)
707{
708 struct max3100_port *s = container_of(port,
709 struct max3100_port,
710 port);
711
712 dev_dbg(&s->spi->dev, "%s\n", __func__);
713}
714
715static struct uart_ops max3100_ops = {
716 .tx_empty = max3100_tx_empty,
717 .set_mctrl = max3100_set_mctrl,
718 .get_mctrl = max3100_get_mctrl,
719 .stop_tx = max3100_stop_tx,
720 .start_tx = max3100_start_tx,
721 .stop_rx = max3100_stop_rx,
722 .enable_ms = max3100_enable_ms,
723 .break_ctl = max3100_break_ctl,
724 .startup = max3100_startup,
725 .shutdown = max3100_shutdown,
726 .set_termios = max3100_set_termios,
727 .type = max3100_type,
728 .release_port = max3100_release_port,
729 .request_port = max3100_request_port,
730 .config_port = max3100_config_port,
731 .verify_port = max3100_verify_port,
732};
733
734static struct uart_driver max3100_uart_driver = {
735 .owner = THIS_MODULE,
736 .driver_name = "ttyMAX",
737 .dev_name = "ttyMAX",
738 .major = MAX3100_MAJOR,
739 .minor = MAX3100_MINOR,
740 .nr = MAX_MAX3100,
741};
742static int uart_driver_registered;
743
744static int __devinit max3100_probe(struct spi_device *spi)
745{
746 int i, retval;
747 struct plat_max3100 *pdata;
748 u16 tx, rx;
749
750 mutex_lock(&max3100s_lock);
751
752 if (!uart_driver_registered) {
753 uart_driver_registered = 1;
754 retval = uart_register_driver(&max3100_uart_driver);
755 if (retval) {
756 printk(KERN_ERR "Couldn't register max3100 uart driver\n");
757 mutex_unlock(&max3100s_lock);
758 return retval;
759 }
760 }
761
762 for (i = 0; i < MAX_MAX3100; i++)
763 if (!max3100s[i])
764 break;
765 if (i == MAX_MAX3100) {
766 dev_warn(&spi->dev, "too many MAX3100 chips\n");
767 mutex_unlock(&max3100s_lock);
768 return -ENOMEM;
769 }
770
771 max3100s[i] = kzalloc(sizeof(struct max3100_port), GFP_KERNEL);
772 if (!max3100s[i]) {
773 dev_warn(&spi->dev,
774 "kmalloc for max3100 structure %d failed!\n", i);
775 mutex_unlock(&max3100s_lock);
776 return -ENOMEM;
777 }
778 max3100s[i]->spi = spi;
779 max3100s[i]->irq = spi->irq;
780 spin_lock_init(&max3100s[i]->conf_lock);
781 dev_set_drvdata(&spi->dev, max3100s[i]);
782 pdata = spi->dev.platform_data;
783 max3100s[i]->crystal = pdata->crystal;
784 max3100s[i]->loopback = pdata->loopback;
785 max3100s[i]->poll_time = pdata->poll_time * HZ / 1000;
786 if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0)
787 max3100s[i]->poll_time = 1;
788 max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
789 max3100s[i]->minor = i;
790 init_timer(&max3100s[i]->timer);
791 max3100s[i]->timer.function = max3100_timeout;
792 max3100s[i]->timer.data = (unsigned long) max3100s[i];
793
794 dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
795 max3100s[i]->port.irq = max3100s[i]->irq;
796 max3100s[i]->port.uartclk = max3100s[i]->crystal ? 3686400 : 1843200;
797 max3100s[i]->port.fifosize = 16;
798 max3100s[i]->port.ops = &max3100_ops;
799 max3100s[i]->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
800 max3100s[i]->port.line = i;
801 max3100s[i]->port.type = PORT_MAX3100;
802 max3100s[i]->port.dev = &spi->dev;
803 retval = uart_add_one_port(&max3100_uart_driver, &max3100s[i]->port);
804 if (retval < 0)
805 dev_warn(&spi->dev,
806 "uart_add_one_port failed for line %d with error %d\n",
807 i, retval);
808
809 /* set shutdown mode to save power. Will be woken-up on open */
810 if (max3100s[i]->max3100_hw_suspend)
811 max3100s[i]->max3100_hw_suspend(1);
812 else {
813 tx = MAX3100_WC | MAX3100_SHDN;
814 max3100_sr(max3100s[i], tx, &rx);
815 }
816 mutex_unlock(&max3100s_lock);
817 return 0;
818}
819
820static int __devexit max3100_remove(struct spi_device *spi)
821{
822 struct max3100_port *s = dev_get_drvdata(&spi->dev);
823 int i;
824
825 mutex_lock(&max3100s_lock);
826
827 /* find out the index for the chip we are removing */
828 for (i = 0; i < MAX_MAX3100; i++)
829 if (max3100s[i] == s)
830 break;
831
832 dev_dbg(&spi->dev, "%s: removing port %d\n", __func__, i);
833 uart_remove_one_port(&max3100_uart_driver, &max3100s[i]->port);
834 kfree(max3100s[i]);
835 max3100s[i] = NULL;
836
837 /* check if this is the last chip we have */
838 for (i = 0; i < MAX_MAX3100; i++)
839 if (max3100s[i]) {
840 mutex_unlock(&max3100s_lock);
841 return 0;
842 }
843 pr_debug("removing max3100 driver\n");
844 uart_unregister_driver(&max3100_uart_driver);
845
846 mutex_unlock(&max3100s_lock);
847 return 0;
848}
849
850#ifdef CONFIG_PM
851
852static int max3100_suspend(struct spi_device *spi, pm_message_t state)
853{
854 struct max3100_port *s = dev_get_drvdata(&spi->dev);
855
856 dev_dbg(&s->spi->dev, "%s\n", __func__);
857
858 disable_irq(s->irq);
859
860 s->suspending = 1;
861 uart_suspend_port(&max3100_uart_driver, &s->port);
862
863 if (s->max3100_hw_suspend)
864 s->max3100_hw_suspend(1);
865 else {
866 /* no HW suspend, so do SW one */
867 u16 tx, rx;
868
869 tx = MAX3100_WC | MAX3100_SHDN;
870 max3100_sr(s, tx, &rx);
871 }
872 return 0;
873}
874
875static int max3100_resume(struct spi_device *spi)
876{
877 struct max3100_port *s = dev_get_drvdata(&spi->dev);
878
879 dev_dbg(&s->spi->dev, "%s\n", __func__);
880
881 if (s->max3100_hw_suspend)
882 s->max3100_hw_suspend(0);
883 uart_resume_port(&max3100_uart_driver, &s->port);
884 s->suspending = 0;
885
886 enable_irq(s->irq);
887
888 s->conf_commit = 1;
889 if (s->workqueue)
890 max3100_dowork(s);
891
892 return 0;
893}
894
895#else
896#define max3100_suspend NULL
897#define max3100_resume NULL
898#endif
899
900static struct spi_driver max3100_driver = {
901 .driver = {
902 .name = "max3100",
903 .bus = &spi_bus_type,
904 .owner = THIS_MODULE,
905 },
906
907 .probe = max3100_probe,
908 .remove = __devexit_p(max3100_remove),
909 .suspend = max3100_suspend,
910 .resume = max3100_resume,
911};
912
913static int __init max3100_init(void)
914{
915 return spi_register_driver(&max3100_driver);
916}
917module_init(max3100_init);
918
919static void __exit max3100_exit(void)
920{
921 spi_unregister_driver(&max3100_driver);
922}
923module_exit(max3100_exit);
924
925MODULE_DESCRIPTION("MAX3100 driver");
926MODULE_AUTHOR("Christian Pellegrin <chripell@evolware.org>");
927MODULE_LICENSE("GPL");
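Every exchange with the MAX3100 in the driver above is a single 16-bit SPI word: the two top bits select write/read and config/data (the MAX3100_WC/WD/RC/RD combinations), and the remaining bits carry either configuration or TX/RX data plus status. Below is a small standalone sketch of that framing, reusing the same bit definitions; no hardware is involved and the echoed word is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* same bit layout as the defines in max3100.c above */
#define MAX3100_C   (1 << 14)
#define MAX3100_D   (0 << 14)
#define MAX3100_W   (1 << 15)
#define MAX3100_RX  (0 << 15)
#define MAX3100_WD  (MAX3100_W | MAX3100_D)
#define MAX3100_TE  (1 << 10)
#define MAX3100_RTS (1 << 9)
#define MAX3100_T   (1 << 14)

int main(void)
{
	/* "write data" word that sends no character (TE set), asserts RTS;
	 * this mirrors the RTS-only update done in max3100_work() */
	uint16_t tx = MAX3100_WD | MAX3100_TE | MAX3100_RTS;

	/* pretend this is the word clocked back by the chip */
	uint16_t rx = MAX3100_T | 0x41;		/* TX buffer empty, data 'A' */

	printf("tx word  : 0x%04x\n", tx);
	printf("tx empty : %d\n", (rx & MAX3100_T) > 0);
	printf("rx data  : 0x%02x\n", rx & 0xff);
	return 0;
}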
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index a4dc79b1d7ab..47c6837850b1 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1178,7 +1178,7 @@ static struct uart_driver sunsu_reg = {
1178 .major = TTY_MAJOR, 1178 .major = TTY_MAJOR,
1179}; 1179};
1180 1180
1181static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up) 1181static int __devinit sunsu_kbd_ms_init(struct uart_sunsu_port *up)
1182{ 1182{
1183 int quot, baud; 1183 int quot, baud;
1184#ifdef CONFIG_SERIO 1184#ifdef CONFIG_SERIO
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 4ed228a89943..bb5e6f671578 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -280,7 +280,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
280 * are always powered while this driver is active, and use 280 * are always powered while this driver is active, and use
281 * active-low power switches. 281 * active-low power switches.
282 */ 282 */
283 for (i = 0; i < pdata->ports; i++) { 283 for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
284 if (pdata->vbus_pin[i] <= 0) 284 if (pdata->vbus_pin[i] <= 0)
285 continue; 285 continue;
286 gpio_request(pdata->vbus_pin[i], "ohci_vbus"); 286 gpio_request(pdata->vbus_pin[i], "ohci_vbus");
@@ -298,7 +298,7 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
298 int i; 298 int i;
299 299
300 if (pdata) { 300 if (pdata) {
301 for (i = 0; i < pdata->ports; i++) { 301 for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
302 if (pdata->vbus_pin[i] <= 0) 302 if (pdata->vbus_pin[i] <= 0)
303 continue; 303 continue;
304 gpio_direction_output(pdata->vbus_pin[i], 1); 304 gpio_direction_output(pdata->vbus_pin[i], 1);
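Both ohci-at91 hunks bound the vbus loops by the size of the vbus_pin[] array itself instead of the board-supplied pdata->ports, so a bogus port count can no longer index past the array. ARRAY_SIZE() is the usual kernel sizeof idiom; its core is the one-liner below (the in-tree macro additionally asserts at compile time that its argument really is an array).

/* core of the kernel's ARRAY_SIZE(); the real macro adds a
 * compile-time "must be an array" check on top of this */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))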
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 63024145215d..5eb8f21da82e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -240,8 +240,6 @@ config ORION5X_WATCHDOG
240 To compile this driver as a module, choose M here: the 240 To compile this driver as a module, choose M here: the
241 module will be called orion5x_wdt. 241 module will be called orion5x_wdt.
242 242
243# ARM26 Architecture
244
245# AVR32 Architecture 243# AVR32 Architecture
246 244
247config AT32AP700X_WDT 245config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 806b3eb08536..7f8c56b14f58 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -42,8 +42,6 @@ obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
42obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o 42obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
43obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o 43obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o
44 44
45# ARM26 Architecture
46
47# AVR32 Architecture 45# AVR32 Architecture
48obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 46obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
49 47
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index e35d54589232..29e52c237a3b 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -197,7 +197,7 @@ static struct miscdevice at91wdt_miscdev = {
197 .fops = &at91wdt_fops, 197 .fops = &at91wdt_fops,
198}; 198};
199 199
200static int __init at91wdt_probe(struct platform_device *pdev) 200static int __devinit at91wdt_probe(struct platform_device *pdev)
201{ 201{
202 int res; 202 int res;
203 203
@@ -214,7 +214,7 @@ static int __init at91wdt_probe(struct platform_device *pdev)
214 return 0; 214 return 0;
215} 215}
216 216
217static int __exit at91wdt_remove(struct platform_device *pdev) 217static int __devexit at91wdt_remove(struct platform_device *pdev)
218{ 218{
219 int res; 219 int res;
220 220
@@ -252,7 +252,7 @@ static int at91wdt_resume(struct platform_device *pdev)
252 252
253static struct platform_driver at91wdt_driver = { 253static struct platform_driver at91wdt_driver = {
254 .probe = at91wdt_probe, 254 .probe = at91wdt_probe,
255 .remove = __exit_p(at91wdt_remove), 255 .remove = __devexit_p(at91wdt_remove),
256 .shutdown = at91wdt_shutdown, 256 .shutdown = at91wdt_shutdown,
257 .suspend = at91wdt_suspend, 257 .suspend = at91wdt_suspend,
258 .resume = at91wdt_resume, 258 .resume = at91wdt_resume,
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index 2dbe83570d65..7ba0b11ec525 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -52,10 +52,10 @@
52#define ESB_LOCK_REG 0x68 /* WDT lock register */ 52#define ESB_LOCK_REG 0x68 /* WDT lock register */
53 53
54/* Memory mapped registers */ 54/* Memory mapped registers */
55#define ESB_TIMER1_REG BASEADDR + 0x00 /* Timer1 value after each reset */ 55#define ESB_TIMER1_REG (BASEADDR + 0x00)/* Timer1 value after each reset */
56#define ESB_TIMER2_REG BASEADDR + 0x04 /* Timer2 value after each reset */ 56#define ESB_TIMER2_REG (BASEADDR + 0x04)/* Timer2 value after each reset */
57#define ESB_GINTSR_REG BASEADDR + 0x08 /* General Interrupt Status Register */ 57#define ESB_GINTSR_REG (BASEADDR + 0x08)/* General Interrupt Status Register */
58#define ESB_RELOAD_REG BASEADDR + 0x0c /* Reload register */ 58#define ESB_RELOAD_REG (BASEADDR + 0x0c)/* Reload register */
59 59
60/* Lock register bits */ 60/* Lock register bits */
61#define ESB_WDT_FUNC (0x01 << 2) /* Watchdog functionality */ 61#define ESB_WDT_FUNC (0x01 << 2) /* Watchdog functionality */
@@ -68,6 +68,7 @@
68#define ESB_WDT_INTTYPE (0x11 << 0) /* Interrupt type on timer1 timeout */ 68#define ESB_WDT_INTTYPE (0x11 << 0) /* Interrupt type on timer1 timeout */
69 69
70/* Reload register bits */ 70/* Reload register bits */
71#define ESB_WDT_TIMEOUT (0x01 << 9) /* Watchdog timed out */
71#define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */ 72#define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */
72 73
73/* Magic constants */ 74/* Magic constants */
@@ -87,7 +88,6 @@ static struct platform_device *esb_platform_device;
87/* 30 sec default heartbeat (1 < heartbeat < 2*1023) */ 88/* 30 sec default heartbeat (1 < heartbeat < 2*1023) */
88#define WATCHDOG_HEARTBEAT 30 89#define WATCHDOG_HEARTBEAT 30
89static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ 90static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
90
91module_param(heartbeat, int, 0); 91module_param(heartbeat, int, 0);
92MODULE_PARM_DESC(heartbeat, 92MODULE_PARM_DESC(heartbeat,
93 "Watchdog heartbeat in seconds. (1<heartbeat<2046, default=" 93 "Watchdog heartbeat in seconds. (1<heartbeat<2046, default="
@@ -123,7 +123,7 @@ static int esb_timer_start(void)
123 esb_unlock_registers(); 123 esb_unlock_registers();
124 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); 124 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
125 /* Enable or Enable + Lock? */ 125 /* Enable or Enable + Lock? */
126 val = 0x02 | (nowayout ? 0x01 : 0x00); 126 val = ESB_WDT_ENABLE | (nowayout ? ESB_WDT_LOCK : 0x00);
127 pci_write_config_byte(esb_pci, ESB_LOCK_REG, val); 127 pci_write_config_byte(esb_pci, ESB_LOCK_REG, val);
128 spin_unlock(&esb_lock); 128 spin_unlock(&esb_lock);
129 return 0; 129 return 0;
@@ -143,7 +143,7 @@ static int esb_timer_stop(void)
143 spin_unlock(&esb_lock); 143 spin_unlock(&esb_lock);
144 144
145 /* Returns 0 if the timer was disabled, non-zero otherwise */ 145 /* Returns 0 if the timer was disabled, non-zero otherwise */
146 return (val & 0x01); 146 return val & ESB_WDT_ENABLE;
147} 147}
148 148
149static void esb_timer_keepalive(void) 149static void esb_timer_keepalive(void)
@@ -190,18 +190,6 @@ static int esb_timer_set_heartbeat(int time)
190 return 0; 190 return 0;
191} 191}
192 192
193static int esb_timer_read(void)
194{
195 u32 count;
196
197 /* This isn't documented, and doesn't take into
198 * acount which stage is running, but it looks
199 * like a 20 bit count down, so we might as well report it.
200 */
201 pci_read_config_dword(esb_pci, 0x64, &count);
202 return (int)count;
203}
204
205/* 193/*
206 * /dev/watchdog handling 194 * /dev/watchdog handling
207 */ 195 */
@@ -282,7 +270,7 @@ static long esb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
282 sizeof(ident)) ? -EFAULT : 0; 270 sizeof(ident)) ? -EFAULT : 0;
283 271
284 case WDIOC_GETSTATUS: 272 case WDIOC_GETSTATUS:
285 return put_user(esb_timer_read(), p); 273 return put_user(0, p);
286 274
287 case WDIOC_GETBOOTSTATUS: 275 case WDIOC_GETBOOTSTATUS:
288 return put_user(triggered, p); 276 return put_user(triggered, p);
@@ -362,8 +350,6 @@ MODULE_DEVICE_TABLE(pci, esb_pci_tbl);
362 350
363static unsigned char __devinit esb_getdevice(void) 351static unsigned char __devinit esb_getdevice(void)
364{ 352{
365 u8 val1;
366 unsigned short val2;
367 /* 353 /*
368 * Find the PCI device 354 * Find the PCI device
369 */ 355 */
@@ -371,66 +357,79 @@ static unsigned char __devinit esb_getdevice(void)
371 esb_pci = pci_get_device(PCI_VENDOR_ID_INTEL, 357 esb_pci = pci_get_device(PCI_VENDOR_ID_INTEL,
372 PCI_DEVICE_ID_INTEL_ESB_9, NULL); 358 PCI_DEVICE_ID_INTEL_ESB_9, NULL);
373 359
374 if (esb_pci) { 360 if (!esb_pci)
375 if (pci_enable_device(esb_pci)) { 361 return 0;
376 printk(KERN_ERR PFX "failed to enable device\n");
377 goto err_devput;
378 }
379 362
380 if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) { 363 if (pci_enable_device(esb_pci)) {
381 printk(KERN_ERR PFX "failed to request region\n"); 364 printk(KERN_ERR PFX "failed to enable device\n");
382 goto err_disable; 365 goto err_devput;
383 } 366 }
384 367
385 BASEADDR = pci_ioremap_bar(esb_pci, 0); 368 if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) {
386 if (BASEADDR == NULL) { 369 printk(KERN_ERR PFX "failed to request region\n");
387 /* Something's wrong here, BASEADDR has to be set */ 370 goto err_disable;
388 printk(KERN_ERR PFX "failed to get BASEADDR\n"); 371 }
389 goto err_release;
390 }
391 372
392 /* 373 BASEADDR = pci_ioremap_bar(esb_pci, 0);
393 * The watchdog has two timers, it can be setup so that the 374 if (BASEADDR == NULL) {
394 * expiry of timer1 results in an interrupt and the expiry of 375 /* Something's wrong here, BASEADDR has to be set */
395 * timer2 results in a reboot. We set it to not generate 376 printk(KERN_ERR PFX "failed to get BASEADDR\n");
396 * any interrupts as there is not much we can do with it 377 goto err_release;
397 * right now. 378 }
398 * 379
399 * We also enable reboots and set the timer frequency to 380 /* Done */
400 * the PCI clock divided by 2^15 (approx 1KHz). 381 return 1;
401 */
402 pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003);
403
404 /* Check that the WDT isn't already locked */
405 pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1);
406 if (val1 & ESB_WDT_LOCK)
407 printk(KERN_WARNING PFX "nowayout already set\n");
408
409 /* Set the timer to watchdog mode and disable it for now */
410 pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00);
411
412 /* Check if the watchdog was previously triggered */
413 esb_unlock_registers();
414 val2 = readw(ESB_RELOAD_REG);
415 triggered = (val2 & (0x01 << 9) >> 9);
416
417 /* Reset trigger flag and timers */
418 esb_unlock_registers();
419 writew((0x11 << 8), ESB_RELOAD_REG);
420
421 /* Done */
422 return 1;
423 382
424err_release: 383err_release:
425 pci_release_region(esb_pci, 0); 384 pci_release_region(esb_pci, 0);
426err_disable: 385err_disable:
427 pci_disable_device(esb_pci); 386 pci_disable_device(esb_pci);
428err_devput: 387err_devput:
429 pci_dev_put(esb_pci); 388 pci_dev_put(esb_pci);
430 }
431 return 0; 389 return 0;
432} 390}
433 391
392static void __devinit esb_initdevice(void)
393{
394 u8 val1;
395 u16 val2;
396
397 /*
398 * Config register:
399 * Bit 5 : 0 = Enable WDT_OUTPUT
400 * Bit 2 : 0 = set the timer frequency to the PCI clock
401 * divided by 2^15 (approx 1KHz).
402 * Bits 1:0 : 11 = WDT_INT_TYPE Disabled.
403 * The watchdog has two timers, it can be setup so that the
404 * expiry of timer1 results in an interrupt and the expiry of
405 * timer2 results in a reboot. We set it to not generate
406 * any interrupts as there is not much we can do with it
407 * right now.
408 */
409 pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003);
410
411 /* Check that the WDT isn't already locked */
412 pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1);
413 if (val1 & ESB_WDT_LOCK)
414 printk(KERN_WARNING PFX "nowayout already set\n");
415
416 /* Set the timer to watchdog mode and disable it for now */
417 pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00);
418
419 /* Check if the watchdog was previously triggered */
420 esb_unlock_registers();
421 val2 = readw(ESB_RELOAD_REG);
422 if (val2 & ESB_WDT_TIMEOUT)
423 triggered = WDIOF_CARDRESET;
424
425 /* Reset WDT_TIMEOUT flag and timers */
426 esb_unlock_registers();
427 writew((ESB_WDT_TIMEOUT | ESB_WDT_RELOAD), ESB_RELOAD_REG);
428
429 /* And set the correct timeout value */
430 esb_timer_set_heartbeat(heartbeat);
431}
432
434static int __devinit esb_probe(struct platform_device *dev) 433static int __devinit esb_probe(struct platform_device *dev)
435{ 434{
436 int ret; 435 int ret;
@@ -441,13 +440,17 @@ static int __devinit esb_probe(struct platform_device *dev)
441 440
442 /* Check that the heartbeat value is within its range; 441 /* Check that the heartbeat value is within its range;
443 if not reset to the default */ 442 if not reset to the default */
444 if (esb_timer_set_heartbeat(heartbeat)) { 443 if (heartbeat < 0x1 || heartbeat > 2 * 0x03ff) {
445 esb_timer_set_heartbeat(WATCHDOG_HEARTBEAT); 444 heartbeat = WATCHDOG_HEARTBEAT;
446 printk(KERN_INFO PFX 445 printk(KERN_INFO PFX
447 "heartbeat value must be 1<heartbeat<2046, using %d\n", 446 "heartbeat value must be 1<heartbeat<2046, using %d\n",
448 heartbeat); 447 heartbeat);
449 } 448 }
450 449
450 /* Initialize the watchdog and make sure it does not run */
451 esb_initdevice();
452
453 /* Register the watchdog so that userspace has access to it */
451 ret = misc_register(&esb_miscdev); 454 ret = misc_register(&esb_miscdev);
452 if (ret != 0) { 455 if (ret != 0) {
453 printk(KERN_ERR PFX 456 printk(KERN_ERR PFX
@@ -455,7 +458,6 @@ static int __devinit esb_probe(struct platform_device *dev)
455 WATCHDOG_MINOR, ret); 458 WATCHDOG_MINOR, ret);
456 goto err_unmap; 459 goto err_unmap;
457 } 460 }
458 esb_timer_stop();
459 printk(KERN_INFO PFX 461 printk(KERN_INFO PFX
460 "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n", 462 "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
461 BASEADDR, heartbeat, nowayout); 463 BASEADDR, heartbeat, nowayout);
@@ -463,11 +465,8 @@ static int __devinit esb_probe(struct platform_device *dev)
463 465
464err_unmap: 466err_unmap:
465 iounmap(BASEADDR); 467 iounmap(BASEADDR);
466/* err_release: */
467 pci_release_region(esb_pci, 0); 468 pci_release_region(esb_pci, 0);
468/* err_disable: */
469 pci_disable_device(esb_pci); 469 pci_disable_device(esb_pci);
470/* err_devput: */
471 pci_dev_put(esb_pci); 470 pci_dev_put(esb_pci);
472 return ret; 471 return ret;
473} 472}
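One detail in the i6300esb changes: the old triggered computation mixed & and >> without parentheses. Because >> binds tighter than &, the expression reduced to val2 & 1 and tested the wrong bit, which is presumably part of why the rewrite reads the named ESB_WDT_TIMEOUT bit and reports WDIOF_CARDRESET instead. A standalone check of the precedence point, using the same constant as the driver:

#include <stdio.h>

int main(void)
{
	unsigned short val2 = 1 << 9;	/* pretend the timeout bit is set */

	/* old form: ">>" binds tighter than "&", so this reduces to val2 & 1 */
	int old = (val2 & (0x01 << 9) >> 9);

	/* intended test, equivalent to checking ESB_WDT_TIMEOUT */
	int fixed = (val2 & (0x01 << 9)) >> 9;

	printf("old=%d fixed=%d\n", old, fixed);	/* prints old=0 fixed=1 */
	return 0;
}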
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 74c92d384112..ae3832110acb 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -221,7 +221,7 @@ static struct miscdevice ks8695wdt_miscdev = {
221 .fops = &ks8695wdt_fops, 221 .fops = &ks8695wdt_fops,
222}; 222};
223 223
224static int __init ks8695wdt_probe(struct platform_device *pdev) 224static int __devinit ks8695wdt_probe(struct platform_device *pdev)
225{ 225{
226 int res; 226 int res;
227 227
@@ -238,7 +238,7 @@ static int __init ks8695wdt_probe(struct platform_device *pdev)
238 return 0; 238 return 0;
239} 239}
240 240
241static int __exit ks8695wdt_remove(struct platform_device *pdev) 241static int __devexit ks8695wdt_remove(struct platform_device *pdev)
242{ 242{
243 int res; 243 int res;
244 244
@@ -276,7 +276,7 @@ static int ks8695wdt_resume(struct platform_device *pdev)
276 276
277static struct platform_driver ks8695wdt_driver = { 277static struct platform_driver ks8695wdt_driver = {
278 .probe = ks8695wdt_probe, 278 .probe = ks8695wdt_probe,
279 .remove = __exit_p(ks8695wdt_remove), 279 .remove = __devexit_p(ks8695wdt_remove),
280 .shutdown = ks8695wdt_shutdown, 280 .shutdown = ks8695wdt_shutdown,
281 .suspend = ks8695wdt_suspend, 281 .suspend = ks8695wdt_suspend,
282 .resume = ks8695wdt_resume, 282 .resume = ks8695wdt_resume,
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index aa5ad6e33f02..f2713851aaab 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -258,7 +258,7 @@ static const struct file_operations omap_wdt_fops = {
258 .release = omap_wdt_release, 258 .release = omap_wdt_release,
259}; 259};
260 260
261static int __init omap_wdt_probe(struct platform_device *pdev) 261static int __devinit omap_wdt_probe(struct platform_device *pdev)
262{ 262{
263 struct resource *res, *mem; 263 struct resource *res, *mem;
264 struct omap_wdt_dev *wdev; 264 struct omap_wdt_dev *wdev;
@@ -367,7 +367,7 @@ static void omap_wdt_shutdown(struct platform_device *pdev)
367 omap_wdt_disable(wdev); 367 omap_wdt_disable(wdev);
368} 368}
369 369
370static int omap_wdt_remove(struct platform_device *pdev) 370static int __devexit omap_wdt_remove(struct platform_device *pdev)
371{ 371{
372 struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); 372 struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
373 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 373 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -426,7 +426,7 @@ static int omap_wdt_resume(struct platform_device *pdev)
426 426
427static struct platform_driver omap_wdt_driver = { 427static struct platform_driver omap_wdt_driver = {
428 .probe = omap_wdt_probe, 428 .probe = omap_wdt_probe,
429 .remove = omap_wdt_remove, 429 .remove = __devexit_p(omap_wdt_remove),
430 .shutdown = omap_wdt_shutdown, 430 .shutdown = omap_wdt_shutdown,
431 .suspend = omap_wdt_suspend, 431 .suspend = omap_wdt_suspend,
432 .resume = omap_wdt_resume, 432 .resume = omap_wdt_resume,
diff --git a/drivers/watchdog/orion5x_wdt.c b/drivers/watchdog/orion5x_wdt.c
index e81441f103dd..7529616739d2 100644
--- a/drivers/watchdog/orion5x_wdt.c
+++ b/drivers/watchdog/orion5x_wdt.c
@@ -42,7 +42,17 @@ static unsigned int wdt_tclk;
42static unsigned long wdt_status; 42static unsigned long wdt_status;
43static spinlock_t wdt_lock; 43static spinlock_t wdt_lock;
44 44
45static void wdt_enable(void) 45static void orion5x_wdt_ping(void)
46{
47 spin_lock(&wdt_lock);
48
49 /* Reload watchdog duration */
50 writel(wdt_tclk * heartbeat, WDT_VAL);
51
52 spin_unlock(&wdt_lock);
53}
54
55static void orion5x_wdt_enable(void)
46{ 56{
47 u32 reg; 57 u32 reg;
48 58
@@ -69,7 +79,7 @@ static void wdt_enable(void)
69 spin_unlock(&wdt_lock); 79 spin_unlock(&wdt_lock);
70} 80}
71 81
72static void wdt_disable(void) 82static void orion5x_wdt_disable(void)
73{ 83{
74 u32 reg; 84 u32 reg;
75 85
@@ -101,7 +111,7 @@ static int orion5x_wdt_open(struct inode *inode, struct file *file)
101 if (test_and_set_bit(WDT_IN_USE, &wdt_status)) 111 if (test_and_set_bit(WDT_IN_USE, &wdt_status))
102 return -EBUSY; 112 return -EBUSY;
103 clear_bit(WDT_OK_TO_CLOSE, &wdt_status); 113 clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
104 wdt_enable(); 114 orion5x_wdt_enable();
105 return nonseekable_open(inode, file); 115 return nonseekable_open(inode, file);
106} 116}
107 117
@@ -122,18 +132,28 @@ static ssize_t orion5x_wdt_write(struct file *file, const char *data,
122 set_bit(WDT_OK_TO_CLOSE, &wdt_status); 132 set_bit(WDT_OK_TO_CLOSE, &wdt_status);
123 } 133 }
124 } 134 }
125 wdt_enable(); 135 orion5x_wdt_ping();
126 } 136 }
127 return len; 137 return len;
128} 138}
129 139
130static struct watchdog_info ident = { 140static int orion5x_wdt_settimeout(int new_time)
141{
142 if ((new_time <= 0) || (new_time > wdt_max_duration))
143 return -EINVAL;
144
145 /* Set new watchdog time to be used when
146 * orion5x_wdt_enable() or orion5x_wdt_ping() is called. */
147 heartbeat = new_time;
148 return 0;
149}
150
151static const struct watchdog_info ident = {
131 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | 152 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
132 WDIOF_KEEPALIVEPING, 153 WDIOF_KEEPALIVEPING,
133 .identity = "Orion5x Watchdog", 154 .identity = "Orion5x Watchdog",
134}; 155};
135 156
136
137static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, 157static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
138 unsigned long arg) 158 unsigned long arg)
139{ 159{
@@ -152,7 +172,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
152 break; 172 break;
153 173
154 case WDIOC_KEEPALIVE: 174 case WDIOC_KEEPALIVE:
155 wdt_enable(); 175 orion5x_wdt_ping();
156 ret = 0; 176 ret = 0;
157 break; 177 break;
158 178
@@ -161,12 +181,11 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
161 if (ret) 181 if (ret)
162 break; 182 break;
163 183
164 if (time <= 0 || time > wdt_max_duration) { 184 if (orion5x_wdt_settimeout(time)) {
165 ret = -EINVAL; 185 ret = -EINVAL;
166 break; 186 break;
167 } 187 }
168 heartbeat = time; 188 orion5x_wdt_ping();
169 wdt_enable();
170 /* Fall through */ 189 /* Fall through */
171 190
172 case WDIOC_GETTIMEOUT: 191 case WDIOC_GETTIMEOUT:
@@ -187,7 +206,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
187static int orion5x_wdt_release(struct inode *inode, struct file *file) 206static int orion5x_wdt_release(struct inode *inode, struct file *file)
188{ 207{
189 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) 208 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
190 wdt_disable(); 209 orion5x_wdt_disable();
191 else 210 else
192 printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " 211 printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
193 "timer will not stop\n"); 212 "timer will not stop\n");
@@ -230,7 +249,7 @@ static int __devinit orion5x_wdt_probe(struct platform_device *pdev)
230 orion5x_wdt_miscdev.parent = &pdev->dev; 249 orion5x_wdt_miscdev.parent = &pdev->dev;
231 250
232 wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; 251 wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;
233 if (heartbeat <= 0 || heartbeat > wdt_max_duration) 252 if (orion5x_wdt_settimeout(heartbeat))
234 heartbeat = wdt_max_duration; 253 heartbeat = wdt_max_duration;
235 254
236 ret = misc_register(&orion5x_wdt_miscdev); 255 ret = misc_register(&orion5x_wdt_miscdev);
@@ -247,7 +266,7 @@ static int __devexit orion5x_wdt_remove(struct platform_device *pdev)
247 int ret; 266 int ret;
248 267
249 if (test_bit(WDT_IN_USE, &wdt_status)) { 268 if (test_bit(WDT_IN_USE, &wdt_status)) {
250 wdt_disable(); 269 orion5x_wdt_disable();
251 clear_bit(WDT_IN_USE, &wdt_status); 270 clear_bit(WDT_IN_USE, &wdt_status);
252 } 271 }
253 272
@@ -258,9 +277,16 @@ static int __devexit orion5x_wdt_remove(struct platform_device *pdev)
258 return ret; 277 return ret;
259} 278}
260 279
280static void orion5x_wdt_shutdown(struct platform_device *pdev)
281{
282 if (test_bit(WDT_IN_USE, &wdt_status))
283 orion5x_wdt_disable();
284}
285
261static struct platform_driver orion5x_wdt_driver = { 286static struct platform_driver orion5x_wdt_driver = {
262 .probe = orion5x_wdt_probe, 287 .probe = orion5x_wdt_probe,
263 .remove = __devexit_p(orion5x_wdt_remove), 288 .remove = __devexit_p(orion5x_wdt_remove),
289 .shutdown = orion5x_wdt_shutdown,
264 .driver = { 290 .driver = {
265 .owner = THIS_MODULE, 291 .owner = THIS_MODULE,
266 .name = "orion5x_wdt", 292 .name = "orion5x_wdt",
@@ -285,10 +311,11 @@ MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>");
285MODULE_DESCRIPTION("Orion5x Processor Watchdog"); 311MODULE_DESCRIPTION("Orion5x Processor Watchdog");
286 312
287module_param(heartbeat, int, 0); 313module_param(heartbeat, int, 0);
288MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds"); 314MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds");
289 315
290module_param(nowayout, int, 0); 316module_param(nowayout, int, 0);
291MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); 317MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
318 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
292 319
293MODULE_LICENSE("GPL"); 320MODULE_LICENSE("GPL");
294MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 321MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
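The orion5x_wdt split into orion5x_wdt_ping()/orion5x_wdt_enable()/orion5x_wdt_settimeout() also makes the timeout arithmetic explicit: the counter is reloaded with wdt_tclk * heartbeat ticks, so the longest programmable heartbeat is WDT_MAX_CYCLE_COUNT / wdt_tclk seconds, which is exactly the bound settimeout enforces. A standalone sketch of that relation is below; the tclk value and the 32-bit counter width are assumptions for illustration only and are not taken from this patch.

#include <stdint.h>
#include <stdio.h>

#define WDT_MAX_CYCLE_COUNT 0xffffffffU	/* assumed 32-bit down-counter */

int main(void)
{
	unsigned int wdt_tclk = 166666667;	/* assumed ~166 MHz tclk, illustrative */
	unsigned int wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;	/* ~25 s */
	int heartbeat = 20;

	/* same range check as orion5x_wdt_settimeout() */
	if (heartbeat <= 0 || heartbeat > (int)wdt_max_duration)
		heartbeat = wdt_max_duration;

	/* value orion5x_wdt_ping()/..._enable() would write to WDT_VAL */
	uint64_t reload = (uint64_t)wdt_tclk * heartbeat;

	printf("max heartbeat : %u s\n", wdt_max_duration);
	printf("WDT_VAL reload: %llu ticks\n", (unsigned long long)reload);
	return 0;
}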