Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 3
-rw-r--r--  drivers/acpi/processor_perflib.c | 6
-rw-r--r--  drivers/acpi/processor_throttling.c | 32
-rw-r--r--  drivers/base/Kconfig | 7
-rw-r--r--  drivers/base/base.h | 2
-rw-r--r--  drivers/base/core.c | 7
-rw-r--r--  drivers/base/dd.c | 18
-rw-r--r--  drivers/base/firmware_class.c | 5
-rw-r--r--  drivers/base/memory.c | 29
-rw-r--r--  drivers/base/platform.c | 176
-rw-r--r--  drivers/base/power/Makefile | 4
-rw-r--r--  drivers/base/power/clock_ops.c | 431
-rw-r--r--  drivers/base/power/generic_ops.c | 39
-rw-r--r--  drivers/base/power/main.c | 82
-rw-r--r--  drivers/base/power/runtime.c | 29
-rw-r--r--  drivers/base/power/sysfs.c | 4
-rw-r--r--  drivers/base/power/wakeup.c | 1
-rw-r--r--  drivers/base/sys.c | 202
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/char/bsr.c | 2
-rw-r--r--  drivers/char/hpet.c | 6
-rw-r--r--  drivers/char/mem.c | 42
-rw-r--r--  drivers/char/raw.c | 34
-rw-r--r--  drivers/clocksource/Kconfig | 2
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/cyclone.c | 10
-rw-r--r--  drivers/clocksource/i8253.c | 88
-rw-r--r--  drivers/cpufreq/Kconfig | 23
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 255
-rw-r--r--  drivers/cpufreq/Makefile | 26
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 773
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c | 444
-rw-r--r--  drivers/cpufreq/cpufreq.c | 215
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c | 5
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c | 5
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 24
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 13
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 367
-rw-r--r--  drivers/cpufreq/elanfreq.c | 309
-rw-r--r--  drivers/cpufreq/freq_table.c | 19
-rw-r--r--  drivers/cpufreq/gx-suspmod.c | 514
-rw-r--r--  drivers/cpufreq/longhaul.c | 1024
-rw-r--r--  drivers/cpufreq/longhaul.h | 353
-rw-r--r--  drivers/cpufreq/longrun.c | 324
-rw-r--r--  drivers/cpufreq/mperf.c | 51
-rw-r--r--  drivers/cpufreq/mperf.h | 9
-rw-r--r--  drivers/cpufreq/p4-clockmod.c | 329
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 621
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 261
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 747
-rw-r--r--  drivers/cpufreq/powernow-k7.h | 43
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 1607
-rw-r--r--  drivers/cpufreq/powernow-k8.h | 222
-rw-r--r--  drivers/cpufreq/sc520_freq.c | 192
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c | 633
-rw-r--r--  drivers/cpufreq/speedstep-ich.c | 448
-rw-r--r--  drivers/cpufreq/speedstep-lib.c | 478
-rw-r--r--  drivers/cpufreq/speedstep-lib.h | 49
-rw-r--r--  drivers/cpufreq/speedstep-smi.c | 464
-rw-r--r--  drivers/firmware/Kconfig | 2
-rw-r--r--  drivers/firmware/Makefile | 2
-rw-r--r--  drivers/firmware/edd.c | 22
-rw-r--r--  drivers/firmware/efivars.c | 21
-rw-r--r--  drivers/firmware/google/Kconfig | 31
-rw-r--r--  drivers/firmware/google/Makefile | 3
-rw-r--r--  drivers/firmware/google/gsmi.c | 940
-rw-r--r--  drivers/firmware/google/memconsole.c | 166
-rw-r--r--  drivers/firmware/iscsi_ibft_find.c | 51
-rw-r--r--  drivers/infiniband/core/cma.c | 207
-rw-r--r--  drivers/infiniband/core/iwcm.c | 2
-rw-r--r--  drivers/infiniband/core/ucma.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 46
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 115
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 36
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 9
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 16
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c | 5
-rw-r--r--  drivers/input/keyboard/atakbd.c | 5
-rw-r--r--  drivers/input/mouse/atarimouse.c | 15
-rw-r--r--  drivers/lguest/Kconfig | 6
-rw-r--r--  drivers/lguest/Makefile | 2
-rw-r--r--  drivers/macintosh/via-pmu.c | 56
-rw-r--r--  drivers/message/fusion/mptbase.h | 4
-rw-r--r--  drivers/message/fusion/mptsas.c | 4
-rw-r--r--  drivers/message/fusion/mptscsih.c | 13
-rw-r--r--  drivers/message/fusion/mptspi.c | 22
-rw-r--r--  drivers/message/i2o/i2o_scsi.c | 4
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/carma/Kconfig | 17
-rw-r--r--  drivers/misc/carma/Makefile | 2
-rw-r--r--  drivers/misc/carma/carma-fpga-program.c | 1141
-rw-r--r--  drivers/misc/carma/carma-fpga.c | 1433
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 1
-rw-r--r--  drivers/misc/sgi-gru/grumain.c | 1
-rw-r--r--  drivers/misc/ti-st/Kconfig | 2
-rw-r--r--  drivers/misc/ti-st/st_core.c | 23
-rw-r--r--  drivers/misc/ti-st/st_kim.c | 1
-rw-r--r--  drivers/mtd/maps/Kconfig | 7
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/lantiq-flash.c | 251
-rw-r--r--  drivers/mtd/nand/au1550nd.c | 3
-rw-r--r--  drivers/net/Kconfig | 7
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/acenic.c | 1
-rw-r--r--  drivers/net/atarilance.c | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 1
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 7
-rw-r--r--  drivers/net/lantiq_etop.c | 805
-rw-r--r--  drivers/net/macvlan.c | 10
-rw-r--r--  drivers/of/irq.c | 2
-rw-r--r--  drivers/pci/intel-iommu.c | 1
-rw-r--r--  drivers/pci/iov.c | 1
-rw-r--r--  drivers/pci/pci.h | 37
-rw-r--r--  drivers/rtc/Kconfig | 7
-rw-r--r--  drivers/rtc/class.c | 23
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 138
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 10
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 11
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 10
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 13
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.h | 13
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 51
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 15
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 17
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 13
-rw-r--r--  drivers/scsi/bfa/bfad.c | 61
-rw-r--r--  drivers/scsi/bfa/bfad_debugfs.c | 33
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 25
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 35
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 6
-rw-r--r--  drivers/scsi/constants.c | 1
-rw-r--r--  drivers/scsi/dc395x.c | 193
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 72
-rw-r--r--  drivers/scsi/dpt_i2o.c | 6
-rw-r--r--  drivers/scsi/eata.c | 66
-rw-r--r--  drivers/scsi/eata_pio.c | 19
-rw-r--r--  drivers/scsi/esp_scsi.c | 6
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 202
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 6
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 56
-rw-r--r--  drivers/scsi/hpsa.c | 496
-rw-r--r--  drivers/scsi/hpsa.h | 15
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 11
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 3
-rw-r--r--  drivers/scsi/in2000.c | 29
-rw-r--r--  drivers/scsi/ipr.c | 158
-rw-r--r--  drivers/scsi/ipr.h | 22
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 57
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 59
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 130
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 931
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 48
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 44
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 32
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid.c | 20
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 37
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 13
-rw-r--r--  drivers/scsi/mesh.c | 3
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 42
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 49
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 5
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.h | 1
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 617
-rw-r--r--  drivers/scsi/mvsas/Kconfig | 1
-rw-r--r--  drivers/scsi/mvsas/Makefile | 1
-rw-r--r--  drivers/scsi/mvsas/mv_64xx.c | 1
-rw-r--r--  drivers/scsi/mvsas/mv_64xx.h | 1
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.c | 1
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.h | 1
-rw-r--r--  drivers/scsi/mvsas/mv_chips.h | 1
-rw-r--r--  drivers/scsi/mvsas/mv_defs.h | 3
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 67
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 383
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 8
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 54
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 21
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 43
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 151
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 97
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 85
-rw-r--r--  drivers/scsi/qla2xxx/qla_settings.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 5
-rw-r--r--  drivers/scsi/scsi_error.c | 6
-rw-r--r--  drivers/scsi/scsi_proc.c | 58
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 11
-rw-r--r--  drivers/scsi/tmscsim.c | 22
-rw-r--r--  drivers/scsi/u14-34f.c | 61
-rw-r--r--  drivers/scsi/wd33c93.c | 45
-rw-r--r--  drivers/ssb/pci.c | 16
-rw-r--r--  drivers/ssb/sprom.c | 43
-rw-r--r--  drivers/ssb/ssb_private.h | 3
-rw-r--r--  drivers/staging/pohmelfs/inode.c | 1
-rw-r--r--  drivers/target/Kconfig | 1
-rw-r--r--  drivers/target/Makefile | 2
-rw-r--r--  drivers/target/tcm_fc/Kconfig | 5
-rw-r--r--  drivers/target/tcm_fc/Makefile | 15
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 215
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 696
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 677
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 374
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 541
-rw-r--r--  drivers/tty/serial/Kconfig | 8
-rw-r--r--  drivers/tty/serial/Makefile | 1
-rw-r--r--  drivers/tty/serial/lantiq.c | 756
-rw-r--r--  drivers/uio/uio.c | 16
-rw-r--r--  drivers/uio/uio_netx.c | 19
-rw-r--r--  drivers/uio/uio_pdrv_genirq.c | 4
-rw-r--r--  drivers/usb/gadget/goku_udc.c | 1
-rw-r--r--  drivers/usb/gadget/imx_udc.c | 1
-rw-r--r--  drivers/usb/gadget/omap_udc.c | 1
-rw-r--r--  drivers/usb/gadget/pxa25x_udc.c | 1
-rw-r--r--  drivers/usb/gadget/pxa27x_udc.c | 1
-rw-r--r--  drivers/usb/host/isp1362-hcd.c | 1
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 1
-rw-r--r--  drivers/usb/storage/isd200.c | 1
-rw-r--r--  drivers/vhost/vhost.c | 2
-rw-r--r--  drivers/video/atafb.c | 2
-rw-r--r--  drivers/video/udlfb.c | 1
-rw-r--r--  drivers/watchdog/Kconfig | 6
-rw-r--r--  drivers/watchdog/Makefile | 1
-rw-r--r--  drivers/watchdog/lantiq_wdt.c | 261
-rw-r--r--  drivers/watchdog/mtx-1_wdt.c | 21
-rw-r--r--  drivers/xen/Makefile | 24
-rw-r--r--  drivers/xen/balloon.c | 25
-rw-r--r--  drivers/xen/events.c | 152
-rw-r--r--  drivers/xen/gntalloc.c | 14
-rw-r--r--  drivers/xen/gntdev.c | 16
-rw-r--r--  drivers/xen/grant-table.c | 31
-rw-r--r--  drivers/xen/manage.c | 8
-rw-r--r--  drivers/xen/sys-hypervisor.c | 2
265 files changed, 24015 insertions, 2876 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 177c7d156933..557a469c7aa6 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -119,4 +119,7 @@ source "drivers/platform/Kconfig"
 source "drivers/clk/Kconfig"
 
 source "drivers/hwspinlock/Kconfig"
+
+source "drivers/clocksource/Kconfig"
+
 endmenu
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 3a73a93596e8..85b32376dad7 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -49,10 +49,6 @@ ACPI_MODULE_NAME("processor_perflib");
 
 static DEFINE_MUTEX(performance_mutex);
 
-/* Use cpufreq debug layer for _PPC changes. */
-#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
-						"cpufreq-core", msg)
-
 /*
  * _PPC support is implemented as a CPUfreq policy notifier:
  * This means each time a CPUfreq driver registered also with
@@ -145,7 +141,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
 		return -ENODEV;
 	}
 
-	cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
+	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
 		       (int)ppc, ppc ? "" : "not");
 
 	pr->performance_platform_limit = (int)ppc;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ad3501739563..605a2954ef17 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -710,20 +710,14 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 }
 
 #ifdef CONFIG_X86
-static int acpi_throttling_rdmsr(struct acpi_processor *pr,
-				 u64 *value)
+static int acpi_throttling_rdmsr(u64 *value)
 {
-	struct cpuinfo_x86 *c;
 	u64 msr_high, msr_low;
-	unsigned int cpu;
 	u64 msr = 0;
 	int ret = -1;
 
-	cpu = pr->id;
-	c = &cpu_data(cpu);
-
-	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
-	    !cpu_has(c, X86_FEATURE_ACPI)) {
+	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
+	    !this_cpu_has(X86_FEATURE_ACPI)) {
 		printk(KERN_ERR PREFIX
 		       "HARDWARE addr space,NOT supported yet\n");
 	} else {
@@ -738,18 +732,13 @@ static int acpi_throttling_rdmsr(struct acpi_processor *pr,
 	return ret;
 }
 
-static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
+static int acpi_throttling_wrmsr(u64 value)
 {
-	struct cpuinfo_x86 *c;
-	unsigned int cpu;
 	int ret = -1;
 	u64 msr;
 
-	cpu = pr->id;
-	c = &cpu_data(cpu);
-
-	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
-	    !cpu_has(c, X86_FEATURE_ACPI)) {
+	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
+	    !this_cpu_has(X86_FEATURE_ACPI)) {
 		printk(KERN_ERR PREFIX
 		       "HARDWARE addr space,NOT supported yet\n");
 	} else {
@@ -761,15 +750,14 @@ static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
 	return ret;
 }
 #else
-static int acpi_throttling_rdmsr(struct acpi_processor *pr,
-				 u64 *value)
+static int acpi_throttling_rdmsr(u64 *value)
 {
 	printk(KERN_ERR PREFIX
 	       "HARDWARE addr space,NOT supported yet\n");
 	return -1;
 }
 
-static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
+static int acpi_throttling_wrmsr(u64 value)
 {
 	printk(KERN_ERR PREFIX
 	       "HARDWARE addr space,NOT supported yet\n");
@@ -801,7 +789,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
 		ret = 0;
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		ret = acpi_throttling_rdmsr(pr, value);
+		ret = acpi_throttling_rdmsr(value);
 		break;
 	default:
 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
@@ -834,7 +822,7 @@ static int acpi_write_throttling_state(struct acpi_processor *pr,
 		ret = 0;
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		ret = acpi_throttling_wrmsr(pr, value);
+		ret = acpi_throttling_wrmsr(value);
 		break;
 	default:
 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index e9e5238f3106..d57e8d0fb823 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -168,11 +168,4 @@ config SYS_HYPERVISOR
 	bool
 	default n
 
-config ARCH_NO_SYSDEV_OPS
-	bool
-	---help---
-	  To be selected by architectures that don't use sysdev class or
-	  sysdev driver power management (suspend/resume) and shutdown
-	  operations.
-
 endmenu
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 19f49e41ce5d..a34dca0ad041 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -111,8 +111,6 @@ static inline int driver_match_device(struct device_driver *drv,
 	return drv->bus->match ? drv->bus->match(dev, drv) : 1;
 }
 
-extern void sysdev_shutdown(void);
-
 extern char *make_class_name(const char *name, struct kobject *kobj);
 
 extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 81b78ede37c4..bc8729d603a7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -400,7 +400,7 @@ static void device_remove_groups(struct device *dev,
 static int device_add_attrs(struct device *dev)
 {
 	struct class *class = dev->class;
-	struct device_type *type = dev->type;
+	const struct device_type *type = dev->type;
 	int error;
 
 	if (class) {
@@ -440,7 +440,7 @@ static int device_add_attrs(struct device *dev)
 static void device_remove_attrs(struct device *dev)
 {
 	struct class *class = dev->class;
-	struct device_type *type = dev->type;
+	const struct device_type *type = dev->type;
 
 	device_remove_groups(dev, dev->groups);
 
@@ -1314,8 +1314,7 @@ EXPORT_SYMBOL_GPL(put_device);
 EXPORT_SYMBOL_GPL(device_create_file);
 EXPORT_SYMBOL_GPL(device_remove_file);
 
-struct root_device
-{
+struct root_device {
 	struct device dev;
 	struct module *owner;
 };
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index da57ee9d63fe..6658da743c3a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -245,6 +245,10 @@ int device_attach(struct device *dev)
 
 	device_lock(dev);
 	if (dev->driver) {
+		if (klist_node_attached(&dev->p->knode_driver)) {
+			ret = 1;
+			goto out_unlock;
+		}
 		ret = device_bind_driver(dev);
 		if (ret == 0)
 			ret = 1;
@@ -257,6 +261,7 @@ int device_attach(struct device *dev)
 		ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
 		pm_runtime_put_sync(dev);
 	}
+out_unlock:
 	device_unlock(dev);
 	return ret;
 }
@@ -316,8 +321,7 @@ static void __device_release_driver(struct device *dev)
 
 	drv = dev->driver;
 	if (drv) {
-		pm_runtime_get_noresume(dev);
-		pm_runtime_barrier(dev);
+		pm_runtime_get_sync(dev);
 
 		driver_sysfs_remove(dev);
 
@@ -326,6 +330,8 @@ static void __device_release_driver(struct device *dev)
 						     BUS_NOTIFY_UNBIND_DRIVER,
 						     dev);
 
+		pm_runtime_put_sync(dev);
+
 		if (dev->bus && dev->bus->remove)
 			dev->bus->remove(dev);
 		else if (drv->remove)
@@ -338,7 +344,6 @@ static void __device_release_driver(struct device *dev)
 						     BUS_NOTIFY_UNBOUND_DRIVER,
 						     dev);
 
-		pm_runtime_put_sync(dev);
 	}
 }
 
@@ -408,17 +413,16 @@ void *dev_get_drvdata(const struct device *dev)
 }
 EXPORT_SYMBOL(dev_get_drvdata);
 
-void dev_set_drvdata(struct device *dev, void *data)
+int dev_set_drvdata(struct device *dev, void *data)
 {
 	int error;
 
-	if (!dev)
-		return;
 	if (!dev->p) {
 		error = device_private_init(dev);
 		if (error)
-			return;
+			return error;
 	}
 	dev->p->driver_data = data;
+	return 0;
 }
 EXPORT_SYMBOL(dev_set_drvdata);
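
With dev_set_drvdata() now returning an error code, callers can detect the allocation failure inside device_private_init() instead of silently losing the pointer. A minimal sketch of a caller checking the new return value; foo_priv and foo_probe are invented for illustration and are not part of this diff:

	/* Hypothetical probe routine, only to show the new int return. */
	struct foo_priv {
		int state;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;
		int error;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		error = dev_set_drvdata(&pdev->dev, priv);
		if (error) {
			kfree(priv);	/* drvdata was never stored */
			return error;
		}
		return 0;
	}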
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8c798ef7f13f..bbb03e6f7255 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -521,6 +521,11 @@ static int _request_firmware(const struct firmware **firmware_p,
 	if (!firmware_p)
 		return -EINVAL;
 
+	if (WARN_ON(usermodehelper_is_disabled())) {
+		dev_err(device, "firmware: %s will not be loaded\n", name);
+		return -EBUSY;
+	}
+
 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
 	if (!firmware) {
 		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 3da6a43b7756..0a134a424a37 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -48,7 +48,8 @@ static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
 	return MEMORY_CLASS_NAME;
 }
 
-static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uevent_env *env)
+static int memory_uevent(struct kset *kset, struct kobject *obj,
+			struct kobj_uevent_env *env)
 {
 	int retval = 0;
 
@@ -228,10 +229,11 @@ int memory_isolate_notify(unsigned long val, void *v)
  * OK to have direct references to sparsemem variables in here.
  */
 static int
-memory_section_action(unsigned long phys_index, unsigned long action)
+memory_block_action(unsigned long phys_index, unsigned long action)
 {
 	int i;
 	unsigned long start_pfn, start_paddr;
+	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	struct page *first_page;
 	int ret;
 
@@ -243,7 +245,7 @@ memory_section_action(unsigned long phys_index, unsigned long action)
 	 * that way.
 	 */
 	if (action == MEM_ONLINE) {
-		for (i = 0; i < PAGES_PER_SECTION; i++) {
+		for (i = 0; i < nr_pages; i++) {
 			if (PageReserved(first_page+i))
 				continue;
 
@@ -257,12 +259,12 @@ memory_section_action(unsigned long phys_index, unsigned long action)
 	switch (action) {
 	case MEM_ONLINE:
 		start_pfn = page_to_pfn(first_page);
-		ret = online_pages(start_pfn, PAGES_PER_SECTION);
+		ret = online_pages(start_pfn, nr_pages);
 		break;
 	case MEM_OFFLINE:
 		start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
 		ret = remove_memory(start_paddr,
-				    PAGES_PER_SECTION << PAGE_SHIFT);
+				    nr_pages << PAGE_SHIFT);
 		break;
 	default:
 		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
@@ -276,7 +278,7 @@ memory_section_action(unsigned long phys_index, unsigned long action)
 static int memory_block_change_state(struct memory_block *mem,
 		unsigned long to_state, unsigned long from_state_req)
 {
-	int i, ret = 0;
+	int ret = 0;
 
 	mutex_lock(&mem->state_mutex);
 
@@ -288,20 +290,11 @@ static int memory_block_change_state(struct memory_block *mem,
 	if (to_state == MEM_OFFLINE)
 		mem->state = MEM_GOING_OFFLINE;
 
-	for (i = 0; i < sections_per_block; i++) {
-		ret = memory_section_action(mem->start_section_nr + i,
-					    to_state);
-		if (ret)
-			break;
-	}
-
-	if (ret) {
-		for (i = 0; i < sections_per_block; i++)
-			memory_section_action(mem->start_section_nr + i,
-					      from_state_req);
+	ret = memory_block_action(mem->start_section_nr, to_state);
 
+	if (ret)
 		mem->state = from_state_req;
-	} else
+	else
 		mem->state = to_state;
 
 out:
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e0e4fc24c46..1c291af637b3 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -192,18 +192,18 @@ EXPORT_SYMBOL_GPL(platform_device_alloc);
 int platform_device_add_resources(struct platform_device *pdev,
 				  const struct resource *res, unsigned int num)
 {
-	struct resource *r;
+	struct resource *r = NULL;
 
-	if (!res)
-		return 0;
-
-	r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
-	if (r) {
-		pdev->resource = r;
-		pdev->num_resources = num;
-		return 0;
+	if (res) {
+		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
+		if (!r)
+			return -ENOMEM;
 	}
-	return -ENOMEM;
+
+	kfree(pdev->resource);
+	pdev->resource = r;
+	pdev->num_resources = num;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(platform_device_add_resources);
 
@@ -220,17 +220,17 @@ EXPORT_SYMBOL_GPL(platform_device_add_resources);
 int platform_device_add_data(struct platform_device *pdev, const void *data,
 			     size_t size)
 {
-	void *d;
+	void *d = NULL;
 
-	if (!data)
-		return 0;
-
-	d = kmemdup(data, size, GFP_KERNEL);
-	if (d) {
-		pdev->dev.platform_data = d;
-		return 0;
+	if (data) {
+		d = kmemdup(data, size, GFP_KERNEL);
+		if (!d)
+			return -ENOMEM;
 	}
-	return -ENOMEM;
+
+	kfree(pdev->dev.platform_data);
+	pdev->dev.platform_data = d;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(platform_device_add_data);
 
@@ -667,7 +667,7 @@ static int platform_legacy_resume(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_prepare(struct device *dev)
+int platform_pm_prepare(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -678,7 +678,7 @@ static int platform_pm_prepare(struct device *dev)
 	return ret;
 }
 
-static void platform_pm_complete(struct device *dev)
+void platform_pm_complete(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 
@@ -686,16 +686,11 @@ static void platform_pm_complete(struct device *dev)
 		drv->pm->complete(dev);
 }
 
-#else /* !CONFIG_PM_SLEEP */
-
-#define platform_pm_prepare		NULL
-#define platform_pm_complete		NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
-int __weak platform_pm_suspend(struct device *dev)
+int platform_pm_suspend(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -713,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_suspend_noirq(struct device *dev)
+int platform_pm_suspend_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -729,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_resume(struct device *dev)
+int platform_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -747,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_resume_noirq(struct device *dev)
+int platform_pm_resume_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -763,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_SUSPEND */
-
-#define platform_pm_suspend		NULL
-#define platform_pm_resume		NULL
-#define platform_pm_suspend_noirq	NULL
-#define platform_pm_resume_noirq	NULL
-
-#endif /* !CONFIG_SUSPEND */
+#endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 
-static int platform_pm_freeze(struct device *dev)
+int platform_pm_freeze(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -792,7 +780,7 @@ static int platform_pm_freeze(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_freeze_noirq(struct device *dev)
+int platform_pm_freeze_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -808,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_thaw(struct device *dev)
+int platform_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -826,7 +814,7 @@ static int platform_pm_thaw(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_thaw_noirq(struct device *dev)
+int platform_pm_thaw_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -842,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_poweroff(struct device *dev)
+int platform_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -860,7 +848,7 @@ static int platform_pm_poweroff(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_poweroff_noirq(struct device *dev)
+int platform_pm_poweroff_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -876,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_restore(struct device *dev)
+int platform_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -894,7 +882,7 @@ static int platform_pm_restore(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_restore_noirq(struct device *dev)
+int platform_pm_restore_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -910,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#define platform_pm_freeze		NULL
-#define platform_pm_thaw		NULL
-#define platform_pm_poweroff		NULL
-#define platform_pm_restore		NULL
-#define platform_pm_freeze_noirq	NULL
-#define platform_pm_thaw_noirq		NULL
-#define platform_pm_poweroff_noirq	NULL
-#define platform_pm_restore_noirq	NULL
-
-#endif /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#ifdef CONFIG_PM_RUNTIME
-
-int __weak platform_pm_runtime_suspend(struct device *dev)
-{
-	return pm_generic_runtime_suspend(dev);
-};
-
-int __weak platform_pm_runtime_resume(struct device *dev)
-{
-	return pm_generic_runtime_resume(dev);
-};
-
-int __weak platform_pm_runtime_idle(struct device *dev)
-{
-	return pm_generic_runtime_idle(dev);
-};
-
-#else /* !CONFIG_PM_RUNTIME */
-
-#define platform_pm_runtime_suspend NULL
-#define platform_pm_runtime_resume NULL
-#define platform_pm_runtime_idle NULL
-
-#endif /* !CONFIG_PM_RUNTIME */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 static const struct dev_pm_ops platform_dev_pm_ops = {
-	.prepare = platform_pm_prepare,
-	.complete = platform_pm_complete,
-	.suspend = platform_pm_suspend,
-	.resume = platform_pm_resume,
-	.freeze = platform_pm_freeze,
-	.thaw = platform_pm_thaw,
-	.poweroff = platform_pm_poweroff,
-	.restore = platform_pm_restore,
-	.suspend_noirq = platform_pm_suspend_noirq,
-	.resume_noirq = platform_pm_resume_noirq,
-	.freeze_noirq = platform_pm_freeze_noirq,
-	.thaw_noirq = platform_pm_thaw_noirq,
-	.poweroff_noirq = platform_pm_poweroff_noirq,
-	.restore_noirq = platform_pm_restore_noirq,
-	.runtime_suspend = platform_pm_runtime_suspend,
-	.runtime_resume = platform_pm_runtime_resume,
-	.runtime_idle = platform_pm_runtime_idle,
+	.runtime_suspend = pm_generic_runtime_suspend,
+	.runtime_resume = pm_generic_runtime_resume,
+	.runtime_idle = pm_generic_runtime_idle,
+	USE_PLATFORM_PM_SLEEP_OPS
 };
 
 struct bus_type platform_bus_type = {
@@ -977,41 +916,6 @@ struct bus_type platform_bus_type = {
 };
 EXPORT_SYMBOL_GPL(platform_bus_type);
 
-/**
- * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops
- *
- * This function can be used by platform code to get the current
- * set of dev_pm_ops functions used by the platform_bus_type.
- */
-const struct dev_pm_ops * __init platform_bus_get_pm_ops(void)
-{
-	return platform_bus_type.pm;
-}
-
-/**
- * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type
- *
- * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type
- *
- * Platform code can override the dev_pm_ops methods of
- * platform_bus_type by using this function.  It is expected that
- * platform code will first do a platform_bus_get_pm_ops(), then
- * kmemdup it, then customize selected methods and pass a pointer to
- * the new struct dev_pm_ops to this function.
- *
- * Since platform-specific code is customizing methods for *all*
- * devices (not just platform-specific devices) it is expected that
- * any custom overrides of these functions will keep existing behavior
- * and simply extend it.  For example, any customization of the
- * runtime PM methods should continue to call the pm_generic_*
- * functions as the default ones do in addition to the
- * platform-specific behavior.
- */
-void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm)
-{
-	platform_bus_type.pm = pm;
-}
-
 int __init platform_bus_init(void)
 {
 	int error;
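
With the get/set hooks for the platform bus's dev_pm_ops removed, callbacks come either from a power domain or from the driver itself. A hedged sketch of a platform driver supplying its own sleep callbacks; all foo_* names are invented for illustration:

/* Illustrative driver-side dev_pm_ops; not part of this diff. */
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reinitialize the hardware here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name  = "foo",
		.owner = THIS_MODULE,
		.pm    = &foo_pm_ops,
	},
};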
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 118c1b92a511..3647e114d0e7 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
+obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
-ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 000000000000..c0dd09df7be8
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,431 @@
+/*
+ * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
+ *
+ * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#ifdef CONFIG_PM_RUNTIME
+
+struct pm_runtime_clk_data {
+	struct list_head clock_list;
+	struct mutex lock;
+};
+
+enum pce_status {
+	PCE_STATUS_NONE = 0,
+	PCE_STATUS_ACQUIRED,
+	PCE_STATUS_ENABLED,
+	PCE_STATUS_ERROR,
+};
+
+struct pm_clock_entry {
+	struct list_head node;
+	char *con_id;
+	struct clk *clk;
+	enum pce_status status;
+};
+
+static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+{
+	return dev ? dev->power.subsys_data : NULL;
+}
+
+/**
+ * pm_runtime_clk_add - Start using a device clock for runtime PM.
+ * @dev: Device whose clock is going to be used for runtime PM.
+ * @con_id: Connection ID of the clock.
+ *
+ * Add the clock represented by @con_id to the list of clocks used for
+ * the runtime PM of @dev.
+ */
+int pm_runtime_clk_add(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return -EINVAL;
+
+	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+	if (!ce) {
+		dev_err(dev, "Not enough memory for clock entry.\n");
+		return -ENOMEM;
+	}
+
+	if (con_id) {
+		ce->con_id = kstrdup(con_id, GFP_KERNEL);
+		if (!ce->con_id) {
+			dev_err(dev,
+				"Not enough memory for clock connection ID.\n");
+			kfree(ce);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&prd->lock);
+	list_add_tail(&ce->node, &prd->clock_list);
+	mutex_unlock(&prd->lock);
+	return 0;
+}
+
+/**
+ * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
+ * @ce: Runtime PM clock entry to destroy.
+ *
+ * This routine must be called under the mutex protecting the runtime PM list
+ * of clocks corresponding to the @ce's device.
+ */
+static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+{
+	if (!ce)
+		return;
+
+	list_del(&ce->node);
+
+	if (ce->status < PCE_STATUS_ERROR) {
+		if (ce->status == PCE_STATUS_ENABLED)
+			clk_disable(ce->clk);
+
+		if (ce->status >= PCE_STATUS_ACQUIRED)
+			clk_put(ce->clk);
+	}
+
+	if (ce->con_id)
+		kfree(ce->con_id);
+
+	kfree(ce);
+}
+
+/**
+ * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
+ * @dev: Device whose clock should not be used for runtime PM any more.
+ * @con_id: Connection ID of the clock.
+ *
+ * Remove the clock represented by @con_id from the list of clocks used for
+ * the runtime PM of @dev.
+ */
+void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (!con_id && !ce->con_id) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		} else if (!con_id || !ce->con_id) {
+			continue;
+		} else if (!strcmp(con_id, ce->con_id)) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+}
+
+/**
+ * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
+ * @dev: Device to initialize the list of runtime PM clocks for.
+ *
+ * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * make the @dev's power.subsys_data field point to it.
+ */
+int pm_runtime_clk_init(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd;
+
+	prd = kzalloc(sizeof(*prd), GFP_KERNEL);
+	if (!prd) {
+		dev_err(dev, "Not enough memory for runtime PM data.\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&prd->clock_list);
+	mutex_init(&prd->lock);
+	dev->power.subsys_data = prd;
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
+ * @dev: Device to destroy the list of runtime PM clocks for.
+ *
+ * Clear the @dev's power.subsys_data field, remove the list of clock entries
+ * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * that object.
+ */
+void pm_runtime_clk_destroy(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce, *c;
+
+	if (!prd)
+		return;
+
+	dev->power.subsys_data = NULL;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
+		__pm_runtime_clk_remove(ce);
+
+	mutex_unlock(&prd->lock);
+
+	kfree(prd);
+}
+
+/**
+ * pm_runtime_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @con_id: Connection ID of the clock.
+ */
+static void pm_runtime_clk_acquire(struct device *dev,
+				   struct pm_clock_entry *ce)
+{
+	ce->clk = clk_get(dev, ce->con_id);
+	if (IS_ERR(ce->clk)) {
+		ce->status = PCE_STATUS_ERROR;
+	} else {
+		ce->status = PCE_STATUS_ACQUIRED;
+		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+	}
+}
+
+/**
+ * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_runtime_clk_suspend(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_disable(ce->clk);
+			ce->status = PCE_STATUS_ACQUIRED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_runtime_clk_resume(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_enable(ce->clk);
+			ce->status = PCE_STATUS_ENABLED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pwr_domain member of that object is copied to the device's
+ * pwr_domain field and its con_ids member is used to populate the device's list
+ * of runtime PM clocks, depending on @action.
+ *
+ * If the device's pwr_domain field is already populated with a value different
+ * from the one stored in the struct pm_clk_notifier_block object, the function
+ * does nothing.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char *con_id;
+	int error;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (dev->pwr_domain)
+			break;
+
+		error = pm_runtime_clk_init(dev);
+		if (error)
+			break;
+
+		dev->pwr_domain = clknb->pwr_domain;
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				pm_runtime_clk_add(dev, con_id);
+		} else {
+			pm_runtime_clk_add(dev, NULL);
+		}
+
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (dev->pwr_domain != clknb->pwr_domain)
+			break;
+
+		dev->pwr_domain = NULL;
+		pm_runtime_clk_destroy(dev);
+		break;
+	}
+
+	return 0;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+/**
+ * enable_clock - Enable a device clock.
+ * @dev: Device whose clock is to be enabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void enable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_enable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced on.\n");
+	}
+}
+
+/**
+ * disable_clock - Disable a device clock.
+ * @dev: Device whose clock is to be disabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void disable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced off.\n");
+	}
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the con_ids member of that object is used to enable or disable
+ * the device's clocks, depending on @action.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char *con_id;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				enable_clock(dev, con_id);
+		} else {
+			enable_clock(dev, NULL);
+		}
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				disable_clock(dev, con_id);
+		} else {
+			disable_clock(dev, NULL);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+/**
+ * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * @bus: Bus type to add the notifier to.
+ * @clknb: Notifier to be added to the given bus type.
+ *
+ * The nb member of @clknb is not expected to be initialized and its
+ * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * the remaining members of @clknb should be populated prior to calling this
+ * routine.
+ */
+void pm_runtime_clk_add_notifier(struct bus_type *bus,
+				 struct pm_clk_notifier_block *clknb)
+{
+	if (!bus || !clknb)
+		return;
+
+	clknb->nb.notifier_call = pm_runtime_clk_notify;
+	bus_register_notifier(bus, &clknb->nb);
+}
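
A hedged sketch of how SoC code might wire a bus type to this new facility via pm_runtime_clk_add_notifier(). The foo_* names and the power-domain callbacks are illustrative assumptions; the pattern of filling a pm_clk_notifier_block and registering it is what the kerneldoc above describes:

/* Illustrative platform code; not part of this diff. */
static int foo_runtime_suspend(struct device *dev)
{
	return pm_runtime_clk_suspend(dev);
}

static int foo_runtime_resume(struct device *dev)
{
	return pm_runtime_clk_resume(dev);
}

static struct dev_power_domain foo_power_domain = {
	.ops = {
		.runtime_suspend = foo_runtime_suspend,
		.runtime_resume = foo_runtime_resume,
	},
};

static struct pm_clk_notifier_block platform_bus_notifier = {
	.pwr_domain = &foo_power_domain,
	.con_ids = { NULL, },	/* manage the device's default clock */
};

static int __init foo_pm_init(void)
{
	pm_runtime_clk_add_notifier(&platform_bus_type,
				    &platform_bus_notifier);
	return 0;
}
core_initcall(foo_pm_init);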
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 42f97f925629..cb3bb368681c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -74,6 +74,23 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
 
 #ifdef CONFIG_PM_SLEEP
 /**
+ * pm_generic_prepare - Generic routine preparing a device for power transition.
+ * @dev: Device to prepare.
+ *
+ * Prepare a device for a system-wide power transition.
+ */
+int pm_generic_prepare(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (drv && drv->pm && drv->pm->prepare)
+		ret = drv->pm->prepare(dev);
+
+	return ret;
+}
+
+/**
  * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
  * @dev: Device to handle.
  * @event: PM transition of the system under way.
@@ -213,16 +230,38 @@ int pm_generic_restore(struct device *dev)
 	return __pm_generic_resume(dev, PM_EVENT_RESTORE);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
+
+/**
+ * pm_generic_complete - Generic routine completing a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition.
+ */
+void pm_generic_complete(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->pm && drv->pm->complete)
+		drv->pm->complete(dev);
+
+	/*
+	 * Let runtime PM try to suspend devices that haven't been in use before
+	 * going into the system-wide sleep state we're resuming from.
+	 */
+	pm_runtime_idle(dev);
+}
 #endif /* CONFIG_PM_SLEEP */
 
 struct dev_pm_ops generic_subsys_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
+	.prepare = pm_generic_prepare,
 	.suspend = pm_generic_suspend,
 	.resume = pm_generic_resume,
 	.freeze = pm_generic_freeze,
 	.thaw = pm_generic_thaw,
 	.poweroff = pm_generic_poweroff,
 	.restore = pm_generic_restore,
+	.complete = pm_generic_complete,
#endif
#ifdef CONFIG_PM_RUNTIME
 	.runtime_suspend = pm_generic_runtime_suspend,
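
A subsystem that wants exactly this stock behavior, now including .prepare and .complete, can adopt the table wholesale. A minimal sketch under that assumption; foo_bus_type is invented:

/* Illustrative only; not part of this diff. */
struct bus_type foo_bus_type = {
	.name = "foo",
	.pm = &generic_subsys_pm_ops,
};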
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index abe3ab709e87..aa6320207745 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "EARLY power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
-	if (dev->type && dev->type->pm) {
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
 	}
 
 	if (dev->type && dev->type->pm) {
@@ -580,11 +579,13 @@ static bool is_async(struct device *dev)
  * Execute the appropriate "resume" callback for all devices whose status
  * indicates that they are suspended.
  */
-static void dpm_resume(pm_message_t state)
+void dpm_resume(pm_message_t state)
 {
 	struct device *dev;
 	ktime_t starttime = ktime_get();
 
+	might_sleep();
+
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
@@ -629,12 +630,11 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
 
-	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
+	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "completing power domain ");
-		dev->pwr_domain->ops.complete(dev);
-	}
-
-	if (dev->type && dev->type->pm) {
+		if (dev->pwr_domain->ops.complete)
+			dev->pwr_domain->ops.complete(dev);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
 		if (dev->type->pm->complete)
 			dev->type->pm->complete(dev);
@@ -658,10 +658,12 @@ static void device_complete(struct device *dev, pm_message_t state)
  * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
-static void dpm_complete(pm_message_t state)
+void dpm_complete(pm_message_t state)
 {
 	struct list_head list;
 
+	might_sleep();
+
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
 	while (!list_empty(&dpm_prepared_list)) {
@@ -690,7 +692,6 @@ static void dpm_complete(pm_message_t state)
 */
 void dpm_resume_end(pm_message_t state)
 {
-	might_sleep();
 	dpm_resume(state);
 	dpm_complete(state);
 }
@@ -732,7 +733,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 	int error;
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "LATE power domain ");
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		if (error)
+			return error;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "LATE type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 		if (error)
@@ -749,11 +755,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 		return error;
 	}
 
-	if (dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
 	return 0;
 }
 
@@ -841,21 +842,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto End;
 	}
 
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "power domain ");
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
+	}
+
 	if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "type ");
 		error = pm_op(dev, dev->type->pm, state);
-		goto Domain;
+		goto End;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
 			error = pm_op(dev, dev->class->pm, state);
-			goto Domain;
+			goto End;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
-			goto Domain;
+			goto End;
 		}
 	}
 
@@ -869,12 +876,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		}
 	}
 
- Domain:
-	if (!error && dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
-	}
-
  End:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -914,11 +915,13 @@ static int device_suspend(struct device *dev)
  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
  * @state: PM transition of the system being carried out.
 */
-static int dpm_suspend(pm_message_t state)
+int dpm_suspend(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
+	might_sleep();
+
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
@@ -965,7 +968,14 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "preparing power domain ");
+		if (dev->pwr_domain->ops.prepare)
+			error = dev->pwr_domain->ops.prepare(dev);
+		suspend_report_result(dev->pwr_domain->ops.prepare, error);
+		if (error)
+			goto End;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "preparing type ");
 		if (dev->type->pm->prepare)
 			error = dev->type->pm->prepare(dev);
@@ -984,13 +994,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
 		if (dev->bus->pm->prepare)
 			error = dev->bus->pm->prepare(dev);
986 suspend_report_result(dev->bus->pm->prepare, error); 996 suspend_report_result(dev->bus->pm->prepare, error);
987 if (error)
988 goto End;
989 }
990
991 if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
992 pm_dev_dbg(dev, state, "preparing power domain ");
993 dev->pwr_domain->ops.prepare(dev);
994 } 997 }
995 998
996 End: 999 End:
@@ -1005,10 +1008,12 @@ static int device_prepare(struct device *dev, pm_message_t state)
1005 * 1008 *
1006 * Execute the ->prepare() callback(s) for all devices. 1009 * Execute the ->prepare() callback(s) for all devices.
1007 */ 1010 */
1008static int dpm_prepare(pm_message_t state) 1011int dpm_prepare(pm_message_t state)
1009{ 1012{
1010 int error = 0; 1013 int error = 0;
1011 1014
1015 might_sleep();
1016
1012 mutex_lock(&dpm_list_mtx); 1017 mutex_lock(&dpm_list_mtx);
1013 while (!list_empty(&dpm_list)) { 1018 while (!list_empty(&dpm_list)) {
1014 struct device *dev = to_device(dpm_list.next); 1019 struct device *dev = to_device(dpm_list.next);
@@ -1057,7 +1062,6 @@ int dpm_suspend_start(pm_message_t state)
1057{ 1062{
1058 int error; 1063 int error;
1059 1064
1060 might_sleep();
1061 error = dpm_prepare(state); 1065 error = dpm_prepare(state);
1062 if (!error) 1066 if (!error)
1063 error = dpm_suspend(state); 1067 error = dpm_suspend(state);
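Taken together, the main.c hunks above settle on a single dispatch order for system sleep callbacks: a device attached to a power domain now gets its domain ops called instead of (not in addition to) the type/class/bus callbacks, which is why the old Domain: fall-through label disappears. A minimal sketch of the resulting precedence, assuming the pm_op() helper and the struct device fields used in drivers/base/power/main.c:

	/* Sketch only, not the kernel source; the legacy dev->class->suspend
	 * and dev->bus handling are omitted for brevity. */
	static int sketch_suspend(struct device *dev, pm_message_t state)
	{
		if (dev->pwr_domain)			/* domain overrides all */
			return pm_op(dev, &dev->pwr_domain->ops, state);
		if (dev->type && dev->type->pm)		/* then the device type */
			return pm_op(dev, dev->type->pm, state);
		if (dev->class && dev->class->pm)	/* then the class */
			return pm_op(dev, dev->class->pm, state);
		if (dev->bus && dev->bus->pm)		/* then the bus */
			return pm_op(dev, dev->bus->pm, state);
		return 0;
	}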
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3172c60d23a9..0d4587b15c55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev)
168static int rpm_idle(struct device *dev, int rpmflags) 168static int rpm_idle(struct device *dev, int rpmflags)
169{ 169{
170 int (*callback)(struct device *); 170 int (*callback)(struct device *);
171 int (*domain_callback)(struct device *);
172 int retval; 171 int retval;
173 172
174 retval = rpm_check_suspend_allowed(dev); 173 retval = rpm_check_suspend_allowed(dev);
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
214 213
215 dev->power.idle_notification = true; 214 dev->power.idle_notification = true;
216 215
217 if (dev->type && dev->type->pm) 216 if (dev->pwr_domain)
217 callback = dev->pwr_domain->ops.runtime_idle;
218 else if (dev->type && dev->type->pm)
218 callback = dev->type->pm->runtime_idle; 219 callback = dev->type->pm->runtime_idle;
219 else if (dev->class && dev->class->pm) 220 else if (dev->class && dev->class->pm)
220 callback = dev->class->pm->runtime_idle; 221 callback = dev->class->pm->runtime_idle;
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
223 else 224 else
224 callback = NULL; 225 callback = NULL;
225 226
226 if (dev->pwr_domain) 227 if (callback) {
227 domain_callback = dev->pwr_domain->ops.runtime_idle;
228 else
229 domain_callback = NULL;
230
231 if (callback || domain_callback) {
232 spin_unlock_irq(&dev->power.lock); 228 spin_unlock_irq(&dev->power.lock);
233 229
234 if (domain_callback) 230 callback(dev);
235 retval = domain_callback(dev);
236
237 if (!retval && callback)
238 callback(dev);
239 231
240 spin_lock_irq(&dev->power.lock); 232 spin_lock_irq(&dev->power.lock);
241 } 233 }
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
382 374
383 __update_runtime_status(dev, RPM_SUSPENDING); 375 __update_runtime_status(dev, RPM_SUSPENDING);
384 376
385 if (dev->type && dev->type->pm) 377 if (dev->pwr_domain)
378 callback = dev->pwr_domain->ops.runtime_suspend;
379 else if (dev->type && dev->type->pm)
386 callback = dev->type->pm->runtime_suspend; 380 callback = dev->type->pm->runtime_suspend;
387 else if (dev->class && dev->class->pm) 381 else if (dev->class && dev->class->pm)
388 callback = dev->class->pm->runtime_suspend; 382 callback = dev->class->pm->runtime_suspend;
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
400 else 394 else
401 pm_runtime_cancel_pending(dev); 395 pm_runtime_cancel_pending(dev);
402 } else { 396 } else {
403 if (dev->pwr_domain)
404 rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
405 no_callback: 397 no_callback:
406 __update_runtime_status(dev, RPM_SUSPENDED); 398 __update_runtime_status(dev, RPM_SUSPENDED);
407 pm_runtime_deactivate_timer(dev); 399 pm_runtime_deactivate_timer(dev);
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
582 __update_runtime_status(dev, RPM_RESUMING); 574 __update_runtime_status(dev, RPM_RESUMING);
583 575
584 if (dev->pwr_domain) 576 if (dev->pwr_domain)
585 rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); 577 callback = dev->pwr_domain->ops.runtime_resume;
586 578 else if (dev->type && dev->type->pm)
587 if (dev->type && dev->type->pm)
588 callback = dev->type->pm->runtime_resume; 579 callback = dev->type->pm->runtime_resume;
589 else if (dev->class && dev->class->pm) 580 else if (dev->class && dev->class->pm)
590 callback = dev->class->pm->runtime_resume; 581 callback = dev->class->pm->runtime_resume;
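The runtime.c hunks apply the same rule to runtime PM: instead of running a separate domain_callback next to the subsystem callback, rpm_idle(), rpm_suspend() and rpm_resume() now pick exactly one function, with the power domain first in line. A sketch of the shared selection pattern, shown for runtime_suspend (idle and resume are analogous):

	/* Sketch of the unified selection after these hunks. */
	int (*callback)(struct device *);
	int retval;

	if (dev->pwr_domain)
		callback = dev->pwr_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);	/* runs the one callback, if any */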
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index fff49bee781d..a9f5b8979611 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -212,8 +212,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
212static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, 212static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
213 autosuspend_delay_ms_store); 213 autosuspend_delay_ms_store);
214 214
215#endif 215#endif /* CONFIG_PM_RUNTIME */
216 216
217#ifdef CONFIG_PM_SLEEP
217static ssize_t 218static ssize_t
218wake_show(struct device * dev, struct device_attribute *attr, char * buf) 219wake_show(struct device * dev, struct device_attribute *attr, char * buf)
219{ 220{
@@ -248,7 +249,6 @@ wake_store(struct device * dev, struct device_attribute *attr,
248 249
249static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); 250static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
250 251
251#ifdef CONFIG_PM_SLEEP
252static ssize_t wakeup_count_show(struct device *dev, 252static ssize_t wakeup_count_show(struct device *dev,
253 struct device_attribute *attr, char *buf) 253 struct device_attribute *attr, char *buf)
254{ 254{
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index abbbd33e8d8a..84f7c7d5a098 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -110,7 +110,6 @@ void wakeup_source_add(struct wakeup_source *ws)
110 spin_lock_irq(&events_lock); 110 spin_lock_irq(&events_lock);
111 list_add_rcu(&ws->entry, &wakeup_sources); 111 list_add_rcu(&ws->entry, &wakeup_sources);
112 spin_unlock_irq(&events_lock); 112 spin_unlock_irq(&events_lock);
113 synchronize_rcu();
114} 113}
115EXPORT_SYMBOL_GPL(wakeup_source_add); 114EXPORT_SYMBOL_GPL(wakeup_source_add);
116 115
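Dropping the synchronize_rcu() here is safe because inserting into an RCU-protected list never needs a grace period: a concurrent reader either sees the new entry or it does not, and both outcomes are correct. A grace period is only required between removal and freeing, as in this hedged sketch:

	/* Insertion: publish the entry and return, no waiting needed. */
	list_add_rcu(&ws->entry, &wakeup_sources);

	/* Removal: readers may still be traversing the old pointer, so
	 * wait for them to finish before the memory can be reused. */
	list_del_rcu(&ws->entry);
	synchronize_rcu();
	kfree(ws);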
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index acde9b5ee131..9dff77bfe1e3 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -328,203 +328,8 @@ void sysdev_unregister(struct sys_device *sysdev)
328 kobject_put(&sysdev->kobj); 328 kobject_put(&sysdev->kobj);
329} 329}
330 330
331 331EXPORT_SYMBOL_GPL(sysdev_register);
332#ifndef CONFIG_ARCH_NO_SYSDEV_OPS 332EXPORT_SYMBOL_GPL(sysdev_unregister);
333/**
334 * sysdev_shutdown - Shut down all system devices.
335 *
336 * Loop over each class of system devices, and the devices in each
337 * of those classes. For each device, we call the shutdown method for
338 * each driver registered for the device - the auxiliaries,
339 * and the class driver.
340 *
341 * Note: The list is iterated in reverse order, so that we shut down
342 * child devices before we shut down their parents. The list ordering
343 * is guaranteed by virtue of the fact that child devices are registered
344 * after their parents.
345 */
346void sysdev_shutdown(void)
347{
348 struct sysdev_class *cls;
349
350 pr_debug("Shutting Down System Devices\n");
351
352 mutex_lock(&sysdev_drivers_lock);
353 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
354 struct sys_device *sysdev;
355
356 pr_debug("Shutting down type '%s':\n",
357 kobject_name(&cls->kset.kobj));
358
359 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
360 struct sysdev_driver *drv;
361 pr_debug(" %s\n", kobject_name(&sysdev->kobj));
362
363 /* Call auxiliary drivers first */
364 list_for_each_entry(drv, &cls->drivers, entry) {
365 if (drv->shutdown)
366 drv->shutdown(sysdev);
367 }
368
369 /* Now call the generic one */
370 if (cls->shutdown)
371 cls->shutdown(sysdev);
372 }
373 }
374 mutex_unlock(&sysdev_drivers_lock);
375}
376
377static void __sysdev_resume(struct sys_device *dev)
378{
379 struct sysdev_class *cls = dev->cls;
380 struct sysdev_driver *drv;
381
382 /* First, call the class-specific one */
383 if (cls->resume)
384 cls->resume(dev);
385 WARN_ONCE(!irqs_disabled(),
386 "Interrupts enabled after %pF\n", cls->resume);
387
388 /* Call auxiliary drivers next. */
389 list_for_each_entry(drv, &cls->drivers, entry) {
390 if (drv->resume)
391 drv->resume(dev);
392 WARN_ONCE(!irqs_disabled(),
393 "Interrupts enabled after %pF\n", drv->resume);
394 }
395}
396
397/**
398 * sysdev_suspend - Suspend all system devices.
399 * @state: Power state to enter.
400 *
401 * We perform an almost identical operation to sysdev_shutdown()
402 * above, though calling ->suspend() instead. Interrupts are disabled
403 * when this is called. Devices are responsible for both saving state and
404 * quiescing or powering down the device.
405 *
406 * This is only called by the device PM core, so we let them handle
407 * all synchronization.
408 */
409int sysdev_suspend(pm_message_t state)
410{
411 struct sysdev_class *cls;
412 struct sys_device *sysdev, *err_dev;
413 struct sysdev_driver *drv, *err_drv;
414 int ret;
415
416 pr_debug("Checking wake-up interrupts\n");
417
418 /* Return error code if there are any wake-up interrupts pending */
419 ret = check_wakeup_irqs();
420 if (ret)
421 return ret;
422
423 WARN_ONCE(!irqs_disabled(),
424 "Interrupts enabled while suspending system devices\n");
425
426 pr_debug("Suspending System Devices\n");
427
428 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
429 pr_debug("Suspending type '%s':\n",
430 kobject_name(&cls->kset.kobj));
431
432 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
433 pr_debug(" %s\n", kobject_name(&sysdev->kobj));
434
435 /* Call auxiliary drivers first */
436 list_for_each_entry(drv, &cls->drivers, entry) {
437 if (drv->suspend) {
438 ret = drv->suspend(sysdev, state);
439 if (ret)
440 goto aux_driver;
441 }
442 WARN_ONCE(!irqs_disabled(),
443 "Interrupts enabled after %pF\n",
444 drv->suspend);
445 }
446
447 /* Now call the generic one */
448 if (cls->suspend) {
449 ret = cls->suspend(sysdev, state);
450 if (ret)
451 goto cls_driver;
452 WARN_ONCE(!irqs_disabled(),
453 "Interrupts enabled after %pF\n",
454 cls->suspend);
455 }
456 }
457 }
458 return 0;
459 /* resume current sysdev */
460cls_driver:
461 drv = NULL;
462 printk(KERN_ERR "Class suspend failed for %s: %d\n",
463 kobject_name(&sysdev->kobj), ret);
464
465aux_driver:
466 if (drv)
467 printk(KERN_ERR "Class driver suspend failed for %s: %d\n",
468 kobject_name(&sysdev->kobj), ret);
469 list_for_each_entry(err_drv, &cls->drivers, entry) {
470 if (err_drv == drv)
471 break;
472 if (err_drv->resume)
473 err_drv->resume(sysdev);
474 }
475
476 /* resume other sysdevs in current class */
477 list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
478 if (err_dev == sysdev)
479 break;
480 pr_debug(" %s\n", kobject_name(&err_dev->kobj));
481 __sysdev_resume(err_dev);
482 }
483
484 /* resume other classes */
485 list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) {
486 list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
487 pr_debug(" %s\n", kobject_name(&err_dev->kobj));
488 __sysdev_resume(err_dev);
489 }
490 }
491 return ret;
492}
493EXPORT_SYMBOL_GPL(sysdev_suspend);
494
495/**
496 * sysdev_resume - Bring system devices back to life.
497 *
498 * Similar to sysdev_suspend(), but we iterate the list forwards
499 * to guarantee that parent devices are resumed before their children.
500 *
501 * Note: Interrupts are disabled when called.
502 */
503int sysdev_resume(void)
504{
505 struct sysdev_class *cls;
506
507 WARN_ONCE(!irqs_disabled(),
508 "Interrupts enabled while resuming system devices\n");
509
510 pr_debug("Resuming System Devices\n");
511
512 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {
513 struct sys_device *sysdev;
514
515 pr_debug("Resuming type '%s':\n",
516 kobject_name(&cls->kset.kobj));
517
518 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
519 pr_debug(" %s\n", kobject_name(&sysdev->kobj));
520
521 __sysdev_resume(sysdev);
522 }
523 }
524 return 0;
525}
526EXPORT_SYMBOL_GPL(sysdev_resume);
527#endif /* CONFIG_ARCH_NO_SYSDEV_OPS */
528 333
529int __init system_bus_init(void) 334int __init system_bus_init(void)
530{ 335{
@@ -534,9 +339,6 @@ int __init system_bus_init(void)
534 return 0; 339 return 0;
535} 340}
536 341
537EXPORT_SYMBOL_GPL(sysdev_register);
538EXPORT_SYMBOL_GPL(sysdev_unregister);
539
540#define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) 342#define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr)
541 343
542ssize_t sysdev_store_ulong(struct sys_device *sysdev, 344ssize_t sysdev_store_ulong(struct sys_device *sysdev,
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ad59b4e0a9b5..49502bc5360a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -523,7 +523,7 @@ config RAW_DRIVER
523 with the O_DIRECT flag. 523 with the O_DIRECT flag.
524 524
525config MAX_RAW_DEVS 525config MAX_RAW_DEVS
526 int "Maximum number of RAW devices to support (1-8192)" 526 int "Maximum number of RAW devices to support (1-65536)"
527 depends on RAW_DRIVER 527 depends on RAW_DRIVER
528 default "256" 528 default "256"
529 help 529 help
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index a4a6c2f044b5..cf39bc08ce08 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -295,7 +295,7 @@ static int bsr_create_devs(struct device_node *bn)
295static int __init bsr_init(void) 295static int __init bsr_init(void)
296{ 296{
297 struct device_node *np; 297 struct device_node *np;
298 dev_t bsr_dev = MKDEV(bsr_major, 0); 298 dev_t bsr_dev;
299 int ret = -ENODEV; 299 int ret = -ENODEV;
300 int result; 300 int result;
301 301
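This bsr.c hunk fixes an initialization-order bug: bsr_dev was built with MKDEV(bsr_major, 0) at declaration time, before bsr_major had been assigned, so it always encoded a stale major number. The usual pattern is to construct the dev_t only once the major is known, sketched here assuming an alloc_chrdev_region()-style allocation (BSR_MAX_DEVS is a stand-in for the driver's real device count):

	dev_t bsr_dev;			/* no MKDEV() yet: major unknown */
	int ret;

	ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
	if (ret < 0)
		return ret;
	bsr_major = MAJOR(bsr_dev);	/* only now is MKDEV(bsr_major, 0) valid */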
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 7066e801b9d3..051474c65b78 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -84,8 +84,6 @@ static struct clocksource clocksource_hpet = {
84 .rating = 250, 84 .rating = 250,
85 .read = read_hpet, 85 .read = read_hpet,
86 .mask = CLOCKSOURCE_MASK(64), 86 .mask = CLOCKSOURCE_MASK(64),
87 .mult = 0, /* to be calculated */
88 .shift = 10,
89 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 87 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
90}; 88};
91static struct clocksource *hpet_clocksource; 89static struct clocksource *hpet_clocksource;
@@ -934,9 +932,7 @@ int hpet_alloc(struct hpet_data *hdp)
934 if (!hpet_clocksource) { 932 if (!hpet_clocksource) {
935 hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; 933 hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
936 CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr); 934 CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
937 clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq, 935 clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
938 clocksource_hpet.shift);
939 clocksource_register(&clocksource_hpet);
940 hpetp->hp_clocksource = &clocksource_hpet; 936 hpetp->hp_clocksource = &clocksource_hpet;
941 hpet_clocksource = &clocksource_hpet; 937 hpet_clocksource = &clocksource_hpet;
942 } 938 }
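This hpet.c hunk and the cyclone.c one below are the same conversion: instead of hand-picking a shift value and deriving mult with clocksource_hz2mult(), the driver passes the counter frequency to clocksource_register_hz() and lets the core choose the scaling factors. Schematically (cs and tick_freq stand in for the driver's clocksource and its counter frequency):

	/* before: driver-chosen shift, derived mult */
	cs.shift = 10;
	cs.mult  = clocksource_hz2mult(tick_freq, cs.shift);
	clocksource_register(&cs);

	/* after: one call, the core computes mult/shift from the frequency */
	clocksource_register_hz(&cs, tick_freq);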
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 436a99017998..8fc04b4f311f 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -806,29 +806,41 @@ static const struct file_operations oldmem_fops = {
806}; 806};
807#endif 807#endif
808 808
809static ssize_t kmsg_write(struct file *file, const char __user *buf, 809static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv,
810 size_t count, loff_t *ppos) 810 unsigned long count, loff_t pos)
811{ 811{
812 char *tmp; 812 char *line, *p;
813 ssize_t ret; 813 int i;
814 ssize_t ret = -EFAULT;
815 size_t len = iov_length(iv, count);
814 816
815 tmp = kmalloc(count + 1, GFP_KERNEL); 817 line = kmalloc(len + 1, GFP_KERNEL);
816 if (tmp == NULL) 818 if (line == NULL)
817 return -ENOMEM; 819 return -ENOMEM;
818 ret = -EFAULT; 820
819 if (!copy_from_user(tmp, buf, count)) { 821 /*
820 tmp[count] = 0; 822 * copy all vectors into a single string, to ensure we do
821 ret = printk("%s", tmp); 823 * not interleave our log line with other printk calls
822 if (ret > count) 824 */
823 /* printk can add a prefix */ 825 p = line;
824 ret = count; 826 for (i = 0; i < count; i++) {
827 if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len))
828 goto out;
829 p += iv[i].iov_len;
825 } 830 }
826 kfree(tmp); 831 p[0] = '\0';
832
833 ret = printk("%s", line);
834 /* printk can add a prefix */
835 if (ret > len)
836 ret = len;
837out:
838 kfree(line);
827 return ret; 839 return ret;
828} 840}
829 841
830static const struct file_operations kmsg_fops = { 842static const struct file_operations kmsg_fops = {
831 .write = kmsg_write, 843 .aio_write = kmsg_writev,
832 .llseek = noop_llseek, 844 .llseek = noop_llseek,
833}; 845};
834 846
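Switching kmsg_fops from .write to .aio_write means a log line assembled from several buffers reaches printk() as one string. A hedged userspace sketch (the "myapp" strings are hypothetical): both iovecs below are copied into a single buffer and emitted by one printk() call, so the two halves cannot be interleaved with messages from other writers.

	#include <fcntl.h>
	#include <sys/uio.h>

	int main(void)
	{
		int fd = open("/dev/kmsg", O_WRONLY);
		struct iovec iv[2] = {
			{ .iov_base = (void *)"myapp: ",        .iov_len = 7  },
			{ .iov_base = (void *)"device ready\n", .iov_len = 13 },
		};

		if (fd >= 0)
			writev(fd, iv, 2);
		return 0;
	}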
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index b4b9d5a47885..b33e8ea314ed 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -21,6 +21,7 @@
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/gfp.h> 22#include <linux/gfp.h>
23#include <linux/compat.h> 23#include <linux/compat.h>
24#include <linux/vmalloc.h>
24 25
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26 27
@@ -30,10 +31,15 @@ struct raw_device_data {
30}; 31};
31 32
32static struct class *raw_class; 33static struct class *raw_class;
33static struct raw_device_data raw_devices[MAX_RAW_MINORS]; 34static struct raw_device_data *raw_devices;
34static DEFINE_MUTEX(raw_mutex); 35static DEFINE_MUTEX(raw_mutex);
35static const struct file_operations raw_ctl_fops; /* forward declaration */ 36static const struct file_operations raw_ctl_fops; /* forward declaration */
36 37
38static int max_raw_minors = MAX_RAW_MINORS;
39
40module_param(max_raw_minors, int, 0);
41MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
42
37/* 43/*
38 * Open/close code for raw IO. 44 * Open/close code for raw IO.
39 * 45 *
@@ -125,7 +131,7 @@ static int bind_set(int number, u64 major, u64 minor)
125 struct raw_device_data *rawdev; 131 struct raw_device_data *rawdev;
126 int err = 0; 132 int err = 0;
127 133
128 if (number <= 0 || number >= MAX_RAW_MINORS) 134 if (number <= 0 || number >= max_raw_minors)
129 return -EINVAL; 135 return -EINVAL;
130 136
131 if (MAJOR(dev) != major || MINOR(dev) != minor) 137 if (MAJOR(dev) != major || MINOR(dev) != minor)
@@ -312,14 +318,27 @@ static int __init raw_init(void)
312 dev_t dev = MKDEV(RAW_MAJOR, 0); 318 dev_t dev = MKDEV(RAW_MAJOR, 0);
313 int ret; 319 int ret;
314 320
315 ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw"); 321 if (max_raw_minors < 1 || max_raw_minors > 65536) {
322 printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
323 " between 1 and 65536), using %d\n", MAX_RAW_MINORS);
324 max_raw_minors = MAX_RAW_MINORS;
325 }
326
327 raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors);
328 if (!raw_devices) {
329 printk(KERN_ERR "Not enough memory for raw device structures\n");
330 ret = -ENOMEM;
331 goto error;
332 }
333 memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors);
334
335 ret = register_chrdev_region(dev, max_raw_minors, "raw");
316 if (ret) 336 if (ret)
317 goto error; 337 goto error;
318 338
319 cdev_init(&raw_cdev, &raw_fops); 339 cdev_init(&raw_cdev, &raw_fops);
320 ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS); 340 ret = cdev_add(&raw_cdev, dev, max_raw_minors);
321 if (ret) { 341 if (ret) {
322 kobject_put(&raw_cdev.kobj);
323 goto error_region; 342 goto error_region;
324 } 343 }
325 344
@@ -336,8 +355,9 @@ static int __init raw_init(void)
336 return 0; 355 return 0;
337 356
338error_region: 357error_region:
339 unregister_chrdev_region(dev, MAX_RAW_MINORS); 358 unregister_chrdev_region(dev, max_raw_minors);
340error: 359error:
360 vfree(raw_devices);
341 return ret; 361 return ret;
342} 362}
343 363
@@ -346,7 +366,7 @@ static void __exit raw_exit(void)
346 device_destroy(raw_class, MKDEV(RAW_MAJOR, 0)); 366 device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
347 class_destroy(raw_class); 367 class_destroy(raw_class);
348 cdev_del(&raw_cdev); 368 cdev_del(&raw_cdev);
349 unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS); 369 unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
350} 370}
351 371
352module_init(raw_init); 372module_init(raw_init);
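With the limit now a module parameter, a larger bind table can be requested at load time (e.g. modprobe raw max_raw_minors=4096, assuming the module is named raw); out-of-range values fall back to the compile-time MAX_RAW_MINORS default. The table also moves from a static array to vmalloc(), since 65536 entries would otherwise require a large physically contiguous allocation.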
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
new file mode 100644
index 000000000000..110aeeb52f9a
--- /dev/null
+++ b/drivers/clocksource/Kconfig
@@ -0,0 +1,2 @@
1config CLKSRC_I8253
2 bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index be61ece6330b..cfb6383b543a 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
6obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o 6obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
7obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o 7obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
8obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o 8obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
9obj-$(CONFIG_CLKSRC_I8253) += i8253.o
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index 64e528e8bfa6..72f811f73e9c 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -29,8 +29,6 @@ static struct clocksource clocksource_cyclone = {
29 .rating = 250, 29 .rating = 250,
30 .read = read_cyclone, 30 .read = read_cyclone,
31 .mask = CYCLONE_TIMER_MASK, 31 .mask = CYCLONE_TIMER_MASK,
32 .mult = 10,
33 .shift = 0,
34 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 32 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
35}; 33};
36 34
@@ -108,12 +106,8 @@ static int __init init_cyclone_clocksource(void)
108 } 106 }
109 cyclone_ptr = cyclone_timer; 107 cyclone_ptr = cyclone_timer;
110 108
111 /* sort out mult/shift values: */ 109 return clocksource_register_hz(&clocksource_cyclone,
112 clocksource_cyclone.shift = 22; 110 CYCLONE_TIMER_FREQ);
113 clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ,
114 clocksource_cyclone.shift);
115
116 return clocksource_register(&clocksource_cyclone);
117} 111}
118 112
119arch_initcall(init_cyclone_clocksource); 113arch_initcall(init_cyclone_clocksource);
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
new file mode 100644
index 000000000000..225c1761b372
--- /dev/null
+++ b/drivers/clocksource/i8253.c
@@ -0,0 +1,88 @@
1/*
2 * i8253 PIT clocksource
3 */
4#include <linux/clocksource.h>
5#include <linux/init.h>
6#include <linux/io.h>
7#include <linux/spinlock.h>
8#include <linux/timex.h>
9
10#include <asm/i8253.h>
11
12/*
13 * Since the PIT overflows every tick, it's not very useful
14 * to just read by itself. So use jiffies to emulate a free
15 * running counter:
16 */
17static cycle_t i8253_read(struct clocksource *cs)
18{
19 static int old_count;
20 static u32 old_jifs;
21 unsigned long flags;
22 int count;
23 u32 jifs;
24
25 raw_spin_lock_irqsave(&i8253_lock, flags);
26 /*
27 * Although our caller may have the read side of xtime_lock,
28 * this is now a seqlock, and we are cheating in this routine
29 * by having side effects on state that we cannot undo if
30 * there is a collision on the seqlock and our caller has to
31 * retry. (Namely, old_jifs and old_count.) So we must treat
32 * jiffies as volatile despite the lock. We read jiffies
33 * before latching the timer count to guarantee that although
34 * the jiffies value might be older than the count (that is,
35 * the counter may underflow between the last point where
36 * jiffies was incremented and the point where we latch the
37 * count), it cannot be newer.
38 */
39 jifs = jiffies;
40 outb_pit(0x00, PIT_MODE); /* latch the count ASAP */
41 count = inb_pit(PIT_CH0); /* read the latched count */
42 count |= inb_pit(PIT_CH0) << 8;
43
44 /* VIA686a test code... reset the latch if count > max + 1 */
45 if (count > LATCH) {
46 outb_pit(0x34, PIT_MODE);
47 outb_pit(PIT_LATCH & 0xff, PIT_CH0);
48 outb_pit(PIT_LATCH >> 8, PIT_CH0);
49 count = PIT_LATCH - 1;
50 }
51
52 /*
53 * It's possible for count to appear to go the wrong way for a
54 * couple of reasons:
55 *
56 * 1. The timer counter underflows, but we haven't handled the
57 * resulting interrupt and incremented jiffies yet.
58 * 2. A hardware problem with the timer: instead of continuous time,
59 * the counter makes small upward "jumps" on some Pentium systems
60 * (see c't 95/10 page 335 for the Neptun bug).
61 *
62 * Previous attempts to handle these cases intelligently were
63 * buggy, so we just do the simple thing now.
64 */
65 if (count > old_count && jifs == old_jifs)
66 count = old_count;
67
68 old_count = count;
69 old_jifs = jifs;
70
71 raw_spin_unlock_irqrestore(&i8253_lock, flags);
72
73 count = (PIT_LATCH - 1) - count;
74
75 return (cycle_t)(jifs * PIT_LATCH) + count;
76}
77
78static struct clocksource i8253_cs = {
79 .name = "pit",
80 .rating = 110,
81 .read = i8253_read,
82 .mask = CLOCKSOURCE_MASK(32),
83};
84
85int __init clocksource_i8253_init(void)
86{
87 return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE);
88}
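The value returned by i8253_read() splices jiffies and the PIT down-counter into one monotonically increasing cycle count. A worked example, assuming HZ = 100 so that PIT_LATCH = (1193182 + 50) / 100 = 11932:

	/* Mid-tick the latched down-counter might read 5966, so:
	 *
	 *   elapsed = (PIT_LATCH - 1) - count = 11931 - 5966 = 5965
	 *   cycles  = jifs * PIT_LATCH + elapsed
	 *
	 * When the counter wraps and the tick handler increments jiffies,
	 * jifs * PIT_LATCH grows by 11932 while elapsed falls back toward
	 * zero, so the sum keeps advancing smoothly across the boundary. */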
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index ca8ee8093d6c..9fb84853d8e3 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -1,3 +1,5 @@
1menu "CPU Frequency scaling"
2
1config CPU_FREQ 3config CPU_FREQ
2 bool "CPU Frequency scaling" 4 bool "CPU Frequency scaling"
3 help 5 help
@@ -18,19 +20,6 @@ if CPU_FREQ
18config CPU_FREQ_TABLE 20config CPU_FREQ_TABLE
19 tristate 21 tristate
20 22
21config CPU_FREQ_DEBUG
22 bool "Enable CPUfreq debugging"
23 help
24 Say Y here to enable CPUfreq subsystem (including drivers)
25 debugging. You will need to activate it via the kernel
26 command line by passing
27 cpufreq.debug=<value>
28
29 To get <value>, add
30 1 to activate CPUfreq core debugging,
31 2 to activate CPUfreq drivers debugging, and
32 4 to activate CPUfreq governor debugging
33
34config CPU_FREQ_STAT 23config CPU_FREQ_STAT
35 tristate "CPU frequency translation statistics" 24 tristate "CPU frequency translation statistics"
36 select CPU_FREQ_TABLE 25 select CPU_FREQ_TABLE
@@ -190,4 +179,10 @@ config CPU_FREQ_GOV_CONSERVATIVE
190 179
191 If in doubt, say N. 180 If in doubt, say N.
192 181
193endif # CPU_FREQ 182menu "x86 CPU frequency scaling drivers"
183depends on X86
184source "drivers/cpufreq/Kconfig.x86"
185endmenu
186
187endif
188endmenu
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
new file mode 100644
index 000000000000..78ff7ee48951
--- /dev/null
+++ b/drivers/cpufreq/Kconfig.x86
@@ -0,0 +1,255 @@
1#
2# x86 CPU Frequency scaling drivers
3#
4
5config X86_PCC_CPUFREQ
6 tristate "Processor Clocking Control interface driver"
7 depends on ACPI && ACPI_PROCESSOR
8 help
9 This driver adds support for the PCC interface.
10
11 For details, take a look at:
12 <file:Documentation/cpu-freq/pcc-cpufreq.txt>.
13
14 To compile this driver as a module, choose M here: the
15 module will be called pcc-cpufreq.
16
17 If in doubt, say N.
18
19config X86_ACPI_CPUFREQ
20 tristate "ACPI Processor P-States driver"
21 select CPU_FREQ_TABLE
22 depends on ACPI_PROCESSOR
23 help
24 This driver adds a CPUFreq driver which utilizes the ACPI
25 Processor Performance States.
26 This driver also supports Intel Enhanced Speedstep.
27
28 To compile this driver as a module, choose M here: the
29 module will be called acpi-cpufreq.
30
31 For details, take a look at <file:Documentation/cpu-freq/>.
32
33 If in doubt, say N.
34
35config ELAN_CPUFREQ
36 tristate "AMD Elan SC400 and SC410"
37 select CPU_FREQ_TABLE
38 depends on MELAN
39 ---help---
40 This adds the CPUFreq driver for AMD Elan SC400 and SC410
41 processors.
42
43 You need to specify the processor maximum speed as boot
44 parameter: elanfreq=maxspeed (in kHz) or as module
45 parameter "max_freq".
46
47 For details, take a look at <file:Documentation/cpu-freq/>.
48
49 If in doubt, say N.
50
51config SC520_CPUFREQ
52 tristate "AMD Elan SC520"
53 select CPU_FREQ_TABLE
54 depends on MELAN
55 ---help---
56 This adds the CPUFreq driver for AMD Elan SC520 processor.
57
58 For details, take a look at <file:Documentation/cpu-freq/>.
59
60 If in doubt, say N.
61
62
63config X86_POWERNOW_K6
64 tristate "AMD Mobile K6-2/K6-3 PowerNow!"
65 select CPU_FREQ_TABLE
66 depends on X86_32
67 help
68 This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
69 AMD K6-3+ processors.
70
71 For details, take a look at <file:Documentation/cpu-freq/>.
72
73 If in doubt, say N.
74
75config X86_POWERNOW_K7
76 tristate "AMD Mobile Athlon/Duron PowerNow!"
77 select CPU_FREQ_TABLE
78 depends on X86_32
79 help
80 This adds the CPUFreq driver for mobile AMD K7 mobile processors.
81
82 For details, take a look at <file:Documentation/cpu-freq/>.
83
84 If in doubt, say N.
85
86config X86_POWERNOW_K7_ACPI
87 bool
88 depends on X86_POWERNOW_K7 && ACPI_PROCESSOR
89 depends on !(X86_POWERNOW_K7 = y && ACPI_PROCESSOR = m)
90 depends on X86_32
91 default y
92
93config X86_POWERNOW_K8
94 tristate "AMD Opteron/Athlon64 PowerNow!"
95 select CPU_FREQ_TABLE
96 depends on ACPI && ACPI_PROCESSOR
97 help
98 This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
99
100 To compile this driver as a module, choose M here: the
101 module will be called powernow-k8.
102
103 For details, take a look at <file:Documentation/cpu-freq/>.
104
105config X86_GX_SUSPMOD
106 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
107 depends on X86_32 && PCI
108 help
109 This adds the CPUFreq driver for NatSemi Geode processors which
110 support suspend modulation.
111
112 For details, take a look at <file:Documentation/cpu-freq/>.
113
114 If in doubt, say N.
115
116config X86_SPEEDSTEP_CENTRINO
117 tristate "Intel Enhanced SpeedStep (deprecated)"
118 select CPU_FREQ_TABLE
119 select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
120 depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
121 help
122 This driver is deprecated: its functionality is now merged into
123 acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
124 speedstep_centrino.
125 This adds the CPUFreq driver for Enhanced SpeedStep enabled
126 mobile CPUs. This means Intel Pentium M (Centrino) CPUs
127 or 64bit enabled Intel Xeons.
128
129 To compile this driver as a module, choose M here: the
130 module will be called speedstep-centrino.
131
132 For details, take a look at <file:Documentation/cpu-freq/>.
133
134 If in doubt, say N.
135
136config X86_SPEEDSTEP_CENTRINO_TABLE
137 bool "Built-in tables for Banias CPUs"
138 depends on X86_32 && X86_SPEEDSTEP_CENTRINO
139 default y
140 help
141 Use built-in tables for Banias CPUs if ACPI encoding
142 is not available.
143
144 If in doubt, say N.
145
146config X86_SPEEDSTEP_ICH
147 tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
148 select CPU_FREQ_TABLE
149 depends on X86_32
150 help
151 This adds the CPUFreq driver for certain mobile Intel Pentium III
152 (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
153 mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
154 ICH3 or ICH4 southbridge.
155
156 For details, take a look at <file:Documentation/cpu-freq/>.
157
158 If in doubt, say N.
159
160config X86_SPEEDSTEP_SMI
161 tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
162 select CPU_FREQ_TABLE
163 depends on X86_32 && EXPERIMENTAL
164 help
165 This adds the CPUFreq driver for certain mobile Intel Pentium III
166 (Coppermine), all mobile Intel Pentium III-M (Tualatin)
167 on systems which have an Intel 440BX/ZX/MX southbridge.
168
169 For details, take a look at <file:Documentation/cpu-freq/>.
170
171 If in doubt, say N.
172
173config X86_P4_CLOCKMOD
174 tristate "Intel Pentium 4 clock modulation"
175 select CPU_FREQ_TABLE
176 help
177 This adds the CPUFreq driver for Intel Pentium 4 / XEON
178 processors. When enabled it will lower CPU temperature by skipping
179 clocks.
180
181 This driver should only be used in exceptional
182 circumstances when very low power is needed because it causes severe
183 slowdowns and noticeable latencies. Normally Speedstep should be used
184 instead.
185
186 To compile this driver as a module, choose M here: the
187 module will be called p4-clockmod.
188
189 For details, take a look at <file:Documentation/cpu-freq/>.
190
191 Unless you are absolutely sure say N.
192
193config X86_CPUFREQ_NFORCE2
194 tristate "nVidia nForce2 FSB changing"
195 depends on X86_32 && EXPERIMENTAL
196 help
197 This adds the CPUFreq driver for FSB changing on nVidia nForce2
198 platforms.
199
200 For details, take a look at <file:Documentation/cpu-freq/>.
201
202 If in doubt, say N.
203
204config X86_LONGRUN
205 tristate "Transmeta LongRun"
206 depends on X86_32
207 help
208 This adds the CPUFreq driver for Transmeta Crusoe and Efficeon processors
209 which support LongRun.
210
211 For details, take a look at <file:Documentation/cpu-freq/>.
212
213 If in doubt, say N.
214
215config X86_LONGHAUL
216 tristate "VIA Cyrix III Longhaul"
217 select CPU_FREQ_TABLE
218 depends on X86_32 && ACPI_PROCESSOR
219 help
220 This adds the CPUFreq driver for VIA Samuel/CyrixIII,
221 VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
222 processors.
223
224 For details, take a look at <file:Documentation/cpu-freq/>.
225
226 If in doubt, say N.
227
228config X86_E_POWERSAVER
229 tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
230 select CPU_FREQ_TABLE
231 depends on X86_32 && EXPERIMENTAL
232 help
233 This adds the CPUFreq driver for VIA C7 processors. However, this driver
234 does not have any safeguards to prevent operating the CPU out of spec
235 and is thus considered dangerous. Please use the regular ACPI cpufreq
236 driver, enabled by CONFIG_X86_ACPI_CPUFREQ.
237
238 If in doubt, say N.
239
240comment "shared options"
241
242config X86_SPEEDSTEP_LIB
243 tristate
244 default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
245
246config X86_SPEEDSTEP_RELAXED_CAP_CHECK
247 bool "Relaxed speedstep capability checks"
248 depends on X86_32 && (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
249 help
250 Skip some of the capability checks that would normally be
251 performed on a speedstep-capable system. Some ancient or unusual
252 systems, though speedstep capable, don't always indicate that they
253 are. This option lets the probing code bypass some of those checks
254 if the parameter "relaxed_check=1" is passed to the module.
255
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 71fc3b4173f1..c7f1a6f16b6e 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,3 +13,29 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
13# CPUfreq cross-arch helpers 13# CPUfreq cross-arch helpers
14obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o 14obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
15 15
16##################################################################################
17# x86 drivers.
18# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
19# K8 systems. ACPI is preferred to all other hardware-specific drivers.
20# speedstep-* is preferred over p4-clockmod.
21
22obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o
23obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
24obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
25obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
26obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
27obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
28obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o
29obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
30obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
31obj-$(CONFIG_X86_LONGRUN) += longrun.o
32obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
33obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
34obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
35obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
36obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
37obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
38obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
39
40##################################################################################
41
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
new file mode 100644
index 000000000000..4e04e1274388
--- /dev/null
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -0,0 +1,773 @@
1/*
2 * acpi-cpufreq.c - ACPI Processor P-States Driver
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
8 *
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
24 *
25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/smp.h>
32#include <linux/sched.h>
33#include <linux/cpufreq.h>
34#include <linux/compiler.h>
35#include <linux/dmi.h>
36#include <linux/slab.h>
37
38#include <linux/acpi.h>
39#include <linux/io.h>
40#include <linux/delay.h>
41#include <linux/uaccess.h>
42
43#include <acpi/processor.h>
44
45#include <asm/msr.h>
46#include <asm/processor.h>
47#include <asm/cpufeature.h>
48#include "mperf.h"
49
50MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
51MODULE_DESCRIPTION("ACPI Processor P-States Driver");
52MODULE_LICENSE("GPL");
53
54enum {
55 UNDEFINED_CAPABLE = 0,
56 SYSTEM_INTEL_MSR_CAPABLE,
57 SYSTEM_IO_CAPABLE,
58};
59
60#define INTEL_MSR_RANGE (0xffff)
61
62struct acpi_cpufreq_data {
63 struct acpi_processor_performance *acpi_data;
64 struct cpufreq_frequency_table *freq_table;
65 unsigned int resume;
66 unsigned int cpu_feature;
67};
68
69static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
70
71/* acpi_perf_data is a pointer to percpu data. */
72static struct acpi_processor_performance __percpu *acpi_perf_data;
73
74static struct cpufreq_driver acpi_cpufreq_driver;
75
76static unsigned int acpi_pstate_strict;
77
78static int check_est_cpu(unsigned int cpuid)
79{
80 struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
81
82 return cpu_has(cpu, X86_FEATURE_EST);
83}
84
85static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
86{
87 struct acpi_processor_performance *perf;
88 int i;
89
90 perf = data->acpi_data;
91
92 for (i = 0; i < perf->state_count; i++) {
93 if (value == perf->states[i].status)
94 return data->freq_table[i].frequency;
95 }
96 return 0;
97}
98
99static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
100{
101 int i;
102 struct acpi_processor_performance *perf;
103
104 msr &= INTEL_MSR_RANGE;
105 perf = data->acpi_data;
106
107 for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
108 if (msr == perf->states[data->freq_table[i].index].status)
109 return data->freq_table[i].frequency;
110 }
111 return data->freq_table[0].frequency;
112}
113
114static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
115{
116 switch (data->cpu_feature) {
117 case SYSTEM_INTEL_MSR_CAPABLE:
118 return extract_msr(val, data);
119 case SYSTEM_IO_CAPABLE:
120 return extract_io(val, data);
121 default:
122 return 0;
123 }
124}
125
126struct msr_addr {
127 u32 reg;
128};
129
130struct io_addr {
131 u16 port;
132 u8 bit_width;
133};
134
135struct drv_cmd {
136 unsigned int type;
137 const struct cpumask *mask;
138 union {
139 struct msr_addr msr;
140 struct io_addr io;
141 } addr;
142 u32 val;
143};
144
145/* Called via smp_call_function_single(), on the target CPU */
146static void do_drv_read(void *_cmd)
147{
148 struct drv_cmd *cmd = _cmd;
149 u32 h;
150
151 switch (cmd->type) {
152 case SYSTEM_INTEL_MSR_CAPABLE:
153 rdmsr(cmd->addr.msr.reg, cmd->val, h);
154 break;
155 case SYSTEM_IO_CAPABLE:
156 acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
157 &cmd->val,
158 (u32)cmd->addr.io.bit_width);
159 break;
160 default:
161 break;
162 }
163}
164
165/* Called via smp_call_function_many(), on the target CPUs */
166static void do_drv_write(void *_cmd)
167{
168 struct drv_cmd *cmd = _cmd;
169 u32 lo, hi;
170
171 switch (cmd->type) {
172 case SYSTEM_INTEL_MSR_CAPABLE:
173 rdmsr(cmd->addr.msr.reg, lo, hi);
174 lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
175 wrmsr(cmd->addr.msr.reg, lo, hi);
176 break;
177 case SYSTEM_IO_CAPABLE:
178 acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
179 cmd->val,
180 (u32)cmd->addr.io.bit_width);
181 break;
182 default:
183 break;
184 }
185}
186
187static void drv_read(struct drv_cmd *cmd)
188{
189 int err;
190 cmd->val = 0;
191
192 err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
193 WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
194}
195
196static void drv_write(struct drv_cmd *cmd)
197{
198 int this_cpu;
199
200 this_cpu = get_cpu();
201 if (cpumask_test_cpu(this_cpu, cmd->mask))
202 do_drv_write(cmd);
203 smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
204 put_cpu();
205}
206
207static u32 get_cur_val(const struct cpumask *mask)
208{
209 struct acpi_processor_performance *perf;
210 struct drv_cmd cmd;
211
212 if (unlikely(cpumask_empty(mask)))
213 return 0;
214
215 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
216 case SYSTEM_INTEL_MSR_CAPABLE:
217 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
218 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
219 break;
220 case SYSTEM_IO_CAPABLE:
221 cmd.type = SYSTEM_IO_CAPABLE;
222 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
223 cmd.addr.io.port = perf->control_register.address;
224 cmd.addr.io.bit_width = perf->control_register.bit_width;
225 break;
226 default:
227 return 0;
228 }
229
230 cmd.mask = mask;
231 drv_read(&cmd);
232
233 pr_debug("get_cur_val = %u\n", cmd.val);
234
235 return cmd.val;
236}
237
238static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
239{
240 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
241 unsigned int freq;
242 unsigned int cached_freq;
243
244 pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
245
246 if (unlikely(data == NULL ||
247 data->acpi_data == NULL || data->freq_table == NULL)) {
248 return 0;
249 }
250
251 cached_freq = data->freq_table[data->acpi_data->state].frequency;
252 freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
253 if (freq != cached_freq) {
254 /*
255 * The dreaded BIOS frequency change behind our back.
256 * Force set the frequency on next target call.
257 */
258 data->resume = 1;
259 }
260
261 pr_debug("cur freq = %u\n", freq);
262
263 return freq;
264}
265
266static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
267 struct acpi_cpufreq_data *data)
268{
269 unsigned int cur_freq;
270 unsigned int i;
271
272 for (i = 0; i < 100; i++) {
273 cur_freq = extract_freq(get_cur_val(mask), data);
274 if (cur_freq == freq)
275 return 1;
276 udelay(10);
277 }
278 return 0;
279}
280
281static int acpi_cpufreq_target(struct cpufreq_policy *policy,
282 unsigned int target_freq, unsigned int relation)
283{
284 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
285 struct acpi_processor_performance *perf;
286 struct cpufreq_freqs freqs;
287 struct drv_cmd cmd;
288 unsigned int next_state = 0; /* Index into freq_table */
289 unsigned int next_perf_state = 0; /* Index into perf table */
290 unsigned int i;
291 int result = 0;
292
293 pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
294
295 if (unlikely(data == NULL ||
296 data->acpi_data == NULL || data->freq_table == NULL)) {
297 return -ENODEV;
298 }
299
300 perf = data->acpi_data;
301 result = cpufreq_frequency_table_target(policy,
302 data->freq_table,
303 target_freq,
304 relation, &next_state);
305 if (unlikely(result)) {
306 result = -ENODEV;
307 goto out;
308 }
309
310 next_perf_state = data->freq_table[next_state].index;
311 if (perf->state == next_perf_state) {
312 if (unlikely(data->resume)) {
313 pr_debug("Called after resume, resetting to P%d\n",
314 next_perf_state);
315 data->resume = 0;
316 } else {
317 pr_debug("Already at target state (P%d)\n",
318 next_perf_state);
319 goto out;
320 }
321 }
322
323 switch (data->cpu_feature) {
324 case SYSTEM_INTEL_MSR_CAPABLE:
325 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
326 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
327 cmd.val = (u32) perf->states[next_perf_state].control;
328 break;
329 case SYSTEM_IO_CAPABLE:
330 cmd.type = SYSTEM_IO_CAPABLE;
331 cmd.addr.io.port = perf->control_register.address;
332 cmd.addr.io.bit_width = perf->control_register.bit_width;
333 cmd.val = (u32) perf->states[next_perf_state].control;
334 break;
335 default:
336 result = -ENODEV;
337 goto out;
338 }
339
340 /* cpufreq holds the hotplug lock, so we are safe from here on */
341 if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
342 cmd.mask = policy->cpus;
343 else
344 cmd.mask = cpumask_of(policy->cpu);
345
346 freqs.old = perf->states[perf->state].core_frequency * 1000;
347 freqs.new = data->freq_table[next_state].frequency;
348 for_each_cpu(i, policy->cpus) {
349 freqs.cpu = i;
350 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
351 }
352
353 drv_write(&cmd);
354
355 if (acpi_pstate_strict) {
356 if (!check_freqs(cmd.mask, freqs.new, data)) {
357 pr_debug("acpi_cpufreq_target failed (%d)\n",
358 policy->cpu);
359 result = -EAGAIN;
360 goto out;
361 }
362 }
363
364 for_each_cpu(i, policy->cpus) {
365 freqs.cpu = i;
366 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
367 }
368 perf->state = next_perf_state;
369
370out:
371 return result;
372}
373
374static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
375{
376 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
377
378 pr_debug("acpi_cpufreq_verify\n");
379
380 return cpufreq_frequency_table_verify(policy, data->freq_table);
381}
382
383static unsigned long
384acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
385{
386 struct acpi_processor_performance *perf = data->acpi_data;
387
388 if (cpu_khz) {
389 /* search the closest match to cpu_khz */
390 unsigned int i;
391 unsigned long freq;
392 unsigned long freqn = perf->states[0].core_frequency * 1000;
393
394 for (i = 0; i < (perf->state_count-1); i++) {
395 freq = freqn;
396 freqn = perf->states[i+1].core_frequency * 1000;
397 if ((2 * cpu_khz) > (freqn + freq)) {
398 perf->state = i;
399 return freq;
400 }
401 }
402 perf->state = perf->state_count-1;
403 return freqn;
404 } else {
405 /* assume CPU is at P0... */
406 perf->state = 0;
407 return perf->states[0].core_frequency * 1000;
408 }
409}
410
411static void free_acpi_perf_data(void)
412{
413 unsigned int i;
414
415 /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
416 for_each_possible_cpu(i)
417 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
418 ->shared_cpu_map);
419 free_percpu(acpi_perf_data);
420}
421
422/*
423 * acpi_cpufreq_early_init - initialize ACPI P-States library
424 *
425 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
426 * in order to determine correct frequency and voltage pairings. We can
427 * do _PDC and _PSD and find out the processor dependency for the
428 * actual init that will happen later...
429 */
430static int __init acpi_cpufreq_early_init(void)
431{
432 unsigned int i;
433 pr_debug("acpi_cpufreq_early_init\n");
434
435 acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
436 if (!acpi_perf_data) {
437 pr_debug("Memory allocation error for acpi_perf_data.\n");
438 return -ENOMEM;
439 }
440 for_each_possible_cpu(i) {
441 if (!zalloc_cpumask_var_node(
442 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
443 GFP_KERNEL, cpu_to_node(i))) {
444
445 /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
446 free_acpi_perf_data();
447 return -ENOMEM;
448 }
449 }
450
451 /* Do initialization in ACPI core */
452 acpi_processor_preregister_performance(acpi_perf_data);
453 return 0;
454}
455
456#ifdef CONFIG_SMP
457/*
458 * Some BIOSes do SW_ANY coordination internally, either setting it up
459 * in hardware or handling it in firmware, without informing the OS.
460 * Left undetected, this makes the CPU run at a speed different from
461 * the one the OS requested. Detect it and handle it cleanly.
462 */
463static int bios_with_sw_any_bug;
464
465static int sw_any_bug_found(const struct dmi_system_id *d)
466{
467 bios_with_sw_any_bug = 1;
468 return 0;
469}
470
471static const struct dmi_system_id sw_any_bug_dmi_table[] = {
472 {
473 .callback = sw_any_bug_found,
474 .ident = "Supermicro Server X6DLP",
475 .matches = {
476 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
477 DMI_MATCH(DMI_BIOS_VERSION, "080010"),
478 DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
479 },
480 },
481 { }
482};
483
484static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
485{
486 /* Intel Xeon Processor 7100 Series Specification Update
487 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
488 * AL30: A Machine Check Exception (MCE) Occurring during an
489 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
490 * Both Processor Cores to Lock Up. */
491 if (c->x86_vendor == X86_VENDOR_INTEL) {
492 if ((c->x86 == 15) &&
493 (c->x86_model == 6) &&
494 (c->x86_mask == 8)) {
495 printk(KERN_INFO "acpi-cpufreq: Intel(R) "
496 "Xeon(R) 7100 Errata AL30, processors may "
497 "lock up on frequency changes: disabling "
498 "acpi-cpufreq.\n");
499 return -ENODEV;
500 }
501 }
502 return 0;
503}
504#endif
505
506static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
507{
508 unsigned int i;
509 unsigned int valid_states = 0;
510 unsigned int cpu = policy->cpu;
511 struct acpi_cpufreq_data *data;
512 unsigned int result = 0;
513 struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
514 struct acpi_processor_performance *perf;
515#ifdef CONFIG_SMP
516 static int blacklisted;
517#endif
518
519 pr_debug("acpi_cpufreq_cpu_init\n");
520
521#ifdef CONFIG_SMP
522 if (blacklisted)
523 return blacklisted;
524 blacklisted = acpi_cpufreq_blacklist(c);
525 if (blacklisted)
526 return blacklisted;
527#endif
528
529 data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
530 if (!data)
531 return -ENOMEM;
532
533 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
534 per_cpu(acfreq_data, cpu) = data;
535
536 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
537 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
538
539 result = acpi_processor_register_performance(data->acpi_data, cpu);
540 if (result)
541 goto err_free;
542
543 perf = data->acpi_data;
544 policy->shared_type = perf->shared_type;
545
546 /*
547 * Let policy->cpus reflect the CPU dependency only when software
548 * coordination is required.
549 */
550 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
551 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
552 cpumask_copy(policy->cpus, perf->shared_cpu_map);
553 }
554 cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
555
556#ifdef CONFIG_SMP
557 dmi_check_system(sw_any_bug_dmi_table);
558 if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
559 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
560 cpumask_copy(policy->cpus, cpu_core_mask(cpu));
561 }
562#endif
563
564 /* capability check */
565 if (perf->state_count <= 1) {
566 pr_debug("No P-States\n");
567 result = -ENODEV;
568 goto err_unreg;
569 }
570
571 if (perf->control_register.space_id != perf->status_register.space_id) {
572 result = -ENODEV;
573 goto err_unreg;
574 }
575
576 switch (perf->control_register.space_id) {
577 case ACPI_ADR_SPACE_SYSTEM_IO:
578 pr_debug("SYSTEM IO addr space\n");
579 data->cpu_feature = SYSTEM_IO_CAPABLE;
580 break;
581 case ACPI_ADR_SPACE_FIXED_HARDWARE:
582 pr_debug("HARDWARE addr space\n");
583 if (!check_est_cpu(cpu)) {
584 result = -ENODEV;
585 goto err_unreg;
586 }
587 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
588 break;
589 default:
590 pr_debug("Unknown addr space %d\n",
591 (u32) (perf->control_register.space_id));
592 result = -ENODEV;
593 goto err_unreg;
594 }
595
596 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
597 (perf->state_count+1), GFP_KERNEL);
598 if (!data->freq_table) {
599 result = -ENOMEM;
600 goto err_unreg;
601 }
602
603 /* detect transition latency */
604 policy->cpuinfo.transition_latency = 0;
605 for (i = 0; i < perf->state_count; i++) {
606 if ((perf->states[i].transition_latency * 1000) >
607 policy->cpuinfo.transition_latency)
608 policy->cpuinfo.transition_latency =
609 perf->states[i].transition_latency * 1000;
610 }
611
612 /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
613 if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
614 policy->cpuinfo.transition_latency > 20 * 1000) {
615 policy->cpuinfo.transition_latency = 20 * 1000;
616 printk_once(KERN_INFO
617 "P-state transition latency capped at 20 uS\n");
618 }
619
620 /* table init */
621 for (i = 0; i < perf->state_count; i++) {
622 if (i > 0 && perf->states[i].core_frequency >=
623 data->freq_table[valid_states-1].frequency / 1000)
624 continue;
625
626 data->freq_table[valid_states].index = i;
627 data->freq_table[valid_states].frequency =
628 perf->states[i].core_frequency * 1000;
629 valid_states++;
630 }
631 data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
632 perf->state = 0;
633
634 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
635 if (result)
636 goto err_freqfree;
637
638 if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
639 printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
640
641 switch (perf->control_register.space_id) {
642 case ACPI_ADR_SPACE_SYSTEM_IO:
643 /* Current speed is unknown and not detectable by IO port */
644 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
645 break;
646 case ACPI_ADR_SPACE_FIXED_HARDWARE:
647 acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
648 policy->cur = get_cur_freq_on_cpu(cpu);
649 break;
650 default:
651 break;
652 }
653
654 /* notify BIOS that we exist */
655 acpi_processor_notify_smm(THIS_MODULE);
656
657 /* Check for APERF/MPERF support in hardware */
658 if (cpu_has(c, X86_FEATURE_APERFMPERF))
659 acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
660
661 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
662 for (i = 0; i < perf->state_count; i++)
663 pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
664 (i == perf->state ? '*' : ' '), i,
665 (u32) perf->states[i].core_frequency,
666 (u32) perf->states[i].power,
667 (u32) perf->states[i].transition_latency);
668
669 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
670
671 /*
672 * the first call to ->target() should result in us actually
673 * writing something to the appropriate registers.
674 */
675 data->resume = 1;
676
677 return result;
678
679err_freqfree:
680 kfree(data->freq_table);
681err_unreg:
682 acpi_processor_unregister_performance(perf, cpu);
683err_free:
684 kfree(data);
685 per_cpu(acfreq_data, cpu) = NULL;
686
687 return result;
688}
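The table-init loop above keeps only strictly descending frequencies, silently dropping BIOS-provided P-states that repeat or increase. A small stand-alone sketch of the same filtering with illustrative state values (hypothetical test harness, not part of the driver):

#include <stdio.h>

/* Illustrative _PSS-style core frequencies in MHz, including a duplicate
 * entry that the init loop is expected to skip. */
static const unsigned int core_mhz[] = { 2800, 2800, 2400, 2000, 1600 };

int main(void)
{
	unsigned int freq_khz[6];
	unsigned int valid = 0, i;

	for (i = 0; i < 5; i++) {
		/* same test as acpi_cpufreq_cpu_init(): skip states whose
		 * frequency is not strictly below the last accepted one */
		if (i > 0 && core_mhz[i] >= freq_khz[valid - 1] / 1000)
			continue;
		freq_khz[valid++] = core_mhz[i] * 1000;
	}

	for (i = 0; i < valid; i++)
		printf("state %u: %u kHz\n", i, freq_khz[i]);
	return 0;	/* prints 2800000, 2400000, 2000000, 1600000 */
}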
689
690static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
691{
692 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
693
694 pr_debug("acpi_cpufreq_cpu_exit\n");
695
696 if (data) {
697 cpufreq_frequency_table_put_attr(policy->cpu);
698 per_cpu(acfreq_data, policy->cpu) = NULL;
699 acpi_processor_unregister_performance(data->acpi_data,
700 policy->cpu);
701 kfree(data->freq_table);
702 kfree(data);
703 }
704
705 return 0;
706}
707
708static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
709{
710 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
711
712 pr_debug("acpi_cpufreq_resume\n");
713
714 data->resume = 1;
715
716 return 0;
717}
718
719static struct freq_attr *acpi_cpufreq_attr[] = {
720 &cpufreq_freq_attr_scaling_available_freqs,
721 NULL,
722};
723
724static struct cpufreq_driver acpi_cpufreq_driver = {
725 .verify = acpi_cpufreq_verify,
726 .target = acpi_cpufreq_target,
727 .bios_limit = acpi_processor_get_bios_limit,
728 .init = acpi_cpufreq_cpu_init,
729 .exit = acpi_cpufreq_cpu_exit,
730 .resume = acpi_cpufreq_resume,
731 .name = "acpi-cpufreq",
732 .owner = THIS_MODULE,
733 .attr = acpi_cpufreq_attr,
734};
735
736static int __init acpi_cpufreq_init(void)
737{
738 int ret;
739
740 if (acpi_disabled)
741 return 0;
742
743 pr_debug("acpi_cpufreq_init\n");
744
745 ret = acpi_cpufreq_early_init();
746 if (ret)
747 return ret;
748
749 ret = cpufreq_register_driver(&acpi_cpufreq_driver);
750 if (ret)
751 free_acpi_perf_data();
752
753 return ret;
754}
755
756static void __exit acpi_cpufreq_exit(void)
757{
758 pr_debug("acpi_cpufreq_exit\n");
759
760 cpufreq_unregister_driver(&acpi_cpufreq_driver);
761
762 free_percpu(acpi_perf_data);
763}
764
765module_param(acpi_pstate_strict, uint, 0644);
766MODULE_PARM_DESC(acpi_pstate_strict,
767	"value 0 or non-zero; non-zero enables strict ACPI checks "
768	"during frequency changes.");
769
770late_initcall(acpi_cpufreq_init);
771module_exit(acpi_cpufreq_exit);
772
773MODULE_ALIAS("acpi");
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
new file mode 100644
index 000000000000..7bac808804f3
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -0,0 +1,444 @@
1/*
2 * (C) 2004-2006 Sebastian Witt <se.witt@gmx.net>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 * Based upon reverse-engineered information
6 *
7 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/cpufreq.h>
15#include <linux/pci.h>
16#include <linux/delay.h>
17
18#define NFORCE2_XTAL 25
19#define NFORCE2_BOOTFSB 0x48
20#define NFORCE2_PLLENABLE 0xa8
21#define NFORCE2_PLLREG 0xa4
22#define NFORCE2_PLLADR 0xa0
23#define NFORCE2_PLL(mul, div) (0x100000 | (mul << 8) | div)
24
25#define NFORCE2_MIN_FSB 50
26#define NFORCE2_SAFE_DISTANCE 50
27
28/* Delay in ms between FSB changes */
29/* #define NFORCE2_DELAY 10 */
30
31/*
32 * nforce2_chipset:
33 * FSB is changed using the chipset
34 */
35static struct pci_dev *nforce2_dev;
36
37/* fid:
38 * multiplier * 10
39 */
40static int fid;
41
42/* min_fsb, max_fsb:
43 * minimum and maximum FSB (= FSB at boot time)
44 */
45static int min_fsb;
46static int max_fsb;
47
48MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
49MODULE_DESCRIPTION("nForce2 FSB changing cpufreq driver");
50MODULE_LICENSE("GPL");
51
52module_param(fid, int, 0444);
53module_param(min_fsb, int, 0444);
54
55MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
56MODULE_PARM_DESC(min_fsb,
57 "Minimum FSB to use, if not defined: current FSB - 50");
58
59#define PFX "cpufreq-nforce2: "
60
61/**
62 * nforce2_calc_fsb - calculate FSB
63 * @pll: PLL value
64 *
65 * Calculates FSB from PLL value
66 */
67static int nforce2_calc_fsb(int pll)
68{
69 unsigned char mul, div;
70
71 mul = (pll >> 8) & 0xff;
72 div = pll & 0xff;
73
74 if (div > 0)
75 return NFORCE2_XTAL * mul / div;
76
77 return 0;
78}
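As a worked example of the encoding (illustrative numbers, not from the original source): NFORCE2_PLL(28, 4) is 0x100000 | (28 << 8) | 4, from which nforce2_calc_fsb() recovers mul = 28 and div = 4 and returns 25 * 28 / 4 = 175, i.e. a 175 MHz FSB derived from the 25 MHz reference crystal (NFORCE2_XTAL).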
79
80/**
81 * nforce2_calc_pll - calculate PLL value
82 * @fsb: FSB
83 *
84 * Calculate PLL value for given FSB
85 */
86static int nforce2_calc_pll(unsigned int fsb)
87{
88 unsigned char xmul, xdiv;
89 unsigned char mul = 0, div = 0;
90 int tried = 0;
91
92 /* Try to calculate multiplier and divider up to 4 times */
93 while (((mul == 0) || (div == 0)) && (tried <= 3)) {
94 for (xdiv = 2; xdiv <= 0x80; xdiv++)
95 for (xmul = 1; xmul <= 0xfe; xmul++)
96 if (nforce2_calc_fsb(NFORCE2_PLL(xmul, xdiv)) ==
97 fsb + tried) {
98 mul = xmul;
99 div = xdiv;
100 }
101 tried++;
102 }
103
104 if ((mul == 0) || (div == 0))
105 return -1;
106
107 return NFORCE2_PLL(mul, div);
108}
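The search above scans every multiplier/divider pair and, when no exact match exists, retries with the target bumped by 1 MHz, up to four passes in total. A stand-alone user-space sketch of the same round trip, handy for checking the encoding off-line (hypothetical test code, not part of the driver):

#include <stdio.h>

#define XTAL 25
#define PLL(mul, div) (0x100000 | ((mul) << 8) | (div))

static int calc_fsb(int pll)
{
	unsigned char mul = (pll >> 8) & 0xff, div = pll & 0xff;

	return div ? XTAL * mul / div : 0;
}

static int calc_pll(unsigned int fsb)
{
	unsigned char xmul, xdiv, mul = 0, div = 0;
	int tried = 0;

	/* same brute-force scan as nforce2_calc_pll(); the last match wins */
	while ((mul == 0 || div == 0) && tried <= 3) {
		for (xdiv = 2; xdiv <= 0x80; xdiv++)
			for (xmul = 1; xmul <= 0xfe; xmul++)
				if (calc_fsb(PLL(xmul, xdiv)) == fsb + tried) {
					mul = xmul;
					div = xdiv;
				}
		tried++;
	}
	return (mul && div) ? PLL(mul, div) : -1;
}

int main(void)
{
	int pll = calc_pll(175);

	printf("pll=0x%x -> fsb=%d MHz\n", pll, calc_fsb(pll)); /* 175 MHz */
	return 0;
}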
109
110/**
111 * nforce2_write_pll - write PLL value to chipset
112 * @pll: PLL value
113 *
114 * Writes new FSB PLL value to chipset
115 */
116static void nforce2_write_pll(int pll)
117{
118 int temp;
119
120 /* Set the pll addr. to 0x00 */
121 pci_write_config_dword(nforce2_dev, NFORCE2_PLLADR, 0);
122
123 /* Now write the value in all 64 registers */
124 for (temp = 0; temp <= 0x3f; temp++)
125 pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll);
126
127 return;
128}
129
130/**
131 * nforce2_fsb_read - Read FSB
132 *
133 * Read FSB from chipset
134 * If bootfsb != 0, return FSB at boot-time
135 */
136static unsigned int nforce2_fsb_read(int bootfsb)
137{
138 struct pci_dev *nforce2_sub5;
139 u32 fsb, temp = 0;
140
141 /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
142 nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 0x01EF,
143 PCI_ANY_ID, PCI_ANY_ID, NULL);
144 if (!nforce2_sub5)
145 return 0;
146
147 pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
148 fsb /= 1000000;
149
150 /* Check if PLL register is already set */
151 pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
152
153 if (bootfsb || !temp)
154 return fsb;
155
156 /* Use PLL register FSB value */
157 pci_read_config_dword(nforce2_dev, NFORCE2_PLLREG, &temp);
158 fsb = nforce2_calc_fsb(temp);
159
160 return fsb;
161}
162
163/**
164 * nforce2_set_fsb - set new FSB
165 * @fsb: New FSB
166 *
167 * Sets new FSB
168 */
169static int nforce2_set_fsb(unsigned int fsb)
170{
171 u32 temp = 0;
172 unsigned int tfsb;
173 int diff;
174 int pll = 0;
175
176 if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
177 printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
178 return -EINVAL;
179 }
180
181 tfsb = nforce2_fsb_read(0);
182 if (!tfsb) {
183 printk(KERN_ERR PFX "Error while reading the FSB\n");
184 return -EINVAL;
185 }
186
187 /* First write? Then set actual value */
188 pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
189 if (!temp) {
190 pll = nforce2_calc_pll(tfsb);
191
192 if (pll < 0)
193 return -EINVAL;
194
195 nforce2_write_pll(pll);
196 }
197
198 /* Enable write access */
199 temp = 0x01;
200 pci_write_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8)temp);
201
202 diff = tfsb - fsb;
203
204 if (!diff)
205 return 0;
206
207 while ((tfsb != fsb) && (tfsb <= max_fsb) && (tfsb >= min_fsb)) {
208 if (diff < 0)
209 tfsb++;
210 else
211 tfsb--;
212
213 /* Calculate the PLL reg. value */
214 pll = nforce2_calc_pll(tfsb);
215 if (pll == -1)
216 return -EINVAL;
217
218 nforce2_write_pll(pll);
219#ifdef NFORCE2_DELAY
220 mdelay(NFORCE2_DELAY);
221#endif
222 }
223
224 temp = 0x40;
225 pci_write_config_byte(nforce2_dev, NFORCE2_PLLADR, (u8)temp);
226
227 return 0;
228}
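Note that the loop above walks the FSB in 1 MHz steps, reprogramming the PLL at every step; if the optional NFORCE2_DELAY of 10 ms were compiled in, a 50 MHz change would take on the order of half a second (illustrative arithmetic: 50 steps * 10 ms).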
229
230/**
231 * nforce2_get - get the CPU frequency
232 * @cpu: CPU number
233 *
234 * Returns the CPU frequency
235 */
236static unsigned int nforce2_get(unsigned int cpu)
237{
238 if (cpu)
239 return 0;
240 return nforce2_fsb_read(0) * fid * 100;
241}
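With illustrative numbers: fid = 115 (an 11.5x multiplier, stored as multiplier * 10) and a 133 MHz FSB give 133 * 115 * 100 = 1,529,500 kHz, about 1.53 GHz; the factor of 100 folds together the MHz-to-kHz conversion (* 1000) and the stored * 10 scaling of the multiplier (/ 10).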
242
243/**
244 * nforce2_target - set a new CPUFreq policy
245 * @policy: new policy
246 * @target_freq: the target frequency
247 * @relation: how that frequency relates to achieved frequency
248 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
249 *
250 * Sets a new CPUFreq policy.
251 */
252static int nforce2_target(struct cpufreq_policy *policy,
253 unsigned int target_freq, unsigned int relation)
254{
255/* unsigned long flags; */
256 struct cpufreq_freqs freqs;
257 unsigned int target_fsb;
258
259 if ((target_freq > policy->max) || (target_freq < policy->min))
260 return -EINVAL;
261
262 target_fsb = target_freq / (fid * 100);
263
264 freqs.old = nforce2_get(policy->cpu);
265 freqs.new = target_fsb * fid * 100;
266 freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
267
268 if (freqs.old == freqs.new)
269 return 0;
270
271 pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
272 freqs.old, freqs.new);
273
274 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
275
276 /* Disable IRQs */
277 /* local_irq_save(flags); */
278
279 if (nforce2_set_fsb(target_fsb) < 0)
280 printk(KERN_ERR PFX "Changing FSB to %d failed\n",
281 target_fsb);
282 else
283 pr_debug("Changed FSB successfully to %d\n",
284 target_fsb);
285
286 /* Enable IRQs */
287 /* local_irq_restore(flags); */
288
289 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
290
291 return 0;
292}
293
294/**
295 * nforce2_verify - verifies a new CPUFreq policy
296 * @policy: new policy
297 */
298static int nforce2_verify(struct cpufreq_policy *policy)
299{
300 unsigned int fsb_pol_max;
301
302 fsb_pol_max = policy->max / (fid * 100);
303
304 if (policy->min < (fsb_pol_max * fid * 100))
305 policy->max = (fsb_pol_max + 1) * fid * 100;
306
307 cpufreq_verify_within_limits(policy,
308 policy->cpuinfo.min_freq,
309 policy->cpuinfo.max_freq);
310 return 0;
311}
312
313static int nforce2_cpu_init(struct cpufreq_policy *policy)
314{
315 unsigned int fsb;
316 unsigned int rfid;
317
318 /* capability check */
319 if (policy->cpu != 0)
320 return -ENODEV;
321
322 /* Get current FSB */
323 fsb = nforce2_fsb_read(0);
324
325 if (!fsb)
326 return -EIO;
327
328 /* FIX: Get FID from CPU */
329 if (!fid) {
330 if (!cpu_khz) {
331 printk(KERN_WARNING PFX
332 "cpu_khz not set, can't calculate multiplier!\n");
333 return -ENODEV;
334 }
335
336 fid = cpu_khz / (fsb * 100);
337 rfid = fid % 5;
338
339 if (rfid) {
340 if (rfid > 2)
341 fid += 5 - rfid;
342 else
343 fid -= rfid;
344 }
345 }
346
347 printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
348 fid / 10, fid % 10);
349
350 /* Set maximum FSB to FSB at boot time */
351 max_fsb = nforce2_fsb_read(1);
352
353 if (!max_fsb)
354 return -EIO;
355
356 if (!min_fsb)
357 min_fsb = max_fsb - NFORCE2_SAFE_DISTANCE;
358
359 if (min_fsb < NFORCE2_MIN_FSB)
360 min_fsb = NFORCE2_MIN_FSB;
361
362 /* cpuinfo and default policy values */
363 policy->cpuinfo.min_freq = min_fsb * fid * 100;
364 policy->cpuinfo.max_freq = max_fsb * fid * 100;
365 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
366 policy->cur = nforce2_get(policy->cpu);
367 policy->min = policy->cpuinfo.min_freq;
368 policy->max = policy->cpuinfo.max_freq;
369
370 return 0;
371}
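As a worked example of the multiplier detection above (illustrative numbers): cpu_khz = 1503000 and fsb = 133 give fid = 1503000 / 13300 = 113; rfid = 113 % 5 = 3, and since 3 > 2 the code rounds up by 5 - 3 = 2 to fid = 115, an 11.5x multiplier. The snap to a multiple of 5 reflects that real multipliers come in half steps.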
372
373static int nforce2_cpu_exit(struct cpufreq_policy *policy)
374{
375 return 0;
376}
377
378static struct cpufreq_driver nforce2_driver = {
379 .name = "nforce2",
380 .verify = nforce2_verify,
381 .target = nforce2_target,
382 .get = nforce2_get,
383 .init = nforce2_cpu_init,
384 .exit = nforce2_cpu_exit,
385 .owner = THIS_MODULE,
386};
387
388/**
389 * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic
390 *
391 * Detects nForce2 A2 and C1 stepping
392 *
393 */
394static int nforce2_detect_chipset(void)
395{
396 nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
397 PCI_DEVICE_ID_NVIDIA_NFORCE2,
398 PCI_ANY_ID, PCI_ANY_ID, NULL);
399
400 if (nforce2_dev == NULL)
401 return -ENODEV;
402
403 printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
404 nforce2_dev->revision);
405 printk(KERN_INFO PFX
406 "FSB changing may be unstable and can lead to "
407 "crashes and data loss.\n");
408
409 return 0;
410}
411
412/**
413 * nforce2_init - initializes the nForce2 CPUFreq driver
414 *
415 * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
416 * devices, -EINVAL on problems during initialization, and zero on
417 * success.
418 */
419static int __init nforce2_init(void)
420{
421 /* TODO: do we need to detect the processor? */
422
423 /* detect chipset */
424 if (nforce2_detect_chipset()) {
425 printk(KERN_INFO PFX "No nForce2 chipset.\n");
426 return -ENODEV;
427 }
428
429 return cpufreq_register_driver(&nforce2_driver);
430}
431
432/**
433 * nforce2_exit - unregisters cpufreq module
434 *
435 * Unregisters nForce2 FSB change support.
436 */
437static void __exit nforce2_exit(void)
438{
439 cpufreq_unregister_driver(&nforce2_driver);
440}
441
442module_init(nforce2_init);
443module_exit(nforce2_exit);
444
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2dafc5c38ae7..0a5bea9e3585 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -32,9 +32,6 @@
32 32
33#include <trace/events/power.h> 33#include <trace/events/power.h>
34 34
35#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
36 "cpufreq-core", msg)
37
38/** 35/**
39 * The "cpufreq driver" - the arch- or hardware-dependent low 36 * The "cpufreq driver" - the arch- or hardware-dependent low
40 * level driver of CPUFreq support, and its spinlock. This lock 37 * level driver of CPUFreq support, and its spinlock. This lock
@@ -181,93 +178,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
181 178
182 179
183/********************************************************************* 180/*********************************************************************
184 * UNIFIED DEBUG HELPERS *
185 *********************************************************************/
186#ifdef CONFIG_CPU_FREQ_DEBUG
187
188/* what part(s) of the CPUfreq subsystem are debugged? */
189static unsigned int debug;
190
191/* is the debug output ratelimit'ed using printk_ratelimit? User can
192 * set or modify this value.
193 */
194static unsigned int debug_ratelimit = 1;
195
196/* is the printk_ratelimit'ing enabled? It's enabled after a successful
197 * loading of a cpufreq driver, temporarily disabled when a new policy
198 * is set, and disabled upon cpufreq driver removal
199 */
200static unsigned int disable_ratelimit = 1;
201static DEFINE_SPINLOCK(disable_ratelimit_lock);
202
203static void cpufreq_debug_enable_ratelimit(void)
204{
205 unsigned long flags;
206
207 spin_lock_irqsave(&disable_ratelimit_lock, flags);
208 if (disable_ratelimit)
209 disable_ratelimit--;
210 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
211}
212
213static void cpufreq_debug_disable_ratelimit(void)
214{
215 unsigned long flags;
216
217 spin_lock_irqsave(&disable_ratelimit_lock, flags);
218 disable_ratelimit++;
219 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
220}
221
222void cpufreq_debug_printk(unsigned int type, const char *prefix,
223 const char *fmt, ...)
224{
225 char s[256];
226 va_list args;
227 unsigned int len;
228 unsigned long flags;
229
230 WARN_ON(!prefix);
231 if (type & debug) {
232 spin_lock_irqsave(&disable_ratelimit_lock, flags);
233 if (!disable_ratelimit && debug_ratelimit
234 && !printk_ratelimit()) {
235 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
236 return;
237 }
238 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
239
240 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
241
242 va_start(args, fmt);
243 len += vsnprintf(&s[len], (256 - len), fmt, args);
244 va_end(args);
245
246 printk(s);
247
248 WARN_ON(len < 5);
249 }
250}
251EXPORT_SYMBOL(cpufreq_debug_printk);
252
253
254module_param(debug, uint, 0644);
255MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
256 " 2 to debug drivers, and 4 to debug governors.");
257
258module_param(debug_ratelimit, uint, 0644);
259MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
260 " set to 0 to disable ratelimiting.");
261
262#else /* !CONFIG_CPU_FREQ_DEBUG */
263
264static inline void cpufreq_debug_enable_ratelimit(void) { return; }
265static inline void cpufreq_debug_disable_ratelimit(void) { return; }
266
267#endif /* CONFIG_CPU_FREQ_DEBUG */
268
269
270/*********************************************************************
271 * EXTERNALLY AFFECTING FREQUENCY CHANGES * 181 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
272 *********************************************************************/ 182 *********************************************************************/
273 183
@@ -291,7 +201,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
291 if (!l_p_j_ref_freq) { 201 if (!l_p_j_ref_freq) {
292 l_p_j_ref = loops_per_jiffy; 202 l_p_j_ref = loops_per_jiffy;
293 l_p_j_ref_freq = ci->old; 203 l_p_j_ref_freq = ci->old;
294 dprintk("saving %lu as reference value for loops_per_jiffy; " 204 pr_debug("saving %lu as reference value for loops_per_jiffy; "
295 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); 205 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
296 } 206 }
297 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || 207 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
@@ -299,7 +209,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
299 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { 209 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
300 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, 210 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
301 ci->new); 211 ci->new);
302 dprintk("scaling loops_per_jiffy to %lu " 212 pr_debug("scaling loops_per_jiffy to %lu "
303 "for frequency %u kHz\n", loops_per_jiffy, ci->new); 213 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
304 } 214 }
305} 215}
@@ -326,7 +236,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
326 BUG_ON(irqs_disabled()); 236 BUG_ON(irqs_disabled());
327 237
328 freqs->flags = cpufreq_driver->flags; 238 freqs->flags = cpufreq_driver->flags;
329 dprintk("notification %u of frequency transition to %u kHz\n", 239 pr_debug("notification %u of frequency transition to %u kHz\n",
330 state, freqs->new); 240 state, freqs->new);
331 241
332 policy = per_cpu(cpufreq_cpu_data, freqs->cpu); 242 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
@@ -340,7 +250,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
340 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 250 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
341 if ((policy) && (policy->cpu == freqs->cpu) && 251 if ((policy) && (policy->cpu == freqs->cpu) &&
342 (policy->cur) && (policy->cur != freqs->old)) { 252 (policy->cur) && (policy->cur != freqs->old)) {
343 dprintk("Warning: CPU frequency is" 253 pr_debug("Warning: CPU frequency is"
344 " %u, cpufreq assumed %u kHz.\n", 254 " %u, cpufreq assumed %u kHz.\n",
345 freqs->old, policy->cur); 255 freqs->old, policy->cur);
346 freqs->old = policy->cur; 256 freqs->old = policy->cur;
@@ -353,7 +263,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
353 263
354 case CPUFREQ_POSTCHANGE: 264 case CPUFREQ_POSTCHANGE:
355 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 265 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
356 dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, 266 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
357 (unsigned long)freqs->cpu); 267 (unsigned long)freqs->cpu);
358 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); 268 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
359 trace_cpu_frequency(freqs->new, freqs->cpu); 269 trace_cpu_frequency(freqs->new, freqs->cpu);
@@ -411,21 +321,14 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
411 t = __find_governor(str_governor); 321 t = __find_governor(str_governor);
412 322
413 if (t == NULL) { 323 if (t == NULL) {
414 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", 324 int ret;
415 str_governor);
416
417 if (name) {
418 int ret;
419 325
420 mutex_unlock(&cpufreq_governor_mutex); 326 mutex_unlock(&cpufreq_governor_mutex);
421 ret = request_module("%s", name); 327 ret = request_module("cpufreq_%s", str_governor);
422 mutex_lock(&cpufreq_governor_mutex); 328 mutex_lock(&cpufreq_governor_mutex);
423 329
424 if (ret == 0) 330 if (ret == 0)
425 t = __find_governor(str_governor); 331 t = __find_governor(str_governor);
426 }
427
428 kfree(name);
429 } 332 }
430 333
431 if (t != NULL) { 334 if (t != NULL) {
@@ -753,7 +656,7 @@ no_policy:
753static void cpufreq_sysfs_release(struct kobject *kobj) 656static void cpufreq_sysfs_release(struct kobject *kobj)
754{ 657{
755 struct cpufreq_policy *policy = to_policy(kobj); 658 struct cpufreq_policy *policy = to_policy(kobj);
756 dprintk("last reference is dropped\n"); 659 pr_debug("last reference is dropped\n");
757 complete(&policy->kobj_unregister); 660 complete(&policy->kobj_unregister);
758} 661}
759 662
@@ -788,7 +691,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
788 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); 691 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
789 if (gov) { 692 if (gov) {
790 policy->governor = gov; 693 policy->governor = gov;
791 dprintk("Restoring governor %s for cpu %d\n", 694 pr_debug("Restoring governor %s for cpu %d\n",
792 policy->governor->name, cpu); 695 policy->governor->name, cpu);
793 } 696 }
794#endif 697#endif
@@ -824,7 +727,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
824 per_cpu(cpufreq_cpu_data, cpu) = managed_policy; 727 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
825 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 728 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
826 729
827 dprintk("CPU already managed, adding link\n"); 730 pr_debug("CPU already managed, adding link\n");
828 ret = sysfs_create_link(&sys_dev->kobj, 731 ret = sysfs_create_link(&sys_dev->kobj,
829 &managed_policy->kobj, 732 &managed_policy->kobj,
830 "cpufreq"); 733 "cpufreq");
@@ -865,7 +768,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
865 if (!cpu_online(j)) 768 if (!cpu_online(j))
866 continue; 769 continue;
867 770
868 dprintk("CPU %u already managed, adding link\n", j); 771 pr_debug("CPU %u already managed, adding link\n", j);
869 managed_policy = cpufreq_cpu_get(cpu); 772 managed_policy = cpufreq_cpu_get(cpu);
870 cpu_sys_dev = get_cpu_sysdev(j); 773 cpu_sys_dev = get_cpu_sysdev(j);
871 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, 774 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
@@ -941,7 +844,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
941 policy->user_policy.governor = policy->governor; 844 policy->user_policy.governor = policy->governor;
942 845
943 if (ret) { 846 if (ret) {
944 dprintk("setting policy failed\n"); 847 pr_debug("setting policy failed\n");
945 if (cpufreq_driver->exit) 848 if (cpufreq_driver->exit)
946 cpufreq_driver->exit(policy); 849 cpufreq_driver->exit(policy);
947 } 850 }
@@ -977,8 +880,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
977 if (cpu_is_offline(cpu)) 880 if (cpu_is_offline(cpu))
978 return 0; 881 return 0;
979 882
980 cpufreq_debug_disable_ratelimit(); 883 pr_debug("adding CPU %u\n", cpu);
981 dprintk("adding CPU %u\n", cpu);
982 884
983#ifdef CONFIG_SMP 885#ifdef CONFIG_SMP
984 /* check whether a different CPU already registered this 886 /* check whether a different CPU already registered this
@@ -986,7 +888,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
986 policy = cpufreq_cpu_get(cpu); 888 policy = cpufreq_cpu_get(cpu);
987 if (unlikely(policy)) { 889 if (unlikely(policy)) {
988 cpufreq_cpu_put(policy); 890 cpufreq_cpu_put(policy);
989 cpufreq_debug_enable_ratelimit();
990 return 0; 891 return 0;
991 } 892 }
992#endif 893#endif
@@ -1037,7 +938,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
1037 */ 938 */
1038 ret = cpufreq_driver->init(policy); 939 ret = cpufreq_driver->init(policy);
1039 if (ret) { 940 if (ret) {
1040 dprintk("initialization failed\n"); 941 pr_debug("initialization failed\n");
1041 goto err_unlock_policy; 942 goto err_unlock_policy;
1042 } 943 }
1043 policy->user_policy.min = policy->min; 944 policy->user_policy.min = policy->min;
@@ -1063,8 +964,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
1063 964
1064 kobject_uevent(&policy->kobj, KOBJ_ADD); 965 kobject_uevent(&policy->kobj, KOBJ_ADD);
1065 module_put(cpufreq_driver->owner); 966 module_put(cpufreq_driver->owner);
1066 dprintk("initialization complete\n"); 967 pr_debug("initialization complete\n");
1067 cpufreq_debug_enable_ratelimit();
1068 968
1069 return 0; 969 return 0;
1070 970
@@ -1088,7 +988,6 @@ err_free_policy:
1088nomem_out: 988nomem_out:
1089 module_put(cpufreq_driver->owner); 989 module_put(cpufreq_driver->owner);
1090module_out: 990module_out:
1091 cpufreq_debug_enable_ratelimit();
1092 return ret; 991 return ret;
1093} 992}
1094 993
@@ -1112,15 +1011,13 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1112 unsigned int j; 1011 unsigned int j;
1113#endif 1012#endif
1114 1013
1115 cpufreq_debug_disable_ratelimit(); 1014 pr_debug("unregistering CPU %u\n", cpu);
1116 dprintk("unregistering CPU %u\n", cpu);
1117 1015
1118 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1016 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1119 data = per_cpu(cpufreq_cpu_data, cpu); 1017 data = per_cpu(cpufreq_cpu_data, cpu);
1120 1018
1121 if (!data) { 1019 if (!data) {
1122 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1020 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1123 cpufreq_debug_enable_ratelimit();
1124 unlock_policy_rwsem_write(cpu); 1021 unlock_policy_rwsem_write(cpu);
1125 return -EINVAL; 1022 return -EINVAL;
1126 } 1023 }
@@ -1132,12 +1029,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1132 * only need to unlink, put and exit 1029 * only need to unlink, put and exit
1133 */ 1030 */
1134 if (unlikely(cpu != data->cpu)) { 1031 if (unlikely(cpu != data->cpu)) {
1135 dprintk("removing link\n"); 1032 pr_debug("removing link\n");
1136 cpumask_clear_cpu(cpu, data->cpus); 1033 cpumask_clear_cpu(cpu, data->cpus);
1137 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1034 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1138 kobj = &sys_dev->kobj; 1035 kobj = &sys_dev->kobj;
1139 cpufreq_cpu_put(data); 1036 cpufreq_cpu_put(data);
1140 cpufreq_debug_enable_ratelimit();
1141 unlock_policy_rwsem_write(cpu); 1037 unlock_policy_rwsem_write(cpu);
1142 sysfs_remove_link(kobj, "cpufreq"); 1038 sysfs_remove_link(kobj, "cpufreq");
1143 return 0; 1039 return 0;
@@ -1170,7 +1066,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1170 for_each_cpu(j, data->cpus) { 1066 for_each_cpu(j, data->cpus) {
1171 if (j == cpu) 1067 if (j == cpu)
1172 continue; 1068 continue;
1173 dprintk("removing link for cpu %u\n", j); 1069 pr_debug("removing link for cpu %u\n", j);
1174#ifdef CONFIG_HOTPLUG_CPU 1070#ifdef CONFIG_HOTPLUG_CPU
1175 strncpy(per_cpu(cpufreq_cpu_governor, j), 1071 strncpy(per_cpu(cpufreq_cpu_governor, j),
1176 data->governor->name, CPUFREQ_NAME_LEN); 1072 data->governor->name, CPUFREQ_NAME_LEN);
@@ -1199,21 +1095,35 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1199 * not referenced anymore by anybody before we proceed with 1095 * not referenced anymore by anybody before we proceed with
1200 * unloading. 1096 * unloading.
1201 */ 1097 */
1202 dprintk("waiting for dropping of refcount\n"); 1098 pr_debug("waiting for dropping of refcount\n");
1203 wait_for_completion(cmp); 1099 wait_for_completion(cmp);
1204 dprintk("wait complete\n"); 1100 pr_debug("wait complete\n");
1205 1101
1206 lock_policy_rwsem_write(cpu); 1102 lock_policy_rwsem_write(cpu);
1207 if (cpufreq_driver->exit) 1103 if (cpufreq_driver->exit)
1208 cpufreq_driver->exit(data); 1104 cpufreq_driver->exit(data);
1209 unlock_policy_rwsem_write(cpu); 1105 unlock_policy_rwsem_write(cpu);
1210 1106
1107#ifdef CONFIG_HOTPLUG_CPU
1108 /* when the CPU which is the parent of the kobj is hotplugged
1109 * offline, check for siblings, and create cpufreq sysfs interface
1110 * and symlinks
1111 */
1112 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1113 /* first sibling now owns the new sysfs dir */
1114 cpumask_clear_cpu(cpu, data->cpus);
1115 cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
1116
1117 /* finally remove our own symlink */
1118 lock_policy_rwsem_write(cpu);
1119 __cpufreq_remove_dev(sys_dev);
1120 }
1121#endif
1122
1211 free_cpumask_var(data->related_cpus); 1123 free_cpumask_var(data->related_cpus);
1212 free_cpumask_var(data->cpus); 1124 free_cpumask_var(data->cpus);
1213 kfree(data); 1125 kfree(data);
1214 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1215 1126
1216 cpufreq_debug_enable_ratelimit();
1217 return 0; 1127 return 0;
1218} 1128}
1219 1129
@@ -1239,7 +1149,7 @@ static void handle_update(struct work_struct *work)
1239 struct cpufreq_policy *policy = 1149 struct cpufreq_policy *policy =
1240 container_of(work, struct cpufreq_policy, update); 1150 container_of(work, struct cpufreq_policy, update);
1241 unsigned int cpu = policy->cpu; 1151 unsigned int cpu = policy->cpu;
1242 dprintk("handle_update for cpu %u called\n", cpu); 1152 pr_debug("handle_update for cpu %u called\n", cpu);
1243 cpufreq_update_policy(cpu); 1153 cpufreq_update_policy(cpu);
1244} 1154}
1245 1155
@@ -1257,7 +1167,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1257{ 1167{
1258 struct cpufreq_freqs freqs; 1168 struct cpufreq_freqs freqs;
1259 1169
1260 dprintk("Warning: CPU frequency out of sync: cpufreq and timing " 1170 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1261 "core thinks of %u, is %u kHz.\n", old_freq, new_freq); 1171 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1262 1172
1263 freqs.cpu = cpu; 1173 freqs.cpu = cpu;
@@ -1360,7 +1270,7 @@ static int cpufreq_bp_suspend(void)
1360 int cpu = smp_processor_id(); 1270 int cpu = smp_processor_id();
1361 struct cpufreq_policy *cpu_policy; 1271 struct cpufreq_policy *cpu_policy;
1362 1272
1363 dprintk("suspending cpu %u\n", cpu); 1273 pr_debug("suspending cpu %u\n", cpu);
1364 1274
1365 /* If there's no policy for the boot CPU, we have nothing to do. */ 1275 /* If there's no policy for the boot CPU, we have nothing to do. */
1366 cpu_policy = cpufreq_cpu_get(cpu); 1276 cpu_policy = cpufreq_cpu_get(cpu);
@@ -1398,7 +1308,7 @@ static void cpufreq_bp_resume(void)
1398 int cpu = smp_processor_id(); 1308 int cpu = smp_processor_id();
1399 struct cpufreq_policy *cpu_policy; 1309 struct cpufreq_policy *cpu_policy;
1400 1310
1401 dprintk("resuming cpu %u\n", cpu); 1311 pr_debug("resuming cpu %u\n", cpu);
1402 1312
1403 /* If there's no policy for the boot CPU, we have nothing to do. */ 1313 /* If there's no policy for the boot CPU, we have nothing to do. */
1404 cpu_policy = cpufreq_cpu_get(cpu); 1314 cpu_policy = cpufreq_cpu_get(cpu);
@@ -1510,7 +1420,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1510{ 1420{
1511 int retval = -EINVAL; 1421 int retval = -EINVAL;
1512 1422
1513 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, 1423 pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1514 target_freq, relation); 1424 target_freq, relation);
1515 if (cpu_online(policy->cpu) && cpufreq_driver->target) 1425 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1516 retval = cpufreq_driver->target(policy, target_freq, relation); 1426 retval = cpufreq_driver->target(policy, target_freq, relation);
@@ -1596,7 +1506,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
1596 if (!try_module_get(policy->governor->owner)) 1506 if (!try_module_get(policy->governor->owner))
1597 return -EINVAL; 1507 return -EINVAL;
1598 1508
1599 dprintk("__cpufreq_governor for CPU %u, event %u\n", 1509 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1600 policy->cpu, event); 1510 policy->cpu, event);
1601 ret = policy->governor->governor(policy, event); 1511 ret = policy->governor->governor(policy, event);
1602 1512
@@ -1697,8 +1607,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1697{ 1607{
1698 int ret = 0; 1608 int ret = 0;
1699 1609
1700 cpufreq_debug_disable_ratelimit(); 1610 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1701 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1702 policy->min, policy->max); 1611 policy->min, policy->max);
1703 1612
1704 memcpy(&policy->cpuinfo, &data->cpuinfo, 1613 memcpy(&policy->cpuinfo, &data->cpuinfo,
@@ -1735,19 +1644,19 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1735 data->min = policy->min; 1644 data->min = policy->min;
1736 data->max = policy->max; 1645 data->max = policy->max;
1737 1646
1738 dprintk("new min and max freqs are %u - %u kHz\n", 1647 pr_debug("new min and max freqs are %u - %u kHz\n",
1739 data->min, data->max); 1648 data->min, data->max);
1740 1649
1741 if (cpufreq_driver->setpolicy) { 1650 if (cpufreq_driver->setpolicy) {
1742 data->policy = policy->policy; 1651 data->policy = policy->policy;
1743 dprintk("setting range\n"); 1652 pr_debug("setting range\n");
1744 ret = cpufreq_driver->setpolicy(policy); 1653 ret = cpufreq_driver->setpolicy(policy);
1745 } else { 1654 } else {
1746 if (policy->governor != data->governor) { 1655 if (policy->governor != data->governor) {
1747 /* save old, working values */ 1656 /* save old, working values */
1748 struct cpufreq_governor *old_gov = data->governor; 1657 struct cpufreq_governor *old_gov = data->governor;
1749 1658
1750 dprintk("governor switch\n"); 1659 pr_debug("governor switch\n");
1751 1660
1752 /* end old governor */ 1661 /* end old governor */
1753 if (data->governor) 1662 if (data->governor)
@@ -1757,7 +1666,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1757 data->governor = policy->governor; 1666 data->governor = policy->governor;
1758 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { 1667 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1759 /* new governor failed, so re-start old one */ 1668 /* new governor failed, so re-start old one */
1760 dprintk("starting governor %s failed\n", 1669 pr_debug("starting governor %s failed\n",
1761 data->governor->name); 1670 data->governor->name);
1762 if (old_gov) { 1671 if (old_gov) {
1763 data->governor = old_gov; 1672 data->governor = old_gov;
@@ -1769,12 +1678,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1769 } 1678 }
1770 /* might be a policy change, too, so fall through */ 1679 /* might be a policy change, too, so fall through */
1771 } 1680 }
1772 dprintk("governor: change or update limits\n"); 1681 pr_debug("governor: change or update limits\n");
1773 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); 1682 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1774 } 1683 }
1775 1684
1776error_out: 1685error_out:
1777 cpufreq_debug_enable_ratelimit();
1778 return ret; 1686 return ret;
1779} 1687}
1780 1688
@@ -1801,7 +1709,7 @@ int cpufreq_update_policy(unsigned int cpu)
1801 goto fail; 1709 goto fail;
1802 } 1710 }
1803 1711
1804 dprintk("updating policy for CPU %u\n", cpu); 1712 pr_debug("updating policy for CPU %u\n", cpu);
1805 memcpy(&policy, data, sizeof(struct cpufreq_policy)); 1713 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1806 policy.min = data->user_policy.min; 1714 policy.min = data->user_policy.min;
1807 policy.max = data->user_policy.max; 1715 policy.max = data->user_policy.max;
@@ -1813,7 +1721,7 @@ int cpufreq_update_policy(unsigned int cpu)
1813 if (cpufreq_driver->get) { 1721 if (cpufreq_driver->get) {
1814 policy.cur = cpufreq_driver->get(cpu); 1722 policy.cur = cpufreq_driver->get(cpu);
1815 if (!data->cur) { 1723 if (!data->cur) {
1816 dprintk("Driver did not initialize current freq"); 1724 pr_debug("Driver did not initialize current freq");
1817 data->cur = policy.cur; 1725 data->cur = policy.cur;
1818 } else { 1726 } else {
1819 if (data->cur != policy.cur) 1727 if (data->cur != policy.cur)
@@ -1889,7 +1797,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1889 ((!driver_data->setpolicy) && (!driver_data->target))) 1797 ((!driver_data->setpolicy) && (!driver_data->target)))
1890 return -EINVAL; 1798 return -EINVAL;
1891 1799
1892 dprintk("trying to register driver %s\n", driver_data->name); 1800 pr_debug("trying to register driver %s\n", driver_data->name);
1893 1801
1894 if (driver_data->setpolicy) 1802 if (driver_data->setpolicy)
1895 driver_data->flags |= CPUFREQ_CONST_LOOPS; 1803 driver_data->flags |= CPUFREQ_CONST_LOOPS;
@@ -1920,15 +1828,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1920 1828
1921 /* if all ->init() calls failed, unregister */ 1829 /* if all ->init() calls failed, unregister */
1922 if (ret) { 1830 if (ret) {
1923 dprintk("no CPU initialized for driver %s\n", 1831 pr_debug("no CPU initialized for driver %s\n",
1924 driver_data->name); 1832 driver_data->name);
1925 goto err_sysdev_unreg; 1833 goto err_sysdev_unreg;
1926 } 1834 }
1927 } 1835 }
1928 1836
1929 register_hotcpu_notifier(&cpufreq_cpu_notifier); 1837 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1930 dprintk("driver %s up and running\n", driver_data->name); 1838 pr_debug("driver %s up and running\n", driver_data->name);
1931 cpufreq_debug_enable_ratelimit();
1932 1839
1933 return 0; 1840 return 0;
1934err_sysdev_unreg: 1841err_sysdev_unreg:
@@ -1955,14 +1862,10 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1955{ 1862{
1956 unsigned long flags; 1863 unsigned long flags;
1957 1864
1958 cpufreq_debug_disable_ratelimit(); 1865 if (!cpufreq_driver || (driver != cpufreq_driver))
1959
1960 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1961 cpufreq_debug_enable_ratelimit();
1962 return -EINVAL; 1866 return -EINVAL;
1963 }
1964 1867
1965 dprintk("unregistering driver %s\n", driver->name); 1868 pr_debug("unregistering driver %s\n", driver->name);
1966 1869
1967 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); 1870 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1968 unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 1871 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
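The dprintk() to pr_debug() conversion throughout this file replaces the private CONFIG_CPU_FREQ_DEBUG machinery (removed above) with the kernel's generic dynamic debug facility. A minimal sketch of the resulting pattern (illustrative; pr_fmt is the standard way to keep a per-subsystem prefix):

/* must be defined before any includes so pr_debug() output is prefixed */
#define pr_fmt(fmt) "cpufreq-core: " fmt

#include <linux/kernel.h>

static void example(unsigned int cpu)
{
	/* compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is set; with
	 * dynamic debug, individual call sites are switched on at run time
	 * through /sys/kernel/debug/dynamic_debug/control */
	pr_debug("adding CPU %u\n", cpu);
}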
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index 7e2e515087f8..f13a8a9af6a1 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -15,9 +15,6 @@
15#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18#define dprintk(msg...) \
19 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg)
20
21 18
22static int cpufreq_governor_performance(struct cpufreq_policy *policy, 19static int cpufreq_governor_performance(struct cpufreq_policy *policy,
23 unsigned int event) 20 unsigned int event)
@@ -25,7 +22,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
25 switch (event) { 22 switch (event) {
26 case CPUFREQ_GOV_START: 23 case CPUFREQ_GOV_START:
27 case CPUFREQ_GOV_LIMITS: 24 case CPUFREQ_GOV_LIMITS:
28 dprintk("setting to %u kHz because of event %u\n", 25 pr_debug("setting to %u kHz because of event %u\n",
29 policy->max, event); 26 policy->max, event);
30 __cpufreq_driver_target(policy, policy->max, 27 __cpufreq_driver_target(policy, policy->max,
31 CPUFREQ_RELATION_H); 28 CPUFREQ_RELATION_H);
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index e6db5faf3eb1..4c2eb512f2bc 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -15,16 +15,13 @@
15#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18#define dprintk(msg...) \
19 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg)
20
21static int cpufreq_governor_powersave(struct cpufreq_policy *policy, 18static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
22 unsigned int event) 19 unsigned int event)
23{ 20{
24 switch (event) { 21 switch (event) {
25 case CPUFREQ_GOV_START: 22 case CPUFREQ_GOV_START:
26 case CPUFREQ_GOV_LIMITS: 23 case CPUFREQ_GOV_LIMITS:
27 dprintk("setting to %u kHz because of event %u\n", 24 pr_debug("setting to %u kHz because of event %u\n",
28 policy->min, event); 25 policy->min, event);
29 __cpufreq_driver_target(policy, policy->min, 26 __cpufreq_driver_target(policy, policy->min,
30 CPUFREQ_RELATION_L); 27 CPUFREQ_RELATION_L);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 00d73fc8e4e2..b60a4c263686 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
165 return -1; 165 return -1;
166} 166}
167 167
168/* should be called late in the CPU removal sequence so that the stats
169 * memory is still available in case someone tries to use it.
170 */
168static void cpufreq_stats_free_table(unsigned int cpu) 171static void cpufreq_stats_free_table(unsigned int cpu)
169{ 172{
170 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); 173 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
171 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
172 if (policy && policy->cpu == cpu)
173 sysfs_remove_group(&policy->kobj, &stats_attr_group);
174 if (stat) { 174 if (stat) {
175 kfree(stat->time_in_state); 175 kfree(stat->time_in_state);
176 kfree(stat); 176 kfree(stat);
177 } 177 }
178 per_cpu(cpufreq_stats_table, cpu) = NULL; 178 per_cpu(cpufreq_stats_table, cpu) = NULL;
179}
180
181/* must be called early in the CPU removal sequence (before
182 * cpufreq_remove_dev) so that policy is still valid.
183 */
184static void cpufreq_stats_free_sysfs(unsigned int cpu)
185{
186 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
187 if (policy && policy->cpu == cpu)
188 sysfs_remove_group(&policy->kobj, &stats_attr_group);
179 if (policy) 189 if (policy)
180 cpufreq_cpu_put(policy); 190 cpufreq_cpu_put(policy);
181} 191}
@@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
316 case CPU_ONLINE_FROZEN: 326 case CPU_ONLINE_FROZEN:
317 cpufreq_update_policy(cpu); 327 cpufreq_update_policy(cpu);
318 break; 328 break;
329 case CPU_DOWN_PREPARE:
330 cpufreq_stats_free_sysfs(cpu);
331 break;
319 case CPU_DEAD: 332 case CPU_DEAD:
320 case CPU_DEAD_FROZEN: 333 case CPU_DEAD_FROZEN:
321 cpufreq_stats_free_table(cpu); 334 cpufreq_stats_free_table(cpu);
@@ -324,9 +337,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
324 return NOTIFY_OK; 337 return NOTIFY_OK;
325} 338}
326 339
327static struct notifier_block cpufreq_stat_cpu_notifier __refdata = 340/* priority=1 so this will get called before cpufreq_remove_dev */
328{ 341static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
329 .notifier_call = cpufreq_stat_cpu_callback, 342 .notifier_call = cpufreq_stat_cpu_callback,
343 .priority = 1,
330}; 344};
331 345
332static struct notifier_block notifier_policy_block = { 346static struct notifier_block notifier_policy_block = {
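The priority = 1 added above makes the stats callback run ahead of default priority-0 hotplug notifiers, so cpufreq_stats_free_sysfs() still sees a valid policy on CPU_DOWN_PREPARE. A minimal sketch of the ordering mechanism (illustrative, not the actual registration code):

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int early_cb(struct notifier_block *nb, unsigned long action,
		    void *hcpu)
{
	/* runs before priority-0 callbacks for the same hotplug event */
	return NOTIFY_OK;
}

static struct notifier_block early_nb = {
	.notifier_call = early_cb,
	.priority = 1,	/* higher-priority notifiers are called first */
};

static int __init example_init(void)
{
	register_hotcpu_notifier(&early_nb);
	return 0;
}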
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 66d2d1d6c80f..f231015904c0 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -37,9 +37,6 @@ static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
37static DEFINE_MUTEX(userspace_mutex); 37static DEFINE_MUTEX(userspace_mutex);
38static int cpus_using_userspace_governor; 38static int cpus_using_userspace_governor;
39 39
40#define dprintk(msg...) \
41 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
42
43/* keep track of frequency transitions */ 40/* keep track of frequency transitions */
44static int 41static int
45userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 42userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -50,7 +47,7 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
50 if (!per_cpu(cpu_is_managed, freq->cpu)) 47 if (!per_cpu(cpu_is_managed, freq->cpu))
51 return 0; 48 return 0;
52 49
53 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", 50 pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
54 freq->cpu, freq->new); 51 freq->cpu, freq->new);
55 per_cpu(cpu_cur_freq, freq->cpu) = freq->new; 52 per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
56 53
@@ -73,7 +70,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
73{ 70{
74 int ret = -EINVAL; 71 int ret = -EINVAL;
75 72
76 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 73 pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
77 74
78 mutex_lock(&userspace_mutex); 75 mutex_lock(&userspace_mutex);
79 if (!per_cpu(cpu_is_managed, policy->cpu)) 76 if (!per_cpu(cpu_is_managed, policy->cpu))
@@ -134,7 +131,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
134 per_cpu(cpu_max_freq, cpu) = policy->max; 131 per_cpu(cpu_max_freq, cpu) = policy->max;
135 per_cpu(cpu_cur_freq, cpu) = policy->cur; 132 per_cpu(cpu_cur_freq, cpu) = policy->cur;
136 per_cpu(cpu_set_freq, cpu) = policy->cur; 133 per_cpu(cpu_set_freq, cpu) = policy->cur;
137 dprintk("managing cpu %u started " 134 pr_debug("managing cpu %u started "
138 "(%u - %u kHz, currently %u kHz)\n", 135 "(%u - %u kHz, currently %u kHz)\n",
139 cpu, 136 cpu,
140 per_cpu(cpu_min_freq, cpu), 137 per_cpu(cpu_min_freq, cpu),
@@ -156,12 +153,12 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
156 per_cpu(cpu_min_freq, cpu) = 0; 153 per_cpu(cpu_min_freq, cpu) = 0;
157 per_cpu(cpu_max_freq, cpu) = 0; 154 per_cpu(cpu_max_freq, cpu) = 0;
158 per_cpu(cpu_set_freq, cpu) = 0; 155 per_cpu(cpu_set_freq, cpu) = 0;
159 dprintk("managing cpu %u stopped\n", cpu); 156 pr_debug("managing cpu %u stopped\n", cpu);
160 mutex_unlock(&userspace_mutex); 157 mutex_unlock(&userspace_mutex);
161 break; 158 break;
162 case CPUFREQ_GOV_LIMITS: 159 case CPUFREQ_GOV_LIMITS:
163 mutex_lock(&userspace_mutex); 160 mutex_lock(&userspace_mutex);
164 dprintk("limit event for cpu %u: %u - %u kHz, " 161 pr_debug("limit event for cpu %u: %u - %u kHz, "
165 "currently %u kHz, last set to %u kHz\n", 162 "currently %u kHz, last set to %u kHz\n",
166 cpu, policy->min, policy->max, 163 cpu, policy->min, policy->max,
167 per_cpu(cpu_cur_freq, cpu), 164 per_cpu(cpu_cur_freq, cpu),
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
new file mode 100644
index 000000000000..35a257dd4bb7
--- /dev/null
+++ b/drivers/cpufreq/e_powersaver.c
@@ -0,0 +1,367 @@
1/*
2 * Based on documentation provided by Dave Jones. Thanks!
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/cpufreq.h>
13#include <linux/ioport.h>
14#include <linux/slab.h>
15#include <linux/timex.h>
16#include <linux/io.h>
17#include <linux/delay.h>
18
19#include <asm/msr.h>
20#include <asm/tsc.h>
21
22#define EPS_BRAND_C7M 0
23#define EPS_BRAND_C7 1
24#define EPS_BRAND_EDEN 2
25#define EPS_BRAND_C3 3
26#define EPS_BRAND_C7D 4
27
28struct eps_cpu_data {
29 u32 fsb;
30 struct cpufreq_frequency_table freq_table[];
31};
32
33static struct eps_cpu_data *eps_cpu[NR_CPUS];
34
35
36static unsigned int eps_get(unsigned int cpu)
37{
38 struct eps_cpu_data *centaur;
39 u32 lo, hi;
40
41 if (cpu)
42 return 0;
43 centaur = eps_cpu[cpu];
44 if (centaur == NULL)
45 return 0;
46
47 /* Return current frequency */
48 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
49 return centaur->fsb * ((lo >> 8) & 0xff);
50}
51
52static int eps_set_state(struct eps_cpu_data *centaur,
53 unsigned int cpu,
54 u32 dest_state)
55{
56 struct cpufreq_freqs freqs;
57 u32 lo, hi;
58 int err = 0;
59 int i;
60
61 freqs.old = eps_get(cpu);
62 freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
63 freqs.cpu = cpu;
64 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
65
66 /* Wait while CPU is busy */
67 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
68 i = 0;
69 while (lo & ((1 << 16) | (1 << 17))) {
70 udelay(16);
71 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
72 i++;
73 if (unlikely(i > 64)) {
74 err = -ENODEV;
75 goto postchange;
76 }
77 }
78 /* Set new multiplier and voltage */
79 wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0);
80 /* Wait until transition end */
81 i = 0;
82 do {
83 udelay(16);
84 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
85 i++;
86 if (unlikely(i > 64)) {
87 err = -ENODEV;
88 goto postchange;
89 }
90 } while (lo & ((1 << 16) | (1 << 17)));
91
92 /* Return current frequency */
93postchange:
94 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
95 freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
96
97#ifdef DEBUG
98 {
99 u8 current_multiplier, current_voltage;
100
101 /* Print voltage and multiplier */
102 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
103 current_voltage = lo & 0xff;
104 printk(KERN_INFO "eps: Current voltage = %dmV\n",
105 current_voltage * 16 + 700);
106 current_multiplier = (lo >> 8) & 0xff;
107 printk(KERN_INFO "eps: Current multiplier = %d\n",
108 current_multiplier);
109 }
110#endif
111 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
112 return err;
113}
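Each polling loop above gives up after 64 iterations of udelay(16), i.e. after roughly 64 * 16 = 1024 us (about 1 ms) of waiting for the busy bits (16 and 17) of MSR_IA32_PERF_STATUS to clear, and reports -ENODEV on timeout.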
114
115static int eps_target(struct cpufreq_policy *policy,
116 unsigned int target_freq,
117 unsigned int relation)
118{
119 struct eps_cpu_data *centaur;
120 unsigned int newstate = 0;
121 unsigned int cpu = policy->cpu;
122 unsigned int dest_state;
123 int ret;
124
125 if (unlikely(eps_cpu[cpu] == NULL))
126 return -ENODEV;
127 centaur = eps_cpu[cpu];
128
129 if (unlikely(cpufreq_frequency_table_target(policy,
130 &eps_cpu[cpu]->freq_table[0],
131 target_freq,
132 relation,
133 &newstate))) {
134 return -EINVAL;
135 }
136
137 /* Make frequency transition */
138 dest_state = centaur->freq_table[newstate].index & 0xffff;
139 ret = eps_set_state(centaur, cpu, dest_state);
140 if (ret)
141 printk(KERN_ERR "eps: Timeout!\n");
142 return ret;
143}
144
145static int eps_verify(struct cpufreq_policy *policy)
146{
147 return cpufreq_frequency_table_verify(policy,
148 &eps_cpu[policy->cpu]->freq_table[0]);
149}
150
151static int eps_cpu_init(struct cpufreq_policy *policy)
152{
153 unsigned int i;
154 u32 lo, hi;
155 u64 val;
156 u8 current_multiplier, current_voltage;
157 u8 max_multiplier, max_voltage;
158 u8 min_multiplier, min_voltage;
159 u8 brand = 0;
160 u32 fsb;
161 struct eps_cpu_data *centaur;
162 struct cpuinfo_x86 *c = &cpu_data(0);
163 struct cpufreq_frequency_table *f_table;
164 int k, step, voltage;
165 int ret;
166 int states;
167
168 if (policy->cpu != 0)
169 return -ENODEV;
170
171 /* Check brand */
172 printk(KERN_INFO "eps: Detected VIA ");
173
174 switch (c->x86_model) {
175 case 10:
176 rdmsr(0x1153, lo, hi);
177 brand = (((lo >> 2) ^ lo) >> 18) & 3;
178 printk(KERN_CONT "Model A ");
179 break;
180 case 13:
181 rdmsr(0x1154, lo, hi);
182 brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
183 printk(KERN_CONT "Model D ");
184 break;
185 }
186
187 switch (brand) {
188 case EPS_BRAND_C7M:
189 printk(KERN_CONT "C7-M\n");
190 break;
191 case EPS_BRAND_C7:
192 printk(KERN_CONT "C7\n");
193 break;
194 case EPS_BRAND_EDEN:
195 printk(KERN_CONT "Eden\n");
196 break;
197 case EPS_BRAND_C7D:
198 printk(KERN_CONT "C7-D\n");
199 break;
200 case EPS_BRAND_C3:
201 printk(KERN_CONT "C3\n");
202 return -ENODEV;
203 break;
204 }
205 /* Enable Enhanced PowerSaver */
206 rdmsrl(MSR_IA32_MISC_ENABLE, val);
207 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
208 val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
209 wrmsrl(MSR_IA32_MISC_ENABLE, val);
210 /* Can be locked at 0 */
211 rdmsrl(MSR_IA32_MISC_ENABLE, val);
212 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
213 printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
214 return -ENODEV;
215 }
216 }
217
218 /* Print voltage and multiplier */
219 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
220 current_voltage = lo & 0xff;
221 printk(KERN_INFO "eps: Current voltage = %dmV\n",
222 current_voltage * 16 + 700);
223 current_multiplier = (lo >> 8) & 0xff;
224 printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
225
226 /* Print limits */
227 max_voltage = hi & 0xff;
228 printk(KERN_INFO "eps: Highest voltage = %dmV\n",
229 max_voltage * 16 + 700);
230 max_multiplier = (hi >> 8) & 0xff;
231 printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
232 min_voltage = (hi >> 16) & 0xff;
233 printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
234 min_voltage * 16 + 700);
235 min_multiplier = (hi >> 24) & 0xff;
236 printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
237
238 /* Sanity checks */
239 if (current_multiplier == 0 || max_multiplier == 0
240 || min_multiplier == 0)
241 return -EINVAL;
242 if (current_multiplier > max_multiplier
243 || max_multiplier <= min_multiplier)
244 return -EINVAL;
245 if (current_voltage > 0x1f || max_voltage > 0x1f)
246 return -EINVAL;
247 if (max_voltage < min_voltage)
248 return -EINVAL;
249
250 /* Calc FSB speed */
251 fsb = cpu_khz / current_multiplier;
252 /* Calc number of p-states supported */
253 if (brand == EPS_BRAND_C7M)
254 states = max_multiplier - min_multiplier + 1;
255 else
256 states = 2;
257
258 /* Allocate private data and frequency table for current cpu */
259 centaur = kzalloc(sizeof(struct eps_cpu_data)
260 + (states + 1) * sizeof(struct cpufreq_frequency_table),
261 GFP_KERNEL);
262 if (!centaur)
263 return -ENOMEM;
264 eps_cpu[0] = centaur;
265
266 /* Copy basic values */
267 centaur->fsb = fsb;
268
269 /* Fill frequency and MSR value table */
270 f_table = &centaur->freq_table[0];
271 if (brand != EPS_BRAND_C7M) {
272 f_table[0].frequency = fsb * min_multiplier;
273 f_table[0].index = (min_multiplier << 8) | min_voltage;
274 f_table[1].frequency = fsb * max_multiplier;
275 f_table[1].index = (max_multiplier << 8) | max_voltage;
276 f_table[2].frequency = CPUFREQ_TABLE_END;
277 } else {
278 k = 0;
279 step = ((max_voltage - min_voltage) * 256)
280 / (max_multiplier - min_multiplier);
281 for (i = min_multiplier; i <= max_multiplier; i++) {
282 voltage = (k * step) / 256 + min_voltage;
283 f_table[k].frequency = fsb * i;
284 f_table[k].index = (i << 8) | voltage;
285 k++;
286 }
287 f_table[k].frequency = CPUFREQ_TABLE_END;
288 }
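	/*
	 * Worked example of the 8.8 fixed-point interpolation above
	 * (illustrative values, not taken from any datasheet): with
	 * min_multiplier = 4, max_multiplier = 8, min_voltage = 0x04 and
	 * max_voltage = 0x0c, step = (8 * 256) / 4 = 512, so successive
	 * entries get VIDs 0x04, 0x06, 0x08, 0x0a, 0x0c - the voltage
	 * rises linearly with the multiplier.
	 */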
289
290 policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
291 policy->cur = fsb * current_multiplier;
292
293 ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
294 if (ret) {
295 kfree(centaur);
296 return ret;
297 }
298
299 cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
300 return 0;
301}
302
303static int eps_cpu_exit(struct cpufreq_policy *policy)
304{
305 unsigned int cpu = policy->cpu;
306 struct eps_cpu_data *centaur;
307 u32 lo, hi;
308
309 if (eps_cpu[cpu] == NULL)
310 return -ENODEV;
311 centaur = eps_cpu[cpu];
312
313 /* Get max frequency */
314 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
315 /* Set max frequency */
316 eps_set_state(centaur, cpu, hi & 0xffff);
317 /* Bye */
318 cpufreq_frequency_table_put_attr(policy->cpu);
319 kfree(eps_cpu[cpu]);
320 eps_cpu[cpu] = NULL;
321 return 0;
322}
323
324static struct freq_attr *eps_attr[] = {
325 &cpufreq_freq_attr_scaling_available_freqs,
326 NULL,
327};
328
329static struct cpufreq_driver eps_driver = {
330 .verify = eps_verify,
331 .target = eps_target,
332 .init = eps_cpu_init,
333 .exit = eps_cpu_exit,
334 .get = eps_get,
335 .name = "e_powersaver",
336 .owner = THIS_MODULE,
337 .attr = eps_attr,
338};
339
340static int __init eps_init(void)
341{
342 struct cpuinfo_x86 *c = &cpu_data(0);
343
344 /* This driver will work only on Centaur C7 processors with
345 * Enhanced SpeedStep/PowerSaver registers */
346 if (c->x86_vendor != X86_VENDOR_CENTAUR
347 || c->x86 != 6 || c->x86_model < 10)
348 return -ENODEV;
349 if (!cpu_has(c, X86_FEATURE_EST))
350 return -ENODEV;
351
352 if (cpufreq_register_driver(&eps_driver))
353 return -EINVAL;
354 return 0;
355}
356
357static void __exit eps_exit(void)
358{
359 cpufreq_unregister_driver(&eps_driver);
360}
361
362MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>");
 363MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPUs.");
364MODULE_LICENSE("GPL");
365
366module_init(eps_init);
367module_exit(eps_exit);
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
new file mode 100644
index 000000000000..c587db472a75
--- /dev/null
+++ b/drivers/cpufreq/elanfreq.c
@@ -0,0 +1,309 @@
1/*
2 * elanfreq: cpufreq driver for the AMD ELAN family
3 *
4 * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
5 *
6 * Parts of this code are (c) Sven Geggus <sven@geggus.net>
7 *
8 * All Rights Reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22
23#include <linux/delay.h>
24#include <linux/cpufreq.h>
25
26#include <asm/msr.h>
27#include <linux/timex.h>
28#include <linux/io.h>
29
30#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
31#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
32
33/* Module parameter */
34static int max_freq;
35
36struct s_elan_multiplier {
37 int clock; /* frequency in kHz */
38 int val40h; /* PMU Force Mode register */
39 int val80h; /* CPU Clock Speed Register */
40};
41
42/*
43 * It is important that the frequencies
44 * are listed in ascending order here!
45 */
46static struct s_elan_multiplier elan_multiplier[] = {
47 {1000, 0x02, 0x18},
48 {2000, 0x02, 0x10},
49 {4000, 0x02, 0x08},
50 {8000, 0x00, 0x00},
51 {16000, 0x00, 0x02},
52 {33000, 0x00, 0x04},
53 {66000, 0x01, 0x04},
54 {99000, 0x01, 0x05}
55};
56
57static struct cpufreq_frequency_table elanfreq_table[] = {
58 {0, 1000},
59 {1, 2000},
60 {2, 4000},
61 {3, 8000},
62 {4, 16000},
63 {5, 33000},
64 {6, 66000},
65 {7, 99000},
66 {0, CPUFREQ_TABLE_END},
67};
68
69
70/**
71 * elanfreq_get_cpu_frequency: determine current cpu speed
72 *
73 * Finds out at which frequency the CPU of the Elan SOC runs
74 * at the moment. Frequencies from 1 to 33 MHz are generated
75 * the normal way, 66 and 99 MHz are called "Hyperspeed Mode"
  76 * and have the rest of the chip running at 33 MHz.
77 */
78
79static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
80{
81 u8 clockspeed_reg; /* Clock Speed Register */
82
83 local_irq_disable();
84 outb_p(0x80, REG_CSCIR);
85 clockspeed_reg = inb_p(REG_CSCDR);
86 local_irq_enable();
87
88 if ((clockspeed_reg & 0xE0) == 0xE0)
89 return 0;
90
91 /* Are we in CPU clock multiplied mode (66/99 MHz)? */
92 if ((clockspeed_reg & 0xE0) == 0xC0) {
93 if ((clockspeed_reg & 0x01) == 0)
94 return 66000;
95 else
96 return 99000;
97 }
98
99 /* 33 MHz is not 32 MHz... */
100 if ((clockspeed_reg & 0xE0) == 0xA0)
101 return 33000;
102
103 return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000;
104}
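/*
 * Decoding examples for the read above (illustrative register values):
 * 0xA0 reports 33 MHz, 0xC0 and 0xC1 report the 66/99 MHz hyperspeed
 * modes, and e.g. 0x40 takes the power-of-two path and yields
 * (1 << 2) * 1000 = 4000 kHz.
 */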
105
106
107/**
 108 * elanfreq_set_cpu_state: Change the CPU core frequency
 109 * @state: index into the elan_multiplier table selecting the
 110 * target frequency in kHz
 111 *
 112 * This function takes a table index and changes the CPU frequency
 113 * accordingly. Note that the frequency has to be checked by
 114 * elanfreq_verify() for correctness!
115 *
116 * There is no return value.
117 */
118
119static void elanfreq_set_cpu_state(unsigned int state)
120{
121 struct cpufreq_freqs freqs;
122
123 freqs.old = elanfreq_get_cpu_frequency(0);
124 freqs.new = elan_multiplier[state].clock;
 125 freqs.cpu = 0; /* elanfreq is a UP-only driver */
126
127 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
128
129 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
130 elan_multiplier[state].clock);
131
132
133 /*
134 * Access to the Elan's internal registers is indexed via
135 * 0x22: Chip Setup & Control Register Index Register (CSCI)
136 * 0x23: Chip Setup & Control Register Data Register (CSCD)
137 *
138 */
139
140 /*
141 * 0x40 is the Power Management Unit's Force Mode Register.
142 * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
143 */
144
145 local_irq_disable();
146 outb_p(0x40, REG_CSCIR); /* Disable hyperspeed mode */
147 outb_p(0x00, REG_CSCDR);
148 local_irq_enable(); /* wait till internal pipelines and */
149 udelay(1000); /* buffers have cleaned up */
150
151 local_irq_disable();
152
153 /* now, set the CPU clock speed register (0x80) */
154 outb_p(0x80, REG_CSCIR);
155 outb_p(elan_multiplier[state].val80h, REG_CSCDR);
156
157 /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
158 outb_p(0x40, REG_CSCIR);
159 outb_p(elan_multiplier[state].val40h, REG_CSCDR);
160 udelay(10000);
161 local_irq_enable();
162
163 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
164};
165
166
167/**
 168 * elanfreq_verify: test if frequency range is valid
169 * @policy: the policy to validate
170 *
171 * This function checks if a given frequency range in kHz is valid
172 * for the hardware supported by the driver.
173 */
174
175static int elanfreq_verify(struct cpufreq_policy *policy)
176{
177 return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
178}
179
180static int elanfreq_target(struct cpufreq_policy *policy,
181 unsigned int target_freq,
182 unsigned int relation)
183{
184 unsigned int newstate = 0;
185
186 if (cpufreq_frequency_table_target(policy, &elanfreq_table[0],
187 target_freq, relation, &newstate))
188 return -EINVAL;
189
190 elanfreq_set_cpu_state(newstate);
191
192 return 0;
193}
194
195
196/*
197 * Module init and exit code
198 */
199
200static int elanfreq_cpu_init(struct cpufreq_policy *policy)
201{
202 struct cpuinfo_x86 *c = &cpu_data(0);
203 unsigned int i;
204 int result;
205
206 /* capability check */
207 if ((c->x86_vendor != X86_VENDOR_AMD) ||
208 (c->x86 != 4) || (c->x86_model != 10))
209 return -ENODEV;
210
211 /* max freq */
212 if (!max_freq)
213 max_freq = elanfreq_get_cpu_frequency(0);
214
215 /* table init */
216 for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
217 if (elanfreq_table[i].frequency > max_freq)
218 elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
219 }
220
221 /* cpuinfo and default policy values */
222 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
223 policy->cur = elanfreq_get_cpu_frequency(0);
224
225 result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
226 if (result)
227 return result;
228
229 cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
230 return 0;
231}
232
233
234static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
235{
236 cpufreq_frequency_table_put_attr(policy->cpu);
237 return 0;
238}
239
240
241#ifndef MODULE
242/**
243 * elanfreq_setup - elanfreq command line parameter parsing
244 *
245 * elanfreq command line parameter. Use:
246 * elanfreq=66000
247 * to set the maximum CPU frequency to 66 MHz. Note that in
248 * case you do not give this boot parameter, the maximum
 249 * frequency will fall back to the _current_ CPU frequency, which
250 * might be lower. If you build this as a module, use the
251 * max_freq module parameter instead.
252 */
253static int __init elanfreq_setup(char *str)
254{
255 max_freq = simple_strtoul(str, &str, 0);
256 printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
257 return 1;
258}
259__setup("elanfreq=", elanfreq_setup);
260#endif
261
262
263static struct freq_attr *elanfreq_attr[] = {
264 &cpufreq_freq_attr_scaling_available_freqs,
265 NULL,
266};
267
268
269static struct cpufreq_driver elanfreq_driver = {
270 .get = elanfreq_get_cpu_frequency,
271 .verify = elanfreq_verify,
272 .target = elanfreq_target,
273 .init = elanfreq_cpu_init,
274 .exit = elanfreq_cpu_exit,
275 .name = "elanfreq",
276 .owner = THIS_MODULE,
277 .attr = elanfreq_attr,
278};
279
280
281static int __init elanfreq_init(void)
282{
283 struct cpuinfo_x86 *c = &cpu_data(0);
284
285 /* Test if we have the right hardware */
286 if ((c->x86_vendor != X86_VENDOR_AMD) ||
287 (c->x86 != 4) || (c->x86_model != 10)) {
288 printk(KERN_INFO "elanfreq: error: no Elan processor found!\n");
289 return -ENODEV;
290 }
291 return cpufreq_register_driver(&elanfreq_driver);
292}
293
294
295static void __exit elanfreq_exit(void)
296{
297 cpufreq_unregister_driver(&elanfreq_driver);
298}
299
300
301module_param(max_freq, int, 0444);
302
303MODULE_LICENSE("GPL");
304MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, "
305 "Sven Geggus <sven@geggus.net>");
306MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
307
308module_init(elanfreq_init);
309module_exit(elanfreq_exit);
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 05432216e224..90431cb92804 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -14,9 +14,6 @@
 #include <linux/init.h>
 #include <linux/cpufreq.h>
 
-#define dprintk(msg...) \
-	cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg)
-
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
@@ -31,11 +28,11 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		unsigned int freq = table[i].frequency;
 		if (freq == CPUFREQ_ENTRY_INVALID) {
-			dprintk("table entry %u is invalid, skipping\n", i);
+			pr_debug("table entry %u is invalid, skipping\n", i);
 
 			continue;
 		}
-		dprintk("table entry %u: %u kHz, %u index\n",
+		pr_debug("table entry %u: %u kHz, %u index\n",
 				i, freq, table[i].index);
 		if (freq < min_freq)
 			min_freq = freq;
@@ -61,7 +58,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 	unsigned int i;
 	unsigned int count = 0;
 
-	dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n",
+	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
 					policy->min, policy->max, policy->cpu);
 
 	if (!cpu_online(policy->cpu))
@@ -86,7 +83,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
 				     policy->cpuinfo.max_freq);
 
-	dprintk("verification lead to (%u - %u kHz) for cpu %u\n",
+	pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
 				policy->min, policy->max, policy->cpu);
 
 	return 0;
@@ -110,7 +107,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 	};
 	unsigned int i;
 
-	dprintk("request for target %u kHz (relation: %u) for cpu %u\n",
+	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
 					target_freq, relation, policy->cpu);
 
 	switch (relation) {
@@ -167,7 +164,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 	} else
 		*index = optimal.index;
 
-	dprintk("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
+	pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
 		table[*index].index);
 
 	return 0;
@@ -216,14 +213,14 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
 				      unsigned int cpu)
 {
-	dprintk("setting show_table for cpu %u to %p\n", cpu, table);
+	pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
 	per_cpu(cpufreq_show_table, cpu) = table;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
 
 void cpufreq_frequency_table_put_attr(unsigned int cpu)
 {
-	dprintk("clearing show_table for cpu %u\n", cpu);
+	pr_debug("clearing show_table for cpu %u\n", cpu);
 	per_cpu(cpufreq_show_table, cpu) = NULL;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
new file mode 100644
index 000000000000..ffe1f2c92ed3
--- /dev/null
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -0,0 +1,514 @@
1/*
2 * Cyrix MediaGX and NatSemi Geode Suspend Modulation
3 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
4 * (C) 2002 Hiroshi Miura <miura@da-cha.org>
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation
10 *
11 * The author(s) of this software shall not be held liable for damages
12 * of any nature resulting due to the use of this software. This
13 * software is provided AS-IS with no warranties.
14 *
15 * Theoretical note:
16 *
17 * (see Geode(tm) CS5530 manual (rev.4.1) page.56)
18 *
19 * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0
20 * are based on Suspend Modulation.
21 *
22 * Suspend Modulation works by asserting and de-asserting the SUSP# pin
23 * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
24 * the CPU enters an idle state. GX1 stops its core clock when SUSP# is
25 * asserted then power consumption is reduced.
26 *
27 * Suspend Modulation's OFF/ON duration are configurable
28 * with 'Suspend Modulation OFF Count Register'
29 * and 'Suspend Modulation ON Count Register'.
30 * These registers are 8bit counters that represent the number of
31 * 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF)
32 * to the processor.
33 *
34 * These counters define a ratio which is the effective frequency
35 * of operation of the system.
36 *
37 * OFF Count
38 * F_eff = Fgx * ----------------------
39 * OFF Count + ON Count
40 *
41 * 0 <= On Count, Off Count <= 255
42 *
43 * From these limits, we can get register values
44 *
45 * off_duration + on_duration <= MAX_DURATION
46 * on_duration = off_duration * (stock_freq - freq) / freq
47 *
48 * off_duration = (freq * DURATION) / stock_freq
49 * on_duration = DURATION - off_duration
50 *
51 *
52 *---------------------------------------------------------------------------
53 *
54 * ChangeLog:
55 * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
56 * - fix on/off register mistake
57 * - fix cpu_khz calc when it stops cpu modulation.
58 *
59 * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
60 * - rewrite for Cyrix MediaGX Cx5510/5520 and
61 * NatSemi Geode Cs5530(A).
62 *
63 * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com>
64 * - cs5530_mod patch for 2.4.19-rc1.
65 *
66 *---------------------------------------------------------------------------
67 *
68 * Todo
69 * Test on machines with 5510, 5530, 5530A
70 */
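/*
 * Worked example of the formulas above (illustrative numbers only):
 * on a 200 MHz GX1 (stock_freq = 200000 kHz) with max_duration = 255,
 * a request for 100000 kHz gives off_duration = (100000 * 255) / 200000
 * = 127 and on_duration = 255 - 127 = 128, for an effective frequency
 * of 200000 * 127 / 255 ~= 99608 kHz.
 */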
71
72/************************************************************************
73 * Suspend Modulation - Definitions *
74 ************************************************************************/
75
76#include <linux/kernel.h>
77#include <linux/module.h>
78#include <linux/init.h>
79#include <linux/smp.h>
80#include <linux/cpufreq.h>
81#include <linux/pci.h>
82#include <linux/errno.h>
83#include <linux/slab.h>
84
85#include <asm/processor-cyrix.h>
86
87/* PCI config registers, all at F0 */
88#define PCI_PMER1 0x80 /* power management enable register 1 */
89#define PCI_PMER2 0x81 /* power management enable register 2 */
90#define PCI_PMER3 0x82 /* power management enable register 3 */
  91#define PCI_IRQTC 0x8c /* irq speedup timer counter register: typical 2 to 4 ms */
  92#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100 ms */
93#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
94#define PCI_MODON 0x95 /* suspend modulation ON counter register */
95#define PCI_SUSCFG 0x96 /* suspend configuration register */
96
97/* PMER1 bits */
98#define GPM (1<<0) /* global power management */
99#define GIT (1<<1) /* globally enable PM device idle timers */
100#define GTR (1<<2) /* globally enable IO traps */
101#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
102#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
103
104/* SUSCFG bits */
105#define SUSMOD (1<<0) /* enable/disable suspend modulation */
106/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */
107#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
108 /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
109#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
110/* the below is supported only with cs5530A */
111#define PWRSVE_ISA (1<<3) /* stop ISA clock */
112#define PWRSVE (1<<4) /* active idle */
113
114struct gxfreq_params {
115 u8 on_duration;
116 u8 off_duration;
117 u8 pci_suscfg;
118 u8 pci_pmer1;
119 u8 pci_pmer2;
120 struct pci_dev *cs55x0;
121};
122
123static struct gxfreq_params *gx_params;
124static int stock_freq;
125
 126/* PCI bus clock, in kHz - defaults to 30000 (30 MHz) if cpu_khz is not available */
127static int pci_busclk;
128module_param(pci_busclk, int, 0444);
129
130/* maximum duration for which the cpu may be suspended
131 * (32us * MAX_DURATION). If no parameter is given, this defaults
132 * to 255.
133 * Note that this leads to a maximum of 8 ms(!) where the CPU clock
134 * is suspended -- processing power is just 0.39% of what it used to be,
135 * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
136static int max_duration = 255;
137module_param(max_duration, int, 0444);
138
139/* For the default policy, we want at least some processing power
140 * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV)
141 */
142#define POLICY_MIN_DIV 20
143
144
145/**
 146 * we can detect a core multiplier from dir0_lsb
147 * from GX1 datasheet p.56,
148 * MULT[3:0]:
149 * 0000 = SYSCLK multiplied by 4 (test only)
150 * 0001 = SYSCLK multiplied by 10
151 * 0010 = SYSCLK multiplied by 4
152 * 0011 = SYSCLK multiplied by 6
153 * 0100 = SYSCLK multiplied by 9
154 * 0101 = SYSCLK multiplied by 5
155 * 0110 = SYSCLK multiplied by 7
156 * 0111 = SYSCLK multiplied by 8
157 * of 33.3MHz
158 **/
159static int gx_freq_mult[16] = {
160 4, 10, 4, 6, 9, 5, 7, 8,
161 0, 0, 0, 0, 0, 0, 0, 0
162};
163
164
165/****************************************************************
166 * Low Level chipset interface *
167 ****************************************************************/
168static struct pci_device_id gx_chipset_tbl[] __initdata = {
169 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY), },
170 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
171 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
172 { 0, },
173};
174
175static void gx_write_byte(int reg, int value)
176{
177 pci_write_config_byte(gx_params->cs55x0, reg, value);
178}
179
180/**
181 * gx_detect_chipset:
182 *
183 **/
184static __init struct pci_dev *gx_detect_chipset(void)
185{
186 struct pci_dev *gx_pci = NULL;
187
188 /* check if CPU is a MediaGX or a Geode. */
189 if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
190 (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
191 pr_debug("error: no MediaGX/Geode processor found!\n");
192 return NULL;
193 }
194
195 /* detect which companion chip is used */
196 for_each_pci_dev(gx_pci) {
197 if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
198 return gx_pci;
199 }
200
201 pr_debug("error: no supported chipset found!\n");
202 return NULL;
203}
204
205/**
206 * gx_get_cpuspeed:
207 *
 208 * Finds the effective frequency at which the Cyrix MediaGX/NatSemi
 209 * Geode CPU currently runs.
210 */
211static unsigned int gx_get_cpuspeed(unsigned int cpu)
212{
213 if ((gx_params->pci_suscfg & SUSMOD) == 0)
214 return stock_freq;
215
216 return (stock_freq * gx_params->off_duration)
217 / (gx_params->on_duration + gx_params->off_duration);
218}
219
220/**
221 * gx_validate_speed:
 222 * round a requested speed (khz) to the nearest one the modulator can produce
223 *
224 **/
225
226static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
227 u8 *off_duration)
228{
229 unsigned int i;
230 u8 tmp_on, tmp_off;
231 int old_tmp_freq = stock_freq;
232 int tmp_freq;
233
234 *off_duration = 1;
235 *on_duration = 0;
236
237 for (i = max_duration; i > 0; i--) {
238 tmp_off = ((khz * i) / stock_freq) & 0xff;
239 tmp_on = i - tmp_off;
240 tmp_freq = (stock_freq * tmp_off) / i;
241 /* if this relation is closer to khz, use this. If it's equal,
242 * prefer it, too - lower latency */
243 if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) {
244 *on_duration = tmp_on;
245 *off_duration = tmp_off;
246 old_tmp_freq = tmp_freq;
247 }
248 }
249
250 return old_tmp_freq;
251}
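/*
 * Example of the search above (illustrative numbers): requesting
 * 100000 kHz on a 200000 kHz part converges on off_duration = 1 and
 * on_duration = 1 - an exact 50% duty cycle. Since the loop counts i
 * downwards and accepts ties via "<=", the shortest total period (and
 * so the lowest modulation latency) wins among equally close matches.
 */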
252
253
254/**
255 * gx_set_cpuspeed:
256 * set cpu speed in khz.
257 **/
258
259static void gx_set_cpuspeed(unsigned int khz)
260{
261 u8 suscfg, pmer1;
262 unsigned int new_khz;
263 unsigned long flags;
264 struct cpufreq_freqs freqs;
265
266 freqs.cpu = 0;
267 freqs.old = gx_get_cpuspeed(0);
268
269 new_khz = gx_validate_speed(khz, &gx_params->on_duration,
270 &gx_params->off_duration);
271
272 freqs.new = new_khz;
273
274 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
275 local_irq_save(flags);
276
277
278
279 if (new_khz != stock_freq) {
 280 /* if new khz == 100% of CPU speed, it is a special case */
281 switch (gx_params->cs55x0->device) {
282 case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
283 pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
284 /* FIXME: need to test other values -- Zwane,Miura */
285 /* typical 2 to 4ms */
286 gx_write_byte(PCI_IRQTC, 4);
287 /* typical 50 to 100ms */
288 gx_write_byte(PCI_VIDTC, 100);
289 gx_write_byte(PCI_PMER1, pmer1);
290
291 if (gx_params->cs55x0->revision < 0x10) {
292 /* CS5530(rev 1.2, 1.3) */
293 suscfg = gx_params->pci_suscfg|SUSMOD;
294 } else {
295 /* CS5530A,B.. */
296 suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE;
297 }
298 break;
299 case PCI_DEVICE_ID_CYRIX_5520:
300 case PCI_DEVICE_ID_CYRIX_5510:
301 suscfg = gx_params->pci_suscfg | SUSMOD;
302 break;
303 default:
304 local_irq_restore(flags);
305 pr_debug("fatal: try to set unknown chipset.\n");
306 return;
307 }
308 } else {
309 suscfg = gx_params->pci_suscfg & ~(SUSMOD);
310 gx_params->off_duration = 0;
311 gx_params->on_duration = 0;
312 pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n");
313 }
314
315 gx_write_byte(PCI_MODOFF, gx_params->off_duration);
316 gx_write_byte(PCI_MODON, gx_params->on_duration);
317
318 gx_write_byte(PCI_SUSCFG, suscfg);
319 pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
320
321 local_irq_restore(flags);
322
323 gx_params->pci_suscfg = suscfg;
324
325 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
326
327 pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
328 gx_params->on_duration * 32, gx_params->off_duration * 32);
329 pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
330}
331
332/****************************************************************
333 * High level functions *
334 ****************************************************************/
335
336/*
337 * cpufreq_gx_verify: test if frequency range is valid
338 *
339 * This function checks if a given frequency range in kHz is valid
340 * for the hardware supported by the driver.
341 */
342
343static int cpufreq_gx_verify(struct cpufreq_policy *policy)
344{
345 unsigned int tmp_freq = 0;
346 u8 tmp1, tmp2;
347
348 if (!stock_freq || !policy)
349 return -EINVAL;
350
351 policy->cpu = 0;
352 cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
353 stock_freq);
354
355 /* it needs to be assured that at least one supported frequency is
356 * within policy->min and policy->max. If it is not, policy->max
 357 * needs to be increased until one frequency is supported.
358 * policy->min may not be decreased, though. This way we guarantee a
359 * specific processing capacity.
360 */
361 tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
362 if (tmp_freq < policy->min)
363 tmp_freq += stock_freq / max_duration;
364 policy->min = tmp_freq;
365 if (policy->min > policy->max)
366 policy->max = tmp_freq;
367 tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
368 if (tmp_freq > policy->max)
369 tmp_freq -= stock_freq / max_duration;
370 policy->max = tmp_freq;
371 if (policy->max < policy->min)
372 policy->max = policy->min;
373 cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
374 stock_freq);
375
376 return 0;
377}
378
379/*
380 * cpufreq_gx_target:
381 *
382 */
383static int cpufreq_gx_target(struct cpufreq_policy *policy,
384 unsigned int target_freq,
385 unsigned int relation)
386{
387 u8 tmp1, tmp2;
388 unsigned int tmp_freq;
389
390 if (!stock_freq || !policy)
391 return -EINVAL;
392
393 policy->cpu = 0;
394
395 tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2);
396 while (tmp_freq < policy->min) {
397 tmp_freq += stock_freq / max_duration;
398 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
399 }
400 while (tmp_freq > policy->max) {
401 tmp_freq -= stock_freq / max_duration;
402 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
403 }
404
405 gx_set_cpuspeed(tmp_freq);
406
407 return 0;
408}
409
410static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
411{
412 unsigned int maxfreq, curfreq;
413
414 if (!policy || policy->cpu != 0)
415 return -ENODEV;
416
417 /* determine maximum frequency */
418 if (pci_busclk)
419 maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
420 else if (cpu_khz)
421 maxfreq = cpu_khz;
422 else
423 maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
424
425 stock_freq = maxfreq;
426 curfreq = gx_get_cpuspeed(0);
427
428 pr_debug("cpu max frequency is %d.\n", maxfreq);
429 pr_debug("cpu current frequency is %dkHz.\n", curfreq);
430
431 /* setup basic struct for cpufreq API */
432 policy->cpu = 0;
433
434 if (max_duration < POLICY_MIN_DIV)
435 policy->min = maxfreq / max_duration;
436 else
437 policy->min = maxfreq / POLICY_MIN_DIV;
438 policy->max = maxfreq;
439 policy->cur = curfreq;
440 policy->cpuinfo.min_freq = maxfreq / max_duration;
441 policy->cpuinfo.max_freq = maxfreq;
442 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
443
444 return 0;
445}
446
447/*
448 * cpufreq_gx_init:
449 * MediaGX/Geode GX initialize cpufreq driver
450 */
451static struct cpufreq_driver gx_suspmod_driver = {
452 .get = gx_get_cpuspeed,
453 .verify = cpufreq_gx_verify,
454 .target = cpufreq_gx_target,
455 .init = cpufreq_gx_cpu_init,
456 .name = "gx-suspmod",
457 .owner = THIS_MODULE,
458};
459
460static int __init cpufreq_gx_init(void)
461{
462 int ret;
463 struct gxfreq_params *params;
464 struct pci_dev *gx_pci;
465
466 /* Test if we have the right hardware */
467 gx_pci = gx_detect_chipset();
468 if (gx_pci == NULL)
469 return -ENODEV;
470
471 /* check whether module parameters are sane */
472 if (max_duration > 0xff)
473 max_duration = 0xff;
474
475 pr_debug("geode suspend modulation available.\n");
476
477 params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
478 if (params == NULL)
479 return -ENOMEM;
480
481 params->cs55x0 = gx_pci;
482 gx_params = params;
483
484 /* keep cs55x0 configurations */
485 pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg));
486 pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1));
487 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
488 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
489 pci_read_config_byte(params->cs55x0, PCI_MODOFF,
490 &(params->off_duration));
491
492 ret = cpufreq_register_driver(&gx_suspmod_driver);
493 if (ret) {
494 kfree(params);
495 return ret; /* register error! */
496 }
497
498 return 0;
499}
500
501static void __exit cpufreq_gx_exit(void)
502{
503 cpufreq_unregister_driver(&gx_suspmod_driver);
504 pci_dev_put(gx_params->cs55x0);
505 kfree(gx_params);
506}
507
508MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>");
509MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode");
510MODULE_LICENSE("GPL");
511
512module_init(cpufreq_gx_init);
513module_exit(cpufreq_gx_exit);
514
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
new file mode 100644
index 000000000000..f47d26e2a135
--- /dev/null
+++ b/drivers/cpufreq/longhaul.c
@@ -0,0 +1,1024 @@
1/*
2 * (C) 2001-2004 Dave Jones. <davej@redhat.com>
3 * (C) 2002 Padraig Brady. <padraig@antefacto.com>
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 * Based upon datasheets & sample CPUs kindly provided by VIA.
7 *
   8 * VIA currently have 3 different versions of Longhaul.
9 * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
10 * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
11 * Version 2 of longhaul is backward compatible with v1, but adds
12 * LONGHAUL MSR for purpose of both frequency and voltage scaling.
13 * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C).
14 * Version 3 of longhaul got renamed to Powersaver and redesigned
15 * to use only the POWERSAVER MSR at 0x110a.
16 * It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
  17 * It's pretty much the same, feature-wise, as longhaul v2, though
18 * there is provision for scaling FSB too, but this doesn't work
19 * too well in practice so we don't even try to use this.
20 *
21 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/cpufreq.h>
29#include <linux/pci.h>
30#include <linux/slab.h>
31#include <linux/string.h>
32#include <linux/delay.h>
33#include <linux/timex.h>
34#include <linux/io.h>
35#include <linux/acpi.h>
36
37#include <asm/msr.h>
38#include <acpi/processor.h>
39
40#include "longhaul.h"
41
42#define PFX "longhaul: "
43
44#define TYPE_LONGHAUL_V1 1
45#define TYPE_LONGHAUL_V2 2
46#define TYPE_POWERSAVER 3
47
48#define CPU_SAMUEL 1
49#define CPU_SAMUEL2 2
50#define CPU_EZRA 3
51#define CPU_EZRA_T 4
52#define CPU_NEHEMIAH 5
53#define CPU_NEHEMIAH_C 6
54
55/* Flags */
56#define USE_ACPI_C3 (1 << 1)
57#define USE_NORTHBRIDGE (1 << 2)
58
59static int cpu_model;
60static unsigned int numscales = 16;
61static unsigned int fsb;
62
63static const struct mV_pos *vrm_mV_table;
64static const unsigned char *mV_vrm_table;
65
66static unsigned int highest_speed, lowest_speed; /* kHz */
67static unsigned int minmult, maxmult;
68static int can_scale_voltage;
69static struct acpi_processor *pr;
70static struct acpi_processor_cx *cx;
71static u32 acpi_regs_addr;
72static u8 longhaul_flags;
73static unsigned int longhaul_index;
74
75/* Module parameters */
76static int scale_voltage;
77static int disable_acpi_c3;
78static int revid_errata;
79
80
81/* Clock ratios multiplied by 10 */
82static int mults[32];
83static int eblcr[32];
84static int longhaul_version;
85static struct cpufreq_frequency_table *longhaul_table;
86
87static char speedbuffer[8];
88
89static char *print_speed(int speed)
90{
91 if (speed < 1000) {
92 snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed);
93 return speedbuffer;
94 }
95
96 if (speed%1000 == 0)
97 snprintf(speedbuffer, sizeof(speedbuffer),
98 "%dGHz", speed/1000);
99 else
100 snprintf(speedbuffer, sizeof(speedbuffer),
101 "%d.%dGHz", speed/1000, (speed%1000)/100);
102
103 return speedbuffer;
104}
105
106
107static unsigned int calc_speed(int mult)
108{
109 int khz;
110 khz = (mult/10)*fsb;
111 if (mult%10)
112 khz += fsb/2;
113 khz *= 1000;
114 return khz;
115}
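/*
 * Example (assumed values): with fsb = 133 MHz and mult = 75 (a 7.5x
 * ratio stored as ratio * 10), calc_speed() returns
 * (7 * 133 + 133 / 2) * 1000 = 997000 kHz, i.e. roughly 997 MHz.
 */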
116
117
118static int longhaul_get_cpu_mult(void)
119{
120 unsigned long invalue = 0, lo, hi;
121
122 rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi);
123 invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22;
124 if (longhaul_version == TYPE_LONGHAUL_V2 ||
125 longhaul_version == TYPE_POWERSAVER) {
126 if (lo & (1<<27))
127 invalue += 16;
128 }
129 return eblcr[invalue];
130}
131
132/* For processor with BCR2 MSR */
133
134static void do_longhaul1(unsigned int mults_index)
135{
136 union msr_bcr2 bcr2;
137
138 rdmsrl(MSR_VIA_BCR2, bcr2.val);
139 /* Enable software clock multiplier */
140 bcr2.bits.ESOFTBF = 1;
141 bcr2.bits.CLOCKMUL = mults_index & 0xff;
142
143 /* Sync to timer tick */
144 safe_halt();
145 /* Change frequency on next halt or sleep */
146 wrmsrl(MSR_VIA_BCR2, bcr2.val);
147 /* Invoke transition */
148 ACPI_FLUSH_CPU_CACHE();
149 halt();
150
151 /* Disable software clock multiplier */
152 local_irq_disable();
153 rdmsrl(MSR_VIA_BCR2, bcr2.val);
154 bcr2.bits.ESOFTBF = 0;
155 wrmsrl(MSR_VIA_BCR2, bcr2.val);
156}
157
158/* For processor with Longhaul MSR */
159
160static void do_powersaver(int cx_address, unsigned int mults_index,
161 unsigned int dir)
162{
163 union msr_longhaul longhaul;
164 u32 t;
165
166 rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
167 /* Setup new frequency */
168 if (!revid_errata)
169 longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
170 else
171 longhaul.bits.RevisionKey = 0;
172 longhaul.bits.SoftBusRatio = mults_index & 0xf;
173 longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4;
174 /* Setup new voltage */
175 if (can_scale_voltage)
176 longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f;
177 /* Sync to timer tick */
178 safe_halt();
179 /* Raise voltage if necessary */
180 if (can_scale_voltage && dir) {
181 longhaul.bits.EnableSoftVID = 1;
182 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
183 /* Change voltage */
184 if (!cx_address) {
185 ACPI_FLUSH_CPU_CACHE();
186 halt();
187 } else {
188 ACPI_FLUSH_CPU_CACHE();
189 /* Invoke C3 */
190 inb(cx_address);
191 /* Dummy op - must do something useless after P_LVL3
192 * read */
193 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
194 }
195 longhaul.bits.EnableSoftVID = 0;
196 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
197 }
198
199 /* Change frequency on next halt or sleep */
200 longhaul.bits.EnableSoftBusRatio = 1;
201 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
202 if (!cx_address) {
203 ACPI_FLUSH_CPU_CACHE();
204 halt();
205 } else {
206 ACPI_FLUSH_CPU_CACHE();
207 /* Invoke C3 */
208 inb(cx_address);
209 /* Dummy op - must do something useless after P_LVL3 read */
210 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
211 }
212 /* Disable bus ratio bit */
213 longhaul.bits.EnableSoftBusRatio = 0;
214 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
215
216 /* Reduce voltage if necessary */
217 if (can_scale_voltage && !dir) {
218 longhaul.bits.EnableSoftVID = 1;
219 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
220 /* Change voltage */
221 if (!cx_address) {
222 ACPI_FLUSH_CPU_CACHE();
223 halt();
224 } else {
225 ACPI_FLUSH_CPU_CACHE();
226 /* Invoke C3 */
227 inb(cx_address);
228 /* Dummy op - must do something useless after P_LVL3
229 * read */
230 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
231 }
232 longhaul.bits.EnableSoftVID = 0;
233 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
234 }
235}
236
237/**
 238 * longhaul_setstate()
 239 * @table_index : longhaul_table entry holding the new multiplier bitpattern.
240 *
241 * Sets a new clock ratio.
242 */
243
244static void longhaul_setstate(unsigned int table_index)
245{
246 unsigned int mults_index;
247 int speed, mult;
248 struct cpufreq_freqs freqs;
249 unsigned long flags;
250 unsigned int pic1_mask, pic2_mask;
251 u16 bm_status = 0;
252 u32 bm_timeout = 1000;
253 unsigned int dir = 0;
254
255 mults_index = longhaul_table[table_index].index;
256 /* Safety precautions */
257 mult = mults[mults_index & 0x1f];
258 if (mult == -1)
259 return;
260 speed = calc_speed(mult);
261 if ((speed > highest_speed) || (speed < lowest_speed))
262 return;
263 /* Voltage transition before frequency transition? */
264 if (can_scale_voltage && longhaul_index < table_index)
265 dir = 1;
266
267 freqs.old = calc_speed(longhaul_get_cpu_mult());
268 freqs.new = speed;
 269 freqs.cpu = 0; /* longhaul is a UP-only driver */
270
271 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
272
273 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
274 fsb, mult/10, mult%10, print_speed(speed/1000));
275retry_loop:
276 preempt_disable();
277 local_irq_save(flags);
278
279 pic2_mask = inb(0xA1);
280 pic1_mask = inb(0x21); /* works on C3. save mask. */
281 outb(0xFF, 0xA1); /* Overkill */
282 outb(0xFE, 0x21); /* TMR0 only */
283
284 /* Wait while PCI bus is busy. */
285 if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE
286 || ((pr != NULL) && pr->flags.bm_control))) {
287 bm_status = inw(acpi_regs_addr);
288 bm_status &= 1 << 4;
289 while (bm_status && bm_timeout) {
290 outw(1 << 4, acpi_regs_addr);
291 bm_timeout--;
292 bm_status = inw(acpi_regs_addr);
293 bm_status &= 1 << 4;
294 }
295 }
296
297 if (longhaul_flags & USE_NORTHBRIDGE) {
298 /* Disable AGP and PCI arbiters */
299 outb(3, 0x22);
300 } else if ((pr != NULL) && pr->flags.bm_control) {
301 /* Disable bus master arbitration */
302 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
303 }
304 switch (longhaul_version) {
305
306 /*
307 * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
308 * Software controlled multipliers only.
309 */
310 case TYPE_LONGHAUL_V1:
311 do_longhaul1(mults_index);
312 break;
313
314 /*
315 * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C]
316 *
317 * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
318 * Nehemiah can do FSB scaling too, but this has never been proven
319 * to work in practice.
320 */
321 case TYPE_LONGHAUL_V2:
322 case TYPE_POWERSAVER:
323 if (longhaul_flags & USE_ACPI_C3) {
324 /* Don't allow wakeup */
325 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
326 do_powersaver(cx->address, mults_index, dir);
327 } else {
328 do_powersaver(0, mults_index, dir);
329 }
330 break;
331 }
332
333 if (longhaul_flags & USE_NORTHBRIDGE) {
334 /* Enable arbiters */
335 outb(0, 0x22);
336 } else if ((pr != NULL) && pr->flags.bm_control) {
337 /* Enable bus master arbitration */
338 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
339 }
340 outb(pic2_mask, 0xA1); /* restore mask */
341 outb(pic1_mask, 0x21);
342
343 local_irq_restore(flags);
344 preempt_enable();
345
346 freqs.new = calc_speed(longhaul_get_cpu_mult());
347 /* Check if requested frequency is set. */
348 if (unlikely(freqs.new != speed)) {
349 printk(KERN_INFO PFX "Failed to set requested frequency!\n");
350 /* Revision ID = 1 but processor is expecting revision key
351 * equal to 0. Jumpers at the bottom of processor will change
352 * multiplier and FSB, but will not change bits in Longhaul
353 * MSR nor enable voltage scaling. */
354 if (!revid_errata) {
355 printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
356 "option.\n");
357 revid_errata = 1;
358 msleep(200);
359 goto retry_loop;
360 }
 361 /* Why ACPI C3 sometimes doesn't work is a mystery to me.
362 * But it does happen. Processor is entering ACPI C3 state,
363 * but it doesn't change frequency. I tried poking various
364 * bits in northbridge registers, but without success. */
365 if (longhaul_flags & USE_ACPI_C3) {
366 printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
367 longhaul_flags &= ~USE_ACPI_C3;
368 if (revid_errata) {
369 printk(KERN_INFO PFX "Disabling \"Ignore "
370 "Revision ID\" option.\n");
371 revid_errata = 0;
372 }
373 msleep(200);
374 goto retry_loop;
375 }
376 /* This shouldn't happen. Longhaul ver. 2 was reported not
377 * working on processors without voltage scaling, but with
378 * RevID = 1. RevID errata will make things right. Just
379 * to be 100% sure. */
380 if (longhaul_version == TYPE_LONGHAUL_V2) {
381 printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
382 longhaul_version = TYPE_LONGHAUL_V1;
383 msleep(200);
384 goto retry_loop;
385 }
386 }
387 /* Report true CPU frequency */
388 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
389
390 if (!bm_timeout)
391 printk(KERN_INFO PFX "Warning: Timeout while waiting for "
392 "idle PCI bus.\n");
393}
394
395/*
396 * Centaur decided to make life a little more tricky.
397 * Only longhaul v1 is allowed to read EBLCR BSEL[0:1].
398 * Samuel2 and above have to try and guess what the FSB is.
 399 * We do this by assuming we booted at the maximum multiplier and
 400 * comparing that multiplier times each candidate FSB against cpu_khz,
 401 * which was measured at boot time. Really ugly, but there is no other way.
402 */
403
404#define ROUNDING 0xf
405
406static int guess_fsb(int mult)
407{
408 int speed = cpu_khz / 1000;
409 int i;
410 int speeds[] = { 666, 1000, 1333, 2000 };
411 int f_max, f_min;
412
413 for (i = 0; i < 4; i++) {
414 f_max = ((speeds[i] * mult) + 50) / 100;
415 f_max += (ROUNDING / 2);
416 f_min = f_max - ROUNDING;
417 if ((speed <= f_max) && (speed >= f_min))
418 return speeds[i] / 10;
419 }
420 return 0;
421}
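/*
 * Example of the interpolation above (assumed values): a CPU booted at
 * mult = 75 (7.5x) with cpu_khz near 1000000 is tested against each
 * candidate; for speeds[2] = 1333 (133 MHz FSB), f_max becomes
 * (1333 * 75 + 50) / 100 + 7 = 1007 and f_min = 992, which brackets
 * 1000 MHz, so guess_fsb() returns 1333 / 10 = 133.
 */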
422
423
424static int __cpuinit longhaul_get_ranges(void)
425{
426 unsigned int i, j, k = 0;
427 unsigned int ratio;
428 int mult;
429
430 /* Get current frequency */
431 mult = longhaul_get_cpu_mult();
432 if (mult == -1) {
433 printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
434 return -EINVAL;
435 }
436 fsb = guess_fsb(mult);
437 if (fsb == 0) {
438 printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
439 return -EINVAL;
440 }
441 /* Get max multiplier - as we always did.
442 * Longhaul MSR is useful only when voltage scaling is enabled.
443 * C3 is booting at max anyway. */
444 maxmult = mult;
445 /* Get min multiplier */
446 switch (cpu_model) {
447 case CPU_NEHEMIAH:
448 minmult = 50;
449 break;
450 case CPU_NEHEMIAH_C:
451 minmult = 40;
452 break;
453 default:
454 minmult = 30;
455 break;
456 }
457
458 pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n",
459 minmult/10, minmult%10, maxmult/10, maxmult%10);
460
461 highest_speed = calc_speed(maxmult);
462 lowest_speed = calc_speed(minmult);
463 pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
464 print_speed(lowest_speed/1000),
465 print_speed(highest_speed/1000));
466
467 if (lowest_speed == highest_speed) {
468 printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n");
469 return -EINVAL;
470 }
471 if (lowest_speed > highest_speed) {
472 printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
473 lowest_speed, highest_speed);
474 return -EINVAL;
475 }
476
477 longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table),
478 GFP_KERNEL);
479 if (!longhaul_table)
480 return -ENOMEM;
481
482 for (j = 0; j < numscales; j++) {
483 ratio = mults[j];
484 if (ratio == -1)
485 continue;
486 if (ratio > maxmult || ratio < minmult)
487 continue;
488 longhaul_table[k].frequency = calc_speed(ratio);
489 longhaul_table[k].index = j;
490 k++;
491 }
492 if (k <= 1) {
493 kfree(longhaul_table);
494 return -ENODEV;
495 }
496 /* Sort */
497 for (j = 0; j < k - 1; j++) {
498 unsigned int min_f, min_i;
499 min_f = longhaul_table[j].frequency;
500 min_i = j;
501 for (i = j + 1; i < k; i++) {
502 if (longhaul_table[i].frequency < min_f) {
503 min_f = longhaul_table[i].frequency;
504 min_i = i;
505 }
506 }
507 if (min_i != j) {
508 swap(longhaul_table[j].frequency,
509 longhaul_table[min_i].frequency);
510 swap(longhaul_table[j].index,
511 longhaul_table[min_i].index);
512 }
513 }
514
515 longhaul_table[k].frequency = CPUFREQ_TABLE_END;
516
517 /* Find index we are running on */
518 for (j = 0; j < k; j++) {
519 if (mults[longhaul_table[j].index & 0x1f] == mult) {
520 longhaul_index = j;
521 break;
522 }
523 }
524 return 0;
525}
526
527
528static void __cpuinit longhaul_setup_voltagescaling(void)
529{
530 union msr_longhaul longhaul;
531 struct mV_pos minvid, maxvid, vid;
532 unsigned int j, speed, pos, kHz_step, numvscales;
533 int min_vid_speed;
534
535 rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
536 if (!(longhaul.bits.RevisionID & 1)) {
537 printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n");
538 return;
539 }
540
541 if (!longhaul.bits.VRMRev) {
542 printk(KERN_INFO PFX "VRM 8.5\n");
543 vrm_mV_table = &vrm85_mV[0];
544 mV_vrm_table = &mV_vrm85[0];
545 } else {
546 printk(KERN_INFO PFX "Mobile VRM\n");
547 if (cpu_model < CPU_NEHEMIAH)
548 return;
549 vrm_mV_table = &mobilevrm_mV[0];
550 mV_vrm_table = &mV_mobilevrm[0];
551 }
552
553 minvid = vrm_mV_table[longhaul.bits.MinimumVID];
554 maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
555
556 if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
557 printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
558 "Voltage scaling disabled.\n",
559 minvid.mV/1000, minvid.mV%1000,
560 maxvid.mV/1000, maxvid.mV%1000);
561 return;
562 }
563
564 if (minvid.mV == maxvid.mV) {
565 printk(KERN_INFO PFX "Claims to support voltage scaling but "
566 "min & max are both %d.%03d. "
567 "Voltage scaling disabled\n",
568 maxvid.mV/1000, maxvid.mV%1000);
569 return;
570 }
571
572 /* How many voltage steps*/
573 numvscales = maxvid.pos - minvid.pos + 1;
574 printk(KERN_INFO PFX
575 "Max VID=%d.%03d "
576 "Min VID=%d.%03d, "
577 "%d possible voltage scales\n",
578 maxvid.mV/1000, maxvid.mV%1000,
579 minvid.mV/1000, minvid.mV%1000,
580 numvscales);
581
582 /* Calculate max frequency at min voltage */
583 j = longhaul.bits.MinMHzBR;
584 if (longhaul.bits.MinMHzBR4)
585 j += 16;
586 min_vid_speed = eblcr[j];
587 if (min_vid_speed == -1)
588 return;
589 switch (longhaul.bits.MinMHzFSB) {
590 case 0:
591 min_vid_speed *= 13333;
592 break;
593 case 1:
594 min_vid_speed *= 10000;
595 break;
596 case 3:
597 min_vid_speed *= 6666;
598 break;
599 default:
600 return;
602 }
603 if (min_vid_speed >= highest_speed)
604 return;
605 /* Calculate kHz for one voltage step */
606 kHz_step = (highest_speed - min_vid_speed) / numvscales;
607
608 j = 0;
609 while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
610 speed = longhaul_table[j].frequency;
611 if (speed > min_vid_speed)
612 pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
613 else
614 pos = minvid.pos;
615 longhaul_table[j].index |= mV_vrm_table[pos] << 8;
616 vid = vrm_mV_table[mV_vrm_table[pos]];
617 printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
618 speed, j, vid.mV);
619 j++;
620 }
621
622 can_scale_voltage = 1;
623 printk(KERN_INFO PFX "Voltage scaling enabled.\n");
624}
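/*
 * Example of the VID assignment above (illustrative numbers): with
 * highest_speed = 1000000 kHz, min_vid_speed = 600000 kHz and
 * numvscales = 8, kHz_step = 50000; an 800000 kHz entry then gets
 * pos = (800000 - 600000) / 50000 + minvid.pos = minvid.pos + 4, so
 * each 50 MHz above the minimum-VID speed costs one more VRM step.
 */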
625
626
627static int longhaul_verify(struct cpufreq_policy *policy)
628{
629 return cpufreq_frequency_table_verify(policy, longhaul_table);
630}
631
632
633static int longhaul_target(struct cpufreq_policy *policy,
634 unsigned int target_freq, unsigned int relation)
635{
636 unsigned int table_index = 0;
637 unsigned int i;
638 unsigned int dir = 0;
639 u8 vid, current_vid;
640
641 if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq,
642 relation, &table_index))
643 return -EINVAL;
644
645 /* Don't set same frequency again */
646 if (longhaul_index == table_index)
647 return 0;
648
649 if (!can_scale_voltage)
650 longhaul_setstate(table_index);
651 else {
652 /* On test system voltage transitions exceeding single
653 * step up or down were turning motherboard off. Both
654 * "ondemand" and "userspace" are unsafe. C7 is doing
655 * this in hardware, C3 is old and we need to do this
656 * in software. */
657 i = longhaul_index;
658 current_vid = (longhaul_table[longhaul_index].index >> 8);
659 current_vid &= 0x1f;
660 if (table_index > longhaul_index)
661 dir = 1;
662 while (i != table_index) {
663 vid = (longhaul_table[i].index >> 8) & 0x1f;
664 if (vid != current_vid) {
665 longhaul_setstate(i);
666 current_vid = vid;
667 msleep(200);
668 }
669 if (dir)
670 i++;
671 else
672 i--;
673 }
674 longhaul_setstate(table_index);
675 }
676 longhaul_index = table_index;
677 return 0;
678}
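/*
 * Example of the stepping above (illustrative): with voltage scaling
 * enabled, moving from table index 2 to index 5 walks through the
 * intermediate indices, calling longhaul_setstate() and sleeping
 * 200 ms whenever the VID changes, before finally setting index 5 -
 * the voltage never jumps straight from start to target.
 */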
679
680
681static unsigned int longhaul_get(unsigned int cpu)
682{
683 if (cpu)
684 return 0;
685 return calc_speed(longhaul_get_cpu_mult());
686}
687
688static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
689 u32 nesting_level,
690 void *context, void **return_value)
691{
692 struct acpi_device *d;
693
694 if (acpi_bus_get_device(obj_handle, &d))
695 return 0;
696
697 *return_value = acpi_driver_data(d);
698 return 1;
699}
700
701/* VIA don't support PM2 reg, but have something similar */
702static int enable_arbiter_disable(void)
703{
704 struct pci_dev *dev;
705 int status = 1;
706 int reg;
707 u8 pci_cmd;
708
709 /* Find PLE133 host bridge */
710 reg = 0x78;
711 dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
712 NULL);
713 /* Find PM133/VT8605 host bridge */
714 if (dev == NULL)
715 dev = pci_get_device(PCI_VENDOR_ID_VIA,
716 PCI_DEVICE_ID_VIA_8605_0, NULL);
717 /* Find CLE266 host bridge */
718 if (dev == NULL) {
719 reg = 0x76;
720 dev = pci_get_device(PCI_VENDOR_ID_VIA,
721 PCI_DEVICE_ID_VIA_862X_0, NULL);
722 /* Find CN400 V-Link host bridge */
723 if (dev == NULL)
724 dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
725 }
726 if (dev != NULL) {
727 /* Enable access to port 0x22 */
728 pci_read_config_byte(dev, reg, &pci_cmd);
729 if (!(pci_cmd & 1<<7)) {
730 pci_cmd |= 1<<7;
731 pci_write_config_byte(dev, reg, pci_cmd);
732 pci_read_config_byte(dev, reg, &pci_cmd);
733 if (!(pci_cmd & 1<<7)) {
734 printk(KERN_ERR PFX
735 "Can't enable access to port 0x22.\n");
736 status = 0;
737 }
738 }
739 pci_dev_put(dev);
740 return status;
741 }
742 return 0;
743}
744
745static int longhaul_setup_southbridge(void)
746{
747 struct pci_dev *dev;
748 u8 pci_cmd;
749
750 /* Find VT8235 southbridge */
751 dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
752 if (dev == NULL)
753 /* Find VT8237 southbridge */
754 dev = pci_get_device(PCI_VENDOR_ID_VIA,
755 PCI_DEVICE_ID_VIA_8237, NULL);
756 if (dev != NULL) {
757 /* Set transition time to max */
758 pci_read_config_byte(dev, 0xec, &pci_cmd);
759 pci_cmd &= ~(1 << 2);
760 pci_write_config_byte(dev, 0xec, pci_cmd);
761 pci_read_config_byte(dev, 0xe4, &pci_cmd);
762 pci_cmd &= ~(1 << 7);
763 pci_write_config_byte(dev, 0xe4, pci_cmd);
764 pci_read_config_byte(dev, 0xe5, &pci_cmd);
765 pci_cmd |= 1 << 7;
766 pci_write_config_byte(dev, 0xe5, pci_cmd);
767 /* Get address of ACPI registers block*/
768 pci_read_config_byte(dev, 0x81, &pci_cmd);
769 if (pci_cmd & 1 << 7) {
770 pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
771 acpi_regs_addr &= 0xff00;
772 printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
773 acpi_regs_addr);
774 }
775
776 pci_dev_put(dev);
777 return 1;
778 }
779 return 0;
780}
781
782static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy)
783{
784 struct cpuinfo_x86 *c = &cpu_data(0);
785 char *cpuname = NULL;
786 int ret;
787 u32 lo, hi;
788
789 /* Check what we have on this motherboard */
790 switch (c->x86_model) {
791 case 6:
792 cpu_model = CPU_SAMUEL;
793 cpuname = "C3 'Samuel' [C5A]";
794 longhaul_version = TYPE_LONGHAUL_V1;
795 memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
796 memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr));
797 break;
798
799 case 7:
800 switch (c->x86_mask) {
801 case 0:
802 longhaul_version = TYPE_LONGHAUL_V1;
803 cpu_model = CPU_SAMUEL2;
804 cpuname = "C3 'Samuel 2' [C5B]";
805 /* Note, this is not a typo, early Samuel2's had
806 * Samuel1 ratios. */
807 memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
808 memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
809 break;
810 case 1 ... 15:
811 longhaul_version = TYPE_LONGHAUL_V2;
812 if (c->x86_mask < 8) {
813 cpu_model = CPU_SAMUEL2;
814 cpuname = "C3 'Samuel 2' [C5B]";
815 } else {
816 cpu_model = CPU_EZRA;
817 cpuname = "C3 'Ezra' [C5C]";
818 }
819 memcpy(mults, ezra_mults, sizeof(ezra_mults));
820 memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr));
821 break;
822 }
823 break;
824
825 case 8:
826 cpu_model = CPU_EZRA_T;
827 cpuname = "C3 'Ezra-T' [C5M]";
828 longhaul_version = TYPE_POWERSAVER;
829 numscales = 32;
830 memcpy(mults, ezrat_mults, sizeof(ezrat_mults));
831 memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr));
832 break;
833
834 case 9:
835 longhaul_version = TYPE_POWERSAVER;
836 numscales = 32;
837 memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
838 memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
839 switch (c->x86_mask) {
840 case 0 ... 1:
841 cpu_model = CPU_NEHEMIAH;
842 cpuname = "C3 'Nehemiah A' [C5XLOE]";
843 break;
844 case 2 ... 4:
845 cpu_model = CPU_NEHEMIAH;
846 cpuname = "C3 'Nehemiah B' [C5XLOH]";
847 break;
848 case 5 ... 15:
849 cpu_model = CPU_NEHEMIAH_C;
850 cpuname = "C3 'Nehemiah C' [C5P]";
851 break;
852 }
853 break;
854
855 default:
856 cpuname = "Unknown";
857 break;
858 }
859 /* Check Longhaul ver. 2 */
860 if (longhaul_version == TYPE_LONGHAUL_V2) {
861 rdmsr(MSR_VIA_LONGHAUL, lo, hi);
862 if (lo == 0 && hi == 0)
863 /* Looks like MSR isn't present */
864 longhaul_version = TYPE_LONGHAUL_V1;
865 }
866
867 printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
868 switch (longhaul_version) {
869 case TYPE_LONGHAUL_V1:
870 case TYPE_LONGHAUL_V2:
871 printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
872 break;
873 case TYPE_POWERSAVER:
874 printk(KERN_CONT "Powersaver supported.\n");
875 break;
 876 }
877
878 /* Doesn't hurt */
879 longhaul_setup_southbridge();
880
881 /* Find ACPI data for processor */
882 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
883 ACPI_UINT32_MAX, &longhaul_walk_callback, NULL,
884 NULL, (void *)&pr);
885
886 /* Check ACPI support for C3 state */
887 if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
888 cx = &pr->power.states[ACPI_STATE_C3];
889 if (cx->address > 0 && cx->latency <= 1000)
890 longhaul_flags |= USE_ACPI_C3;
891 }
892 /* Disable if it isn't working */
893 if (disable_acpi_c3)
894 longhaul_flags &= ~USE_ACPI_C3;
895 /* Check if northbridge is friendly */
896 if (enable_arbiter_disable())
897 longhaul_flags |= USE_NORTHBRIDGE;
898
899 /* Check ACPI support for bus master arbiter disable */
900 if (!(longhaul_flags & USE_ACPI_C3
901 || longhaul_flags & USE_NORTHBRIDGE)
902 && ((pr == NULL) || !(pr->flags.bm_control))) {
903 printk(KERN_ERR PFX
904 "No ACPI support. Unsupported northbridge.\n");
905 return -ENODEV;
906 }
907
908 if (longhaul_flags & USE_NORTHBRIDGE)
909 printk(KERN_INFO PFX "Using northbridge support.\n");
910 if (longhaul_flags & USE_ACPI_C3)
911 printk(KERN_INFO PFX "Using ACPI support.\n");
912
913 ret = longhaul_get_ranges();
914 if (ret != 0)
915 return ret;
916
917 if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
918 longhaul_setup_voltagescaling();
919
920 policy->cpuinfo.transition_latency = 200000; /* nsec */
921 policy->cur = calc_speed(longhaul_get_cpu_mult());
922
923 ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
924 if (ret)
925 return ret;
926
927 cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
928
929 return 0;
930}
931
932static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
933{
934 cpufreq_frequency_table_put_attr(policy->cpu);
935 return 0;
936}
937
938static struct freq_attr *longhaul_attr[] = {
939 &cpufreq_freq_attr_scaling_available_freqs,
940 NULL,
941};
942
943static struct cpufreq_driver longhaul_driver = {
944 .verify = longhaul_verify,
945 .target = longhaul_target,
946 .get = longhaul_get,
947 .init = longhaul_cpu_init,
948 .exit = __devexit_p(longhaul_cpu_exit),
949 .name = "longhaul",
950 .owner = THIS_MODULE,
951 .attr = longhaul_attr,
952};
953
954
955static int __init longhaul_init(void)
956{
957 struct cpuinfo_x86 *c = &cpu_data(0);
958
959 if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
960 return -ENODEV;
961
962#ifdef CONFIG_SMP
963 if (num_online_cpus() > 1) {
964 printk(KERN_ERR PFX "More than 1 CPU detected, "
965 "longhaul disabled.\n");
966 return -ENODEV;
967 }
968#endif
969#ifdef CONFIG_X86_IO_APIC
970 if (cpu_has_apic) {
971 printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
972 "broken in this configuration.\n");
973 return -ENODEV;
974 }
975#endif
976 switch (c->x86_model) {
977 case 6 ... 9:
978 return cpufreq_register_driver(&longhaul_driver);
979 case 10:
980 printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
981 default:
982 ;
983 }
984
985 return -ENODEV;
986}
987
988
989static void __exit longhaul_exit(void)
990{
991 int i;
992
993 for (i = 0; i < numscales; i++) {
994 if (mults[i] == maxmult) {
995 longhaul_setstate(i);
996 break;
997 }
998 }
999
1000 cpufreq_unregister_driver(&longhaul_driver);
1001 kfree(longhaul_table);
1002}
1003
1004/* Even if the BIOS exports an ACPI C3 state, and it is entered
1005 * successfully when the CPU is idle, in some cases this state
1006 * doesn't trigger the frequency transition. */
1007module_param(disable_acpi_c3, int, 0644);
1008MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
1009/* Change the CPU voltage along with frequency. Very useful to save
1010 * power, but most VIA C3 processors don't support it. */
1011module_param(scale_voltage, int, 0644);
1012MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
1013/* Force the revision key to 0 for processors which don't
1014 * support voltage scaling but advertise themselves as
1015 * such. */
1016module_param(revid_errata, int, 0644);
1017MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
1018
1019MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1020MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
1021MODULE_LICENSE("GPL");
1022
1023late_initcall(longhaul_init);
1024module_exit(longhaul_exit);
diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h
new file mode 100644
index 000000000000..cbf48fbca881
--- /dev/null
+++ b/drivers/cpufreq/longhaul.h
@@ -0,0 +1,353 @@
1/*
2 * longhaul.h
3 * (C) 2003 Dave Jones.
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 *
7 * VIA-specific information
8 */
9
10union msr_bcr2 {
11 struct {
12		unsigned Reserved:19,	// 18:0
13 ESOFTBF:1, // 19
14 Reserved2:3, // 22:20
15 CLOCKMUL:4, // 26:23
16 Reserved3:5; // 31:27
17 } bits;
18 unsigned long val;
19};
20
21union msr_longhaul {
22 struct {
23 unsigned RevisionID:4, // 3:0
24 RevisionKey:4, // 7:4
25 EnableSoftBusRatio:1, // 8
26 EnableSoftVID:1, // 9
27 EnableSoftBSEL:1, // 10
28			Reserved:3,	// 13:11
29 SoftBusRatio4:1, // 14
30 VRMRev:1, // 15
31 SoftBusRatio:4, // 19:16
32 SoftVID:5, // 24:20
33 Reserved2:3, // 27:25
34 SoftBSEL:2, // 29:28
35 Reserved3:2, // 31:30
36 MaxMHzBR:4, // 35:32
37 MaximumVID:5, // 40:36
38 MaxMHzFSB:2, // 42:41
39 MaxMHzBR4:1, // 43
40 Reserved4:4, // 47:44
41 MinMHzBR:4, // 51:48
42 MinimumVID:5, // 56:52
43 MinMHzFSB:2, // 58:57
44 MinMHzBR4:1, // 59
45 Reserved5:4; // 63:60
46 } bits;
47 unsigned long long val;
48};
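/*
 * Usage sketch (illustrative, not part of the original header): the two
 * 32-bit halves returned by rdmsr() can be assembled into this union to
 * pick the fields apart; the 5-bit bus ratio is split across
 * SoftBusRatio (low 4 bits) and SoftBusRatio4 (bit 4).
 *
 *	u32 lo, hi;
 *	union msr_longhaul lh;
 *
 *	rdmsr(MSR_VIA_LONGHAUL, lo, hi);
 *	lh.val = ((u64)hi << 32) | lo;
 *	ratio = (lh.bits.SoftBusRatio4 << 4) | lh.bits.SoftBusRatio;
 */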
49
50/*
51 * Clock ratio tables. Div/Mod by 10 to get ratio.
52 * The eblcr values specify the ratio read from the CPU.
53 * The mults values specify what to write to the CPU.
54 */
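/*
 * Example: a table value of 75 encodes a 7.5x multiplier (75 / 10);
 * -1 marks an encoding that is reserved on that CPU model.
 */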
55
56/*
57 * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
58 */
59static const int __cpuinitdata samuel1_mults[16] = {
60 -1, /* 0000 -> RESERVED */
61 30, /* 0001 -> 3.0x */
62 40, /* 0010 -> 4.0x */
63 -1, /* 0011 -> RESERVED */
64 -1, /* 0100 -> RESERVED */
65 35, /* 0101 -> 3.5x */
66 45, /* 0110 -> 4.5x */
67 55, /* 0111 -> 5.5x */
68 60, /* 1000 -> 6.0x */
69 70, /* 1001 -> 7.0x */
70 80, /* 1010 -> 8.0x */
71 50, /* 1011 -> 5.0x */
72 65, /* 1100 -> 6.5x */
73 75, /* 1101 -> 7.5x */
74 -1, /* 1110 -> RESERVED */
75 -1, /* 1111 -> RESERVED */
76};
77
78static const int __cpuinitdata samuel1_eblcr[16] = {
79 50, /* 0000 -> RESERVED */
80 30, /* 0001 -> 3.0x */
81 40, /* 0010 -> 4.0x */
82 -1, /* 0011 -> RESERVED */
83 55, /* 0100 -> 5.5x */
84 35, /* 0101 -> 3.5x */
85 45, /* 0110 -> 4.5x */
86 -1, /* 0111 -> RESERVED */
87 -1, /* 1000 -> RESERVED */
88 70, /* 1001 -> 7.0x */
89 80, /* 1010 -> 8.0x */
90 60, /* 1011 -> 6.0x */
91 -1, /* 1100 -> RESERVED */
92 75, /* 1101 -> 7.5x */
93 -1, /* 1110 -> RESERVED */
94 65, /* 1111 -> 6.5x */
95};
96
97/*
98 * VIA C3 Samuel2 Stepping 1->15
99 */
100static const int __cpuinitdata samuel2_eblcr[16] = {
101 50, /* 0000 -> 5.0x */
102 30, /* 0001 -> 3.0x */
103 40, /* 0010 -> 4.0x */
104 100, /* 0011 -> 10.0x */
105 55, /* 0100 -> 5.5x */
106 35, /* 0101 -> 3.5x */
107 45, /* 0110 -> 4.5x */
108 110, /* 0111 -> 11.0x */
109 90, /* 1000 -> 9.0x */
110 70, /* 1001 -> 7.0x */
111 80, /* 1010 -> 8.0x */
112 60, /* 1011 -> 6.0x */
113 120, /* 1100 -> 12.0x */
114 75, /* 1101 -> 7.5x */
115 130, /* 1110 -> 13.0x */
116 65, /* 1111 -> 6.5x */
117};
118
119/*
120 * VIA C3 Ezra
121 */
122static const int __cpuinitdata ezra_mults[16] = {
123 100, /* 0000 -> 10.0x */
124 30, /* 0001 -> 3.0x */
125 40, /* 0010 -> 4.0x */
126 90, /* 0011 -> 9.0x */
127 95, /* 0100 -> 9.5x */
128 35, /* 0101 -> 3.5x */
129 45, /* 0110 -> 4.5x */
130 55, /* 0111 -> 5.5x */
131 60, /* 1000 -> 6.0x */
132 70, /* 1001 -> 7.0x */
133 80, /* 1010 -> 8.0x */
134 50, /* 1011 -> 5.0x */
135 65, /* 1100 -> 6.5x */
136 75, /* 1101 -> 7.5x */
137 85, /* 1110 -> 8.5x */
138 120, /* 1111 -> 12.0x */
139};
140
141static const int __cpuinitdata ezra_eblcr[16] = {
142 50, /* 0000 -> 5.0x */
143 30, /* 0001 -> 3.0x */
144 40, /* 0010 -> 4.0x */
145 100, /* 0011 -> 10.0x */
146 55, /* 0100 -> 5.5x */
147 35, /* 0101 -> 3.5x */
148 45, /* 0110 -> 4.5x */
149 95, /* 0111 -> 9.5x */
150 90, /* 1000 -> 9.0x */
151 70, /* 1001 -> 7.0x */
152 80, /* 1010 -> 8.0x */
153 60, /* 1011 -> 6.0x */
154 120, /* 1100 -> 12.0x */
155 75, /* 1101 -> 7.5x */
156 85, /* 1110 -> 8.5x */
157 65, /* 1111 -> 6.5x */
158};
159
160/*
161 * VIA C3 (Ezra-T) [C5M].
162 */
163static const int __cpuinitdata ezrat_mults[32] = {
164 100, /* 0000 -> 10.0x */
165 30, /* 0001 -> 3.0x */
166 40, /* 0010 -> 4.0x */
167 90, /* 0011 -> 9.0x */
168 95, /* 0100 -> 9.5x */
169 35, /* 0101 -> 3.5x */
170 45, /* 0110 -> 4.5x */
171 55, /* 0111 -> 5.5x */
172 60, /* 1000 -> 6.0x */
173 70, /* 1001 -> 7.0x */
174 80, /* 1010 -> 8.0x */
175 50, /* 1011 -> 5.0x */
176 65, /* 1100 -> 6.5x */
177 75, /* 1101 -> 7.5x */
178 85, /* 1110 -> 8.5x */
179 120, /* 1111 -> 12.0x */
180
181 -1, /* 0000 -> RESERVED (10.0x) */
182 110, /* 0001 -> 11.0x */
183 -1, /* 0010 -> 12.0x */
184 -1, /* 0011 -> RESERVED (9.0x)*/
185 105, /* 0100 -> 10.5x */
186 115, /* 0101 -> 11.5x */
187 125, /* 0110 -> 12.5x */
188 135, /* 0111 -> 13.5x */
189 140, /* 1000 -> 14.0x */
190 150, /* 1001 -> 15.0x */
191 160, /* 1010 -> 16.0x */
192 130, /* 1011 -> 13.0x */
193 145, /* 1100 -> 14.5x */
194 155, /* 1101 -> 15.5x */
195 -1, /* 1110 -> RESERVED (13.0x) */
196 -1, /* 1111 -> RESERVED (12.0x) */
197};
198
199static const int __cpuinitdata ezrat_eblcr[32] = {
200 50, /* 0000 -> 5.0x */
201 30, /* 0001 -> 3.0x */
202 40, /* 0010 -> 4.0x */
203 100, /* 0011 -> 10.0x */
204 55, /* 0100 -> 5.5x */
205 35, /* 0101 -> 3.5x */
206 45, /* 0110 -> 4.5x */
207 95, /* 0111 -> 9.5x */
208 90, /* 1000 -> 9.0x */
209 70, /* 1001 -> 7.0x */
210 80, /* 1010 -> 8.0x */
211 60, /* 1011 -> 6.0x */
212 120, /* 1100 -> 12.0x */
213 75, /* 1101 -> 7.5x */
214 85, /* 1110 -> 8.5x */
215 65, /* 1111 -> 6.5x */
216
217 -1, /* 0000 -> RESERVED (9.0x) */
218 110, /* 0001 -> 11.0x */
219 120, /* 0010 -> 12.0x */
220 -1, /* 0011 -> RESERVED (10.0x)*/
221 135, /* 0100 -> 13.5x */
222 115, /* 0101 -> 11.5x */
223 125, /* 0110 -> 12.5x */
224 105, /* 0111 -> 10.5x */
225 130, /* 1000 -> 13.0x */
226 150, /* 1001 -> 15.0x */
227 160, /* 1010 -> 16.0x */
228 140, /* 1011 -> 14.0x */
229 -1, /* 1100 -> RESERVED (12.0x) */
230 155, /* 1101 -> 15.5x */
231 -1, /* 1110 -> RESERVED (13.0x) */
232 145, /* 1111 -> 14.5x */
233};
234
235/*
236 * VIA C3 Nehemiah
237 */
238static const int __cpuinitdata nehemiah_mults[32] = {
239 100, /* 0000 -> 10.0x */
240 -1, /* 0001 -> 16.0x */
241 40, /* 0010 -> 4.0x */
242 90, /* 0011 -> 9.0x */
243 95, /* 0100 -> 9.5x */
244 -1, /* 0101 -> RESERVED */
245 45, /* 0110 -> 4.5x */
246 55, /* 0111 -> 5.5x */
247 60, /* 1000 -> 6.0x */
248 70, /* 1001 -> 7.0x */
249 80, /* 1010 -> 8.0x */
250 50, /* 1011 -> 5.0x */
251 65, /* 1100 -> 6.5x */
252 75, /* 1101 -> 7.5x */
253 85, /* 1110 -> 8.5x */
254 120, /* 1111 -> 12.0x */
255 -1, /* 0000 -> 10.0x */
256 110, /* 0001 -> 11.0x */
257 -1, /* 0010 -> 12.0x */
258 -1, /* 0011 -> 9.0x */
259 105, /* 0100 -> 10.5x */
260 115, /* 0101 -> 11.5x */
261 125, /* 0110 -> 12.5x */
262 135, /* 0111 -> 13.5x */
263 140, /* 1000 -> 14.0x */
264 150, /* 1001 -> 15.0x */
265 160, /* 1010 -> 16.0x */
266 130, /* 1011 -> 13.0x */
267 145, /* 1100 -> 14.5x */
268 155, /* 1101 -> 15.5x */
269 -1, /* 1110 -> RESERVED (13.0x) */
270 -1, /* 1111 -> 12.0x */
271};
272
273static const int __cpuinitdata nehemiah_eblcr[32] = {
274 50, /* 0000 -> 5.0x */
275 160, /* 0001 -> 16.0x */
276 40, /* 0010 -> 4.0x */
277 100, /* 0011 -> 10.0x */
278 55, /* 0100 -> 5.5x */
279 -1, /* 0101 -> RESERVED */
280 45, /* 0110 -> 4.5x */
281 95, /* 0111 -> 9.5x */
282 90, /* 1000 -> 9.0x */
283 70, /* 1001 -> 7.0x */
284 80, /* 1010 -> 8.0x */
285 60, /* 1011 -> 6.0x */
286 120, /* 1100 -> 12.0x */
287 75, /* 1101 -> 7.5x */
288 85, /* 1110 -> 8.5x */
289 65, /* 1111 -> 6.5x */
290 90, /* 0000 -> 9.0x */
291 110, /* 0001 -> 11.0x */
292 120, /* 0010 -> 12.0x */
293 100, /* 0011 -> 10.0x */
294 135, /* 0100 -> 13.5x */
295 115, /* 0101 -> 11.5x */
296 125, /* 0110 -> 12.5x */
297 105, /* 0111 -> 10.5x */
298 130, /* 1000 -> 13.0x */
299 150, /* 1001 -> 15.0x */
300 160, /* 1010 -> 16.0x */
301 140, /* 1011 -> 14.0x */
302 120, /* 1100 -> 12.0x */
303 155, /* 1101 -> 15.5x */
304 -1, /* 1110 -> RESERVED (13.0x) */
305 145 /* 1111 -> 14.5x */
306};
307
308/*
309 * Voltage scales. Div/Mod by 1000 to get actual voltage.
310 * Which scale to use depends on the VRM type in use.
311 */
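/*
 * Example: vrm85_mV[0] = {1250, 8} describes 1.250 V (1250 / 1000)
 * at VID table position 8.
 */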
312
313struct mV_pos {
314 unsigned short mV;
315 unsigned short pos;
316};
317
318static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
319 {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
320 {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
321 {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
322 {1450, 16}, {1400, 14}, {1350, 12}, {1300, 10},
323 {1275, 9}, {1225, 7}, {1175, 5}, {1125, 3},
324 {1075, 1}, {1825, 31}, {1775, 29}, {1725, 27},
325 {1675, 25}, {1625, 23}, {1575, 21}, {1525, 19},
326 {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
327};
328
329static const unsigned char __cpuinitdata mV_vrm85[32] = {
330 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
331 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
332 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
333 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
334};
335
336static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
337 {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
338 {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
339 {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
340 {1150, 19}, {1100, 18}, {1050, 17}, {1000, 16},
341 {975, 15}, {950, 14}, {925, 13}, {900, 12},
342 {875, 11}, {850, 10}, {825, 9}, {800, 8},
343 {775, 7}, {750, 6}, {725, 5}, {700, 4},
344 {675, 3}, {650, 2}, {625, 1}, {600, 0}
345};
346
347static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
348 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
349 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
350 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
351 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
352};
353
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
new file mode 100644
index 000000000000..34ea359b370e
--- /dev/null
+++ b/drivers/cpufreq/longrun.c
@@ -0,0 +1,324 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/cpufreq.h>
13#include <linux/timex.h>
14
15#include <asm/msr.h>
16#include <asm/processor.h>
17
18static struct cpufreq_driver longrun_driver;
19
20/**
 21 * longrun_{low,high}_freq are needed for the conversion of cpufreq kHz
 22 * values into percentage values. In TMTA microcode, the following is valid:
23 * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
24 */
25static unsigned int longrun_low_freq, longrun_high_freq;
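/*
 * Worked example (illustrative numbers): with low_freq = 300000 kHz and
 * high_freq = 600000 kHz, a current frequency of 450000 kHz corresponds
 * to performance_pctg = (450000 - 300000) / (600000 - 300000) = 50%.
 */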
26
27
28/**
29 * longrun_get_policy - get the current LongRun policy
30 * @policy: struct cpufreq_policy where current policy is written into
31 *
32 * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
33 * and MSR_TMTA_LONGRUN_CTRL
34 */
35static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
36{
37 u32 msr_lo, msr_hi;
38
39 rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
40 pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
41 if (msr_lo & 0x01)
42 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
43 else
44 policy->policy = CPUFREQ_POLICY_POWERSAVE;
45
46 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
47 pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
48 msr_lo &= 0x0000007F;
49 msr_hi &= 0x0000007F;
50
51 if (longrun_high_freq <= longrun_low_freq) {
52 /* Assume degenerate Longrun table */
53 policy->min = policy->max = longrun_high_freq;
54 } else {
55 policy->min = longrun_low_freq + msr_lo *
56 ((longrun_high_freq - longrun_low_freq) / 100);
57 policy->max = longrun_low_freq + msr_hi *
58 ((longrun_high_freq - longrun_low_freq) / 100);
59 }
60 policy->cpu = 0;
61}
62
63
64/**
65 * longrun_set_policy - sets a new CPUFreq policy
66 * @policy: new policy
67 *
68 * Sets a new CPUFreq policy on LongRun-capable processors. This function
69 * has to be called with cpufreq_driver locked.
70 */
71static int longrun_set_policy(struct cpufreq_policy *policy)
72{
73 u32 msr_lo, msr_hi;
74 u32 pctg_lo, pctg_hi;
75
76 if (!policy)
77 return -EINVAL;
78
79 if (longrun_high_freq <= longrun_low_freq) {
80 /* Assume degenerate Longrun table */
81 pctg_lo = pctg_hi = 100;
82 } else {
83 pctg_lo = (policy->min - longrun_low_freq) /
84 ((longrun_high_freq - longrun_low_freq) / 100);
85 pctg_hi = (policy->max - longrun_low_freq) /
86 ((longrun_high_freq - longrun_low_freq) / 100);
87 }
88
89 if (pctg_hi > 100)
90 pctg_hi = 100;
91 if (pctg_lo > pctg_hi)
92 pctg_lo = pctg_hi;
93
94 /* performance or economy mode */
95 rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
96 msr_lo &= 0xFFFFFFFE;
97 switch (policy->policy) {
98 case CPUFREQ_POLICY_PERFORMANCE:
99 msr_lo |= 0x00000001;
100 break;
101 case CPUFREQ_POLICY_POWERSAVE:
102 break;
103 }
104 wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
105
106 /* lower and upper boundary */
107 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
108 msr_lo &= 0xFFFFFF80;
109 msr_hi &= 0xFFFFFF80;
110 msr_lo |= pctg_lo;
111 msr_hi |= pctg_hi;
112 wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
113
114 return 0;
115}
116
117
118/**
119 * longrun_verify_policy - verifies a new CPUFreq policy
120 * @policy: the policy to verify
121 *
122 * Validates a new CPUFreq policy. This function has to be called with
123 * cpufreq_driver locked.
124 */
125static int longrun_verify_policy(struct cpufreq_policy *policy)
126{
127 if (!policy)
128 return -EINVAL;
129
130 policy->cpu = 0;
131 cpufreq_verify_within_limits(policy,
132 policy->cpuinfo.min_freq,
133 policy->cpuinfo.max_freq);
134
135 if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
136 (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
137 return -EINVAL;
138
139 return 0;
140}
141
142static unsigned int longrun_get(unsigned int cpu)
143{
144 u32 eax, ebx, ecx, edx;
145
146 if (cpu)
147 return 0;
148
149 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
150 pr_debug("cpuid eax is %u\n", eax);
151
152 return eax * 1000;
153}
154
155/**
156 * longrun_determine_freqs - determines the lowest and highest possible core frequencies
157 * @low_freq: an int to put the lowest frequency into
158 * @high_freq: an int to put the highest frequency into
159 *
160 * Determines the lowest and highest possible core frequencies on this CPU.
161 * This is necessary to calculate the performance percentage according to
162 * TMTA rules:
163 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
164 */
165static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
166 unsigned int *high_freq)
167{
168 u32 msr_lo, msr_hi;
169 u32 save_lo, save_hi;
170 u32 eax, ebx, ecx, edx;
171 u32 try_hi;
172 struct cpuinfo_x86 *c = &cpu_data(0);
173
174 if (!low_freq || !high_freq)
175 return -EINVAL;
176
177 if (cpu_has(c, X86_FEATURE_LRTI)) {
178 /* if the LongRun Table Interface is present, the
179 * detection is a bit easier:
180 * For minimum frequency, read out the maximum
181 * level (msr_hi), write that into "currently
182 * selected level", and read out the frequency.
183 * For maximum frequency, read out level zero.
184 */
185 /* minimum */
186 rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
187 wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
188 rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
189 *low_freq = msr_lo * 1000; /* to kHz */
190
191 /* maximum */
192 wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
193 rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
194 *high_freq = msr_lo * 1000; /* to kHz */
195
196 pr_debug("longrun table interface told %u - %u kHz\n",
197 *low_freq, *high_freq);
198
199 if (*low_freq > *high_freq)
200 *low_freq = *high_freq;
201 return 0;
202 }
203
204 /* set the upper border to the value determined during TSC init */
205 *high_freq = (cpu_khz / 1000);
206 *high_freq = *high_freq * 1000;
207 pr_debug("high frequency is %u kHz\n", *high_freq);
208
209 /* get current borders */
210 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
211 save_lo = msr_lo & 0x0000007F;
212 save_hi = msr_hi & 0x0000007F;
213
214 /* if current perf_pctg is larger than 90%, we need to decrease the
215 * upper limit to make the calculation more accurate.
216 */
217 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
218	/* try decreasing in 10% steps; some processors react only
219	 * to certain barrier values */
220 for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
221 /* set to 0 to try_hi perf_pctg */
222 msr_lo &= 0xFFFFFF80;
223 msr_hi &= 0xFFFFFF80;
224 msr_hi |= try_hi;
225 wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
226
227 /* read out current core MHz and current perf_pctg */
228 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
229
230 /* restore values */
231 wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
232 }
233 pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax);
234
235	/* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
236	 * equals
237	 * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
238	 *
239	 * high_freq * perf_pctg is stored temporarily in "ebx".
240	 */
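	/* Worked example (illustrative numbers): with cpu_khz = 600000
	 * (600 MHz), ecx (perf_pctg) = 50 and eax (current MHz) = 450,
	 * ebx below becomes (600 * 50) / 100 = 300, so
	 * edx = (450 - 300) * 100 / (100 - 50) = 300, i.e. 300000 kHz. */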
241 ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */
242
243 if ((ecx > 95) || (ecx == 0) || (eax < ebx))
244 return -EIO;
245
246 edx = ((eax - ebx) * 100) / (100 - ecx);
247 *low_freq = edx * 1000; /* back to kHz */
248
249 pr_debug("low frequency is %u kHz\n", *low_freq);
250
251 if (*low_freq > *high_freq)
252 *low_freq = *high_freq;
253
254 return 0;
255}
256
257
258static int __cpuinit longrun_cpu_init(struct cpufreq_policy *policy)
259{
260 int result = 0;
261
262 /* capability check */
263 if (policy->cpu != 0)
264 return -ENODEV;
265
266 /* detect low and high frequency */
267 result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
268 if (result)
269 return result;
270
271 /* cpuinfo and default policy values */
272 policy->cpuinfo.min_freq = longrun_low_freq;
273 policy->cpuinfo.max_freq = longrun_high_freq;
274 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
275 longrun_get_policy(policy);
276
277 return 0;
278}
279
280
281static struct cpufreq_driver longrun_driver = {
282 .flags = CPUFREQ_CONST_LOOPS,
283 .verify = longrun_verify_policy,
284 .setpolicy = longrun_set_policy,
285 .get = longrun_get,
286 .init = longrun_cpu_init,
287 .name = "longrun",
288 .owner = THIS_MODULE,
289};
290
291
292/**
293 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
294 *
295 * Initializes the LongRun support.
296 */
297static int __init longrun_init(void)
298{
299 struct cpuinfo_x86 *c = &cpu_data(0);
300
301 if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
302 !cpu_has(c, X86_FEATURE_LONGRUN))
303 return -ENODEV;
304
305 return cpufreq_register_driver(&longrun_driver);
306}
307
308
309/**
310 * longrun_exit - unregisters LongRun support
311 */
312static void __exit longrun_exit(void)
313{
314 cpufreq_unregister_driver(&longrun_driver);
315}
316
317
318MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
319MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
320 "Efficeon processors.");
321MODULE_LICENSE("GPL");
322
323module_init(longrun_init);
324module_exit(longrun_exit);
diff --git a/drivers/cpufreq/mperf.c b/drivers/cpufreq/mperf.c
new file mode 100644
index 000000000000..911e193018ae
--- /dev/null
+++ b/drivers/cpufreq/mperf.c
@@ -0,0 +1,51 @@
1#include <linux/kernel.h>
2#include <linux/smp.h>
3#include <linux/module.h>
4#include <linux/init.h>
5#include <linux/cpufreq.h>
6#include <linux/slab.h>
7
8#include "mperf.h"
9
10static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
11
12/* Called via smp_call_function_single(), on the target CPU */
13static void read_measured_perf_ctrs(void *_cur)
14{
15 struct aperfmperf *am = _cur;
16
17 get_aperfmperf(am);
18}
19
 20/*
 21 * Return the measured active (C0) frequency on this CPU since the last
 22 * call to this function.
 23 * Input: cpu number
 24 * Return: Average CPU frequency in terms of max frequency (zero on error)
 25 *
 26 * We use the IA32_MPERF and IA32_APERF MSRs to get the measured performance
 27 * over a period of time while the CPU is in the C0 state.
 28 * IA32_MPERF counts at the rate of the maximum advertised frequency;
 29 * IA32_APERF counts at the rate of the actual CPU frequency.
 30 * Only the IA32_APERF/IA32_MPERF ratio is architecturally defined;
 31 * no meaning should be attached to the absolute values of these MSRs.
 32 */
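/*
 * Worked example (illustrative numbers): if APERF advanced by 1.5e9 and
 * MPERF by 2.0e9 since the last call, the ratio is 0.75 and the
 * reported frequency is 0.75 * cpuinfo.max_freq.
 */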
33unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
34 unsigned int cpu)
35{
36 struct aperfmperf perf;
37 unsigned long ratio;
38 unsigned int retval;
39
40 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
41 return 0;
42
43 ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
44 per_cpu(acfreq_old_perf, cpu) = perf;
45
46 retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
47
48 return retval;
49}
50EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
51MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mperf.h b/drivers/cpufreq/mperf.h
new file mode 100644
index 000000000000..5dbf2950dc22
--- /dev/null
+++ b/drivers/cpufreq/mperf.h
@@ -0,0 +1,9 @@
1/*
2 * (c) 2010 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html
6 */
7
8unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
9 unsigned int cpu);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
new file mode 100644
index 000000000000..6be3e0760c26
--- /dev/null
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -0,0 +1,329 @@
1/*
2 * Pentium 4/Xeon CPU on demand clock modulation/speed scaling
3 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
4 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
5 * (C) 2002 Arjan van de Ven <arjanv@redhat.com>
6 * (C) 2002 Tora T. Engstad
7 * All Rights Reserved
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 * The author(s) of this software shall not be held liable for damages
15 * of any nature resulting due to the use of this software. This
16 * software is provided AS-IS with no warranties.
17 *
18 * Date Errata Description
19 * 20020525 N44, O17 12.5% or 25% DC causes lockup
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/smp.h>
27#include <linux/cpufreq.h>
28#include <linux/cpumask.h>
29#include <linux/timex.h>
30
31#include <asm/processor.h>
32#include <asm/msr.h>
33#include <asm/timer.h>
34
35#include "speedstep-lib.h"
36
37#define PFX "p4-clockmod: "
38
 39/*
 40 * Duty Cycle (3 bits). Note that DC_DISABLE is not specified in
 41 * the Intel docs; it is used here simply to mean "disable".
 42 */
43enum {
44 DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
45 DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
46};
47
48#define DC_ENTRIES 8
49
50
51static int has_N44_O17_errata[NR_CPUS];
52static unsigned int stock_freq;
53static struct cpufreq_driver p4clockmod_driver;
54static unsigned int cpufreq_p4_get(unsigned int cpu);
55
56static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
57{
58 u32 l, h;
59
60 if (!cpu_online(cpu) ||
61 (newstate > DC_DISABLE) || (newstate == DC_RESV))
62 return -EINVAL;
63
64 rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
65
66 if (l & 0x01)
67 pr_debug("CPU#%d currently thermal throttled\n", cpu);
68
69 if (has_N44_O17_errata[cpu] &&
70 (newstate == DC_25PT || newstate == DC_DFLT))
71 newstate = DC_38PT;
72
73 rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
74 if (newstate == DC_DISABLE) {
75 pr_debug("CPU#%d disabling modulation\n", cpu);
76 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
77 } else {
78 pr_debug("CPU#%d setting duty cycle to %d%%\n",
79 cpu, ((125 * newstate) / 10));
80 /* bits 63 - 5 : reserved
81 * bit 4 : enable/disable
82 * bits 3-1 : duty cycle
83 * bit 0 : reserved
84 */
85 l = (l & ~14);
86 l = l | (1<<4) | ((newstate & 0x7)<<1);
87 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
88 }
89
90 return 0;
91}
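/*
 * Worked example (illustrative): for newstate = DC_50PT (4) the path
 * above sets (1 << 4) | (4 << 1) = 0x18 in THERM_CONTROL, i.e. a
 * 4/8 = 50% duty cycle; DC_DISABLE instead clears the enable bit 4.
 */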
92
93
94static struct cpufreq_frequency_table p4clockmod_table[] = {
95 {DC_RESV, CPUFREQ_ENTRY_INVALID},
96 {DC_DFLT, 0},
97 {DC_25PT, 0},
98 {DC_38PT, 0},
99 {DC_50PT, 0},
100 {DC_64PT, 0},
101 {DC_75PT, 0},
102 {DC_88PT, 0},
103 {DC_DISABLE, 0},
104 {DC_RESV, CPUFREQ_TABLE_END},
105};
106
107
108static int cpufreq_p4_target(struct cpufreq_policy *policy,
109 unsigned int target_freq,
110 unsigned int relation)
111{
112 unsigned int newstate = DC_RESV;
113 struct cpufreq_freqs freqs;
114 int i;
115
116 if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
117 target_freq, relation, &newstate))
118 return -EINVAL;
119
120 freqs.old = cpufreq_p4_get(policy->cpu);
121 freqs.new = stock_freq * p4clockmod_table[newstate].index / 8;
122
123 if (freqs.new == freqs.old)
124 return 0;
125
126 /* notifiers */
127 for_each_cpu(i, policy->cpus) {
128 freqs.cpu = i;
129 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
130 }
131
132 /* run on each logical CPU,
133 * see section 13.15.3 of IA32 Intel Architecture Software
134 * Developer's Manual, Volume 3
135 */
136 for_each_cpu(i, policy->cpus)
137 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
138
139 /* notifiers */
140 for_each_cpu(i, policy->cpus) {
141 freqs.cpu = i;
142 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
143 }
144
145 return 0;
146}
147
148
149static int cpufreq_p4_verify(struct cpufreq_policy *policy)
150{
151 return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
152}
153
154
155static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
156{
157 if (c->x86 == 0x06) {
158 if (cpu_has(c, X86_FEATURE_EST))
159 printk_once(KERN_WARNING PFX "Warning: EST-capable "
160 "CPU detected. The acpi-cpufreq module offers "
161 "voltage scaling in addition to frequency "
162 "scaling. You should use that instead of "
163 "p4-clockmod, if possible.\n");
164 switch (c->x86_model) {
165 case 0x0E: /* Core */
166 case 0x0F: /* Core Duo */
167 case 0x16: /* Celeron Core */
168 case 0x1C: /* Atom */
169 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
170 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
171 case 0x0D: /* Pentium M (Dothan) */
172 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
173 /* fall through */
174 case 0x09: /* Pentium M (Banias) */
175 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
176 }
177 }
178
179 if (c->x86 != 0xF)
180 return 0;
181
182	/* on P-4s, the TSC runs at a constant frequency independent of
183	 * whether throttling is active or not. */
184 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
185
186 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
187 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
188			"The speedstep-ich or acpi-cpufreq modules offer "
189			"voltage scaling in addition to frequency scaling. "
190 "You should use either one instead of p4-clockmod, "
191 "if possible.\n");
192 return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
193 }
194
195 return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
196}
197
198
199
200static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
201{
202 struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
203 int cpuid = 0;
204 unsigned int i;
205
206#ifdef CONFIG_SMP
207 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
208#endif
209
210 /* Errata workaround */
211 cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
212 switch (cpuid) {
213 case 0x0f07:
214 case 0x0f0a:
215 case 0x0f11:
216 case 0x0f12:
217 has_N44_O17_errata[policy->cpu] = 1;
218 pr_debug("has errata -- disabling low frequencies\n");
219 }
220
221 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
222 c->x86_model < 2) {
223 /* switch to maximum frequency and measure result */
224 cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
225 recalibrate_cpu_khz();
226 }
227 /* get max frequency */
228 stock_freq = cpufreq_p4_get_frequency(c);
229 if (!stock_freq)
230 return -EINVAL;
231
232 /* table init */
233 for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
234 if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
235 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
236 else
237 p4clockmod_table[i].frequency = (stock_freq * i)/8;
238 }
239 cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
240
241 /* cpuinfo and default policy values */
242
243 /* the transition latency is set to be 1 higher than the maximum
244 * transition latency of the ondemand governor */
245 policy->cpuinfo.transition_latency = 10000001;
246 policy->cur = stock_freq;
247
248 return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
249}
250
251
252static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
253{
254 cpufreq_frequency_table_put_attr(policy->cpu);
255 return 0;
256}
257
258static unsigned int cpufreq_p4_get(unsigned int cpu)
259{
260 u32 l, h;
261
262 rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
263
264 if (l & 0x10) {
265 l = l >> 1;
266 l &= 0x7;
267 } else
268 l = DC_DISABLE;
269
270 if (l != DC_DISABLE)
271 return stock_freq * l / 8;
272
273 return stock_freq;
274}
275
276static struct freq_attr *p4clockmod_attr[] = {
277 &cpufreq_freq_attr_scaling_available_freqs,
278 NULL,
279};
280
281static struct cpufreq_driver p4clockmod_driver = {
282 .verify = cpufreq_p4_verify,
283 .target = cpufreq_p4_target,
284 .init = cpufreq_p4_cpu_init,
285 .exit = cpufreq_p4_cpu_exit,
286 .get = cpufreq_p4_get,
287 .name = "p4-clockmod",
288 .owner = THIS_MODULE,
289 .attr = p4clockmod_attr,
290};
291
292
293static int __init cpufreq_p4_init(void)
294{
295 struct cpuinfo_x86 *c = &cpu_data(0);
296 int ret;
297
298 /*
299 * THERM_CONTROL is architectural for IA32 now, so
300 * we can rely on the capability checks
301 */
302 if (c->x86_vendor != X86_VENDOR_INTEL)
303 return -ENODEV;
304
305 if (!test_cpu_cap(c, X86_FEATURE_ACPI) ||
306 !test_cpu_cap(c, X86_FEATURE_ACC))
307 return -ENODEV;
308
309 ret = cpufreq_register_driver(&p4clockmod_driver);
310 if (!ret)
311 printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
312 "Modulation available\n");
313
314 return ret;
315}
316
317
318static void __exit cpufreq_p4_exit(void)
319{
320 cpufreq_unregister_driver(&p4clockmod_driver);
321}
322
323
324MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
325MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
326MODULE_LICENSE("GPL");
327
328late_initcall(cpufreq_p4_init);
329module_exit(cpufreq_p4_exit);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
new file mode 100644
index 000000000000..7b0603eb0129
--- /dev/null
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -0,0 +1,621 @@
1/*
2 * pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface
3 *
4 * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com>
5 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
6 * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com>
7 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON
17 * INFRINGEMENT. See the GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/smp.h>
30#include <linux/sched.h>
31#include <linux/cpufreq.h>
32#include <linux/compiler.h>
33#include <linux/slab.h>
34
35#include <linux/acpi.h>
36#include <linux/io.h>
37#include <linux/spinlock.h>
38#include <linux/uaccess.h>
39
40#include <acpi/processor.h>
41
42#define PCC_VERSION "1.10.00"
43#define POLL_LOOPS 300
44
45#define CMD_COMPLETE 0x1
46#define CMD_GET_FREQ 0x0
47#define CMD_SET_FREQ 0x1
48
49#define BUF_SZ 4
50
51struct pcc_register_resource {
52 u8 descriptor;
53 u16 length;
54 u8 space_id;
55 u8 bit_width;
56 u8 bit_offset;
57 u8 access_size;
58 u64 address;
59} __attribute__ ((packed));
60
61struct pcc_memory_resource {
62 u8 descriptor;
63 u16 length;
64 u8 space_id;
65 u8 resource_usage;
66 u8 type_specific;
67 u64 granularity;
68 u64 minimum;
69 u64 maximum;
70 u64 translation_offset;
71 u64 address_length;
72} __attribute__ ((packed));
73
74static struct cpufreq_driver pcc_cpufreq_driver;
75
76struct pcc_header {
77 u32 signature;
78 u16 length;
79 u8 major;
80 u8 minor;
81 u32 features;
82 u16 command;
83 u16 status;
84 u32 latency;
85 u32 minimum_time;
86 u32 maximum_time;
87 u32 nominal;
88 u32 throttled_frequency;
89 u32 minimum_frequency;
90};
91
92static void __iomem *pcch_virt_addr;
93static struct pcc_header __iomem *pcch_hdr;
94
95static DEFINE_SPINLOCK(pcc_lock);
96
97static struct acpi_generic_address doorbell;
98
99static u64 doorbell_preserve;
100static u64 doorbell_write;
101
102static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49,
103 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
104
105struct pcc_cpu {
106 u32 input_offset;
107 u32 output_offset;
108};
109
110static struct pcc_cpu __percpu *pcc_cpu_info;
111
112static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
113{
114 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
115 policy->cpuinfo.max_freq);
116 return 0;
117}
118
119static inline void pcc_cmd(void)
120{
121 u64 doorbell_value;
122 int i;
123
124 acpi_read(&doorbell_value, &doorbell);
125 acpi_write((doorbell_value & doorbell_preserve) | doorbell_write,
126 &doorbell);
127
128 for (i = 0; i < POLL_LOOPS; i++) {
129 if (ioread16(&pcch_hdr->status) & CMD_COMPLETE)
130 break;
131 }
132}
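/*
 * The command sequence used throughout this driver: write the per-CPU
 * input buffer, write the command word, ring the doorbell via pcc_cmd()
 * (which polls for CMD_COMPLETE for at most POLL_LOOPS iterations),
 * then check the status word and read the per-CPU output buffer.
 */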
133
134static inline void pcc_clear_mapping(void)
135{
136 if (pcch_virt_addr)
137 iounmap(pcch_virt_addr);
138 pcch_virt_addr = NULL;
139}
140
141static unsigned int pcc_get_freq(unsigned int cpu)
142{
143 struct pcc_cpu *pcc_cpu_data;
144 unsigned int curr_freq;
145 unsigned int freq_limit;
146 u16 status;
147 u32 input_buffer;
148 u32 output_buffer;
149
150 spin_lock(&pcc_lock);
151
152 pr_debug("get: get_freq for CPU %d\n", cpu);
153 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
154
155 input_buffer = 0x1;
156 iowrite32(input_buffer,
157 (pcch_virt_addr + pcc_cpu_data->input_offset));
158 iowrite16(CMD_GET_FREQ, &pcch_hdr->command);
159
160 pcc_cmd();
161
162 output_buffer =
163 ioread32(pcch_virt_addr + pcc_cpu_data->output_offset);
164
165 /* Clear the input buffer - we are done with the current command */
166 memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
167
168 status = ioread16(&pcch_hdr->status);
169 if (status != CMD_COMPLETE) {
170 pr_debug("get: FAILED: for CPU %d, status is %d\n",
171 cpu, status);
172 goto cmd_incomplete;
173 }
174 iowrite16(0, &pcch_hdr->status);
175 curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff))
176 / 100) * 1000);
177
178 pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is "
179 "0x%p, contains a value of: 0x%x. Speed is: %d MHz\n",
180 cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
181 output_buffer, curr_freq);
182
183 freq_limit = (output_buffer >> 8) & 0xff;
184 if (freq_limit != 0xff) {
185 pr_debug("get: frequency for cpu %d is being temporarily"
186 " capped at %d\n", cpu, curr_freq);
187 }
188
189 spin_unlock(&pcc_lock);
190 return curr_freq;
191
192cmd_incomplete:
193 iowrite16(0, &pcch_hdr->status);
194 spin_unlock(&pcc_lock);
195 return 0;
196}
197
198static int pcc_cpufreq_target(struct cpufreq_policy *policy,
199 unsigned int target_freq,
200 unsigned int relation)
201{
202 struct pcc_cpu *pcc_cpu_data;
203 struct cpufreq_freqs freqs;
204 u16 status;
205 u32 input_buffer;
206 int cpu;
207
208 spin_lock(&pcc_lock);
209 cpu = policy->cpu;
210 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
211
212 pr_debug("target: CPU %d should go to target freq: %d "
213 "(virtual) input_offset is 0x%p\n",
214 cpu, target_freq,
215 (pcch_virt_addr + pcc_cpu_data->input_offset));
216
217 freqs.new = target_freq;
218 freqs.cpu = cpu;
219 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
220
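	/* Illustrative encoding example: with a nominal frequency of
	 * 2000 MHz, a target of 1500000 kHz yields (1500000 * 100) /
	 * (2000 * 1000) = 75, so input_buffer = 0x1 | (75 << 8). */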
221 input_buffer = 0x1 | (((target_freq * 100)
222 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
223 iowrite32(input_buffer,
224 (pcch_virt_addr + pcc_cpu_data->input_offset));
225 iowrite16(CMD_SET_FREQ, &pcch_hdr->command);
226
227 pcc_cmd();
228
229 /* Clear the input buffer - we are done with the current command */
230 memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
231
232 status = ioread16(&pcch_hdr->status);
233 if (status != CMD_COMPLETE) {
234 pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
235 cpu, status);
236 goto cmd_incomplete;
237 }
238 iowrite16(0, &pcch_hdr->status);
239
240 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
241 pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
242 spin_unlock(&pcc_lock);
243
244 return 0;
245
246cmd_incomplete:
247 iowrite16(0, &pcch_hdr->status);
248 spin_unlock(&pcc_lock);
249 return -EINVAL;
250}
251
252static int pcc_get_offset(int cpu)
253{
254 acpi_status status;
255 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
256 union acpi_object *pccp, *offset;
257 struct pcc_cpu *pcc_cpu_data;
258 struct acpi_processor *pr;
259 int ret = 0;
260
261 pr = per_cpu(processors, cpu);
262 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
263
264 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
265 if (ACPI_FAILURE(status))
266 return -ENODEV;
267
268 pccp = buffer.pointer;
269 if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
270 ret = -ENODEV;
271 goto out_free;
272	}
273
274 offset = &(pccp->package.elements[0]);
275 if (!offset || offset->type != ACPI_TYPE_INTEGER) {
276 ret = -ENODEV;
277 goto out_free;
278 }
279
280 pcc_cpu_data->input_offset = offset->integer.value;
281
282 offset = &(pccp->package.elements[1]);
283 if (!offset || offset->type != ACPI_TYPE_INTEGER) {
284 ret = -ENODEV;
285 goto out_free;
286 }
287
288 pcc_cpu_data->output_offset = offset->integer.value;
289
290 memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
291 memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ);
292
293 pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data "
294 "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n",
295 cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
296out_free:
297 kfree(buffer.pointer);
298 return ret;
299}
300
301static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
302{
303 acpi_status status;
304 struct acpi_object_list input;
305 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
306 union acpi_object in_params[4];
307 union acpi_object *out_obj;
308 u32 capabilities[2];
309 u32 errors;
310 u32 supported;
311 int ret = 0;
312
313 input.count = 4;
314 input.pointer = in_params;
315 in_params[0].type = ACPI_TYPE_BUFFER;
316 in_params[0].buffer.length = 16;
317 in_params[0].buffer.pointer = OSC_UUID;
318 in_params[1].type = ACPI_TYPE_INTEGER;
319 in_params[1].integer.value = 1;
320 in_params[2].type = ACPI_TYPE_INTEGER;
321 in_params[2].integer.value = 2;
322 in_params[3].type = ACPI_TYPE_BUFFER;
323 in_params[3].buffer.length = 8;
324 in_params[3].buffer.pointer = (u8 *)&capabilities;
325
326 capabilities[0] = OSC_QUERY_ENABLE;
327 capabilities[1] = 0x1;
328
329 status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
330 if (ACPI_FAILURE(status))
331 return -ENODEV;
332
333 if (!output.length)
334 return -ENODEV;
335
336 out_obj = output.pointer;
337 if (out_obj->type != ACPI_TYPE_BUFFER) {
338 ret = -ENODEV;
339 goto out_free;
340 }
341
342 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
343 if (errors) {
344 ret = -ENODEV;
345 goto out_free;
346 }
347
348 supported = *((u32 *)(out_obj->buffer.pointer + 4));
349 if (!(supported & 0x1)) {
350 ret = -ENODEV;
351 goto out_free;
352 }
353
354 kfree(output.pointer);
355 capabilities[0] = 0x0;
356 capabilities[1] = 0x1;
357
358 status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
359 if (ACPI_FAILURE(status))
360 return -ENODEV;
361
362 if (!output.length)
363 return -ENODEV;
364
365 out_obj = output.pointer;
366 if (out_obj->type != ACPI_TYPE_BUFFER) {
367 ret = -ENODEV;
368 goto out_free;
369 }
370
371 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
372 if (errors) {
373 ret = -ENODEV;
374 goto out_free;
375 }
376
377 supported = *((u32 *)(out_obj->buffer.pointer + 4));
378 if (!(supported & 0x1)) {
379 ret = -ENODEV;
380 goto out_free;
381 }
382
383out_free:
384 kfree(output.pointer);
385 return ret;
386}
387
388static int __init pcc_cpufreq_probe(void)
389{
390 acpi_status status;
391 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
392 struct pcc_memory_resource *mem_resource;
393 struct pcc_register_resource *reg_resource;
394 union acpi_object *out_obj, *member;
395 acpi_handle handle, osc_handle, pcch_handle;
396 int ret = 0;
397
398 status = acpi_get_handle(NULL, "\\_SB", &handle);
399 if (ACPI_FAILURE(status))
400 return -ENODEV;
401
402 status = acpi_get_handle(handle, "PCCH", &pcch_handle);
403 if (ACPI_FAILURE(status))
404 return -ENODEV;
405
406 status = acpi_get_handle(handle, "_OSC", &osc_handle);
407 if (ACPI_SUCCESS(status)) {
408 ret = pcc_cpufreq_do_osc(&osc_handle);
409 if (ret)
410 pr_debug("probe: _OSC evaluation did not succeed\n");
411 /* Firmware's use of _OSC is optional */
412 ret = 0;
413 }
414
415 status = acpi_evaluate_object(handle, "PCCH", NULL, &output);
416 if (ACPI_FAILURE(status))
417 return -ENODEV;
418
419 out_obj = output.pointer;
420 if (out_obj->type != ACPI_TYPE_PACKAGE) {
421 ret = -ENODEV;
422 goto out_free;
423 }
424
425 member = &out_obj->package.elements[0];
426 if (member->type != ACPI_TYPE_BUFFER) {
427 ret = -ENODEV;
428 goto out_free;
429 }
430
431 mem_resource = (struct pcc_memory_resource *)member->buffer.pointer;
432
433 pr_debug("probe: mem_resource descriptor: 0x%x,"
434 " length: %d, space_id: %d, resource_usage: %d,"
435 " type_specific: %d, granularity: 0x%llx,"
436 " minimum: 0x%llx, maximum: 0x%llx,"
437 " translation_offset: 0x%llx, address_length: 0x%llx\n",
438 mem_resource->descriptor, mem_resource->length,
439 mem_resource->space_id, mem_resource->resource_usage,
440 mem_resource->type_specific, mem_resource->granularity,
441 mem_resource->minimum, mem_resource->maximum,
442 mem_resource->translation_offset,
443 mem_resource->address_length);
444
445 if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
446 ret = -ENODEV;
447 goto out_free;
448 }
449
450 pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
451 mem_resource->address_length);
452 if (pcch_virt_addr == NULL) {
453 pr_debug("probe: could not map shared mem region\n");
		ret = -ENODEV;
454		goto out_free;
455 }
456 pcch_hdr = pcch_virt_addr;
457
458 pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr);
459 pr_debug("probe: PCCH header is at physical address: 0x%llx,"
460 " signature: 0x%x, length: %d bytes, major: %d, minor: %d,"
461 " supported features: 0x%x, command field: 0x%x,"
462 " status field: 0x%x, nominal latency: %d us\n",
463 mem_resource->minimum, ioread32(&pcch_hdr->signature),
464 ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major),
465 ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features),
466 ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status),
467 ioread32(&pcch_hdr->latency));
468
469 pr_debug("probe: min time between commands: %d us,"
470 " max time between commands: %d us,"
471 " nominal CPU frequency: %d MHz,"
472 " minimum CPU frequency: %d MHz,"
473 " minimum CPU frequency without throttling: %d MHz\n",
474 ioread32(&pcch_hdr->minimum_time),
475 ioread32(&pcch_hdr->maximum_time),
476 ioread32(&pcch_hdr->nominal),
477 ioread32(&pcch_hdr->throttled_frequency),
478 ioread32(&pcch_hdr->minimum_frequency));
479
480 member = &out_obj->package.elements[1];
481 if (member->type != ACPI_TYPE_BUFFER) {
482 ret = -ENODEV;
483 goto pcch_free;
484 }
485
486 reg_resource = (struct pcc_register_resource *)member->buffer.pointer;
487
488 doorbell.space_id = reg_resource->space_id;
489 doorbell.bit_width = reg_resource->bit_width;
490 doorbell.bit_offset = reg_resource->bit_offset;
491 doorbell.access_width = 64;
492 doorbell.address = reg_resource->address;
493
494 pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
495 "bit_offset is %d, access_width is %d, address is 0x%llx\n",
496 doorbell.space_id, doorbell.bit_width, doorbell.bit_offset,
497 doorbell.access_width, reg_resource->address);
498
499 member = &out_obj->package.elements[2];
500 if (member->type != ACPI_TYPE_INTEGER) {
501 ret = -ENODEV;
502 goto pcch_free;
503 }
504
505 doorbell_preserve = member->integer.value;
506
507 member = &out_obj->package.elements[3];
508 if (member->type != ACPI_TYPE_INTEGER) {
509 ret = -ENODEV;
510 goto pcch_free;
511 }
512
513 doorbell_write = member->integer.value;
514
515 pr_debug("probe: doorbell_preserve: 0x%llx,"
516 " doorbell_write: 0x%llx\n",
517 doorbell_preserve, doorbell_write);
518
519 pcc_cpu_info = alloc_percpu(struct pcc_cpu);
520 if (!pcc_cpu_info) {
521 ret = -ENOMEM;
522 goto pcch_free;
523 }
524
525 printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency"
526 " limits: %d MHz, %d MHz\n", PCC_VERSION,
527 ioread32(&pcch_hdr->minimum_frequency),
528 ioread32(&pcch_hdr->nominal));
529 kfree(output.pointer);
530 return ret;
531pcch_free:
532 pcc_clear_mapping();
533out_free:
534 kfree(output.pointer);
535 return ret;
536}
537
538static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
539{
540 unsigned int cpu = policy->cpu;
541 unsigned int result = 0;
542
543 if (!pcch_virt_addr) {
544 result = -1;
545 goto out;
546 }
547
548 result = pcc_get_offset(cpu);
549 if (result) {
550 pr_debug("init: PCCP evaluation failed\n");
551 goto out;
552 }
553
554 policy->max = policy->cpuinfo.max_freq =
555 ioread32(&pcch_hdr->nominal) * 1000;
556 policy->min = policy->cpuinfo.min_freq =
557 ioread32(&pcch_hdr->minimum_frequency) * 1000;
558 policy->cur = pcc_get_freq(cpu);
559
560 if (!policy->cur) {
561 pr_debug("init: Unable to get current CPU frequency\n");
562 result = -EINVAL;
563 goto out;
564 }
565
566 pr_debug("init: policy->max is %d, policy->min is %d\n",
567 policy->max, policy->min);
568out:
569 return result;
570}
571
572static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
573{
574 return 0;
575}
576
577static struct cpufreq_driver pcc_cpufreq_driver = {
578 .flags = CPUFREQ_CONST_LOOPS,
579 .get = pcc_get_freq,
580 .verify = pcc_cpufreq_verify,
581 .target = pcc_cpufreq_target,
582 .init = pcc_cpufreq_cpu_init,
583 .exit = pcc_cpufreq_cpu_exit,
584 .name = "pcc-cpufreq",
585 .owner = THIS_MODULE,
586};
587
588static int __init pcc_cpufreq_init(void)
589{
590 int ret;
591
592 if (acpi_disabled)
593 return 0;
594
595 ret = pcc_cpufreq_probe();
596 if (ret) {
597 pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n");
598 return ret;
599 }
600
601 ret = cpufreq_register_driver(&pcc_cpufreq_driver);
602
603 return ret;
604}
605
606static void __exit pcc_cpufreq_exit(void)
607{
608 cpufreq_unregister_driver(&pcc_cpufreq_driver);
609
610 pcc_clear_mapping();
611
612 free_percpu(pcc_cpu_info);
613}
614
615MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
616MODULE_VERSION(PCC_VERSION);
617MODULE_DESCRIPTION("Processor Clocking Control interface driver");
618MODULE_LICENSE("GPL");
619
620late_initcall(pcc_cpufreq_init);
621module_exit(pcc_cpufreq_exit);
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
new file mode 100644
index 000000000000..b3379d6a5c57
--- /dev/null
+++ b/drivers/cpufreq/powernow-k6.c
@@ -0,0 +1,261 @@
1/*
2 * This file was based upon code in Powertweak Linux (http://powertweak.sf.net)
3 * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä,
4 * Dominik Brodowski.
5 *
6 * Licensed under the terms of the GNU GPL License version 2.
7 *
8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/cpufreq.h>
15#include <linux/ioport.h>
16#include <linux/timex.h>
17#include <linux/io.h>
18
19#include <asm/msr.h>
20
21#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
22 as it is unused */
23
24#define PFX "powernow-k6: "
25static unsigned int busfreq; /* FSB, in 10 kHz */
26static unsigned int max_multiplier;
27
28
29/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
30static struct cpufreq_frequency_table clock_ratio[] = {
31 {45, /* 000 -> 4.5x */ 0},
32 {50, /* 001 -> 5.0x */ 0},
33 {40, /* 010 -> 4.0x */ 0},
34 {55, /* 011 -> 5.5x */ 0},
35 {20, /* 100 -> 2.0x */ 0},
36 {30, /* 101 -> 3.0x */ 0},
37 {60, /* 110 -> 6.0x */ 0},
38 {35, /* 111 -> 3.5x */ 0},
39 {0, CPUFREQ_TABLE_END}
40};
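/*
 * Example (illustrative): an EPMR readout whose bits 5-7 equal 001b
 * selects clock_ratio[1].index = 50, i.e. a 5.0x multiplier; with a
 * 100 MHz FSB (busfreq = 10000, in 10 kHz units) that is 500000 kHz.
 */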
41
42
43/**
44 * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
45 *
 46 * Returns the current setting of the frequency multiplier. Core clock
 47 * speed is the Front-Side Bus frequency multiplied by this value.
48 */
49static int powernow_k6_get_cpu_multiplier(void)
50{
51 u64 invalue = 0;
52 u32 msrval;
53
54 msrval = POWERNOW_IOPORT + 0x1;
55 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
56 invalue = inl(POWERNOW_IOPORT + 0x8);
57 msrval = POWERNOW_IOPORT + 0x0;
58 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
59
60 return clock_ratio[(invalue >> 5)&7].index;
61}
62
63
64/**
65 * powernow_k6_set_state - set the PowerNow! multiplier
66 * @best_i: clock_ratio[best_i] is the target multiplier
67 *
68 * Tries to change the PowerNow! multiplier
69 */
70static void powernow_k6_set_state(unsigned int best_i)
71{
72 unsigned long outvalue = 0, invalue = 0;
73 unsigned long msrval;
74 struct cpufreq_freqs freqs;
75
76 if (clock_ratio[best_i].index > max_multiplier) {
77 printk(KERN_ERR PFX "invalid target frequency\n");
78 return;
79 }
80
81 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
82 freqs.new = busfreq * clock_ratio[best_i].index;
83 freqs.cpu = 0; /* powernow-k6.c is UP only driver */
84
85 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
86
87 /* we now need to transform best_i to the BVC format, see AMD#23446 */
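	/* Illustrative example: best_i = 1 gives outvalue =
	 * (1 << 12) | (1 << 10) | (1 << 9) | (1 << 5) = 0x1620, before the
	 * low nibble read back from the port is merged in below. */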
88
89 outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
90
91 msrval = POWERNOW_IOPORT + 0x1;
92 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
93 invalue = inl(POWERNOW_IOPORT + 0x8);
94 invalue = invalue & 0xf;
95 outvalue = outvalue | invalue;
96 outl(outvalue , (POWERNOW_IOPORT + 0x8));
97 msrval = POWERNOW_IOPORT + 0x0;
98 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
99
100 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
101
102 return;
103}
104
105
106/**
107 * powernow_k6_verify - verifies a new CPUfreq policy
108 * @policy: new policy
109 *
110 * Policy must be within lowest and highest possible CPU Frequency,
111 * and at least one possible state must be within min and max.
112 */
113static int powernow_k6_verify(struct cpufreq_policy *policy)
114{
115 return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
116}
117
118
119/**
 120 * powernow_k6_target - sets a new CPUFreq policy
121 * @policy: new policy
122 * @target_freq: the target frequency
123 * @relation: how that frequency relates to achieved frequency
124 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
125 *
126 * sets a new CPUFreq policy
127 */
128static int powernow_k6_target(struct cpufreq_policy *policy,
129 unsigned int target_freq,
130 unsigned int relation)
131{
132 unsigned int newstate = 0;
133
134 if (cpufreq_frequency_table_target(policy, &clock_ratio[0],
135 target_freq, relation, &newstate))
136 return -EINVAL;
137
138 powernow_k6_set_state(newstate);
139
140 return 0;
141}
142
143
144static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
145{
146 unsigned int i, f;
147 int result;
148
149 if (policy->cpu != 0)
150 return -ENODEV;
151
152 /* get frequencies */
153 max_multiplier = powernow_k6_get_cpu_multiplier();
154 busfreq = cpu_khz / max_multiplier;
155
156 /* table init */
157 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
158 f = clock_ratio[i].index;
159 if (f > max_multiplier)
160 clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
161 else
162 clock_ratio[i].frequency = busfreq * f;
163 }
164
165 /* cpuinfo and default policy values */
166 policy->cpuinfo.transition_latency = 200000;
167 policy->cur = busfreq * max_multiplier;
168
169 result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
170 if (result)
171 return result;
172
173 cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
174
175 return 0;
176}
177
178
179static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
180{
181 unsigned int i;
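	/* switch back to the maximum multiplier on module exit */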
182 for (i = 0; i < 8; i++) {
183		if (clock_ratio[i].index == max_multiplier)
184 powernow_k6_set_state(i);
185 }
186 cpufreq_frequency_table_put_attr(policy->cpu);
187 return 0;
188}
189
190static unsigned int powernow_k6_get(unsigned int cpu)
191{
192	return busfreq * powernow_k6_get_cpu_multiplier();
195}
196
197static struct freq_attr *powernow_k6_attr[] = {
198 &cpufreq_freq_attr_scaling_available_freqs,
199 NULL,
200};
201
202static struct cpufreq_driver powernow_k6_driver = {
203 .verify = powernow_k6_verify,
204 .target = powernow_k6_target,
205 .init = powernow_k6_cpu_init,
206 .exit = powernow_k6_cpu_exit,
207 .get = powernow_k6_get,
208 .name = "powernow-k6",
209 .owner = THIS_MODULE,
210 .attr = powernow_k6_attr,
211};
212
213
214/**
215 * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver
216 *
217 * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported
218 * devices, -EIO or -EINVAL on problems during initialization, and zero
219 * on success.
220 */
221static int __init powernow_k6_init(void)
222{
223 struct cpuinfo_x86 *c = &cpu_data(0);
224
225 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
226 ((c->x86_model != 12) && (c->x86_model != 13)))
227 return -ENODEV;
228
229 if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
230 printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
231 return -EIO;
232 }
233
234 if (cpufreq_register_driver(&powernow_k6_driver)) {
235 release_region(POWERNOW_IOPORT, 16);
236 return -EINVAL;
237 }
238
239 return 0;
240}
241
242
243/**
244 * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support
245 *
246 * Unregisters AMD K6-2+ / K6-3+ PowerNow! support.
247 */
248static void __exit powernow_k6_exit(void)
249{
250 cpufreq_unregister_driver(&powernow_k6_driver);
251 release_region(POWERNOW_IOPORT, 16);
252}
253
254
255MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, "
256 "Dominik Brodowski <linux@brodo.de>");
257MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
258MODULE_LICENSE("GPL");
259
260module_init(powernow_k6_init);
261module_exit(powernow_k6_exit);
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
new file mode 100644
index 000000000000..d71d9f372359
--- /dev/null
+++ b/drivers/cpufreq/powernow-k7.c
@@ -0,0 +1,747 @@
1/*
2 * AMD K7 Powernow driver.
3 * (C) 2003 Dave Jones on behalf of SuSE Labs.
4 * (C) 2003-2004 Dave Jones <davej@redhat.com>
5 *
6 * Licensed under the terms of the GNU GPL License version 2.
7 * Based upon datasheets & sample CPUs kindly provided by AMD.
8 *
9 * Errata 5:
10 * CPU may fail to execute a FID/VID change in presence of interrupt.
11 * - We cli/sti on stepping A0 CPUs around the FID/VID transition.
12 * Errata 15:
13 * CPU with half frequency multipliers may hang upon wakeup from disconnect.
14 * - We disable half multipliers if ACPI is used on A0 stepping CPUs.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/init.h>
21#include <linux/cpufreq.h>
22#include <linux/slab.h>
23#include <linux/string.h>
24#include <linux/dmi.h>
25#include <linux/timex.h>
26#include <linux/io.h>
27
28#include <asm/timer.h> /* Needed for recalibrate_cpu_khz() */
29#include <asm/msr.h>
30#include <asm/system.h>
31
32#ifdef CONFIG_X86_POWERNOW_K7_ACPI
33#include <linux/acpi.h>
34#include <acpi/processor.h>
35#endif
36
37#include "powernow-k7.h"
38
39#define PFX "powernow: "
40
41
42struct psb_s {
43 u8 signature[10];
44 u8 tableversion;
45 u8 flags;
46 u16 settlingtime;
47 u8 reserved1;
48 u8 numpst;
49};
50
51struct pst_s {
52 u32 cpuid;
53 u8 fsbspeed;
54 u8 maxfid;
55 u8 startvid;
56 u8 numpstates;
57};
58
59#ifdef CONFIG_X86_POWERNOW_K7_ACPI
60union powernow_acpi_control_t {
61 struct {
62 unsigned long fid:5,
63 vid:5,
64 sgtc:20,
65 res1:2;
66 } bits;
67 unsigned long val;
68};
69#endif
70
71/* divide by 1000 to get VCore voltage in V. */
72static const int mobile_vid_table[32] = {
73 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
74 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
75 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
76 1075, 1050, 1025, 1000, 975, 950, 925, 0,
77};
78
79/* divide by 10 to get FID. */
80static const int fid_codes[32] = {
81 110, 115, 120, 125, 50, 55, 60, 65,
82 70, 75, 80, 85, 90, 95, 100, 105,
83 30, 190, 40, 200, 130, 135, 140, 210,
84 150, 225, 160, 165, 170, 180, -1, -1,
85};
86
 87/* This parameter forces use of ACPI instead of the legacy PSB/PST
 88 * method of configuration.
89 */
90
91static int acpi_force;
92
93static struct cpufreq_frequency_table *powernow_table;
94
95static unsigned int can_scale_bus;
96static unsigned int can_scale_vid;
97static unsigned int minimum_speed = -1;
98static unsigned int maximum_speed;
99static unsigned int number_scales;
100static unsigned int fsb;
101static unsigned int latency;
102static char have_a0;
103
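/* Return true if the PST entry's FSB speed (in MHz) is within 5 MHz of
 * the FSB speed we measured (fsb is kept in kHz). */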
104static int check_fsb(unsigned int fsbspeed)
105{
106 int delta;
107 unsigned int f = fsb / 1000;
108
109 delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed;
110 return delta < 5;
111}
112
113static int check_powernow(void)
114{
115 struct cpuinfo_x86 *c = &cpu_data(0);
116 unsigned int maxei, eax, ebx, ecx, edx;
117
118 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 6)) {
119#ifdef MODULE
120 printk(KERN_INFO PFX "This module only works with "
121 "AMD K7 CPUs\n");
122#endif
123 return 0;
124 }
125
126 /* Get maximum capabilities */
127 maxei = cpuid_eax(0x80000000);
128 if (maxei < 0x80000007) { /* Any powernow info ? */
129#ifdef MODULE
130 printk(KERN_INFO PFX "No powernow capabilities detected\n");
131#endif
132 return 0;
133 }
134
135 if ((c->x86_model == 6) && (c->x86_mask == 0)) {
136 printk(KERN_INFO PFX "K7 660[A0] core detected, "
137 "enabling errata workarounds\n");
138 have_a0 = 1;
139 }
140
141 cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
142
143	/* Check we can actually do something before we say anything. */
144 if (!(edx & (1 << 1 | 1 << 2)))
145 return 0;
146
147 printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
148
149 if (edx & 1 << 1) {
150 printk("frequency");
151 can_scale_bus = 1;
152 }
153
154 if ((edx & (1 << 1 | 1 << 2)) == 0x6)
155 printk(" and ");
156
157 if (edx & 1 << 2) {
158 printk("voltage");
159 can_scale_vid = 1;
160 }
161
162 printk(".\n");
163 return 1;
164}
165
166#ifdef CONFIG_X86_POWERNOW_K7_ACPI
167static void invalidate_entry(unsigned int entry)
168{
169 powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
170}
171#endif
172
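/* Build the cpufreq frequency table from the fid/vid pairs of the PST
 * entry that matched this CPU. */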
173static int get_ranges(unsigned char *pst)
174{
175 unsigned int j;
176 unsigned int speed;
177 u8 fid, vid;
178
179 powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
180 (number_scales + 1)), GFP_KERNEL);
181 if (!powernow_table)
182 return -ENOMEM;
183
184 for (j = 0 ; j < number_scales; j++) {
185 fid = *pst++;
186
187 powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
188 powernow_table[j].index = fid; /* lower 8 bits */
189
190 speed = powernow_table[j].frequency;
191
192 if ((fid_codes[fid] % 10) == 5) {
193#ifdef CONFIG_X86_POWERNOW_K7_ACPI
194 if (have_a0 == 1)
195 invalidate_entry(j);
196#endif
197 }
198
199 if (speed < minimum_speed)
200 minimum_speed = speed;
201 if (speed > maximum_speed)
202 maximum_speed = speed;
203
204 vid = *pst++;
205 powernow_table[j].index |= (vid << 8); /* upper 8 bits */
206
207 pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
208 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
209 fid_codes[fid] % 10, speed/1000, vid,
210 mobile_vid_table[vid]/1000,
211 mobile_vid_table[vid]%1000);
212 }
213 powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
214 powernow_table[number_scales].index = 0;
215
216 return 0;
217}
218
219
220static void change_FID(int fid)
221{
222 union msr_fidvidctl fidvidctl;
223
224 rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
225 if (fidvidctl.bits.FID != fid) {
226 fidvidctl.bits.SGTC = latency;
227 fidvidctl.bits.FID = fid;
228 fidvidctl.bits.VIDC = 0;
229 fidvidctl.bits.FIDC = 1;
230 wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
231 }
232}
233
234
235static void change_VID(int vid)
236{
237 union msr_fidvidctl fidvidctl;
238
239 rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
240 if (fidvidctl.bits.VID != vid) {
241 fidvidctl.bits.SGTC = latency;
242 fidvidctl.bits.VID = vid;
243 fidvidctl.bits.FIDC = 0;
244 fidvidctl.bits.VIDC = 1;
245 wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
246 }
247}
248
249
250static void change_speed(unsigned int index)
251{
252 u8 fid, vid;
253 struct cpufreq_freqs freqs;
254 union msr_fidvidstatus fidvidstatus;
255 int cfid;
256
257 /* fid are the lower 8 bits of the index we stored into
258 * the cpufreq frequency table in powernow_decode_bios,
259 * vid are the upper 8 bits.
260 */
261
262 fid = powernow_table[index].index & 0xFF;
263 vid = (powernow_table[index].index & 0xFF00) >> 8;
264
265 freqs.cpu = 0;
266
267 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
268 cfid = fidvidstatus.bits.CFID;
269 freqs.old = fsb * fid_codes[cfid] / 10;
270
271 freqs.new = powernow_table[index].frequency;
272
273 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
274
275 /* Now do the magic poking into the MSRs. */
276
277 if (have_a0 == 1) /* A0 errata 5 */
278 local_irq_disable();
279
280 if (freqs.old > freqs.new) {
281 /* Going down, so change FID first */
282 change_FID(fid);
283 change_VID(vid);
284 } else {
285 /* Going up, so change VID first */
286 change_VID(vid);
287 change_FID(fid);
288 }
289
290
291 if (have_a0 == 1)
292 local_irq_enable();
293
294 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
295}
296
297
298#ifdef CONFIG_X86_POWERNOW_K7_ACPI
299
300static struct acpi_processor_performance *acpi_processor_perf;
301
302static int powernow_acpi_init(void)
303{
304 int i;
305 int retval = 0;
306 union powernow_acpi_control_t pc;
307
308 if (acpi_processor_perf != NULL && powernow_table != NULL) {
309 retval = -EINVAL;
310 goto err0;
311 }
312
313 acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance),
314 GFP_KERNEL);
315 if (!acpi_processor_perf) {
316 retval = -ENOMEM;
317 goto err0;
318 }
319
320 if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
321 GFP_KERNEL)) {
322 retval = -ENOMEM;
323 goto err05;
324 }
325
326 if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
327 retval = -EIO;
328 goto err1;
329 }
330
331 if (acpi_processor_perf->control_register.space_id !=
332 ACPI_ADR_SPACE_FIXED_HARDWARE) {
333 retval = -ENODEV;
334 goto err2;
335 }
336
337 if (acpi_processor_perf->status_register.space_id !=
338 ACPI_ADR_SPACE_FIXED_HARDWARE) {
339 retval = -ENODEV;
340 goto err2;
341 }
342
343 number_scales = acpi_processor_perf->state_count;
344
345 if (number_scales < 2) {
346 retval = -ENODEV;
347 goto err2;
348 }
349
350 powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
351 (number_scales + 1)), GFP_KERNEL);
352 if (!powernow_table) {
353 retval = -ENOMEM;
354 goto err2;
355 }
356
357 pc.val = (unsigned long) acpi_processor_perf->states[0].control;
358 for (i = 0; i < number_scales; i++) {
359 u8 fid, vid;
360 struct acpi_processor_px *state =
361 &acpi_processor_perf->states[i];
362 unsigned int speed, speed_mhz;
363
364 pc.val = (unsigned long) state->control;
365 pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
366 i,
367 (u32) state->core_frequency,
368 (u32) state->power,
369 (u32) state->transition_latency,
370 (u32) state->control,
371 pc.bits.sgtc);
372
373 vid = pc.bits.vid;
374 fid = pc.bits.fid;
375
376 powernow_table[i].frequency = fsb * fid_codes[fid] / 10;
377 powernow_table[i].index = fid; /* lower 8 bits */
378 powernow_table[i].index |= (vid << 8); /* upper 8 bits */
379
380 speed = powernow_table[i].frequency;
381 speed_mhz = speed / 1000;
382
383 /* processor_perflib will multiply the MHz value by 1000 to
384 * get a KHz value (e.g. 1266000). However, powernow-k7 works
385 * with true KHz values (e.g. 1266768). To ensure that all
386 * powernow frequencies are available, we must ensure that
387 * ACPI doesn't restrict them, so we round up the MHz value
388 * to ensure that perflib's computed KHz value is greater than
389 * or equal to powernow's KHz value.
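 * For example, 1266768 kHz becomes 1267 MHz, so perflib's computed
 * 1267000 kHz is >= powernow's 1266768 kHz.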
390 */
391 if (speed % 1000 > 0)
392 speed_mhz++;
393
394 if ((fid_codes[fid] % 10) == 5) {
395 if (have_a0 == 1)
396 invalidate_entry(i);
397 }
398
399 pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
400 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
401 fid_codes[fid] % 10, speed_mhz, vid,
402 mobile_vid_table[vid]/1000,
403 mobile_vid_table[vid]%1000);
404
405 if (state->core_frequency != speed_mhz) {
406 state->core_frequency = speed_mhz;
407 pr_debug(" Corrected ACPI frequency to %d\n",
408 speed_mhz);
409 }
410
411 if (latency < pc.bits.sgtc)
412 latency = pc.bits.sgtc;
413
414 if (speed < minimum_speed)
415 minimum_speed = speed;
416 if (speed > maximum_speed)
417 maximum_speed = speed;
418 }
419
420 powernow_table[i].frequency = CPUFREQ_TABLE_END;
421 powernow_table[i].index = 0;
422
423 /* notify BIOS that we exist */
424 acpi_processor_notify_smm(THIS_MODULE);
425
426 return 0;
427
428err2:
429 acpi_processor_unregister_performance(acpi_processor_perf, 0);
430err1:
431 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
432err05:
433 kfree(acpi_processor_perf);
434err0:
435 printk(KERN_WARNING PFX "ACPI perflib can not be used on "
436 "this platform\n");
437 acpi_processor_perf = NULL;
438 return retval;
439}
440#else
441static int powernow_acpi_init(void)
442{
443	printk(KERN_INFO PFX "no ACPI processor support found. "
444		"Please recompile your kernel with ACPI processor support\n");
445 return -EINVAL;
446}
447#endif
448
449static void print_pst_entry(struct pst_s *pst, unsigned int j)
450{
451 pr_debug("PST:%d (@%p)\n", j, pst);
452 pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
453 pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
454}
455
456static int powernow_decode_bios(int maxfid, int startvid)
457{
458 struct psb_s *psb;
459 struct pst_s *pst;
460 unsigned int i, j;
461 unsigned char *p;
462 unsigned int etuple;
463 unsigned int ret;
464
465 etuple = cpuid_eax(0x80000001);
466
467	for (i = 0xC0000; i < 0xffff0; i += 16) {
468
469 p = phys_to_virt(i);
470
471 if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
472 pr_debug("Found PSB header at %p\n", p);
473 psb = (struct psb_s *) p;
474 pr_debug("Table version: 0x%x\n", psb->tableversion);
475 if (psb->tableversion != 0x12) {
476 printk(KERN_INFO PFX "Sorry, only v1.2 tables"
477 " supported right now\n");
478 return -ENODEV;
479 }
480
481 pr_debug("Flags: 0x%x\n", psb->flags);
482 if ((psb->flags & 1) == 0)
483 pr_debug("Mobile voltage regulator\n");
484 else
485 pr_debug("Desktop voltage regulator\n");
486
487 latency = psb->settlingtime;
488 if (latency < 100) {
489 printk(KERN_INFO PFX "BIOS set settling time "
490 "to %d microseconds. "
491 "Should be at least 100. "
492 "Correcting.\n", latency);
493 latency = 100;
494 }
495 pr_debug("Settling Time: %d microseconds.\n",
496 psb->settlingtime);
497 pr_debug("Has %d PST tables. (Only dumping ones "
498 "relevant to this CPU).\n",
499 psb->numpst);
500
501 p += sizeof(struct psb_s);
502
503 pst = (struct pst_s *) p;
504
505 for (j = 0; j < psb->numpst; j++) {
506 pst = (struct pst_s *) p;
507 number_scales = pst->numpstates;
508
509 if ((etuple == pst->cpuid) &&
510 check_fsb(pst->fsbspeed) &&
511 (maxfid == pst->maxfid) &&
512 (startvid == pst->startvid)) {
513 print_pst_entry(pst, j);
514 p = (char *)pst + sizeof(struct pst_s);
515 ret = get_ranges(p);
516 return ret;
517 } else {
519				p = (char *)pst + sizeof(struct pst_s) +
520					number_scales * 2;
522 }
523 }
524 printk(KERN_INFO PFX "No PST tables match this cpuid "
525 "(0x%x)\n", etuple);
526 printk(KERN_INFO PFX "This is indicative of a broken "
527 "BIOS.\n");
528
529 return -EINVAL;
530 }
531 p++;
532 }
533
534 return -ENODEV;
535}
536
537
538static int powernow_target(struct cpufreq_policy *policy,
539 unsigned int target_freq,
540 unsigned int relation)
541{
542 unsigned int newstate;
543
544 if (cpufreq_frequency_table_target(policy, powernow_table, target_freq,
545 relation, &newstate))
546 return -EINVAL;
547
548 change_speed(newstate);
549
550 return 0;
551}
552
553
554static int powernow_verify(struct cpufreq_policy *policy)
555{
556 return cpufreq_frequency_table_verify(policy, powernow_table);
557}
558
559/*
560 * The bus frequency is a multiple of 100000/3 kHz, so we compute the
561 * SGTC value according to that multiple. That way we match how AMD
562 * intended this to work, and get the same behaviour already tested
563 * under the "well-known" other OS.
566 */
567static int __cpuinit fixup_sgtc(void)
568{
569 unsigned int sgtc;
570 unsigned int m;
571
572 m = fsb / 3333;
573 if ((m % 10) >= 5)
574 m += 5;
575
576 m /= 10;
577
578 sgtc = 100 * m * latency;
579 sgtc = sgtc / 3;
580 if (sgtc > 0xfffff) {
581 printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
582 sgtc = 0xfffff;
583 }
584 return sgtc;
585}
586
587static unsigned int powernow_get(unsigned int cpu)
588{
589 union msr_fidvidstatus fidvidstatus;
590 unsigned int cfid;
591
592 if (cpu)
593 return 0;
594 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
595 cfid = fidvidstatus.bits.CFID;
596
597 return fsb * fid_codes[cfid] / 10;
598}
599
600
601static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d)
602{
603 printk(KERN_WARNING PFX
604 "%s laptop with broken PST tables in BIOS detected.\n",
605 d->ident);
606 printk(KERN_WARNING PFX
607 "You need to downgrade to 3A21 (09/09/2002), or try a newer "
608 "BIOS than 3A71 (01/20/2003)\n");
609 printk(KERN_WARNING PFX
610 "cpufreq scaling has been disabled as a result of this.\n");
611 return 0;
612}
613
614/*
615 * Some Athlon laptops have really broken PST tables.
616 * A BIOS update is all that can save them.
617 * Mention this, and disable cpufreq.
618 */
619static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = {
620 {
621 .callback = acer_cpufreq_pst,
622 .ident = "Acer Aspire",
623 .matches = {
624 DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"),
625 DMI_MATCH(DMI_BIOS_VERSION, "3A71"),
626 },
627 },
628 { }
629};
630
631static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy)
632{
633 union msr_fidvidstatus fidvidstatus;
634 int result;
635
636 if (policy->cpu != 0)
637 return -ENODEV;
638
639 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
640
641 recalibrate_cpu_khz();
642
643 fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
644 if (!fsb) {
645 printk(KERN_WARNING PFX "can not determine bus frequency\n");
646 return -EINVAL;
647 }
648 pr_debug("FSB: %3dMHz\n", fsb/1000);
649
650 if (dmi_check_system(powernow_dmi_table) || acpi_force) {
651 printk(KERN_INFO PFX "PSB/PST known to be broken. "
652 "Trying ACPI instead\n");
653 result = powernow_acpi_init();
654 } else {
655 result = powernow_decode_bios(fidvidstatus.bits.MFID,
656 fidvidstatus.bits.SVID);
657 if (result) {
658 printk(KERN_INFO PFX "Trying ACPI perflib\n");
659 maximum_speed = 0;
660 minimum_speed = -1;
661 latency = 0;
662 result = powernow_acpi_init();
663 if (result) {
664 printk(KERN_INFO PFX
665 "ACPI and legacy methods failed\n");
666 }
667 } else {
668			/* SGTC uses the bus clock as its timer */
669 latency = fixup_sgtc();
670 printk(KERN_INFO PFX "SGTC: %d\n", latency);
671 }
672 }
673
674 if (result)
675 return result;
676
677 printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
678 minimum_speed/1000, maximum_speed/1000);
679
680 policy->cpuinfo.transition_latency =
681 cpufreq_scale(2000000UL, fsb, latency);
682
683 policy->cur = powernow_get(0);
684
685 cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
686
687 return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
688}
689
690static int powernow_cpu_exit(struct cpufreq_policy *policy)
691{
692 cpufreq_frequency_table_put_attr(policy->cpu);
693
694#ifdef CONFIG_X86_POWERNOW_K7_ACPI
695 if (acpi_processor_perf) {
696 acpi_processor_unregister_performance(acpi_processor_perf, 0);
697 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
698 kfree(acpi_processor_perf);
699 }
700#endif
701
702 kfree(powernow_table);
703 return 0;
704}
705
706static struct freq_attr *powernow_table_attr[] = {
707 &cpufreq_freq_attr_scaling_available_freqs,
708 NULL,
709};
710
711static struct cpufreq_driver powernow_driver = {
712 .verify = powernow_verify,
713 .target = powernow_target,
714 .get = powernow_get,
715#ifdef CONFIG_X86_POWERNOW_K7_ACPI
716 .bios_limit = acpi_processor_get_bios_limit,
717#endif
718 .init = powernow_cpu_init,
719 .exit = powernow_cpu_exit,
720 .name = "powernow-k7",
721 .owner = THIS_MODULE,
722 .attr = powernow_table_attr,
723};
724
725static int __init powernow_init(void)
726{
727 if (check_powernow() == 0)
728 return -ENODEV;
729 return cpufreq_register_driver(&powernow_driver);
730}
731
732
733static void __exit powernow_exit(void)
734{
735 cpufreq_unregister_driver(&powernow_driver);
736}
737
738module_param(acpi_force, int, 0444);
739MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
740
741MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
742MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
743MODULE_LICENSE("GPL");
744
745late_initcall(powernow_init);
746module_exit(powernow_exit);
747
diff --git a/drivers/cpufreq/powernow-k7.h b/drivers/cpufreq/powernow-k7.h
new file mode 100644
index 000000000000..35fb4eaf6e1c
--- /dev/null
+++ b/drivers/cpufreq/powernow-k7.h
@@ -0,0 +1,43 @@
1/*
2 * (C) 2003 Dave Jones.
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * AMD-specific information
7 *
8 */
9
10union msr_fidvidctl {
11 struct {
12 unsigned FID:5, // 4:0
13 reserved1:3, // 7:5
14 VID:5, // 12:8
15 reserved2:3, // 15:13
16 FIDC:1, // 16
17 VIDC:1, // 17
18 reserved3:2, // 19:18
19 FIDCHGRATIO:1, // 20
 20			reserved4:11,	// 31:21
 21			SGTC:20,	// 51:32
22 reserved5:12; // 63:52
23 } bits;
24 unsigned long long val;
25};
26
27union msr_fidvidstatus {
28 struct {
29 unsigned CFID:5, // 4:0
30 reserved1:3, // 7:5
31 SFID:5, // 12:8
32 reserved2:3, // 15:13
33 MFID:5, // 20:16
34 reserved3:11, // 31:21
35 CVID:5, // 36:32
36 reserved4:3, // 39:37
37 SVID:5, // 44:40
38 reserved5:3, // 47:45
39 MVID:5, // 52:48
40 reserved6:11; // 63:53
41 } bits;
42 unsigned long long val;
43};
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
new file mode 100644
index 000000000000..83479b6fb9a1
--- /dev/null
+++ b/drivers/cpufreq/powernow-k8.c
@@ -0,0 +1,1607 @@
1/*
2 * (c) 2003-2010 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html
6 *
7 * Support : mark.langsdorf@amd.com
8 *
9 * Based on the powernow-k7.c module written by Dave Jones.
10 * (C) 2003 Dave Jones on behalf of SuSE Labs
11 * (C) 2004 Dominik Brodowski <linux@brodo.de>
12 * (C) 2004 Pavel Machek <pavel@ucw.cz>
13 * Licensed under the terms of the GNU GPL License version 2.
14 * Based upon datasheets & sample CPUs kindly provided by AMD.
15 *
16 * Valuable input gratefully received from Dave Jones, Pavel Machek,
17 * Dominik Brodowski, Jacob Shin, and others.
18 * Originally developed by Paul Devriendt.
19 * Processor information obtained from Chapter 9 (Power and Thermal Management)
20 * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
21 * Opteron Processors" available for download from www.amd.com
22 *
23 * Tables for specific CPUs can be inferred from
24 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
25 */
26
27#include <linux/kernel.h>
28#include <linux/smp.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/cpufreq.h>
32#include <linux/slab.h>
33#include <linux/string.h>
34#include <linux/cpumask.h>
35#include <linux/sched.h> /* for current / set_cpus_allowed() */
36#include <linux/io.h>
37#include <linux/delay.h>
38
39#include <asm/msr.h>
40
41#include <linux/acpi.h>
42#include <linux/mutex.h>
43#include <acpi/processor.h>
44
45#define PFX "powernow-k8: "
46#define VERSION "version 2.20.00"
47#include "powernow-k8.h"
48#include "mperf.h"
49
50/* serialize freq changes */
51static DEFINE_MUTEX(fidvid_mutex);
52
53static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
54
55static int cpu_family = CPU_OPTERON;
56
57/* core performance boost */
58static bool cpb_capable, cpb_enabled;
59static struct msr __percpu *msrs;
60
61static struct cpufreq_driver cpufreq_amd64_driver;
62
63#ifndef CONFIG_SMP
64static inline const struct cpumask *cpu_core_mask(int cpu)
65{
66 return cpumask_of(0);
67}
68#endif
69
70/* Return a frequency in MHz, given an input fid */
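/* e.g. fid 0 -> 800 MHz, fid 2 -> 1000 MHz */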
71static u32 find_freq_from_fid(u32 fid)
72{
73 return 800 + (fid * 100);
74}
75
76/* Return a frequency in KHz, given an input fid */
77static u32 find_khz_freq_from_fid(u32 fid)
78{
79 return 1000 * find_freq_from_fid(fid);
80}
81
82static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
83 u32 pstate)
84{
85 return data[pstate].frequency;
86}
87
88/* Return the vco fid for an input fid
89 *
90 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
91 * only from corresponding high fids. This returns "high" fid corresponding to
92 * "low" one.
93 */
94static u32 convert_fid_to_vco_fid(u32 fid)
95{
96 if (fid < HI_FID_TABLE_BOTTOM)
97 return 8 + (2 * fid);
98 else
99 return fid;
100}
101
102/*
103 * Return 1 if the pending bit is set. Unless we just instructed the processor
104 * to transition to a new state, seeing this bit set is really bad news.
105 */
106static int pending_bit_stuck(void)
107{
108 u32 lo, hi;
109
110 if (cpu_family == CPU_HW_PSTATE)
111 return 0;
112
113 rdmsr(MSR_FIDVID_STATUS, lo, hi);
114 return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
115}
116
117/*
118 * Update the cached current fid / vid values in *data from the status msr.
119 * Returns 1 on error.
120 */
121static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
122{
123 u32 lo, hi;
124 u32 i = 0;
125
126 if (cpu_family == CPU_HW_PSTATE) {
127 rdmsr(MSR_PSTATE_STATUS, lo, hi);
128 i = lo & HW_PSTATE_MASK;
129 data->currpstate = i;
130
131 /*
132 * a workaround for family 11h erratum 311 might cause
133 * an "out-of-range Pstate if the core is in Pstate-0
134 */
135 if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
136 data->currpstate = HW_PSTATE_0;
137
138 return 0;
139 }
140 do {
141 if (i++ > 10000) {
142 pr_debug("detected change pending stuck\n");
143 return 1;
144 }
145 rdmsr(MSR_FIDVID_STATUS, lo, hi);
146 } while (lo & MSR_S_LO_CHANGE_PENDING);
147
148 data->currvid = hi & MSR_S_HI_CURRENT_VID;
149 data->currfid = lo & MSR_S_LO_CURRENT_FID;
150
151 return 0;
152}
153
154/* wait out the isochronous relief time */
155static void count_off_irt(struct powernow_k8_data *data)
156{
157 udelay((1 << data->irt) * 10);
158 return;
159}
160
161/* wait out the voltage stabilization time */
162static void count_off_vst(struct powernow_k8_data *data)
163{
164 udelay(data->vstable * VST_UNITS_20US);
165 return;
166}
167
168/* need to init the control msr to a safe value (for each cpu) */
169static void fidvid_msr_init(void)
170{
171 u32 lo, hi;
172 u8 fid, vid;
173
174 rdmsr(MSR_FIDVID_STATUS, lo, hi);
175 vid = hi & MSR_S_HI_CURRENT_VID;
176 fid = lo & MSR_S_LO_CURRENT_FID;
177 lo = fid | (vid << MSR_C_LO_VID_SHIFT);
178 hi = MSR_C_HI_STP_GNT_BENIGN;
179 pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
180 wrmsr(MSR_FIDVID_CTL, lo, hi);
181}
182
183/* write the new fid value along with the other control fields to the msr */
184static int write_new_fid(struct powernow_k8_data *data, u32 fid)
185{
186 u32 lo;
187 u32 savevid = data->currvid;
188 u32 i = 0;
189
190 if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
191 printk(KERN_ERR PFX "internal error - overflow on fid write\n");
192 return 1;
193 }
194
195 lo = fid;
196 lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
197 lo |= MSR_C_LO_INIT_FID_VID;
198
199 pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
200 fid, lo, data->plllock * PLL_LOCK_CONVERSION);
201
202 do {
203 wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
204 if (i++ > 100) {
205 printk(KERN_ERR PFX
206 "Hardware error - pending bit very stuck - "
207 "no further pstate changes possible\n");
208 return 1;
209 }
210 } while (query_current_values_with_pending_wait(data));
211
212 count_off_irt(data);
213
214 if (savevid != data->currvid) {
215 printk(KERN_ERR PFX
216 "vid change on fid trans, old 0x%x, new 0x%x\n",
217 savevid, data->currvid);
218 return 1;
219 }
220
221 if (fid != data->currfid) {
222 printk(KERN_ERR PFX
223 "fid trans failed, fid 0x%x, curr 0x%x\n", fid,
224 data->currfid);
225 return 1;
226 }
227
228 return 0;
229}
230
231/* Write a new vid to the hardware */
232static int write_new_vid(struct powernow_k8_data *data, u32 vid)
233{
234 u32 lo;
235 u32 savefid = data->currfid;
236 int i = 0;
237
238 if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
239 printk(KERN_ERR PFX "internal error - overflow on vid write\n");
240 return 1;
241 }
242
243 lo = data->currfid;
244 lo |= (vid << MSR_C_LO_VID_SHIFT);
245 lo |= MSR_C_LO_INIT_FID_VID;
246
247 pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
248 vid, lo, STOP_GRANT_5NS);
249
250 do {
251 wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
252 if (i++ > 100) {
253 printk(KERN_ERR PFX "internal error - pending bit "
254 "very stuck - no further pstate "
255 "changes possible\n");
256 return 1;
257 }
258 } while (query_current_values_with_pending_wait(data));
259
260 if (savefid != data->currfid) {
261 printk(KERN_ERR PFX "fid changed on vid trans, old "
262 "0x%x new 0x%x\n",
263 savefid, data->currfid);
264 return 1;
265 }
266
267 if (vid != data->currvid) {
268 printk(KERN_ERR PFX "vid trans failed, vid 0x%x, "
269 "curr 0x%x\n",
270 vid, data->currvid);
271 return 1;
272 }
273
274 return 0;
275}
276
277/*
278 * Reduce the vid toward reqvid, stepping down by at most 'step' vid codes.
279 * Decreasing vid codes represent increasing voltages:
280 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
281 */
282static int decrease_vid_code_by_step(struct powernow_k8_data *data,
283 u32 reqvid, u32 step)
284{
285 if ((data->currvid - reqvid) > step)
286 reqvid = data->currvid - step;
287
288 if (write_new_vid(data, reqvid))
289 return 1;
290
291 count_off_vst(data);
292
293 return 0;
294}
295
296/* Change hardware pstate by single MSR write */
297static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
298{
299 wrmsr(MSR_PSTATE_CTRL, pstate, 0);
300 data->currpstate = pstate;
301 return 0;
302}
303
304/* Change Opteron/Athlon64 fid and vid in the 3 phases: pre-transition
 * voltage, frequency, then post-transition voltage. */
305static int transition_fid_vid(struct powernow_k8_data *data,
306 u32 reqfid, u32 reqvid)
307{
308 if (core_voltage_pre_transition(data, reqvid, reqfid))
309 return 1;
310
311 if (core_frequency_transition(data, reqfid))
312 return 1;
313
314 if (core_voltage_post_transition(data, reqvid))
315 return 1;
316
317 if (query_current_values_with_pending_wait(data))
318 return 1;
319
320 if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
321 printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, "
322 "curr 0x%x 0x%x\n",
323 smp_processor_id(),
324 reqfid, reqvid, data->currfid, data->currvid);
325 return 1;
326 }
327
328 pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
329 smp_processor_id(), data->currfid, data->currvid);
330
331 return 0;
332}
333
334/* Phase 1 - core voltage transition ... setup voltage */
335static int core_voltage_pre_transition(struct powernow_k8_data *data,
336 u32 reqvid, u32 reqfid)
337{
338 u32 rvosteps = data->rvo;
339 u32 savefid = data->currfid;
340 u32 maxvid, lo, rvomult = 1;
341
342 pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
343 "reqvid 0x%x, rvo 0x%x\n",
344 smp_processor_id(),
345 data->currfid, data->currvid, reqvid, data->rvo);
346
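	/* the ramp voltage offset steps are doubled when both the current
	 * and the requested fid are in the low fid table */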
347 if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
348 rvomult = 2;
349 rvosteps *= rvomult;
350 rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
351 maxvid = 0x1f & (maxvid >> 16);
352 pr_debug("ph1 maxvid=0x%x\n", maxvid);
353 if (reqvid < maxvid) /* lower numbers are higher voltages */
354 reqvid = maxvid;
355
356 while (data->currvid > reqvid) {
357 pr_debug("ph1: curr 0x%x, req vid 0x%x\n",
358 data->currvid, reqvid);
359 if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
360 return 1;
361 }
362
363 while ((rvosteps > 0) &&
364 ((rvomult * data->rvo + data->currvid) > reqvid)) {
365 if (data->currvid == maxvid) {
366 rvosteps = 0;
367 } else {
368 pr_debug("ph1: changing vid for rvo, req 0x%x\n",
369 data->currvid - 1);
370 if (decrease_vid_code_by_step(data, data->currvid-1, 1))
371 return 1;
372 rvosteps--;
373 }
374 }
375
376 if (query_current_values_with_pending_wait(data))
377 return 1;
378
379 if (savefid != data->currfid) {
380 printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n",
381 data->currfid);
382 return 1;
383 }
384
385 pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n",
386 data->currfid, data->currvid);
387
388 return 0;
389}
390
391/* Phase 2 - core frequency transition */
392static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
393{
394 u32 vcoreqfid, vcocurrfid, vcofiddiff;
395 u32 fid_interval, savevid = data->currvid;
396
397 if (data->currfid == reqfid) {
398 printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",
399 data->currfid);
400 return 0;
401 }
402
403 pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, "
404 "reqfid 0x%x\n",
405 smp_processor_id(),
406 data->currfid, data->currvid, reqfid);
407
408 vcoreqfid = convert_fid_to_vco_fid(reqfid);
409 vcocurrfid = convert_fid_to_vco_fid(data->currfid);
410 vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
411 : vcoreqfid - vcocurrfid;
412
413 if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
414 vcofiddiff = 0;
415
416 while (vcofiddiff > 2) {
417		fid_interval = (data->currfid & 1) ? 1 : 2;
418
419 if (reqfid > data->currfid) {
420 if (data->currfid > LO_FID_TABLE_TOP) {
421 if (write_new_fid(data,
422 data->currfid + fid_interval))
423 return 1;
424 } else {
425 if (write_new_fid
426 (data,
427 2 + convert_fid_to_vco_fid(data->currfid)))
428 return 1;
429 }
430 } else {
431 if (write_new_fid(data, data->currfid - fid_interval))
432 return 1;
433 }
434
435 vcocurrfid = convert_fid_to_vco_fid(data->currfid);
436 vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
437 : vcoreqfid - vcocurrfid;
438 }
439
440 if (write_new_fid(data, reqfid))
441 return 1;
442
443 if (query_current_values_with_pending_wait(data))
444 return 1;
445
446 if (data->currfid != reqfid) {
447 printk(KERN_ERR PFX
448 "ph2: mismatch, failed fid transition, "
449 "curr 0x%x, req 0x%x\n",
450 data->currfid, reqfid);
451 return 1;
452 }
453
454 if (savevid != data->currvid) {
455 printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n",
456 savevid, data->currvid);
457 return 1;
458 }
459
460 pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n",
461 data->currfid, data->currvid);
462
463 return 0;
464}
465
466/* Phase 3 - core voltage transition flow ... jump to the final vid. */
467static int core_voltage_post_transition(struct powernow_k8_data *data,
468 u32 reqvid)
469{
470 u32 savefid = data->currfid;
471 u32 savereqvid = reqvid;
472
473 pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
474 smp_processor_id(),
475 data->currfid, data->currvid);
476
477 if (reqvid != data->currvid) {
478 if (write_new_vid(data, reqvid))
479 return 1;
480
481 if (savefid != data->currfid) {
482 printk(KERN_ERR PFX
483 "ph3: bad fid change, save 0x%x, curr 0x%x\n",
484 savefid, data->currfid);
485 return 1;
486 }
487
488 if (data->currvid != reqvid) {
489 printk(KERN_ERR PFX
490 "ph3: failed vid transition\n, "
491 "req 0x%x, curr 0x%x",
492 reqvid, data->currvid);
493 return 1;
494 }
495 }
496
497 if (query_current_values_with_pending_wait(data))
498 return 1;
499
500 if (savereqvid != data->currvid) {
501 pr_debug("ph3 failed, currvid 0x%x\n", data->currvid);
502 return 1;
503 }
504
505 if (savefid != data->currfid) {
506 pr_debug("ph3 failed, currfid changed 0x%x\n",
507 data->currfid);
508 return 1;
509 }
510
511 pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n",
512 data->currfid, data->currvid);
513
514 return 0;
515}
516
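/* Runs on the target cpu; sets *rc to 0 if this is a supported K8 or
 * hardware-pstate (family 10h+) processor, -ENODEV otherwise. */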
517static void check_supported_cpu(void *_rc)
518{
519 u32 eax, ebx, ecx, edx;
520 int *rc = _rc;
521
522 *rc = -ENODEV;
523
524 if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
525 return;
526
527 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
528 if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) &&
529 ((eax & CPUID_XFAM) < CPUID_XFAM_10H))
530 return;
531
532 if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
533 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
534 ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
535 printk(KERN_INFO PFX
536 "Processor cpuid %x not supported\n", eax);
537 return;
538 }
539
540 eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
541 if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
542 printk(KERN_INFO PFX
543 "No frequency change capabilities detected\n");
544 return;
545 }
546
547 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
548 if ((edx & P_STATE_TRANSITION_CAPABLE)
549 != P_STATE_TRANSITION_CAPABLE) {
550 printk(KERN_INFO PFX
551 "Power state transitions not supported\n");
552 return;
553 }
554 } else { /* must be a HW Pstate capable processor */
555 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
556 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
557 cpu_family = CPU_HW_PSTATE;
558 else
559 return;
560 }
561
562 *rc = 0;
563}
564
565static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
566 u8 maxvid)
567{
568 unsigned int j;
569 u8 lastfid = 0xff;
570
571 for (j = 0; j < data->numps; j++) {
572 if (pst[j].vid > LEAST_VID) {
573 printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n",
574 j, pst[j].vid);
575 return -EINVAL;
576 }
577 if (pst[j].vid < data->rvo) {
578 /* vid + rvo >= 0 */
579 printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate"
580 " %d\n", j);
581 return -ENODEV;
582 }
583 if (pst[j].vid < maxvid + data->rvo) {
584 /* vid + rvo >= maxvid */
585 printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate"
586 " %d\n", j);
587 return -ENODEV;
588 }
589 if (pst[j].fid > MAX_FID) {
590 printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate"
591 " %d\n", j);
592 return -ENODEV;
593 }
594 if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
595 /* Only first fid is allowed to be in "low" range */
596 printk(KERN_ERR FW_BUG PFX "two low fids - %d : "
597 "0x%x\n", j, pst[j].fid);
598 return -EINVAL;
599 }
600 if (pst[j].fid < lastfid)
601 lastfid = pst[j].fid;
602 }
603 if (lastfid & 1) {
604 printk(KERN_ERR FW_BUG PFX "lastfid invalid\n");
605 return -EINVAL;
606 }
607 if (lastfid > LO_FID_TABLE_TOP)
608 printk(KERN_INFO FW_BUG PFX
609 "first fid not from lo freq table\n");
610
611 return 0;
612}
613
614static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
615 unsigned int entry)
616{
617 powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
618}
619
620static void print_basics(struct powernow_k8_data *data)
621{
622 int j;
623 for (j = 0; j < data->numps; j++) {
624 if (data->powernow_table[j].frequency !=
625 CPUFREQ_ENTRY_INVALID) {
626 if (cpu_family == CPU_HW_PSTATE) {
627 printk(KERN_INFO PFX
628 " %d : pstate %d (%d MHz)\n", j,
629 data->powernow_table[j].index,
630 data->powernow_table[j].frequency/1000);
631 } else {
632 printk(KERN_INFO PFX
633 "fid 0x%x (%d MHz), vid 0x%x\n",
634 data->powernow_table[j].index & 0xff,
635 data->powernow_table[j].frequency/1000,
636 data->powernow_table[j].index >> 8);
637 }
638 }
639 }
640 if (data->batps)
641 printk(KERN_INFO PFX "Only %d pstates on battery\n",
642 data->batps);
643}
644
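/* Convert fid/did to a frequency in kHz for family 10h/11h parts;
 * e.g. on family 10h, fid 0 with did 0 gives 1600000 kHz. */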
645static u32 freq_from_fid_did(u32 fid, u32 did)
646{
647 u32 mhz = 0;
648
649 if (boot_cpu_data.x86 == 0x10)
650 mhz = (100 * (fid + 0x10)) >> did;
651 else if (boot_cpu_data.x86 == 0x11)
652 mhz = (100 * (fid + 8)) >> did;
653 else
654 BUG();
655
656 return mhz * 1000;
657}
658
659static int fill_powernow_table(struct powernow_k8_data *data,
660 struct pst_s *pst, u8 maxvid)
661{
662 struct cpufreq_frequency_table *powernow_table;
663 unsigned int j;
664
665 if (data->batps) {
666 /* use ACPI support to get full speed on mains power */
667 printk(KERN_WARNING PFX
668 "Only %d pstates usable (use ACPI driver for full "
669 "range\n", data->batps);
670 data->numps = data->batps;
671 }
672
673 for (j = 1; j < data->numps; j++) {
674 if (pst[j-1].fid >= pst[j].fid) {
675 printk(KERN_ERR PFX "PST out of sequence\n");
676 return -EINVAL;
677 }
678 }
679
680 if (data->numps < 2) {
681 printk(KERN_ERR PFX "no p states to transition\n");
682 return -ENODEV;
683 }
684
685 if (check_pst_table(data, pst, maxvid))
686 return -EINVAL;
687
688 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
689 * (data->numps + 1)), GFP_KERNEL);
690 if (!powernow_table) {
691 printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
692 return -ENOMEM;
693 }
694
695 for (j = 0; j < data->numps; j++) {
696 int freq;
697 powernow_table[j].index = pst[j].fid; /* lower 8 bits */
698 powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
699 freq = find_khz_freq_from_fid(pst[j].fid);
700 powernow_table[j].frequency = freq;
701 }
702 powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
703 powernow_table[data->numps].index = 0;
704
705 if (query_current_values_with_pending_wait(data)) {
706 kfree(powernow_table);
707 return -EIO;
708 }
709
710 pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
711 data->powernow_table = powernow_table;
712 if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
713 print_basics(data);
714
715 for (j = 0; j < data->numps; j++)
716 if ((pst[j].fid == data->currfid) &&
717 (pst[j].vid == data->currvid))
718 return 0;
719
720 pr_debug("currfid/vid do not match PST, ignoring\n");
721 return 0;
722}
723
724/* Find and validate the PSB/PST table in BIOS. */
725static int find_psb_table(struct powernow_k8_data *data)
726{
727 struct psb_s *psb;
728 unsigned int i;
729 u32 mvs;
730 u8 maxvid;
731 u32 cpst = 0;
732 u32 thiscpuid;
733
734 for (i = 0xc0000; i < 0xffff0; i += 0x10) {
735 /* Scan BIOS looking for the signature. */
736 /* It can not be at ffff0 - it is too big. */
737
738 psb = phys_to_virt(i);
739 if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
740 continue;
741
742 pr_debug("found PSB header at 0x%p\n", psb);
743
744 pr_debug("table vers: 0x%x\n", psb->tableversion);
745 if (psb->tableversion != PSB_VERSION_1_4) {
746 printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n");
747 return -ENODEV;
748 }
749
750 pr_debug("flags: 0x%x\n", psb->flags1);
751 if (psb->flags1) {
752 printk(KERN_ERR FW_BUG PFX "unknown flags\n");
753 return -ENODEV;
754 }
755
756 data->vstable = psb->vstable;
757 pr_debug("voltage stabilization time: %d(*20us)\n",
758 data->vstable);
759
760 pr_debug("flags2: 0x%x\n", psb->flags2);
761 data->rvo = psb->flags2 & 3;
762 data->irt = ((psb->flags2) >> 2) & 3;
763 mvs = ((psb->flags2) >> 4) & 3;
764 data->vidmvs = 1 << mvs;
765 data->batps = ((psb->flags2) >> 6) & 3;
766
767 pr_debug("ramp voltage offset: %d\n", data->rvo);
768 pr_debug("isochronous relief time: %d\n", data->irt);
769 pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);
770
771 pr_debug("numpst: 0x%x\n", psb->num_tables);
772 cpst = psb->num_tables;
773 if ((psb->cpuid == 0x00000fc0) ||
774 (psb->cpuid == 0x00000fe0)) {
775 thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
776 if ((thiscpuid == 0x00000fc0) ||
777 (thiscpuid == 0x00000fe0))
778 cpst = 1;
779 }
780 if (cpst != 1) {
781 printk(KERN_ERR FW_BUG PFX "numpst must be 1\n");
782 return -ENODEV;
783 }
784
785 data->plllock = psb->plllocktime;
786 pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
787 pr_debug("maxfid: 0x%x\n", psb->maxfid);
788 pr_debug("maxvid: 0x%x\n", psb->maxvid);
789 maxvid = psb->maxvid;
790
791 data->numps = psb->numps;
792 pr_debug("numpstates: 0x%x\n", data->numps);
793 return fill_powernow_table(data,
794 (struct pst_s *)(psb+1), maxvid);
795 }
796 /*
797	 * If you see this message, complain to your BIOS manufacturer.
798	 * If they tell you "we do not support Linux" or some similar
799 * nonsense, remember that Windows 2000 uses the same legacy
800 * mechanism that the old Linux PSB driver uses. Tell them it
801 * is broken with Windows 2000.
802 *
803 * The reference to the AMD documentation is chapter 9 in the
804 * BIOS and Kernel Developer's Guide, which is available on
805 * www.amd.com
806 */
807 printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
808 printk(KERN_ERR PFX "Make sure that your BIOS is up to date"
809 " and Cool'N'Quiet support is enabled in BIOS setup\n");
810 return -ENODEV;
811}
812
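/* Decode the irt/rvo/exttype/plllock/mvs/vstable fields from the ACPI
 * _PSS control word for the given state. */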
813static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
814 unsigned int index)
815{
816 u64 control;
817
818 if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
819 return;
820
821 control = data->acpi_data.states[index].control;
822 data->irt = (control >> IRT_SHIFT) & IRT_MASK;
823 data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
824 data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
825 data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
826 data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
827 data->vstable = (control >> VST_SHIFT) & VST_MASK;
828}
829
830static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
831{
832 struct cpufreq_frequency_table *powernow_table;
833 int ret_val = -ENODEV;
834 u64 control, status;
835
836 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
837 pr_debug("register performance failed: bad ACPI data\n");
838 return -EIO;
839 }
840
841 /* verify the data contained in the ACPI structures */
842 if (data->acpi_data.state_count <= 1) {
843 pr_debug("No ACPI P-States\n");
844 goto err_out;
845 }
846
847 control = data->acpi_data.control_register.space_id;
848 status = data->acpi_data.status_register.space_id;
849
850 if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
851 (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
852 pr_debug("Invalid control/status registers (%llx - %llx)\n",
853 control, status);
854 goto err_out;
855 }
856
857 /* fill in data->powernow_table */
858 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
859 * (data->acpi_data.state_count + 1)), GFP_KERNEL);
860 if (!powernow_table) {
861 pr_debug("powernow_table memory alloc failure\n");
862 goto err_out;
863 }
864
865 /* fill in data */
866 data->numps = data->acpi_data.state_count;
867 powernow_k8_acpi_pst_values(data, 0);
868
869 if (cpu_family == CPU_HW_PSTATE)
870 ret_val = fill_powernow_table_pstate(data, powernow_table);
871 else
872 ret_val = fill_powernow_table_fidvid(data, powernow_table);
873 if (ret_val)
874 goto err_out_mem;
875
876 powernow_table[data->acpi_data.state_count].frequency =
877 CPUFREQ_TABLE_END;
878 powernow_table[data->acpi_data.state_count].index = 0;
879 data->powernow_table = powernow_table;
880
881 if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
882 print_basics(data);
883
884 /* notify BIOS that we exist */
885 acpi_processor_notify_smm(THIS_MODULE);
886
887 if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
888 printk(KERN_ERR PFX
889 "unable to alloc powernow_k8_data cpumask\n");
890 ret_val = -ENOMEM;
891 goto err_out_mem;
892 }
893
894 return 0;
895
896err_out_mem:
897 kfree(powernow_table);
898
899err_out:
900 acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
901
902 /* data->acpi_data.state_count informs us at ->exit()
903 * whether ACPI was used */
904 data->acpi_data.state_count = 0;
905
906 return ret_val;
907}
908
909static int fill_powernow_table_pstate(struct powernow_k8_data *data,
910 struct cpufreq_frequency_table *powernow_table)
911{
912 int i;
913 u32 hi = 0, lo = 0;
914 rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
915 data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
916
917 for (i = 0; i < data->acpi_data.state_count; i++) {
918 u32 index;
919
920 index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
921 if (index > data->max_hw_pstate) {
922 printk(KERN_ERR PFX "invalid pstate %d - "
923 "bad value %d.\n", i, index);
924 printk(KERN_ERR PFX "Please report to BIOS "
925 "manufacturer\n");
926 invalidate_entry(powernow_table, i);
927 continue;
928 }
929 rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
930 if (!(hi & HW_PSTATE_VALID_MASK)) {
931 pr_debug("invalid pstate %d, ignoring\n", index);
932 invalidate_entry(powernow_table, i);
933 continue;
934 }
935
936 powernow_table[i].index = index;
937
938 /* Frequency may be rounded for these */
939 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
940 || boot_cpu_data.x86 == 0x11) {
941 powernow_table[i].frequency =
942 freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
943 } else
944 powernow_table[i].frequency =
945 data->acpi_data.states[i].core_frequency * 1000;
946 }
947 return 0;
948}
949
950static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
951 struct cpufreq_frequency_table *powernow_table)
952{
953 int i;
954
955 for (i = 0; i < data->acpi_data.state_count; i++) {
956 u32 fid;
957 u32 vid;
958 u32 freq, index;
959 u64 status, control;
960
961 if (data->exttype) {
962 status = data->acpi_data.states[i].status;
963 fid = status & EXT_FID_MASK;
964 vid = (status >> VID_SHIFT) & EXT_VID_MASK;
965 } else {
966 control = data->acpi_data.states[i].control;
967 fid = control & FID_MASK;
968 vid = (control >> VID_SHIFT) & VID_MASK;
969 }
970
971 pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
972
973 index = fid | (vid<<8);
974 powernow_table[i].index = index;
975
976 freq = find_khz_freq_from_fid(fid);
977 powernow_table[i].frequency = freq;
978
979 /* verify frequency is OK */
980 if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
981 pr_debug("invalid freq %u kHz, ignoring\n", freq);
982 invalidate_entry(powernow_table, i);
983 continue;
984 }
985
986 /* verify voltage is OK -
987 * BIOSs are using "off" to indicate invalid */
988 if (vid == VID_OFF) {
989 pr_debug("invalid vid %u, ignoring\n", vid);
990 invalidate_entry(powernow_table, i);
991 continue;
992 }
993
994 if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
995 printk(KERN_INFO PFX "invalid freq entries "
996 "%u kHz vs. %u kHz\n", freq,
997 (unsigned int)
998 (data->acpi_data.states[i].core_frequency
999 * 1000));
1000 invalidate_entry(powernow_table, i);
1001 continue;
1002 }
1003 }
1004 return 0;
1005}
1006
1007static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
1008{
1009 if (data->acpi_data.state_count)
1010 acpi_processor_unregister_performance(&data->acpi_data,
1011 data->cpu);
1012 free_cpumask_var(data->acpi_data.shared_cpu_map);
1013}
1014
1015static int get_transition_latency(struct powernow_k8_data *data)
1016{
1017 int max_latency = 0;
1018 int i;
1019 for (i = 0; i < data->acpi_data.state_count; i++) {
1020 int cur_latency = data->acpi_data.states[i].transition_latency
1021 + data->acpi_data.states[i].bus_master_latency;
1022 if (cur_latency > max_latency)
1023 max_latency = cur_latency;
1024 }
1025 if (max_latency == 0) {
1026 /*
1027 * Fam 11h and later may return 0 as transition latency. This
1028	 * is intended and means "very fast". While the cpufreq core and
1029	 * governors currently handle that gracefully, it is safer to set
1030	 * it to 1 to avoid problems in the future.
1031 */
1032 if (boot_cpu_data.x86 < 0x11)
1033 printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
1034 "latency\n");
1035 max_latency = 1;
1036 }
1037 /* value in usecs, needs to be in nanoseconds */
1038 return 1000 * max_latency;
1039}
1040
1041/* Take a frequency, and issue the fid/vid transition command */
1042static int transition_frequency_fidvid(struct powernow_k8_data *data,
1043 unsigned int index)
1044{
1045 u32 fid = 0;
1046 u32 vid = 0;
1047 int res, i;
1048 struct cpufreq_freqs freqs;
1049
1050 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
1051
1052 /* fid/vid correctness check for k8 */
1053 /* fid are the lower 8 bits of the index we stored into
1054 * the cpufreq frequency table in find_psb_table, vid
1055 * are the upper 8 bits.
1056 */
1057 fid = data->powernow_table[index].index & 0xFF;
1058 vid = (data->powernow_table[index].index & 0xFF00) >> 8;
1059
1060 pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);
1061
1062 if (query_current_values_with_pending_wait(data))
1063 return 1;
1064
1065 if ((data->currvid == vid) && (data->currfid == fid)) {
1066 pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n",
1067 fid, vid);
1068 return 0;
1069 }
1070
1071 pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n",
1072 smp_processor_id(), fid, vid);
1073 freqs.old = find_khz_freq_from_fid(data->currfid);
1074 freqs.new = find_khz_freq_from_fid(fid);
1075
1076 for_each_cpu(i, data->available_cores) {
1077 freqs.cpu = i;
1078 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1079 }
1080
1081 res = transition_fid_vid(data, fid, vid);
1082 freqs.new = find_khz_freq_from_fid(data->currfid);
1083
1084 for_each_cpu(i, data->available_cores) {
1085 freqs.cpu = i;
1086 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1087 }
1088 return res;
1089}
1090
1091/* Take a frequency, and issue the hardware pstate transition command */
1092static int transition_frequency_pstate(struct powernow_k8_data *data,
1093 unsigned int index)
1094{
1095 u32 pstate = 0;
1096 int res, i;
1097 struct cpufreq_freqs freqs;
1098
1099 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
1100
1101 /* get MSR index for hardware pstate transition */
1102 pstate = index & HW_PSTATE_MASK;
1103 if (pstate > data->max_hw_pstate)
1104 return 0;
1105 freqs.old = find_khz_freq_from_pstate(data->powernow_table,
1106 data->currpstate);
1107 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
1108
1109 for_each_cpu(i, data->available_cores) {
1110 freqs.cpu = i;
1111 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1112 }
1113
1114 res = transition_pstate(data, pstate);
1115 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
1116
1117 for_each_cpu(i, data->available_cores) {
1118 freqs.cpu = i;
1119 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1120 }
1121 return res;
1122}
1123
1124/* Driver entry point to switch to the target frequency */
1125static int powernowk8_target(struct cpufreq_policy *pol,
1126 unsigned targfreq, unsigned relation)
1127{
1128 cpumask_var_t oldmask;
1129 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1130 u32 checkfid;
1131 u32 checkvid;
1132 unsigned int newstate;
1133 int ret = -EIO;
1134
1135 if (!data)
1136 return -EINVAL;
1137
1138 checkfid = data->currfid;
1139 checkvid = data->currvid;
1140
1141 /* only run on specific CPU from here on. */
1142 /* This is poor form: use a workqueue or smp_call_function_single */
1143 if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
1144 return -ENOMEM;
1145
1146 cpumask_copy(oldmask, tsk_cpus_allowed(current));
1147 set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
1148
1149 if (smp_processor_id() != pol->cpu) {
1150 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
1151 goto err_out;
1152 }
1153
1154 if (pending_bit_stuck()) {
1155 printk(KERN_ERR PFX "failing targ, change pending bit set\n");
1156 goto err_out;
1157 }
1158
1159 pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
1160 pol->cpu, targfreq, pol->min, pol->max, relation);
1161
1162 if (query_current_values_with_pending_wait(data))
1163 goto err_out;
1164
1165 if (cpu_family != CPU_HW_PSTATE) {
1166 pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
1167 data->currfid, data->currvid);
1168
1169 if ((checkvid != data->currvid) ||
1170 (checkfid != data->currfid)) {
1171 printk(KERN_INFO PFX
1172 "error - out of sync, fix 0x%x 0x%x, "
1173 "vid 0x%x 0x%x\n",
1174 checkfid, data->currfid,
1175 checkvid, data->currvid);
1176 }
1177 }
1178
1179 if (cpufreq_frequency_table_target(pol, data->powernow_table,
1180 targfreq, relation, &newstate))
1181 goto err_out;
1182
1183 mutex_lock(&fidvid_mutex);
1184
1185 powernow_k8_acpi_pst_values(data, newstate);
1186
1187 if (cpu_family == CPU_HW_PSTATE)
1188 ret = transition_frequency_pstate(data, newstate);
1189 else
1190 ret = transition_frequency_fidvid(data, newstate);
1191 if (ret) {
1192 printk(KERN_ERR PFX "transition frequency failed\n");
1193 ret = 1;
1194 mutex_unlock(&fidvid_mutex);
1195 goto err_out;
1196 }
1197 mutex_unlock(&fidvid_mutex);
1198
1199 if (cpu_family == CPU_HW_PSTATE)
1200 pol->cur = find_khz_freq_from_pstate(data->powernow_table,
1201 newstate);
1202 else
1203 pol->cur = find_khz_freq_from_fid(data->currfid);
1204 ret = 0;
1205
1206err_out:
1207 set_cpus_allowed_ptr(current, oldmask);
1208 free_cpumask_var(oldmask);
1209 return ret;
1210}
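
/*
 * Editorial sketch, not part of the original driver: the in-code comment
 * above ("This is poor form: use a workqueue or smp_call_function_single")
 * points at the cleaner shape, where the transition body runs on the
 * target CPU instead of migrating the calling task. All names below
 * except smp_call_function_single() are invented for illustration:
 *
 *	struct target_args {
 *		struct cpufreq_policy *pol;
 *		unsigned int targfreq, relation;
 *		int ret;
 *	};
 *
 *	static void powernowk8_target_on_cpu(void *_args)
 *	{
 *		struct target_args *a = _args;
 *		a->ret = do_transition(a);	(the body of powernowk8_target)
 *	}
 *
 *	smp_call_function_single(pol->cpu, powernowk8_target_on_cpu, &args, 1);
 */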
1211
1212/* Driver entry point to verify the policy and range of frequencies */
1213static int powernowk8_verify(struct cpufreq_policy *pol)
1214{
1215 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1216
1217 if (!data)
1218 return -EINVAL;
1219
1220 return cpufreq_frequency_table_verify(pol, data->powernow_table);
1221}
1222
1223struct init_on_cpu {
1224 struct powernow_k8_data *data;
1225 int rc;
1226};
1227
1228static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
1229{
1230 struct init_on_cpu *init_on_cpu = _init_on_cpu;
1231
1232 if (pending_bit_stuck()) {
1233 printk(KERN_ERR PFX "failing init, change pending bit set\n");
1234 init_on_cpu->rc = -ENODEV;
1235 return;
1236 }
1237
1238 if (query_current_values_with_pending_wait(init_on_cpu->data)) {
1239 init_on_cpu->rc = -ENODEV;
1240 return;
1241 }
1242
1243 if (cpu_family == CPU_OPTERON)
1244 fidvid_msr_init();
1245
1246 init_on_cpu->rc = 0;
1247}
1248
1249/* per CPU init entry point to the driver */
1250static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1251{
1252 static const char ACPI_PSS_BIOS_BUG_MSG[] =
1253 KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
1254 FW_BUG PFX "Try again with latest BIOS.\n";
1255 struct powernow_k8_data *data;
1256 struct init_on_cpu init_on_cpu;
1257 int rc;
1258 struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
1259
1260 if (!cpu_online(pol->cpu))
1261 return -ENODEV;
1262
1263 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
1264 if (rc)
1265 return -ENODEV;
1266
1267 data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
1268 if (!data) {
1269 printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
1270 return -ENOMEM;
1271 }
1272
1273 data->cpu = pol->cpu;
1274 data->currpstate = HW_PSTATE_INVALID;
1275
1276 if (powernow_k8_cpu_init_acpi(data)) {
1277 /*
1278 * Use the PSB BIOS structure. This is only available on
1279 * an UP version, and is deprecated by AMD.
1280 */
1281 if (num_online_cpus() != 1) {
1282 printk_once(ACPI_PSS_BIOS_BUG_MSG);
1283 goto err_out;
1284 }
1285 if (pol->cpu != 0) {
1286 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
1287 "CPU other than CPU0. Complain to your BIOS "
1288 "vendor.\n");
1289 goto err_out;
1290 }
1291 rc = find_psb_table(data);
1292 if (rc)
1293 goto err_out;
1294
1295 /* Take a crude guess here.
1296 * The guess is in microseconds, so multiply by 1000 */
1297 pol->cpuinfo.transition_latency = (
1298 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
1299 ((1 << data->irt) * 30)) * 1000;
1300 } else /* ACPI _PSS objects available */
1301 pol->cpuinfo.transition_latency = get_transition_latency(data);
1302
1303 /* only run on specific CPU from here on */
1304 init_on_cpu.data = data;
1305 smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
1306 &init_on_cpu, 1);
1307 rc = init_on_cpu.rc;
1308 if (rc != 0)
1309 goto err_out_exit_acpi;
1310
1311 if (cpu_family == CPU_HW_PSTATE)
1312 cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
1313 else
1314 cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
1315 data->available_cores = pol->cpus;
1316
1317 if (cpu_family == CPU_HW_PSTATE)
1318 pol->cur = find_khz_freq_from_pstate(data->powernow_table,
1319 data->currpstate);
1320 else
1321 pol->cur = find_khz_freq_from_fid(data->currfid);
1322 pr_debug("policy current frequency %d kHz\n", pol->cur);
1323
1324 /* min/max the cpu is capable of */
1325 if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
1326 printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
1327 powernow_k8_cpu_exit_acpi(data);
1328 kfree(data->powernow_table);
1329 kfree(data);
1330 return -EINVAL;
1331 }
1332
1333 /* Check for APERF/MPERF support in hardware */
1334 if (cpu_has(c, X86_FEATURE_APERFMPERF))
1335 cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
1336
1337 cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
1338
1339 if (cpu_family == CPU_HW_PSTATE)
1340 pr_debug("cpu_init done, current pstate 0x%x\n",
1341 data->currpstate);
1342 else
1343 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
1344 data->currfid, data->currvid);
1345
1346 per_cpu(powernow_data, pol->cpu) = data;
1347
1348 return 0;
1349
1350err_out_exit_acpi:
1351 powernow_k8_cpu_exit_acpi(data);
1352
1353err_out:
1354 kfree(data);
1355 return -ENODEV;
1356}
1357
1358static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
1359{
1360 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1361
1362 if (!data)
1363 return -EINVAL;
1364
1365 powernow_k8_cpu_exit_acpi(data);
1366
1367 cpufreq_frequency_table_put_attr(pol->cpu);
1368
1369 kfree(data->powernow_table);
1370 kfree(data);
1371 per_cpu(powernow_data, pol->cpu) = NULL;
1372
1373 return 0;
1374}
1375
1376static void query_values_on_cpu(void *_err)
1377{
1378 int *err = _err;
1379 struct powernow_k8_data *data = __this_cpu_read(powernow_data);
1380
1381 *err = query_current_values_with_pending_wait(data);
1382}
1383
1384static unsigned int powernowk8_get(unsigned int cpu)
1385{
1386 struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
1387 unsigned int khz = 0;
1388 int err;
1389
1390 if (!data)
1391 return 0;
1392
1393 smp_call_function_single(cpu, query_values_on_cpu, &err, true);
1394 if (err)
1395 goto out;
1396
1397 if (cpu_family == CPU_HW_PSTATE)
1398 khz = find_khz_freq_from_pstate(data->powernow_table,
1399 data->currpstate);
1400 else
1401 khz = find_khz_freq_from_fid(data->currfid);
1402
1404out:
1405 return khz;
1406}
1407
1408static void _cpb_toggle_msrs(bool t)
1409{
1410 int cpu;
1411
1412 get_online_cpus();
1413
1414 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1415
1416 for_each_cpu(cpu, cpu_online_mask) {
1417 struct msr *reg = per_cpu_ptr(msrs, cpu);
1418 if (t)
1419 reg->l &= ~BIT(25);
1420 else
1421 reg->l |= BIT(25);
1422 }
1423 wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1424
1425 put_online_cpus();
1426}
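
/*
 * Editorial note: BIT(25) of MSR_K7_HWCR is AMD's boost-disable bit
 * (CpbDis); boost is enabled while the bit is clear, which is why the
 * loop above clears it for t == true and sets it otherwise. The same
 * pattern for a single CPU (mirroring cpb_notify() below):
 *
 *	rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
 *	if (enable)
 *		lo &= ~BIT(25);		(clear CpbDis: boost allowed)
 *	else
 *		lo |= BIT(25);		(set CpbDis: boost off)
 *	wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
 */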
1427
1428/*
1429 * Switch on/off core performance boosting.
1430 *
1431 * 0=disable
1432 * 1=enable.
1433 */
1434static void cpb_toggle(bool t)
1435{
1436 if (!cpb_capable)
1437 return;
1438
1439 if (t && !cpb_enabled) {
1440 cpb_enabled = true;
1441 _cpb_toggle_msrs(t);
1442 printk(KERN_INFO PFX "Core Boosting enabled.\n");
1443 } else if (!t && cpb_enabled) {
1444 cpb_enabled = false;
1445 _cpb_toggle_msrs(t);
1446 printk(KERN_INFO PFX "Core Boosting disabled.\n");
1447 }
1448}
1449
1450static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
1451 size_t count)
1452{
1453 int ret = -EINVAL;
1454 unsigned long val = 0;
1455
1456 ret = strict_strtoul(buf, 10, &val);
1457 if (!ret && (val == 0 || val == 1) && cpb_capable)
1458 cpb_toggle(val);
1459 else
1460 return -EINVAL;
1461
1462 return count;
1463}
1464
1465static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
1466{
1467 return sprintf(buf, "%u\n", cpb_enabled);
1468}
1469
1470#define define_one_rw(_name) \
1471static struct freq_attr _name = \
1472__ATTR(_name, 0644, show_##_name, store_##_name)
1473
1474define_one_rw(cpb);
1475
1476static struct freq_attr *powernow_k8_attr[] = {
1477 &cpufreq_freq_attr_scaling_available_freqs,
1478 &cpb,
1479 NULL,
1480};
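
/*
 * Editorial note: define_one_rw(cpb) above expands to
 *
 *	static struct freq_attr cpb =
 *		__ATTR(cpb, 0644, show_cpb, store_cpb);
 *
 * i.e. a mode-0644 per-policy sysfs attribute named "cpb", read through
 * show_cpb() and written through store_cpb(), exported via the
 * powernow_k8_attr table.
 */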
1481
1482static struct cpufreq_driver cpufreq_amd64_driver = {
1483 .verify = powernowk8_verify,
1484 .target = powernowk8_target,
1485 .bios_limit = acpi_processor_get_bios_limit,
1486 .init = powernowk8_cpu_init,
1487 .exit = __devexit_p(powernowk8_cpu_exit),
1488 .get = powernowk8_get,
1489 .name = "powernow-k8",
1490 .owner = THIS_MODULE,
1491 .attr = powernow_k8_attr,
1492};
1493
1494/*
1495 * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
1496 * cannot block the remaining ones from boosting. On the CPU_UP path we
1497 * simply keep the boost-disable flag in sync with the current global
1498 * state.
1499 */
1500static int cpb_notify(struct notifier_block *nb, unsigned long action,
1501 void *hcpu)
1502{
1503 unsigned cpu = (long)hcpu;
1504 u32 lo, hi;
1505
1506 switch (action) {
1507 case CPU_UP_PREPARE:
1508 case CPU_UP_PREPARE_FROZEN:
1509
1510 if (!cpb_enabled) {
1511 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
1512 lo |= BIT(25);
1513 wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
1514 }
1515 break;
1516
1517 case CPU_DOWN_PREPARE:
1518 case CPU_DOWN_PREPARE_FROZEN:
1519 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
1520 lo &= ~BIT(25);
1521 wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
1522 break;
1523
1524 default:
1525 break;
1526 }
1527
1528 return NOTIFY_OK;
1529}
1530
1531static struct notifier_block cpb_nb = {
1532 .notifier_call = cpb_notify,
1533};
1534
1535/* driver entry point for init */
1536static int __cpuinit powernowk8_init(void)
1537{
1538 unsigned int i, supported_cpus = 0, cpu;
1539 int rv;
1540
1541 for_each_online_cpu(i) {
1542 int rc;
1543 smp_call_function_single(i, check_supported_cpu, &rc, 1);
1544 if (rc == 0)
1545 supported_cpus++;
1546 }
1547
1548 if (supported_cpus != num_online_cpus())
1549 return -ENODEV;
1550
1551 printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
1552 num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
1553
1554 if (boot_cpu_has(X86_FEATURE_CPB)) {
1555
1556 cpb_capable = true;
1557
1558 msrs = msrs_alloc();
1559 if (!msrs) {
1560 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
1561 return -ENOMEM;
1562 }
1563
1564 register_cpu_notifier(&cpb_nb);
1565
1566 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1567
1568 for_each_cpu(cpu, cpu_online_mask) {
1569 struct msr *reg = per_cpu_ptr(msrs, cpu);
1570 cpb_enabled |= !(reg->l & BIT(25));
1571 }
1572
1573 printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
1574 (cpb_enabled ? "on" : "off"));
1575 }
1576
1577 rv = cpufreq_register_driver(&cpufreq_amd64_driver);
1578 if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
1579 unregister_cpu_notifier(&cpb_nb);
1580 msrs_free(msrs);
1581 msrs = NULL;
1582 }
1583 return rv;
1584}
1585
1586/* driver entry point for term */
1587static void __exit powernowk8_exit(void)
1588{
1589 pr_debug("exit\n");
1590
1591 if (boot_cpu_has(X86_FEATURE_CPB)) {
1592 msrs_free(msrs);
1593 msrs = NULL;
1594
1595 unregister_cpu_notifier(&cpb_nb);
1596 }
1597
1598 cpufreq_unregister_driver(&cpufreq_amd64_driver);
1599}
1600
1601MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and "
1602 "Mark Langsdorf <mark.langsdorf@amd.com>");
1603MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
1604MODULE_LICENSE("GPL");
1605
1606late_initcall(powernowk8_init);
1607module_exit(powernowk8_exit);
diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h
new file mode 100644
index 000000000000..3744d26cdc2b
--- /dev/null
+++ b/drivers/cpufreq/powernow-k8.h
@@ -0,0 +1,222 @@
1/*
2 * (c) 2003-2006 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html
6 */
7
8enum pstate {
9 HW_PSTATE_INVALID = 0xff,
10 HW_PSTATE_0 = 0,
11 HW_PSTATE_1 = 1,
12 HW_PSTATE_2 = 2,
13 HW_PSTATE_3 = 3,
14 HW_PSTATE_4 = 4,
15 HW_PSTATE_5 = 5,
16 HW_PSTATE_6 = 6,
17 HW_PSTATE_7 = 7,
18};
19
20struct powernow_k8_data {
21 unsigned int cpu;
22
23 u32 numps; /* number of p-states */
24 u32 batps; /* number of p-states supported on battery */
25 u32 max_hw_pstate; /* maximum legal hardware pstate */
26
27 /* these values are constant when the PSB is used to determine
28 * vid/fid pairings, but are modified during the ->target() call
29 * when ACPI is used */
30 u32 rvo; /* ramp voltage offset */
31 u32 irt; /* isochronous relief time */
32 u32 vidmvs; /* usable value calculated from mvs */
33 u32 vstable; /* voltage stabilization time, units 20 us */
34 u32 plllock; /* pll lock time, units 1 us */
35 u32 exttype; /* extended interface = 1 */
36
37 /* keep track of the current fid / vid or pstate */
38 u32 currvid;
39 u32 currfid;
40 enum pstate currpstate;
41
42 /* the powernow_table includes all frequency and vid/fid pairings:
43 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
44 * frequency is in kHz */
45 struct cpufreq_frequency_table *powernow_table;
46
47 /* the acpi table needs to be kept. it's only available if ACPI was
48 * used to determine valid frequency/vid/fid states */
49 struct acpi_processor_performance acpi_data;
50
51 /* we need to keep track of associated cores, but let cpufreq
52 * handle hotplug events - so just point at cpufreq pol->cpus
53 * structure */
54 struct cpumask *available_cores;
55};
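
/*
 * Editorial example of the index packing described above (fid in the
 * lower 8 bits, vid in the upper 8); the helper macros are invented:
 *
 *	#define PNOW_IDX_FID(i)	((i) & 0xff)
 *	#define PNOW_IDX_VID(i)	(((i) >> 8) & 0xff)
 *
 * so a table entry for fid 0x0a / vid 0x12 carries index 0x120a.
 */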
56
57/* processor's cpuid instruction support */
58#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
59#define CPUID_XFAM 0x0ff00000 /* extended family */
60#define CPUID_XFAM_K8 0
61#define CPUID_XMOD 0x000f0000 /* extended model */
62#define CPUID_XMOD_REV_MASK 0x000c0000
63#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
64#define CPUID_USE_XFAM_XMOD 0x00000f00
65#define CPUID_GET_MAX_CAPABILITIES 0x80000000
66#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
67#define P_STATE_TRANSITION_CAPABLE 6
68
69/* Model Specific Registers for p-state transitions. MSRs are 64-bit. For */
70/* writes (wrmsr - opcode 0f 30), the register number is placed in ecx, and */
71/* the value to write is placed in edx:eax. For reads (rdmsr - opcode 0f 32), */
72/* the register number is placed in ecx, and the data is returned in edx:eax. */
73
74#define MSR_FIDVID_CTL 0xc0010041
75#define MSR_FIDVID_STATUS 0xc0010042
76
77/* Field definitions within the FID VID Low Control MSR : */
78#define MSR_C_LO_INIT_FID_VID 0x00010000
79#define MSR_C_LO_NEW_VID 0x00003f00
80#define MSR_C_LO_NEW_FID 0x0000003f
81#define MSR_C_LO_VID_SHIFT 8
82
83/* Field definitions within the FID VID High Control MSR : */
84#define MSR_C_HI_STP_GNT_TO 0x000fffff
85
86/* Field definitions within the FID VID Low Status MSR : */
87#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
88#define MSR_S_LO_MAX_RAMP_VID 0x3f000000
89#define MSR_S_LO_MAX_FID 0x003f0000
90#define MSR_S_LO_START_FID 0x00003f00
91#define MSR_S_LO_CURRENT_FID 0x0000003f
92
93/* Field definitions within the FID VID High Status MSR : */
94#define MSR_S_HI_MIN_WORKING_VID 0x3f000000
95#define MSR_S_HI_MAX_WORKING_VID 0x003f0000
96#define MSR_S_HI_START_VID 0x00003f00
97#define MSR_S_HI_CURRENT_VID 0x0000003f
98#define MSR_C_HI_STP_GNT_BENIGN 0x00000001
99
100
101/* Hardware Pstate _PSS and MSR definitions */
102#define USE_HW_PSTATE 0x00000080
103#define HW_PSTATE_MASK 0x00000007
104#define HW_PSTATE_VALID_MASK 0x80000000
105#define HW_PSTATE_MAX_MASK 0x000000f0
106#define HW_PSTATE_MAX_SHIFT 4
107#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */
108#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */
109#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */
110#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */
111
112/* define the two driver architectures */
113#define CPU_OPTERON 0
114#define CPU_HW_PSTATE 1
115
116
117/*
118 * There are restrictions frequencies have to follow:
119 * - only 1 entry in the low fid table ( <=1.4GHz )
120 * - lowest entry in the high fid table must be >= 2 * the entry in the
121 * low fid table
122 * - lowest entry in the high fid table must be <= 200MHz + 2 * the entry
123 * in the low fid table
124 * - the parts can only step at <= 200 MHz intervals, odd fid values are
125 * supported in revision G and later revisions.
126 * - lowest frequency must be >= interprocessor hypertransport link speed
127 * (only applies to MP systems obviously)
128 */
129
130/* fids (frequency identifiers) are arranged in 2 tables - lo and hi */
131#define LO_FID_TABLE_TOP 7 /* fid values marking the boundary */
132#define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */
133
134#define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */
135#define HI_VCOFREQ_TABLE_BOTTOM 1600
136
137#define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */
138
139#define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */
140#define LEAST_VID 0x3e /* Lowest (numerically highest) useful vid value */
141
142#define MIN_FREQ 800 /* Min and max freqs, per spec */
143#define MAX_FREQ 5000
144
145#define INVALID_FID_MASK 0xffffffc0 /* not a valid fid if these bits are set */
146#define INVALID_VID_MASK 0xffffffc0 /* not a valid vid if these bits are set */
147
148#define VID_OFF 0x3f
149
150#define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */
151
152#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
153
154#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
155#define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */
156
157/*
158 * Most values of interest are encoded in a single field of the _PSS
159 * entries: the "control" value.
160 */
161
162#define IRT_SHIFT 30
163#define RVO_SHIFT 28
164#define EXT_TYPE_SHIFT 27
165#define PLL_L_SHIFT 20
166#define MVS_SHIFT 18
167#define VST_SHIFT 11
168#define VID_SHIFT 6
169#define IRT_MASK 3
170#define RVO_MASK 3
171#define EXT_TYPE_MASK 1
172#define PLL_L_MASK 0x7f
173#define MVS_MASK 3
174#define VST_MASK 0x7f
175#define VID_MASK 0x1f
176#define FID_MASK 0x1f
177#define EXT_VID_MASK 0x3f
178#define EXT_FID_MASK 0x3f
179
180
181/*
182 * Version 1.4 of the PSB table. This table is constructed by the BIOS
183 * to tell the OS's power management driver which VIDs and FIDs are
184 * supported by this particular processor.
185 * If the data in the PSB / PST is wrong, then this driver will program the
186 * wrong values into hardware, which is very likely to lead to a crash.
187 */
188
189#define PSB_ID_STRING "AMDK7PNOW!"
190#define PSB_ID_STRING_LEN 10
191
192#define PSB_VERSION_1_4 0x14
193
194struct psb_s {
195 u8 signature[10];
196 u8 tableversion;
197 u8 flags1;
198 u16 vstable;
199 u8 flags2;
200 u8 num_tables;
201 u32 cpuid;
202 u8 plllocktime;
203 u8 maxfid;
204 u8 maxvid;
205 u8 numps;
206};
207
208/* Pairs of fid/vid values are appended to the version 1.4 PSB table. */
209struct pst_s {
210 u8 fid;
211 u8 vid;
212};
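
/*
 * Editorial note: the fid/vid pairs follow the fixed-size header
 * directly, one pst_s per p-state, so after locating the PSB by its
 * signature the driver can walk them as:
 *
 *	struct pst_s *pst = (struct pst_s *)(psb + 1);
 *	for (j = 0; j < psb->numps; j++)
 *		pr_debug("ps %d: fid 0x%x vid 0x%x\n",
 *			 j, pst[j].fid, pst[j].vid);
 */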
213
214static int core_voltage_pre_transition(struct powernow_k8_data *data,
215 u32 reqvid, u32 reqfid);
216static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
217static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
218
219static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
220
221static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
222static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
new file mode 100644
index 000000000000..1e205e6b1727
--- /dev/null
+++ b/drivers/cpufreq/sc520_freq.c
@@ -0,0 +1,192 @@
1/*
2 * sc520_freq.c: cpufreq driver for the AMD Elan sc520
3 *
4 * Copyright (C) 2005 Sean Young <sean@mess.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Based on elanfreq.c
12 *
13 * 2005-03-30: - initial revision
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19
20#include <linux/delay.h>
21#include <linux/cpufreq.h>
22#include <linux/timex.h>
23#include <linux/io.h>
24
25#include <asm/msr.h>
26
27#define MMCR_BASE 0xfffef000 /* The default base address */
28#define OFFS_CPUCTL 0x2 /* CPU Control Register */
29
30static __u8 __iomem *cpuctl;
31
32#define PFX "sc520_freq: "
33
34static struct cpufreq_frequency_table sc520_freq_table[] = {
35 {0x01, 100000},
36 {0x02, 133000},
37 {0, CPUFREQ_TABLE_END},
38};
39
40static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
41{
42 u8 clockspeed_reg = *cpuctl;
43
44 switch (clockspeed_reg & 0x03) {
45 default:
46 printk(KERN_ERR PFX "error: cpuctl register has unexpected "
47 "value %02x\n", clockspeed_reg);
48 case 0x01:
49 return 100000;
50 case 0x02:
51 return 133000;
52 }
53}
54
55static void sc520_freq_set_cpu_state(unsigned int state)
56{
58 struct cpufreq_freqs freqs;
59 u8 clockspeed_reg;
60
61 freqs.old = sc520_freq_get_cpu_frequency(0);
62 freqs.new = sc520_freq_table[state].frequency;
63 freqs.cpu = 0; /* AMD Elan is UP */
64
65 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
66
67 pr_debug("attempting to set frequency to %i kHz\n",
68 sc520_freq_table[state].frequency);
69
70 local_irq_disable();
71
72 clockspeed_reg = *cpuctl & ~0x03;
73 *cpuctl = clockspeed_reg | sc520_freq_table[state].index;
74
75 local_irq_enable();
76
77 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
78}
79
80static int sc520_freq_verify(struct cpufreq_policy *policy)
81{
82 return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
83}
84
85static int sc520_freq_target(struct cpufreq_policy *policy,
86 unsigned int target_freq,
87 unsigned int relation)
88{
89 unsigned int newstate = 0;
90
91 if (cpufreq_frequency_table_target(policy, sc520_freq_table,
92 target_freq, relation, &newstate))
93 return -EINVAL;
94
95 sc520_freq_set_cpu_state(newstate);
96
97 return 0;
98}
99
100
101/*
102 * Module init and exit code
103 */
104
105static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
106{
107 struct cpuinfo_x86 *c = &cpu_data(0);
108 int result;
109
110 /* capability check */
111 if (c->x86_vendor != X86_VENDOR_AMD ||
112 c->x86 != 4 || c->x86_model != 9)
113 return -ENODEV;
114
115 /* cpuinfo and default policy values */
116 policy->cpuinfo.transition_latency = 1000000; /* 1ms */
117 policy->cur = sc520_freq_get_cpu_frequency(0);
118
119 result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
120 if (result)
121 return result;
122
123 cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
124
125 return 0;
126}
127
128
129static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
130{
131 cpufreq_frequency_table_put_attr(policy->cpu);
132 return 0;
133}
134
135
136static struct freq_attr *sc520_freq_attr[] = {
137 &cpufreq_freq_attr_scaling_available_freqs,
138 NULL,
139};
140
141
142static struct cpufreq_driver sc520_freq_driver = {
143 .get = sc520_freq_get_cpu_frequency,
144 .verify = sc520_freq_verify,
145 .target = sc520_freq_target,
146 .init = sc520_freq_cpu_init,
147 .exit = sc520_freq_cpu_exit,
148 .name = "sc520_freq",
149 .owner = THIS_MODULE,
150 .attr = sc520_freq_attr,
151};
152
153
154static int __init sc520_freq_init(void)
155{
156 struct cpuinfo_x86 *c = &cpu_data(0);
157 int err;
158
159 /* Test if we have the right hardware */
160 if (c->x86_vendor != X86_VENDOR_AMD ||
161 c->x86 != 4 || c->x86_model != 9) {
162 pr_debug("no Elan SC520 processor found!\n");
163 return -ENODEV;
164 }
165 cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
166 if (!cpuctl) {
167 printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
168 return -ENOMEM;
169 }
170
171 err = cpufreq_register_driver(&sc520_freq_driver);
172 if (err)
173 iounmap(cpuctl);
174
175 return err;
176}
177
178
179static void __exit sc520_freq_exit(void)
180{
181 cpufreq_unregister_driver(&sc520_freq_driver);
182 iounmap(cpuctl);
183}
184
185
186MODULE_LICENSE("GPL");
187MODULE_AUTHOR("Sean Young <sean@mess.org>");
188MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU");
189
190module_init(sc520_freq_init);
191module_exit(sc520_freq_exit);
192
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
new file mode 100644
index 000000000000..6ea3455def21
--- /dev/null
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -0,0 +1,633 @@
1/*
2 * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
3 * M (part of the Centrino chipset).
4 *
5 * Since the original Pentium M, most new Intel CPUs support Enhanced
6 * SpeedStep.
7 *
8 * Despite the "SpeedStep" in the name, this is almost entirely unlike
9 * traditional SpeedStep.
10 *
11 * Modelled on speedstep.c
12 *
13 * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/cpufreq.h>
20#include <linux/sched.h> /* current */
21#include <linux/delay.h>
22#include <linux/compiler.h>
23#include <linux/gfp.h>
24
25#include <asm/msr.h>
26#include <asm/processor.h>
27#include <asm/cpufeature.h>
28
29#define PFX "speedstep-centrino: "
30#define MAINTAINER "cpufreq@vger.kernel.org"
31
32#define INTEL_MSR_RANGE (0xffff)
33
34struct cpu_id
35{
36 __u8 x86; /* CPU family */
37 __u8 x86_model; /* model */
38 __u8 x86_mask; /* stepping */
39};
40
41enum {
42 CPU_BANIAS,
43 CPU_DOTHAN_A1,
44 CPU_DOTHAN_A2,
45 CPU_DOTHAN_B0,
46 CPU_MP4HT_D0,
47 CPU_MP4HT_E0,
48};
49
50static const struct cpu_id cpu_ids[] = {
51 [CPU_BANIAS] = { 6, 9, 5 },
52 [CPU_DOTHAN_A1] = { 6, 13, 1 },
53 [CPU_DOTHAN_A2] = { 6, 13, 2 },
54 [CPU_DOTHAN_B0] = { 6, 13, 6 },
55 [CPU_MP4HT_D0] = {15, 3, 4 },
56 [CPU_MP4HT_E0] = {15, 4, 1 },
57};
58#define N_IDS ARRAY_SIZE(cpu_ids)
59
60struct cpu_model
61{
62 const struct cpu_id *cpu_id;
63 const char *model_name;
64 unsigned max_freq; /* max clock in kHz */
65
66 struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
67};
68static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
69 const struct cpu_id *x);
70
71/* Operating points for current CPU */
72static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
73static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
74
75static struct cpufreq_driver centrino_driver;
76
77#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE
78
79/* Computes the correct form for IA32_PERF_CTL MSR for a particular
80 frequency/voltage operating point; frequency in MHz, volts in mV.
81 This is stored as "index" in the structure. */
82#define OP(mhz, mv) \
83 { \
84 .frequency = (mhz) * 1000, \
85 .index = (((mhz)/100) << 8) | ((mv - 700) / 16) \
86 }
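
/*
 * Worked example (editorial): OP(600, 844) produces
 *
 *	.frequency = 600000		(kHz)
 *	.index     = ((600/100) << 8) | ((844 - 700) / 16)
 *	           = (6 << 8) | 9 = 0x0609
 *
 * so bits 15:8 of the index carry the bus ratio and bits 7:0 the
 * voltage code, matching the IA32_PERF_CTL layout this value is
 * eventually written into.
 */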
87
88/*
89 * These voltage tables were derived from the Intel Pentium M
90 * datasheet, document 25261202.pdf, Table 5. I have verified they
91 * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium
92 * M.
93 */
94
95/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */
96static struct cpufreq_frequency_table banias_900[] =
97{
98 OP(600, 844),
99 OP(800, 988),
100 OP(900, 1004),
101 { .frequency = CPUFREQ_TABLE_END }
102};
103
104/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
105static struct cpufreq_frequency_table banias_1000[] =
106{
107 OP(600, 844),
108 OP(800, 972),
109 OP(900, 988),
110 OP(1000, 1004),
111 { .frequency = CPUFREQ_TABLE_END }
112};
113
114/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */
115static struct cpufreq_frequency_table banias_1100[] =
116{
117 OP( 600, 956),
118 OP( 800, 1020),
119 OP( 900, 1100),
120 OP(1000, 1164),
121 OP(1100, 1180),
122 { .frequency = CPUFREQ_TABLE_END }
123};
124
125
126/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */
127static struct cpufreq_frequency_table banias_1200[] =
128{
129 OP( 600, 956),
130 OP( 800, 1004),
131 OP( 900, 1020),
132 OP(1000, 1100),
133 OP(1100, 1164),
134 OP(1200, 1180),
135 { .frequency = CPUFREQ_TABLE_END }
136};
137
138/* Intel Pentium M processor 1.30GHz (Banias) */
139static struct cpufreq_frequency_table banias_1300[] =
140{
141 OP( 600, 956),
142 OP( 800, 1260),
143 OP(1000, 1292),
144 OP(1200, 1356),
145 OP(1300, 1388),
146 { .frequency = CPUFREQ_TABLE_END }
147};
148
149/* Intel Pentium M processor 1.40GHz (Banias) */
150static struct cpufreq_frequency_table banias_1400[] =
151{
152 OP( 600, 956),
153 OP( 800, 1180),
154 OP(1000, 1308),
155 OP(1200, 1436),
156 OP(1400, 1484),
157 { .frequency = CPUFREQ_TABLE_END }
158};
159
160/* Intel Pentium M processor 1.50GHz (Banias) */
161static struct cpufreq_frequency_table banias_1500[] =
162{
163 OP( 600, 956),
164 OP( 800, 1116),
165 OP(1000, 1228),
166 OP(1200, 1356),
167 OP(1400, 1452),
168 OP(1500, 1484),
169 { .frequency = CPUFREQ_TABLE_END }
170};
171
172/* Intel Pentium M processor 1.60GHz (Banias) */
173static struct cpufreq_frequency_table banias_1600[] =
174{
175 OP( 600, 956),
176 OP( 800, 1036),
177 OP(1000, 1164),
178 OP(1200, 1276),
179 OP(1400, 1420),
180 OP(1600, 1484),
181 { .frequency = CPUFREQ_TABLE_END }
182};
183
184/* Intel Pentium M processor 1.70GHz (Banias) */
185static struct cpufreq_frequency_table banias_1700[] =
186{
187 OP( 600, 956),
188 OP( 800, 1004),
189 OP(1000, 1116),
190 OP(1200, 1228),
191 OP(1400, 1308),
192 OP(1700, 1484),
193 { .frequency = CPUFREQ_TABLE_END }
194};
195#undef OP
196
197#define _BANIAS(cpuid, max, name) \
198{ .cpu_id = cpuid, \
199 .model_name = "Intel(R) Pentium(R) M processor " name "MHz", \
200 .max_freq = (max)*1000, \
201 .op_points = banias_##max, \
202}
203#define BANIAS(max) _BANIAS(&cpu_ids[CPU_BANIAS], max, #max)
204
205/* CPU models, their operating frequency range, and freq/voltage
206 operating points */
207static struct cpu_model models[] =
208{
209 _BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
210 BANIAS(1000),
211 BANIAS(1100),
212 BANIAS(1200),
213 BANIAS(1300),
214 BANIAS(1400),
215 BANIAS(1500),
216 BANIAS(1600),
217 BANIAS(1700),
218
219 /* NULL model_name is a wildcard */
220 { &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
221 { &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
222 { &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
223 { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
224 { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
225
226 { NULL, }
227};
228#undef _BANIAS
229#undef BANIAS
230
231static int centrino_cpu_init_table(struct cpufreq_policy *policy)
232{
233 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
234 struct cpu_model *model;
235
236 for(model = models; model->cpu_id != NULL; model++)
237 if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
238 (model->model_name == NULL ||
239 strcmp(cpu->x86_model_id, model->model_name) == 0))
240 break;
241
242 if (model->cpu_id == NULL) {
243 /* No match at all */
244 pr_debug("no support for CPU model \"%s\": "
245 "send /proc/cpuinfo to " MAINTAINER "\n",
246 cpu->x86_model_id);
247 return -ENOENT;
248 }
249
250 if (model->op_points == NULL) {
251 /* Matched a non-match */
252 pr_debug("no table support for CPU model \"%s\"\n",
253 cpu->x86_model_id);
254 pr_debug("try using the acpi-cpufreq driver\n");
255 return -ENOENT;
256 }
257
258 per_cpu(centrino_model, policy->cpu) = model;
259
260 pr_debug("found \"%s\": max frequency: %dkHz\n",
261 model->model_name, model->max_freq);
262
263 return 0;
264}
265
266#else
267static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
268{
269 return -ENODEV;
270}
271#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
272
273static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
274 const struct cpu_id *x)
275{
276 if ((c->x86 == x->x86) &&
277 (c->x86_model == x->x86_model) &&
278 (c->x86_mask == x->x86_mask))
279 return 1;
280 return 0;
281}
282
283/* To be called only after centrino_model is initialized */
284static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
285{
286 int i;
287
288 /*
289 * Extract clock in kHz from PERF_CTL value
290 * for centrino, as some DSDTs are buggy.
291 * Ideally, this can be done using the acpi_data structure.
292 */
293 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
294 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
295 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
296 msr = (msr >> 8) & 0xff;
297 return msr * 100000;
298 }
299
300 if ((!per_cpu(centrino_model, cpu)) ||
301 (!per_cpu(centrino_model, cpu)->op_points))
302 return 0;
303
304 msr &= 0xffff;
305 for (i = 0;
306 per_cpu(centrino_model, cpu)->op_points[i].frequency
307 != CPUFREQ_TABLE_END;
308 i++) {
309 if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
310 return per_cpu(centrino_model, cpu)->
311 op_points[i].frequency;
312 }
313 if (failsafe)
314 return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
315 else
316 return 0;
317}
318
319/* Return the current CPU frequency in kHz */
320static unsigned int get_cur_freq(unsigned int cpu)
321{
322 unsigned l, h;
323 unsigned clock_freq;
324
325 rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
326 clock_freq = extract_clock(l, cpu, 0);
327
328 if (unlikely(clock_freq == 0)) {
329 /*
330 * On some CPUs, we can see transient MSR values (which are
331 * not present in _PSS), while CPU is doing some automatic
332 * P-state transition (like TM2). Get the last freq set
333 * in PERF_CTL.
334 */
335 rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
336 clock_freq = extract_clock(l, cpu, 1);
337 }
338 return clock_freq;
339}
340
341
342static int centrino_cpu_init(struct cpufreq_policy *policy)
343{
344 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
345 unsigned freq;
346 unsigned l, h;
347 int ret;
348 int i;
349
350 /* Only Intel makes Enhanced Speedstep-capable CPUs */
351 if (cpu->x86_vendor != X86_VENDOR_INTEL ||
352 !cpu_has(cpu, X86_FEATURE_EST))
353 return -ENODEV;
354
355 if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
356 centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
357
358 if (policy->cpu != 0)
359 return -ENODEV;
360
361 for (i = 0; i < N_IDS; i++)
362 if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
363 break;
364
365 if (i != N_IDS)
366 per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
367
368 if (!per_cpu(centrino_cpu, policy->cpu)) {
369 pr_debug("found unsupported CPU with "
370 "Enhanced SpeedStep: send /proc/cpuinfo to "
371 MAINTAINER "\n");
372 return -ENODEV;
373 }
374
375 if (centrino_cpu_init_table(policy)) {
376 return -ENODEV;
377 }
378
379 /* Check to see if Enhanced SpeedStep is enabled, and try to
380 enable it if not. */
381 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
382
383 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
384 l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
385 pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l);
386 wrmsr(MSR_IA32_MISC_ENABLE, l, h);
387
388 /* check to see if it stuck */
389 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
390 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
391 printk(KERN_INFO PFX
392 "couldn't enable Enhanced SpeedStep\n");
393 return -ENODEV;
394 }
395 }
396
397 freq = get_cur_freq(policy->cpu);
398 policy->cpuinfo.transition_latency = 10000;
399 /* 10uS transition latency */
400 policy->cur = freq;
401
402 pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
403
404 ret = cpufreq_frequency_table_cpuinfo(policy,
405 per_cpu(centrino_model, policy->cpu)->op_points);
406 if (ret)
407 return (ret);
408
409 cpufreq_frequency_table_get_attr(
410 per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
411
412 return 0;
413}
414
415static int centrino_cpu_exit(struct cpufreq_policy *policy)
416{
417 unsigned int cpu = policy->cpu;
418
419 if (!per_cpu(centrino_model, cpu))
420 return -ENODEV;
421
422 cpufreq_frequency_table_put_attr(cpu);
423
424 per_cpu(centrino_model, cpu) = NULL;
425
426 return 0;
427}
428
429/**
430 * centrino_verify - verifies a new CPUFreq policy
431 * @policy: new policy
432 *
433 * Limit must be within this model's frequency range, with at least
434 * one border included.
435 */
436static int centrino_verify (struct cpufreq_policy *policy)
437{
438 return cpufreq_frequency_table_verify(policy,
439 per_cpu(centrino_model, policy->cpu)->op_points);
440}
441
442/**
443 * centrino_target - set a new CPUFreq policy
444 * @policy: new policy
445 * @target_freq: the target frequency
446 * @relation: how that frequency relates to achieved frequency
447 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
448 *
449 * Sets a new CPUFreq policy.
450 */
451static int centrino_target (struct cpufreq_policy *policy,
452 unsigned int target_freq,
453 unsigned int relation)
454{
455 unsigned int newstate = 0;
456 unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
457 struct cpufreq_freqs freqs;
458 int retval = 0;
459 unsigned int j, k, first_cpu, tmp;
460 cpumask_var_t covered_cpus;
461
462 if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
463 return -ENOMEM;
464
465 if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
466 retval = -ENODEV;
467 goto out;
468 }
469
470 if (unlikely(cpufreq_frequency_table_target(policy,
471 per_cpu(centrino_model, cpu)->op_points,
472 target_freq,
473 relation,
474 &newstate))) {
475 retval = -EINVAL;
476 goto out;
477 }
478
479 first_cpu = 1;
480 for_each_cpu(j, policy->cpus) {
481 int good_cpu;
482
483 /* cpufreq holds the hotplug lock, so we are safe here */
484 if (!cpu_online(j))
485 continue;
486
487 /*
488 * Support for SMP systems.
489 * Make sure we are running on CPU that wants to change freq
490 */
491 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
492 good_cpu = cpumask_any_and(policy->cpus,
493 cpu_online_mask);
494 else
495 good_cpu = j;
496
497 if (good_cpu >= nr_cpu_ids) {
498 pr_debug("couldn't limit to CPUs in this domain\n");
499 retval = -EAGAIN;
500 if (first_cpu) {
501 /* We haven't started the transition yet. */
502 goto out;
503 }
504 break;
505 }
506
507 msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
508
509 if (first_cpu) {
510 rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
511 if (msr == (oldmsr & 0xffff)) {
512 pr_debug("no change needed - msr was and needs "
513 "to be %x\n", oldmsr);
514 retval = 0;
515 goto out;
516 }
517
518 freqs.old = extract_clock(oldmsr, cpu, 0);
519 freqs.new = extract_clock(msr, cpu, 0);
520
521 pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
522 target_freq, freqs.old, freqs.new, msr);
523
524 for_each_cpu(k, policy->cpus) {
525 if (!cpu_online(k))
526 continue;
527 freqs.cpu = k;
528 cpufreq_notify_transition(&freqs,
529 CPUFREQ_PRECHANGE);
530 }
531
532 first_cpu = 0;
533 /* all but 16 LSB are reserved, treat them with care */
534 oldmsr &= ~0xffff;
535 msr &= 0xffff;
536 oldmsr |= msr;
537 }
538
539 wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
540 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
541 break;
542
543 cpumask_set_cpu(j, covered_cpus);
544 }
545
546 for_each_cpu(k, policy->cpus) {
547 if (!cpu_online(k))
548 continue;
549 freqs.cpu = k;
550 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
551 }
552
553 if (unlikely(retval)) {
554 /*
555 * We have failed halfway through the frequency change.
556 * We have sent callbacks to policy->cpus and
557 * MSRs have already been written on covered_cpus.
558 * Best effort undo..
559 */
560
561 for_each_cpu(j, covered_cpus)
562 wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
563
564 tmp = freqs.new;
565 freqs.new = freqs.old;
566 freqs.old = tmp;
567 for_each_cpu(j, policy->cpus) {
568 if (!cpu_online(j))
569 continue;
570 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
571 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
572 }
573 }
574 retval = 0;
575
576out:
577 free_cpumask_var(covered_cpus);
578 return retval;
579}
580
581static struct freq_attr* centrino_attr[] = {
582 &cpufreq_freq_attr_scaling_available_freqs,
583 NULL,
584};
585
586static struct cpufreq_driver centrino_driver = {
587 .name = "centrino", /* should be speedstep-centrino,
588 but there's a 16 char limit */
589 .init = centrino_cpu_init,
590 .exit = centrino_cpu_exit,
591 .verify = centrino_verify,
592 .target = centrino_target,
593 .get = get_cur_freq,
594 .attr = centrino_attr,
595 .owner = THIS_MODULE,
596};
597
598
599/**
600 * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
601 *
602 * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
603 * unsupported devices, -ENOENT if there's no voltage table for this
604 * particular CPU model, -EINVAL on problems during initialization,
605 * and zero on success.
606 *
607 * This is quite picky. Not only does the CPU have to advertise the
608 * "est" flag in the cpuid capability flags, we look for a specific
609 * CPU model and stepping, and we need to have the exact model name in
610 * our voltage tables. That is, be paranoid about not releasing
611 * someone's valuable magic smoke.
612 */
613static int __init centrino_init(void)
614{
615 struct cpuinfo_x86 *cpu = &cpu_data(0);
616
617 if (!cpu_has(cpu, X86_FEATURE_EST))
618 return -ENODEV;
619
620 return cpufreq_register_driver(&centrino_driver);
621}
622
623static void __exit centrino_exit(void)
624{
625 cpufreq_unregister_driver(&centrino_driver);
626}
627
628MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
629MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");
630MODULE_LICENSE ("GPL");
631
632late_initcall(centrino_init);
633module_exit(centrino_exit);
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
new file mode 100644
index 000000000000..a748ce782fee
--- /dev/null
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -0,0 +1,448 @@
1/*
2 * (C) 2001 Dave Jones, Arjan van de ven.
3 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 * Based upon reverse engineered information, and on Intel documentation
7 * for chipsets ICH2-M and ICH3-M.
8 *
9 * Many thanks to Ducrot Bruno for finding and fixing the last
10 * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler
11 * for extensive testing.
12 *
13 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
14 */
15
16
17/*********************************************************************
18 * SPEEDSTEP - DEFINITIONS *
19 *********************************************************************/
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/cpufreq.h>
25#include <linux/pci.h>
26#include <linux/sched.h>
27
28#include "speedstep-lib.h"
29
30
31/* speedstep_chipset:
32 * It is necessary to know which chipset is used. As accesses to
33 * this device occur at various places in this module, we need a
34 * static struct pci_dev * pointing to that device.
35 */
36static struct pci_dev *speedstep_chipset_dev;
37
38
39/* speedstep_processor
40 */
41static enum speedstep_processor speedstep_processor;
42
43static u32 pmbase;
44
45/*
46 * There are only two frequency states for each processor. Values
47 * are in kHz for the time being.
48 */
49static struct cpufreq_frequency_table speedstep_freqs[] = {
50 {SPEEDSTEP_HIGH, 0},
51 {SPEEDSTEP_LOW, 0},
52 {0, CPUFREQ_TABLE_END},
53};
54
55
56/**
57 * speedstep_find_register - read the PMBASE address
58 *
59 * Returns: -ENODEV if no register could be found
60 */
61static int speedstep_find_register(void)
62{
63 if (!speedstep_chipset_dev)
64 return -ENODEV;
65
66 /* get PMBASE */
67 pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
68 if (!(pmbase & 0x01)) {
69 printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
70 return -ENODEV;
71 }
72
73 pmbase &= 0xFFFFFFFE;
74 if (!pmbase) {
75 printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
76 return -ENODEV;
77 }
78
79 pr_debug("pmbase is 0x%x\n", pmbase);
80 return 0;
81}
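
/*
 * Editorial note: config dword 0x40 of the ICH LPC bridge holds the
 * ACPI power-management base address (PMBASE); bit 0 is the I/O-space
 * indicator, which is why the code checks it and then masks it off:
 *
 *	pmbase = 0x00001001  ->  valid, I/O base 0x1000
 *	pmbase = 0x00000000  ->  no register, bail out
 *
 * The SpeedStep control byte then sits at I/O port pmbase + 0x50,
 * as used by speedstep_set_state() below.
 */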
82
83/**
84 * speedstep_set_state - set the SpeedStep state
85 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
86 *
87 * Tries to change the SpeedStep state. Can be called from
88 * smp_call_function_single.
89 */
90static void speedstep_set_state(unsigned int state)
91{
92 u8 pm2_blk;
93 u8 value;
94 unsigned long flags;
95
96 if (state > 0x1)
97 return;
98
99 /* Disable IRQs */
100 local_irq_save(flags);
101
102 /* read state */
103 value = inb(pmbase + 0x50);
104
105 pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
106
107 /* write new state */
108 value &= 0xFE;
109 value |= state;
110
111 pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase);
112
113 /* Disable bus master arbitration */
114 pm2_blk = inb(pmbase + 0x20);
115 pm2_blk |= 0x01;
116 outb(pm2_blk, (pmbase + 0x20));
117
118 /* Actual transition */
119 outb(value, (pmbase + 0x50));
120
121 /* Restore bus master arbitration */
122 pm2_blk &= 0xfe;
123 outb(pm2_blk, (pmbase + 0x20));
124
125 /* check if transition was successful */
126 value = inb(pmbase + 0x50);
127
128 /* Enable IRQs */
129 local_irq_restore(flags);
130
131 pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
132
133 if (state == (value & 0x1))
134 pr_debug("change to %u MHz succeeded\n",
135 speedstep_get_frequency(speedstep_processor) / 1000);
136 else
137 printk(KERN_ERR "cpufreq: change failed - I/O error\n");
138
139 return;
140}
141
142/* Wrapper for smp_call_function_single. */
143static void _speedstep_set_state(void *_state)
144{
145 speedstep_set_state(*(unsigned int *)_state);
146}
147
148/**
149 * speedstep_activate - activate SpeedStep control in the chipset
150 *
151 * Tries to activate the SpeedStep status and control registers.
152 * Returns -EINVAL on an unsupported chipset, and zero on success.
153 */
154static int speedstep_activate(void)
155{
156 u16 value = 0;
157
158 if (!speedstep_chipset_dev)
159 return -EINVAL;
160
161 pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value);
162 if (!(value & 0x08)) {
163 value |= 0x08;
164 pr_debug("activating SpeedStep (TM) registers\n");
165 pci_write_config_word(speedstep_chipset_dev, 0x00A0, value);
166 }
167
168 return 0;
169}
170
171
172/**
173 * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic
174 *
175 * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to
176 * the LPC bridge / PM module which contains all power-management
177 * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected
178 * chipset, or zero on failure.
179 */
180static unsigned int speedstep_detect_chipset(void)
181{
182 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
183 PCI_DEVICE_ID_INTEL_82801DB_12,
184 PCI_ANY_ID, PCI_ANY_ID,
185 NULL);
186 if (speedstep_chipset_dev)
187 return 4; /* 4-M */
188
189 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
190 PCI_DEVICE_ID_INTEL_82801CA_12,
191 PCI_ANY_ID, PCI_ANY_ID,
192 NULL);
193 if (speedstep_chipset_dev)
194 return 3; /* 3-M */
195
196
197 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
198 PCI_DEVICE_ID_INTEL_82801BA_10,
199 PCI_ANY_ID, PCI_ANY_ID,
200 NULL);
201 if (speedstep_chipset_dev) {
202 /* speedstep.c causes lockups on the Dell Inspiron 8000 and
203 * 8100, which use a pretty old revision of the 82815
204 * host bridge. Abort on these systems.
205 */
206 static struct pci_dev *hostbridge;
207
208 hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
209 PCI_DEVICE_ID_INTEL_82815_MC,
210 PCI_ANY_ID, PCI_ANY_ID,
211 NULL);
212
213 if (!hostbridge)
214 return 2; /* 2-M */
215
216 if (hostbridge->revision < 5) {
217 pr_debug("hostbridge does not support speedstep\n");
218 speedstep_chipset_dev = NULL;
219 pci_dev_put(hostbridge);
220 return 0;
221 }
222
223 pci_dev_put(hostbridge);
224 return 2; /* 2-M */
225 }
226
227 return 0;
228}
229
230static void get_freq_data(void *_speed)
231{
232 unsigned int *speed = _speed;
233
234 *speed = speedstep_get_frequency(speedstep_processor);
235}
236
237static unsigned int speedstep_get(unsigned int cpu)
238{
239 unsigned int speed;
240
241 /* You're supposed to ensure CPU is online. */
242 if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
243 BUG();
244
245 pr_debug("detected %u kHz as current frequency\n", speed);
246 return speed;
247}
248
249/**
250 * speedstep_target - set a new CPUFreq policy
251 * @policy: new policy
252 * @target_freq: the target frequency
253 * @relation: how that frequency relates to achieved frequency
254 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
255 *
256 * Sets a new CPUFreq policy.
257 */
258static int speedstep_target(struct cpufreq_policy *policy,
259 unsigned int target_freq,
260 unsigned int relation)
261{
262 unsigned int newstate = 0, policy_cpu;
263 struct cpufreq_freqs freqs;
264 int i;
265
266 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
267 target_freq, relation, &newstate))
268 return -EINVAL;
269
270 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
271 freqs.old = speedstep_get(policy_cpu);
272 freqs.new = speedstep_freqs[newstate].frequency;
273 freqs.cpu = policy->cpu;
274
275 pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
276
277 /* no transition necessary */
278 if (freqs.old == freqs.new)
279 return 0;
280
281 for_each_cpu(i, policy->cpus) {
282 freqs.cpu = i;
283 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
284 }
285
286 smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
287 true);
288
289 for_each_cpu(i, policy->cpus) {
290 freqs.cpu = i;
291 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
292 }
293
294 return 0;
295}
296
297
298/**
299 * speedstep_verify - verifies a new CPUFreq policy
300 * @policy: new policy
301 *
302 * Limit must be within speedstep_low_freq and speedstep_high_freq, with
303 * at least one border included.
304 */
305static int speedstep_verify(struct cpufreq_policy *policy)
306{
307 return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
308}
309
310struct get_freqs {
311 struct cpufreq_policy *policy;
312 int ret;
313};
314
315static void get_freqs_on_cpu(void *_get_freqs)
316{
317 struct get_freqs *get_freqs = _get_freqs;
318
319 get_freqs->ret =
320 speedstep_get_freqs(speedstep_processor,
321 &speedstep_freqs[SPEEDSTEP_LOW].frequency,
322 &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
323 &get_freqs->policy->cpuinfo.transition_latency,
324 &speedstep_set_state);
325}
326
327static int speedstep_cpu_init(struct cpufreq_policy *policy)
328{
329 int result;
330 unsigned int policy_cpu, speed;
331 struct get_freqs gf;
332
333 /* only run on CPU to be set, or on its sibling */
334#ifdef CONFIG_SMP
335 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
336#endif
337 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
338
339 /* detect low and high frequency and transition latency */
340 gf.policy = policy;
341 smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
342 if (gf.ret)
343 return gf.ret;
344
345 /* get current speed setting */
346 speed = speedstep_get(policy_cpu);
347 if (!speed)
348 return -EIO;
349
350 pr_debug("currently at %s speed setting - %i MHz\n",
351 (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
352 ? "low" : "high",
353 (speed / 1000));
354
355 /* cpuinfo and default policy values */
356 policy->cur = speed;
357
358 result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
359 if (result)
360 return result;
361
362 cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
363
364 return 0;
365}
366
367
368static int speedstep_cpu_exit(struct cpufreq_policy *policy)
369{
370 cpufreq_frequency_table_put_attr(policy->cpu);
371 return 0;
372}
373
374static struct freq_attr *speedstep_attr[] = {
375 &cpufreq_freq_attr_scaling_available_freqs,
376 NULL,
377};
378
379
380static struct cpufreq_driver speedstep_driver = {
381 .name = "speedstep-ich",
382 .verify = speedstep_verify,
383 .target = speedstep_target,
384 .init = speedstep_cpu_init,
385 .exit = speedstep_cpu_exit,
386 .get = speedstep_get,
387 .owner = THIS_MODULE,
388 .attr = speedstep_attr,
389};
390
391
392/**
393 * speedstep_init - initializes the SpeedStep CPUFreq driver
394 *
395 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
396 * devices, -EINVAL on problems during initialization, and zero on
397 * success.
398 */
399static int __init speedstep_init(void)
400{
401 /* detect processor */
402 speedstep_processor = speedstep_detect_processor();
403 if (!speedstep_processor) {
404 pr_debug("Intel(R) SpeedStep(TM) capable processor "
405 "not found\n");
406 return -ENODEV;
407 }
408
409 /* detect chipset */
410 if (!speedstep_detect_chipset()) {
411 pr_debug("Intel(R) SpeedStep(TM) for this chipset not "
412 "(yet) available.\n");
413 return -ENODEV;
414 }
415
416 /* activate speedstep support */
417 if (speedstep_activate()) {
418 pci_dev_put(speedstep_chipset_dev);
419 return -EINVAL;
420 }
421
422 if (speedstep_find_register())
423 return -ENODEV;
424
425 return cpufreq_register_driver(&speedstep_driver);
426}
427
428
429/**
430 * speedstep_exit - unregisters SpeedStep support
431 *
432 * Unregisters SpeedStep support.
433 */
434static void __exit speedstep_exit(void)
435{
436 pci_dev_put(speedstep_chipset_dev);
437 cpufreq_unregister_driver(&speedstep_driver);
438}
439
440
441MODULE_AUTHOR("Dave Jones <davej@redhat.com>, "
442 "Dominik Brodowski <linux@brodo.de>");
443MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
444 "with ICH-M southbridges.");
445MODULE_LICENSE("GPL");
446
447module_init(speedstep_init);
448module_exit(speedstep_exit);
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
new file mode 100644
index 000000000000..8af2d2fd9d51
--- /dev/null
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -0,0 +1,478 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * Library for common functions for Intel SpeedStep v.1 and v.2 support
7 *
8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/init.h>
15#include <linux/cpufreq.h>
16
17#include <asm/msr.h>
18#include <asm/tsc.h>
19#include "speedstep-lib.h"
20
21#define PFX "speedstep-lib: "
22
23#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
24static int relaxed_check;
25#else
26#define relaxed_check 0
27#endif
28
29/*********************************************************************
30 * GET PROCESSOR CORE SPEED IN KHZ *
31 *********************************************************************/
32
33static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
34{
35 /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
36 struct {
37 unsigned int ratio; /* Frequency Multiplier (x10) */
38 u8 bitmap; /* power on configuration bits
39 [27, 25:22] (in MSR 0x2a) */
40 } msr_decode_mult[] = {
41 { 30, 0x01 },
42 { 35, 0x05 },
43 { 40, 0x02 },
44 { 45, 0x06 },
45 { 50, 0x00 },
46 { 55, 0x04 },
47 { 60, 0x0b },
48 { 65, 0x0f },
49 { 70, 0x09 },
50 { 75, 0x0d },
51 { 80, 0x0a },
52 { 85, 0x26 },
53 { 90, 0x20 },
54 { 100, 0x2b },
55 { 0, 0xff } /* error or unknown value */
56 };
57
58 /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
59 struct {
60 unsigned int value; /* Front Side Bus speed in MHz */
61 u8 bitmap; /* power on configuration bits [18: 19]
62 (in MSR 0x2a) */
63 } msr_decode_fsb[] = {
64 { 66, 0x0 },
65 { 100, 0x2 },
66 { 133, 0x1 },
67 { 0, 0xff}
68 };
69
70 u32 msr_lo, msr_tmp;
71 int i = 0, j = 0;
72
73 /* read MSR 0x2a - we only need the low 32 bits */
74 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
75 pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
76 msr_tmp = msr_lo;
77
78 /* decode the FSB */
79 msr_tmp &= 0x00c0000;
80 msr_tmp >>= 18;
81 while (msr_tmp != msr_decode_fsb[i].bitmap) {
82 if (msr_decode_fsb[i].bitmap == 0xff)
83 return 0;
84 i++;
85 }
86
87 /* decode the multiplier */
88 if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) {
89 pr_debug("workaround for early PIIIs\n");
90 msr_lo &= 0x03c00000;
91 } else
92 msr_lo &= 0x0bc00000;
93 msr_lo >>= 22;
94 while (msr_lo != msr_decode_mult[j].bitmap) {
95 if (msr_decode_mult[j].bitmap == 0xff)
96 return 0;
97 j++;
98 }
99
100 pr_debug("speed is %u\n",
101 (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100));
102
103 return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100;
104}
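
/*
 * Worked example (editorial): an FSB bitmap of 0x2 selects 100 MHz and
 * a multiplier bitmap of 0x02 selects ratio 40 (i.e. 4.0x, since the
 * ratio column is scaled by ten), so the function returns
 *
 *	40 * 100 * 100 = 400000 kHz	(a 400 MHz PIII)
 */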
105
106
107static unsigned int pentiumM_get_frequency(void)
108{
109 u32 msr_lo, msr_tmp;
110
111 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
112 pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
113
114 /* see table B-2 of 24547212.pdf */
115 if (msr_lo & 0x00040000) {
116 printk(KERN_DEBUG PFX "PM - invalid FSB: 0x%x 0x%x\n",
117 msr_lo, msr_tmp);
118 return 0;
119 }
120
121 msr_tmp = (msr_lo >> 22) & 0x1f;
122 pr_debug("bits 22-26 are 0x%x, speed is %u\n",
123 msr_tmp, (msr_tmp * 100 * 1000));
124
125 return msr_tmp * 100 * 1000;
126}
127
128static unsigned int pentium_core_get_frequency(void)
129{
130 u32 fsb = 0;
131 u32 msr_lo, msr_tmp;
132 int ret;
133
134 rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
135 /* see table B-2 of 25366920.pdf */
136 switch (msr_lo & 0x07) {
137 case 5:
138 fsb = 100000;
139 break;
140 case 1:
141 fsb = 133333;
142 break;
143 case 3:
144 fsb = 166667;
145 break;
146 case 2:
147 fsb = 200000;
148 break;
149 case 0:
150 fsb = 266667;
151 break;
152 case 4:
153 fsb = 333333;
154 break;
155 default:
156 printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value\n");
157 }
158
159 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
160 pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n",
161 msr_lo, msr_tmp);
162
163 msr_tmp = (msr_lo >> 22) & 0x1f;
164 pr_debug("bits 22-26 are 0x%x, speed is %u\n",
165 msr_tmp, (msr_tmp * fsb));
166
167 ret = (msr_tmp * fsb);
168 return ret;
169}
170
171
172static unsigned int pentium4_get_frequency(void)
173{
174 struct cpuinfo_x86 *c = &boot_cpu_data;
175 u32 msr_lo, msr_hi, mult;
176 unsigned int fsb = 0;
177 unsigned int ret;
178 u8 fsb_code;
179
180 /* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency
181 * to System Bus Frequency Ratio Field in the Processor Frequency
182 * Configuration Register of the MSR. Therefore the current
183 * frequency cannot be calculated and has to be measured.
184 */
185 if (c->x86_model < 2)
186 return cpu_khz;
187
188 rdmsr(0x2c, msr_lo, msr_hi);
189
190 pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi);
191
192 /* decode the FSB: see IA-32 Intel (C) Architecture Software
193 * Developer's Manual, Volume 3: System Programming Guide,
194 * revision #12 in Table B-1: MSRs in the Pentium 4 and
195 * Intel Xeon Processors, on page B-4 and B-5.
196 */
197 fsb_code = (msr_lo >> 16) & 0x7;
198 switch (fsb_code) {
199 case 0:
200 fsb = 100 * 1000;
201 break;
202 case 1:
203 fsb = 13333 * 10;
204 break;
205 case 2:
206 fsb = 200 * 1000;
207 break;
208 }
209
210 if (!fsb)
211 printk(KERN_DEBUG PFX "couldn't detect FSB speed. "
212 "Please send an e-mail to <linux@brodo.de>\n");
213
214 /* Multiplier. */
215 mult = msr_lo >> 24;
216
217 pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n",
218 fsb, mult, (fsb * mult));
219
220 ret = (fsb * mult);
221 return ret;
222}
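/*
 * Editor's note -- worked example (not part of the original patch):
 * assuming msr_lo = 0x12010000, fsb_code = (msr_lo >> 16) & 0x7 = 1,
 * so fsb = 133330 kHz, and mult = msr_lo >> 24 = 0x12 = 18, giving
 * 18 * 133330 = 2399940 kHz -- a 2.4 GHz Pentium 4.
 */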
223
224
225/* Warning: may get called from smp_call_function_single. */
226unsigned int speedstep_get_frequency(enum speedstep_processor processor)
227{
228 switch (processor) {
229 case SPEEDSTEP_CPU_PCORE:
230 return pentium_core_get_frequency();
231 case SPEEDSTEP_CPU_PM:
232 return pentiumM_get_frequency();
233 case SPEEDSTEP_CPU_P4D:
234 case SPEEDSTEP_CPU_P4M:
235 return pentium4_get_frequency();
236 case SPEEDSTEP_CPU_PIII_T:
237 case SPEEDSTEP_CPU_PIII_C:
238 case SPEEDSTEP_CPU_PIII_C_EARLY:
239 return pentium3_get_frequency(processor);
240 default:
241 return 0;
242 }
243 return 0;
244}
245EXPORT_SYMBOL_GPL(speedstep_get_frequency);
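/*
 * Editor's sketch (not part of the original patch): typical use of the
 * two library entry points from a dependent driver, assuming detection
 * is done once at init time.
 */
#if 0
static unsigned int example_query_core_clock(void)
{
	enum speedstep_processor cpu = speedstep_detect_processor();

	return cpu ? speedstep_get_frequency(cpu) : 0;	/* kHz; 0 = unknown */
}
#endif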
246
247
248/*********************************************************************
249 * DETECT SPEEDSTEP-CAPABLE PROCESSOR *
250 *********************************************************************/
251
252unsigned int speedstep_detect_processor(void)
253{
254 struct cpuinfo_x86 *c = &cpu_data(0);
255 u32 ebx, msr_lo, msr_hi;
256
257 pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model);
258
259 if ((c->x86_vendor != X86_VENDOR_INTEL) ||
260 ((c->x86 != 6) && (c->x86 != 0xF)))
261 return 0;
262
263 if (c->x86 == 0xF) {
264 /* Intel Mobile Pentium 4-M
265 * or Intel Mobile Pentium 4 with 533 MHz FSB */
266 if (c->x86_model != 2)
267 return 0;
268
269 ebx = cpuid_ebx(0x00000001);
270 ebx &= 0x000000FF;
271
272 pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
273
274 switch (c->x86_mask) {
275 case 4:
276 /*
277 * B-stepping [M-P4-M]
278 * sample has ebx = 0x0f, production has 0x0e.
279 */
280 if ((ebx == 0x0e) || (ebx == 0x0f))
281 return SPEEDSTEP_CPU_P4M;
282 break;
283 case 7:
284 /*
285 * C-stepping [M-P4-M]
286 * needs to have ebx=0x0e, else it's a celeron:
287 * cf. 25130917.pdf / page 7, footnote 5 even
288 * though 25072120.pdf / page 7 doesn't say
289 * samples are only of B-stepping...
290 */
291 if (ebx == 0x0e)
292 return SPEEDSTEP_CPU_P4M;
293 break;
294 case 9:
295 /*
296 * D-stepping [M-P4-M or M-P4/533]
297 *
298 * this is totally strange: CPUID 0x0F29 is
299 * used by M-P4-M, M-P4/533 and(!) Celeron CPUs.
300 * The latter need to be sorted out as they don't
301 * support speedstep.
302 * Celerons with CPUID 0x0F29 may have either
303 * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything
304 * specific.
305 * M-P4-Ms may have either ebx=0xe or 0xf [see above]
306 * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf]
307 * also, M-P4M HTs have ebx=0x8, too
308 * For now, they are distinguished by the model_id
309 * string
310 */
311 if ((ebx == 0x0e) ||
312 (strstr(c->x86_model_id,
313 "Mobile Intel(R) Pentium(R) 4") != NULL))
314 return SPEEDSTEP_CPU_P4M;
315 break;
316 default:
317 break;
318 }
319 return 0;
320 }
321
322 switch (c->x86_model) {
323 case 0x0B: /* Intel PIII [Tualatin] */
324 /* cpuid_ebx(1) is 0x04 for desktop PIII,
325 * 0x06 for mobile PIII-M */
326 ebx = cpuid_ebx(0x00000001);
327 pr_debug("ebx is %x\n", ebx);
328
329 ebx &= 0x000000FF;
330
331 if (ebx != 0x06)
332 return 0;
333
334 /* So far all PIII-M processors support SpeedStep. See
335 * Intel's 24540640.pdf of June 2003
336 */
337 return SPEEDSTEP_CPU_PIII_T;
338
339 case 0x08: /* Intel PIII [Coppermine] */
340
341 /* all mobile PIII Coppermines have FSB 100 MHz
342 * ==> sort out a few desktop PIIIs. */
343 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi);
344 pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n",
345 msr_lo, msr_hi);
346 msr_lo &= 0x00c0000;
347 if (msr_lo != 0x0080000)
348 return 0;
349
350 /*
351 * If the processor is a mobile version, the
352 * platform ID has bit 50 set;
353 * it has SpeedStep technology if either
354 * bit 56 or 57 is set
355 */
356 rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi);
357 pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n",
358 msr_lo, msr_hi);
359 if ((msr_hi & (1<<18)) &&
360 (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
361 if (c->x86_mask == 0x01) {
362 pr_debug("early PIII version\n");
363 return SPEEDSTEP_CPU_PIII_C_EARLY;
364 } else
365 return SPEEDSTEP_CPU_PIII_C;
366 }
367
368 default:
369 return 0;
370 }
371}
372EXPORT_SYMBOL_GPL(speedstep_detect_processor);
373
374
375/*********************************************************************
376 * DETECT SPEEDSTEP SPEEDS *
377 *********************************************************************/
378
379unsigned int speedstep_get_freqs(enum speedstep_processor processor,
380 unsigned int *low_speed,
381 unsigned int *high_speed,
382 unsigned int *transition_latency,
383 void (*set_state) (unsigned int state))
384{
385 unsigned int prev_speed;
386 unsigned int ret = 0;
387 unsigned long flags;
388 struct timeval tv1, tv2;
389
390 if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
391 return -EINVAL;
392
393 pr_debug("trying to determine both speeds\n");
394
395 /* get current speed */
396 prev_speed = speedstep_get_frequency(processor);
397 if (!prev_speed)
398 return -EIO;
399
400 pr_debug("previous speed is %u\n", prev_speed);
401
402 local_irq_save(flags);
403
404 /* switch to low state */
405 set_state(SPEEDSTEP_LOW);
406 *low_speed = speedstep_get_frequency(processor);
407 if (!*low_speed) {
408 ret = -EIO;
409 goto out;
410 }
411
412 pr_debug("low speed is %u\n", *low_speed);
413
414 /* start latency measurement */
415 if (transition_latency)
416 do_gettimeofday(&tv1);
417
418 /* switch to high state */
419 set_state(SPEEDSTEP_HIGH);
420
421 /* end latency measurement */
422 if (transition_latency)
423 do_gettimeofday(&tv2);
424
425 *high_speed = speedstep_get_frequency(processor);
426 if (!*high_speed) {
427 ret = -EIO;
428 goto out;
429 }
430
431 pr_debug("high speed is %u\n", *high_speed);
432
433 if (*low_speed == *high_speed) {
434 ret = -ENODEV;
435 goto out;
436 }
437
438 /* switch to previous state, if necessary */
439 if (*high_speed != prev_speed)
440 set_state(SPEEDSTEP_LOW);
441
442 if (transition_latency) {
443 *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
444 tv2.tv_usec - tv1.tv_usec;
445 pr_debug("transition latency is %u uSec\n", *transition_latency);
446
447 /* convert uSec to nSec and add 20% for safety reasons */
448 *transition_latency *= 1200;
449
450 /* check if the latency measurement is too high or too low
451 * and set it to a safe value (500uSec) in that case
452 */
453 if (*transition_latency > 10000000 ||
454 *transition_latency < 50000) {
455 printk(KERN_WARNING PFX "frequency transition "
456 "measured seems out of range (%u "
457 "nSec), falling back to a safe one of"
458 "%u nSec.\n",
459 *transition_latency, 500000);
460 *transition_latency = 500000;
461 }
462 }
463
464out:
465 local_irq_restore(flags);
466 return ret;
467}
468EXPORT_SYMBOL_GPL(speedstep_get_freqs);
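/*
 * Editor's sketch (not part of the original patch): how a driver calls
 * speedstep_get_freqs().  example_set_state is a placeholder for the
 * driver's own transition routine; see speedstep-smi.c for a real
 * caller.
 */
#if 0
static void example_set_state(unsigned int state);	/* SPEEDSTEP_{HIGH,LOW} */

static int example_probe_freqs(enum speedstep_processor cpu)
{
	unsigned int low, high, latency;
	int ret;

	ret = speedstep_get_freqs(cpu, &low, &high, &latency,
				  example_set_state);
	if (!ret)
		pr_debug("low %u kHz, high %u kHz, latency %u ns\n",
			 low, high, latency);
	return ret;
}
#endif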
469
470#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
471module_param(relaxed_check, int, 0444);
472MODULE_PARM_DESC(relaxed_check,
473 "Don't do all checks for speedstep capability.");
474#endif
475
476MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
477MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers.");
478MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
new file mode 100644
index 000000000000..70d9cea1219d
--- /dev/null
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -0,0 +1,49 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * Library for common functions for Intel SpeedStep v.1 and v.2 support
7 *
8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
9 */
10
11
12
13/* processors */
14enum speedstep_processor {
15 SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */
16 SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */
17 SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */
18 SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */
19/* the following processors are not speedstep-capable and are not auto-detected
20 * in speedstep_detect_processor(). However, their speed can be detected using
21 * the speedstep_get_frequency() call. */
22 SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */
23 SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */
24 SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */
25};
26
27/* speedstep states -- only two of them */
28
29#define SPEEDSTEP_HIGH 0x00000000
30#define SPEEDSTEP_LOW 0x00000001
31
32
33/* detect a speedstep-capable processor */
34extern enum speedstep_processor speedstep_detect_processor(void);
35
36/* detect the current speed (in khz) of the processor */
37extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
38
39
40/* detect the low and high speeds of the processor. The callback
41 * set_state's argument is either SPEEDSTEP_HIGH or SPEEDSTEP_LOW;
42 * it must switch the processor speed without initiating any
43 * cpufreq_notify_transition calls.
44 */
45extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
46 unsigned int *low_speed,
47 unsigned int *high_speed,
48 unsigned int *transition_latency,
49 void (*set_state) (unsigned int state));
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
new file mode 100644
index 000000000000..c76ead3490bf
--- /dev/null
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -0,0 +1,464 @@
1/*
2 * Intel SpeedStep SMI driver.
3 *
4 * (C) 2003 Hiroshi Miura <miura@da-cha.org>
5 *
6 * Licensed under the terms of the GNU GPL License version 2.
7 *
8 */
9
10
11/*********************************************************************
12 * SPEEDSTEP - DEFINITIONS *
13 *********************************************************************/
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/init.h>
19#include <linux/cpufreq.h>
20#include <linux/delay.h>
21#include <linux/io.h>
22#include <asm/ist.h>
23
24#include "speedstep-lib.h"
25
26/* speedstep system management interface port/command.
27 *
28 * These parameters are obtained from the IST-SMI BIOS call.
29 * If the user supplies them, the user-given values are used instead.
30 *
31 */
32static int smi_port;
33static int smi_cmd;
34static unsigned int smi_sig;
35
36/* info about the processor */
37static enum speedstep_processor speedstep_processor;
38
39/*
40 * There are only two frequency states for each processor. Values
41 * are in kHz for the time being.
42 */
43static struct cpufreq_frequency_table speedstep_freqs[] = {
44 {SPEEDSTEP_HIGH, 0},
45 {SPEEDSTEP_LOW, 0},
46 {0, CPUFREQ_TABLE_END},
47};
48
49#define GET_SPEEDSTEP_OWNER 0
50#define GET_SPEEDSTEP_STATE 1
51#define SET_SPEEDSTEP_STATE 2
52#define GET_SPEEDSTEP_FREQS 4
53
54/* how often shall the SMI call be tried if it failed, e.g. because
55 * of DMA activity going on? */
56#define SMI_TRIES 5
57
58/**
59 * speedstep_smi_ownership
60 */
61static int speedstep_smi_ownership(void)
62{
63 u32 command, result, magic, dummy;
64 u32 function = GET_SPEEDSTEP_OWNER;
65 unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation";
66
67 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
68 magic = virt_to_phys(magic_data);
69
70 pr_debug("trying to obtain ownership with command %x at port %x\n",
71 command, smi_port);
72
73 __asm__ __volatile__(
74 "push %%ebp\n"
75 "out %%al, (%%dx)\n"
76 "pop %%ebp\n"
77 : "=D" (result),
78 "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy),
79 "=S" (dummy)
80 : "a" (command), "b" (function), "c" (0), "d" (smi_port),
81 "D" (0), "S" (magic)
82 : "memory"
83 );
84
85 pr_debug("result is %x\n", result);
86
87 return result;
88}
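/*
 * Editor's note (not part of the original patch): the single
 * "out %%al, (%%dx)" write to the SMI command port traps into the BIOS
 * handler, which takes its arguments directly from the saved registers:
 * EAX carries the signature|command word, EBX the subfunction, EDX the
 * port, and ESI the physical address of the magic string; the status is
 * handed back in EDI.  EBP is saved/restored by hand because the
 * handler may clobber it.
 */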
89
90/**
91 * speedstep_smi_get_freqs - get the SpeedStep low & high frequencies
92 * @low: the low frequency value is placed here
93 * @high: the high frequency value is placed here
94 *
95 * Only available on later SpeedStep-enabled systems, returns false results or
96 * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing
97 * shows that the latter occurs if !(ist_info.event & 0xFFFF).
98 */
99static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
100{
101 u32 command, result = 0, edi, high_mhz, low_mhz, dummy;
102 u32 state = 0;
103 u32 function = GET_SPEEDSTEP_FREQS;
104
105 if (!(ist_info.event & 0xFFFF)) {
106 pr_debug("bug #1422 -- can't read freqs from BIOS\n");
107 return -ENODEV;
108 }
109
110 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
111
112 pr_debug("trying to determine frequencies with command %x at port %x\n",
113 command, smi_port);
114
115 __asm__ __volatile__(
116 "push %%ebp\n"
117 "out %%al, (%%dx)\n"
118 "pop %%ebp"
119 : "=a" (result),
120 "=b" (high_mhz),
121 "=c" (low_mhz),
122 "=d" (state), "=D" (edi), "=S" (dummy)
123 : "a" (command),
124 "b" (function),
125 "c" (state),
126 "d" (smi_port), "S" (0), "D" (0)
127 );
128
129 pr_debug("result %x, low_freq %u, high_freq %u\n",
130 result, low_mhz, high_mhz);
131
132 /* abort if results are obviously incorrect... */
133 if ((high_mhz + low_mhz) < 600)
134 return -EINVAL;
135
136 *high = high_mhz * 1000;
137 *low = low_mhz * 1000;
138
139 return result;
140}
141
142/**
143 * speedstep_get_state - read the current SpeedStep state
144 * Returns SPEEDSTEP_LOW or SPEEDSTEP_HIGH.
145 *
146 */
147static int speedstep_get_state(void)
148{
149 u32 function = GET_SPEEDSTEP_STATE;
150 u32 result, state, edi, command, dummy;
151
152 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
153
154 pr_debug("trying to determine current setting with command %x "
155 "at port %x\n", command, smi_port);
156
157 __asm__ __volatile__(
158 "push %%ebp\n"
159 "out %%al, (%%dx)\n"
160 "pop %%ebp\n"
161 : "=a" (result),
162 "=b" (state), "=D" (edi),
163 "=c" (dummy), "=d" (dummy), "=S" (dummy)
164 : "a" (command), "b" (function), "c" (0),
165 "d" (smi_port), "S" (0), "D" (0)
166 );
167
168 pr_debug("state is %x, result is %x\n", state, result);
169
170 return state & 1;
171}
172
173
174/**
175 * speedstep_set_state - set the SpeedStep state
176 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
177 *
178 */
179static void speedstep_set_state(unsigned int state)
180{
181 unsigned int result = 0, command, new_state, dummy;
182 unsigned long flags;
183 unsigned int function = SET_SPEEDSTEP_STATE;
184 unsigned int retry = 0;
185
186 if (state > 0x1)
187 return;
188
189 /* Disable IRQs */
190 local_irq_save(flags);
191
192 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
193
194 pr_debug("trying to set frequency to state %u "
195 "with command %x at port %x\n",
196 state, command, smi_port);
197
198 do {
199 if (retry) {
200 pr_debug("retry %u, previous result %u, waiting...\n",
201 retry, result);
202 mdelay(retry * 50);
203 }
204 retry++;
205 __asm__ __volatile__(
206 "push %%ebp\n"
207 "out %%al, (%%dx)\n"
208 "pop %%ebp"
209 : "=b" (new_state), "=D" (result),
210 "=c" (dummy), "=a" (dummy),
211 "=d" (dummy), "=S" (dummy)
212 : "a" (command), "b" (function), "c" (state),
213 "d" (smi_port), "S" (0), "D" (0)
214 );
215 } while ((new_state != state) && (retry <= SMI_TRIES));
216
217 /* enable IRQs */
218 local_irq_restore(flags);
219
220 if (new_state == state)
221 pr_debug("change to %u MHz succeeded after %u tries "
222 "with result %u\n",
223 (speedstep_freqs[new_state].frequency / 1000),
224 retry, result);
225 else
226 printk(KERN_ERR "cpufreq: change to state %u "
227 "failed with new_state %u and result %u\n",
228 state, new_state, result);
229
230 return;
231}
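/*
 * Editor's note (not part of the original patch): with SMI_TRIES == 5
 * the loop above makes up to six attempts, busy-waiting mdelay(50),
 * mdelay(100), ... mdelay(250) between them, so a busy SMI handler
 * (e.g. blocked by DMA) gets roughly 750 ms before the error is logged.
 */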
232
233
234/**
235 * speedstep_target - set a new CPUFreq policy
236 * @policy: new policy
237 * @target_freq: new freq
238 * @relation:
239 *
240 * Sets a new CPUFreq policy/freq.
241 */
242static int speedstep_target(struct cpufreq_policy *policy,
243 unsigned int target_freq, unsigned int relation)
244{
245 unsigned int newstate = 0;
246 struct cpufreq_freqs freqs;
247
248 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
249 target_freq, relation, &newstate))
250 return -EINVAL;
251
252 freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
253 freqs.new = speedstep_freqs[newstate].frequency;
254 freqs.cpu = 0; /* speedstep.c is UP only driver */
255
256 if (freqs.old == freqs.new)
257 return 0;
258
259 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
260 speedstep_set_state(newstate);
261 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
262
263 return 0;
264}
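/*
 * Editor's note (not part of the original patch): the PRECHANGE /
 * POSTCHANGE pair bracketing speedstep_set_state() is the standard
 * cpufreq transition protocol; notifiers such as the TSC code and
 * cpufreq statistics depend on seeing both events for every switch.
 */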
265
266
267/**
268 * speedstep_verify - verifies a new CPUFreq policy
269 * @policy: new policy
270 *
271 * Limit must be within speedstep_low_freq and speedstep_high_freq, with
272 * at least one border included.
273 */
274static int speedstep_verify(struct cpufreq_policy *policy)
275{
276 return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
277}
278
279
280static int speedstep_cpu_init(struct cpufreq_policy *policy)
281{
282 int result;
283 unsigned int speed, state;
284 unsigned int *low, *high;
285
286 /* capability check */
287 if (policy->cpu != 0)
288 return -ENODEV;
289
290 result = speedstep_smi_ownership();
291 if (result) {
292 pr_debug("fails in acquiring ownership of a SMI interface.\n");
293 return -EINVAL;
294 }
295
296 /* detect low and high frequency */
297 low = &speedstep_freqs[SPEEDSTEP_LOW].frequency;
298 high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency;
299
300 result = speedstep_smi_get_freqs(low, high);
301 if (result) {
302 /* fall back to the speedstep-lib.c detection mechanism:
303 * try both states out */
304 pr_debug("could not detect low and high frequencies "
305 "by SMI call.\n");
306 result = speedstep_get_freqs(speedstep_processor,
307 low, high,
308 NULL,
309 &speedstep_set_state);
310
311 if (result) {
312 pr_debug("could not detect two different speeds"
313 " -- aborting.\n");
314 return result;
315 } else
316 pr_debug("workaround worked.\n");
317 }
318
319 /* get current speed setting */
320 state = speedstep_get_state();
321 speed = speedstep_freqs[state].frequency;
322
323 pr_debug("currently at %s speed setting - %i MHz\n",
324 (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
325 ? "low" : "high",
326 (speed / 1000));
327
328 /* cpuinfo and default policy values */
329 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
330 policy->cur = speed;
331
332 result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
333 if (result)
334 return result;
335
336 cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
337
338 return 0;
339}
340
341static int speedstep_cpu_exit(struct cpufreq_policy *policy)
342{
343 cpufreq_frequency_table_put_attr(policy->cpu);
344 return 0;
345}
346
347static unsigned int speedstep_get(unsigned int cpu)
348{
349 if (cpu)
350 return -ENODEV;
351 return speedstep_get_frequency(speedstep_processor);
352}
353
354
355static int speedstep_resume(struct cpufreq_policy *policy)
356{
357 int result = speedstep_smi_ownership();
358
359 if (result)
360 pr_debug("fails in re-acquiring ownership of a SMI interface.\n");
361
362 return result;
363}
364
365static struct freq_attr *speedstep_attr[] = {
366 &cpufreq_freq_attr_scaling_available_freqs,
367 NULL,
368};
369
370static struct cpufreq_driver speedstep_driver = {
371 .name = "speedstep-smi",
372 .verify = speedstep_verify,
373 .target = speedstep_target,
374 .init = speedstep_cpu_init,
375 .exit = speedstep_cpu_exit,
376 .get = speedstep_get,
377 .resume = speedstep_resume,
378 .owner = THIS_MODULE,
379 .attr = speedstep_attr,
380};
381
382/**
383 * speedstep_init - initializes the SpeedStep CPUFreq driver
384 *
385 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
386 * BIOS, -EINVAL on problems during initialization, and zero on
387 * success.
388 */
389static int __init speedstep_init(void)
390{
391 speedstep_processor = speedstep_detect_processor();
392
393 switch (speedstep_processor) {
394 case SPEEDSTEP_CPU_PIII_T:
395 case SPEEDSTEP_CPU_PIII_C:
396 case SPEEDSTEP_CPU_PIII_C_EARLY:
397 break;
398 default:
399 speedstep_processor = 0;
400 }
401
402 if (!speedstep_processor) {
403 pr_debug("No supported Intel CPU detected.\n");
404 return -ENODEV;
405 }
406
407 pr_debug("signature:0x%.8ulx, command:0x%.8ulx, "
408 "event:0x%.8ulx, perf_level:0x%.8ulx.\n",
409 ist_info.signature, ist_info.command,
410 ist_info.event, ist_info.perf_level);
411
412 /* Error if no IST-SMI BIOS or no PARM
413 sig= 'ISGE' aka 'Intel Speedstep Gate E' */
414 if ((ist_info.signature != 0x47534943) &&
415 ((smi_port == 0) || (smi_cmd == 0)))
416 return -ENODEV;
417
418 if (smi_sig == 1)
419 smi_sig = 0x47534943;
420 else
421 smi_sig = ist_info.signature;
422
423 /* set up smi_port from module parameter or BIOS */
424 if ((smi_port > 0xff) || (smi_port < 0))
425 return -EINVAL;
426 else if (smi_port == 0)
427 smi_port = ist_info.command & 0xff;
428
429 if ((smi_cmd > 0xff) || (smi_cmd < 0))
430 return -EINVAL;
431 else if (smi_cmd == 0)
432 smi_cmd = (ist_info.command >> 16) & 0xff;
433
434 return cpufreq_register_driver(&speedstep_driver);
435}
436
437
438/**
439 * speedstep_exit - unregisters SpeedStep support
440 *
441 * Unregisters SpeedStep support.
442 */
443static void __exit speedstep_exit(void)
444{
445 cpufreq_unregister_driver(&speedstep_driver);
446}
447
448module_param(smi_port, int, 0444);
449module_param(smi_cmd, int, 0444);
450module_param(smi_sig, uint, 0444);
451
452MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value "
453 "-- Intel's default setting is 0xb2");
454MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value "
455 "-- Intel's default setting is 0x82");
456MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the "
457 "SMI interface.");
458
459MODULE_AUTHOR("Hiroshi Miura");
460MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface.");
461MODULE_LICENSE("GPL");
462
463module_init(speedstep_init);
464module_exit(speedstep_exit);
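/*
 * Editor's note (not part of the original patch): on machines whose IST
 * table is absent or wrong, the BIOS-provided values can be overridden
 * at load time, e.g.
 *
 *	modprobe speedstep-smi smi_port=0xb2 smi_cmd=0x82 smi_sig=1
 *
 * using Intel's default port/command from the parameter descriptions
 * above.
 */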
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index b3a25a55ba23..efba163595db 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -157,4 +157,6 @@ config SIGMA
 	  If unsure, say N here. Drivers that need these helpers will select
 	  this option automatically.
 
+source "drivers/firmware/google/Kconfig"
+
 endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 00bb0b80a79f..47338c979126 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
 obj-$(CONFIG_ISCSI_IBFT)	+= iscsi_ibft.o
 obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 obj-$(CONFIG_SIGMA)		+= sigma.o
+
+obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 96c25d93eed1..f1b7f659d3c9 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -531,8 +531,8 @@ static int
 edd_has_edd30(struct edd_device *edev)
 {
 	struct edd_info *info;
-	int i, nonzero_path = 0;
-	char c;
+	int i;
+	u8 csum = 0;
 
 	if (!edev)
 		return 0;
@@ -544,16 +544,16 @@ edd_has_edd30(struct edd_device *edev)
 		return 0;
 	}
 
-	for (i = 30; i <= 73; i++) {
-		c = *(((uint8_t *) info) + i + 4);
-		if (c) {
-			nonzero_path++;
-			break;
-		}
-	}
-	if (!nonzero_path) {
+
+	/* We support only T13 spec */
+	if (info->params.device_path_info_length != 44)
+		return 0;
+
+	for (i = 30; i < info->params.device_path_info_length + 30; i++)
+		csum += *(((u8 *)&info->params) + i);
+
+	if (csum)
 		return 0;
-	}
 
 	return 1;
 }
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index ff0c373e3bbf..a2d2f1f0d4f3 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -677,8 +677,8 @@ create_efivars_bin_attributes(struct efivars *efivars)
 
 	return 0;
 out_free:
-	kfree(efivars->new_var);
-	efivars->new_var = NULL;
+	kfree(efivars->del_var);
+	efivars->del_var = NULL;
 	kfree(efivars->new_var);
 	efivars->new_var = NULL;
 	return error;
@@ -803,6 +803,8 @@ efivars_init(void)
 	ops.set_variable = efi.set_variable;
 	ops.get_next_variable = efi.get_next_variable;
 	error = register_efivars(&__efivars, &ops, efi_kobj);
+	if (error)
+		goto err_put;
 
 	/* Don't forget the systab entry */
 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
@@ -810,18 +812,25 @@ efivars_init(void)
 		printk(KERN_ERR
 		       "efivars: Sysfs attribute export failed with error %d.\n",
 		       error);
-		unregister_efivars(&__efivars);
-		kobject_put(efi_kobj);
+		goto err_unregister;
 	}
 
+	return 0;
+
+err_unregister:
+	unregister_efivars(&__efivars);
+err_put:
+	kobject_put(efi_kobj);
 	return error;
 }
 
 static void __exit
 efivars_exit(void)
 {
-	unregister_efivars(&__efivars);
-	kobject_put(efi_kobj);
+	if (efi_enabled) {
+		unregister_efivars(&__efivars);
+		kobject_put(efi_kobj);
+	}
 }
 
 module_init(efivars_init);
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
new file mode 100644
index 000000000000..87096b6ca5c9
--- /dev/null
+++ b/drivers/firmware/google/Kconfig
@@ -0,0 +1,31 @@
1config GOOGLE_FIRMWARE
2 bool "Google Firmware Drivers"
3 depends on X86
4 default n
5 help
6 These firmware drivers are used by Google's servers. They are
7 only useful if you are working directly on one of their
8 proprietary servers. If in doubt, say "N".
9
10menu "Google Firmware Drivers"
11 depends on GOOGLE_FIRMWARE
12
13config GOOGLE_SMI
14 tristate "SMI interface for Google platforms"
15 depends on ACPI && DMI
16 select EFI_VARS
17 help
18 Say Y here if you want to enable SMI callbacks for Google
19 platforms. This provides an interface for writing to and
20 clearing the EFI event log and reading and writing NVRAM
21 variables.
22
23config GOOGLE_MEMCONSOLE
24 tristate "Firmware Memory Console"
25 depends on DMI
26 help
27 This option enables the kernel to search for a firmware log in
28 the EBDA on Google servers. If found, this log is exported to
29 userland in the file /sys/firmware/log.
30
31endmenu
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
new file mode 100644
index 000000000000..54a294e3cb61
--- /dev/null
+++ b/drivers/firmware/google/Makefile
@@ -0,0 +1,3 @@
1
2obj-$(CONFIG_GOOGLE_SMI) += gsmi.o
3obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
new file mode 100644
index 000000000000..fa7f0b3e81dd
--- /dev/null
+++ b/drivers/firmware/google/gsmi.c
@@ -0,0 +1,940 @@
1/*
2 * Copyright 2010 Google Inc. All Rights Reserved.
3 * Author: dlaurie@google.com (Duncan Laurie)
4 *
5 * Re-worked to expose sysfs APIs by mikew@google.com (Mike Waychison)
6 *
7 * EFI SMI interface for Google platforms
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/types.h>
13#include <linux/device.h>
14#include <linux/platform_device.h>
15#include <linux/errno.h>
16#include <linux/string.h>
17#include <linux/spinlock.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmapool.h>
20#include <linux/fs.h>
21#include <linux/slab.h>
22#include <linux/ioctl.h>
23#include <linux/acpi.h>
24#include <linux/io.h>
25#include <linux/uaccess.h>
26#include <linux/dmi.h>
27#include <linux/kdebug.h>
28#include <linux/reboot.h>
29#include <linux/efi.h>
30
31#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
32/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
33#define GSMI_SHUTDOWN_NMIWDT 1 /* NMI Watchdog */
34#define GSMI_SHUTDOWN_PANIC 2 /* Panic */
35#define GSMI_SHUTDOWN_OOPS 3 /* Oops */
36#define GSMI_SHUTDOWN_DIE 4 /* Die -- No longer meaningful */
37#define GSMI_SHUTDOWN_MCE 5 /* Machine Check */
38#define GSMI_SHUTDOWN_SOFTWDT 6 /* Software Watchdog */
39#define GSMI_SHUTDOWN_MBE 7 /* Uncorrected ECC */
40#define GSMI_SHUTDOWN_TRIPLE 8 /* Triple Fault */
41
42#define DRIVER_VERSION "1.0"
43#define GSMI_GUID_SIZE 16
44#define GSMI_BUF_SIZE 1024
45#define GSMI_BUF_ALIGN sizeof(u64)
46#define GSMI_CALLBACK 0xef
47
48/* SMI return codes */
49#define GSMI_SUCCESS 0x00
50#define GSMI_UNSUPPORTED2 0x03
51#define GSMI_LOG_FULL 0x0b
52#define GSMI_VAR_NOT_FOUND 0x0e
53#define GSMI_HANDSHAKE_SPIN 0x7d
54#define GSMI_HANDSHAKE_CF 0x7e
55#define GSMI_HANDSHAKE_NONE 0x7f
56#define GSMI_INVALID_PARAMETER 0x82
57#define GSMI_UNSUPPORTED 0x83
58#define GSMI_BUFFER_TOO_SMALL 0x85
59#define GSMI_NOT_READY 0x86
60#define GSMI_DEVICE_ERROR 0x87
61#define GSMI_NOT_FOUND 0x8e
62
63#define QUIRKY_BOARD_HASH 0x78a30a50
64
65/* Internally used commands passed to the firmware */
66#define GSMI_CMD_GET_NVRAM_VAR 0x01
67#define GSMI_CMD_GET_NEXT_VAR 0x02
68#define GSMI_CMD_SET_NVRAM_VAR 0x03
69#define GSMI_CMD_SET_EVENT_LOG 0x08
70#define GSMI_CMD_CLEAR_EVENT_LOG 0x09
71#define GSMI_CMD_CLEAR_CONFIG 0x20
72#define GSMI_CMD_HANDSHAKE_TYPE 0xC1
73
74/* Magic entry type for kernel events */
75#define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD
76
77/* SMI buffers must be in 32bit physical address space */
78struct gsmi_buf {
79 u8 *start; /* start of buffer */
80 size_t length; /* length of buffer */
81 dma_addr_t handle; /* dma allocation handle */
82 u32 address; /* physical address of buffer */
83};
84
85struct gsmi_device {
86 struct platform_device *pdev; /* platform device */
87 struct gsmi_buf *name_buf; /* variable name buffer */
88 struct gsmi_buf *data_buf; /* generic data buffer */
89 struct gsmi_buf *param_buf; /* parameter buffer */
90 spinlock_t lock; /* serialize access to SMIs */
91 u16 smi_cmd; /* SMI command port */
92 int handshake_type; /* firmware handler interlock type */
93 struct dma_pool *dma_pool; /* DMA buffer pool */
94} gsmi_dev;
95
96/* Packed structures for communicating with the firmware */
97struct gsmi_nvram_var_param {
98 efi_guid_t guid;
99 u32 name_ptr;
100 u32 attributes;
101 u32 data_len;
102 u32 data_ptr;
103} __packed;
104
105struct gsmi_get_next_var_param {
106 u8 guid[GSMI_GUID_SIZE];
107 u32 name_ptr;
108 u32 name_len;
109} __packed;
110
111struct gsmi_set_eventlog_param {
112 u32 data_ptr;
113 u32 data_len;
114 u32 type;
115} __packed;
116
117/* Event log formats */
118struct gsmi_log_entry_type_1 {
119 u16 type;
120 u32 instance;
121} __packed;
122
123
124/*
125 * Some platforms don't have explicit SMI handshake
126 * and need to wait for SMI to complete.
127 */
128#define GSMI_DEFAULT_SPINCOUNT 0x10000
129static unsigned int spincount = GSMI_DEFAULT_SPINCOUNT;
130module_param(spincount, uint, 0600);
131MODULE_PARM_DESC(spincount,
132 "The number of loop iterations to use when using the spin handshake.");
133
134static struct gsmi_buf *gsmi_buf_alloc(void)
135{
136 struct gsmi_buf *smibuf;
137
138 smibuf = kzalloc(sizeof(*smibuf), GFP_KERNEL);
139 if (!smibuf) {
140 printk(KERN_ERR "gsmi: out of memory\n");
141 return NULL;
142 }
143
144 /* allocate buffer in 32bit address space */
145 smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL,
146 &smibuf->handle);
147 if (!smibuf->start) {
148 printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
149 kfree(smibuf);
150 return NULL;
151 }
152
153 /* fill in the buffer handle */
154 smibuf->length = GSMI_BUF_SIZE;
155 smibuf->address = (u32)virt_to_phys(smibuf->start);
156
157 return smibuf;
158}
159
160static void gsmi_buf_free(struct gsmi_buf *smibuf)
161{
162 if (smibuf) {
163 if (smibuf->start)
164 dma_pool_free(gsmi_dev.dma_pool, smibuf->start,
165 smibuf->handle);
166 kfree(smibuf);
167 }
168}
169
170/*
171 * Make a call to gsmi func(sub). GSMI error codes are translated to
172 * in-kernel errnos (0 on success, -ERRNO on error).
173 */
174static int gsmi_exec(u8 func, u8 sub)
175{
176 u16 cmd = (sub << 8) | func;
177 u16 result = 0;
178 int rc = 0;
179
180 /*
181 * AH : Subfunction number
182 * AL : Function number
183 * EBX : Parameter block address
184 * DX : SMI command port
185 *
186 * Three protocols here. See also the comment in gsmi_init().
187 */
188 if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_CF) {
189 /*
190 * If handshake_type == HANDSHAKE_CF then set CF on the
191 * way in and wait for the handler to clear it; this avoids
192 * corrupting register state on those chipsets which have
193 * a delay between writing the SMI trigger register and
194 * entering SMM.
195 */
196 asm volatile (
197 "stc\n"
198 "outb %%al, %%dx\n"
199 "1: jc 1b\n"
200 : "=a" (result)
201 : "0" (cmd),
202 "d" (gsmi_dev.smi_cmd),
203 "b" (gsmi_dev.param_buf->address)
204 : "memory", "cc"
205 );
206 } else if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_SPIN) {
207 /*
208 * If handshake_type == HANDSHAKE_SPIN we spin a
209 * hundred-ish usecs to ensure the SMI has triggered.
210 */
211 asm volatile (
212 "outb %%al, %%dx\n"
213 "1: loop 1b\n"
214 : "=a" (result)
215 : "0" (cmd),
216 "d" (gsmi_dev.smi_cmd),
217 "b" (gsmi_dev.param_buf->address),
218 "c" (spincount)
219 : "memory", "cc"
220 );
221 } else {
222 /*
223 * If handshake_type == HANDSHAKE_NONE we do nothing;
224 * either we don't need to or it's legacy firmware that
225 * doesn't understand the CF protocol.
226 */
227 asm volatile (
228 "outb %%al, %%dx\n\t"
229 : "=a" (result)
230 : "0" (cmd),
231 "d" (gsmi_dev.smi_cmd),
232 "b" (gsmi_dev.param_buf->address)
233 : "memory", "cc"
234 );
235 }
236
237 /* check return code from SMI handler */
238 switch (result) {
239 case GSMI_SUCCESS:
240 break;
241 case GSMI_VAR_NOT_FOUND:
242 /* not really an error, but let the caller know */
243 rc = 1;
244 break;
245 case GSMI_INVALID_PARAMETER:
246 printk(KERN_ERR "gsmi: exec 0x%04x: Invalid parameter\n", cmd);
247 rc = -EINVAL;
248 break;
249 case GSMI_BUFFER_TOO_SMALL:
250 printk(KERN_ERR "gsmi: exec 0x%04x: Buffer too small\n", cmd);
251 rc = -ENOMEM;
252 break;
253 case GSMI_UNSUPPORTED:
254 case GSMI_UNSUPPORTED2:
255 if (sub != GSMI_CMD_HANDSHAKE_TYPE)
256 printk(KERN_ERR "gsmi: exec 0x%04x: Not supported\n",
257 cmd);
258 rc = -ENOSYS;
259 break;
260 case GSMI_NOT_READY:
261 printk(KERN_ERR "gsmi: exec 0x%04x: Not ready\n", cmd);
262 rc = -EBUSY;
263 break;
264 case GSMI_DEVICE_ERROR:
265 printk(KERN_ERR "gsmi: exec 0x%04x: Device error\n", cmd);
266 rc = -EFAULT;
267 break;
268 case GSMI_NOT_FOUND:
269 printk(KERN_ERR "gsmi: exec 0x%04x: Data not found\n", cmd);
270 rc = -ENOENT;
271 break;
272 case GSMI_LOG_FULL:
273 printk(KERN_ERR "gsmi: exec 0x%04x: Log full\n", cmd);
274 rc = -ENOSPC;
275 break;
276 case GSMI_HANDSHAKE_CF:
277 case GSMI_HANDSHAKE_SPIN:
278 case GSMI_HANDSHAKE_NONE:
279 rc = result;
280 break;
281 default:
282 printk(KERN_ERR "gsmi: exec 0x%04x: Unknown error 0x%04x\n",
283 cmd, result);
284 rc = -ENXIO;
285 }
286
287 return rc;
288}
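/*
 * Editor's note -- worked example (not part of the original patch):
 * for func = GSMI_CALLBACK (0xef) and sub = GSMI_CMD_SET_EVENT_LOG
 * (0x08), cmd = (0x08 << 8) | 0xef = 0x08ef, so the "outb %al" above
 * sends 0xef to the SMI command port while AH carries subfunction 0x08
 * and EBX points at the parameter buffer.
 */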
289
290/* Return the number of unicode characters in data */
291static size_t
292utf16_strlen(efi_char16_t *data, unsigned long maxlength)
293{
294 unsigned long length = 0;
295
296 while (*data++ != 0 && length < maxlength)
297 length++;
298 return length;
299}
300
301static efi_status_t gsmi_get_variable(efi_char16_t *name,
302 efi_guid_t *vendor, u32 *attr,
303 unsigned long *data_size,
304 void *data)
305{
306 struct gsmi_nvram_var_param param = {
307 .name_ptr = gsmi_dev.name_buf->address,
308 .data_ptr = gsmi_dev.data_buf->address,
309 .data_len = (u32)*data_size,
310 };
311 efi_status_t ret = EFI_SUCCESS;
312 unsigned long flags;
313 size_t name_len = utf16_strlen(name, GSMI_BUF_SIZE / 2);
314 int rc;
315
316 if (name_len >= GSMI_BUF_SIZE / 2)
317 return EFI_BAD_BUFFER_SIZE;
318
319 spin_lock_irqsave(&gsmi_dev.lock, flags);
320
321 /* Vendor guid */
322 memcpy(&param.guid, vendor, sizeof(param.guid));
323
324 /* variable name, already in UTF-16 */
325 memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
326 memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
327
328 /* data pointer */
329 memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
330
331 /* parameter buffer */
332 memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
333 memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
334
335 rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NVRAM_VAR);
336 if (rc < 0) {
337 printk(KERN_ERR "gsmi: Get Variable failed\n");
338 ret = EFI_LOAD_ERROR;
339 } else if (rc == 1) {
340 /* variable was not found */
341 ret = EFI_NOT_FOUND;
342 } else {
343 /* Get the arguments back */
344 memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
345
346 /* The size reported is the min of all of our buffers */
347 *data_size = min(*data_size, gsmi_dev.data_buf->length);
348 *data_size = min_t(unsigned long, *data_size, param.data_len);
349
350 /* Copy data back to return buffer. */
351 memcpy(data, gsmi_dev.data_buf->start, *data_size);
352
353 /* All variables have the following attributes */
354 *attr = EFI_VARIABLE_NON_VOLATILE |
355 EFI_VARIABLE_BOOTSERVICE_ACCESS |
356 EFI_VARIABLE_RUNTIME_ACCESS;
357 }
358
359 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
360
361 return ret;
362}
363
364static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
365 efi_char16_t *name,
366 efi_guid_t *vendor)
367{
368 struct gsmi_get_next_var_param param = {
369 .name_ptr = gsmi_dev.name_buf->address,
370 .name_len = gsmi_dev.name_buf->length,
371 };
372 efi_status_t ret = EFI_SUCCESS;
373 int rc;
374 unsigned long flags;
375
376 /* For the moment, only support buffers that exactly match in size */
377 if (*name_size != GSMI_BUF_SIZE)
378 return EFI_BAD_BUFFER_SIZE;
379
380 /* Let's make sure the thing is at least null-terminated */
381 if (utf16_strlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2)
382 return EFI_INVALID_PARAMETER;
383
384 spin_lock_irqsave(&gsmi_dev.lock, flags);
385
386 /* guid */
387 memcpy(&param.guid, vendor, sizeof(param.guid));
388
389 /* variable name, already in UTF-16 */
390 memcpy(gsmi_dev.name_buf->start, name, *name_size);
391
392 /* parameter buffer */
393 memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
394 memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
395
396 rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NEXT_VAR);
397 if (rc < 0) {
398 printk(KERN_ERR "gsmi: Get Next Variable Name failed\n");
399 ret = EFI_LOAD_ERROR;
400 } else if (rc == 1) {
401 /* variable not found -- end of list */
402 ret = EFI_NOT_FOUND;
403 } else {
404 /* copy variable data back to return buffer */
405 memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
406
407 /* Copy the name back */
408 memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE);
409 *name_size = utf16_strlen(name, GSMI_BUF_SIZE / 2) * 2;
410
411 /* copy guid to return buffer */
412 memcpy(vendor, &param.guid, sizeof(param.guid));
413 ret = EFI_SUCCESS;
414 }
415
416 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
417
418 return ret;
419}
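/*
 * Editor's sketch (not part of the original patch): efivars-style
 * enumeration over this interface.  The buffer must be exactly
 * GSMI_BUF_SIZE, as enforced above; NULL_GUID comes from <linux/efi.h>.
 */
#if 0
static void example_dump_variable_names(void)
{
	static efi_char16_t name[GSMI_BUF_SIZE / 2];	/* zeroed, so NUL-terminated */
	efi_guid_t guid = NULL_GUID;
	unsigned long size = GSMI_BUF_SIZE;

	while (gsmi_get_next_variable(&size, name, &guid) == EFI_SUCCESS) {
		/* consume (guid, name) here */
		size = GSMI_BUF_SIZE;	/* reset for the next call */
	}
}
#endif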
420
421static efi_status_t gsmi_set_variable(efi_char16_t *name,
422 efi_guid_t *vendor,
423 unsigned long attr,
424 unsigned long data_size,
425 void *data)
426{
427 struct gsmi_nvram_var_param param = {
428 .name_ptr = gsmi_dev.name_buf->address,
429 .data_ptr = gsmi_dev.data_buf->address,
430 .data_len = (u32)data_size,
431 .attributes = EFI_VARIABLE_NON_VOLATILE |
432 EFI_VARIABLE_BOOTSERVICE_ACCESS |
433 EFI_VARIABLE_RUNTIME_ACCESS,
434 };
435 size_t name_len = utf16_strlen(name, GSMI_BUF_SIZE / 2);
436 efi_status_t ret = EFI_SUCCESS;
437 int rc;
438 unsigned long flags;
439
440 if (name_len >= GSMI_BUF_SIZE / 2)
441 return EFI_BAD_BUFFER_SIZE;
442
443 spin_lock_irqsave(&gsmi_dev.lock, flags);
444
445 /* guid */
446 memcpy(&param.guid, vendor, sizeof(param.guid));
447
448 /* variable name, already in UTF-16 */
449 memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
450 memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
451
452 /* data pointer */
453 memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
454 memcpy(gsmi_dev.data_buf->start, data, data_size);
455
456 /* parameter buffer */
457 memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
458 memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
459
460 rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_NVRAM_VAR);
461 if (rc < 0) {
462 printk(KERN_ERR "gsmi: Set Variable failed\n");
463 ret = EFI_INVALID_PARAMETER;
464 }
465
466 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
467
468 return ret;
469}
470
471static const struct efivar_operations efivar_ops = {
472 .get_variable = gsmi_get_variable,
473 .set_variable = gsmi_set_variable,
474 .get_next_variable = gsmi_get_next_variable,
475};
476
477static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
478 struct bin_attribute *bin_attr,
479 char *buf, loff_t pos, size_t count)
480{
481 struct gsmi_set_eventlog_param param = {
482 .data_ptr = gsmi_dev.data_buf->address,
483 };
484 int rc = 0;
485 unsigned long flags;
486
487 /* Pull the type out */
488 if (count < sizeof(u32))
489 return -EINVAL;
490 param.type = *(u32 *)buf;
491 count -= sizeof(u32);
492 buf += sizeof(u32);
493
494 /* The remaining buffer is the data payload */
495 if (count > gsmi_dev.data_buf->length)
496 return -EINVAL;
497 param.data_len = count; /* the type word was already stripped above */
498
499 spin_lock_irqsave(&gsmi_dev.lock, flags);
500
501 /* data pointer */
502 memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
503 memcpy(gsmi_dev.data_buf->start, buf, param.data_len);
504
505 /* parameter buffer */
506 memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
507 memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
508
509 rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
510 if (rc < 0)
511 printk(KERN_ERR "gsmi: Set Event Log failed\n");
512
513 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
514
515 return (rc < 0) ? rc : count + sizeof(u32); /* bytes consumed */
516
517}
518
519static struct bin_attribute eventlog_bin_attr = {
520 .attr = {.name = "append_to_eventlog", .mode = 0200},
521 .write = eventlog_write,
522};
523
524static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
525 struct kobj_attribute *attr,
526 const char *buf, size_t count)
527{
528 int rc;
529 unsigned long flags;
530 unsigned long val;
531 struct {
532 u32 percentage;
533 u32 data_type;
534 } param;
535
536 rc = strict_strtoul(buf, 0, &val);
537 if (rc)
538 return rc;
539
540 /*
541 * Value entered is a percentage, 0 through 100, anything else
542 * is invalid.
543 */
544 if (val > 100)
545 return -EINVAL;
546
547 /* data_type here selects the smbios event log. */
548 param.percentage = val;
549 param.data_type = 0;
550
551 spin_lock_irqsave(&gsmi_dev.lock, flags);
552
553 /* parameter buffer */
554 memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
555 memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
556
557 rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_EVENT_LOG);
558
559 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
560
561 if (rc)
562 return rc;
563 return count;
564}
565
566static struct kobj_attribute gsmi_clear_eventlog_attr = {
567 .attr = {.name = "clear_eventlog", .mode = 0200},
568 .store = gsmi_clear_eventlog_store,
569};
570
571static ssize_t gsmi_clear_config_store(struct kobject *kobj,
572 struct kobj_attribute *attr,
573 const char *buf, size_t count)
574{
575 int rc;
576 unsigned long flags;
577
578 spin_lock_irqsave(&gsmi_dev.lock, flags);
579
580 /* clear parameter buffer */
581 memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
582
583 rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_CONFIG);
584
585 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
586
587 if (rc)
588 return rc;
589 return count;
590}
591
592static struct kobj_attribute gsmi_clear_config_attr = {
593 .attr = {.name = "clear_config", .mode = 0200},
594 .store = gsmi_clear_config_store,
595};
596
597static const struct attribute *gsmi_attrs[] = {
598 &gsmi_clear_config_attr.attr,
599 &gsmi_clear_eventlog_attr.attr,
600 NULL,
601};
602
603static int gsmi_shutdown_reason(int reason)
604{
605 struct gsmi_log_entry_type_1 entry = {
606 .type = GSMI_LOG_ENTRY_TYPE_KERNEL,
607 .instance = reason,
608 };
609 struct gsmi_set_eventlog_param param = {
610 .data_len = sizeof(entry),
611 .type = 1,
612 };
613 static int saved_reason;
614 int rc = 0;
615 unsigned long flags;
616
617 /* avoid duplicate entries in the log */
618 if (saved_reason & (1 << reason))
619 return 0;
620
621 spin_lock_irqsave(&gsmi_dev.lock, flags);
622
623 saved_reason |= (1 << reason);
624
625 /* data pointer */
626 memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
627 memcpy(gsmi_dev.data_buf->start, &entry, sizeof(entry));
628
629 /* parameter buffer */
630 param.data_ptr = gsmi_dev.data_buf->address;
631 memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
632 memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
633
634 rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
635
636 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
637
638 if (rc < 0)
639 printk(KERN_ERR "gsmi: Log Shutdown Reason failed\n");
640 else
641 printk(KERN_EMERG "gsmi: Log Shutdown Reason 0x%02x\n",
642 reason);
643
644 return rc;
645}
646
647static int gsmi_reboot_callback(struct notifier_block *nb,
648 unsigned long reason, void *arg)
649{
650 gsmi_shutdown_reason(GSMI_SHUTDOWN_CLEAN);
651 return NOTIFY_DONE;
652}
653
654static struct notifier_block gsmi_reboot_notifier = {
655 .notifier_call = gsmi_reboot_callback
656};
657
658static int gsmi_die_callback(struct notifier_block *nb,
659 unsigned long reason, void *arg)
660{
661 if (reason == DIE_OOPS)
662 gsmi_shutdown_reason(GSMI_SHUTDOWN_OOPS);
663 return NOTIFY_DONE;
664}
665
666static struct notifier_block gsmi_die_notifier = {
667 .notifier_call = gsmi_die_callback
668};
669
670static int gsmi_panic_callback(struct notifier_block *nb,
671 unsigned long reason, void *arg)
672{
673 gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
674 return NOTIFY_DONE;
675}
676
677static struct notifier_block gsmi_panic_notifier = {
678 .notifier_call = gsmi_panic_callback,
679};
680
681/*
682 * This hash function was blatantly copied from include/linux/hash.h.
683 * It is used by this driver to obfuscate a board name that requires a
684 * quirk within this driver.
685 *
686 * Please do not remove this copy of the function as any changes to the
687 * global utility hash_64() function would break this driver's ability
688 * to identify a board and provide the appropriate quirk -- mikew@google.com
689 */
690static u64 __init local_hash_64(u64 val, unsigned bits)
691{
692 u64 hash = val;
693
694 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
695 u64 n = hash;
696 n <<= 18;
697 hash -= n;
698 n <<= 33;
699 hash -= n;
700 n <<= 3;
701 hash += n;
702 n <<= 3;
703 hash -= n;
704 n <<= 4;
705 hash += n;
706 n <<= 2;
707 hash += n;
708
709 /* High bits are more random, so use them. */
710 return hash >> (64 - bits);
711}
712
713static u32 __init hash_oem_table_id(char s[8])
714{
715 u64 input;
716 memcpy(&input, s, 8);
717 return local_hash_64(input, 32);
718}
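/*
 * Editor's note (not part of the original patch): hash_oem_table_id()
 * above reinterprets the 8-byte ACPI oem_table_id as a u64 and keeps
 * the top 32 bits of local_hash_64(), so only the opaque value
 * QUIRKY_BOARD_HASH ever appears in the driver source.
 */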
719
720static struct dmi_system_id gsmi_dmi_table[] __initdata = {
721 {
722 .ident = "Google Board",
723 .matches = {
724 DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
725 },
726 },
727 {}
728};
729MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
730
731static __init int gsmi_system_valid(void)
732{
733 u32 hash;
734
735 if (!dmi_check_system(gsmi_dmi_table))
736 return -ENODEV;
737
738 /*
739 * Only newer firmware supports the gsmi interface. All older
740 * firmware that didn't support this interface used to plug the
741 * table name in the first four bytes of the oem_table_id field.
742 * Newer firmware doesn't do that though, so use that as the
743 * discriminant factor. We have to do this in order to
744 * whitewash our board names out of the public driver.
745 */
746 if (!strncmp(acpi_gbl_FADT.header.oem_table_id, "FACP", 4)) {
747 printk(KERN_INFO "gsmi: Board is too old\n");
748 return -ENODEV;
749 }
750
751 /* Disable on board with 1.0 BIOS due to Google bug 2602657 */
752 hash = hash_oem_table_id(acpi_gbl_FADT.header.oem_table_id);
753 if (hash == QUIRKY_BOARD_HASH) {
754 const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
755 if (strncmp(bios_ver, "1.0", 3) == 0) {
756 pr_info("gsmi: disabled on this board's BIOS %s\n",
757 bios_ver);
758 return -ENODEV;
759 }
760 }
761
762 /* check for valid SMI command port in ACPI FADT */
763 if (acpi_gbl_FADT.smi_command == 0) {
764 pr_info("gsmi: missing smi_command\n");
765 return -ENODEV;
766 }
767
768 /* Found */
769 return 0;
770}
771
772static struct kobject *gsmi_kobj;
773static struct efivars efivars;
774
775static __init int gsmi_init(void)
776{
777 unsigned long flags;
778 int ret;
779
780 ret = gsmi_system_valid();
781 if (ret)
782 return ret;
783
784 gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
785
786 /* register device */
787 gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0);
788 if (IS_ERR(gsmi_dev.pdev)) {
789 printk(KERN_ERR "gsmi: unable to register platform device\n");
790 return PTR_ERR(gsmi_dev.pdev);
791 }
792
793 /* SMI access needs to be serialized */
794 spin_lock_init(&gsmi_dev.lock);
795
796 /* SMI callbacks require 32bit addresses */
797 gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
798 gsmi_dev.pdev->dev.dma_mask =
799 &gsmi_dev.pdev->dev.coherent_dma_mask;
800 ret = -ENOMEM;
801 gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
802 GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
803 if (!gsmi_dev.dma_pool)
804 goto out_err;
805
806 /*
807 * pre-allocate buffers because sometimes we are called when
808 * this is not feasible: oops, panic, die, mce, etc
809 */
810 gsmi_dev.name_buf = gsmi_buf_alloc();
811 if (!gsmi_dev.name_buf) {
812 printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
813 goto out_err;
814 }
815
816 gsmi_dev.data_buf = gsmi_buf_alloc();
817 if (!gsmi_dev.data_buf) {
818 printk(KERN_ERR "gsmi: failed to allocate data buffer\n");
819 goto out_err;
820 }
821
822 gsmi_dev.param_buf = gsmi_buf_alloc();
823 if (!gsmi_dev.param_buf) {
824 printk(KERN_ERR "gsmi: failed to allocate param buffer\n");
825 goto out_err;
826 }
827
828 /*
829 * Determine type of handshake used to serialize the SMI
830 * entry. See also gsmi_exec().
831 *
832 * There's a "behavior" present on some chipsets where writing the
833 * SMI trigger register in the southbridge doesn't result in an
834 * immediate SMI. Rather, the processor can execute "a few" more
835 * instructions before the SMI takes effect. To ensure synchronous
836 * behavior, implement a handshake between the kernel driver and the
837 * firmware handler to spin until released. This ioctl determines
838 * the type of handshake.
839 *
840 * NONE: The firmware handler does not implement any
841 * handshake. Either it doesn't need to, or it's legacy firmware
842 * that doesn't know it needs to and never will.
843 *
844 * CF: The firmware handler will clear the CF in the saved
845 * state before returning. The driver may set the CF and test for
846 * it to clear before proceeding.
847 *
848 * SPIN: The firmware handler does not implement any handshake
849 * but the driver should spin for a hundred or so microseconds
850 * to ensure the SMI has triggered.
851 *
852 * Finally, the handler will return -ENOSYS if
853 * GSMI_CMD_HANDSHAKE_TYPE is unimplemented, which implies
854 * HANDSHAKE_NONE.
855 */
856 spin_lock_irqsave(&gsmi_dev.lock, flags);
857 gsmi_dev.handshake_type = GSMI_HANDSHAKE_SPIN;
858 gsmi_dev.handshake_type =
859 gsmi_exec(GSMI_CALLBACK, GSMI_CMD_HANDSHAKE_TYPE);
860 if (gsmi_dev.handshake_type == -ENOSYS)
861 gsmi_dev.handshake_type = GSMI_HANDSHAKE_NONE;
862 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
863
864 /* Remove and clean up gsmi if the handshake could not complete. */
865 if (gsmi_dev.handshake_type == -ENXIO) {
866 printk(KERN_INFO "gsmi version " DRIVER_VERSION
867 " failed to load\n");
868 ret = -ENODEV;
869 goto out_err;
870 }
871
872 printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n");
873
874 /* Register in the firmware directory */
875 ret = -ENOMEM;
876 gsmi_kobj = kobject_create_and_add("gsmi", firmware_kobj);
877 if (!gsmi_kobj) {
878 printk(KERN_INFO "gsmi: Failed to create firmware kobj\n");
879 goto out_err;
880 }
881
882 /* Setup eventlog access */
883 ret = sysfs_create_bin_file(gsmi_kobj, &eventlog_bin_attr);
884 if (ret) {
885 printk(KERN_INFO "gsmi: Failed to setup eventlog\n");
886 goto out_err;
887 }
888
889 /* Other attributes */
890 ret = sysfs_create_files(gsmi_kobj, gsmi_attrs);
891 if (ret) {
892 printk(KERN_INFO "gsmi: Failed to add attrs\n");
893 goto out_err;
894 }
895
896 if (register_efivars(&efivars, &efivar_ops, gsmi_kobj)) {
897 printk(KERN_INFO "gsmi: Failed to register efivars\n");
898 goto out_err;
899 }
900
901 register_reboot_notifier(&gsmi_reboot_notifier);
902 register_die_notifier(&gsmi_die_notifier);
903 atomic_notifier_chain_register(&panic_notifier_list,
904 &gsmi_panic_notifier);
905
906 return 0;
907
908 out_err:
909 kobject_put(gsmi_kobj);
910 gsmi_buf_free(gsmi_dev.param_buf);
911 gsmi_buf_free(gsmi_dev.data_buf);
912 gsmi_buf_free(gsmi_dev.name_buf);
913 if (gsmi_dev.dma_pool)
914 dma_pool_destroy(gsmi_dev.dma_pool);
915 platform_device_unregister(gsmi_dev.pdev);
916 pr_info("gsmi: failed to load: %d\n", ret);
917 return ret;
918}
919
920static void __exit gsmi_exit(void)
921{
922 unregister_reboot_notifier(&gsmi_reboot_notifier);
923 unregister_die_notifier(&gsmi_die_notifier);
924 atomic_notifier_chain_unregister(&panic_notifier_list,
925 &gsmi_panic_notifier);
926 unregister_efivars(&efivars);
927
928 kobject_put(gsmi_kobj);
929 gsmi_buf_free(gsmi_dev.param_buf);
930 gsmi_buf_free(gsmi_dev.data_buf);
931 gsmi_buf_free(gsmi_dev.name_buf);
932 dma_pool_destroy(gsmi_dev.dma_pool);
933 platform_device_unregister(gsmi_dev.pdev);
934}
935
936module_init(gsmi_init);
937module_exit(gsmi_exit);
938
939MODULE_AUTHOR("Google, Inc.");
940MODULE_LICENSE("GPL");
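Editor's note: the handshake taxonomy documented in gsmi_init() maps onto the trigger path roughly as follows. This is an illustrative sketch only, not part of the patch; gsmi_trigger() and gsmi_cf_cleared() are hypothetical helpers standing in for the real SMI port write and saved-flags check, and GSMI_HANDSHAKE_CF is the constant implied by the CF case.

#include <linux/delay.h>

/* Sketch: what each handshake type means for the caller, per the
 * comment block in gsmi_init(). For the CF handshake the driver sets
 * CF in the saved flags before triggering; firmware clears it once
 * the SMI has actually run. */
static void example_smi_call(int handshake_type)
{
	gsmi_trigger();				/* hypothetical SMI port write */

	switch (handshake_type) {
	case GSMI_HANDSHAKE_CF:
		while (!gsmi_cf_cleared())	/* hypothetical */
			cpu_relax();
		break;
	case GSMI_HANDSHAKE_SPIN:
		udelay(100);	/* "a hundred or so microseconds" */
		break;
	case GSMI_HANDSHAKE_NONE:
	default:
		break;		/* no handshake: not needed, or legacy firmware */
	}
}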
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
new file mode 100644
index 000000000000..2a90ba613613
--- /dev/null
+++ b/drivers/firmware/google/memconsole.c
@@ -0,0 +1,166 @@
1/*
2 * memconsole.c
3 *
4 * Infrastructure for importing the BIOS memory based console
5 * into the kernel log ringbuffer.
6 *
7 * Copyright 2010 Google Inc. All rights reserved.
8 */
9
10#include <linux/ctype.h>
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/sysfs.h>
15#include <linux/kobject.h>
16#include <linux/module.h>
17#include <linux/dmi.h>
18#include <asm/bios_ebda.h>
19
20#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
21#define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24))
22
23struct biosmemcon_ebda {
24 u32 signature;
25 union {
26 struct {
27 u8 enabled;
28 u32 buffer_addr;
29 u16 start;
30 u16 end;
31 u16 num_chars;
32 u8 wrapped;
33 } __packed v1;
34 struct {
35 u32 buffer_addr;
36 /* Misdocumented as number of pages! */
37 u16 num_bytes;
38 u16 start;
39 u16 end;
40 } __packed v2;
41 };
42} __packed;
43
44static char *memconsole_baseaddr;
45static size_t memconsole_length;
46
47static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
48 struct bin_attribute *bin_attr, char *buf,
49 loff_t pos, size_t count)
50{
51 return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
52 memconsole_length);
53}
54
55static struct bin_attribute memconsole_bin_attr = {
56 .attr = {.name = "log", .mode = 0444},
57 .read = memconsole_read,
58};
59
60
61static void found_v1_header(struct biosmemcon_ebda *hdr)
62{
63 printk(KERN_INFO "BIOS console v1 EBDA structure found at %p\n", hdr);
64 printk(KERN_INFO "BIOS console buffer at 0x%.8x, "
65 "start = %d, end = %d, num = %d\n",
66 hdr->v1.buffer_addr, hdr->v1.start,
67 hdr->v1.end, hdr->v1.num_chars);
68
69 memconsole_length = hdr->v1.num_chars;
70 memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
71}
72
73static void found_v2_header(struct biosmemcon_ebda *hdr)
74{
75 printk(KERN_INFO "BIOS console v2 EBDA structure found at %p\n", hdr);
76 printk(KERN_INFO "BIOS console buffer at 0x%.8x, "
77 "start = %d, end = %d, num_bytes = %d\n",
78 hdr->v2.buffer_addr, hdr->v2.start,
79 hdr->v2.end, hdr->v2.num_bytes);
80
81 memconsole_length = hdr->v2.end - hdr->v2.start;
82 memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr
83 + hdr->v2.start);
84}
85
86/*
87 * Search through the EBDA for the BIOS Memory Console, and
88 * set the global variables to point to it. Return true if found.
89 */
90static bool found_memconsole(void)
91{
92 unsigned int address;
93 size_t length, cur;
94
95 address = get_bios_ebda();
96 if (!address) {
97 printk(KERN_INFO "BIOS EBDA non-existent.\n");
98 return false;
99 }
100
101 /* EBDA length is byte 0 of EBDA (in KB) */
102 length = *(u8 *)phys_to_virt(address);
103 length <<= 10; /* convert to bytes */
104
105 /*
106 * Search through EBDA for BIOS memory console structure
107 * note: signature is not necessarily dword-aligned
108 */
109 for (cur = 0; cur < length; cur++) {
110 struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
111
112 /* memconsole v1 */
113 if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) {
114 found_v1_header(hdr);
115 return true;
116 }
117
118 /* memconsole v2 */
119 if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) {
120 found_v2_header(hdr);
121 return true;
122 }
123 }
124
125 printk(KERN_INFO "BIOS console EBDA structure not found!\n");
126 return false;
127}
128
129static struct dmi_system_id memconsole_dmi_table[] __initdata = {
130 {
131 .ident = "Google Board",
132 .matches = {
133 DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
134 },
135 },
136 {}
137};
138MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
139
140static int __init memconsole_init(void)
141{
142 int ret;
143
144 if (!dmi_check_system(memconsole_dmi_table))
145 return -ENODEV;
146
147 if (!found_memconsole())
148 return -ENODEV;
149
150 memconsole_bin_attr.size = memconsole_length;
151
152 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
153
154 return ret;
155}
156
157static void __exit memconsole_exit(void)
158{
159 sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr);
160}
161
162module_init(memconsole_init);
163module_exit(memconsole_exit);
164
165MODULE_AUTHOR("Google, Inc.");
166MODULE_LICENSE("GPL");
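The upshot of the bin_attribute above is a read-only file named "log" under the firmware kobject, i.e. /sys/firmware/log. A minimal userspace reader (plain C; assumes nothing beyond that path):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/log", "r");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("/sys/firmware/log");
		return 1;
	}
	/* memconsole_read() serves the buffer via
	 * memory_read_from_buffer(), so plain sequential reads work. */
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}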
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 2192456dfd68..f032e446fc11 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -42,7 +42,20 @@
42struct acpi_table_ibft *ibft_addr; 42struct acpi_table_ibft *ibft_addr;
43EXPORT_SYMBOL_GPL(ibft_addr); 43EXPORT_SYMBOL_GPL(ibft_addr);
44 44
45#define IBFT_SIGN "iBFT" 45static const struct {
46 char *sign;
47} ibft_signs[] = {
48#ifdef CONFIG_ACPI
49 /*
50 * One spec says "IBFT", the other says "iBFT". We have to check
51 * for both.
52 */
53 { ACPI_SIG_IBFT },
54#endif
55 { "iBFT" },
56 { "BIFT" }, /* Broadcom iSCSI Offload */
57};
58
46#define IBFT_SIGN_LEN 4 59#define IBFT_SIGN_LEN 4
47#define IBFT_START 0x80000 /* 512kB */ 60#define IBFT_START 0x80000 /* 512kB */
48#define IBFT_END 0x100000 /* 1MB */ 61#define IBFT_END 0x100000 /* 1MB */
@@ -62,6 +75,7 @@ static int __init find_ibft_in_mem(void)
62 unsigned long pos; 75 unsigned long pos;
63 unsigned int len = 0; 76 unsigned int len = 0;
64 void *virt; 77 void *virt;
78 int i;
65 79
66 for (pos = IBFT_START; pos < IBFT_END; pos += 16) { 80 for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
67 /* The table can't be inside the VGA BIOS reserved space, 81 /* The table can't be inside the VGA BIOS reserved space,
@@ -69,18 +83,23 @@ static int __init find_ibft_in_mem(void)
69 if (pos == VGA_MEM) 83 if (pos == VGA_MEM)
70 pos += VGA_SIZE; 84 pos += VGA_SIZE;
71 virt = isa_bus_to_virt(pos); 85 virt = isa_bus_to_virt(pos);
72 if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { 86
73 unsigned long *addr = 87 for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) {
74 (unsigned long *)isa_bus_to_virt(pos + 4); 88 if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) ==
75 len = *addr; 89 0) {
76 /* if the length of the table extends past 1M, 90 unsigned long *addr =
77 * the table cannot be valid. */ 91 (unsigned long *)isa_bus_to_virt(pos + 4);
78 if (pos + len <= (IBFT_END-1)) { 92 len = *addr;
79 ibft_addr = (struct acpi_table_ibft *)virt; 93 /* if the length of the table extends past 1M,
80 break; 94 * the table cannot be valid. */
95 if (pos + len <= (IBFT_END-1)) {
96 ibft_addr = (struct acpi_table_ibft *)virt;
97 goto done;
98 }
81 } 99 }
82 } 100 }
83 } 101 }
102done:
84 return len; 103 return len;
85} 104}
86/* 105/*
@@ -89,18 +108,12 @@ static int __init find_ibft_in_mem(void)
89 */ 108 */
90unsigned long __init find_ibft_region(unsigned long *sizep) 109unsigned long __init find_ibft_region(unsigned long *sizep)
91{ 110{
92 111 int i;
93 ibft_addr = NULL; 112 ibft_addr = NULL;
94 113
95#ifdef CONFIG_ACPI 114#ifdef CONFIG_ACPI
96 /* 115 for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
97 * One spec says "IBFT", the other says "iBFT". We have to check 116 acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
98 * for both.
99 */
100 if (!ibft_addr)
101 acpi_table_parse(ACPI_SIG_IBFT, acpi_find_ibft);
102 if (!ibft_addr)
103 acpi_table_parse(IBFT_SIGN, acpi_find_ibft);
104#endif /* CONFIG_ACPI */ 117#endif /* CONFIG_ACPI */
105 118
106 /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will 119 /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
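Read as a whole, this refactor turns a pair of hard-coded signature compares into a table walk, which is why adding the Broadcom "BIFT" marker costs one line. The candidate test reduces to something like this sketch (names from the patch; not a literal excerpt):

/* Sketch: does the 4-byte signature at p match any known marker? */
static bool ibft_sign_matches(const void *p)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ibft_signs); i++)
		if (!memcmp(p, ibft_signs[i].sign, IBFT_SIGN_LEN))
			return true;
	return false;
}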
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5ed9d25d021a..99dde874fbbd 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -148,6 +148,7 @@ struct rdma_id_private {
148 u32 qp_num; 148 u32 qp_num;
149 u8 srq; 149 u8 srq;
150 u8 tos; 150 u8 tos;
151 u8 reuseaddr;
151}; 152};
152 153
153struct cma_multicast { 154struct cma_multicast {
@@ -712,6 +713,21 @@ static inline int cma_any_addr(struct sockaddr *addr)
712 return cma_zero_addr(addr) || cma_loopback_addr(addr); 713 return cma_zero_addr(addr) || cma_loopback_addr(addr);
713} 714}
714 715
716static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
717{
718 if (src->sa_family != dst->sa_family)
719 return -1;
720
721 switch (src->sa_family) {
722 case AF_INET:
723 return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
724 ((struct sockaddr_in *) dst)->sin_addr.s_addr;
725 default:
726 return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
727 &((struct sockaddr_in6 *) dst)->sin6_addr);
728 }
729}
730
715static inline __be16 cma_port(struct sockaddr *addr) 731static inline __be16 cma_port(struct sockaddr *addr)
716{ 732{
717 if (addr->sa_family == AF_INET) 733 if (addr->sa_family == AF_INET)
@@ -1564,50 +1580,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
1564 mutex_unlock(&lock); 1580 mutex_unlock(&lock);
1565} 1581}
1566 1582
1567int rdma_listen(struct rdma_cm_id *id, int backlog)
1568{
1569 struct rdma_id_private *id_priv;
1570 int ret;
1571
1572 id_priv = container_of(id, struct rdma_id_private, id);
1573 if (id_priv->state == CMA_IDLE) {
1574 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
1575 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
1576 if (ret)
1577 return ret;
1578 }
1579
1580 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
1581 return -EINVAL;
1582
1583 id_priv->backlog = backlog;
1584 if (id->device) {
1585 switch (rdma_node_get_transport(id->device->node_type)) {
1586 case RDMA_TRANSPORT_IB:
1587 ret = cma_ib_listen(id_priv);
1588 if (ret)
1589 goto err;
1590 break;
1591 case RDMA_TRANSPORT_IWARP:
1592 ret = cma_iw_listen(id_priv, backlog);
1593 if (ret)
1594 goto err;
1595 break;
1596 default:
1597 ret = -ENOSYS;
1598 goto err;
1599 }
1600 } else
1601 cma_listen_on_all(id_priv);
1602
1603 return 0;
1604err:
1605 id_priv->backlog = 0;
1606 cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
1607 return ret;
1608}
1609EXPORT_SYMBOL(rdma_listen);
1610
1611void rdma_set_service_type(struct rdma_cm_id *id, int tos) 1583void rdma_set_service_type(struct rdma_cm_id *id, int tos)
1612{ 1584{
1613 struct rdma_id_private *id_priv; 1585 struct rdma_id_private *id_priv;
@@ -2090,6 +2062,25 @@ err:
2090} 2062}
2091EXPORT_SYMBOL(rdma_resolve_addr); 2063EXPORT_SYMBOL(rdma_resolve_addr);
2092 2064
2065int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
2066{
2067 struct rdma_id_private *id_priv;
2068 unsigned long flags;
2069 int ret;
2070
2071 id_priv = container_of(id, struct rdma_id_private, id);
2072 spin_lock_irqsave(&id_priv->lock, flags);
2073 if (id_priv->state == CMA_IDLE) {
2074 id_priv->reuseaddr = reuse;
2075 ret = 0;
2076 } else {
2077 ret = -EINVAL;
2078 }
2079 spin_unlock_irqrestore(&id_priv->lock, flags);
2080 return ret;
2081}
2082EXPORT_SYMBOL(rdma_set_reuseaddr);
2083
2093static void cma_bind_port(struct rdma_bind_list *bind_list, 2084static void cma_bind_port(struct rdma_bind_list *bind_list,
2094 struct rdma_id_private *id_priv) 2085 struct rdma_id_private *id_priv)
2095{ 2086{
@@ -2165,41 +2156,71 @@ retry:
2165 return -EADDRNOTAVAIL; 2156 return -EADDRNOTAVAIL;
2166} 2157}
2167 2158
2168static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) 2159/*
2160 * Check that the requested port is available. This is called when trying to
2161 * bind to a specific port, or when trying to listen on a bound port. In
2162 * the latter case, the provided id_priv may already be on the bind_list, but
2163 * we still need to check that it's okay to start listening.
2164 */
2165static int cma_check_port(struct rdma_bind_list *bind_list,
2166 struct rdma_id_private *id_priv, uint8_t reuseaddr)
2169{ 2167{
2170 struct rdma_id_private *cur_id; 2168 struct rdma_id_private *cur_id;
2171 struct sockaddr_in *sin, *cur_sin; 2169 struct sockaddr *addr, *cur_addr;
2172 struct rdma_bind_list *bind_list;
2173 struct hlist_node *node; 2170 struct hlist_node *node;
2171
2172 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
2173 if (cma_any_addr(addr) && !reuseaddr)
2174 return -EADDRNOTAVAIL;
2175
2176 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
2177 if (id_priv == cur_id)
2178 continue;
2179
2180 if ((cur_id->state == CMA_LISTEN) ||
2181 !reuseaddr || !cur_id->reuseaddr) {
2182 cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
2183 if (cma_any_addr(cur_addr))
2184 return -EADDRNOTAVAIL;
2185
2186 if (!cma_addr_cmp(addr, cur_addr))
2187 return -EADDRINUSE;
2188 }
2189 }
2190 return 0;
2191}
2192
2193static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
2194{
2195 struct rdma_bind_list *bind_list;
2174 unsigned short snum; 2196 unsigned short snum;
2197 int ret;
2175 2198
2176 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 2199 snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr));
2177 snum = ntohs(sin->sin_port);
2178 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 2200 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
2179 return -EACCES; 2201 return -EACCES;
2180 2202
2181 bind_list = idr_find(ps, snum); 2203 bind_list = idr_find(ps, snum);
2182 if (!bind_list) 2204 if (!bind_list) {
2183 return cma_alloc_port(ps, id_priv, snum); 2205 ret = cma_alloc_port(ps, id_priv, snum);
2184 2206 } else {
2185 /* 2207 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
2186 * We don't support binding to any address if anyone is bound to 2208 if (!ret)
2187 * a specific address on the same port. 2209 cma_bind_port(bind_list, id_priv);
2188 */
2189 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2190 return -EADDRNOTAVAIL;
2191
2192 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
2193 if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
2194 return -EADDRNOTAVAIL;
2195
2196 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
2197 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
2198 return -EADDRINUSE;
2199 } 2210 }
2211 return ret;
2212}
2200 2213
2201 cma_bind_port(bind_list, id_priv); 2214static int cma_bind_listen(struct rdma_id_private *id_priv)
2202 return 0; 2215{
2216 struct rdma_bind_list *bind_list = id_priv->bind_list;
2217 int ret = 0;
2218
2219 mutex_lock(&lock);
2220 if (bind_list->owners.first->next)
2221 ret = cma_check_port(bind_list, id_priv, 0);
2222 mutex_unlock(&lock);
2223 return ret;
2203} 2224}
2204 2225
2205static int cma_get_port(struct rdma_id_private *id_priv) 2226static int cma_get_port(struct rdma_id_private *id_priv)
@@ -2253,6 +2274,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
2253 return 0; 2274 return 0;
2254} 2275}
2255 2276
2277int rdma_listen(struct rdma_cm_id *id, int backlog)
2278{
2279 struct rdma_id_private *id_priv;
2280 int ret;
2281
2282 id_priv = container_of(id, struct rdma_id_private, id);
2283 if (id_priv->state == CMA_IDLE) {
2284 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
2285 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
2286 if (ret)
2287 return ret;
2288 }
2289
2290 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
2291 return -EINVAL;
2292
2293 if (id_priv->reuseaddr) {
2294 ret = cma_bind_listen(id_priv);
2295 if (ret)
2296 goto err;
2297 }
2298
2299 id_priv->backlog = backlog;
2300 if (id->device) {
2301 switch (rdma_node_get_transport(id->device->node_type)) {
2302 case RDMA_TRANSPORT_IB:
2303 ret = cma_ib_listen(id_priv);
2304 if (ret)
2305 goto err;
2306 break;
2307 case RDMA_TRANSPORT_IWARP:
2308 ret = cma_iw_listen(id_priv, backlog);
2309 if (ret)
2310 goto err;
2311 break;
2312 default:
2313 ret = -ENOSYS;
2314 goto err;
2315 }
2316 } else
2317 cma_listen_on_all(id_priv);
2318
2319 return 0;
2320err:
2321 id_priv->backlog = 0;
2322 cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
2323 return ret;
2324}
2325EXPORT_SYMBOL(rdma_listen);
2326
2256int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 2327int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2257{ 2328{
2258 struct rdma_id_private *id_priv; 2329 struct rdma_id_private *id_priv;
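One ordering constraint is worth spelling out: rdma_set_reuseaddr() only succeeds while the id is still in CMA_IDLE, so the flag must be set before the address is bound. A hedged sketch of the intended kernel-side call order (id, addr and backlog assumed set up elsewhere):

	/* Must precede rdma_bind_addr(); afterwards it returns -EINVAL. */
	ret = rdma_set_reuseaddr(id, 1);
	if (!ret)
		ret = rdma_bind_addr(id, (struct sockaddr *) &addr);
	if (!ret)
		ret = rdma_listen(id, backlog);	/* cma_bind_listen() re-checks the port */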
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 2a1e9ae134b4..a9c042345c6f 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
725 */ 725 */
726 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); 726 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
727 BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); 727 BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
728 if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) { 728 if (iw_event->status == 0) {
729 cm_id_priv->id.local_addr = iw_event->local_addr; 729 cm_id_priv->id.local_addr = iw_event->local_addr;
730 cm_id_priv->id.remote_addr = iw_event->remote_addr; 730 cm_id_priv->id.remote_addr = iw_event->remote_addr;
731 cm_id_priv->state = IW_CM_STATE_ESTABLISHED; 731 cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ec1e9da1488b..b3fa798525b2 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -883,6 +883,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
883 } 883 }
884 rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); 884 rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
885 break; 885 break;
886 case RDMA_OPTION_ID_REUSEADDR:
887 if (optlen != sizeof(int)) {
888 ret = -EINVAL;
889 break;
890 }
891 ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
892 break;
886 default: 893 default:
887 ret = -ENOSYS; 894 ret = -ENOSYS;
888 } 895 }
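From userspace this surfaces through rdma_set_option(). A sketch, assuming a librdmacm new enough to expose RDMA_OPTION_ID_REUSEADDR:

#include <rdma/rdma_cma.h>

static int enable_reuseaddr(struct rdma_cm_id *id)
{
	int one = 1;

	/* Lands in ucma_set_option_id() above and from there in
	 * rdma_set_reuseaddr(); call it before rdma_bind_addr(). */
	return rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
			       &one, sizeof(one));
}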
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 9d8dcfab2b38..d7ee70fc9173 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1198 } 1198 }
1199 PDBG("%s ep %p status %d error %d\n", __func__, ep, 1199 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1200 rpl->status, status2errno(rpl->status)); 1200 rpl->status, status2errno(rpl->status));
1201 ep->com.wr_wait.ret = status2errno(rpl->status); 1201 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
1202 ep->com.wr_wait.done = 1;
1203 wake_up(&ep->com.wr_wait.wait);
1204 1202
1205 return 0; 1203 return 0;
1206} 1204}
@@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1234 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1232 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1235 1233
1236 PDBG("%s ep %p\n", __func__, ep); 1234 PDBG("%s ep %p\n", __func__, ep);
1237 ep->com.wr_wait.ret = status2errno(rpl->status); 1235 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
1238 ep->com.wr_wait.done = 1;
1239 wake_up(&ep->com.wr_wait.wait);
1240 return 0; 1236 return 0;
1241} 1237}
1242 1238
@@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1466 struct c4iw_qp_attributes attrs; 1462 struct c4iw_qp_attributes attrs;
1467 int disconnect = 1; 1463 int disconnect = 1;
1468 int release = 0; 1464 int release = 0;
1469 int closing = 0; 1465 int abort = 0;
1470 struct tid_info *t = dev->rdev.lldi.tids; 1466 struct tid_info *t = dev->rdev.lldi.tids;
1471 unsigned int tid = GET_TID(hdr); 1467 unsigned int tid = GET_TID(hdr);
1472 1468
@@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1492 * in rdma connection migration (see c4iw_accept_cr()). 1488 * in rdma connection migration (see c4iw_accept_cr()).
1493 */ 1489 */
1494 __state_set(&ep->com, CLOSING); 1490 __state_set(&ep->com, CLOSING);
1495 ep->com.wr_wait.done = 1;
1496 ep->com.wr_wait.ret = -ECONNRESET;
1497 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1491 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1498 wake_up(&ep->com.wr_wait.wait); 1492 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1499 break; 1493 break;
1500 case MPA_REP_SENT: 1494 case MPA_REP_SENT:
1501 __state_set(&ep->com, CLOSING); 1495 __state_set(&ep->com, CLOSING);
1502 ep->com.wr_wait.done = 1;
1503 ep->com.wr_wait.ret = -ECONNRESET;
1504 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 1496 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1505 wake_up(&ep->com.wr_wait.wait); 1497 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1506 break; 1498 break;
1507 case FPDU_MODE: 1499 case FPDU_MODE:
1508 start_ep_timer(ep); 1500 start_ep_timer(ep);
1509 __state_set(&ep->com, CLOSING); 1501 __state_set(&ep->com, CLOSING);
1510 closing = 1; 1502 attrs.next_state = C4IW_QP_STATE_CLOSING;
1503 abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1504 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1511 peer_close_upcall(ep); 1505 peer_close_upcall(ep);
1506 disconnect = 1;
1512 break; 1507 break;
1513 case ABORTING: 1508 case ABORTING:
1514 disconnect = 0; 1509 disconnect = 0;
@@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1536 BUG_ON(1); 1531 BUG_ON(1);
1537 } 1532 }
1538 mutex_unlock(&ep->com.mutex); 1533 mutex_unlock(&ep->com.mutex);
1539 if (closing) {
1540 attrs.next_state = C4IW_QP_STATE_CLOSING;
1541 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1542 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1543 }
1544 if (disconnect) 1534 if (disconnect)
1545 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1535 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1546 if (release) 1536 if (release)
@@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1581 /* 1571 /*
1582 * Wake up any threads in rdma_init() or rdma_fini(). 1572 * Wake up any threads in rdma_init() or rdma_fini().
1583 */ 1573 */
1584 ep->com.wr_wait.done = 1; 1574 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1585 ep->com.wr_wait.ret = -ECONNRESET;
1586 wake_up(&ep->com.wr_wait.wait);
1587 1575
1588 mutex_lock(&ep->com.mutex); 1576 mutex_lock(&ep->com.mutex);
1589 switch (ep->com.state) { 1577 switch (ep->com.state) {
@@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1710 ep = lookup_tid(t, tid); 1698 ep = lookup_tid(t, tid);
1711 BUG_ON(!ep); 1699 BUG_ON(!ep);
1712 1700
1713 if (ep->com.qp) { 1701 if (ep && ep->com.qp) {
1714 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 1702 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
1715 ep->com.qp->wq.sq.qid); 1703 ep->com.qp->wq.sq.qid);
1716 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1704 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1717 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1705 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1718 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1706 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1719 } else 1707 } else
1720 printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); 1708 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
1721 1709
1722 return 0; 1710 return 0;
1723} 1711}
@@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2296 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 2284 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
2297 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 2285 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
2298 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); 2286 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
2299 if (wr_waitp) { 2287 if (wr_waitp)
2300 if (ret) 2288 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2301 wr_waitp->ret = -ret;
2302 else
2303 wr_waitp->ret = 0;
2304 wr_waitp->done = 1;
2305 wake_up(&wr_waitp->wait);
2306 }
2307 kfree_skb(skb); 2289 kfree_skb(skb);
2308 break; 2290 break;
2309 case 2: 2291 case 2:
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index e29172c2afcb..40a13cc633a3 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
44MODULE_LICENSE("Dual BSD/GPL"); 44MODULE_LICENSE("Dual BSD/GPL");
45MODULE_VERSION(DRV_VERSION); 45MODULE_VERSION(DRV_VERSION);
46 46
47static LIST_HEAD(dev_list); 47static LIST_HEAD(uld_ctx_list);
48static DEFINE_MUTEX(dev_mutex); 48static DEFINE_MUTEX(dev_mutex);
49 49
50static struct dentry *c4iw_debugfs_root; 50static struct dentry *c4iw_debugfs_root;
@@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
370 c4iw_destroy_resource(&rdev->resource); 370 c4iw_destroy_resource(&rdev->resource);
371} 371}
372 372
373static void c4iw_remove(struct c4iw_dev *dev) 373struct uld_ctx {
374 struct list_head entry;
375 struct cxgb4_lld_info lldi;
376 struct c4iw_dev *dev;
377};
378
379static void c4iw_remove(struct uld_ctx *ctx)
374{ 380{
375 PDBG("%s c4iw_dev %p\n", __func__, dev); 381 PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
376 list_del(&dev->entry); 382 c4iw_unregister_device(ctx->dev);
377 if (dev->registered) 383 c4iw_rdev_close(&ctx->dev->rdev);
378 c4iw_unregister_device(dev); 384 idr_destroy(&ctx->dev->cqidr);
379 c4iw_rdev_close(&dev->rdev); 385 idr_destroy(&ctx->dev->qpidr);
380 idr_destroy(&dev->cqidr); 386 idr_destroy(&ctx->dev->mmidr);
381 idr_destroy(&dev->qpidr); 387 iounmap(ctx->dev->rdev.oc_mw_kva);
382 idr_destroy(&dev->mmidr); 388 ib_dealloc_device(&ctx->dev->ibdev);
383 iounmap(dev->rdev.oc_mw_kva); 389 ctx->dev = NULL;
384 ib_dealloc_device(&dev->ibdev);
385} 390}
386 391
387static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) 392static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
392 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); 397 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
393 if (!devp) { 398 if (!devp) {
394 printk(KERN_ERR MOD "Cannot allocate ib device\n"); 399 printk(KERN_ERR MOD "Cannot allocate ib device\n");
395 return NULL; 400 return ERR_PTR(-ENOMEM);
396 } 401 }
397 devp->rdev.lldi = *infop; 402 devp->rdev.lldi = *infop;
398 403
@@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
402 devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, 407 devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
403 devp->rdev.lldi.vr->ocq.size); 408 devp->rdev.lldi.vr->ocq.size);
404 409
405 printk(KERN_INFO MOD "ocq memory: " 410 PDBG(KERN_INFO MOD "ocq memory: "
406 "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", 411 "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
407 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, 412 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
408 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); 413 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
409 414
410 mutex_lock(&dev_mutex);
411
412 ret = c4iw_rdev_open(&devp->rdev); 415 ret = c4iw_rdev_open(&devp->rdev);
413 if (ret) { 416 if (ret) {
414 mutex_unlock(&dev_mutex); 417 mutex_unlock(&dev_mutex);
415 printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); 418 printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
416 ib_dealloc_device(&devp->ibdev); 419 ib_dealloc_device(&devp->ibdev);
417 return NULL; 420 return ERR_PTR(ret);
418 } 421 }
419 422
420 idr_init(&devp->cqidr); 423 idr_init(&devp->cqidr);
421 idr_init(&devp->qpidr); 424 idr_init(&devp->qpidr);
422 idr_init(&devp->mmidr); 425 idr_init(&devp->mmidr);
423 spin_lock_init(&devp->lock); 426 spin_lock_init(&devp->lock);
424 list_add_tail(&devp->entry, &dev_list);
425 mutex_unlock(&dev_mutex);
426 427
427 if (c4iw_debugfs_root) { 428 if (c4iw_debugfs_root) {
428 devp->debugfs_root = debugfs_create_dir( 429 devp->debugfs_root = debugfs_create_dir(
@@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
435 436
436static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) 437static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
437{ 438{
438 struct c4iw_dev *dev; 439 struct uld_ctx *ctx;
439 static int vers_printed; 440 static int vers_printed;
440 int i; 441 int i;
441 442
@@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
443 printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", 444 printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
444 DRV_VERSION); 445 DRV_VERSION);
445 446
446 dev = c4iw_alloc(infop); 447 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
447 if (!dev) 448 if (!ctx) {
449 ctx = ERR_PTR(-ENOMEM);
448 goto out; 450 goto out;
451 }
452 ctx->lldi = *infop;
449 453
450 PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", 454 PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
451 __func__, pci_name(dev->rdev.lldi.pdev), 455 __func__, pci_name(ctx->lldi.pdev),
452 dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq, 456 ctx->lldi.nchan, ctx->lldi.nrxq,
453 dev->rdev.lldi.ntxq, dev->rdev.lldi.nports); 457 ctx->lldi.ntxq, ctx->lldi.nports);
458
459 mutex_lock(&dev_mutex);
460 list_add_tail(&ctx->entry, &uld_ctx_list);
461 mutex_unlock(&dev_mutex);
454 462
455 for (i = 0; i < dev->rdev.lldi.nrxq; i++) 463 for (i = 0; i < ctx->lldi.nrxq; i++)
456 PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]); 464 PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
457out: 465out:
458 return dev; 466 return ctx;
459} 467}
460 468
461static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, 469static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
462 const struct pkt_gl *gl) 470 const struct pkt_gl *gl)
463{ 471{
464 struct c4iw_dev *dev = handle; 472 struct uld_ctx *ctx = handle;
473 struct c4iw_dev *dev = ctx->dev;
465 struct sk_buff *skb; 474 struct sk_buff *skb;
466 const struct cpl_act_establish *rpl; 475 const struct cpl_act_establish *rpl;
467 unsigned int opcode; 476 unsigned int opcode;
@@ -503,47 +512,49 @@ nomem:
503 512
504static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) 513static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
505{ 514{
506 struct c4iw_dev *dev = handle; 515 struct uld_ctx *ctx = handle;
507 516
508 PDBG("%s new_state %u\n", __func__, new_state); 517 PDBG("%s new_state %u\n", __func__, new_state);
509 switch (new_state) { 518 switch (new_state) {
510 case CXGB4_STATE_UP: 519 case CXGB4_STATE_UP:
511 printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev)); 520 printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
512 if (!dev->registered) { 521 if (!ctx->dev) {
513 int ret; 522 int ret = 0;
514 ret = c4iw_register_device(dev); 523
515 if (ret) 524 ctx->dev = c4iw_alloc(&ctx->lldi);
525 if (!IS_ERR(ctx->dev))
526 ret = c4iw_register_device(ctx->dev);
527 if (IS_ERR(ctx->dev) || ret)
516 printk(KERN_ERR MOD 528 printk(KERN_ERR MOD
517 "%s: RDMA registration failed: %d\n", 529 "%s: RDMA registration failed: %d\n",
518 pci_name(dev->rdev.lldi.pdev), ret); 530 pci_name(ctx->lldi.pdev), ret);
519 } 531 }
520 break; 532 break;
521 case CXGB4_STATE_DOWN: 533 case CXGB4_STATE_DOWN:
522 printk(KERN_INFO MOD "%s: Down\n", 534 printk(KERN_INFO MOD "%s: Down\n",
523 pci_name(dev->rdev.lldi.pdev)); 535 pci_name(ctx->lldi.pdev));
524 if (dev->registered) 536 if (ctx->dev)
525 c4iw_unregister_device(dev); 537 c4iw_remove(ctx);
526 break; 538 break;
527 case CXGB4_STATE_START_RECOVERY: 539 case CXGB4_STATE_START_RECOVERY:
528 printk(KERN_INFO MOD "%s: Fatal Error\n", 540 printk(KERN_INFO MOD "%s: Fatal Error\n",
529 pci_name(dev->rdev.lldi.pdev)); 541 pci_name(ctx->lldi.pdev));
530 dev->rdev.flags |= T4_FATAL_ERROR; 542 if (ctx->dev) {
531 if (dev->registered) {
532 struct ib_event event; 543 struct ib_event event;
533 544
545 ctx->dev->rdev.flags |= T4_FATAL_ERROR;
534 memset(&event, 0, sizeof event); 546 memset(&event, 0, sizeof event);
535 event.event = IB_EVENT_DEVICE_FATAL; 547 event.event = IB_EVENT_DEVICE_FATAL;
536 event.device = &dev->ibdev; 548 event.device = &ctx->dev->ibdev;
537 ib_dispatch_event(&event); 549 ib_dispatch_event(&event);
538 c4iw_unregister_device(dev); 550 c4iw_remove(ctx);
539 } 551 }
540 break; 552 break;
541 case CXGB4_STATE_DETACH: 553 case CXGB4_STATE_DETACH:
542 printk(KERN_INFO MOD "%s: Detach\n", 554 printk(KERN_INFO MOD "%s: Detach\n",
543 pci_name(dev->rdev.lldi.pdev)); 555 pci_name(ctx->lldi.pdev));
544 mutex_lock(&dev_mutex); 556 if (ctx->dev)
545 c4iw_remove(dev); 557 c4iw_remove(ctx);
546 mutex_unlock(&dev_mutex);
547 break; 558 break;
548 } 559 }
549 return 0; 560 return 0;
@@ -576,11 +587,13 @@ static int __init c4iw_init_module(void)
576 587
577static void __exit c4iw_exit_module(void) 588static void __exit c4iw_exit_module(void)
578{ 589{
579 struct c4iw_dev *dev, *tmp; 590 struct uld_ctx *ctx, *tmp;
580 591
581 mutex_lock(&dev_mutex); 592 mutex_lock(&dev_mutex);
582 list_for_each_entry_safe(dev, tmp, &dev_list, entry) { 593 list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
583 c4iw_remove(dev); 594 if (ctx->dev)
595 c4iw_remove(ctx);
596 kfree(ctx);
584 } 597 }
585 mutex_unlock(&dev_mutex); 598 mutex_unlock(&dev_mutex);
586 cxgb4_unregister_uld(CXGB4_ULD_RDMA); 599 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9f6166f59268..35d2a5dd9bb4 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
131 131
132#define C4IW_WR_TO (10*HZ) 132#define C4IW_WR_TO (10*HZ)
133 133
134enum {
135 REPLY_READY = 0,
136};
137
134struct c4iw_wr_wait { 138struct c4iw_wr_wait {
135 wait_queue_head_t wait; 139 wait_queue_head_t wait;
136 int done; 140 unsigned long status;
137 int ret; 141 int ret;
138}; 142};
139 143
140static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) 144static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
141{ 145{
142 wr_waitp->ret = 0; 146 wr_waitp->ret = 0;
143 wr_waitp->done = 0; 147 wr_waitp->status = 0;
144 init_waitqueue_head(&wr_waitp->wait); 148 init_waitqueue_head(&wr_waitp->wait);
145} 149}
146 150
151static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
152{
153 wr_waitp->ret = ret;
154 set_bit(REPLY_READY, &wr_waitp->status);
155 wake_up(&wr_waitp->wait);
156}
157
147static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, 158static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
148 struct c4iw_wr_wait *wr_waitp, 159 struct c4iw_wr_wait *wr_waitp,
149 u32 hwtid, u32 qpid, 160 u32 hwtid, u32 qpid,
150 const char *func) 161 const char *func)
151{ 162{
152 unsigned to = C4IW_WR_TO; 163 unsigned to = C4IW_WR_TO;
153 do { 164 int ret;
154 165
155 wait_event_timeout(wr_waitp->wait, wr_waitp->done, to); 166 do {
156 if (!wr_waitp->done) { 167 ret = wait_event_timeout(wr_waitp->wait,
168 test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
169 if (!ret) {
157 printk(KERN_ERR MOD "%s - Device %s not responding - " 170 printk(KERN_ERR MOD "%s - Device %s not responding - "
158 "tid %u qpid %u\n", func, 171 "tid %u qpid %u\n", func,
159 pci_name(rdev->lldi.pdev), hwtid, qpid); 172 pci_name(rdev->lldi.pdev), hwtid, qpid);
173 if (c4iw_fatal_error(rdev)) {
174 wr_waitp->ret = -EIO;
175 break;
176 }
160 to = to << 2; 177 to = to << 2;
161 } 178 }
162 } while (!wr_waitp->done); 179 } while (!ret);
163 if (wr_waitp->ret) 180 if (wr_waitp->ret)
164 printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n", 181 PDBG("%s: FW reply %d tid %u qpid %u\n",
165 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); 182 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
166 return wr_waitp->ret; 183 return wr_waitp->ret;
167} 184}
168 185
169
170struct c4iw_dev { 186struct c4iw_dev {
171 struct ib_device ibdev; 187 struct ib_device ibdev;
172 struct c4iw_rdev rdev; 188 struct c4iw_rdev rdev;
@@ -175,9 +191,7 @@ struct c4iw_dev {
175 struct idr qpidr; 191 struct idr qpidr;
176 struct idr mmidr; 192 struct idr mmidr;
177 spinlock_t lock; 193 spinlock_t lock;
178 struct list_head entry;
179 struct dentry *debugfs_root; 194 struct dentry *debugfs_root;
180 u8 registered;
181}; 195};
182 196
183static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) 197static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
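The wake/wait pair above is the whole protocol: the issuing thread initializes the wait object, posts a work request whose completion path calls c4iw_wake_up(), and then blocks in c4iw_wait_for_reply(). A condensed sketch of the issuing side (post_fw_wr() is hypothetical):

	struct c4iw_wr_wait wr_wait;
	int ret;

	c4iw_init_wr_wait(&wr_wait);
	ret = post_fw_wr(rdev, &wr_wait);	/* hypothetical: its CPL
						 * handler calls c4iw_wake_up() */
	if (!ret)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid,
					  __func__);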
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index f66dd8bf5128..5b9e4220ca08 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
516 if (ret) 516 if (ret)
517 goto bail2; 517 goto bail2;
518 } 518 }
519 dev->registered = 1;
520 return 0; 519 return 0;
521bail2: 520bail2:
522 ib_unregister_device(&dev->ibdev); 521 ib_unregister_device(&dev->ibdev);
@@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
535 c4iw_class_attributes[i]); 534 c4iw_class_attributes[i]);
536 ib_unregister_device(&dev->ibdev); 535 ib_unregister_device(&dev->ibdev);
537 kfree(dev->ibdev.iwcm); 536 kfree(dev->ibdev.iwcm);
538 dev->registered = 0;
539 return; 537 return;
540} 538}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 70a5a3c646da..3b773b05a898 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
214 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ 214 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
215 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ 215 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
216 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ 216 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
217 t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 | 217 (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
218 V_FW_RI_RES_WR_IQID(scq->cqid)); 218 V_FW_RI_RES_WR_IQID(scq->cqid));
219 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( 219 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
220 V_FW_RI_RES_WR_DCAEN(0) | 220 V_FW_RI_RES_WR_DCAEN(0) |
@@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1210 if (ret) { 1210 if (ret) {
1211 if (internal) 1211 if (internal)
1212 c4iw_get_ep(&qhp->ep->com); 1212 c4iw_get_ep(&qhp->ep->com);
1213 disconnect = abort = 1;
1214 goto err; 1213 goto err;
1215 } 1214 }
1216 break; 1215 break;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 24af12fc8228..c0221eec8817 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -269,11 +269,8 @@ struct t4_swsqe {
269 269
270static inline pgprot_t t4_pgprot_wc(pgprot_t prot) 270static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
271{ 271{
272#if defined(__i386__) || defined(__x86_64__) 272#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
273 return pgprot_writecombine(prot); 273 return pgprot_writecombine(prot);
274#elif defined(CONFIG_PPC64)
275 return __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE) &
276 ~(pgprot_t)_PAGE_GUARDED);
277#else 274#else
278 return pgprot_noncached(prot); 275 return pgprot_noncached(prot);
279#endif 276#endif
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 58c0e417bc30..be24ac726114 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
398 struct ipath_devdata *dd; 398 struct ipath_devdata *dd;
399 unsigned long long addr; 399 unsigned long long addr;
400 u32 bar0 = 0, bar1 = 0; 400 u32 bar0 = 0, bar1 = 0;
401 u8 rev;
402 401
403 dd = ipath_alloc_devdata(pdev); 402 dd = ipath_alloc_devdata(pdev);
404 if (IS_ERR(dd)) { 403 if (IS_ERR(dd)) {
@@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
540 goto bail_regions; 539 goto bail_regions;
541 } 540 }
542 541
543 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 542 dd->ipath_pcirev = pdev->revision;
544 if (ret) {
545 ipath_dev_err(dd, "Failed to read PCI revision ID unit "
546 "%u: err %d\n", dd->ipath_unit, -ret);
547 goto bail_regions; /* shouldn't ever happen */
548 }
549 dd->ipath_pcirev = rev;
550 543
551#if defined(__powerpc__) 544#if defined(__powerpc__)
552 /* There isn't a generic way to specify writethrough mappings */ 545 /* There isn't a generic way to specify writethrough mappings */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 33c7eedaba6c..e74cdf9ef471 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2563,7 +2563,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2563 u16 last_ae; 2563 u16 last_ae;
2564 u8 original_hw_tcp_state; 2564 u8 original_hw_tcp_state;
2565 u8 original_ibqp_state; 2565 u8 original_ibqp_state;
2566 enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK; 2566 int disconn_status = 0;
2567 int issue_disconn = 0; 2567 int issue_disconn = 0;
2568 int issue_close = 0; 2568 int issue_close = 0;
2569 int issue_flush = 0; 2569 int issue_flush = 0;
@@ -2605,7 +2605,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2605 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { 2605 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2606 issue_disconn = 1; 2606 issue_disconn = 1;
2607 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) 2607 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
2608 disconn_status = IW_CM_EVENT_STATUS_RESET; 2608 disconn_status = -ECONNRESET;
2609 } 2609 }
2610 2610
2611 if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || 2611 if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
@@ -2666,7 +2666,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2666 cm_id->provider_data = nesqp; 2666 cm_id->provider_data = nesqp;
2667 /* Send up the close complete event */ 2667 /* Send up the close complete event */
2668 cm_event.event = IW_CM_EVENT_CLOSE; 2668 cm_event.event = IW_CM_EVENT_CLOSE;
2669 cm_event.status = IW_CM_EVENT_STATUS_OK; 2669 cm_event.status = 0;
2670 cm_event.provider_data = cm_id->provider_data; 2670 cm_event.provider_data = cm_id->provider_data;
2671 cm_event.local_addr = cm_id->local_addr; 2671 cm_event.local_addr = cm_id->local_addr;
2672 cm_event.remote_addr = cm_id->remote_addr; 2672 cm_event.remote_addr = cm_id->remote_addr;
@@ -2966,7 +2966,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2966 nes_add_ref(&nesqp->ibqp); 2966 nes_add_ref(&nesqp->ibqp);
2967 2967
2968 cm_event.event = IW_CM_EVENT_ESTABLISHED; 2968 cm_event.event = IW_CM_EVENT_ESTABLISHED;
2969 cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; 2969 cm_event.status = 0;
2970 cm_event.provider_data = (void *)nesqp; 2970 cm_event.provider_data = (void *)nesqp;
2971 cm_event.local_addr = cm_id->local_addr; 2971 cm_event.local_addr = cm_id->local_addr;
2972 cm_event.remote_addr = cm_id->remote_addr; 2972 cm_event.remote_addr = cm_id->remote_addr;
@@ -3377,7 +3377,7 @@ static void cm_event_connected(struct nes_cm_event *event)
3377 3377
3378 /* notify OF layer we successfully created the requested connection */ 3378 /* notify OF layer we successfully created the requested connection */
3379 cm_event.event = IW_CM_EVENT_CONNECT_REPLY; 3379 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
3380 cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; 3380 cm_event.status = 0;
3381 cm_event.provider_data = cm_id->provider_data; 3381 cm_event.provider_data = cm_id->provider_data;
3382 cm_event.local_addr.sin_family = AF_INET; 3382 cm_event.local_addr.sin_family = AF_INET;
3383 cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; 3383 cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
@@ -3484,7 +3484,7 @@ static void cm_event_reset(struct nes_cm_event *event)
3484 nesqp->cm_id = NULL; 3484 nesqp->cm_id = NULL;
3485 /* cm_id->provider_data = NULL; */ 3485 /* cm_id->provider_data = NULL; */
3486 cm_event.event = IW_CM_EVENT_DISCONNECT; 3486 cm_event.event = IW_CM_EVENT_DISCONNECT;
3487 cm_event.status = IW_CM_EVENT_STATUS_RESET; 3487 cm_event.status = -ECONNRESET;
3488 cm_event.provider_data = cm_id->provider_data; 3488 cm_event.provider_data = cm_id->provider_data;
3489 cm_event.local_addr = cm_id->local_addr; 3489 cm_event.local_addr = cm_id->local_addr;
3490 cm_event.remote_addr = cm_id->remote_addr; 3490 cm_event.remote_addr = cm_id->remote_addr;
@@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event)
3495 ret = cm_id->event_handler(cm_id, &cm_event); 3495 ret = cm_id->event_handler(cm_id, &cm_event);
3496 atomic_inc(&cm_closes); 3496 atomic_inc(&cm_closes);
3497 cm_event.event = IW_CM_EVENT_CLOSE; 3497 cm_event.event = IW_CM_EVENT_CLOSE;
3498 cm_event.status = IW_CM_EVENT_STATUS_OK; 3498 cm_event.status = 0;
3499 cm_event.provider_data = cm_id->provider_data; 3499 cm_event.provider_data = cm_id->provider_data;
3500 cm_event.local_addr = cm_id->local_addr; 3500 cm_event.local_addr = cm_id->local_addr;
3501 cm_event.remote_addr = cm_id->remote_addr; 3501 cm_event.remote_addr = cm_id->remote_addr;
@@ -3534,7 +3534,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
3534 cm_node, cm_id, jiffies); 3534 cm_node, cm_id, jiffies);
3535 3535
3536 cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; 3536 cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
3537 cm_event.status = IW_CM_EVENT_STATUS_OK; 3537 cm_event.status = 0;
3538 cm_event.provider_data = (void *)cm_node; 3538 cm_event.provider_data = (void *)cm_node;
3539 3539
3540 cm_event.local_addr.sin_family = AF_INET; 3540 cm_event.local_addr.sin_family = AF_INET;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 26d8018c0a7c..95ca93ceedac 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1484,7 +1484,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
1484 (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { 1484 (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) {
1485 cm_id = nesqp->cm_id; 1485 cm_id = nesqp->cm_id;
1486 cm_event.event = IW_CM_EVENT_CONNECT_REPLY; 1486 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
1487 cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT; 1487 cm_event.status = -ETIMEDOUT;
1488 cm_event.local_addr = cm_id->local_addr; 1488 cm_event.local_addr = cm_id->local_addr;
1489 cm_event.remote_addr = cm_id->remote_addr; 1489 cm_event.remote_addr = cm_id->remote_addr;
1490 cm_event.private_data = NULL; 1490 cm_event.private_data = NULL;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 6bab3eaea70f..9f53e68a096a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
7534 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); 7534 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
7535 tstart = get_jiffies_64(); 7535 tstart = get_jiffies_64();
7536 while (chan_done && 7536 while (chan_done &&
7537 !time_after64(tstart, tstart + msecs_to_jiffies(500))) { 7537 !time_after64(get_jiffies_64(),
7538 tstart + msecs_to_jiffies(500))) {
7538 msleep(20); 7539 msleep(20);
7539 for (chan = 0; chan < SERDES_CHANS; ++chan) { 7540 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7540 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 7541 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
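The bug here is the classic self-comparison: time_after64(tstart, tstart + msecs_to_jiffies(500)) can never be true, so the 500 ms bound was dead code and the loop terminated only via chan_done. For reference, the canonical shape of a bounded poll (generic sketch; poll_hardware() is hypothetical):

	u64 timeout = get_jiffies_64() + msecs_to_jiffies(500);
	bool done = false;

	while (!done && !time_after64(get_jiffies_64(), timeout)) {
		msleep(20);
		done = poll_hardware();
	}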
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 48b6674cbc49..891cc2ff5f00 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
526 */ 526 */
527 devid = parent->device; 527 devid = parent->device;
528 if (devid >= 0x25e2 && devid <= 0x25fa) { 528 if (devid >= 0x25e2 && devid <= 0x25fa) {
529 u8 rev;
530
531 /* 5000 P/V/X/Z */ 529 /* 5000 P/V/X/Z */
532 pci_read_config_byte(parent, PCI_REVISION_ID, &rev); 530 if (parent->revision <= 0xb2)
533 if (rev <= 0xb2)
534 bits = 1U << 10; 531 bits = 1U << 10;
535 else 532 else
536 bits = 7U << 10; 533 bits = 7U << 10;
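Both this hunk and the ipath one above are the same cleanup: the PCI core caches the revision ID in struct pci_dev at enumeration time, so a config-space read with an error path collapses into a plain field access. In general terms:

	/* Before: explicit config read that can (in theory) fail */
	u8 rev;
	int ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	if (ret)
		return ret;

	/* After: the PCI core already read it for us */
	rev = pdev->revision;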
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index 1839194ea987..10bcd4ae5402 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -223,8 +223,9 @@ static int __init atakbd_init(void)
223 return -ENODEV; 223 return -ENODEV;
224 224
225 // need to init core driver if not already done so 225 // need to init core driver if not already done so
226 if (atari_keyb_init()) 226 error = atari_keyb_init();
227 return -ENODEV; 227 if (error)
228 return error;
228 229
229 atakbd_dev = input_allocate_device(); 230 atakbd_dev = input_allocate_device();
230 if (!atakbd_dev) 231 if (!atakbd_dev)
diff --git a/drivers/input/mouse/atarimouse.c b/drivers/input/mouse/atarimouse.c
index adf45b3040e9..5c4a692bf73a 100644
--- a/drivers/input/mouse/atarimouse.c
+++ b/drivers/input/mouse/atarimouse.c
@@ -77,15 +77,15 @@ static void atamouse_interrupt(char *buf)
77#endif 77#endif
78 78
79 /* only relative events get here */ 79 /* only relative events get here */
80 dx = buf[1]; 80 dx = buf[1];
81 dy = -buf[2]; 81 dy = buf[2];
82 82
83 input_report_rel(atamouse_dev, REL_X, dx); 83 input_report_rel(atamouse_dev, REL_X, dx);
84 input_report_rel(atamouse_dev, REL_Y, dy); 84 input_report_rel(atamouse_dev, REL_Y, dy);
85 85
86 input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x1); 86 input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x4);
87 input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); 87 input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2);
88 input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x4); 88 input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x1);
89 89
90 input_sync(atamouse_dev); 90 input_sync(atamouse_dev);
91 91
@@ -108,7 +108,7 @@ static int atamouse_open(struct input_dev *dev)
108static void atamouse_close(struct input_dev *dev) 108static void atamouse_close(struct input_dev *dev)
109{ 109{
110 ikbd_mouse_disable(); 110 ikbd_mouse_disable();
111 atari_mouse_interrupt_hook = NULL; 111 atari_input_mouse_interrupt_hook = NULL;
112} 112}
113 113
114static int __init atamouse_init(void) 114static int __init atamouse_init(void)
@@ -118,8 +118,9 @@ static int __init atamouse_init(void)
118 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) 118 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP))
119 return -ENODEV; 119 return -ENODEV;
120 120
121 if (!atari_keyb_init()) 121 error = atari_keyb_init();
122 return -ENODEV; 122 if (error)
123 return error;
123 124
124 atamouse_dev = input_allocate_device(); 125 atamouse_dev = input_allocate_device();
125 if (!atamouse_dev) 126 if (!atamouse_dev)
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 0aaa0597a622..34ae49dc557c 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -5,8 +5,10 @@ config LGUEST
5 ---help--- 5 ---help---
6 This is a very simple module which allows you to run 6 This is a very simple module which allows you to run
7 multiple instances of the same Linux kernel, using the 7 multiple instances of the same Linux kernel, using the
8 "lguest" command found in the Documentation/lguest directory. 8 "lguest" command found in the Documentation/virtual/lguest
9 directory.
10
9 Note that "lguest" is pronounced to rhyme with "fell quest", 11 Note that "lguest" is pronounced to rhyme with "fell quest",
10 not "rustyvisor". See Documentation/lguest/lguest.txt. 12 not "rustyvisor". See Documentation/virtual/lguest/lguest.txt.
11 13
12 If unsure, say N. If curious, say M. If masochistic, say Y. 14 If unsure, say N. If curious, say M. If masochistic, say Y.
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index 7d463c26124f..8ac947c7e7c7 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -18,7 +18,7 @@ Mastery: PREFIX=M
18Beer: 18Beer:
19 @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" 19 @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}"
20Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: 20Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery:
21 @sh ../../Documentation/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` 21 @sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
22Puppy: 22Puppy:
23 @clear 23 @clear
24 @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" 24 @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n"
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 8b021eb0d48c..6cccd60c594e 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -40,7 +40,7 @@
40#include <linux/init.h> 40#include <linux/init.h>
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/sysdev.h> 43#include <linux/syscore_ops.h>
44#include <linux/freezer.h> 44#include <linux/freezer.h>
45#include <linux/syscalls.h> 45#include <linux/syscalls.h>
46#include <linux/suspend.h> 46#include <linux/suspend.h>
@@ -2527,12 +2527,9 @@ void pmu_blink(int n)
2527#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 2527#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
2528int pmu_sys_suspended; 2528int pmu_sys_suspended;
2529 2529
2530static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state) 2530static int pmu_syscore_suspend(void)
2531{ 2531{
2532 if (state.event != PM_EVENT_SUSPEND || pmu_sys_suspended) 2532 /* Suspend PMU event interrupts */
2533 return 0;
2534
2535 /* Suspend PMU event interrupts */\
2536 pmu_suspend(); 2533 pmu_suspend();
2537 pmu_sys_suspended = 1; 2534 pmu_sys_suspended = 1;
2538 2535
@@ -2544,12 +2541,12 @@ static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state)
2544 return 0; 2541 return 0;
2545} 2542}
2546 2543
2547static int pmu_sys_resume(struct sys_device *sysdev) 2544static void pmu_syscore_resume(void)
2548{ 2545{
2549 struct adb_request req; 2546 struct adb_request req;
2550 2547
2551 if (!pmu_sys_suspended) 2548 if (!pmu_sys_suspended)
2552 return 0; 2549 return;
2553 2550
2554 /* Tell PMU we are ready */ 2551 /* Tell PMU we are ready */
2555 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); 2552 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
@@ -2562,50 +2559,21 @@ static int pmu_sys_resume(struct sys_device *sysdev)
2562 /* Resume PMU event interrupts */ 2559 /* Resume PMU event interrupts */
2563 pmu_resume(); 2560 pmu_resume();
2564 pmu_sys_suspended = 0; 2561 pmu_sys_suspended = 0;
2565
2566 return 0;
2567} 2562}
2568 2563
2569#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ 2564static struct syscore_ops pmu_syscore_ops = {
2570 2565 .suspend = pmu_syscore_suspend,
2571static struct sysdev_class pmu_sysclass = { 2566 .resume = pmu_syscore_resume,
2572 .name = "pmu",
2573};
2574
2575static struct sys_device device_pmu = {
2576 .cls = &pmu_sysclass,
2577};
2578
2579static struct sysdev_driver driver_pmu = {
2580#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
2581 .suspend = &pmu_sys_suspend,
2582 .resume = &pmu_sys_resume,
2583#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
2584}; 2567};
2585 2568
2586static int __init init_pmu_sysfs(void) 2569static int pmu_syscore_register(void)
2587{ 2570{
2588 int rc; 2571 register_syscore_ops(&pmu_syscore_ops);
2589 2572
2590 rc = sysdev_class_register(&pmu_sysclass);
2591 if (rc) {
2592 printk(KERN_ERR "Failed registering PMU sys class\n");
2593 return -ENODEV;
2594 }
2595 rc = sysdev_register(&device_pmu);
2596 if (rc) {
2597 printk(KERN_ERR "Failed registering PMU sys device\n");
2598 return -ENODEV;
2599 }
2600 rc = sysdev_driver_register(&pmu_sysclass, &driver_pmu);
2601 if (rc) {
2602 printk(KERN_ERR "Failed registering PMU sys driver\n");
2603 return -ENODEV;
2604 }
2605 return 0; 2573 return 0;
2606} 2574}
2607 2575subsys_initcall(pmu_syscore_register);
2608subsys_initcall(init_pmu_sysfs); 2576#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
2609 2577
2610EXPORT_SYMBOL(pmu_request); 2578EXPORT_SYMBOL(pmu_request);
2611EXPORT_SYMBOL(pmu_queue_request); 2579EXPORT_SYMBOL(pmu_queue_request);
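The sysdev-to-syscore conversion above, reduced to its skeleton (a sketch of the syscore API as this patch uses it; the my_* names are placeholders):

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int my_syscore_suspend(void)
{
	/* Runs late in suspend: one CPU online, interrupts disabled. */
	return 0;
}

static void my_syscore_resume(void)
{
	/* Mirror of suspend; returns void, resume cannot fail. */
}

static struct syscore_ops my_syscore_ops = {
	.suspend = my_syscore_suspend,
	.resume  = my_syscore_resume,
};

static int __init my_syscore_init(void)
{
	register_syscore_ops(&my_syscore_ops);
	return 0;
}
subsys_initcall(my_syscore_init);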
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 1735c84ff757..fe902338539b 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.18"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.18"
+#define MPT_LINUX_VERSION_COMMON	"3.04.19"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.19"
 #define WHAT_MAGIC_STRING	"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 66f94125de4e..7596aecd5072 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -5012,7 +5012,6 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
 	    (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
 		VirtTarget *vtarget = NULL;
 		u8 id, channel;
-		u32 log_info = le32_to_cpu(reply->IOCLogInfo);
 
 		id = sas_event_data->TargetID;
 		channel = sas_event_data->Bus;
@@ -5023,7 +5022,8 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
5023 "LogInfo (0x%x) available for " 5022 "LogInfo (0x%x) available for "
5024 "INTERNAL_DEVICE_RESET" 5023 "INTERNAL_DEVICE_RESET"
5025 "fw_id %d fw_channel %d\n", ioc->name, 5024 "fw_id %d fw_channel %d\n", ioc->name,
5026 log_info, id, channel)); 5025 le32_to_cpu(reply->IOCLogInfo),
5026 id, channel));
5027 if (vtarget->raidVolume) { 5027 if (vtarget->raidVolume) {
5028 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT 5028 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5029 "Skipping Raid Volume for inDMD\n", 5029 "Skipping Raid Volume for inDMD\n",
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 0d9b82a44540..a1d4ee6671be 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1415,11 +1415,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
 		ioc->name, SCpnt, done));
 
-	if (ioc->taskmgmt_quiesce_io) {
-		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
-			ioc->name, SCpnt));
+	if (ioc->taskmgmt_quiesce_io)
 		return SCSI_MLQUEUE_HOST_BUSY;
-	}
 
 	/*
 	 * Put together a MPT SCSI request...
@@ -1773,7 +1770,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 	int		scpnt_idx;
 	int		retval;
 	VirtDevice	*vdevice;
-	ulong		sn = SCpnt->serial_number;
 	MPT_ADAPTER	*ioc;
 
 	/* If we can't locate our host adapter structure, return FAILED status.
@@ -1859,8 +1855,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 	    vdevice->vtarget->id, vdevice->lun,
 	    ctx2abort, mptscsih_get_tm_timeout(ioc));
 
-	if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx &&
-	    SCpnt->serial_number == sn) {
+	if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx) {
 		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		    "task abort: command still in active list! (sc=%p)\n",
 		    ioc->name, SCpnt));
@@ -1873,9 +1868,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 	}
 
  out:
-	printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+	printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p)\n",
 	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
-	    SCpnt, SCpnt->serial_number);
+	    SCpnt);
 
 	return retval;
 }
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 6d9568d2ec59..8f61ba6aac23 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -867,6 +867,10 @@ static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
 	struct _x_config_parms cfg;
 	struct _CONFIG_PAGE_HEADER hdr;
 	int err = -EBUSY;
+	u32 nego_parms;
+	u32 period;
+	struct scsi_device *sdev;
+	int i;
 
 	/* don't allow updating nego parameters on RAID devices */
 	if (starget->channel == 0 &&
@@ -904,6 +908,24 @@ static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
 	pg1->Header.PageNumber = hdr.PageNumber;
 	pg1->Header.PageType = hdr.PageType;
 
+	nego_parms = le32_to_cpu(pg1->RequestedParameters);
+	period = (nego_parms & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK) >>
+		MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
+	if (period == 8) {
+		/* Turn on inline data padding for TAPE when running U320 */
+		for (i = 0 ; i < 16; i++) {
+			sdev = scsi_device_lookup_by_target(starget, i);
+			if (sdev && sdev->type == TYPE_TAPE) {
+				sdev_printk(KERN_DEBUG, sdev, MYIOC_s_FMT
+					    "IDP:ON\n", ioc->name);
+				nego_parms |= MPI_SCSIDEVPAGE1_RP_IDP;
+				pg1->RequestedParameters =
+				    cpu_to_le32(nego_parms);
+				break;
+			}
+		}
+	}
+
 	mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters));
 
 	if (mpt_config(ioc, &cfg)) {
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index f003957e8e1c..74fbe56321ff 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -361,7 +361,7 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
 	 */
 	error = le32_to_cpu(msg->body[0]);
 
-	osm_debug("Completed %ld\n", cmd->serial_number);
+	osm_debug("Completed %0x%p\n", cmd);
 
 	cmd->result = error & 0xff;
 	/*
@@ -678,7 +678,7 @@ static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt,
 	/* Queue the message */
 	i2o_msg_post(c, msg);
 
-	osm_debug("Issued %ld\n", SCpnt->serial_number);
+	osm_debug("Issued %0x%p\n", SCpnt);
 
 	return 0;
 
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4e007c6a4b44..d80dcdee88f3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -481,5 +481,6 @@ source "drivers/misc/cb710/Kconfig"
481source "drivers/misc/iwmc3200top/Kconfig" 481source "drivers/misc/iwmc3200top/Kconfig"
482source "drivers/misc/ti-st/Kconfig" 482source "drivers/misc/ti-st/Kconfig"
483source "drivers/misc/lis3lv02d/Kconfig" 483source "drivers/misc/lis3lv02d/Kconfig"
484source "drivers/misc/carma/Kconfig"
484 485
485endif # MISC_DEVICES 486endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f5468602961f..848e8464faab 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_PCH_PHUB) += pch_phub.o
 obj-y				+= ti-st/
 obj-$(CONFIG_AB8500_PWM)	+= ab8500-pwm.o
 obj-y				+= lis3lv02d/
+obj-y				+= carma/
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
new file mode 100644
index 000000000000..c90370ed712b
--- /dev/null
+++ b/drivers/misc/carma/Kconfig
@@ -0,0 +1,17 @@
1config CARMA_FPGA
2 tristate "CARMA DATA-FPGA Access Driver"
3 depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
4 select VIDEOBUF_DMA_SG
5 default n
6 help
7 Say Y here to include support for communicating with the data
8 processing FPGAs on the OVRO CARMA board.
9
10config CARMA_FPGA_PROGRAM
11 tristate "CARMA DATA-FPGA Programmer"
12 depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
13 select VIDEOBUF_DMA_SG
14 default n
15 help
16 Say Y here to include support for programming the data processing
17 FPGAs on the OVRO CARMA board.
diff --git a/drivers/misc/carma/Makefile b/drivers/misc/carma/Makefile
new file mode 100644
index 000000000000..ff36ac2ce534
--- /dev/null
+++ b/drivers/misc/carma/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_CARMA_FPGA) += carma-fpga.o
2obj-$(CONFIG_CARMA_FPGA_PROGRAM) += carma-fpga-program.o
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
new file mode 100644
index 000000000000..7ce6065dc20e
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -0,0 +1,1141 @@
1/*
2 * CARMA Board DATA-FPGA Programmer
3 *
4 * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/dma-mapping.h>
13#include <linux/of_platform.h>
14#include <linux/completion.h>
15#include <linux/miscdevice.h>
16#include <linux/dmaengine.h>
17#include <linux/interrupt.h>
18#include <linux/highmem.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/leds.h>
25#include <linux/slab.h>
26#include <linux/kref.h>
27#include <linux/fs.h>
28#include <linux/io.h>
29
30#include <media/videobuf-dma-sg.h>
31
32/* MPC8349EMDS specific get_immrbase() */
33#include <sysdev/fsl_soc.h>
34
35static const char drv_name[] = "carma-fpga-program";
36
37/*
38 * Firmware images are always this exact size
39 *
40 * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs)
41 * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs)
42 */
43#define FW_SIZE_EP2S90 12849552
44#define FW_SIZE_EP2S130 18662880
45
46struct fpga_dev {
47 struct miscdevice miscdev;
48
49 /* Reference count */
50 struct kref ref;
51
52 /* Device Registers */
53 struct device *dev;
54 void __iomem *regs;
55 void __iomem *immr;
56
57 /* Freescale DMA Device */
58 struct dma_chan *chan;
59
60 /* Interrupts */
61 int irq, status;
62 struct completion completion;
63
64 /* FPGA Bitfile */
65 struct mutex lock;
66
67 struct videobuf_dmabuf vb;
68 bool vb_allocated;
69
70 /* max size and written bytes */
71 size_t fw_size;
72 size_t bytes;
73};
74
75/*
76 * FPGA Bitfile Helpers
77 */
78
79/**
80 * fpga_drop_firmware_data() - drop the bitfile image from memory
81 * @priv: the driver's private data structure
82 *
83 * LOCKING: must hold priv->lock
84 */
85static void fpga_drop_firmware_data(struct fpga_dev *priv)
86{
87 videobuf_dma_free(&priv->vb);
88 priv->vb_allocated = false;
89 priv->bytes = 0;
90}
91
92/*
93 * Private Data Reference Count
94 */
95
96static void fpga_dev_remove(struct kref *ref)
97{
98 struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref);
99
100 /* free any firmware image that was not programmed */
101 fpga_drop_firmware_data(priv);
102
103 mutex_destroy(&priv->lock);
104 kfree(priv);
105}
106
107/*
108 * LED Trigger (could be a separate module)
109 */
110
111/*
112 * NOTE: this whole thing has the problem that whenever the LEDs are
113 * NOTE: first set to use the fpga trigger, they could be in the wrong state
114 */
115
116DEFINE_LED_TRIGGER(ledtrig_fpga);
117
118static void ledtrig_fpga_programmed(bool enabled)
119{
120 if (enabled)
121 led_trigger_event(ledtrig_fpga, LED_FULL);
122 else
123 led_trigger_event(ledtrig_fpga, LED_OFF);
124}
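/*
 * Editor's note: led_trigger_register_simple("fpga", ...) in fpga_init()
 * below registers this as a trigger named "fpga". An LED can then be
 * bound to it from userspace in the usual way, e.g.
 *
 *     echo fpga > /sys/class/leds/<led-name>/trigger
 *
 * (the LED name itself is board-specific).
 */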
125
126/*
127 * FPGA Register Helpers
128 */
129
130/* Register Definitions */
131#define FPGA_CONFIG_CONTROL 0x40
132#define FPGA_CONFIG_STATUS 0x44
133#define FPGA_CONFIG_FIFO_SIZE 0x48
134#define FPGA_CONFIG_FIFO_USED 0x4C
135#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50
136#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54
137
138#define FPGA_FIFO_ADDRESS 0x3000
139
140static int fpga_fifo_size(void __iomem *regs)
141{
142 return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE);
143}
144
145#define CFG_STATUS_ERR_MASK 0xfffe
146
147static int fpga_config_error(void __iomem *regs)
148{
149 return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK;
150}
151
152static int fpga_fifo_empty(void __iomem *regs)
153{
154 return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0;
155}
156
157static void fpga_fifo_write(void __iomem *regs, u32 val)
158{
159 iowrite32be(val, regs + FPGA_FIFO_ADDRESS);
160}
161
162static void fpga_set_byte_count(void __iomem *regs, u32 count)
163{
164 iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
165}
166
167#define CFG_CTL_ENABLE (1 << 0)
168#define CFG_CTL_RESET (1 << 1)
169#define CFG_CTL_DMA (1 << 2)
170
171static void fpga_programmer_enable(struct fpga_dev *priv, bool dma)
172{
173 u32 val;
174
175 val = (dma) ? (CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE;
176 iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
177}
178
179static void fpga_programmer_disable(struct fpga_dev *priv)
180{
181 iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
182}
183
184static void fpga_dump_registers(struct fpga_dev *priv)
185{
186 u32 control, status, size, used, total, curr;
187
188 /* good status: do nothing */
189 if (priv->status == 0)
190 return;
191
192 /* Dump all status registers */
193 control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL);
194 status = ioread32be(priv->regs + FPGA_CONFIG_STATUS);
195 size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE);
196 used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED);
197 total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
198 curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT);
199
200 dev_err(priv->dev, "Configuration failed, dumping status registers\n");
201 dev_err(priv->dev, "Control: 0x%.8x\n", control);
202 dev_err(priv->dev, "Status: 0x%.8x\n", status);
203 dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size);
204 dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used);
205 dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total);
206 dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr);
207}
208
209/*
210 * FPGA Power Supply Code
211 */
212
213#define CTL_PWR_CONTROL 0x2006
214#define CTL_PWR_STATUS 0x200A
215#define CTL_PWR_FAIL 0x200B
216
217#define PWR_CONTROL_ENABLE 0x01
218
219#define PWR_STATUS_ERROR_MASK 0x10
220#define PWR_STATUS_GOOD 0x0f
221
222/*
223 * Determine if the FPGA power is good for all supplies
224 */
225static bool fpga_power_good(struct fpga_dev *priv)
226{
227 u8 val;
228
229 val = ioread8(priv->regs + CTL_PWR_STATUS);
230 if (val & PWR_STATUS_ERROR_MASK)
231 return false;
232
233 return val == PWR_STATUS_GOOD;
234}
235
236/*
237 * Disable the FPGA power supplies
238 */
239static void fpga_disable_power_supplies(struct fpga_dev *priv)
240{
241 unsigned long start;
242 u8 val;
243
244 iowrite8(0x0, priv->regs + CTL_PWR_CONTROL);
245
246 /*
247 * Wait 500ms for the power rails to discharge
248 *
249 * Without this delay, the CTL-CPLD state machine can get into a
250 * state where it is waiting for the power-goods to assert, but they
251 * never do. This only happens when enabling and disabling the
252 * power sequencer very rapidly.
253 *
254 * The loop below will also wait for the power goods to de-assert,
255 * but testing has shown that they are always disabled by the time
256 * the sleep completes. However, omitting the sleep and only waiting
257 * for the power-goods to de-assert was not sufficient to ensure
258 * that the power sequencer would not wedge itself.
259 */
260 msleep(500);
261
262 start = jiffies;
263 while (time_before(jiffies, start + HZ)) {
264 val = ioread8(priv->regs + CTL_PWR_STATUS);
265 if (!(val & PWR_STATUS_GOOD))
266 break;
267
268 usleep_range(5000, 10000);
269 }
270
271 val = ioread8(priv->regs + CTL_PWR_STATUS);
272 if (val & PWR_STATUS_GOOD) {
273 dev_err(priv->dev, "power disable failed: "
274 "power goods: status 0x%.2x\n", val);
275 }
276
277 if (val & PWR_STATUS_ERROR_MASK) {
278 dev_err(priv->dev, "power disable failed: "
279 "alarm bit set: status 0x%.2x\n", val);
280 }
281}
282
283/**
284 * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies
285 * @priv: the driver's private data structure
286 *
287 * Enable the DATA-FPGA power supplies, waiting up to 1 second for
288 * them to enable successfully.
289 *
290 * Returns 0 on success, -ERRNO otherwise
291 */
292static int fpga_enable_power_supplies(struct fpga_dev *priv)
293{
294 unsigned long start = jiffies;
295
296 if (fpga_power_good(priv)) {
297 dev_dbg(priv->dev, "power was already good\n");
298 return 0;
299 }
300
301 iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL);
302 while (time_before(jiffies, start + HZ)) {
303 if (fpga_power_good(priv))
304 return 0;
305
306 usleep_range(5000, 10000);
307 }
308
309 return fpga_power_good(priv) ? 0 : -ETIMEDOUT;
310}
311
312/*
313 * Determine if the FPGA power supplies are all enabled
314 */
315static bool fpga_power_enabled(struct fpga_dev *priv)
316{
317 u8 val;
318
319 val = ioread8(priv->regs + CTL_PWR_CONTROL);
320 if (val & PWR_CONTROL_ENABLE)
321 return true;
322
323 return false;
324}
325
326/*
327 * Determine if the FPGA's are programmed and running correctly
328 */
329static bool fpga_running(struct fpga_dev *priv)
330{
331 if (!fpga_power_good(priv))
332 return false;
333
334 /* Check the config done bit */
335 return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18);
336}
337
338/*
339 * FPGA Programming Code
340 */
341
342/**
343 * fpga_program_block() - put a block of data into the programmer's FIFO
344 * @priv: the driver's private data structure
345 * @buf: the data to program
346 * @count: the length of data to program (must be a multiple of 4 bytes)
347 *
348 * Returns 0 on success, -ERRNO otherwise
349 */
350static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
351{
352 u32 *data = buf;
353 int size = fpga_fifo_size(priv->regs);
354 int i, len;
355 unsigned long timeout;
356
357 /* enforce correct data length for the FIFO */
358 BUG_ON(count % 4 != 0);
359
360 while (count > 0) {
361
362 /* Get the size of the block to write (maximum is FIFO_SIZE) */
363 len = min_t(size_t, count, size);
364 timeout = jiffies + HZ / 4;
365
366 /* Write the block */
367 for (i = 0; i < len / 4; i++)
368 fpga_fifo_write(priv->regs, data[i]);
369
370 /* Update the amounts left */
371 count -= len;
372 data += len / 4;
373
374 /* Wait for the fifo to empty */
375 while (true) {
376
377 if (fpga_fifo_empty(priv->regs)) {
378 break;
379 } else {
380 dev_dbg(priv->dev, "Fifo not empty\n");
381 cpu_relax();
382 }
383
384 if (fpga_config_error(priv->regs)) {
385 dev_err(priv->dev, "Error detected\n");
386 return -EIO;
387 }
388
389 if (time_after(jiffies, timeout)) {
390 dev_err(priv->dev, "Fifo drain timeout\n");
391 return -ETIMEDOUT;
392 }
393
394 usleep_range(5000, 10000);
395 }
396 }
397
398 return 0;
399}
400
401/**
402 * fpga_program_cpu() - program the DATA-FPGA's using the CPU
403 * @priv: the driver's private data structure
404 *
405 * This is useful when the DMA programming method fails. It is possible to
406 * wedge the Freescale DMA controller such that the DMA programming method
407 * always fails. This method has always succeeded.
408 *
409 * Returns 0 on success, -ERRNO otherwise
410 */
411static noinline int fpga_program_cpu(struct fpga_dev *priv)
412{
413 int ret;
414
415 /* Disable the programmer */
416 fpga_programmer_disable(priv);
417
418 /* Set the total byte count */
419 fpga_set_byte_count(priv->regs, priv->bytes);
420 dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
421
422 /* Enable the controller for programming */
423 fpga_programmer_enable(priv, false);
424 dev_dbg(priv->dev, "enabled the controller\n");
425
426 /* Write each chunk of the FPGA bitfile to FPGA programmer */
427 ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes);
428 if (ret)
429 goto out_disable_controller;
430
431 /* Wait for the interrupt handler to signal that programming finished */
432 ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
433 if (!ret) {
434 dev_err(priv->dev, "Timed out waiting for completion\n");
435 ret = -ETIMEDOUT;
436 goto out_disable_controller;
437 }
438
439 /* Retrieve the status from the interrupt handler */
440 ret = priv->status;
441
442out_disable_controller:
443 fpga_programmer_disable(priv);
444 return ret;
445}
446
447#define FIFO_DMA_ADDRESS 0xf0003000
448#define FIFO_MAX_LEN 4096
449
450/**
451 * fpga_program_dma() - program the DATA-FPGA's using the DMA engine
452 * @priv: the driver's private data structure
453 *
454 * Program the DATA-FPGA's using the Freescale DMA engine. This requires that
455 * the engine is programmed such that the hardware DMA request lines can
456 * control the entire DMA transaction. The system controller FPGA then
457 * completely offloads the programming from the CPU.
458 *
459 * Returns 0 on success, -ERRNO otherwise
460 */
461static noinline int fpga_program_dma(struct fpga_dev *priv)
462{
463 struct videobuf_dmabuf *vb = &priv->vb;
464 struct dma_chan *chan = priv->chan;
465 struct dma_async_tx_descriptor *tx;
466 size_t num_pages, len, avail = 0;
467 struct dma_slave_config config;
468 struct scatterlist *sg;
469 struct sg_table table;
470 dma_cookie_t cookie;
471 int ret, i;
472
473 /* Disable the programmer */
474 fpga_programmer_disable(priv);
475
476 /* Allocate a scatterlist for the DMA destination */
477 num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN);
478 ret = sg_alloc_table(&table, num_pages, GFP_KERNEL);
479 if (ret) {
480 dev_err(priv->dev, "Unable to allocate dst scatterlist\n");
481 ret = -ENOMEM;
482 goto out_return;
483 }
484
485 /*
486 * This is an ugly hack
487 *
488 * We fill in a scatterlist as if it were mapped for DMA. This is
489 * necessary because there exists no better structure for this
490 * inside the kernel code.
491 *
492 * As an added bonus, we can use the DMAEngine API for all of this,
493 * rather than inventing another extremely similar API.
494 */
495 avail = priv->bytes;
496 for_each_sg(table.sgl, sg, num_pages, i) {
497 len = min_t(size_t, avail, FIFO_MAX_LEN);
498 sg_dma_address(sg) = FIFO_DMA_ADDRESS;
499 sg_dma_len(sg) = len;
500
501 avail -= len;
502 }
503
504 /* Map the buffer for DMA */
505 ret = videobuf_dma_map(priv->dev, &priv->vb);
506 if (ret) {
507 dev_err(priv->dev, "Unable to map buffer for DMA\n");
508 goto out_free_table;
509 }
510
511 /*
512 * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per
513 * transaction, and then put it under external control
514 */
515 memset(&config, 0, sizeof(config));
516 config.direction = DMA_TO_DEVICE;
517 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
518 config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
519 ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
520 (unsigned long)&config);
521 if (ret) {
522 dev_err(priv->dev, "DMA slave configuration failed\n");
523 goto out_dma_unmap;
524 }
525
526 ret = chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1);
527 if (ret) {
528 dev_err(priv->dev, "DMA external control setup failed\n");
529 goto out_dma_unmap;
530 }
531
532 /* setup and submit the DMA transaction */
533 tx = chan->device->device_prep_dma_sg(chan,
534 table.sgl, num_pages,
535 vb->sglist, vb->sglen, 0);
536 if (!tx) {
537 dev_err(priv->dev, "Unable to prep DMA transaction\n");
538 ret = -ENOMEM;
539 goto out_dma_unmap;
540 }
541
542 cookie = tx->tx_submit(tx);
543 if (dma_submit_error(cookie)) {
544 dev_err(priv->dev, "Unable to submit DMA transaction\n");
545 ret = -ENOMEM;
546 goto out_dma_unmap;
547 }
548
549 dma_async_memcpy_issue_pending(chan);
550
551 /* Set the total byte count */
552 fpga_set_byte_count(priv->regs, priv->bytes);
553 dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
554
555 /* Enable the controller for DMA programming */
556 fpga_programmer_enable(priv, true);
557 dev_dbg(priv->dev, "enabled the controller\n");
558
559 /* Wait for the interrupt handler to signal that programming finished */
560 ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
561 if (!ret) {
562 dev_err(priv->dev, "Timed out waiting for completion\n");
563 ret = -ETIMEDOUT;
564 goto out_disable_controller;
565 }
566
567 /* Retrieve the status from the interrupt handler */
568 ret = priv->status;
569
570out_disable_controller:
571 fpga_programmer_disable(priv);
572out_dma_unmap:
573 videobuf_dma_unmap(priv->dev, vb);
574out_free_table:
575 sg_free_table(&table);
576out_return:
577 return ret;
578}
579
580/*
581 * Interrupt Handling
582 */
583
584static irqreturn_t fpga_irq(int irq, void *dev_id)
585{
586 struct fpga_dev *priv = dev_id;
587
588 /* Save the status */
589 priv->status = fpga_config_error(priv->regs) ? -EIO : 0;
590 dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status);
591 fpga_dump_registers(priv);
592
593 /* Disabling the programmer clears the interrupt */
594 fpga_programmer_disable(priv);
595
596 /* Notify any waiters */
597 complete(&priv->completion);
598
599 return IRQ_HANDLED;
600}
601
602/*
603 * SYSFS Helpers
604 */
605
606/**
607 * fpga_do_stop() - deconfigure (reset) the DATA-FPGA's
608 * @priv: the driver's private data structure
609 *
610 * LOCKING: must hold priv->lock
611 */
612static int fpga_do_stop(struct fpga_dev *priv)
613{
614 u32 val;
615
616 /* Set the led to unprogrammed */
617 ledtrig_fpga_programmed(false);
618
619 /* Pulse the config line to reset the FPGA's */
620 val = CFG_CTL_ENABLE | CFG_CTL_RESET;
621 iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
622 iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
623
624 return 0;
625}
626
627static noinline int fpga_do_program(struct fpga_dev *priv)
628{
629 int ret;
630
631 if (priv->bytes != priv->fw_size) {
632 dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, "
633 "should be %zu bytes\n",
634 priv->bytes, priv->fw_size);
635 return -EINVAL;
636 }
637
638 if (!fpga_power_enabled(priv)) {
639 dev_err(priv->dev, "Power not enabled\n");
640 return -EINVAL;
641 }
642
643 if (!fpga_power_good(priv)) {
644 dev_err(priv->dev, "Power not good\n");
645 return -EINVAL;
646 }
647
648 /* Set the LED to unprogrammed */
649 ledtrig_fpga_programmed(false);
650
651 /* Try to program the FPGA's using DMA */
652 ret = fpga_program_dma(priv);
653
654 /* If DMA failed or doesn't exist, try with CPU */
655 if (ret) {
656 dev_warn(priv->dev, "Falling back to CPU programming\n");
657 ret = fpga_program_cpu(priv);
658 }
659
660 if (ret) {
661 dev_err(priv->dev, "Unable to program FPGA's\n");
662 return ret;
663 }
664
665 /* Drop the firmware bitfile from memory */
666 fpga_drop_firmware_data(priv);
667
668 dev_dbg(priv->dev, "FPGA programming successful\n");
669 ledtrig_fpga_programmed(true);
670
671 return 0;
672}
673
674/*
675 * File Operations
676 */
677
678static int fpga_open(struct inode *inode, struct file *filp)
679{
680 /*
681 * The miscdevice layer puts our struct miscdevice into the
682 * filp->private_data field. We use this to find our private
683 * data and then overwrite it with our own private structure.
684 */
685 struct fpga_dev *priv = container_of(filp->private_data,
686 struct fpga_dev, miscdev);
687 unsigned int nr_pages;
688 int ret;
689
690 /* We only allow one process at a time */
691 ret = mutex_lock_interruptible(&priv->lock);
692 if (ret)
693 return ret;
694
695 filp->private_data = priv;
696 kref_get(&priv->ref);
697
698 /* Truncation: drop any existing data */
699 if (filp->f_flags & O_TRUNC)
700 priv->bytes = 0;
701
702 /* Check if we have already allocated a buffer */
703 if (priv->vb_allocated)
704 return 0;
705
706 /* Allocate a buffer to hold enough data for the bitfile */
707 nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
708 ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages);
709 if (ret) {
710 dev_err(priv->dev, "unable to allocate data buffer\n");
711 mutex_unlock(&priv->lock);
712 kref_put(&priv->ref, fpga_dev_remove);
713 return ret;
714 }
715
716 priv->vb_allocated = true;
717 return 0;
718}
719
720static int fpga_release(struct inode *inode, struct file *filp)
721{
722 struct fpga_dev *priv = filp->private_data;
723
724 mutex_unlock(&priv->lock);
725 kref_put(&priv->ref, fpga_dev_remove);
726 return 0;
727}
728
729static ssize_t fpga_write(struct file *filp, const char __user *buf,
730 size_t count, loff_t *f_pos)
731{
732 struct fpga_dev *priv = filp->private_data;
733
734 /* FPGA bitfiles have an exact size: disallow anything else */
735 if (priv->bytes >= priv->fw_size)
736 return -ENOSPC;
737
738 count = min_t(size_t, priv->fw_size - priv->bytes, count);
739 if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count))
740 return -EFAULT;
741
742 priv->bytes += count;
743 return count;
744}
745
746static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
747 loff_t *f_pos)
748{
749 struct fpga_dev *priv = filp->private_data;
750
751 count = min_t(size_t, priv->bytes - *f_pos, count);
752 if (copy_to_user(buf, priv->vb.vaddr + *f_pos, count))
753 return -EFAULT;
754
755 *f_pos += count;
756 return count;
757}
758
759static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
760{
761 struct fpga_dev *priv = filp->private_data;
762 loff_t newpos;
763
764 /* only read-only opens are allowed to seek */
765 if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
766 return -EINVAL;
767
768 switch (origin) {
769 case SEEK_SET: /* seek relative to the beginning of the file */
770 newpos = offset;
771 break;
772 case SEEK_CUR: /* seek relative to current position in the file */
773 newpos = filp->f_pos + offset;
774 break;
775 case SEEK_END: /* seek relative to the end of the file */
776 newpos = priv->fw_size - offset;
777 break;
778 default:
779 return -EINVAL;
780 }
781
782 /* check for sanity */
783 if (newpos > priv->fw_size)
784 return -EINVAL;
785
786 filp->f_pos = newpos;
787 return newpos;
788}
789
790static const struct file_operations fpga_fops = {
791 .open = fpga_open,
792 .release = fpga_release,
793 .write = fpga_write,
794 .read = fpga_read,
795 .llseek = fpga_llseek,
796};
797
798/*
799 * Device Attributes
800 */
801
802static ssize_t pfail_show(struct device *dev, struct device_attribute *attr,
803 char *buf)
804{
805 struct fpga_dev *priv = dev_get_drvdata(dev);
806 u8 val;
807
808 val = ioread8(priv->regs + CTL_PWR_FAIL);
809 return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val);
810}
811
812static ssize_t pgood_show(struct device *dev, struct device_attribute *attr,
813 char *buf)
814{
815 struct fpga_dev *priv = dev_get_drvdata(dev);
816 return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv));
817}
818
819static ssize_t penable_show(struct device *dev, struct device_attribute *attr,
820 char *buf)
821{
822 struct fpga_dev *priv = dev_get_drvdata(dev);
823 return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv));
824}
825
826static ssize_t penable_store(struct device *dev, struct device_attribute *attr,
827 const char *buf, size_t count)
828{
829 struct fpga_dev *priv = dev_get_drvdata(dev);
830 unsigned long val;
831 int ret;
832
833 if (strict_strtoul(buf, 0, &val))
834 return -EINVAL;
835
836 if (val) {
837 ret = fpga_enable_power_supplies(priv);
838 if (ret)
839 return ret;
840 } else {
841 fpga_do_stop(priv);
842 fpga_disable_power_supplies(priv);
843 }
844
845 return count;
846}
847
848static ssize_t program_show(struct device *dev, struct device_attribute *attr,
849 char *buf)
850{
851 struct fpga_dev *priv = dev_get_drvdata(dev);
852 return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv));
853}
854
855static ssize_t program_store(struct device *dev, struct device_attribute *attr,
856 const char *buf, size_t count)
857{
858 struct fpga_dev *priv = dev_get_drvdata(dev);
859 unsigned long val;
860 int ret;
861
862 if (strict_strtoul(buf, 0, &val))
863 return -EINVAL;
864
865 /* We can't have an image writer and be programming simultaneously */
866 if (mutex_lock_interruptible(&priv->lock))
867 return -ERESTARTSYS;
868
869 /* Program or Reset the FPGA's */
870 ret = val ? fpga_do_program(priv) : fpga_do_stop(priv);
871 if (ret)
872 goto out_unlock;
873
874 /* Success */
875 ret = count;
876
877out_unlock:
878 mutex_unlock(&priv->lock);
879 return ret;
880}
881
882static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL);
883static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL);
884static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR,
885 penable_show, penable_store);
886
887static DEVICE_ATTR(program, S_IRUGO | S_IWUSR,
888 program_show, program_store);
889
890static struct attribute *fpga_attributes[] = {
891 &dev_attr_power_fail.attr,
892 &dev_attr_power_good.attr,
893 &dev_attr_power_enable.attr,
894 &dev_attr_program.attr,
895 NULL,
896};
897
898static const struct attribute_group fpga_attr_group = {
899 .attrs = fpga_attributes,
900};
901
902/*
903 * OpenFirmware Device Subsystem
904 */
905
906#define SYS_REG_VERSION 0x00
907#define SYS_REG_GEOGRAPHIC 0x10
908
909static bool dma_filter(struct dma_chan *chan, void *data)
910{
911 /*
912 * DMA Channel #0 is the only acceptable device
913 *
914 * This probably won't survive an unload/load cycle of the Freescale
915 * DMAEngine driver, but that won't be a problem
916 */
917 return chan->chan_id == 0 && chan->device->dev_id == 0;
918}
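/*
 * Editor's note: dma_request_channel() below offers every unclaimed DMA
 * channel in the system to this filter in turn, and reserves the first
 * one for which it returns true -- here, channel 0 of DMA device 0.
 */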
919
920static int fpga_of_remove(struct platform_device *op)
921{
922 struct fpga_dev *priv = dev_get_drvdata(&op->dev);
923 struct device *this_device = priv->miscdev.this_device;
924
925 sysfs_remove_group(&this_device->kobj, &fpga_attr_group);
926 misc_deregister(&priv->miscdev);
927
928 free_irq(priv->irq, priv);
929 irq_dispose_mapping(priv->irq);
930
931 /* make sure the power supplies are off */
932 fpga_disable_power_supplies(priv);
933
934 /* unmap registers */
935 iounmap(priv->immr);
936 iounmap(priv->regs);
937
938 dma_release_channel(priv->chan);
939
940 /* drop our reference to the private data structure */
941 kref_put(&priv->ref, fpga_dev_remove);
942 return 0;
943}
944
945/* CTL-CPLD Version Register */
946#define CTL_CPLD_VERSION 0x2000
947
948static int fpga_of_probe(struct platform_device *op,
949 const struct of_device_id *match)
950{
951 struct device_node *of_node = op->dev.of_node;
952 struct device *this_device;
953 struct fpga_dev *priv;
954 dma_cap_mask_t mask;
955 u32 ver;
956 int ret;
957
958 /* Allocate private data */
959 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
960 if (!priv) {
961 dev_err(&op->dev, "Unable to allocate private data\n");
962 ret = -ENOMEM;
963 goto out_return;
964 }
965
966 /* Setup the miscdevice */
967 priv->miscdev.minor = MISC_DYNAMIC_MINOR;
968 priv->miscdev.name = drv_name;
969 priv->miscdev.fops = &fpga_fops;
970
971 kref_init(&priv->ref);
972
973 dev_set_drvdata(&op->dev, priv);
974 priv->dev = &op->dev;
975 mutex_init(&priv->lock);
976 init_completion(&priv->completion);
977 videobuf_dma_init(&priv->vb);
978
979 dev_set_drvdata(priv->dev, priv);
980 dma_cap_zero(mask);
981 dma_cap_set(DMA_MEMCPY, mask);
982 dma_cap_set(DMA_INTERRUPT, mask);
983 dma_cap_set(DMA_SLAVE, mask);
984 dma_cap_set(DMA_SG, mask);
985
986 /* Get control of DMA channel #0 */
987 priv->chan = dma_request_channel(mask, dma_filter, NULL);
988 if (!priv->chan) {
989 dev_err(&op->dev, "Unable to acquire DMA channel #0\n");
990 ret = -ENODEV;
991 goto out_free_priv;
992 }
993
994 /* Remap the registers for use */
995 priv->regs = of_iomap(of_node, 0);
996 if (!priv->regs) {
997 dev_err(&op->dev, "Unable to ioremap registers\n");
998 ret = -ENOMEM;
999 goto out_dma_release_channel;
1000 }
1001
1002 /* Remap the IMMR for use */
1003 priv->immr = ioremap(get_immrbase(), 0x100000);
1004 if (!priv->immr) {
1005 dev_err(&op->dev, "Unable to ioremap IMMR\n");
1006 ret = -ENOMEM;
1007 goto out_unmap_regs;
1008 }
1009
1010 /*
1011 * Check that external DMA is configured
1012 *
1013 * U-Boot does this for us, but we should check it and bail out if
1014 * there is a problem. Failing to have this register setup correctly
1015 * will cause the DMA controller to transfer a single cacheline
1016 * worth of data, then wedge itself.
1017 */
1018 if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) {
1019 dev_err(&op->dev, "External DMA control not configured\n");
1020 ret = -ENODEV;
1021 goto out_unmap_immr;
1022 }
1023
1024 /*
1025 * Check the CTL-CPLD version
1026 *
1027 * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we
1028 * don't want to run on any version of the CTL-CPLD that does not use
1029 * a compatible register layout.
1030 *
1031 * v2: changed register layout, added power sequencer
1032 * v3: added glitch filter on the i2c overcurrent/overtemp outputs
1033 */
1034 ver = ioread8(priv->regs + CTL_CPLD_VERSION);
1035 if (ver != 0x02 && ver != 0x03) {
1036 dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n");
1037 ret = -ENODEV;
1038 goto out_unmap_immr;
1039 }
1040
1041 /* Set the exact size that the firmware image should be */
1042 ver = ioread32be(priv->regs + SYS_REG_VERSION);
1043 priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90;
1044
1045 /* Find the correct IRQ number */
1046 priv->irq = irq_of_parse_and_map(of_node, 0);
1047 if (priv->irq == NO_IRQ) {
1048 dev_err(&op->dev, "Unable to find IRQ line\n");
1049 ret = -ENODEV;
1050 goto out_unmap_immr;
1051 }
1052
1053 /* Request the IRQ */
1054 ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv);
1055 if (ret) {
1056 dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq);
1057 ret = -ENODEV;
1058 goto out_irq_dispose_mapping;
1059 }
1060
1061 /* Reset and stop the FPGA's, just in case */
1062 fpga_do_stop(priv);
1063
1064 /* Register the miscdevice */
1065 ret = misc_register(&priv->miscdev);
1066 if (ret) {
1067 dev_err(&op->dev, "Unable to register miscdevice\n");
1068 goto out_free_irq;
1069 }
1070
1071 /* Create the sysfs files */
1072 this_device = priv->miscdev.this_device;
1073 dev_set_drvdata(this_device, priv);
1074 ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group);
1075 if (ret) {
1076 dev_err(&op->dev, "Unable to create sysfs files\n");
1077 goto out_misc_deregister;
1078 }
1079
1080 dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n",
1081 (ver & (1 << 17)) ? "Correlator" : "Digitizer",
1082 (ver & (1 << 16)) ? "B" : "A",
1083 (ver & (1 << 18)) ? "EP2S130" : "EP2S90");
1084
1085 return 0;
1086
1087out_misc_deregister:
1088 misc_deregister(&priv->miscdev);
1089out_free_irq:
1090 free_irq(priv->irq, priv);
1091out_irq_dispose_mapping:
1092 irq_dispose_mapping(priv->irq);
1093out_unmap_immr:
1094 iounmap(priv->immr);
1095out_unmap_regs:
1096 iounmap(priv->regs);
1097out_dma_release_channel:
1098 dma_release_channel(priv->chan);
1099out_free_priv:
1100 kref_put(&priv->ref, fpga_dev_remove);
1101out_return:
1102 return ret;
1103}
1104
1105static struct of_device_id fpga_of_match[] = {
1106 { .compatible = "carma,fpga-programmer", },
1107 {},
1108};
1109
1110static struct of_platform_driver fpga_of_driver = {
1111 .probe = fpga_of_probe,
1112 .remove = fpga_of_remove,
1113 .driver = {
1114 .name = drv_name,
1115 .of_match_table = fpga_of_match,
1116 .owner = THIS_MODULE,
1117 },
1118};
1119
1120/*
1121 * Module Init / Exit
1122 */
1123
1124static int __init fpga_init(void)
1125{
1126 led_trigger_register_simple("fpga", &ledtrig_fpga);
1127 return of_register_platform_driver(&fpga_of_driver);
1128}
1129
1130static void __exit fpga_exit(void)
1131{
1132 of_unregister_platform_driver(&fpga_of_driver);
1133 led_trigger_unregister_simple(ledtrig_fpga);
1134}
1135
1136MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
1137MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer");
1138MODULE_LICENSE("GPL");
1139
1140module_init(fpga_init);
1141module_exit(fpga_exit);
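
With the file operations and sysfs attributes above in place, the userspace flow for (re)programming is: write the bitfile into the misc device, close it, then write 1 to the program attribute. A hedged sketch of that flow; the device node name follows drv_name above, while the sysfs path and firmware filename are assumptions that may differ per system:

#include <fcntl.h>
#include <unistd.h>

int program_fpga(void)
{
	char buf[4096];
	ssize_t n;
	int fw, dev, attr;

	fw = open("/lib/firmware/carma.bin", O_RDONLY);	/* hypothetical bitfile */
	dev = open("/dev/carma-fpga-program", O_WRONLY | O_TRUNC);
	if (fw < 0 || dev < 0)
		return -1;

	/* fpga_write() accumulates bytes until exactly fw_size arrive */
	while ((n = read(fw, buf, sizeof(buf))) > 0)
		if (write(dev, buf, n) != n)
			return -1;

	/* closing releases the driver mutex so programming can proceed */
	close(dev);
	close(fw);

	/* assumed sysfs location of the "program" attribute */
	attr = open("/sys/class/misc/carma-fpga-program/program", O_WRONLY);
	if (attr < 0 || write(attr, "1", 1) != 1)
		return -1;
	close(attr);
	return 0;
}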
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
new file mode 100644
index 000000000000..3965821fef17
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga.c
@@ -0,0 +1,1433 @@
1/*
2 * CARMA DATA-FPGA Access Driver
3 *
4 * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/*
13 * FPGA Memory Dump Format
14 *
15 * FPGA #0 control registers (32 x 32-bit words)
16 * FPGA #1 control registers (32 x 32-bit words)
17 * FPGA #2 control registers (32 x 32-bit words)
18 * FPGA #3 control registers (32 x 32-bit words)
19 * SYSFPGA control registers (32 x 32-bit words)
20 * FPGA #0 correlation array (NUM_CORL0 correlation blocks)
21 * FPGA #1 correlation array (NUM_CORL1 correlation blocks)
22 * FPGA #2 correlation array (NUM_CORL2 correlation blocks)
23 * FPGA #3 correlation array (NUM_CORL3 correlation blocks)
24 *
25 * Each correlation array consists of:
26 *
27 * Correlation Data (2 x NUM_LAGSn x 32-bit words)
28 * Pipeline Metadata (2 x NUM_METAn x 32-bit words)
29 * Quantization Counters (2 x NUM_QCNTn x 32-bit words)
30 *
31 * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from
32 * the FPGA configuration registers. They do not change once the FPGA's
33 * have been programmed; they only change on re-programming.
34 */
35
36/*
37 * Basic Description:
38 *
39 * This driver is used to capture correlation spectra off of the four data
40 * processing FPGAs. The FPGAs are often reprogrammed at runtime, therefore
41 * this driver supports dynamic enable/disable of capture while the device
42 * remains open.
43 *
44 * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast
45 * capture rate, all buffers are pre-allocated to avoid any potentially long
46 * running memory allocations while capturing.
47 *
48 * There are two lists and one pointer which are used to keep track of the
49 * different states of data buffers.
50 *
51 * 1) free list
52 * This list holds all empty data buffers which are ready to receive data.
53 *
54 * 2) inflight pointer
55 * This pointer holds the currently inflight data buffer. This buffer is having
56 * data copied into it by the DMA engine.
57 *
58 * 3) used list
59 * This list holds data buffers which have been filled, and are waiting to be
60 * read by userspace.
61 *
62 * All buffers start life on the free list, then move successively to the
63 * inflight pointer, and then to the used list. After they have been read by
64 * userspace, they are moved back to the free list. The cycle repeats as long
65 * as necessary.
66 *
67 * It should be noted that all buffers are mapped and ready for DMA when they
68 * are on any of the three lists. They are only unmapped when they are in the
69 * process of being read by userspace.
70 */
71
72/*
73 * Notes on the IRQ masking scheme:
74 *
75 * The IRQ masking scheme here is different than most other hardware. The only
76 * way for the DATA-FPGAs to detect if the kernel has taken too long to copy
77 * the data is if the status registers are not cleared before the next
78 * correlation data dump is ready.
79 *
80 * The interrupt line is connected to the status registers, such that when they
81 * are cleared, the interrupt is de-asserted. Therein lies our problem. We need
82 * to schedule a long-running DMA operation and return from the interrupt
83 * handler quickly, but we cannot clear the status registers.
84 *
85 * To handle this, the system controller FPGA has the capability to connect the
86 * interrupt line to a user-controlled GPIO pin. This pin is driven high
87 * (unasserted) and left that way. To mask the interrupt, we change the
88 * interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
89 */
90
91#include <linux/of_platform.h>
92#include <linux/dma-mapping.h>
93#include <linux/miscdevice.h>
94#include <linux/interrupt.h>
95#include <linux/dmaengine.h>
96#include <linux/seq_file.h>
97#include <linux/highmem.h>
98#include <linux/debugfs.h>
99#include <linux/kernel.h>
100#include <linux/module.h>
101#include <linux/poll.h>
102#include <linux/init.h>
103#include <linux/slab.h>
104#include <linux/kref.h>
105#include <linux/io.h>
106
107#include <media/videobuf-dma-sg.h>
108
109/* system controller registers */
110#define SYS_IRQ_SOURCE_CTL 0x24
111#define SYS_IRQ_OUTPUT_EN 0x28
112#define SYS_IRQ_OUTPUT_DATA 0x2C
113#define SYS_IRQ_INPUT_DATA 0x30
114#define SYS_FPGA_CONFIG_STATUS 0x44
115
116/* GPIO IRQ line assignment */
117#define IRQ_CORL_DONE 0x10
118
119/* FPGA registers */
120#define MMAP_REG_VERSION 0x00
121#define MMAP_REG_CORL_CONF1 0x08
122#define MMAP_REG_CORL_CONF2 0x0C
123#define MMAP_REG_STATUS 0x48
124
125#define SYS_FPGA_BLOCK 0xF0000000
126
127#define DATA_FPGA_START 0x400000
128#define DATA_FPGA_SIZE 0x80000
129
130static const char drv_name[] = "carma-fpga";
131
132#define NUM_FPGA 4
133
134#define MIN_DATA_BUFS 8
135#define MAX_DATA_BUFS 64
136
137struct fpga_info {
138 unsigned int num_lag_ram;
139 unsigned int blk_size;
140};
141
142struct data_buf {
143 struct list_head entry;
144 struct videobuf_dmabuf vb;
145 size_t size;
146};
147
148struct fpga_device {
149 /* character device */
150 struct miscdevice miscdev;
151 struct device *dev;
152 struct mutex mutex;
153
154 /* reference count */
155 struct kref ref;
156
157 /* FPGA registers and information */
158 struct fpga_info info[NUM_FPGA];
159 void __iomem *regs;
160 int irq;
161
162 /* FPGA Physical Address/Size Information */
163 resource_size_t phys_addr;
164 size_t phys_size;
165
166 /* DMA structures */
167 struct sg_table corl_table;
168 unsigned int corl_nents;
169 struct dma_chan *chan;
170
171 /* Protection for all members below */
172 spinlock_t lock;
173
174 /* Device enable/disable flag */
175 bool enabled;
176
177 /* Correlation data buffers */
178 wait_queue_head_t wait;
179 struct list_head free;
180 struct list_head used;
181 struct data_buf *inflight;
182
183 /* Information about data buffers */
184 unsigned int num_dropped;
185 unsigned int num_buffers;
186 size_t bufsize;
187 struct dentry *dbg_entry;
188};
189
190struct fpga_reader {
191 struct fpga_device *priv;
192 struct data_buf *buf;
193 off_t buf_start;
194};
195
196static void fpga_device_release(struct kref *ref)
197{
198 struct fpga_device *priv = container_of(ref, struct fpga_device, ref);
199
200 /* the last reader has exited, cleanup the last bits */
201 mutex_destroy(&priv->mutex);
202 kfree(priv);
203}
204
205/*
206 * Data Buffer Allocation Helpers
207 */
208
209/**
210 * data_free_buffer() - free a single data buffer and all allocated memory
211 * @buf: the buffer to free
212 *
213 * This will free all of the pages allocated to the given data buffer, and
214 * then free the structure itself
215 */
216static void data_free_buffer(struct data_buf *buf)
217{
218 /* It is ok to free a NULL buffer */
219 if (!buf)
220 return;
221
222 /* free all memory */
223 videobuf_dma_free(&buf->vb);
224 kfree(buf);
225}
226
227/**
228 * data_alloc_buffer() - allocate and fill a data buffer with pages
229 * @bytes: the number of bytes required
230 *
231 * This allocates all space needed for a data buffer. It must be mapped before
232 * use in a DMA transaction using videobuf_dma_map().
233 *
234 * Returns NULL on failure
235 */
236static struct data_buf *data_alloc_buffer(const size_t bytes)
237{
238 unsigned int nr_pages;
239 struct data_buf *buf;
240 int ret;
241
242 /* calculate the number of pages necessary */
243 nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
244
245 /* allocate the buffer structure */
246 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
247 if (!buf)
248 goto out_return;
249
250 /* initialize internal fields */
251 INIT_LIST_HEAD(&buf->entry);
252 buf->size = bytes;
253
254 /* allocate the videobuf */
255 videobuf_dma_init(&buf->vb);
256 ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages);
257 if (ret)
258 goto out_free_buf;
259
260 return buf;
261
262out_free_buf:
263 kfree(buf);
264out_return:
265 return NULL;
266}
267
268/**
269 * data_free_buffers() - free all allocated buffers
270 * @priv: the driver's private data structure
271 *
272 * Free all buffers allocated by the driver (except those currently in the
273 * process of being read by userspace).
274 *
275 * LOCKING: must hold dev->mutex
276 * CONTEXT: user
277 */
278static void data_free_buffers(struct fpga_device *priv)
279{
280 struct data_buf *buf, *tmp;
281
282 /* the device should be stopped, no DMA in progress */
283 BUG_ON(priv->inflight != NULL);
284
285 list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
286 list_del_init(&buf->entry);
287 videobuf_dma_unmap(priv->dev, &buf->vb);
288 data_free_buffer(buf);
289 }
290
291 list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
292 list_del_init(&buf->entry);
293 videobuf_dma_unmap(priv->dev, &buf->vb);
294 data_free_buffer(buf);
295 }
296
297 priv->num_buffers = 0;
298 priv->bufsize = 0;
299}
300
301/**
302 * data_alloc_buffers() - allocate one second's worth of data buffers
303 * @priv: the driver's private data structure
304 *
305 * Allocate enough buffers for a whole second's worth of data
306 *
307 * This routine will attempt to degrade nicely by succeeding even if a full
308 * second's worth of data buffers could not be allocated, as long as a minimum
309 * number were allocated. In this case, it will print a message to the kernel
310 * log.
311 *
312 * The device must not be modifying any lists when this is called.
313 *
314 * CONTEXT: user
315 * LOCKING: must hold dev->mutex
316 *
317 * Returns 0 on success, -ERRNO otherwise
318 */
319static int data_alloc_buffers(struct fpga_device *priv)
320{
321 struct data_buf *buf;
322 int i, ret;
323
324 for (i = 0; i < MAX_DATA_BUFS; i++) {
325
326 /* allocate a buffer */
327 buf = data_alloc_buffer(priv->bufsize);
328 if (!buf)
329 break;
330
331 /* map it for DMA */
332 ret = videobuf_dma_map(priv->dev, &buf->vb);
333 if (ret) {
334 data_free_buffer(buf);
335 break;
336 }
337
338 /* add it to the list of free buffers */
339 list_add_tail(&buf->entry, &priv->free);
340 priv->num_buffers++;
341 }
342
343 /* Make sure we allocated the minimum required number of buffers */
344 if (priv->num_buffers < MIN_DATA_BUFS) {
345 dev_err(priv->dev, "Unable to allocate enough data buffers\n");
346 data_free_buffers(priv);
347 return -ENOMEM;
348 }
349
350 /* Warn if we are running in a degraded state, but do not fail */
351 if (priv->num_buffers < MAX_DATA_BUFS) {
352 dev_warn(priv->dev,
353 "Unable to allocate %d buffers, using %d buffers instead\n",
354 MAX_DATA_BUFS, i);
355 }
356
357 return 0;
358}
359
360/*
361 * DMA Operations Helpers
362 */
363
364/**
365 * fpga_start_addr() - get the physical address of a DATA-FPGA
366 * @priv: the driver's private data structure
367 * @fpga: the DATA-FPGA number (zero based)
368 */
369static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
370{
371 return priv->phys_addr + 0x400000 + (0x80000 * fpga);
372}
373
374/**
375 * fpga_block_addr() - get the physical address of a correlation data block
376 * @priv: the driver's private data structure
377 * @fpga: the DATA-FPGA number (zero based)
378 * @blknum: the correlation block number (zero based)
379 */
380static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
381 unsigned int blknum)
382{
383 return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
384}
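/*
 * Worked example (editor's note, values illustrative): with blknum
 * zero-based, fpga_block_addr(priv, 1, 2) resolves to
 *
 *     priv->phys_addr + 0x400000 + (0x80000 * 1) + (0x10000 * 3)
 *   = priv->phys_addr + 0x4B0000
 *
 * i.e. the third correlation block of DATA-FPGA #1.
 */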
385
386#define REG_BLOCK_SIZE (32 * 4)
387
388/**
389 * data_setup_corl_table() - create the scatterlist for correlation dumps
390 * @priv: the driver's private data structure
391 *
392 * Create the scatterlist for transferring a correlation dump from the
393 * DATA FPGAs. This structure will be reused for each buffer that needs
394 * to be filled with correlation data.
395 *
396 * Returns 0 on success, -ERRNO otherwise
397 */
398static int data_setup_corl_table(struct fpga_device *priv)
399{
400 struct sg_table *table = &priv->corl_table;
401 struct scatterlist *sg;
402 struct fpga_info *info;
403 int i, j, ret;
404
405 /* Calculate the number of entries needed */
406 priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
407 for (i = 0; i < NUM_FPGA; i++)
408 priv->corl_nents += priv->info[i].num_lag_ram;
409
410 /* Allocate the scatterlist table */
411 ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
412 if (ret) {
413 dev_err(priv->dev, "unable to allocate DMA table\n");
414 return ret;
415 }
416
417 /* Add the DATA FPGA registers to the scatterlist */
418 sg = table->sgl;
419 for (i = 0; i < NUM_FPGA; i++) {
420 sg_dma_address(sg) = fpga_start_addr(priv, i);
421 sg_dma_len(sg) = REG_BLOCK_SIZE;
422 sg = sg_next(sg);
423 }
424
425 /* Add the SYS-FPGA registers to the scatterlist */
426 sg_dma_address(sg) = SYS_FPGA_BLOCK;
427 sg_dma_len(sg) = REG_BLOCK_SIZE;
428 sg = sg_next(sg);
429
430 /* Add the FPGA correlation data blocks to the scatterlist */
431 for (i = 0; i < NUM_FPGA; i++) {
432 info = &priv->info[i];
433 for (j = 0; j < info->num_lag_ram; j++) {
434 sg_dma_address(sg) = fpga_block_addr(priv, i, j);
435 sg_dma_len(sg) = info->blk_size;
436 sg = sg_next(sg);
437 }
438 }
439
440 /*
441 * All physical addresses and lengths are present in the structure
442 * now. It can be reused for every FPGA DATA interrupt
443 */
444 return 0;
445}
446
447/*
448 * FPGA Register Access Helpers
449 */
450
451static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
452 unsigned int reg, u32 val)
453{
454 const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
455 iowrite32be(val, priv->regs + fpga_start + reg);
456}
457
458static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
459 unsigned int reg)
460{
461 const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
462 return ioread32be(priv->regs + fpga_start + reg);
463}
464
465/**
466 * data_calculate_bufsize() - calculate the data buffer size required
467 * @priv: the driver's private data structure
468 *
469 * Calculate the total buffer size needed to hold a single block
470 * of correlation data
471 *
472 * CONTEXT: user
473 *
474 * Returns 0 on success, -ERRNO otherwise
475 */
476static int data_calculate_bufsize(struct fpga_device *priv)
477{
478 u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
479 u32 conf1, conf2, version;
480 u32 num_lag_ram, blk_size;
481 int i;
482
483 /* Each buffer starts with the 5 FPGA register areas */
484 priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
485
486 /* Read and store the configuration data for each FPGA */
487 for (i = 0; i < NUM_FPGA; i++) {
488 version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
489 conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
490 conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);
491
492 /* minor version 2 and later */
493 if ((version & 0x000000FF) >= 2) {
494 num_corl = (conf1 & 0x000000F0) >> 4;
495 num_pack = (conf1 & 0x00000F00) >> 8;
496 num_lags = (conf1 & 0x00FFF000) >> 12;
497 num_meta = (conf1 & 0x7F000000) >> 24;
498 num_qcnt = (conf2 & 0x00000FFF) >> 0;
499 } else {
500 num_corl = (conf1 & 0x000000F0) >> 4;
501 num_pack = 1; /* implied */
502 num_lags = (conf1 & 0x000FFF00) >> 8;
503 num_meta = (conf1 & 0x7FF00000) >> 20;
504 num_qcnt = (conf2 & 0x00000FFF) >> 0;
505 }
506
507 num_lag_ram = (num_corl + num_pack - 1) / num_pack;
508 blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;
509
510 priv->info[i].num_lag_ram = num_lag_ram;
511 priv->info[i].blk_size = blk_size;
512 priv->bufsize += num_lag_ram * blk_size;
513
514 dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
515 dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
516 dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
517 dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
518 dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
519 dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
520 }
521
522 dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
523 return 0;
524}
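/*
 * Worked example (editor's note, register values hypothetical): for a
 * minor version >= 2 FPGA reporting num_corl = 8, num_pack = 2,
 * num_lags = 128, num_meta = 16 and num_qcnt = 64:
 *
 *     num_lag_ram = (8 + 2 - 1) / 2           = 4
 *     blk_size    = ((2 * 128) + 16 + 64) * 8 = 2688 bytes
 *
 * so this FPGA adds 4 * 2688 = 10752 bytes to the buffer, on top of the
 * fixed (1 + NUM_FPGA) * REG_BLOCK_SIZE = 5 * 128 = 640 byte header.
 */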
525
526/*
527 * Interrupt Handling
528 */
529
530/**
531 * data_disable_interrupts() - stop the device from generating interrupts
532 * @priv: the driver's private data structure
533 *
534 * Hide interrupts by switching to GPIO interrupt source
535 *
536 * LOCKING: must hold dev->lock
537 */
538static void data_disable_interrupts(struct fpga_device *priv)
539{
540 /* hide the interrupt by switching the IRQ driver to GPIO */
541 iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
542}
543
544/**
545 * data_enable_interrupts() - allow the device to generate interrupts
546 * @priv: the driver's private data structure
547 *
548 * Unhide interrupts by switching to the FPGA interrupt source. At the
549 * same time, clear the DATA-FPGA status registers.
550 *
551 * LOCKING: must hold dev->lock
552 */
553static void data_enable_interrupts(struct fpga_device *priv)
554{
555 /* clear the actual FPGA corl_done interrupt */
556 fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0);
557 fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0);
558 fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0);
559 fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0);
560
561 /* flush the writes */
562 fpga_read_reg(priv, 0, MMAP_REG_STATUS);
563
564 /* switch back to the external interrupt source */
565 iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
566}
567
568/**
569 * data_dma_cb() - DMAEngine callback for DMA completion
570 * @data: the driver's private data structure
571 *
572 * Complete a DMA transfer from the DATA-FPGA's
573 *
574 * This is called via the DMA callback mechanism, and will handle moving the
575 * completed DMA transaction to the used list, and then wake any processes
576 * waiting for new data
577 *
578 * CONTEXT: any, softirq expected
579 */
580static void data_dma_cb(void *data)
581{
582 struct fpga_device *priv = data;
583 unsigned long flags;
584
585 spin_lock_irqsave(&priv->lock, flags);
586
587 /* If there is no inflight buffer, we've got a bug */
588 BUG_ON(priv->inflight == NULL);
589
590 /* Move the inflight buffer onto the used list */
591 list_move_tail(&priv->inflight->entry, &priv->used);
592 priv->inflight = NULL;
593
594 /* clear the FPGA status and re-enable interrupts */
595 data_enable_interrupts(priv);
596
597 spin_unlock_irqrestore(&priv->lock, flags);
598
599 /*
600 * We've changed both the inflight and used lists, so we need
601 * to wake up any processes that are blocking for those events
602 */
603 wake_up(&priv->wait);
604}
605
606/**
607 * data_submit_dma() - prepare and submit the required DMA to fill a buffer
608 * @priv: the driver's private data structure
609 * @buf: the data buffer
610 *
611 * Prepare and submit the necessary DMA transactions to fill a correlation
612 * data buffer.
613 *
614 * LOCKING: must hold dev->lock
615 * CONTEXT: hardirq only
616 *
617 * Returns 0 on success, -ERRNO otherwise
618 */
619static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
620{
621 struct scatterlist *dst_sg, *src_sg;
622 unsigned int dst_nents, src_nents;
623 struct dma_chan *chan = priv->chan;
624 struct dma_async_tx_descriptor *tx;
625 dma_cookie_t cookie;
626 dma_addr_t dst, src;
627
628 dst_sg = buf->vb.sglist;
629 dst_nents = buf->vb.sglen;
630
631 src_sg = priv->corl_table.sgl;
632 src_nents = priv->corl_nents;
633
634 /*
635 * All buffers passed to this function should be ready and mapped
636 * for DMA already. Therefore, we don't need to do anything except
637 * submit it to the Freescale DMA Engine for processing
638 */
639
640 /* setup the scatterlist to scatterlist transfer */
641 tx = chan->device->device_prep_dma_sg(chan,
642 dst_sg, dst_nents,
643 src_sg, src_nents,
644 0);
645 if (!tx) {
646 dev_err(priv->dev, "unable to prep scatterlist DMA\n");
647 return -ENOMEM;
648 }
649
650 /* submit the transaction to the DMA controller */
651 cookie = tx->tx_submit(tx);
652 if (dma_submit_error(cookie)) {
653 dev_err(priv->dev, "unable to submit scatterlist DMA\n");
654 return -ENOMEM;
655 }
656
657 /* Prepare the re-read of the SYS-FPGA block */
658 dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE);
659 src = SYS_FPGA_BLOCK;
660 tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
661 REG_BLOCK_SIZE,
662 DMA_PREP_INTERRUPT);
663 if (!tx) {
664 dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
665 return -ENOMEM;
666 }
667
668 /* Setup the callback */
669 tx->callback = data_dma_cb;
670 tx->callback_param = priv;
671
672 /* submit the transaction to the DMA controller */
673 cookie = tx->tx_submit(tx);
674 if (dma_submit_error(cookie)) {
675 dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n");
676 return -ENOMEM;
677 }
678
679 return 0;
680}
681
682#define CORL_DONE 0x1
683#define CORL_ERR 0x2
684
685static irqreturn_t data_irq(int irq, void *dev_id)
686{
687 struct fpga_device *priv = dev_id;
688 bool submitted = false;
689 struct data_buf *buf;
690 u32 status;
691 int i;
692
693 /* detect spurious interrupts via FPGA status */
694	for (i = 0; i < NUM_FPGA; i++) {
695 status = fpga_read_reg(priv, i, MMAP_REG_STATUS);
696 if (!(status & (CORL_DONE | CORL_ERR))) {
697 dev_err(priv->dev, "spurious irq detected (FPGA)\n");
698 return IRQ_NONE;
699 }
700 }
701
702 /* detect spurious interrupts via raw IRQ pin readback */
703 status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA);
704 if (status & IRQ_CORL_DONE) {
705 dev_err(priv->dev, "spurious irq detected (IRQ)\n");
706 return IRQ_NONE;
707 }
708
709 spin_lock(&priv->lock);
710
711 /* hide the interrupt by switching the IRQ driver to GPIO */
712 data_disable_interrupts(priv);
713
714 /* If there are no free buffers, drop this data */
715 if (list_empty(&priv->free)) {
716 priv->num_dropped++;
717 goto out;
718 }
719
720 buf = list_first_entry(&priv->free, struct data_buf, entry);
721 list_del_init(&buf->entry);
722 BUG_ON(buf->size != priv->bufsize);
723
724 /* Submit a DMA transfer to get the correlation data */
725 if (data_submit_dma(priv, buf)) {
726 dev_err(priv->dev, "Unable to setup DMA transfer\n");
727 list_move_tail(&buf->entry, &priv->free);
728 goto out;
729 }
730
731 /* Save the buffer for the DMA callback */
732 priv->inflight = buf;
733 submitted = true;
734
735 /* Start the DMA Engine */
736 dma_async_memcpy_issue_pending(priv->chan);
737
738out:
739 /* If no DMA was submitted, re-enable interrupts */
740 if (!submitted)
741 data_enable_interrupts(priv);
742
743 spin_unlock(&priv->lock);
744 return IRQ_HANDLED;
745}
746
747/*
748 * Realtime Device Enable Helpers
749 */
750
751/**
752 * data_device_enable() - enable the device for buffered dumping
753 * @priv: the driver's private data structure
754 *
755 * Enable the device for buffered dumping. Allocates buffers and hooks up
756 * the interrupt handler. When this finishes, data will come pouring in.
757 *
758 * LOCKING: must hold dev->mutex
759 * CONTEXT: user context only
760 *
761 * Returns 0 on success, -ERRNO otherwise
762 */
763static int data_device_enable(struct fpga_device *priv)
764{
765 u32 val;
766 int ret;
767
768 /* multiple enables are safe: they do nothing */
769 if (priv->enabled)
770 return 0;
771
772 /* check that the FPGAs are programmed */
773 val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
774 if (!(val & (1 << 18))) {
775 dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
776 return -ENODATA;
777 }
778
779 /* read the FPGAs to calculate the buffer size */
780 ret = data_calculate_bufsize(priv);
781 if (ret) {
782 dev_err(priv->dev, "unable to calculate buffer size\n");
783 goto out_error;
784 }
785
786 /* allocate the correlation data buffers */
787 ret = data_alloc_buffers(priv);
788 if (ret) {
789 dev_err(priv->dev, "unable to allocate buffers\n");
790 goto out_error;
791 }
792
793 /* setup the source scatterlist for dumping correlation data */
794 ret = data_setup_corl_table(priv);
795 if (ret) {
796 dev_err(priv->dev, "unable to setup correlation DMA table\n");
797 goto out_error;
798 }
799
800 /* hookup the irq handler */
801 ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
802 if (ret) {
803 dev_err(priv->dev, "unable to request IRQ handler\n");
804 goto out_error;
805 }
806
807 /* switch to the external FPGA IRQ line */
808 data_enable_interrupts(priv);
809
810 /* success, we're enabled */
811 priv->enabled = true;
812 return 0;
813
814out_error:
815 sg_free_table(&priv->corl_table);
816 priv->corl_nents = 0;
817
818 data_free_buffers(priv);
819 return ret;
820}
821
822/**
823 * data_device_disable() - disable the device for buffered dumping
824 * @priv: the driver's private data structure
825 *
826 * Disable the device for buffered dumping. Stops new DMA transactions from
827 * being generated, waits for all outstanding DMA to complete, and then frees
828 * all buffers.
829 *
830 * LOCKING: must hold dev->mutex
831 * CONTEXT: user only
832 *
833 * Returns 0 on success, -ERRNO otherwise
834 */
835static int data_device_disable(struct fpga_device *priv)
836{
837 int ret;
838
839 /* allow multiple disable */
840 if (!priv->enabled)
841 return 0;
842
843 /* switch to the internal GPIO IRQ line */
844 data_disable_interrupts(priv);
845
846 /* unhook the irq handler */
847 free_irq(priv->irq, priv);
848
849 /*
850 * wait for all outstanding DMA to complete
851 *
852 * Device interrupts are disabled, therefore another buffer cannot
853 * be marked inflight.
854 */
855 ret = wait_event_interruptible(priv->wait, priv->inflight == NULL);
856 if (ret)
857 return ret;
858
859 /* free the correlation table */
860 sg_free_table(&priv->corl_table);
861 priv->corl_nents = 0;
862
863 /*
864 * We are taking the spinlock not to protect priv->enabled, but instead
865 * to make sure that there are no readers in the process of altering
866 * the free or used lists while we are setting this flag.
867 */
868 spin_lock_irq(&priv->lock);
869 priv->enabled = false;
870 spin_unlock_irq(&priv->lock);
871
872 /* free all buffers: the free and used lists are not being changed */
873 data_free_buffers(priv);
874 return 0;
875}
876
877/*
878 * DEBUGFS Interface
879 */
880#ifdef CONFIG_DEBUG_FS
881
882/*
883 * Count the number of entries in the given list
884 */
885static unsigned int list_num_entries(struct list_head *list)
886{
887 struct list_head *entry;
888 unsigned int ret = 0;
889
890 list_for_each(entry, list)
891 ret++;
892
893 return ret;
894}
895
896static int data_debug_show(struct seq_file *f, void *offset)
897{
898 struct fpga_device *priv = f->private;
899 int ret;
900
901 /*
902 * Lock the mutex first, so that we get an accurate value for enable
903 * Lock the spinlock next, to get accurate list counts
904 */
905 ret = mutex_lock_interruptible(&priv->mutex);
906 if (ret)
907 return ret;
908
909 spin_lock_irq(&priv->lock);
910
911 seq_printf(f, "enabled: %d\n", priv->enabled);
912	seq_printf(f, "bufsize: %zu\n", priv->bufsize);
913 seq_printf(f, "num_buffers: %d\n", priv->num_buffers);
914 seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free));
915 seq_printf(f, "inflight: %d\n", priv->inflight != NULL);
916 seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used));
917 seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
918
919 spin_unlock_irq(&priv->lock);
920 mutex_unlock(&priv->mutex);
921 return 0;
922}
923
924static int data_debug_open(struct inode *inode, struct file *file)
925{
926 return single_open(file, data_debug_show, inode->i_private);
927}
928
929static const struct file_operations data_debug_fops = {
930 .owner = THIS_MODULE,
931 .open = data_debug_open,
932 .read = seq_read,
933 .llseek = seq_lseek,
934 .release = single_release,
935};
936
937static int data_debugfs_init(struct fpga_device *priv)
938{
939 priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
940 &data_debug_fops);
941 if (IS_ERR(priv->dbg_entry))
942 return PTR_ERR(priv->dbg_entry);
943
944 return 0;
945}
946
947static void data_debugfs_exit(struct fpga_device *priv)
948{
949 debugfs_remove(priv->dbg_entry);
950}
951
952#else
953
954static inline int data_debugfs_init(struct fpga_device *priv)
955{
956 return 0;
957}
958
959static inline void data_debugfs_exit(struct fpga_device *priv)
960{
961}
962
963#endif /* CONFIG_DEBUG_FS */
964
965/*
966 * SYSFS Attributes
967 */
968
969static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
970 char *buf)
971{
972 struct fpga_device *priv = dev_get_drvdata(dev);
973 return snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
974}
975
976static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
977 const char *buf, size_t count)
978{
979 struct fpga_device *priv = dev_get_drvdata(dev);
980 unsigned long enable;
981 int ret;
982
983 ret = strict_strtoul(buf, 0, &enable);
984 if (ret) {
985 dev_err(priv->dev, "unable to parse enable input\n");
986 return -EINVAL;
987 }
988
989 ret = mutex_lock_interruptible(&priv->mutex);
990 if (ret)
991 return ret;
992
993 if (enable)
994 ret = data_device_enable(priv);
995 else
996 ret = data_device_disable(priv);
997
998	if (ret) {
999		dev_err(priv->dev, "device %s failed\n",
1000			enable ? "enable" : "disable");
1001		count = ret;
1003	}
1004
1006	mutex_unlock(&priv->mutex);
1007	return count;
1008}
1009
1010static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);
1011
1012static struct attribute *data_sysfs_attrs[] = {
1013 &dev_attr_enable.attr,
1014 NULL,
1015};
1016
1017static const struct attribute_group rt_sysfs_attr_group = {
1018 .attrs = data_sysfs_attrs,
1019};
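
/*
 * Illustrative userspace toggle for the "enable" attribute (a hedged
 * sketch, not part of the driver; the sysfs path assumes the miscdevice
 * registers under the name "carma-fpga"). Writing "1" ends up in
 * data_device_enable(), "0" in data_device_disable().
 */
#if 0	/* example only, never built with the driver */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/misc/carma-fpga/enable", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* parsed by strict_strtoul() */
		perror("write");
	close(fd);
	return 0;
}
#endif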
1020
1021/*
1022 * FPGA Realtime Data Character Device
1023 */
1024
1025static int data_open(struct inode *inode, struct file *filp)
1026{
1027 /*
1028 * The miscdevice layer puts our struct miscdevice into the
1029 * filp->private_data field. We use this to find our private
1030 * data and then overwrite it with our own private structure.
1031 */
1032 struct fpga_device *priv = container_of(filp->private_data,
1033 struct fpga_device, miscdev);
1034 struct fpga_reader *reader;
1035 int ret;
1036
1037 /* allocate private data */
1038 reader = kzalloc(sizeof(*reader), GFP_KERNEL);
1039 if (!reader)
1040 return -ENOMEM;
1041
1042 reader->priv = priv;
1043 reader->buf = NULL;
1044
1045 filp->private_data = reader;
1046 ret = nonseekable_open(inode, filp);
1047 if (ret) {
1048 dev_err(priv->dev, "nonseekable-open failed\n");
1049 kfree(reader);
1050 return ret;
1051 }
1052
1053 /*
1054 * success, increase the reference count of the private data structure
1055 * so that it doesn't disappear if the device is unbound
1056 */
1057 kref_get(&priv->ref);
1058 return 0;
1059}
1060
1061static int data_release(struct inode *inode, struct file *filp)
1062{
1063 struct fpga_reader *reader = filp->private_data;
1064 struct fpga_device *priv = reader->priv;
1065
1066 /* free the per-reader structure */
1067 data_free_buffer(reader->buf);
1068 kfree(reader);
1069 filp->private_data = NULL;
1070
1071 /* decrement our reference count to the private data */
1072 kref_put(&priv->ref, fpga_device_release);
1073 return 0;
1074}
1075
1076static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
1077 loff_t *f_pos)
1078{
1079 struct fpga_reader *reader = filp->private_data;
1080 struct fpga_device *priv = reader->priv;
1081 struct list_head *used = &priv->used;
1082 struct data_buf *dbuf;
1083 size_t avail;
1084 void *data;
1085 int ret;
1086
1087 /* check if we already have a partial buffer */
1088 if (reader->buf) {
1089 dbuf = reader->buf;
1090 goto have_buffer;
1091 }
1092
1093 spin_lock_irq(&priv->lock);
1094
1095 /* Block until there is at least one buffer on the used list */
1096 while (list_empty(used)) {
1097 spin_unlock_irq(&priv->lock);
1098
1099 if (filp->f_flags & O_NONBLOCK)
1100 return -EAGAIN;
1101
1102 ret = wait_event_interruptible(priv->wait, !list_empty(used));
1103 if (ret)
1104 return ret;
1105
1106 spin_lock_irq(&priv->lock);
1107 }
1108
1109 /* Grab the first buffer off of the used list */
1110 dbuf = list_first_entry(used, struct data_buf, entry);
1111 list_del_init(&dbuf->entry);
1112
1113 spin_unlock_irq(&priv->lock);
1114
1115 /* Buffers are always mapped: unmap it */
1116 videobuf_dma_unmap(priv->dev, &dbuf->vb);
1117
1118 /* save the buffer for later */
1119 reader->buf = dbuf;
1120 reader->buf_start = 0;
1121
1122have_buffer:
1123 /* Get the number of bytes available */
1124 avail = dbuf->size - reader->buf_start;
1125 data = dbuf->vb.vaddr + reader->buf_start;
1126
1127 /* Get the number of bytes we can transfer */
1128 count = min(count, avail);
1129
1130 /* Copy the data to the userspace buffer */
1131 if (copy_to_user(ubuf, data, count))
1132 return -EFAULT;
1133
1134 /* Update the amount of available space */
1135 avail -= count;
1136
1137 /*
1138 * If there is still some data available, save the buffer for the
1139 * next userspace call to read() and return
1140 */
1141 if (avail > 0) {
1142 reader->buf_start += count;
1143 reader->buf = dbuf;
1144 return count;
1145 }
1146
1147 /*
1148 * Get the buffer ready to be reused for DMA
1149 *
1150	 * If it fails, we pretend that the read never happened and return
1151 * -EFAULT to userspace. The read will be retried.
1152 */
1153 ret = videobuf_dma_map(priv->dev, &dbuf->vb);
1154 if (ret) {
1155 dev_err(priv->dev, "unable to remap buffer for DMA\n");
1156 return -EFAULT;
1157 }
1158
1159 /* Lock against concurrent enable/disable */
1160 spin_lock_irq(&priv->lock);
1161
1162 /* the reader is finished with this buffer */
1163 reader->buf = NULL;
1164
1165 /*
1166	 * One of two things has happened: either the device has been
1167	 * disabled, or it has been reconfigured underneath us. In either
1168	 * case, we should just throw away the buffer.
1169 */
1170 if (!priv->enabled || dbuf->size != priv->bufsize) {
1171 videobuf_dma_unmap(priv->dev, &dbuf->vb);
1172 data_free_buffer(dbuf);
1173 goto out_unlock;
1174 }
1175
1176 /* The buffer is safe to reuse, so add it back to the free list */
1177 list_add_tail(&dbuf->entry, &priv->free);
1178
1179out_unlock:
1180 spin_unlock_irq(&priv->lock);
1181 return count;
1182}
1183
1184static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
1185{
1186 struct fpga_reader *reader = filp->private_data;
1187 struct fpga_device *priv = reader->priv;
1188 unsigned int mask = 0;
1189
1190 poll_wait(filp, &priv->wait, tbl);
1191
1192 if (!list_empty(&priv->used))
1193 mask |= POLLIN | POLLRDNORM;
1194
1195 return mask;
1196}
1197
1198static int data_mmap(struct file *filp, struct vm_area_struct *vma)
1199{
1200 struct fpga_reader *reader = filp->private_data;
1201 struct fpga_device *priv = reader->priv;
1202 unsigned long offset, vsize, psize, addr;
1203
1204 /* VMA properties */
1205 offset = vma->vm_pgoff << PAGE_SHIFT;
1206 vsize = vma->vm_end - vma->vm_start;
1207 psize = priv->phys_size - offset;
1208 addr = (priv->phys_addr + offset) >> PAGE_SHIFT;
1209
1210 /* Check against the FPGA region's physical memory size */
1211 if (vsize > psize) {
1212 dev_err(priv->dev, "requested mmap mapping too large\n");
1213 return -EINVAL;
1214 }
1215
1216	/* IO memory (stop caching) */
1217 vma->vm_flags |= VM_IO | VM_RESERVED;
1218 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1219
1220 return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
1221 vma->vm_page_prot);
1222}
1223
1224static const struct file_operations data_fops = {
1225 .owner = THIS_MODULE,
1226 .open = data_open,
1227 .release = data_release,
1228 .read = data_read,
1229 .poll = data_poll,
1230 .mmap = data_mmap,
1231 .llseek = no_llseek,
1232};
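
/*
 * Illustrative userspace reader (a hedged sketch, not part of the
 * driver; the "/dev/carma-fpga" node name is an assumption based on
 * drv_name). poll() blocks until data_poll() reports POLLIN, then
 * read() drains one correlation buffer, possibly in several calls.
 */
#if 0	/* example only, never built with the driver */
#include <poll.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static char buf[65536];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/carma-fpga", O_RDONLY | O_NONBLOCK);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n < 0)
			break;
		fprintf(stderr, "read %zd bytes of correlation data\n", n);
	}
	close(pfd.fd);
	return 0;
}
#endif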
1233
1234/*
1235 * OpenFirmware Device Subsystem
1236 */
1237
1238static bool dma_filter(struct dma_chan *chan, void *data)
1239{
1240 /*
1241 * DMA Channel #0 is used for the FPGA Programmer, so ignore it
1242 *
1243 * This probably won't survive an unload/load cycle of the Freescale
1244 * DMAEngine driver, but that won't be a problem
1245 */
1246 if (chan->chan_id == 0 && chan->device->dev_id == 0)
1247 return false;
1248
1249 return true;
1250}
1251
1252static int data_of_probe(struct platform_device *op,
1253 const struct of_device_id *match)
1254{
1255 struct device_node *of_node = op->dev.of_node;
1256 struct device *this_device;
1257 struct fpga_device *priv;
1258 struct resource res;
1259 dma_cap_mask_t mask;
1260 int ret;
1261
1262 /* Allocate private data */
1263 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1264 if (!priv) {
1265 dev_err(&op->dev, "Unable to allocate device private data\n");
1266 ret = -ENOMEM;
1267 goto out_return;
1268 }
1269
1270 dev_set_drvdata(&op->dev, priv);
1271 priv->dev = &op->dev;
1272 kref_init(&priv->ref);
1273 mutex_init(&priv->mutex);
1274
1276 spin_lock_init(&priv->lock);
1277 INIT_LIST_HEAD(&priv->free);
1278 INIT_LIST_HEAD(&priv->used);
1279 init_waitqueue_head(&priv->wait);
1280
1281 /* Setup the misc device */
1282 priv->miscdev.minor = MISC_DYNAMIC_MINOR;
1283 priv->miscdev.name = drv_name;
1284 priv->miscdev.fops = &data_fops;
1285
1286 /* Get the physical address of the FPGA registers */
1287 ret = of_address_to_resource(of_node, 0, &res);
1288 if (ret) {
1289 dev_err(&op->dev, "Unable to find FPGA physical address\n");
1290 ret = -ENODEV;
1291 goto out_free_priv;
1292 }
1293
1294 priv->phys_addr = res.start;
1295 priv->phys_size = resource_size(&res);
1296
1297 /* ioremap the registers for use */
1298 priv->regs = of_iomap(of_node, 0);
1299 if (!priv->regs) {
1300 dev_err(&op->dev, "Unable to ioremap registers\n");
1301 ret = -ENOMEM;
1302 goto out_free_priv;
1303 }
1304
1305 dma_cap_zero(mask);
1306 dma_cap_set(DMA_MEMCPY, mask);
1307 dma_cap_set(DMA_INTERRUPT, mask);
1308 dma_cap_set(DMA_SLAVE, mask);
1309 dma_cap_set(DMA_SG, mask);
1310
1311 /* Request a DMA channel */
1312 priv->chan = dma_request_channel(mask, dma_filter, NULL);
1313 if (!priv->chan) {
1314 dev_err(&op->dev, "Unable to request DMA channel\n");
1315 ret = -ENODEV;
1316 goto out_unmap_regs;
1317 }
1318
1319 /* Find the correct IRQ number */
1320 priv->irq = irq_of_parse_and_map(of_node, 0);
1321 if (priv->irq == NO_IRQ) {
1322 dev_err(&op->dev, "Unable to find IRQ line\n");
1323 ret = -ENODEV;
1324 goto out_release_dma;
1325 }
1326
1327 /* Drive the GPIO for FPGA IRQ high (no interrupt) */
1328 iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA);
1329
1330 /* Register the miscdevice */
1331 ret = misc_register(&priv->miscdev);
1332 if (ret) {
1333 dev_err(&op->dev, "Unable to register miscdevice\n");
1334 goto out_irq_dispose_mapping;
1335 }
1336
1337 /* Create the debugfs files */
1338 ret = data_debugfs_init(priv);
1339 if (ret) {
1340 dev_err(&op->dev, "Unable to create debugfs files\n");
1341 goto out_misc_deregister;
1342 }
1343
1344 /* Create the sysfs files */
1345 this_device = priv->miscdev.this_device;
1346 dev_set_drvdata(this_device, priv);
1347 ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group);
1348 if (ret) {
1349 dev_err(&op->dev, "Unable to create sysfs files\n");
1350 goto out_data_debugfs_exit;
1351 }
1352
1353 dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n");
1354 return 0;
1355
1356out_data_debugfs_exit:
1357 data_debugfs_exit(priv);
1358out_misc_deregister:
1359 misc_deregister(&priv->miscdev);
1360out_irq_dispose_mapping:
1361 irq_dispose_mapping(priv->irq);
1362out_release_dma:
1363 dma_release_channel(priv->chan);
1364out_unmap_regs:
1365 iounmap(priv->regs);
1366out_free_priv:
1367 kref_put(&priv->ref, fpga_device_release);
1368out_return:
1369 return ret;
1370}
1371
1372static int data_of_remove(struct platform_device *op)
1373{
1374 struct fpga_device *priv = dev_get_drvdata(&op->dev);
1375 struct device *this_device = priv->miscdev.this_device;
1376
1377 /* remove all sysfs files, now the device cannot be re-enabled */
1378 sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group);
1379
1380 /* remove all debugfs files */
1381 data_debugfs_exit(priv);
1382
1383 /* disable the device from generating data */
1384 data_device_disable(priv);
1385
1386 /* remove the character device to stop new readers from appearing */
1387 misc_deregister(&priv->miscdev);
1388
1389 /* cleanup everything not needed by readers */
1390 irq_dispose_mapping(priv->irq);
1391 dma_release_channel(priv->chan);
1392 iounmap(priv->regs);
1393
1394 /* release our reference */
1395 kref_put(&priv->ref, fpga_device_release);
1396 return 0;
1397}
1398
1399static struct of_device_id data_of_match[] = {
1400 { .compatible = "carma,carma-fpga", },
1401 {},
1402};
1403
1404static struct of_platform_driver data_of_driver = {
1405 .probe = data_of_probe,
1406 .remove = data_of_remove,
1407 .driver = {
1408 .name = drv_name,
1409 .of_match_table = data_of_match,
1410 .owner = THIS_MODULE,
1411 },
1412};
1413
1414/*
1415 * Module Init / Exit
1416 */
1417
1418static int __init data_init(void)
1419{
1420 return of_register_platform_driver(&data_of_driver);
1421}
1422
1423static void __exit data_exit(void)
1424{
1425 of_unregister_platform_driver(&data_of_driver);
1426}
1427
1428MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
1429MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver");
1430MODULE_LICENSE("GPL");
1431
1432module_init(data_init);
1433module_exit(data_exit);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 38657cdaf54d..c4acac74725c 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -33,6 +33,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 #include <linux/security.h>
+#include <linux/prefetch.h>
 #include <asm/pgtable.h>
 #include "gru.h"
 #include "grutables.h"
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index f8538bbd0bfa..ae16c8cb4f3e 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -28,6 +28,7 @@
 #include <linux/device.h>
 #include <linux/list.h>
 #include <linux/err.h>
+#include <linux/prefetch.h>
 #include <asm/uv/uv_hub.h>
 #include "gru.h"
 #include "grutables.h"
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index 2c8c3f39710d..abb5de1afce3 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -5,7 +5,7 @@
 menu "Texas Instruments shared transport line discipline"
 config TI_ST
 	tristate "Shared transport core driver"
-	depends on RFKILL
+	depends on NET && GPIOLIB
 	select FW_LOADER
 	help
 	  This enables the shared transport core driver for TI
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 486117f72c9f..f91f82eabda7 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -43,13 +43,15 @@ static void add_channel_to_table(struct st_data_s *st_gdata,
 	pr_info("%s: id %d\n", __func__, new_proto->chnl_id);
 	/* list now has the channel id as index itself */
 	st_gdata->list[new_proto->chnl_id] = new_proto;
+	st_gdata->is_registered[new_proto->chnl_id] = true;
 }
 
 static void remove_channel_from_table(struct st_data_s *st_gdata,
 		struct st_proto_s *proto)
 {
 	pr_info("%s: id %d\n", __func__, proto->chnl_id);
-	st_gdata->list[proto->chnl_id] = NULL;
+	/* st_gdata->list[proto->chnl_id] = NULL; */
+	st_gdata->is_registered[proto->chnl_id] = false;
 }
 
 /*
@@ -104,7 +106,7 @@ void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
 
 	if (unlikely
 	    (st_gdata == NULL || st_gdata->rx_skb == NULL
-	     || st_gdata->list[chnl_id] == NULL)) {
+	     || st_gdata->is_registered[chnl_id] == false)) {
 		pr_err("chnl_id %d not registered, no data to send?",
 		       chnl_id);
 		kfree_skb(st_gdata->rx_skb);
@@ -141,14 +143,15 @@ void st_reg_complete(struct st_data_s *st_gdata, char err)
 	unsigned char i = 0;
 	pr_info(" %s ", __func__);
 	for (i = 0; i < ST_MAX_CHANNELS; i++) {
-		if (likely(st_gdata != NULL && st_gdata->list[i] != NULL &&
-			   st_gdata->list[i]->reg_complete_cb != NULL)) {
+		if (likely(st_gdata != NULL &&
+				st_gdata->is_registered[i] == true &&
+				st_gdata->list[i]->reg_complete_cb != NULL)) {
 			st_gdata->list[i]->reg_complete_cb
 				(st_gdata->list[i]->priv_data, err);
 			pr_info("protocol %d's cb sent %d\n", i, err);
 			if (err) {	/* cleanup registered protocol */
 				st_gdata->protos_registered--;
-				st_gdata->list[i] = NULL;
+				st_gdata->is_registered[i] = false;
 			}
 		}
 	}
@@ -475,9 +478,9 @@ void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf)
 {
 	seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n",
 			st_gdata->protos_registered,
-			st_gdata->list[0x04] != NULL ? 'R' : 'U',
-			st_gdata->list[0x08] != NULL ? 'R' : 'U',
-			st_gdata->list[0x09] != NULL ? 'R' : 'U');
+			st_gdata->is_registered[0x04] == true ? 'R' : 'U',
+			st_gdata->is_registered[0x08] == true ? 'R' : 'U',
+			st_gdata->is_registered[0x09] == true ? 'R' : 'U');
 }
 
 /********************************************************************/
@@ -504,7 +507,7 @@ long st_register(struct st_proto_s *new_proto)
 		return -EPROTONOSUPPORT;
 	}
 
-	if (st_gdata->list[new_proto->chnl_id] != NULL) {
+	if (st_gdata->is_registered[new_proto->chnl_id] == true) {
 		pr_err("chnl_id %d already registered", new_proto->chnl_id);
 		return -EALREADY;
 	}
@@ -563,7 +566,7 @@ long st_register(struct st_proto_s *new_proto)
 	/* check for already registered once more,
 	 * since the above check is old
 	 */
-	if (st_gdata->list[new_proto->chnl_id] != NULL) {
+	if (st_gdata->is_registered[new_proto->chnl_id] == true) {
 		pr_err(" proto %d already registered ",
 					new_proto->chnl_id);
 		return -EALREADY;
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index b4488c8f6b23..5da93ee6f6be 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -30,6 +30,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/sched.h>
+#include <linux/sysfs.h>
 #include <linux/tty.h>
 
 #include <linux/skbuff.h>
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 44b1f46458ca..5069111c81cc 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -260,6 +260,13 @@ config MTD_BCM963XX
 	  Support for parsing CFE image tag and creating MTD partitions on
 	  Broadcom BCM63xx boards.
 
+config MTD_LANTIQ
+	tristate "Lantiq SoC NOR support"
+	depends on LANTIQ
+	select MTD_PARTITIONS
+	help
+	  Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
+
 config MTD_DILNETPC
 	tristate "CFI Flash device mapped on DIL/Net PC"
 	depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 08533bd5cba7..6adf4c9b9057 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -60,3 +60,4 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o
 obj-$(CONFIG_MTD_GPIO_ADDR)	+= gpio-addr-flash.o
 obj-$(CONFIG_MTD_BCM963XX)	+= bcm963xx-flash.o
 obj-$(CONFIG_MTD_LATCH_ADDR)	+= latch-addr-flash.o
+obj-$(CONFIG_MTD_LANTIQ)	+= lantiq-flash.o
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
new file mode 100644
index 000000000000..a90cabd7b84d
--- /dev/null
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -0,0 +1,251 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE
7 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
8 */
9
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/io.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19#include <linux/mtd/cfi.h>
20#include <linux/platform_device.h>
21#include <linux/mtd/physmap.h>
22
23#include <lantiq_soc.h>
24#include <lantiq_platform.h>
25
26/*
27 * The NOR flash is connected to the same external bus unit (EBU) as PCI.
28 * To make PCI work we need to enable the endianness swapping for the address
29 * written to the EBU. This endianness swapping works for PCI correctly but
30 * fails for attached NOR devices. To work around this we need to use a complex
31 * map. The workaround involves swapping all addresses whilst probing the chip.
32 * Once probing is complete we stop swapping the addresses but swizzle the
33 * unlock addresses to ensure that access to the NOR device works correctly.
34 */
35
36enum {
37 LTQ_NOR_PROBING,
38 LTQ_NOR_NORMAL
39};
40
41struct ltq_mtd {
42 struct resource *res;
43 struct mtd_info *mtd;
44 struct map_info *map;
45};
46
47static char ltq_map_name[] = "ltq_nor";
48
49static map_word
50ltq_read16(struct map_info *map, unsigned long adr)
51{
52 unsigned long flags;
53 map_word temp;
54
55 if (map->map_priv_1 == LTQ_NOR_PROBING)
56 adr ^= 2;
57 spin_lock_irqsave(&ebu_lock, flags);
58 temp.x[0] = *(u16 *)(map->virt + adr);
59 spin_unlock_irqrestore(&ebu_lock, flags);
60 return temp;
61}
62
63static void
64ltq_write16(struct map_info *map, map_word d, unsigned long adr)
65{
66 unsigned long flags;
67
68 if (map->map_priv_1 == LTQ_NOR_PROBING)
69 adr ^= 2;
70 spin_lock_irqsave(&ebu_lock, flags);
71 *(u16 *)(map->virt + adr) = d.x[0];
72 spin_unlock_irqrestore(&ebu_lock, flags);
73}
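
/*
 * Worked example of the probe-time swizzle (addresses hypothetical):
 * XORing the address with 2 swaps the two 16-bit halves of each 32-bit
 * word while map_priv_1 == LTQ_NOR_PROBING, so
 *
 *	cpu adr 0x00 -> ebu adr 0x02
 *	cpu adr 0x02 -> ebu adr 0x00
 *	cpu adr 0x04 -> ebu adr 0x06
 *	cpu adr 0x06 -> ebu adr 0x04
 */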
74
75/*
76 * The following 2 functions copy data between iomem and a cached memory
77 * section. As memcpy() makes use of pre-fetching we cannot use it here.
78 * The normal alternative of using memcpy_{to,from}io also makes use of
79 * memcpy() on MIPS so it is not applicable either. We are therefore stuck
80 * with having to use our own loop.
81 */
82static void
83ltq_copy_from(struct map_info *map, void *to,
84 unsigned long from, ssize_t len)
85{
86 unsigned char *f = (unsigned char *)map->virt + from;
87 unsigned char *t = (unsigned char *)to;
88 unsigned long flags;
89
90 spin_lock_irqsave(&ebu_lock, flags);
91 while (len--)
92 *t++ = *f++;
93 spin_unlock_irqrestore(&ebu_lock, flags);
94}
95
96static void
97ltq_copy_to(struct map_info *map, unsigned long to,
98 const void *from, ssize_t len)
99{
100 unsigned char *f = (unsigned char *)from;
101 unsigned char *t = (unsigned char *)map->virt + to;
102 unsigned long flags;
103
104 spin_lock_irqsave(&ebu_lock, flags);
105 while (len--)
106 *t++ = *f++;
107 spin_unlock_irqrestore(&ebu_lock, flags);
108}
109
110static const char *part_probe_types[] = { "cmdlinepart", NULL };
111
112static int __init
113ltq_mtd_probe(struct platform_device *pdev)
114{
115 struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
116 struct ltq_mtd *ltq_mtd;
117 struct mtd_partition *parts;
118 struct resource *res;
119 int nr_parts = 0;
120 struct cfi_private *cfi;
121 int err;
122
123 ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
124 platform_set_drvdata(pdev, ltq_mtd);
125
126 ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
127 if (!ltq_mtd->res) {
128 dev_err(&pdev->dev, "failed to get memory resource");
129 err = -ENOENT;
130 goto err_out;
131 }
132
133 res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
134 resource_size(ltq_mtd->res), dev_name(&pdev->dev));
135	if (!res) {
136		dev_err(&pdev->dev, "failed to request mem resource\n");
137 err = -EBUSY;
138 goto err_out;
139 }
140
141 ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
142 ltq_mtd->map->phys = res->start;
143 ltq_mtd->map->size = resource_size(res);
144 ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
145 ltq_mtd->map->phys, ltq_mtd->map->size);
146 if (!ltq_mtd->map->virt) {
147 dev_err(&pdev->dev, "failed to ioremap!\n");
148 err = -ENOMEM;
149 goto err_free;
150 }
151
152 ltq_mtd->map->name = ltq_map_name;
153 ltq_mtd->map->bankwidth = 2;
154 ltq_mtd->map->read = ltq_read16;
155 ltq_mtd->map->write = ltq_write16;
156 ltq_mtd->map->copy_from = ltq_copy_from;
157 ltq_mtd->map->copy_to = ltq_copy_to;
158
159 ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;
160 ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
161 ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;
162
163 if (!ltq_mtd->mtd) {
164 dev_err(&pdev->dev, "probing failed\n");
165 err = -ENXIO;
166 goto err_unmap;
167 }
168
169 ltq_mtd->mtd->owner = THIS_MODULE;
170
171 cfi = ltq_mtd->map->fldrv_priv;
172 cfi->addr_unlock1 ^= 1;
173 cfi->addr_unlock2 ^= 1;
174
175 nr_parts = parse_mtd_partitions(ltq_mtd->mtd,
176 part_probe_types, &parts, 0);
177 if (nr_parts > 0) {
178 dev_info(&pdev->dev,
179 "using %d partitions from cmdline", nr_parts);
180 } else {
181 nr_parts = ltq_mtd_data->nr_parts;
182 parts = ltq_mtd_data->parts;
183 }
184
185 err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts);
186 if (err) {
187 dev_err(&pdev->dev, "failed to add partitions\n");
188 goto err_destroy;
189 }
190
191 return 0;
192
193err_destroy:
194 map_destroy(ltq_mtd->mtd);
195err_unmap:
196 iounmap(ltq_mtd->map->virt);
197err_free:
198 kfree(ltq_mtd->map);
199err_out:
200 kfree(ltq_mtd);
201 return err;
202}
203
204static int __devexit
205ltq_mtd_remove(struct platform_device *pdev)
206{
207 struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
208
209 if (ltq_mtd) {
210 if (ltq_mtd->mtd) {
211 del_mtd_partitions(ltq_mtd->mtd);
212 map_destroy(ltq_mtd->mtd);
213 }
214 if (ltq_mtd->map->virt)
215 iounmap(ltq_mtd->map->virt);
216 kfree(ltq_mtd->map);
217 kfree(ltq_mtd);
218 }
219 return 0;
220}
221
222static struct platform_driver ltq_mtd_driver = {
223 .remove = __devexit_p(ltq_mtd_remove),
224 .driver = {
225 .name = "ltq_nor",
226 .owner = THIS_MODULE,
227 },
228};
229
230static int __init
231init_ltq_mtd(void)
232{
233 int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
234
235 if (ret)
236		pr_err("ltq_nor: error registering platform driver\n");
237 return ret;
238}
239
240static void __exit
241exit_ltq_mtd(void)
242{
243 platform_driver_unregister(&ltq_mtd_driver);
244}
245
246module_init(init_ltq_mtd);
247module_exit(exit_ltq_mtd);
248
249MODULE_LICENSE("GPL");
250MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
251MODULE_DESCRIPTION("Lantiq SoC NOR");
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 3ffe05db4923..5d513b54a7d7 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -470,7 +471,7 @@ static int __init au1xxx_nand_init(void)
 
 #ifdef CONFIG_MIPS_PB1550
 	/* set gpio206 high */
-	au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR);
+	gpio_direction_input(206);
 
 	boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6c884ef1b069..19f04a34783a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2017,6 +2017,13 @@ config FTMAC100
 	  from Faraday. It is used on Faraday A320, Andes AG101 and some
 	  other ARM/NDS32 SoC's.
 
+config LANTIQ_ETOP
+	tristate "Lantiq SoC ETOP driver"
+	depends on SOC_TYPE_XWAY
+	help
+	  Support for the MII0 inside the Lantiq SoC
+
+
 source "drivers/net/fs_enet/Kconfig"
 
 source "drivers/net/octeon/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e5a7375685ad..209fbb70619b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -259,6 +259,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_ENC28J60) += enc28j60.o
 obj-$(CONFIG_ETHOC) += ethoc.o
 obj-$(CONFIG_GRETH) += greth.o
+obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
 
 obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
 
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index ee648fe5d96f..01560bb67a7a 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -68,6 +68,7 @@
 #include <linux/sockios.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #include <linux/if_vlan.h>
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index ce0091eb06f5..1264d781b554 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -554,7 +554,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
 		memaddr == (unsigned short *)0xffe00000) {
 		/* PAMs card and Riebl on ST use level 5 autovector */
 		if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
-		            "PAM/Riebl-ST Ethernet", dev)) {
+		            "PAM,Riebl-ST Ethernet", dev)) {
 			printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
 			return 0;
 		}
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index cf79cf759e13..2c60435f2beb 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -41,6 +41,7 @@
 #include <linux/memory.h>
 #include <asm/kexec.h>
 #include <linux/mutex.h>
+#include <linux/prefetch.h>
 
 #include <net/ip.h>
 
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6f8adc7f5d7c..e145f2c455cb 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5100,11 +5100,6 @@ err_set_interrupt:
 	return err;
 }
 
-static void ring_free_rcu(struct rcu_head *head)
-{
-	kfree(container_of(head, struct ixgbe_ring, rcu));
-}
-
 /**
  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
  * @adapter: board private structure to clear interrupt scheme on
@@ -5126,7 +5121,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
 		/* ixgbe_get_stats64() might access this ring, we must wait
 		 * a grace period before freeing it.
 		 */
-		call_rcu(&ring->rcu, ring_free_rcu);
+		kfree_rcu(ring, rcu);
 		adapter->rx_ring[i] = NULL;
 	}
 
diff --git a/drivers/net/lantiq_etop.c b/drivers/net/lantiq_etop.c
new file mode 100644
index 000000000000..45f252b7da30
--- /dev/null
+++ b/drivers/net/lantiq_etop.c
@@ -0,0 +1,805 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
14 *
15 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
16 */
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/interrupt.h>
23#include <linux/uaccess.h>
24#include <linux/in.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/phy.h>
28#include <linux/ip.h>
29#include <linux/tcp.h>
30#include <linux/skbuff.h>
31#include <linux/mm.h>
32#include <linux/platform_device.h>
33#include <linux/ethtool.h>
34#include <linux/init.h>
35#include <linux/delay.h>
36#include <linux/io.h>
37
38#include <asm/checksum.h>
39
40#include <lantiq_soc.h>
41#include <xway_dma.h>
42#include <lantiq_platform.h>
43
44#define LTQ_ETOP_MDIO 0x11804
45#define MDIO_REQUEST 0x80000000
46#define MDIO_READ 0x40000000
47#define MDIO_ADDR_MASK 0x1f
48#define MDIO_ADDR_OFFSET 0x15
49#define MDIO_REG_MASK 0x1f
50#define MDIO_REG_OFFSET 0x10
51#define MDIO_VAL_MASK 0xffff
52
53#define PPE32_CGEN 0x800
54#define LQ_PPE32_ENET_MAC_CFG 0x1840
55
56#define LTQ_ETOP_ENETS0 0x11850
57#define LTQ_ETOP_MAC_DA0 0x1186C
58#define LTQ_ETOP_MAC_DA1 0x11870
59#define LTQ_ETOP_CFG 0x16020
60#define LTQ_ETOP_IGPLEN 0x16080
61
62#define MAX_DMA_CHAN 0x8
63#define MAX_DMA_CRC_LEN 0x4
64#define MAX_DMA_DATA_LEN 0x600
65
66#define ETOP_FTCU BIT(28)
67#define ETOP_MII_MASK 0xf
68#define ETOP_MII_NORMAL 0xd
69#define ETOP_MII_REVERSE 0xe
70#define ETOP_PLEN_UNDER 0x40
71#define ETOP_CGEN 0x800
72
73/* use 2 static channels for TX/RX */
74#define LTQ_ETOP_TX_CHANNEL 1
75#define LTQ_ETOP_RX_CHANNEL 6
76#define IS_TX(x) ((x) == LTQ_ETOP_TX_CHANNEL)
77#define IS_RX(x) ((x) == LTQ_ETOP_RX_CHANNEL)
78
79#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
80#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
81#define ltq_etop_w32_mask(x, y, z) \
82 ltq_w32_mask(x, y, ltq_etop_membase + (z))
83
84#define DRV_VERSION "1.0"
85
86static void __iomem *ltq_etop_membase;
87
88struct ltq_etop_chan {
89 int idx;
90 int tx_free;
91 struct net_device *netdev;
92 struct napi_struct napi;
93 struct ltq_dma_channel dma;
94 struct sk_buff *skb[LTQ_DESC_NUM];
95};
96
97struct ltq_etop_priv {
98 struct net_device *netdev;
99 struct ltq_eth_data *pldata;
100 struct resource *res;
101
102 struct mii_bus *mii_bus;
103 struct phy_device *phydev;
104
105 struct ltq_etop_chan ch[MAX_DMA_CHAN];
106 int tx_free[MAX_DMA_CHAN >> 1];
107
108 spinlock_t lock;
109};
110
111static int
112ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
113{
114 ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
115 if (!ch->skb[ch->dma.desc])
116 return -ENOMEM;
117 ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
118 ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
119 DMA_FROM_DEVICE);
120 ch->dma.desc_base[ch->dma.desc].addr =
121 CPHYSADDR(ch->skb[ch->dma.desc]->data);
122 ch->dma.desc_base[ch->dma.desc].ctl =
123 LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
124 MAX_DMA_DATA_LEN;
125 skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
126 return 0;
127}
128
129static void
130ltq_etop_hw_receive(struct ltq_etop_chan *ch)
131{
132 struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
133 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
134 struct sk_buff *skb = ch->skb[ch->dma.desc];
135 int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
136 unsigned long flags;
137
138 spin_lock_irqsave(&priv->lock, flags);
139 if (ltq_etop_alloc_skb(ch)) {
140 netdev_err(ch->netdev,
141 "failed to allocate new rx buffer, stopping DMA\n");
142 ltq_dma_close(&ch->dma);
143 }
144 ch->dma.desc++;
145 ch->dma.desc %= LTQ_DESC_NUM;
146 spin_unlock_irqrestore(&priv->lock, flags);
147
148 skb_put(skb, len);
149 skb->dev = ch->netdev;
150 skb->protocol = eth_type_trans(skb, ch->netdev);
151 netif_receive_skb(skb);
152}
153
154static int
155ltq_etop_poll_rx(struct napi_struct *napi, int budget)
156{
157 struct ltq_etop_chan *ch = container_of(napi,
158 struct ltq_etop_chan, napi);
159 int rx = 0;
160 int complete = 0;
161
162 while ((rx < budget) && !complete) {
163 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
164
165 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
166 ltq_etop_hw_receive(ch);
167 rx++;
168 } else {
169 complete = 1;
170 }
171 }
172 if (complete || !rx) {
173 napi_complete(&ch->napi);
174 ltq_dma_ack_irq(&ch->dma);
175 }
176 return rx;
177}
178
179static int
180ltq_etop_poll_tx(struct napi_struct *napi, int budget)
181{
182 struct ltq_etop_chan *ch =
183 container_of(napi, struct ltq_etop_chan, napi);
184 struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
185 struct netdev_queue *txq =
186 netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
187 unsigned long flags;
188
189 spin_lock_irqsave(&priv->lock, flags);
190 while ((ch->dma.desc_base[ch->tx_free].ctl &
191 (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
192 dev_kfree_skb_any(ch->skb[ch->tx_free]);
193 ch->skb[ch->tx_free] = NULL;
194 memset(&ch->dma.desc_base[ch->tx_free], 0,
195 sizeof(struct ltq_dma_desc));
196 ch->tx_free++;
197 ch->tx_free %= LTQ_DESC_NUM;
198 }
199 spin_unlock_irqrestore(&priv->lock, flags);
200
201 if (netif_tx_queue_stopped(txq))
202 netif_tx_start_queue(txq);
203 napi_complete(&ch->napi);
204 ltq_dma_ack_irq(&ch->dma);
205 return 1;
206}
207
208static irqreturn_t
209ltq_etop_dma_irq(int irq, void *_priv)
210{
211 struct ltq_etop_priv *priv = _priv;
212 int ch = irq - LTQ_DMA_CH0_INT;
213
214 napi_schedule(&priv->ch[ch].napi);
215 return IRQ_HANDLED;
216}
217
218static void
219ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
220{
221 struct ltq_etop_priv *priv = netdev_priv(dev);
222
223 ltq_dma_free(&ch->dma);
224 if (ch->dma.irq)
225 free_irq(ch->dma.irq, priv);
226 if (IS_RX(ch->idx)) {
227 int desc;
228 for (desc = 0; desc < LTQ_DESC_NUM; desc++)
229			dev_kfree_skb_any(ch->skb[desc]);
230 }
231}
232
233static void
234ltq_etop_hw_exit(struct net_device *dev)
235{
236 struct ltq_etop_priv *priv = netdev_priv(dev);
237 int i;
238
239 ltq_pmu_disable(PMU_PPE);
240 for (i = 0; i < MAX_DMA_CHAN; i++)
241 if (IS_TX(i) || IS_RX(i))
242 ltq_etop_free_channel(dev, &priv->ch[i]);
243}
244
245static int
246ltq_etop_hw_init(struct net_device *dev)
247{
248 struct ltq_etop_priv *priv = netdev_priv(dev);
249 int i;
250
251 ltq_pmu_enable(PMU_PPE);
252
253 switch (priv->pldata->mii_mode) {
254 case PHY_INTERFACE_MODE_RMII:
255 ltq_etop_w32_mask(ETOP_MII_MASK,
256 ETOP_MII_REVERSE, LTQ_ETOP_CFG);
257 break;
258
259 case PHY_INTERFACE_MODE_MII:
260 ltq_etop_w32_mask(ETOP_MII_MASK,
261 ETOP_MII_NORMAL, LTQ_ETOP_CFG);
262 break;
263
264 default:
265 netdev_err(dev, "unknown mii mode %d\n",
266 priv->pldata->mii_mode);
267 return -ENOTSUPP;
268 }
269
270 /* enable crc generation */
271 ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
272
273 ltq_dma_init_port(DMA_PORT_ETOP);
274
275 for (i = 0; i < MAX_DMA_CHAN; i++) {
276 int irq = LTQ_DMA_CH0_INT + i;
277 struct ltq_etop_chan *ch = &priv->ch[i];
278
279 ch->idx = ch->dma.nr = i;
280
281 if (IS_TX(i)) {
282 ltq_dma_alloc_tx(&ch->dma);
283 request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
284 "etop_tx", priv);
285 } else if (IS_RX(i)) {
286 ltq_dma_alloc_rx(&ch->dma);
287 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
288 ch->dma.desc++)
289 if (ltq_etop_alloc_skb(ch))
290 return -ENOMEM;
291 ch->dma.desc = 0;
292 request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
293 "etop_rx", priv);
294 }
295 ch->dma.irq = irq;
296 }
297 return 0;
298}
299
300static void
301ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
302{
303 strcpy(info->driver, "Lantiq ETOP");
304 strcpy(info->bus_info, "internal");
305 strcpy(info->version, DRV_VERSION);
306}
307
308static int
309ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
310{
311 struct ltq_etop_priv *priv = netdev_priv(dev);
312
313 return phy_ethtool_gset(priv->phydev, cmd);
314}
315
316static int
317ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
318{
319 struct ltq_etop_priv *priv = netdev_priv(dev);
320
321 return phy_ethtool_sset(priv->phydev, cmd);
322}
323
324static int
325ltq_etop_nway_reset(struct net_device *dev)
326{
327 struct ltq_etop_priv *priv = netdev_priv(dev);
328
329 return phy_start_aneg(priv->phydev);
330}
331
332static const struct ethtool_ops ltq_etop_ethtool_ops = {
333 .get_drvinfo = ltq_etop_get_drvinfo,
334 .get_settings = ltq_etop_get_settings,
335 .set_settings = ltq_etop_set_settings,
336 .nway_reset = ltq_etop_nway_reset,
337};
338
339static int
340ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
341{
342 u32 val = MDIO_REQUEST |
343 ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
344 ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
345 phy_data;
346
347 while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
348 ;
349 ltq_etop_w32(val, LTQ_ETOP_MDIO);
350 return 0;
351}
352
353static int
354ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
355{
356 u32 val = MDIO_REQUEST | MDIO_READ |
357 ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
358 ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
359
360 while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
361 ;
362 ltq_etop_w32(val, LTQ_ETOP_MDIO);
363 while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
364 ;
365 val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
366 return val;
367}
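
/*
 * Worked example (hypothetical operands): reading register 1 of the PHY
 * at address 0 assembles the request word as
 *
 *	MDIO_REQUEST | MDIO_READ                  = 0xc0000000
 *	(0 & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET  = 0x00000000
 *	(1 & MDIO_REG_MASK) << MDIO_REG_OFFSET    = 0x00010000
 *	                                            ----------
 *	val                                       = 0xc0010000
 *
 * and once the MDIO_REQUEST bit clears, the low 16 bits of the MDIO
 * register hold the read-back value.
 */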
368
369static void
370ltq_etop_mdio_link(struct net_device *dev)
371{
372 /* nothing to do */
373}
374
375static int
376ltq_etop_mdio_probe(struct net_device *dev)
377{
378 struct ltq_etop_priv *priv = netdev_priv(dev);
379 struct phy_device *phydev = NULL;
380 int phy_addr;
381
382 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
383 if (priv->mii_bus->phy_map[phy_addr]) {
384 phydev = priv->mii_bus->phy_map[phy_addr];
385 break;
386 }
387 }
388
389 if (!phydev) {
390 netdev_err(dev, "no PHY found\n");
391 return -ENODEV;
392 }
393
394 phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
395 0, priv->pldata->mii_mode);
396
397 if (IS_ERR(phydev)) {
398 netdev_err(dev, "Could not attach to PHY\n");
399 return PTR_ERR(phydev);
400 }
401
402 phydev->supported &= (SUPPORTED_10baseT_Half
403 | SUPPORTED_10baseT_Full
404 | SUPPORTED_100baseT_Half
405 | SUPPORTED_100baseT_Full
406 | SUPPORTED_Autoneg
407 | SUPPORTED_MII
408 | SUPPORTED_TP);
409
410 phydev->advertising = phydev->supported;
411 priv->phydev = phydev;
412 pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
413 dev->name, phydev->drv->name,
414 dev_name(&phydev->dev), phydev->irq);
415
416 return 0;
417}
418
419static int
420ltq_etop_mdio_init(struct net_device *dev)
421{
422 struct ltq_etop_priv *priv = netdev_priv(dev);
423 int i;
424 int err;
425
426 priv->mii_bus = mdiobus_alloc();
427 if (!priv->mii_bus) {
428 netdev_err(dev, "failed to allocate mii bus\n");
429 err = -ENOMEM;
430 goto err_out;
431 }
432
433 priv->mii_bus->priv = dev;
434 priv->mii_bus->read = ltq_etop_mdio_rd;
435 priv->mii_bus->write = ltq_etop_mdio_wr;
436 priv->mii_bus->name = "ltq_mii";
437 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
438 priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
439 if (!priv->mii_bus->irq) {
440 err = -ENOMEM;
441 goto err_out_free_mdiobus;
442 }
443
444 for (i = 0; i < PHY_MAX_ADDR; ++i)
445 priv->mii_bus->irq[i] = PHY_POLL;
446
447 if (mdiobus_register(priv->mii_bus)) {
448 err = -ENXIO;
449 goto err_out_free_mdio_irq;
450 }
451
452 if (ltq_etop_mdio_probe(dev)) {
453 err = -ENXIO;
454 goto err_out_unregister_bus;
455 }
456 return 0;
457
458err_out_unregister_bus:
459 mdiobus_unregister(priv->mii_bus);
460err_out_free_mdio_irq:
461 kfree(priv->mii_bus->irq);
462err_out_free_mdiobus:
463 mdiobus_free(priv->mii_bus);
464err_out:
465 return err;
466}
467
468static void
469ltq_etop_mdio_cleanup(struct net_device *dev)
470{
471 struct ltq_etop_priv *priv = netdev_priv(dev);
472
473 phy_disconnect(priv->phydev);
474 mdiobus_unregister(priv->mii_bus);
475 kfree(priv->mii_bus->irq);
476 mdiobus_free(priv->mii_bus);
477}
478
479static int
480ltq_etop_open(struct net_device *dev)
481{
482 struct ltq_etop_priv *priv = netdev_priv(dev);
483 int i;
484
485 for (i = 0; i < MAX_DMA_CHAN; i++) {
486 struct ltq_etop_chan *ch = &priv->ch[i];
487
488		if (!IS_TX(i) && !IS_RX(i))
489 continue;
490 ltq_dma_open(&ch->dma);
491 napi_enable(&ch->napi);
492 }
493 phy_start(priv->phydev);
494 netif_tx_start_all_queues(dev);
495 return 0;
496}
497
498static int
499ltq_etop_stop(struct net_device *dev)
500{
501 struct ltq_etop_priv *priv = netdev_priv(dev);
502 int i;
503
504 netif_tx_stop_all_queues(dev);
505 phy_stop(priv->phydev);
506 for (i = 0; i < MAX_DMA_CHAN; i++) {
507 struct ltq_etop_chan *ch = &priv->ch[i];
508
509 if (!IS_RX(i) && !IS_TX(i))
510 continue;
511 napi_disable(&ch->napi);
512 ltq_dma_close(&ch->dma);
513 }
514 return 0;
515}
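/*
 * Teardown runs in the reverse order of ltq_etop_open(): the TX queues and
 * PHY are stopped first so no new frames are queued, then NAPI is disabled
 * before each DMA channel is closed so the poll handlers cannot touch
 * descriptors that are going away.
 */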
516
517static int
518ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
519{
520 int queue = skb_get_queue_mapping(skb);
521 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
522 struct ltq_etop_priv *priv = netdev_priv(dev);
523 struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
524 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
525 int len;
526 unsigned long flags;
527 u32 byte_offset;
528
529 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
530
531 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
532 dev_kfree_skb_any(skb);
533 netdev_err(dev, "tx ring full\n");
534 netif_tx_stop_queue(txq);
535		return NETDEV_TX_OK;
536 }
537
538 /* dma needs to start on a 16 byte aligned address */
539 byte_offset = CPHYSADDR(skb->data) % 16;
540 ch->skb[ch->dma.desc] = skb;
541
542 dev->trans_start = jiffies;
543
544 spin_lock_irqsave(&priv->lock, flags);
545 desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
546 DMA_TO_DEVICE)) - byte_offset;
547 wmb();
548 desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
549 LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
550 ch->dma.desc++;
551 ch->dma.desc %= LTQ_DESC_NUM;
552 spin_unlock_irqrestore(&priv->lock, flags);
553
554 if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
555 netif_tx_stop_queue(txq);
556
557 return NETDEV_TX_OK;
558}
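/*
 * The byte_offset handling above implements the 16-byte alignment rule
 * noted in the comment: the DMA address is rounded down to a 16-byte
 * boundary and the remainder is carried in the control word via
 * LTQ_DMA_TX_OFFSET(). Illustrative values:
 *
 *	CPHYSADDR(skb->data) = 0x01000006
 *	byte_offset          = 6
 *	desc->addr           = 0x01000000   (16-byte aligned)
 *	desc->ctl           |= LTQ_DMA_TX_OFFSET(6)   (engine skips 6 pad bytes)
 */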
559
560static int
561ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
562{
563 int ret = eth_change_mtu(dev, new_mtu);
564
565 if (!ret) {
566 struct ltq_etop_priv *priv = netdev_priv(dev);
567 unsigned long flags;
568
569 spin_lock_irqsave(&priv->lock, flags);
570 ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
571 LTQ_ETOP_IGPLEN);
572 spin_unlock_irqrestore(&priv->lock, flags);
573 }
574 return ret;
575}
576
577static int
578ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
579{
580 struct ltq_etop_priv *priv = netdev_priv(dev);
581
582	/* TODO: mii-tool reports "No MII transceiver present!" ?! */
583 return phy_mii_ioctl(priv->phydev, rq, cmd);
584}
585
586static int
587ltq_etop_set_mac_address(struct net_device *dev, void *p)
588{
589 int ret = eth_mac_addr(dev, p);
590
591 if (!ret) {
592 struct ltq_etop_priv *priv = netdev_priv(dev);
593 unsigned long flags;
594
595 /* store the mac for the unicast filter */
596 spin_lock_irqsave(&priv->lock, flags);
597 ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
598 ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
599 LTQ_ETOP_MAC_DA1);
600 spin_unlock_irqrestore(&priv->lock, flags);
601 }
602 return ret;
603}
604
605static void
606ltq_etop_set_multicast_list(struct net_device *dev)
607{
608 struct ltq_etop_priv *priv = netdev_priv(dev);
609 unsigned long flags;
610
611	/* ensure that the unicast filter is not enabled in promiscuous mode */
612 spin_lock_irqsave(&priv->lock, flags);
613 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
614 ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
615 else
616 ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
617 spin_unlock_irqrestore(&priv->lock, flags);
618}
619
620static u16
621ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
622{
623 /* we are currently only using the first queue */
624 return 0;
625}
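/*
 * With the queue pinned to 0, ltq_etop_tx() always resolves to channel
 * (0 << 1) | 1 = 1, i.e. the first TX DMA channel; the extra TX queues
 * requested via alloc_etherdev_mq(..., 4) remain idle.
 */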
626
627static int
628ltq_etop_init(struct net_device *dev)
629{
630 struct ltq_etop_priv *priv = netdev_priv(dev);
631 struct sockaddr mac;
632 int err;
633
634 ether_setup(dev);
635 dev->watchdog_timeo = 10 * HZ;
636 err = ltq_etop_hw_init(dev);
637 if (err)
638 goto err_hw;
639 ltq_etop_change_mtu(dev, 1500);
640
641 memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
642 if (!is_valid_ether_addr(mac.sa_data)) {
643 pr_warn("etop: invalid MAC, using random\n");
644 random_ether_addr(mac.sa_data);
645 }
646
647 err = ltq_etop_set_mac_address(dev, &mac);
648 if (err)
649 goto err_netdev;
650 ltq_etop_set_multicast_list(dev);
651 err = ltq_etop_mdio_init(dev);
652 if (err)
653 goto err_netdev;
654 return 0;
655
656err_netdev:
657 unregister_netdev(dev);
658 free_netdev(dev);
659err_hw:
660 ltq_etop_hw_exit(dev);
661 return err;
662}
663
664static void
665ltq_etop_tx_timeout(struct net_device *dev)
666{
667 int err;
668
669 ltq_etop_hw_exit(dev);
670 err = ltq_etop_hw_init(dev);
671 if (err)
672 goto err_hw;
673 dev->trans_start = jiffies;
674 netif_wake_queue(dev);
675 return;
676
677err_hw:
678 ltq_etop_hw_exit(dev);
679 netdev_err(dev, "failed to restart etop after TX timeout\n");
680}
681
682static const struct net_device_ops ltq_eth_netdev_ops = {
683 .ndo_open = ltq_etop_open,
684 .ndo_stop = ltq_etop_stop,
685 .ndo_start_xmit = ltq_etop_tx,
686 .ndo_change_mtu = ltq_etop_change_mtu,
687 .ndo_do_ioctl = ltq_etop_ioctl,
688 .ndo_set_mac_address = ltq_etop_set_mac_address,
689 .ndo_validate_addr = eth_validate_addr,
690 .ndo_set_multicast_list = ltq_etop_set_multicast_list,
691 .ndo_select_queue = ltq_etop_select_queue,
692 .ndo_init = ltq_etop_init,
693 .ndo_tx_timeout = ltq_etop_tx_timeout,
694};
695
696static int __init
697ltq_etop_probe(struct platform_device *pdev)
698{
699 struct net_device *dev;
700 struct ltq_etop_priv *priv;
701 struct resource *res;
702 int err;
703 int i;
704
705 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
706 if (!res) {
707 dev_err(&pdev->dev, "failed to get etop resource\n");
708 err = -ENOENT;
709 goto err_out;
710 }
711
712 res = devm_request_mem_region(&pdev->dev, res->start,
713 resource_size(res), dev_name(&pdev->dev));
714 if (!res) {
715 dev_err(&pdev->dev, "failed to request etop resource\n");
716 err = -EBUSY;
717 goto err_out;
718 }
719
720 ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
721 res->start, resource_size(res));
722 if (!ltq_etop_membase) {
723 dev_err(&pdev->dev, "failed to remap etop engine %d\n",
724 pdev->id);
725 err = -ENOMEM;
726 goto err_out;
727 }
728
729	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
730	strcpy(dev->name, "eth%d");
731 dev->netdev_ops = &ltq_eth_netdev_ops;
732 dev->ethtool_ops = &ltq_etop_ethtool_ops;
733 priv = netdev_priv(dev);
734 priv->res = res;
735 priv->pldata = dev_get_platdata(&pdev->dev);
736 priv->netdev = dev;
737 spin_lock_init(&priv->lock);
738
739 for (i = 0; i < MAX_DMA_CHAN; i++) {
740 if (IS_TX(i))
741 netif_napi_add(dev, &priv->ch[i].napi,
742 ltq_etop_poll_tx, 8);
743 else if (IS_RX(i))
744 netif_napi_add(dev, &priv->ch[i].napi,
745 ltq_etop_poll_rx, 32);
746 priv->ch[i].netdev = dev;
747 }
748
749 err = register_netdev(dev);
750 if (err)
751 goto err_free;
752
753 platform_set_drvdata(pdev, dev);
754 return 0;
755
756err_free:
757	free_netdev(dev);
758err_out:
759 return err;
760}
761
762static int __devexit
763ltq_etop_remove(struct platform_device *pdev)
764{
765 struct net_device *dev = platform_get_drvdata(pdev);
766
767 if (dev) {
768 netif_tx_stop_all_queues(dev);
769 ltq_etop_hw_exit(dev);
770 ltq_etop_mdio_cleanup(dev);
771 unregister_netdev(dev);
772 }
773 return 0;
774}
775
776static struct platform_driver ltq_mii_driver = {
777 .remove = __devexit_p(ltq_etop_remove),
778 .driver = {
779 .name = "ltq_etop",
780 .owner = THIS_MODULE,
781 },
782};
783
784static int __init
785init_ltq_etop(void)
786{
787 int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
788
789 if (ret)
790		pr_err("ltq_etop: Error registering platform driver!\n");
791 return ret;
792}
793
794static void __exit
795exit_ltq_etop(void)
796{
797 platform_driver_unregister(&ltq_mii_driver);
798}
799
800module_init(init_ltq_etop);
801module_exit(exit_ltq_etop);
802
803MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
804MODULE_DESCRIPTION("Lantiq SoC ETOP");
805MODULE_LICENSE("GPL");
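/*
 * Board code is expected to hand in the MAC address and MII mode as
 * platform data. A minimal sketch (the struct layout is inferred from the
 * priv->pldata uses above; the registration call is the generic platform
 * helper, nothing this file defines):
 *
 *	static struct ltq_eth_data ltq_eth_pdata = {
 *		.mii_mode = PHY_INTERFACE_MODE_MII,
 *	};
 *
 *	platform_device_register_data(NULL, "ltq_etop", -1,
 *				      &ltq_eth_pdata, sizeof(ltq_eth_pdata));
 */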
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 78e34e9e4f00..d8e4e69ad0b9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -603,21 +603,13 @@ static int macvlan_port_create(struct net_device *dev)
603 return err; 603 return err;
604} 604}
605 605
606static void macvlan_port_rcu_free(struct rcu_head *head)
607{
608 struct macvlan_port *port;
609
610 port = container_of(head, struct macvlan_port, rcu);
611 kfree(port);
612}
613
614static void macvlan_port_destroy(struct net_device *dev) 606static void macvlan_port_destroy(struct net_device *dev)
615{ 607{
616 struct macvlan_port *port = macvlan_port_get(dev); 608 struct macvlan_port *port = macvlan_port_get(dev);
617 609
618 dev->priv_flags &= ~IFF_MACVLAN_PORT; 610 dev->priv_flags &= ~IFF_MACVLAN_PORT;
619 netdev_rx_handler_unregister(dev); 611 netdev_rx_handler_unregister(dev);
620 call_rcu(&port->rcu, macvlan_port_rcu_free); 612 kfree_rcu(port, rcu);
621} 613}
622 614
623static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) 615static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 75b0d3cb7676..9f689f1da0fc 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
56 * Returns a pointer to the interrupt parent node, or NULL if the interrupt 56 * Returns a pointer to the interrupt parent node, or NULL if the interrupt
57 * parent could not be determined. 57 * parent could not be determined.
58 */ 58 */
59static struct device_node *of_irq_find_parent(struct device_node *child) 59struct device_node *of_irq_find_parent(struct device_node *child)
60{ 60{
61 struct device_node *p; 61 struct device_node *p;
62 const __be32 *parp; 62 const __be32 *parp;
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index d552d2c77844..6af6b628175b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,6 +39,7 @@
39#include <linux/syscore_ops.h> 39#include <linux/syscore_ops.h>
40#include <linux/tboot.h> 40#include <linux/tboot.h>
41#include <linux/dmi.h> 41#include <linux/dmi.h>
42#include <linux/pci-ats.h>
42#include <asm/cacheflush.h> 43#include <asm/cacheflush.h>
43#include <asm/iommu.h> 44#include <asm/iommu.h>
44#include "pci.h" 45#include "pci.h"
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 553d8ee55c1c..42fae4776515 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -13,6 +13,7 @@
13#include <linux/mutex.h> 13#include <linux/mutex.h>
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/pci-ats.h>
16#include "pci.h" 17#include "pci.h"
17 18
18#define VIRTFN_ID_LEN 16 19#define VIRTFN_ID_LEN 16
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a6ec200fe5ee..4020025f854e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -250,15 +250,6 @@ struct pci_sriov {
250 u8 __iomem *mstate; /* VF Migration State Array */ 250 u8 __iomem *mstate; /* VF Migration State Array */
251}; 251};
252 252
253/* Address Translation Service */
254struct pci_ats {
255 int pos; /* capability position */
256 int stu; /* Smallest Translation Unit */
257 int qdep; /* Invalidate Queue Depth */
258 int ref_cnt; /* Physical Function reference count */
259 unsigned int is_enabled:1; /* Enable bit is set */
260};
261
262#ifdef CONFIG_PCI_IOV 253#ifdef CONFIG_PCI_IOV
263extern int pci_iov_init(struct pci_dev *dev); 254extern int pci_iov_init(struct pci_dev *dev);
264extern void pci_iov_release(struct pci_dev *dev); 255extern void pci_iov_release(struct pci_dev *dev);
@@ -269,19 +260,6 @@ extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
269extern void pci_restore_iov_state(struct pci_dev *dev); 260extern void pci_restore_iov_state(struct pci_dev *dev);
270extern int pci_iov_bus_range(struct pci_bus *bus); 261extern int pci_iov_bus_range(struct pci_bus *bus);
271 262
272extern int pci_enable_ats(struct pci_dev *dev, int ps);
273extern void pci_disable_ats(struct pci_dev *dev);
274extern int pci_ats_queue_depth(struct pci_dev *dev);
275/**
276 * pci_ats_enabled - query the ATS status
277 * @dev: the PCI device
278 *
279 * Returns 1 if ATS capability is enabled, or 0 if not.
280 */
281static inline int pci_ats_enabled(struct pci_dev *dev)
282{
283 return dev->ats && dev->ats->is_enabled;
284}
285#else 263#else
286static inline int pci_iov_init(struct pci_dev *dev) 264static inline int pci_iov_init(struct pci_dev *dev)
287{ 265{
@@ -304,21 +282,6 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
304 return 0; 282 return 0;
305} 283}
306 284
307static inline int pci_enable_ats(struct pci_dev *dev, int ps)
308{
309 return -ENODEV;
310}
311static inline void pci_disable_ats(struct pci_dev *dev)
312{
313}
314static inline int pci_ats_queue_depth(struct pci_dev *dev)
315{
316 return -ENODEV;
317}
318static inline int pci_ats_enabled(struct pci_dev *dev)
319{
320 return 0;
321}
322#endif /* CONFIG_PCI_IOV */ 285#endif /* CONFIG_PCI_IOV */
323 286
324static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, 287static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e1878877399c..42891726ea72 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -3,10 +3,10 @@
3# 3#
4 4
5config RTC_LIB 5config RTC_LIB
6 tristate 6 bool
7 7
8menuconfig RTC_CLASS 8menuconfig RTC_CLASS
9 tristate "Real Time Clock" 9 bool "Real Time Clock"
10 default n 10 default n
11 depends on !S390 11 depends on !S390
12 select RTC_LIB 12 select RTC_LIB
@@ -15,9 +15,6 @@ menuconfig RTC_CLASS
15 be allowed to plug one or more RTCs to your system. You will 15 be allowed to plug one or more RTCs to your system. You will
16 probably want to enable one or more of the interfaces below. 16 probably want to enable one or more of the interfaces below.
17 17
18 This driver can also be built as a module. If so, the module
19 will be called rtc-core.
20
21if RTC_CLASS 18if RTC_CLASS
22 19
23config RTC_HCTOSYS 20config RTC_HCTOSYS
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 39013867cbd6..4194e59e14cd 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -41,26 +41,21 @@ static void rtc_device_release(struct device *dev)
41 * system's wall clock; restore it on resume(). 41 * system's wall clock; restore it on resume().
42 */ 42 */
43 43
44static struct timespec delta;
45static time_t oldtime; 44static time_t oldtime;
45static struct timespec oldts;
46 46
47static int rtc_suspend(struct device *dev, pm_message_t mesg) 47static int rtc_suspend(struct device *dev, pm_message_t mesg)
48{ 48{
49 struct rtc_device *rtc = to_rtc_device(dev); 49 struct rtc_device *rtc = to_rtc_device(dev);
50 struct rtc_time tm; 50 struct rtc_time tm;
51 struct timespec ts = current_kernel_time();
52 51
53 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) 52 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
54 return 0; 53 return 0;
55 54
56 rtc_read_time(rtc, &tm); 55 rtc_read_time(rtc, &tm);
56 ktime_get_ts(&oldts);
57 rtc_tm_to_time(&tm, &oldtime); 57 rtc_tm_to_time(&tm, &oldtime);
58 58
59 /* RTC precision is 1 second; adjust delta for avg 1/2 sec err */
60 set_normalized_timespec(&delta,
61 ts.tv_sec - oldtime,
62 ts.tv_nsec - (NSEC_PER_SEC >> 1));
63
64 return 0; 59 return 0;
65} 60}
66 61
@@ -70,10 +65,12 @@ static int rtc_resume(struct device *dev)
70 struct rtc_time tm; 65 struct rtc_time tm;
71 time_t newtime; 66 time_t newtime;
72 struct timespec time; 67 struct timespec time;
68 struct timespec newts;
73 69
74 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) 70 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
75 return 0; 71 return 0;
76 72
73 ktime_get_ts(&newts);
77 rtc_read_time(rtc, &tm); 74 rtc_read_time(rtc, &tm);
78 if (rtc_valid_tm(&tm) != 0) { 75 if (rtc_valid_tm(&tm) != 0) {
79 pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev)); 76 pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev));
@@ -85,15 +82,13 @@ static int rtc_resume(struct device *dev)
85 pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); 82 pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
86 return 0; 83 return 0;
87 } 84 }
85 /* calculate the RTC time delta */
86 set_normalized_timespec(&time, newtime - oldtime, 0);
88 87
89 /* restore wall clock using delta against this RTC; 88 /* subtract kernel time between rtc_suspend to rtc_resume */
90 * adjust again for avg 1/2 second RTC sampling error 89 time = timespec_sub(time, timespec_sub(newts, oldts));
91 */
92 set_normalized_timespec(&time,
93 newtime + delta.tv_sec,
94 (NSEC_PER_SEC >> 1) + delta.tv_nsec);
95 do_settimeofday(&time);
96 90
91 timekeeping_inject_sleeptime(&time);
97 return 0; 92 return 0;
98} 93}
99 94
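/*
 * Net effect of the rtc/class.c change above: the delta handed to
 * timekeeping_inject_sleeptime() is
 *
 *	(RTC_resume - RTC_suspend) - (monotonic_resume - monotonic_suspend)
 *
 * i.e. the interval actually spent suspended, with the awake time between
 * the two RTC reads (already counted by the monotonic clock) subtracted.
 */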
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index da7b9887ec48..f980600f78a8 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -75,8 +75,10 @@ MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
75MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter"); 75MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
76MODULE_LICENSE("Dual BSD/GPL"); 76MODULE_LICENSE("Dual BSD/GPL");
77MODULE_VERSION(ARCMSR_DRIVER_VERSION); 77MODULE_VERSION(ARCMSR_DRIVER_VERSION);
78static int sleeptime = 10; 78
79static int retrycount = 12; 79#define ARCMSR_SLEEPTIME 10
80#define ARCMSR_RETRYCOUNT 12
81
80wait_queue_head_t wait_q; 82wait_queue_head_t wait_q;
81static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 83static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
82 struct scsi_cmnd *cmd); 84 struct scsi_cmnd *cmd);
@@ -171,24 +173,6 @@ static struct pci_driver arcmsr_pci_driver = {
171**************************************************************************** 173****************************************************************************
172**************************************************************************** 174****************************************************************************
173*/ 175*/
174int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
175{
176 struct Scsi_Host *shost = NULL;
177 int i, isleep;
178 shost = cmd->device->host;
179 isleep = sleeptime / 10;
180 if (isleep > 0) {
181 for (i = 0; i < isleep; i++) {
182 msleep(10000);
183 }
184 }
185
186 isleep = sleeptime % 10;
187 if (isleep > 0) {
188 msleep(isleep*1000);
189 }
190 return 0;
191}
192 176
193static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb) 177static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
194{ 178{
@@ -323,66 +307,64 @@ static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
323 307
324 default: acb->adapter_type = ACB_ADAPTER_TYPE_A; 308 default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
325 } 309 }
326} 310}
327 311
328static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) 312static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
329{ 313{
330 struct MessageUnit_A __iomem *reg = acb->pmuA; 314 struct MessageUnit_A __iomem *reg = acb->pmuA;
331 uint32_t Index; 315 int i;
332 uint8_t Retries = 0x00; 316
333 do { 317 for (i = 0; i < 2000; i++) {
334 for (Index = 0; Index < 100; Index++) { 318 if (readl(&reg->outbound_intstatus) &
335 if (readl(&reg->outbound_intstatus) & 319 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
336 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 320 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
337 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, 321 &reg->outbound_intstatus);
338 &reg->outbound_intstatus); 322 return true;
339 return true; 323 }
340 } 324 msleep(10);
341 msleep(10); 325 } /* max 20 seconds */
342 }/*max 1 seconds*/
343 326
344 } while (Retries++ < 20);/*max 20 sec*/
345 return false; 327 return false;
346} 328}
347 329
348static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) 330static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
349{ 331{
350 struct MessageUnit_B *reg = acb->pmuB; 332 struct MessageUnit_B *reg = acb->pmuB;
351 uint32_t Index; 333 int i;
352 uint8_t Retries = 0x00; 334
353 do { 335 for (i = 0; i < 2000; i++) {
354 for (Index = 0; Index < 100; Index++) { 336 if (readl(reg->iop2drv_doorbell)
355 if (readl(reg->iop2drv_doorbell) 337 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
356 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 338 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
357 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN 339 reg->iop2drv_doorbell);
358 , reg->iop2drv_doorbell); 340 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
359 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); 341 reg->drv2iop_doorbell);
360 return true; 342 return true;
361 } 343 }
362 msleep(10); 344 msleep(10);
363 }/*max 1 seconds*/ 345 } /* max 20 seconds */
364 346
365 } while (Retries++ < 20);/*max 20 sec*/
366 return false; 347 return false;
367} 348}
368 349
369static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB) 350static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
370{ 351{
371 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; 352 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
372 unsigned char Retries = 0x00; 353 int i;
373 uint32_t Index; 354
374 do { 355 for (i = 0; i < 2000; i++) {
375 for (Index = 0; Index < 100; Index++) { 356 if (readl(&phbcmu->outbound_doorbell)
376 if (readl(&phbcmu->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 357 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
377 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &phbcmu->outbound_doorbell_clear);/*clear interrupt*/ 358 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
378 return true; 359 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
379 } 360 return true;
380 /* one us delay */ 361 }
381 msleep(10); 362 msleep(10);
382 } /*max 1 seconds*/ 363 } /* max 20 seconds */
383 } while (Retries++ < 20); /*max 20 sec*/ 364
384 return false; 365 return false;
385} 366}
367
386static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) 368static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
387{ 369{
388 struct MessageUnit_A __iomem *reg = acb->pmuA; 370 struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -459,10 +441,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
459 struct CommandControlBlock *ccb_tmp; 441 struct CommandControlBlock *ccb_tmp;
460 int i = 0, j = 0; 442 int i = 0, j = 0;
461 dma_addr_t cdb_phyaddr; 443 dma_addr_t cdb_phyaddr;
462 unsigned long roundup_ccbsize = 0, offset; 444 unsigned long roundup_ccbsize;
463 unsigned long max_xfer_len; 445 unsigned long max_xfer_len;
464 unsigned long max_sg_entrys; 446 unsigned long max_sg_entrys;
465 uint32_t firm_config_version; 447 uint32_t firm_config_version;
448
466 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 449 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
467 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 450 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
468 acb->devstate[i][j] = ARECA_RAID_GONE; 451 acb->devstate[i][j] = ARECA_RAID_GONE;
@@ -472,23 +455,20 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
472 firm_config_version = acb->firm_cfg_version; 455 firm_config_version = acb->firm_cfg_version;
473 if((firm_config_version & 0xFF) >= 3){ 456 if((firm_config_version & 0xFF) >= 3){
474 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */ 457 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
475 max_sg_entrys = (max_xfer_len/4096); 458 max_sg_entrys = (max_xfer_len/4096);
476 } 459 }
477 acb->host->max_sectors = max_xfer_len/512; 460 acb->host->max_sectors = max_xfer_len/512;
478 acb->host->sg_tablesize = max_sg_entrys; 461 acb->host->sg_tablesize = max_sg_entrys;
479 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); 462 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
480 acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM + 32; 463 acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
481 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); 464 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
482 if(!dma_coherent){ 465 if(!dma_coherent){
483 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error \n", acb->host->host_no); 466 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
484 return -ENOMEM; 467 return -ENOMEM;
485 } 468 }
486 acb->dma_coherent = dma_coherent; 469 acb->dma_coherent = dma_coherent;
487 acb->dma_coherent_handle = dma_coherent_handle; 470 acb->dma_coherent_handle = dma_coherent_handle;
488 memset(dma_coherent, 0, acb->uncache_size); 471 memset(dma_coherent, 0, acb->uncache_size);
489 offset = roundup((unsigned long)dma_coherent, 32) - (unsigned long)dma_coherent;
490 dma_coherent_handle = dma_coherent_handle + offset;
491 dma_coherent = (struct CommandControlBlock *)dma_coherent + offset;
492 ccb_tmp = dma_coherent; 472 ccb_tmp = dma_coherent;
493 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; 473 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
494 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){ 474 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
@@ -2602,12 +2582,8 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2602 if (cdb_phyaddr_hi32 != 0) { 2582 if (cdb_phyaddr_hi32 != 0) {
2603 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; 2583 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2604 2584
2605 if (cdb_phyaddr_hi32 != 0) { 2585 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
2606 unsigned char Retries = 0x00; 2586 acb->adapter_index, cdb_phyaddr_hi32);
2607 do {
2608 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x \n", acb->adapter_index, cdb_phyaddr_hi32);
2609 } while (Retries++ < 100);
2610 }
2611 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]); 2587 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
2612 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]); 2588 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
2613 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); 2589 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
@@ -2955,12 +2931,12 @@ static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2955 arcmsr_hardware_reset(acb); 2931 arcmsr_hardware_reset(acb);
2956 acb->acb_flags &= ~ACB_F_IOP_INITED; 2932 acb->acb_flags &= ~ACB_F_IOP_INITED;
2957sleep_again: 2933sleep_again:
2958 arcmsr_sleep_for_bus_reset(cmd); 2934 ssleep(ARCMSR_SLEEPTIME);
2959 if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { 2935 if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
2960 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count); 2936 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
2961 if (retry_count > retrycount) { 2937 if (retry_count > ARCMSR_RETRYCOUNT) {
2962 acb->fw_flag = FW_DEADLOCK; 2938 acb->fw_flag = FW_DEADLOCK;
2963 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no); 2939 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
2964 return FAILED; 2940 return FAILED;
2965 } 2941 }
2966 retry_count++; 2942 retry_count++;
@@ -3025,12 +3001,12 @@ sleep_again:
3025 arcmsr_hardware_reset(acb); 3001 arcmsr_hardware_reset(acb);
3026 acb->acb_flags &= ~ACB_F_IOP_INITED; 3002 acb->acb_flags &= ~ACB_F_IOP_INITED;
3027sleep: 3003sleep:
3028 arcmsr_sleep_for_bus_reset(cmd); 3004 ssleep(ARCMSR_SLEEPTIME);
3029 if ((readl(&reg->host_diagnostic) & 0x04) != 0) { 3005 if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
3030 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count); 3006 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
3031 if (retry_count > retrycount) { 3007 if (retry_count > ARCMSR_RETRYCOUNT) {
3032 acb->fw_flag = FW_DEADLOCK; 3008 acb->fw_flag = FW_DEADLOCK;
3033 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no); 3009 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
3034 return FAILED; 3010 return FAILED;
3035 } 3011 }
3036 retry_count++; 3012 retry_count++;
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 1cb8a5e85c7f..1d7b976c850f 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#ifndef BEISCSI_H 18#ifndef BEISCSI_H
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index ad246369d373..b8a82f2c62c8 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
@@ -458,6 +458,7 @@ void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
458 req_hdr->opcode = opcode; 458 req_hdr->opcode = opcode;
459 req_hdr->subsystem = subsystem; 459 req_hdr->subsystem = subsystem;
460 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 460 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
461 req_hdr->timeout = 120;
461} 462}
462 463
463static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 464static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index fbd1dc2c15f7..497eb29e5c9e 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@serverengines.com 11 * linux-drivers@emulex.com
12 * 12 *
13 * ServerEngines 13 * Emulex
14 * 209 N. Fair Oaks Ave 14 * 3333 Susan Street
15 * Sunnyvale, CA 94085 15 * Costa Mesa, CA 92626
16 */ 16 */
17 17
18#ifndef BEISCSI_CMDS_H 18#ifndef BEISCSI_CMDS_H
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 868cc5590145..3cad10605023 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#include <scsi/libiscsi.h> 20#include <scsi/libiscsi.h>
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 9c532797c29e..ff60b7fd92d6 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#ifndef _BE_ISCSI_ 20#ifndef _BE_ISCSI_
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 24e20ba9633c..cea9b275965c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,16 +7,16 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
19
20#include <linux/reboot.h> 20#include <linux/reboot.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
@@ -420,7 +420,8 @@ static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
420 return 0; 420 return 0;
421 421
422free_kset: 422free_kset:
423 iscsi_boot_destroy_kset(phba->boot_kset); 423 if (phba->boot_kset)
424 iscsi_boot_destroy_kset(phba->boot_kset);
424 return -ENOMEM; 425 return -ENOMEM;
425} 426}
426 427
@@ -3464,23 +3465,23 @@ static void hwi_enable_intr(struct beiscsi_hba *phba)
3464 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 3465 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3465 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 3466 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3466 reg = ioread32(addr); 3467 reg = ioread32(addr);
3467 SE_DEBUG(DBG_LVL_8, "reg =x%08x\n", reg);
3468 3468
3469 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3469 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3470 if (!enabled) { 3470 if (!enabled) {
3471 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3471 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3472 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr); 3472 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
3473 iowrite32(reg, addr); 3473 iowrite32(reg, addr);
3474 if (!phba->msix_enabled) { 3474 }
3475 eq = &phwi_context->be_eq[0].q; 3475
3476 if (!phba->msix_enabled) {
3477 eq = &phwi_context->be_eq[0].q;
3478 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3479 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3480 } else {
3481 for (i = 0; i <= phba->num_cpus; i++) {
3482 eq = &phwi_context->be_eq[i].q;
3476 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id); 3483 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3477 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3484 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3478 } else {
3479 for (i = 0; i <= phba->num_cpus; i++) {
3480 eq = &phwi_context->be_eq[i].q;
3481 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3482 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3483 }
3484 } 3485 }
3485 } 3486 }
3486} 3487}
@@ -4019,12 +4020,17 @@ static int beiscsi_mtask(struct iscsi_task *task)
4019 hwi_write_buffer(pwrb, task); 4020 hwi_write_buffer(pwrb, task);
4020 break; 4021 break;
4021 case ISCSI_OP_NOOP_OUT: 4022 case ISCSI_OP_NOOP_OUT:
4022 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4023 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4023 INI_RD_CMD); 4024 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4024 if (task->hdr->ttt == ISCSI_RESERVED_TAG) 4025 TGT_DM_CMD);
4026 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4027 pwrb, 0);
4025 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 4028 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4026 else 4029 } else {
4030 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4031 INI_RD_CMD);
4027 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); 4032 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
4033 }
4028 hwi_write_buffer(pwrb, task); 4034 hwi_write_buffer(pwrb, task);
4029 break; 4035 break;
4030 case ISCSI_OP_TEXT: 4036 case ISCSI_OP_TEXT:
@@ -4144,10 +4150,11 @@ static void beiscsi_remove(struct pci_dev *pcidev)
4144 phba->ctrl.mbox_mem_alloced.size, 4150 phba->ctrl.mbox_mem_alloced.size,
4145 phba->ctrl.mbox_mem_alloced.va, 4151 phba->ctrl.mbox_mem_alloced.va,
4146 phba->ctrl.mbox_mem_alloced.dma); 4152 phba->ctrl.mbox_mem_alloced.dma);
4153 if (phba->boot_kset)
4154 iscsi_boot_destroy_kset(phba->boot_kset);
4147 iscsi_host_remove(phba->shost); 4155 iscsi_host_remove(phba->shost);
4148 pci_dev_put(phba->pcidev); 4156 pci_dev_put(phba->pcidev);
4149 iscsi_host_free(phba->shost); 4157 iscsi_host_free(phba->shost);
4150 iscsi_boot_destroy_kset(phba->boot_kset);
4151} 4158}
4152 4159
4153static void beiscsi_msix_enable(struct beiscsi_hba *phba) 4160static void beiscsi_msix_enable(struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 90eb74f6bcab..081c171a1ed6 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#ifndef _BEISCSI_MAIN_ 20#ifndef _BEISCSI_MAIN_
@@ -35,7 +34,7 @@
35 34
36#include "be.h" 35#include "be.h"
37#define DRV_NAME "be2iscsi" 36#define DRV_NAME "be2iscsi"
38#define BUILD_STR "2.0.549.0" 37#define BUILD_STR "2.103.298.0"
39#define BE_NAME "ServerEngines BladeEngine2" \ 38#define BE_NAME "ServerEngines BladeEngine2" \
40 "Linux iSCSI Driver version" BUILD_STR 39 "Linux iSCSI Driver version" BUILD_STR
41#define DRV_DESC BE_NAME " " "Driver" 40#define DRV_DESC BE_NAME " " "Driver"
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 877324fc594c..44762cfa3e12 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#include "be_mgmt.h" 20#include "be_mgmt.h"
@@ -203,8 +202,8 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
203 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); 202 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
204 203
205 req->chute = chute; 204 req->chute = chute;
206 req->hdr_ring_id = 0; 205 req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba));
207 req->data_ring_id = 0; 206 req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba));
208 207
209 status = be_mcc_notify_wait(phba); 208 status = be_mcc_notify_wait(phba);
210 if (status) 209 if (status)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index b9acedf78653..08428824ace2 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2010 ServerEngines 2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,15 +7,14 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@serverengines.com 13 * linux-drivers@emulex.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 * 14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
19 */ 18 */
20 19
21#ifndef _BEISCSI_MGMT_ 20#ifndef _BEISCSI_MGMT_
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 0fd510a01561..59b5e9b61d71 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -57,9 +57,19 @@ int pcie_max_read_reqsz;
57int bfa_debugfs_enable = 1; 57int bfa_debugfs_enable = 1;
58int msix_disable_cb = 0, msix_disable_ct = 0; 58int msix_disable_cb = 0, msix_disable_ct = 0;
59 59
60/* Firmware related */
60u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; 61u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
61u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; 62u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
62 63
64#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
65#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
66#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
67
68static u32 *bfad_load_fwimg(struct pci_dev *pdev);
69static void bfad_free_fwimg(void);
70static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
71 u32 *bfi_image_size, char *fw_name);
72
63static const char *msix_name_ct[] = { 73static const char *msix_name_ct[] = {
64 "cpe0", "cpe1", "cpe2", "cpe3", 74 "cpe0", "cpe1", "cpe2", "cpe3",
65 "rme0", "rme1", "rme2", "rme3", 75 "rme0", "rme1", "rme2", "rme3",
@@ -222,6 +232,9 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
222 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 232 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
223 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); 233 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
224 } else { 234 } else {
235 printk(KERN_WARNING
236 "bfa %s: bfa init failed\n",
237 bfad->pci_name);
225 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; 238 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
226 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); 239 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
227 } 240 }
@@ -991,10 +1004,6 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
991 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; 1004 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
992 } 1005 }
993 1006
994 /* Setup the debugfs node for this scsi_host */
995 if (bfa_debugfs_enable)
996 bfad_debugfs_init(&bfad->pport);
997
998 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; 1007 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
999 1008
1000out: 1009out:
@@ -1004,10 +1013,6 @@ out:
1004void 1013void
1005bfad_uncfg_pport(struct bfad_s *bfad) 1014bfad_uncfg_pport(struct bfad_s *bfad)
1006{ 1015{
1007 /* Remove the debugfs node for this scsi_host */
1008 kfree(bfad->regdata);
1009 bfad_debugfs_exit(&bfad->pport);
1010
1011 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && 1016 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
1012 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { 1017 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
1013 bfad_im_scsi_host_free(bfad, bfad->pport.im_port); 1018 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
@@ -1389,6 +1394,10 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1389 bfad->pport.bfad = bfad; 1394 bfad->pport.bfad = bfad;
1390 INIT_LIST_HEAD(&bfad->pbc_vport_list); 1395 INIT_LIST_HEAD(&bfad->pbc_vport_list);
1391 1396
1397 /* Setup the debugfs node for this bfad */
1398 if (bfa_debugfs_enable)
1399 bfad_debugfs_init(&bfad->pport);
1400
1392 retval = bfad_drv_init(bfad); 1401 retval = bfad_drv_init(bfad);
1393 if (retval != BFA_STATUS_OK) 1402 if (retval != BFA_STATUS_OK)
1394 goto out_drv_init_failure; 1403 goto out_drv_init_failure;
@@ -1404,6 +1413,9 @@ out_bfad_sm_failure:
1404 bfa_detach(&bfad->bfa); 1413 bfa_detach(&bfad->bfa);
1405 bfad_hal_mem_release(bfad); 1414 bfad_hal_mem_release(bfad);
1406out_drv_init_failure: 1415out_drv_init_failure:
1416 /* Remove the debugfs node for this bfad */
1417 kfree(bfad->regdata);
1418 bfad_debugfs_exit(&bfad->pport);
1407 mutex_lock(&bfad_mutex); 1419 mutex_lock(&bfad_mutex);
1408 bfad_inst--; 1420 bfad_inst--;
1409 list_del(&bfad->list_entry); 1421 list_del(&bfad->list_entry);
@@ -1445,6 +1457,10 @@ bfad_pci_remove(struct pci_dev *pdev)
1445 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1457 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1446 bfad_hal_mem_release(bfad); 1458 bfad_hal_mem_release(bfad);
1447 1459
1460 /* Remove the debugfs node for this bfad */
1461 kfree(bfad->regdata);
1462 bfad_debugfs_exit(&bfad->pport);
1463
1448 /* Cleaning the BFAD instance */ 1464 /* Cleaning the BFAD instance */
1449 mutex_lock(&bfad_mutex); 1465 mutex_lock(&bfad_mutex);
1450 bfad_inst--; 1466 bfad_inst--;
@@ -1550,7 +1566,7 @@ bfad_exit(void)
1550} 1566}
1551 1567
1552/* Firmware handling */ 1568/* Firmware handling */
1553u32 * 1569static void
1554bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 1570bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1555 u32 *bfi_image_size, char *fw_name) 1571 u32 *bfi_image_size, char *fw_name)
1556{ 1572{
@@ -1558,27 +1574,25 @@ bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1558 1574
1559 if (request_firmware(&fw, fw_name, &pdev->dev)) { 1575 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1560 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); 1576 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1561 goto error; 1577 *bfi_image = NULL;
1578 goto out;
1562 } 1579 }
1563 1580
1564 *bfi_image = vmalloc(fw->size); 1581 *bfi_image = vmalloc(fw->size);
1565 if (NULL == *bfi_image) { 1582 if (NULL == *bfi_image) {
1566 printk(KERN_ALERT "Fail to allocate buffer for fw image " 1583 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1567 "size=%x!\n", (u32) fw->size); 1584 "size=%x!\n", (u32) fw->size);
1568 goto error; 1585 goto out;
1569 } 1586 }
1570 1587
1571 memcpy(*bfi_image, fw->data, fw->size); 1588 memcpy(*bfi_image, fw->data, fw->size);
1572 *bfi_image_size = fw->size/sizeof(u32); 1589 *bfi_image_size = fw->size/sizeof(u32);
1573 1590out:
1574 return *bfi_image; 1591 release_firmware(fw);
1575
1576error:
1577 return NULL;
1578} 1592}
1579 1593
1580u32 * 1594static u32 *
1581bfad_get_firmware_buf(struct pci_dev *pdev) 1595bfad_load_fwimg(struct pci_dev *pdev)
1582{ 1596{
1583 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) { 1597 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
1584 if (bfi_image_ct_fc_size == 0) 1598 if (bfi_image_ct_fc_size == 0)
@@ -1598,6 +1612,17 @@ bfad_get_firmware_buf(struct pci_dev *pdev)
1598 } 1612 }
1599} 1613}
1600 1614
1615static void
1616bfad_free_fwimg(void)
1617{
1618 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
1619 vfree(bfi_image_ct_fc);
1620 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
1621 vfree(bfi_image_ct_cna);
1622 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
1623 vfree(bfi_image_cb_fc);
1624}
1625
1601module_init(bfad_init); 1626module_init(bfad_init);
1602module_exit(bfad_exit); 1627module_exit(bfad_exit);
1603MODULE_LICENSE("GPL"); 1628MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index c66e32eced7b..48be0c54f2de 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -28,10 +28,10 @@
28 * mount -t debugfs none /sys/kernel/debug 28 * mount -t debugfs none /sys/kernel/debug
29 * 29 *
30 * BFA Hierarchy: 30 * BFA Hierarchy:
31 * - bfa/host# 31 * - bfa/pci_dev:<pci_name>
32 * where the host number corresponds to the one under /sys/class/scsi_host/host# 32 * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bfa
33 * 33 *
34 * Debugging service available per host: 34 * Debugging service available per pci_dev:
35 * fwtrc: To collect current firmware trace. 35 * fwtrc: To collect current firmware trace.
36 * drvtrc: To collect current driver trace 36 * drvtrc: To collect current driver trace
37 * fwsave: To collect last saved fw trace as a result of firmware crash. 37 * fwsave: To collect last saved fw trace as a result of firmware crash.
@@ -489,11 +489,9 @@ static atomic_t bfa_debugfs_port_count;
489inline void 489inline void
490bfad_debugfs_init(struct bfad_port_s *port) 490bfad_debugfs_init(struct bfad_port_s *port)
491{ 491{
492 struct bfad_im_port_s *im_port = port->im_port; 492 struct bfad_s *bfad = port->bfad;
493 struct bfad_s *bfad = im_port->bfad;
494 struct Scsi_Host *shost = im_port->shost;
495 const struct bfad_debugfs_entry *file; 493 const struct bfad_debugfs_entry *file;
496 char name[16]; 494 char name[64];
497 int i; 495 int i;
498 496
499 if (!bfa_debugfs_enable) 497 if (!bfa_debugfs_enable)
@@ -510,17 +508,15 @@ bfad_debugfs_init(struct bfad_port_s *port)
510 } 508 }
511 } 509 }
512 510
513 /* 511 /* Setup the pci_dev debugfs directory for the port */
514 * Setup the host# directory for the port, 512 snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name);
515 * corresponds to the scsi_host num of this port.
516 */
517 snprintf(name, sizeof(name), "host%d", shost->host_no);
518 if (!port->port_debugfs_root) { 513 if (!port->port_debugfs_root) {
519 port->port_debugfs_root = 514 port->port_debugfs_root =
520 debugfs_create_dir(name, bfa_debugfs_root); 515 debugfs_create_dir(name, bfa_debugfs_root);
521 if (!port->port_debugfs_root) { 516 if (!port->port_debugfs_root) {
522 printk(KERN_WARNING 517 printk(KERN_WARNING
523 "BFA host root dir creation failed\n"); 518 "bfa %s: debugfs root creation failed\n",
519 bfad->pci_name);
524 goto err; 520 goto err;
525 } 521 }
526 522
@@ -536,8 +532,8 @@ bfad_debugfs_init(struct bfad_port_s *port)
536 file->fops); 532 file->fops);
537 if (!bfad->bfad_dentry_files[i]) { 533 if (!bfad->bfad_dentry_files[i]) {
538 printk(KERN_WARNING 534 printk(KERN_WARNING
539 "BFA host%d: create %s entry failed\n", 535 "bfa %s: debugfs %s creation failed\n",
540 shost->host_no, file->name); 536 bfad->pci_name, file->name);
541 goto err; 537 goto err;
542 } 538 }
543 } 539 }
@@ -550,8 +546,7 @@ err:
550inline void 546inline void
551bfad_debugfs_exit(struct bfad_port_s *port) 547bfad_debugfs_exit(struct bfad_port_s *port)
552{ 548{
553 struct bfad_im_port_s *im_port = port->im_port; 549 struct bfad_s *bfad = port->bfad;
554 struct bfad_s *bfad = im_port->bfad;
555 int i; 550 int i;
556 551
557 for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) { 552 for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
@@ -562,9 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port)
562 } 557 }
563 558
564 /* 559 /*
565 * Remove the host# directory for the port, 560 * Remove the pci_dev debugfs directory for the port */
566 * corresponds to the scsi_host num of this port.
567 */
568 if (port->port_debugfs_root) { 561 if (port->port_debugfs_root) {
569 debugfs_remove(port->port_debugfs_root); 562 debugfs_remove(port->port_debugfs_root);
570 port->port_debugfs_root = NULL; 563 port->port_debugfs_root = NULL;
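The bfad_debugfs.c changes rekey the per-port directory from the SCSI host number to the PCI device name, which is why the im_port and shost lookups drop out of both init and exit. A minimal sketch of the resulting layout, assuming bfad->pci_name caches the name as it appears under /sys/bus/pci/drivers/bfa:

	char name[64];

	/* e.g. "pci_dev:0000:03:00.0" instead of "host4" */
	snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name);
	port->port_debugfs_root = debugfs_create_dir(name, bfa_debugfs_root);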
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index bfee63b16fa9..c296c8968511 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -141,29 +141,4 @@ extern struct device_attribute *bfad_im_vport_attrs[];
141 141
142irqreturn_t bfad_intx(int irq, void *dev_id); 142irqreturn_t bfad_intx(int irq, void *dev_id);
143 143
 144/* Firmware related */
145#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
146#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
147#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
148
149u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
150u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
151 u32 *bfi_image_size, char *fw_name);
152
153static inline u32 *
154bfad_load_fwimg(struct pci_dev *pdev)
155{
156 return bfad_get_firmware_buf(pdev);
157}
158
159static inline void
160bfad_free_fwimg(void)
161{
162 if (bfi_image_ct_fc_size && bfi_image_ct_fc)
163 vfree(bfi_image_ct_fc);
164 if (bfi_image_ct_cna_size && bfi_image_ct_cna)
165 vfree(bfi_image_ct_cna);
166 if (bfi_image_cb_fc_size && bfi_image_cb_fc)
167 vfree(bfi_image_cb_fc);
168}
169#endif 144#endif
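With the firmware helpers gone from bfad_im.h, nothing in the header references the bfi_image_* globals any more. A sketch of the load wrapper that presumably remains next to bfad_get_firmware_buf() in bfad.c (an assumption; only bfad_free_fwimg() is visible in the hunk above):

	u32 *
	bfad_load_fwimg(struct pci_dev *pdev)
	{
		return bfad_get_firmware_buf(pdev);
	}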
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index b6d350ac4288..0a404bfb44fe 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -130,7 +130,7 @@
130#define BNX2FC_TM_TIMEOUT 60 /* secs */ 130#define BNX2FC_TM_TIMEOUT 60 /* secs */
131#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */ 131#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
132 132
133#define BNX2FC_WAIT_CNT 120 133#define BNX2FC_WAIT_CNT 1200
134#define BNX2FC_FW_TIMEOUT (3 * HZ) 134#define BNX2FC_FW_TIMEOUT (3 * HZ)
135#define PORT_MAX 2 135#define PORT_MAX 2
136 136
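BNX2FC_WAIT_CNT grows tenfold, stretching the total firmware wait rather than the per-iteration delay. A hypothetical poll loop under the common 250 ms sleep assumption (names are illustrative, not the driver's):

	static int wait_for_fw(struct bnx2fc_hba *hba)
	{
		int i;

		/* 1200 * 250ms = 5 min budget, up from 120 * 250ms = 30s */
		for (i = 0; i < BNX2FC_WAIT_CNT; i++) {
			if (test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
				return 0;
			msleep(250);
		}
		return -ETIMEDOUT;
	}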
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e2e647509a73..662365676689 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1130,7 +1130,7 @@ static void bnx2fc_interface_release(struct kref *kref)
1130 struct net_device *phys_dev; 1130 struct net_device *phys_dev;
1131 1131
1132 hba = container_of(kref, struct bnx2fc_hba, kref); 1132 hba = container_of(kref, struct bnx2fc_hba, kref);
1133 BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n"); 1133 BNX2FC_MISC_DBG("Interface is being released\n");
1134 1134
1135 netdev = hba->netdev; 1135 netdev = hba->netdev;
1136 phys_dev = hba->phys_dev; 1136 phys_dev = hba->phys_dev;
@@ -1254,20 +1254,17 @@ setup_err:
1254static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, 1254static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1255 struct device *parent, int npiv) 1255 struct device *parent, int npiv)
1256{ 1256{
1257 struct fc_lport *lport = NULL; 1257 struct fc_lport *lport, *n_port;
1258 struct fcoe_port *port; 1258 struct fcoe_port *port;
1259 struct Scsi_Host *shost; 1259 struct Scsi_Host *shost;
1260 struct fc_vport *vport = dev_to_vport(parent); 1260 struct fc_vport *vport = dev_to_vport(parent);
1261 int rc = 0; 1261 int rc = 0;
1262 1262
1263 /* Allocate Scsi_Host structure */ 1263 /* Allocate Scsi_Host structure */
1264 if (!npiv) { 1264 if (!npiv)
1265 lport = libfc_host_alloc(&bnx2fc_shost_template, 1265 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
1266 sizeof(struct fcoe_port)); 1266 else
1267 } else { 1267 lport = libfc_vport_create(vport, sizeof(*port));
1268 lport = libfc_vport_create(vport,
1269 sizeof(struct fcoe_port));
1270 }
1271 1268
1272 if (!lport) { 1269 if (!lport) {
1273 printk(KERN_ERR PFX "could not allocate scsi host structure\n"); 1270 printk(KERN_ERR PFX "could not allocate scsi host structure\n");
@@ -1285,7 +1282,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1285 goto lp_config_err; 1282 goto lp_config_err;
1286 1283
1287 if (npiv) { 1284 if (npiv) {
1288 vport = dev_to_vport(parent);
1289 printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", 1285 printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
1290 vport->node_name, vport->port_name); 1286 vport->node_name, vport->port_name);
1291 fc_set_wwnn(lport, vport->node_name); 1287 fc_set_wwnn(lport, vport->node_name);
@@ -1314,12 +1310,17 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1314 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; 1310 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1315 1311
1316 /* Allocate exchange manager */ 1312 /* Allocate exchange manager */
1317 if (!npiv) { 1313 if (!npiv)
1318 rc = bnx2fc_em_config(lport); 1314 rc = bnx2fc_em_config(lport);
1319 if (rc) { 1315 else {
1320 printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); 1316 shost = vport_to_shost(vport);
1321 goto shost_err; 1317 n_port = shost_priv(shost);
1322 } 1318 rc = fc_exch_mgr_list_clone(n_port, lport);
1319 }
1320
1321 if (rc) {
1322 printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
1323 goto shost_err;
1323 } 1324 }
1324 1325
1325 bnx2fc_interface_get(hba); 1326 bnx2fc_interface_get(hba);
@@ -1352,8 +1353,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1352 /* Free existing transmit skbs */ 1353 /* Free existing transmit skbs */
1353 fcoe_clean_pending_queue(lport); 1354 fcoe_clean_pending_queue(lport);
1354 1355
1355 bnx2fc_interface_put(hba);
1356
1357 /* Free queued packets for the receive thread */ 1356 /* Free queued packets for the receive thread */
1358 bnx2fc_clean_rx_queue(lport); 1357 bnx2fc_clean_rx_queue(lport);
1359 1358
@@ -1372,6 +1371,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1372 1371
1373 /* Release Scsi_Host */ 1372 /* Release Scsi_Host */
1374 scsi_host_put(lport->host); 1373 scsi_host_put(lport->host);
1374
1375 bnx2fc_interface_put(hba);
1375} 1376}
1376 1377
1377/** 1378/**
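The bnx2fc_if_destroy() reordering is a reference-lifetime fix: dropping the hba reference before scsi_host_put() could let the final kref release free the hba while Scsi_Host teardown still needs it. A sketch of the helpers in play, assuming the usual kref idiom around bnx2fc_interface_release():

	static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
	{
		kref_get(&hba->kref);
	}

	static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
	{
		/* may invoke bnx2fc_interface_release() and free hba */
		kref_put(&hba->kref, bnx2fc_interface_release);
	}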
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 1b680e288c56..f756d5f85c7a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -522,6 +522,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
522 fp = fc_frame_alloc(lport, payload_len); 522 fp = fc_frame_alloc(lport, payload_len);
523 if (!fp) { 523 if (!fp) {
524 printk(KERN_ERR PFX "fc_frame_alloc failure\n"); 524 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
525 kfree(unsol_els);
525 return; 526 return;
526 } 527 }
527 528
@@ -547,6 +548,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
547 */ 548 */
548 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); 549 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
549 kfree_skb(skb); 550 kfree_skb(skb);
551 kfree(unsol_els);
550 return; 552 return;
551 } 553 }
552 } 554 }
@@ -563,6 +565,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
563 } else { 565 } else {
564 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); 566 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
565 kfree_skb(skb); 567 kfree_skb(skb);
568 kfree(unsol_els);
566 } 569 }
567} 570}
568 571
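Each early return in bnx2fc_process_l2_frame_compl() now frees the unsol_els work item; without it, the structure allocated for the deferred frame handling would leak on every error path. A sketch of the shape, assuming unsol_els is kmalloc'd earlier in the same function:

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	...
	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);	/* undo the allocation on every exit */
		return;
	}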
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 1decefbf32e3..b5b5c346d779 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1663,6 +1663,12 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
1663 tgt = (struct bnx2fc_rport *)&rp[1]; 1663 tgt = (struct bnx2fc_rport *)&rp[1];
1664 1664
1665 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 1665 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1666 if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) {
1667 sc_cmd->result = DID_NO_CONNECT << 16;
1668 sc_cmd->scsi_done(sc_cmd);
1669 return 0;
 1670 }
 1671
1666 /* 1672 /*
1667 * Session is not offloaded yet. Let SCSI-ml retry 1673 * Session is not offloaded yet. Let SCSI-ml retry
1668 * the command. 1674 * the command.
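Once the upload-complete flag is set for the rport, letting SCSI-ml retry is pointless, so queuecommand now fails the command immediately instead of asking for a requeue. The result word packs the host byte into bits 16-23:

	/* host byte DID_NO_CONNECT (0x04) -> result 0x00040000 */
	sc_cmd->result = DID_NO_CONNECT << 16;
	sc_cmd->scsi_done(sc_cmd);	/* complete without queueing */
	return 0;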
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index d0c82340f0e2..60d2ef291646 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -772,6 +772,7 @@ static const struct error_info additional[] =
772 {0x3802, "Esn - power management class event"}, 772 {0x3802, "Esn - power management class event"},
773 {0x3804, "Esn - media class event"}, 773 {0x3804, "Esn - media class event"},
774 {0x3806, "Esn - device busy class event"}, 774 {0x3806, "Esn - device busy class event"},
775 {0x3807, "Thin Provisioning soft threshold reached"},
775 776
776 {0x3900, "Saving parameters not supported"}, 777 {0x3900, "Saving parameters not supported"},
777 778
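The additional[] table in constants.c is keyed by (ASC << 8) | ASCQ, so the new 0x3807 entry decodes as ASC 0x38, ASCQ 0x07, the T10 "thin provisioning soft threshold reached" event:

	/* additional[] keys are (ASC << 8) | ASCQ */
	#define SENSE_KEY(asc, ascq)	(((asc) << 8) | (ascq))
	/* SENSE_KEY(0x38, 0x07) == 0x3807: thin provisioning soft threshold */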
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index b10b3841535c..f5b718d3c31b 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -778,8 +778,8 @@ static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
778static void srb_waiting_insert(struct DeviceCtlBlk *dcb, 778static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
779 struct ScsiReqBlk *srb) 779 struct ScsiReqBlk *srb)
780{ 780{
781 dprintkdbg(DBG_0, "srb_waiting_insert: (pid#%li) <%02i-%i> srb=%p\n", 781 dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
782 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 782 srb->cmd, dcb->target_id, dcb->target_lun, srb);
783 list_add(&srb->list, &dcb->srb_waiting_list); 783 list_add(&srb->list, &dcb->srb_waiting_list);
784} 784}
785 785
@@ -787,16 +787,16 @@ static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
787static void srb_waiting_append(struct DeviceCtlBlk *dcb, 787static void srb_waiting_append(struct DeviceCtlBlk *dcb,
788 struct ScsiReqBlk *srb) 788 struct ScsiReqBlk *srb)
789{ 789{
790 dprintkdbg(DBG_0, "srb_waiting_append: (pid#%li) <%02i-%i> srb=%p\n", 790 dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
791 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 791 srb->cmd, dcb->target_id, dcb->target_lun, srb);
792 list_add_tail(&srb->list, &dcb->srb_waiting_list); 792 list_add_tail(&srb->list, &dcb->srb_waiting_list);
793} 793}
794 794
795 795
796static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) 796static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
797{ 797{
798 dprintkdbg(DBG_0, "srb_going_append: (pid#%li) <%02i-%i> srb=%p\n", 798 dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
799 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 799 srb->cmd, dcb->target_id, dcb->target_lun, srb);
800 list_add_tail(&srb->list, &dcb->srb_going_list); 800 list_add_tail(&srb->list, &dcb->srb_going_list);
801} 801}
802 802
@@ -805,8 +805,8 @@ static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
805{ 805{
806 struct ScsiReqBlk *i; 806 struct ScsiReqBlk *i;
807 struct ScsiReqBlk *tmp; 807 struct ScsiReqBlk *tmp;
808 dprintkdbg(DBG_0, "srb_going_remove: (pid#%li) <%02i-%i> srb=%p\n", 808 dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
809 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 809 srb->cmd, dcb->target_id, dcb->target_lun, srb);
810 810
811 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list) 811 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
812 if (i == srb) { 812 if (i == srb) {
@@ -821,8 +821,8 @@ static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
821{ 821{
822 struct ScsiReqBlk *i; 822 struct ScsiReqBlk *i;
823 struct ScsiReqBlk *tmp; 823 struct ScsiReqBlk *tmp;
824 dprintkdbg(DBG_0, "srb_waiting_remove: (pid#%li) <%02i-%i> srb=%p\n", 824 dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
825 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 825 srb->cmd, dcb->target_id, dcb->target_lun, srb);
826 826
827 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list) 827 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
828 if (i == srb) { 828 if (i == srb) {
@@ -836,8 +836,8 @@ static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
836 struct ScsiReqBlk *srb) 836 struct ScsiReqBlk *srb)
837{ 837{
838 dprintkdbg(DBG_0, 838 dprintkdbg(DBG_0,
839 "srb_going_to_waiting_move: (pid#%li) <%02i-%i> srb=%p\n", 839 "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
840 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 840 srb->cmd, dcb->target_id, dcb->target_lun, srb);
841 list_move(&srb->list, &dcb->srb_waiting_list); 841 list_move(&srb->list, &dcb->srb_waiting_list);
842} 842}
843 843
@@ -846,8 +846,8 @@ static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
846 struct ScsiReqBlk *srb) 846 struct ScsiReqBlk *srb)
847{ 847{
848 dprintkdbg(DBG_0, 848 dprintkdbg(DBG_0,
849 "srb_waiting_to_going_move: (pid#%li) <%02i-%i> srb=%p\n", 849 "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
850 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 850 srb->cmd, dcb->target_id, dcb->target_lun, srb);
851 list_move(&srb->list, &dcb->srb_going_list); 851 list_move(&srb->list, &dcb->srb_going_list);
852} 852}
853 853
@@ -982,8 +982,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
982{ 982{
983 int nseg; 983 int nseg;
984 enum dma_data_direction dir = cmd->sc_data_direction; 984 enum dma_data_direction dir = cmd->sc_data_direction;
985 dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n", 985 dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
986 cmd->serial_number, dcb->target_id, dcb->target_lun); 986 cmd, dcb->target_id, dcb->target_lun);
987 987
988 srb->dcb = dcb; 988 srb->dcb = dcb;
989 srb->cmd = cmd; 989 srb->cmd = cmd;
@@ -1086,8 +1086,8 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
1086 struct ScsiReqBlk *srb; 1086 struct ScsiReqBlk *srb;
1087 struct AdapterCtlBlk *acb = 1087 struct AdapterCtlBlk *acb =
1088 (struct AdapterCtlBlk *)cmd->device->host->hostdata; 1088 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1089 dprintkdbg(DBG_0, "queue_command: (pid#%li) <%02i-%i> cmnd=0x%02x\n", 1089 dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
1090 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 1090 cmd, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
1091 1091
1092 /* Assume BAD_TARGET; will be cleared later */ 1092 /* Assume BAD_TARGET; will be cleared later */
1093 cmd->result = DID_BAD_TARGET << 16; 1093 cmd->result = DID_BAD_TARGET << 16;
@@ -1140,7 +1140,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
1140 /* process immediately */ 1140 /* process immediately */
1141 send_srb(acb, srb); 1141 send_srb(acb, srb);
1142 } 1142 }
1143 dprintkdbg(DBG_1, "queue_command: (pid#%li) done\n", cmd->serial_number); 1143 dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
1144 return 0; 1144 return 0;
1145 1145
1146complete: 1146complete:
@@ -1203,9 +1203,9 @@ static void dump_register_info(struct AdapterCtlBlk *acb,
1203 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n", 1203 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
1204 srb, srb->cmd); 1204 srb, srb->cmd);
1205 else 1205 else
1206 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p (pid#%li) " 1206 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
1207 "cmnd=0x%02x <%02i-%i>\n", 1207 "cmnd=0x%02x <%02i-%i>\n",
1208 srb, srb->cmd, srb->cmd->serial_number, 1208 srb, srb->cmd,
1209 srb->cmd->cmnd[0], srb->cmd->device->id, 1209 srb->cmd->cmnd[0], srb->cmd->device->id,
1210 srb->cmd->device->lun); 1210 srb->cmd->device->lun);
1211 printk(" sglist=%p cnt=%i idx=%i len=%zu\n", 1211 printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
@@ -1301,8 +1301,8 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1301 struct AdapterCtlBlk *acb = 1301 struct AdapterCtlBlk *acb =
1302 (struct AdapterCtlBlk *)cmd->device->host->hostdata; 1302 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1303 dprintkl(KERN_INFO, 1303 dprintkl(KERN_INFO,
1304 "eh_bus_reset: (pid#%li) target=<%02i-%i> cmd=%p\n", 1304 "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
1305 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd); 1305 cmd, cmd->device->id, cmd->device->lun, cmd);
1306 1306
1307 if (timer_pending(&acb->waiting_timer)) 1307 if (timer_pending(&acb->waiting_timer))
1308 del_timer(&acb->waiting_timer); 1308 del_timer(&acb->waiting_timer);
@@ -1368,8 +1368,8 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
1368 (struct AdapterCtlBlk *)cmd->device->host->hostdata; 1368 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1369 struct DeviceCtlBlk *dcb; 1369 struct DeviceCtlBlk *dcb;
1370 struct ScsiReqBlk *srb; 1370 struct ScsiReqBlk *srb;
1371 dprintkl(KERN_INFO, "eh_abort: (pid#%li) target=<%02i-%i> cmd=%p\n", 1371 dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
1372 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd); 1372 cmd, cmd->device->id, cmd->device->lun, cmd);
1373 1373
1374 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); 1374 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1375 if (!dcb) { 1375 if (!dcb) {
@@ -1495,8 +1495,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1495 u16 s_stat2, return_code; 1495 u16 s_stat2, return_code;
1496 u8 s_stat, scsicommand, i, identify_message; 1496 u8 s_stat, scsicommand, i, identify_message;
1497 u8 *ptr; 1497 u8 *ptr;
1498 dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> srb=%p\n", 1498 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
 1499 srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); 1499 srb->cmd, dcb->target_id, dcb->target_lun, srb);
1500 1500
1501 srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */ 1501 srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
1502 1502
@@ -1505,8 +1505,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1505 s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); 1505 s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1506#if 1 1506#if 1
1507 if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) { 1507 if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
1508 dprintkdbg(DBG_KG, "start_scsi: (pid#%li) BUSY %02x %04x\n", 1508 dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
 1509 srb->cmd->serial_number, s_stat, s_stat2); 1509 srb->cmd, s_stat, s_stat2);
1510 /* 1510 /*
1511 * Try anyway? 1511 * Try anyway?
1512 * 1512 *
@@ -1522,16 +1522,15 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1522 } 1522 }
1523#endif 1523#endif
1524 if (acb->active_dcb) { 1524 if (acb->active_dcb) {
1525 dprintkl(KERN_DEBUG, "start_scsi: (pid#%li) Attempt to start a" 1525 dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
1526 "command while another command (pid#%li) is active.", 1526 "command while another command (0x%p) is active.",
1527 srb->cmd->serial_number, 1527 srb->cmd,
1528 acb->active_dcb->active_srb ? 1528 acb->active_dcb->active_srb ?
 1529 acb->active_dcb->active_srb->cmd->serial_number : 0); 1529 acb->active_dcb->active_srb->cmd : NULL);
1530 return 1; 1530 return 1;
1531 } 1531 }
1532 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { 1532 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1533 dprintkdbg(DBG_KG, "start_scsi: (pid#%li) Failed (busy)\n", 1533 dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
1534 srb->cmd->serial_number);
1535 return 1; 1534 return 1;
1536 } 1535 }
1537 /* Allow starting of SCSI commands half a second before we allow the mid-level 1536 /* Allow starting of SCSI commands half a second before we allow the mid-level
@@ -1603,9 +1602,9 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1603 tag_number++; 1602 tag_number++;
1604 } 1603 }
1605 if (tag_number >= dcb->max_command) { 1604 if (tag_number >= dcb->max_command) {
1606 dprintkl(KERN_WARNING, "start_scsi: (pid#%li) " 1605 dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
1607 "Out of tags target=<%02i-%i>)\n", 1606 "Out of tags target=<%02i-%i>)\n",
1608 srb->cmd->serial_number, srb->cmd->device->id, 1607 srb->cmd, srb->cmd->device->id,
1609 srb->cmd->device->lun); 1608 srb->cmd->device->lun);
1610 srb->state = SRB_READY; 1609 srb->state = SRB_READY;
1611 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, 1610 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
@@ -1623,8 +1622,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1623#endif 1622#endif
1624/*polling:*/ 1623/*polling:*/
1625 /* Send CDB ..command block ......... */ 1624 /* Send CDB ..command block ......... */
1626 dprintkdbg(DBG_KG, "start_scsi: (pid#%li) <%02i-%i> cmnd=0x%02x tag=%i\n", 1625 dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
1627 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun, 1626 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun,
1628 srb->cmd->cmnd[0], srb->tag_number); 1627 srb->cmd->cmnd[0], srb->tag_number);
1629 if (srb->flag & AUTO_REQSENSE) { 1628 if (srb->flag & AUTO_REQSENSE) {
1630 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); 1629 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
@@ -1647,8 +1646,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1647 * we caught an interrupt (must be reset or reselection ... ) 1646 * we caught an interrupt (must be reset or reselection ... )
1648 * : Let's process it first! 1647 * : Let's process it first!
1649 */ 1648 */
1650 dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> Failed - busy\n", 1649 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
1651 srb->cmd->serial_number, dcb->target_id, dcb->target_lun); 1650 srb->cmd, dcb->target_id, dcb->target_lun);
1652 srb->state = SRB_READY; 1651 srb->state = SRB_READY;
1653 free_tag(dcb, srb); 1652 free_tag(dcb, srb);
1654 srb->msg_count = 0; 1653 srb->msg_count = 0;
@@ -1843,7 +1842,7 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
1843static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 1842static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1844 u16 *pscsi_status) 1843 u16 *pscsi_status)
1845{ 1844{
1846 dprintkdbg(DBG_0, "msgout_phase0: (pid#%li)\n", srb->cmd->serial_number); 1845 dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
1847 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) 1846 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
1848 *pscsi_status = PH_BUS_FREE; /*.. initial phase */ 1847 *pscsi_status = PH_BUS_FREE; /*.. initial phase */
1849 1848
@@ -1857,18 +1856,18 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1857{ 1856{
1858 u16 i; 1857 u16 i;
1859 u8 *ptr; 1858 u8 *ptr;
1860 dprintkdbg(DBG_0, "msgout_phase1: (pid#%li)\n", srb->cmd->serial_number); 1859 dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
1861 1860
1862 clear_fifo(acb, "msgout_phase1"); 1861 clear_fifo(acb, "msgout_phase1");
1863 if (!(srb->state & SRB_MSGOUT)) { 1862 if (!(srb->state & SRB_MSGOUT)) {
1864 srb->state |= SRB_MSGOUT; 1863 srb->state |= SRB_MSGOUT;
1865 dprintkl(KERN_DEBUG, 1864 dprintkl(KERN_DEBUG,
1866 "msgout_phase1: (pid#%li) Phase unexpected\n", 1865 "msgout_phase1: (0x%p) Phase unexpected\n",
1867 srb->cmd->serial_number); /* So what ? */ 1866 srb->cmd); /* So what ? */
1868 } 1867 }
1869 if (!srb->msg_count) { 1868 if (!srb->msg_count) {
1870 dprintkdbg(DBG_0, "msgout_phase1: (pid#%li) NOP msg\n", 1869 dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
1871 srb->cmd->serial_number); 1870 srb->cmd);
1872 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP); 1871 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
1873 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ 1872 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
1874 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); 1873 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
@@ -1888,7 +1887,7 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1888static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 1887static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1889 u16 *pscsi_status) 1888 u16 *pscsi_status)
1890{ 1889{
1891 dprintkdbg(DBG_0, "command_phase0: (pid#%li)\n", srb->cmd->serial_number); 1890 dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
1892 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); 1891 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1893} 1892}
1894 1893
@@ -1899,7 +1898,7 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1899 struct DeviceCtlBlk *dcb; 1898 struct DeviceCtlBlk *dcb;
1900 u8 *ptr; 1899 u8 *ptr;
1901 u16 i; 1900 u16 i;
1902 dprintkdbg(DBG_0, "command_phase1: (pid#%li)\n", srb->cmd->serial_number); 1901 dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
1903 1902
1904 clear_fifo(acb, "command_phase1"); 1903 clear_fifo(acb, "command_phase1");
1905 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN); 1904 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
@@ -2041,8 +2040,8 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2041 struct DeviceCtlBlk *dcb = srb->dcb; 2040 struct DeviceCtlBlk *dcb = srb->dcb;
2042 u16 scsi_status = *pscsi_status; 2041 u16 scsi_status = *pscsi_status;
2043 u32 d_left_counter = 0; 2042 u32 d_left_counter = 0;
2044 dprintkdbg(DBG_0, "data_out_phase0: (pid#%li) <%02i-%i>\n", 2043 dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
2045 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2044 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2046 2045
2047 /* 2046 /*
2048 * KG: We need to drain the buffers before we draw any conclusions! 2047 * KG: We need to drain the buffers before we draw any conclusions!
@@ -2171,8 +2170,8 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2171static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2170static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2172 u16 *pscsi_status) 2171 u16 *pscsi_status)
2173{ 2172{
2174 dprintkdbg(DBG_0, "data_out_phase1: (pid#%li) <%02i-%i>\n", 2173 dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
2175 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2174 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2176 clear_fifo(acb, "data_out_phase1"); 2175 clear_fifo(acb, "data_out_phase1");
2177 /* do prepare before transfer when data out phase */ 2176 /* do prepare before transfer when data out phase */
2178 data_io_transfer(acb, srb, XFERDATAOUT); 2177 data_io_transfer(acb, srb, XFERDATAOUT);
@@ -2183,8 +2182,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2183{ 2182{
2184 u16 scsi_status = *pscsi_status; 2183 u16 scsi_status = *pscsi_status;
2185 2184
2186 dprintkdbg(DBG_0, "data_in_phase0: (pid#%li) <%02i-%i>\n", 2185 dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
2187 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2186 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2188 2187
2189 /* 2188 /*
2190 * KG: DataIn is much more tricky than DataOut. When the device is finished 2189 * KG: DataIn is much more tricky than DataOut. When the device is finished
@@ -2204,8 +2203,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2204 unsigned int sc, fc; 2203 unsigned int sc, fc;
2205 2204
2206 if (scsi_status & PARITYERROR) { 2205 if (scsi_status & PARITYERROR) {
2207 dprintkl(KERN_INFO, "data_in_phase0: (pid#%li) " 2206 dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
2208 "Parity Error\n", srb->cmd->serial_number); 2207 "Parity Error\n", srb->cmd);
2209 srb->status |= PARITY_ERROR; 2208 srb->status |= PARITY_ERROR;
2210 } 2209 }
2211 /* 2210 /*
@@ -2394,8 +2393,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2394static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2393static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2395 u16 *pscsi_status) 2394 u16 *pscsi_status)
2396{ 2395{
2397 dprintkdbg(DBG_0, "data_in_phase1: (pid#%li) <%02i-%i>\n", 2396 dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
2398 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2397 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2399 data_io_transfer(acb, srb, XFERDATAIN); 2398 data_io_transfer(acb, srb, XFERDATAIN);
2400} 2399}
2401 2400
@@ -2406,8 +2405,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
2406 struct DeviceCtlBlk *dcb = srb->dcb; 2405 struct DeviceCtlBlk *dcb = srb->dcb;
2407 u8 bval; 2406 u8 bval;
2408 dprintkdbg(DBG_0, 2407 dprintkdbg(DBG_0,
2409 "data_io_transfer: (pid#%li) <%02i-%i> %c len=%i, sg=(%i/%i)\n", 2408 "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
2410 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun, 2409 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun,
2411 ((io_dir & DMACMD_DIR) ? 'r' : 'w'), 2410 ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
2412 srb->total_xfer_length, srb->sg_index, srb->sg_count); 2411 srb->total_xfer_length, srb->sg_index, srb->sg_count);
2413 if (srb == acb->tmp_srb) 2412 if (srb == acb->tmp_srb)
@@ -2579,8 +2578,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
2579static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2578static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2580 u16 *pscsi_status) 2579 u16 *pscsi_status)
2581{ 2580{
2582 dprintkdbg(DBG_0, "status_phase0: (pid#%li) <%02i-%i>\n", 2581 dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
2583 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2582 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2584 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); 2583 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2585 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ 2584 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */
2586 srb->state = SRB_COMPLETED; 2585 srb->state = SRB_COMPLETED;
@@ -2593,8 +2592,8 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2593static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2592static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2594 u16 *pscsi_status) 2593 u16 *pscsi_status)
2595{ 2594{
2596 dprintkdbg(DBG_0, "status_phase1: (pid#%li) <%02i-%i>\n", 2595 dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
2597 srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); 2596 srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
2598 srb->state = SRB_STATUS; 2597 srb->state = SRB_STATUS;
2599 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ 2598 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
2600 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); 2599 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
@@ -2635,8 +2634,8 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2635{ 2634{
2636 struct ScsiReqBlk *srb = NULL; 2635 struct ScsiReqBlk *srb = NULL;
2637 struct ScsiReqBlk *i; 2636 struct ScsiReqBlk *i;
2638 dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) tag=%i srb=%p\n", 2637 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
2639 srb->cmd->serial_number, tag, srb); 2638 srb->cmd, tag, srb);
2640 2639
2641 if (!(dcb->tag_mask & (1 << tag))) 2640 if (!(dcb->tag_mask & (1 << tag)))
2642 dprintkl(KERN_DEBUG, 2641 dprintkl(KERN_DEBUG,
@@ -2654,8 +2653,8 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2654 if (!srb) 2653 if (!srb)
2655 goto mingx0; 2654 goto mingx0;
2656 2655
2657 dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) <%02i-%i>\n", 2656 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
2658 srb->cmd->serial_number, srb->dcb->target_id, srb->dcb->target_lun); 2657 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2659 if (dcb->flag & ABORT_DEV_) { 2658 if (dcb->flag & ABORT_DEV_) {
2660 /*srb->state = SRB_ABORT_SENT; */ 2659 /*srb->state = SRB_ABORT_SENT; */
2661 enable_msgout_abort(acb, srb); 2660 enable_msgout_abort(acb, srb);
@@ -2865,7 +2864,7 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2865 u16 *pscsi_status) 2864 u16 *pscsi_status)
2866{ 2865{
2867 struct DeviceCtlBlk *dcb = acb->active_dcb; 2866 struct DeviceCtlBlk *dcb = acb->active_dcb;
2868 dprintkdbg(DBG_0, "msgin_phase0: (pid#%li)\n", srb->cmd->serial_number); 2867 dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
2869 2868
2870 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); 2869 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2871 if (msgin_completed(srb->msgin_buf, acb->msg_len)) { 2870 if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
@@ -2931,9 +2930,9 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2931 * SAVE POINTER may be ignored as we have the struct 2930 * SAVE POINTER may be ignored as we have the struct
2932 * ScsiReqBlk* associated with the scsi command. 2931 * ScsiReqBlk* associated with the scsi command.
2933 */ 2932 */
2934 dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) " 2933 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2935 "SAVE POINTER rem=%i Ignore\n", 2934 "SAVE POINTER rem=%i Ignore\n",
2936 srb->cmd->serial_number, srb->total_xfer_length); 2935 srb->cmd, srb->total_xfer_length);
2937 break; 2936 break;
2938 2937
2939 case RESTORE_POINTERS: 2938 case RESTORE_POINTERS:
@@ -2941,9 +2940,9 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2941 break; 2940 break;
2942 2941
2943 case ABORT: 2942 case ABORT:
2944 dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) " 2943 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2945 "<%02i-%i> ABORT msg\n", 2944 "<%02i-%i> ABORT msg\n",
2946 srb->cmd->serial_number, dcb->target_id, 2945 srb->cmd, dcb->target_id,
2947 dcb->target_lun); 2946 dcb->target_lun);
2948 dcb->flag |= ABORT_DEV_; 2947 dcb->flag |= ABORT_DEV_;
2949 enable_msgout_abort(acb, srb); 2948 enable_msgout_abort(acb, srb);
@@ -2975,7 +2974,7 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2975static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, 2974static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2976 u16 *pscsi_status) 2975 u16 *pscsi_status)
2977{ 2976{
2978 dprintkdbg(DBG_0, "msgin_phase1: (pid#%li)\n", srb->cmd->serial_number); 2977 dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
2979 clear_fifo(acb, "msgin_phase1"); 2978 clear_fifo(acb, "msgin_phase1");
2980 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); 2979 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2981 if (!(srb->state & SRB_MSGIN)) { 2980 if (!(srb->state & SRB_MSGIN)) {
@@ -3041,7 +3040,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
3041 } 3040 }
3042 srb = dcb->active_srb; 3041 srb = dcb->active_srb;
3043 acb->active_dcb = NULL; 3042 acb->active_dcb = NULL;
3044 dprintkdbg(DBG_0, "disconnect: (pid#%li)\n", srb->cmd->serial_number); 3043 dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
3045 3044
3046 srb->scsi_phase = PH_BUS_FREE; /* initial phase */ 3045 srb->scsi_phase = PH_BUS_FREE; /* initial phase */
3047 clear_fifo(acb, "disconnect"); 3046 clear_fifo(acb, "disconnect");
@@ -3071,14 +3070,14 @@ static void disconnect(struct AdapterCtlBlk *acb)
3071 && srb->state != SRB_MSGOUT) { 3070 && srb->state != SRB_MSGOUT) {
3072 srb->state = SRB_READY; 3071 srb->state = SRB_READY;
3073 dprintkl(KERN_DEBUG, 3072 dprintkl(KERN_DEBUG,
3074 "disconnect: (pid#%li) Unexpected\n", 3073 "disconnect: (0x%p) Unexpected\n",
3075 srb->cmd->serial_number); 3074 srb->cmd);
3076 srb->target_status = SCSI_STAT_SEL_TIMEOUT; 3075 srb->target_status = SCSI_STAT_SEL_TIMEOUT;
3077 goto disc1; 3076 goto disc1;
3078 } else { 3077 } else {
3079 /* Normal selection timeout */ 3078 /* Normal selection timeout */
3080 dprintkdbg(DBG_KG, "disconnect: (pid#%li) " 3079 dprintkdbg(DBG_KG, "disconnect: (0x%p) "
3081 "<%02i-%i> SelTO\n", srb->cmd->serial_number, 3080 "<%02i-%i> SelTO\n", srb->cmd,
3082 dcb->target_id, dcb->target_lun); 3081 dcb->target_id, dcb->target_lun);
3083 if (srb->retry_count++ > DC395x_MAX_RETRIES 3082 if (srb->retry_count++ > DC395x_MAX_RETRIES
3084 || acb->scan_devices) { 3083 || acb->scan_devices) {
@@ -3089,8 +3088,8 @@ static void disconnect(struct AdapterCtlBlk *acb)
3089 free_tag(dcb, srb); 3088 free_tag(dcb, srb);
3090 srb_going_to_waiting_move(dcb, srb); 3089 srb_going_to_waiting_move(dcb, srb);
3091 dprintkdbg(DBG_KG, 3090 dprintkdbg(DBG_KG,
3092 "disconnect: (pid#%li) Retry\n", 3091 "disconnect: (0x%p) Retry\n",
3093 srb->cmd->serial_number); 3092 srb->cmd);
3094 waiting_set_timer(acb, HZ / 20); 3093 waiting_set_timer(acb, HZ / 20);
3095 } 3094 }
3096 } else if (srb->state & SRB_DISCONNECT) { 3095 } else if (srb->state & SRB_DISCONNECT) {
@@ -3142,9 +3141,9 @@ static void reselect(struct AdapterCtlBlk *acb)
3142 } 3141 }
3143 /* Why the if ? */ 3142 /* Why the if ? */
3144 if (!acb->scan_devices) { 3143 if (!acb->scan_devices) {
3145 dprintkdbg(DBG_KG, "reselect: (pid#%li) <%02i-%i> " 3144 dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
3146 "Arb lost but Resel win rsel=%i stat=0x%04x\n", 3145 "Arb lost but Resel win rsel=%i stat=0x%04x\n",
3147 srb->cmd->serial_number, dcb->target_id, 3146 srb->cmd, dcb->target_id,
3148 dcb->target_lun, rsel_tar_lun_id, 3147 dcb->target_lun, rsel_tar_lun_id,
3149 DC395x_read16(acb, TRM_S1040_SCSI_STATUS)); 3148 DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
3150 arblostflag = 1; 3149 arblostflag = 1;
@@ -3318,7 +3317,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3318 enum dma_data_direction dir = cmd->sc_data_direction; 3317 enum dma_data_direction dir = cmd->sc_data_direction;
3319 int ckc_only = 1; 3318 int ckc_only = 1;
3320 3319
3321 dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->serial_number, 3320 dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
3322 srb->cmd->device->id, srb->cmd->device->lun); 3321 srb->cmd->device->id, srb->cmd->device->lun);
3323 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", 3322 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3324 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count, 3323 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
@@ -3497,9 +3496,9 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3497 cmd->SCp.buffers_residual = 0; 3496 cmd->SCp.buffers_residual = 0;
3498 if (debug_enabled(DBG_KG)) { 3497 if (debug_enabled(DBG_KG)) {
3499 if (srb->total_xfer_length) 3498 if (srb->total_xfer_length)
3500 dprintkdbg(DBG_KG, "srb_done: (pid#%li) <%02i-%i> " 3499 dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
3501 "cmnd=0x%02x Missed %i bytes\n", 3500 "cmnd=0x%02x Missed %i bytes\n",
3502 cmd->serial_number, cmd->device->id, cmd->device->lun, 3501 cmd, cmd->device->id, cmd->device->lun,
3503 cmd->cmnd[0], srb->total_xfer_length); 3502 cmd->cmnd[0], srb->total_xfer_length);
3504 } 3503 }
3505 3504
@@ -3508,8 +3507,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3508 if (srb == acb->tmp_srb) 3507 if (srb == acb->tmp_srb)
3509 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n"); 3508 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3510 else { 3509 else {
3511 dprintkdbg(DBG_0, "srb_done: (pid#%li) done result=0x%08x\n", 3510 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
3512 cmd->serial_number, cmd->result); 3511 cmd, cmd->result);
3513 srb_free_insert(acb, srb); 3512 srb_free_insert(acb, srb);
3514 } 3513 }
3515 pci_unmap_srb(acb, srb); 3514 pci_unmap_srb(acb, srb);
@@ -3538,7 +3537,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3538 p = srb->cmd; 3537 p = srb->cmd;
3539 dir = p->sc_data_direction; 3538 dir = p->sc_data_direction;
3540 result = MK_RES(0, did_flag, 0, 0); 3539 result = MK_RES(0, did_flag, 0, 0);
3541 printk("G:%li(%02i-%i) ", p->serial_number, 3540 printk("G:%p(%02i-%i) ", p,
3542 p->device->id, p->device->lun); 3541 p->device->id, p->device->lun);
3543 srb_going_remove(dcb, srb); 3542 srb_going_remove(dcb, srb);
3544 free_tag(dcb, srb); 3543 free_tag(dcb, srb);
@@ -3568,7 +3567,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3568 p = srb->cmd; 3567 p = srb->cmd;
3569 3568
3570 result = MK_RES(0, did_flag, 0, 0); 3569 result = MK_RES(0, did_flag, 0, 0);
3571 printk("W:%li<%02i-%i>", p->serial_number, p->device->id, 3570 printk("W:%p<%02i-%i>", p, p->device->id,
3572 p->device->lun); 3571 p->device->lun);
3573 srb_waiting_remove(dcb, srb); 3572 srb_waiting_remove(dcb, srb);
3574 srb_free_insert(acb, srb); 3573 srb_free_insert(acb, srb);
@@ -3677,8 +3676,8 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3677 struct ScsiReqBlk *srb) 3676 struct ScsiReqBlk *srb)
3678{ 3677{
3679 struct scsi_cmnd *cmd = srb->cmd; 3678 struct scsi_cmnd *cmd = srb->cmd;
3680 dprintkdbg(DBG_1, "request_sense: (pid#%li) <%02i-%i>\n", 3679 dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
3681 cmd->serial_number, cmd->device->id, cmd->device->lun); 3680 cmd, cmd->device->id, cmd->device->lun);
3682 3681
3683 srb->flag |= AUTO_REQSENSE; 3682 srb->flag |= AUTO_REQSENSE;
3684 srb->adapter_status = 0; 3683 srb->adapter_status = 0;
@@ -3708,8 +3707,8 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3708 3707
3709 if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */ 3708 if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */
3710 dprintkl(KERN_DEBUG, 3709 dprintkl(KERN_DEBUG,
3711 "request_sense: (pid#%li) failed <%02i-%i>\n", 3710 "request_sense: (0x%p) failed <%02i-%i>\n",
3712 srb->cmd->serial_number, dcb->target_id, dcb->target_lun); 3711 srb->cmd, dcb->target_id, dcb->target_lun);
3713 srb_going_to_waiting_move(dcb, srb); 3712 srb_going_to_waiting_move(dcb, srb);
3714 waiting_set_timer(acb, HZ / 100); 3713 waiting_set_timer(acb, HZ / 100);
3715 } 3714 }
@@ -4717,13 +4716,13 @@ static int dc395x_proc_info(struct Scsi_Host *host, char *buffer,
4717 dcb->target_id, dcb->target_lun, 4716 dcb->target_id, dcb->target_lun,
4718 list_size(&dcb->srb_waiting_list)); 4717 list_size(&dcb->srb_waiting_list));
4719 list_for_each_entry(srb, &dcb->srb_waiting_list, list) 4718 list_for_each_entry(srb, &dcb->srb_waiting_list, list)
4720 SPRINTF(" %li", srb->cmd->serial_number); 4719 SPRINTF(" %p", srb->cmd);
4721 if (!list_empty(&dcb->srb_going_list)) 4720 if (!list_empty(&dcb->srb_going_list))
4722 SPRINTF("\nDCB (%02i-%i): Going : %i:", 4721 SPRINTF("\nDCB (%02i-%i): Going : %i:",
4723 dcb->target_id, dcb->target_lun, 4722 dcb->target_id, dcb->target_lun,
4724 list_size(&dcb->srb_going_list)); 4723 list_size(&dcb->srb_going_list));
4725 list_for_each_entry(srb, &dcb->srb_going_list, list) 4724 list_for_each_entry(srb, &dcb->srb_going_list, list)
4726 SPRINTF(" %li", srb->cmd->serial_number); 4725 SPRINTF(" %p", srb->cmd);
4727 if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) 4726 if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
4728 SPRINTF("\n"); 4727 SPRINTF("\n");
4729 } 4728 }
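Throughout dc395x.c the mid-layer serial_number (being removed from struct scsi_cmnd) is replaced by the command pointer itself; %p still gives a stable token for correlating trace lines about the same command. The new convention:

	/* identify the command by its address rather than a serial number */
	dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
		   cmd, dcb->target_id, dcb->target_lun);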
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 42fe52902add..6fec9fe5dc39 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -782,7 +782,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
782 h->sdev = sdev; 782 h->sdev = sdev;
783 783
784 err = alua_initialize(sdev, h); 784 err = alua_initialize(sdev, h);
785 if (err != SCSI_DH_OK) 785 if ((err != SCSI_DH_OK) && (err != SCSI_DH_DEV_OFFLINED))
786 goto failed; 786 goto failed;
787 787
788 if (!try_module_get(THIS_MODULE)) 788 if (!try_module_get(THIS_MODULE))
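alua_bus_attach() now tolerates SCSI_DH_DEV_OFFLINED from alua_initialize(), keeping the handler attached to a device that is merely offline instead of failing the attach outright. Restated as a sketch:

	/* only hard errors unwind; an offlined device keeps the handler */
	switch (err) {
	case SCSI_DH_OK:
	case SCSI_DH_DEV_OFFLINED:
		break;
	default:
		goto failed;
	}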
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 293c183dfe6d..e7fc70d6b478 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -182,14 +182,24 @@ struct rdac_dh_data {
182 struct rdac_controller *ctlr; 182 struct rdac_controller *ctlr;
183#define UNINITIALIZED_LUN (1 << 8) 183#define UNINITIALIZED_LUN (1 << 8)
184 unsigned lun; 184 unsigned lun;
185
186#define RDAC_MODE 0
187#define RDAC_MODE_AVT 1
188#define RDAC_MODE_IOSHIP 2
189 unsigned char mode;
190
185#define RDAC_STATE_ACTIVE 0 191#define RDAC_STATE_ACTIVE 0
186#define RDAC_STATE_PASSIVE 1 192#define RDAC_STATE_PASSIVE 1
187 unsigned char state; 193 unsigned char state;
188 194
189#define RDAC_LUN_UNOWNED 0 195#define RDAC_LUN_UNOWNED 0
190#define RDAC_LUN_OWNED 1 196#define RDAC_LUN_OWNED 1
191#define RDAC_LUN_AVT 2
192 char lun_state; 197 char lun_state;
198
199#define RDAC_PREFERRED 0
200#define RDAC_NON_PREFERRED 1
201 char preferred;
202
193 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 203 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
194 union { 204 union {
195 struct c2_inquiry c2; 205 struct c2_inquiry c2;
@@ -199,11 +209,15 @@ struct rdac_dh_data {
199 } inq; 209 } inq;
200}; 210};
201 211
212static const char *mode[] = {
213 "RDAC",
214 "AVT",
215 "IOSHIP",
216};
202static const char *lun_state[] = 217static const char *lun_state[] =
203{ 218{
204 "unowned", 219 "unowned",
205 "owned", 220 "owned",
206 "owned (AVT mode)",
207}; 221};
208 222
209struct rdac_queue_data { 223struct rdac_queue_data {
@@ -458,25 +472,33 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
458 int err; 472 int err;
459 struct c9_inquiry *inqp; 473 struct c9_inquiry *inqp;
460 474
461 h->lun_state = RDAC_LUN_UNOWNED;
462 h->state = RDAC_STATE_ACTIVE; 475 h->state = RDAC_STATE_ACTIVE;
463 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); 476 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
464 if (err == SCSI_DH_OK) { 477 if (err == SCSI_DH_OK) {
465 inqp = &h->inq.c9; 478 inqp = &h->inq.c9;
466 if ((inqp->avte_cvp >> 7) == 0x1) { 479 /* detect the operating mode */
467 /* LUN in AVT mode */ 480 if ((inqp->avte_cvp >> 5) & 0x1)
468 sdev_printk(KERN_NOTICE, sdev, 481 h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
469 "%s: AVT mode detected\n", 482 else if (inqp->avte_cvp >> 7)
470 RDAC_NAME); 483 h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
471 h->lun_state = RDAC_LUN_AVT; 484 else
472 } else if ((inqp->avte_cvp & 0x1) != 0) { 485 h->mode = RDAC_MODE; /* LUN in RDAC mode */
473 /* LUN was owned by the controller */ 486
487 /* Update ownership */
488 if (inqp->avte_cvp & 0x1)
474 h->lun_state = RDAC_LUN_OWNED; 489 h->lun_state = RDAC_LUN_OWNED;
490 else {
491 h->lun_state = RDAC_LUN_UNOWNED;
492 if (h->mode == RDAC_MODE)
493 h->state = RDAC_STATE_PASSIVE;
475 } 494 }
476 }
477 495
478 if (h->lun_state == RDAC_LUN_UNOWNED) 496 /* Update path prio*/
479 h->state = RDAC_STATE_PASSIVE; 497 if (inqp->path_prio & 0x1)
498 h->preferred = RDAC_PREFERRED;
499 else
500 h->preferred = RDAC_NON_PREFERRED;
501 }
480 502
481 return err; 503 return err;
482} 504}
@@ -648,12 +670,27 @@ static int rdac_activate(struct scsi_device *sdev,
648{ 670{
649 struct rdac_dh_data *h = get_rdac_data(sdev); 671 struct rdac_dh_data *h = get_rdac_data(sdev);
650 int err = SCSI_DH_OK; 672 int err = SCSI_DH_OK;
673 int act = 0;
651 674
652 err = check_ownership(sdev, h); 675 err = check_ownership(sdev, h);
653 if (err != SCSI_DH_OK) 676 if (err != SCSI_DH_OK)
654 goto done; 677 goto done;
655 678
656 if (h->lun_state == RDAC_LUN_UNOWNED) { 679 switch (h->mode) {
680 case RDAC_MODE:
681 if (h->lun_state == RDAC_LUN_UNOWNED)
682 act = 1;
683 break;
684 case RDAC_MODE_IOSHIP:
685 if ((h->lun_state == RDAC_LUN_UNOWNED) &&
686 (h->preferred == RDAC_PREFERRED))
687 act = 1;
688 break;
689 default:
690 break;
691 }
692
693 if (act) {
657 err = queue_mode_select(sdev, fn, data); 694 err = queue_mode_select(sdev, fn, data);
658 if (err == SCSI_DH_OK) 695 if (err == SCSI_DH_OK)
659 return 0; 696 return 0;
@@ -836,8 +873,9 @@ static int rdac_bus_attach(struct scsi_device *sdev)
836 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 873 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
837 874
838 sdev_printk(KERN_NOTICE, sdev, 875 sdev_printk(KERN_NOTICE, sdev,
839 "%s: LUN %d (%s)\n", 876 "%s: LUN %d (%s) (%s)\n",
840 RDAC_NAME, h->lun, lun_state[(int)h->lun_state]); 877 RDAC_NAME, h->lun, mode[(int)h->mode],
878 lun_state[(int)h->lun_state]);
841 879
842 return 0; 880 return 0;
843 881
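check_ownership() now derives three operating modes from the C9 inquiry's avte_cvp byte: bit 5 selects IOSHIP, else bit 7 selects AVT, else plain RDAC; bit 0 still carries LUN ownership, and path_prio bit 0 the preferred-path flag. The full decode, consolidated from the hunk above:

	unsigned char avte_cvp = inqp->avte_cvp;

	if ((avte_cvp >> 5) & 0x1)
		h->mode = RDAC_MODE_IOSHIP;
	else if (avte_cvp >> 7)
		h->mode = RDAC_MODE_AVT;
	else
		h->mode = RDAC_MODE;

	h->lun_state = (avte_cvp & 0x1) ? RDAC_LUN_OWNED : RDAC_LUN_UNOWNED;
	h->preferred = (inqp->path_prio & 0x1) ? RDAC_PREFERRED
					       : RDAC_NON_PREFERRED;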
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index cffcb108ac96..b4f6c9a84e71 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -780,7 +780,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
780 return FAILED; 780 return FAILED;
781 } 781 }
782 pHba = (adpt_hba*) cmd->device->host->hostdata[0]; 782 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
783 printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number); 783 printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
784 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) { 784 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
785 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name); 785 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
786 return FAILED; 786 return FAILED;
@@ -802,10 +802,10 @@ static int adpt_abort(struct scsi_cmnd * cmd)
802 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name); 802 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
803 return FAILED; 803 return FAILED;
804 } 804 }
805 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number); 805 printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
806 return FAILED; 806 return FAILED;
807 } 807 }
808 printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number); 808 printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
809 return SUCCESS; 809 return SUCCESS;
810} 810}
811 811
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 0eb4fe6a4c8a..94de88955a99 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1766,8 +1766,8 @@ static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt,
1766 struct mscp *cpp; 1766 struct mscp *cpp;
1767 1767
1768 if (SCpnt->host_scribble) 1768 if (SCpnt->host_scribble)
1769 panic("%s: qcomm, pid %ld, SCpnt %p already active.\n", 1769 panic("%s: qcomm, SCpnt %p already active.\n",
1770 ha->board_name, SCpnt->serial_number, SCpnt); 1770 ha->board_name, SCpnt);
1771 1771
1772 /* i is the mailbox number, look for the first free mailbox 1772 /* i is the mailbox number, look for the first free mailbox
1773 starting from last_cp_used */ 1773 starting from last_cp_used */
@@ -1801,7 +1801,7 @@ static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt,
1801 1801
1802 if (do_trace) 1802 if (do_trace)
1803 scmd_printk(KERN_INFO, SCpnt, 1803 scmd_printk(KERN_INFO, SCpnt,
1804 "qcomm, mbox %d, pid %ld.\n", i, SCpnt->serial_number); 1804 "qcomm, mbox %d.\n", i);
1805 1805
1806 cpp->reqsen = 1; 1806 cpp->reqsen = 1;
1807 cpp->dispri = 1; 1807 cpp->dispri = 1;
@@ -1833,8 +1833,7 @@ static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt,
1833 if (do_dma(shost->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) { 1833 if (do_dma(shost->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) {
1834 unmap_dma(i, ha); 1834 unmap_dma(i, ha);
1835 SCpnt->host_scribble = NULL; 1835 SCpnt->host_scribble = NULL;
1836 scmd_printk(KERN_INFO, SCpnt, 1836 scmd_printk(KERN_INFO, SCpnt, "qcomm, adapter busy.\n");
1837 "qcomm, pid %ld, adapter busy.\n", SCpnt->serial_number);
1838 return 1; 1837 return 1;
1839 } 1838 }
1840 1839
@@ -1851,14 +1850,12 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg)
1851 unsigned int i; 1850 unsigned int i;
1852 1851
1853 if (SCarg->host_scribble == NULL) { 1852 if (SCarg->host_scribble == NULL) {
1854 scmd_printk(KERN_INFO, SCarg, 1853 scmd_printk(KERN_INFO, SCarg, "abort, cmd inactive.\n");
1855 "abort, pid %ld inactive.\n", SCarg->serial_number);
1856 return SUCCESS; 1854 return SUCCESS;
1857 } 1855 }
1858 1856
1859 i = *(unsigned int *)SCarg->host_scribble; 1857 i = *(unsigned int *)SCarg->host_scribble;
1860 scmd_printk(KERN_WARNING, SCarg, 1858 scmd_printk(KERN_WARNING, SCarg, "abort, mbox %d.\n", i);
1861 "abort, mbox %d, pid %ld.\n", i, SCarg->serial_number);
1862 1859
1863 if (i >= shost->can_queue) 1860 if (i >= shost->can_queue)
1864 panic("%s: abort, invalid SCarg->host_scribble.\n", ha->board_name); 1861 panic("%s: abort, invalid SCarg->host_scribble.\n", ha->board_name);
@@ -1902,8 +1899,8 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg)
1902 SCarg->result = DID_ABORT << 16; 1899 SCarg->result = DID_ABORT << 16;
1903 SCarg->host_scribble = NULL; 1900 SCarg->host_scribble = NULL;
1904 ha->cp_stat[i] = FREE; 1901 ha->cp_stat[i] = FREE;
1905 printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n", 1902 printk("%s, abort, mbox %d ready, DID_ABORT, done.\n",
1906 ha->board_name, i, SCarg->serial_number); 1903 ha->board_name, i);
1907 SCarg->scsi_done(SCarg); 1904 SCarg->scsi_done(SCarg);
1908 return SUCCESS; 1905 return SUCCESS;
1909 } 1906 }
@@ -1919,13 +1916,12 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
1919 struct Scsi_Host *shost = SCarg->device->host; 1916 struct Scsi_Host *shost = SCarg->device->host;
1920 struct hostdata *ha = (struct hostdata *)shost->hostdata; 1917 struct hostdata *ha = (struct hostdata *)shost->hostdata;
1921 1918
1922 scmd_printk(KERN_INFO, SCarg, 1919 scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");
1923 "reset, enter, pid %ld.\n", SCarg->serial_number);
1924 1920
1925 spin_lock_irq(shost->host_lock); 1921 spin_lock_irq(shost->host_lock);
1926 1922
1927 if (SCarg->host_scribble == NULL) 1923 if (SCarg->host_scribble == NULL)
1928 printk("%s: reset, pid %ld inactive.\n", ha->board_name, SCarg->serial_number); 1924 printk("%s: reset, inactive.\n", ha->board_name);
1929 1925
1930 if (ha->in_reset) { 1926 if (ha->in_reset) {
1931 printk("%s: reset, exit, already in reset.\n", ha->board_name); 1927 printk("%s: reset, exit, already in reset.\n", ha->board_name);
@@ -1964,14 +1960,14 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
1964 1960
1965 if (ha->cp_stat[i] == READY || ha->cp_stat[i] == ABORTING) { 1961 if (ha->cp_stat[i] == READY || ha->cp_stat[i] == ABORTING) {
1966 ha->cp_stat[i] = ABORTING; 1962 ha->cp_stat[i] = ABORTING;
1967 printk("%s: reset, mbox %d aborting, pid %ld.\n", 1963 printk("%s: reset, mbox %d aborting.\n",
1968 ha->board_name, i, SCpnt->serial_number); 1964 ha->board_name, i);
1969 } 1965 }
1970 1966
1971 else { 1967 else {
1972 ha->cp_stat[i] = IN_RESET; 1968 ha->cp_stat[i] = IN_RESET;
1973 printk("%s: reset, mbox %d in reset, pid %ld.\n", 1969 printk("%s: reset, mbox %d in reset.\n",
1974 ha->board_name, i, SCpnt->serial_number); 1970 ha->board_name, i);
1975 } 1971 }
1976 1972
1977 if (SCpnt->host_scribble == NULL) 1973 if (SCpnt->host_scribble == NULL)
@@ -2025,8 +2021,8 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
2025 ha->cp_stat[i] = LOCKED; 2021 ha->cp_stat[i] = LOCKED;
2026 2022
2027 printk 2023 printk
2028 ("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n", 2024 ("%s, reset, mbox %d locked, DID_RESET, done.\n",
2029 ha->board_name, i, SCpnt->serial_number); 2025 ha->board_name, i);
2030 } 2026 }
2031 2027
2032 else if (ha->cp_stat[i] == ABORTING) { 2028 else if (ha->cp_stat[i] == ABORTING) {
@@ -2039,8 +2035,8 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
2039 ha->cp_stat[i] = FREE; 2035 ha->cp_stat[i] = FREE;
2040 2036
2041 printk 2037 printk
2042 ("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n", 2038 ("%s, reset, mbox %d aborting, DID_RESET, done.\n",
2043 ha->board_name, i, SCpnt->serial_number); 2039 ha->board_name, i);
2044 } 2040 }
2045 2041
2046 else 2042 else
@@ -2054,7 +2050,7 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
2054 do_trace = 0; 2050 do_trace = 0;
2055 2051
2056 if (arg_done) 2052 if (arg_done)
2057 printk("%s: reset, exit, pid %ld done.\n", ha->board_name, SCarg->serial_number); 2053 printk("%s: reset, exit, done.\n", ha->board_name);
2058 else 2054 else
2059 printk("%s: reset, exit.\n", ha->board_name); 2055 printk("%s: reset, exit.\n", ha->board_name);
2060 2056
@@ -2238,10 +2234,10 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2238 cpp = &ha->cp[k]; 2234 cpp = &ha->cp[k];
2239 SCpnt = cpp->SCpnt; 2235 SCpnt = cpp->SCpnt;
2240 scmd_printk(KERN_INFO, SCpnt, 2236 scmd_printk(KERN_INFO, SCpnt,
2241 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u" 2237 "%s mb %d fc %d nr %d sec %ld ns %u"
2242 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2238 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2243 (ihdlr ? "ihdlr" : "qcomm"), 2239 (ihdlr ? "ihdlr" : "qcomm"),
2244 SCpnt->serial_number, k, flushcount, 2240 k, flushcount,
2245 n_ready, blk_rq_pos(SCpnt->request), 2241 n_ready, blk_rq_pos(SCpnt->request),
2246 blk_rq_sectors(SCpnt->request), cursec, YESNO(s), 2242 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2247 YESNO(r), YESNO(rev), YESNO(input_only), 2243 YESNO(r), YESNO(rev), YESNO(input_only),
@@ -2285,10 +2281,10 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec,
2285 2281
2286 if (do_dma(dev->host->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) { 2282 if (do_dma(dev->host->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) {
2287 scmd_printk(KERN_INFO, SCpnt, 2283 scmd_printk(KERN_INFO, SCpnt,
2288 "%s, pid %ld, mbox %d, adapter" 2284 "%s, mbox %d, adapter"
2289 " busy, will abort.\n", 2285 " busy, will abort.\n",
2290 (ihdlr ? "ihdlr" : "qcomm"), 2286 (ihdlr ? "ihdlr" : "qcomm"),
2291 SCpnt->serial_number, k); 2287 k);
2292 ha->cp_stat[k] = ABORTING; 2288 ha->cp_stat[k] = ABORTING;
2293 continue; 2289 continue;
2294 } 2290 }
@@ -2398,12 +2394,12 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2398 panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", ha->board_name, i); 2394 panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", ha->board_name, i);
2399 2395
2400 if (SCpnt->host_scribble == NULL) 2396 if (SCpnt->host_scribble == NULL)
2401 panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", ha->board_name, 2397 panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", ha->board_name,
2402 i, SCpnt->serial_number, SCpnt); 2398 i, SCpnt);
2403 2399
2404 if (*(unsigned int *)SCpnt->host_scribble != i) 2400 if (*(unsigned int *)SCpnt->host_scribble != i)
2405 panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n", 2401 panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
2406 ha->board_name, i, SCpnt->serial_number, 2402 ha->board_name, i,
2407 *(unsigned int *)SCpnt->host_scribble); 2403 *(unsigned int *)SCpnt->host_scribble);
2408 2404
2409 sync_dma(i, ha); 2405 sync_dma(i, ha);
@@ -2449,11 +2445,11 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2449 if (spp->target_status && SCpnt->device->type == TYPE_DISK && 2445 if (spp->target_status && SCpnt->device->type == TYPE_DISK &&
2450 (!(tstatus == CHECK_CONDITION && ha->iocount <= 1000 && 2446 (!(tstatus == CHECK_CONDITION && ha->iocount <= 1000 &&
2451 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY))) 2447 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
2452 printk("%s: ihdlr, target %d.%d:%d, pid %ld, " 2448 printk("%s: ihdlr, target %d.%d:%d, "
2453 "target_status 0x%x, sense key 0x%x.\n", 2449 "target_status 0x%x, sense key 0x%x.\n",
2454 ha->board_name, 2450 ha->board_name,
2455 SCpnt->device->channel, SCpnt->device->id, 2451 SCpnt->device->channel, SCpnt->device->id,
2456 SCpnt->device->lun, SCpnt->serial_number, 2452 SCpnt->device->lun,
2457 spp->target_status, SCpnt->sense_buffer[2]); 2453 spp->target_status, SCpnt->sense_buffer[2]);
2458 2454
2459 ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0; 2455 ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0;
@@ -2522,9 +2518,9 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2522 do_trace || msg_byte(spp->target_status)) 2518 do_trace || msg_byte(spp->target_status))
2523#endif 2519#endif
2524 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x," 2520 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"
2525 " pid %ld, reg 0x%x, count %d.\n", 2521 " reg 0x%x, count %d.\n",
2526 i, spp->adapter_status, spp->target_status, 2522 i, spp->adapter_status, spp->target_status,
2527 SCpnt->serial_number, reg, ha->iocount); 2523 reg, ha->iocount);
2528 2524
2529 unmap_dma(i, ha); 2525 unmap_dma(i, ha);
2530 2526
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 4a9641e69f54..d5f8362335d3 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -372,8 +372,7 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd,
372 cp->status = USED; /* claim free slot */ 372 cp->status = USED; /* claim free slot */
373 373
374 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, 374 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
375 "eata_pio_queue pid %ld, y %d\n", 375 "eata_pio_queue 0x%p, y %d\n", cmd, y));
376 cmd->serial_number, y));
377 376
378 cmd->scsi_done = (void *) done; 377 cmd->scsi_done = (void *) done;
379 378
@@ -417,8 +416,8 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd,
417 if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) { 416 if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) {
418 cmd->result = DID_BUS_BUSY << 16; 417 cmd->result = DID_BUS_BUSY << 16;
419 scmd_printk(KERN_NOTICE, cmd, 418 scmd_printk(KERN_NOTICE, cmd,
420 "eata_pio_queue pid %ld, HBA busy, " 419 "eata_pio_queue pid 0x%p, HBA busy, "
421 "returning DID_BUS_BUSY, done.\n", cmd->serial_number); 420 "returning DID_BUS_BUSY, done.\n", cmd);
422 done(cmd); 421 done(cmd);
423 cp->status = FREE; 422 cp->status = FREE;
424 return 0; 423 return 0;
@@ -432,8 +431,8 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd,
432 outw(0, base + HA_RDATA); 431 outw(0, base + HA_RDATA);
433 432
434 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, 433 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
435 "Queued base %#.4lx pid: %ld " 434 "Queued base %#.4lx cmd: 0x%p "
436 "slot %d irq %d\n", sh->base, cmd->serial_number, y, sh->irq)); 435 "slot %d irq %d\n", sh->base, cmd, y, sh->irq));
437 436
438 return 0; 437 return 0;
439} 438}
@@ -445,8 +444,7 @@ static int eata_pio_abort(struct scsi_cmnd *cmd)
445 unsigned int loop = 100; 444 unsigned int loop = 100;
446 445
447 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, 446 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
448 "eata_pio_abort called pid: %ld\n", 447 "eata_pio_abort called pid: 0x%p\n", cmd));
449 cmd->serial_number));
450 448
451 while (inb(cmd->device->host->base + HA_RAUXSTAT) & HA_ABUSY) 449 while (inb(cmd->device->host->base + HA_RAUXSTAT) & HA_ABUSY)
452 if (--loop == 0) { 450 if (--loop == 0) {
@@ -481,8 +479,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
481 struct Scsi_Host *host = cmd->device->host; 479 struct Scsi_Host *host = cmd->device->host;
482 480
483 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, 481 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
484 "eata_pio_reset called pid:%ld\n", 482 "eata_pio_reset called\n"));
485 cmd->serial_number));
486 483
487 spin_lock_irq(host->host_lock); 484 spin_lock_irq(host->host_lock);
488 485
@@ -501,7 +498,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
501 498
502 sp = HD(cmd)->ccb[x].cmd; 499 sp = HD(cmd)->ccb[x].cmd;
503 HD(cmd)->ccb[x].status = RESET; 500 HD(cmd)->ccb[x].status = RESET;
504 printk(KERN_WARNING "eata_pio_reset: slot %d in reset, pid %ld.\n", x, sp->serial_number); 501 printk(KERN_WARNING "eata_pio_reset: slot %d in reset.\n", x);
505 502
506 if (sp == NULL) 503 if (sp == NULL)
507 panic("eata_pio_reset: slot %d, sp==NULL.\n", x); 504 panic("eata_pio_reset: slot %d, sp==NULL.\n", x);
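eata_pio makes the same switch but identifies in-flight commands by address: the struct scsi_cmnd pointer is stable for the command's lifetime and needs no bookkeeping of its own. The idiom, as a hedged sketch (slot and irq stand in for the driver's y and sh->irq):

/* %p of the command replaces the old driver-maintained serial number
 * as the cross-reference between queue, abort and reset messages.
 */
scmd_printk(KERN_DEBUG, cmd, "queued cmd 0x%p, slot %d irq %d\n",
	    cmd, slot, irq);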
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 57558523c1b8..9a1af1d6071a 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -708,8 +708,7 @@ static void esp_maybe_execute_command(struct esp *esp)
708 tp = &esp->target[tgt]; 708 tp = &esp->target[tgt];
709 lp = dev->hostdata; 709 lp = dev->hostdata;
710 710
711 list_del(&ent->list); 711 list_move(&ent->list, &esp->active_cmds);
712 list_add(&ent->list, &esp->active_cmds);
713 712
714 esp->active_cmd = ent; 713 esp->active_cmd = ent;
715 714
@@ -1244,8 +1243,7 @@ static int esp_finish_select(struct esp *esp)
1244 /* Now that the state is unwound properly, put back onto 1243 /* Now that the state is unwound properly, put back onto
1245 * the issue queue. This command is no longer active. 1244 * the issue queue. This command is no longer active.
1246 */ 1245 */
1247 list_del(&ent->list); 1246 list_move(&ent->list, &esp->queued_cmds);
1248 list_add(&ent->list, &esp->queued_cmds);
1249 esp->active_cmd = NULL; 1247 esp->active_cmd = NULL;
1250 1248
1251 /* Return value ignored by caller, it directly invokes 1249 /* Return value ignored by caller, it directly invokes
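Both esp_scsi hunks (and the fcoe_ctlr.c hunk further down) collapse a list_del()/list_add() pair into list_move(), which unlinks the entry and splices it onto the new list head in one call. A minimal self-contained sketch, assuming a struct with an embedded list_head:

#include <linux/list.h>

struct ent_like {
	struct list_head list;	/* linkage into queued/active lists */
};

static void make_active(struct ent_like *ent, struct list_head *active)
{
	/* Equivalent to list_del(&ent->list) followed by
	 * list_add(&ent->list, active): the entry leaves whichever
	 * list currently holds it and lands at the head of @active.
	 */
	list_move(&ent->list, active);
}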
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index bde6ee5333eb..5d3700dc6f8c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -381,6 +381,42 @@ out:
381} 381}
382 382
383/** 383/**
 384 * fcoe_interface_release() - fcoe_interface kref release function
385 * @kref: Embedded reference count in an fcoe_interface struct
386 */
387static void fcoe_interface_release(struct kref *kref)
388{
389 struct fcoe_interface *fcoe;
390 struct net_device *netdev;
391
392 fcoe = container_of(kref, struct fcoe_interface, kref);
393 netdev = fcoe->netdev;
394 /* tear-down the FCoE controller */
395 fcoe_ctlr_destroy(&fcoe->ctlr);
396 kfree(fcoe);
397 dev_put(netdev);
398 module_put(THIS_MODULE);
399}
400
401/**
402 * fcoe_interface_get() - Get a reference to a FCoE interface
403 * @fcoe: The FCoE interface to be held
404 */
405static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
406{
407 kref_get(&fcoe->kref);
408}
409
410/**
411 * fcoe_interface_put() - Put a reference to a FCoE interface
412 * @fcoe: The FCoE interface to be released
413 */
414static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
415{
416 kref_put(&fcoe->kref, fcoe_interface_release);
417}
418
419/**
384 * fcoe_interface_cleanup() - Clean up a FCoE interface 420 * fcoe_interface_cleanup() - Clean up a FCoE interface
385 * @fcoe: The FCoE interface to be cleaned up 421 * @fcoe: The FCoE interface to be cleaned up
386 * 422 *
@@ -392,6 +428,21 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
392 struct fcoe_ctlr *fip = &fcoe->ctlr; 428 struct fcoe_ctlr *fip = &fcoe->ctlr;
393 u8 flogi_maddr[ETH_ALEN]; 429 u8 flogi_maddr[ETH_ALEN];
394 const struct net_device_ops *ops; 430 const struct net_device_ops *ops;
431 struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
432
433 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
434
435 /* Logout of the fabric */
436 fc_fabric_logoff(fcoe->ctlr.lp);
437
438 /* Cleanup the fc_lport */
439 fc_lport_destroy(fcoe->ctlr.lp);
440
441 /* Stop the transmit retry timer */
442 del_timer_sync(&port->timer);
443
444 /* Free existing transmit skbs */
445 fcoe_clean_pending_queue(fcoe->ctlr.lp);
395 446
396 /* 447 /*
397 * Don't listen for Ethernet packets anymore. 448 * Don't listen for Ethernet packets anymore.
@@ -414,6 +465,9 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
414 } else 465 } else
415 dev_mc_del(netdev, FIP_ALL_ENODE_MACS); 466 dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
416 467
468 if (!is_zero_ether_addr(port->data_src_addr))
469 dev_uc_del(netdev, port->data_src_addr);
470
417 /* Tell the LLD we are done w/ FCoE */ 471 /* Tell the LLD we are done w/ FCoE */
418 ops = netdev->netdev_ops; 472 ops = netdev->netdev_ops;
419 if (ops->ndo_fcoe_disable) { 473 if (ops->ndo_fcoe_disable) {
@@ -421,42 +475,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
421 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" 475 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
422 " specific feature for LLD.\n"); 476 " specific feature for LLD.\n");
423 } 477 }
424} 478 fcoe_interface_put(fcoe);
425
426/**
 427 * fcoe_interface_release() - fcoe_interface kref release function
428 * @kref: Embedded reference count in an fcoe_interface struct
429 */
430static void fcoe_interface_release(struct kref *kref)
431{
432 struct fcoe_interface *fcoe;
433 struct net_device *netdev;
434
435 fcoe = container_of(kref, struct fcoe_interface, kref);
436 netdev = fcoe->netdev;
437 /* tear-down the FCoE controller */
438 fcoe_ctlr_destroy(&fcoe->ctlr);
439 kfree(fcoe);
440 dev_put(netdev);
441 module_put(THIS_MODULE);
442}
443
444/**
445 * fcoe_interface_get() - Get a reference to a FCoE interface
446 * @fcoe: The FCoE interface to be held
447 */
448static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
449{
450 kref_get(&fcoe->kref);
451}
452
453/**
454 * fcoe_interface_put() - Put a reference to a FCoE interface
455 * @fcoe: The FCoE interface to be released
456 */
457static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
458{
459 kref_put(&fcoe->kref, fcoe_interface_release);
460} 479}
461 480
462/** 481/**
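This block only relocates fcoe's kref helpers earlier in the file so that fcoe_interface_cleanup() can call fcoe_interface_put() directly. The underlying pattern, sketched on a hypothetical struct foo:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
	/* ... owned resources ... */
};

/* Runs exactly once, when the last reference is dropped. */
static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, kref);

	kfree(f);
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		kref_init(&f->kref);	/* refcount starts at 1 */
	return f;
}

/* Each additional holder takes kref_get(&f->kref) and later drops: */
static void foo_put(struct foo *f)
{
	kref_put(&f->kref, foo_release);
}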
@@ -821,39 +840,9 @@ skip_oem:
821 * fcoe_if_destroy() - Tear down a SW FCoE instance 840 * fcoe_if_destroy() - Tear down a SW FCoE instance
822 * @lport: The local port to be destroyed 841 * @lport: The local port to be destroyed
823 * 842 *
824 * Locking: must be called with the RTNL mutex held and RTNL mutex
825 * needed to be dropped by this function since not dropping RTNL
826 * would cause circular locking warning on synchronous fip worker
827 * cancelling thru fcoe_interface_put invoked by this function.
828 *
829 */ 843 */
830static void fcoe_if_destroy(struct fc_lport *lport) 844static void fcoe_if_destroy(struct fc_lport *lport)
831{ 845{
832 struct fcoe_port *port = lport_priv(lport);
833 struct fcoe_interface *fcoe = port->priv;
834 struct net_device *netdev = fcoe->netdev;
835
836 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
837
838 /* Logout of the fabric */
839 fc_fabric_logoff(lport);
840
841 /* Cleanup the fc_lport */
842 fc_lport_destroy(lport);
843
844 /* Stop the transmit retry timer */
845 del_timer_sync(&port->timer);
846
847 /* Free existing transmit skbs */
848 fcoe_clean_pending_queue(lport);
849
850 if (!is_zero_ether_addr(port->data_src_addr))
851 dev_uc_del(netdev, port->data_src_addr);
852 rtnl_unlock();
853
854 /* receives may not be stopped until after this */
855 fcoe_interface_put(fcoe);
856
857 /* Free queued packets for the per-CPU receive threads */ 846 /* Free queued packets for the per-CPU receive threads */
858 fcoe_percpu_clean(lport); 847 fcoe_percpu_clean(lport);
859 848
@@ -1783,23 +1772,8 @@ static int fcoe_disable(struct net_device *netdev)
1783 int rc = 0; 1772 int rc = 0;
1784 1773
1785 mutex_lock(&fcoe_config_mutex); 1774 mutex_lock(&fcoe_config_mutex);
1786#ifdef CONFIG_FCOE_MODULE
1787 /*
1788 * Make sure the module has been initialized, and is not about to be
1789 * removed. Module parameter sysfs files are writable before the
1790 * module_init function is called and after module_exit.
1791 */
1792 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1793 rc = -ENODEV;
1794 goto out_nodev;
1795 }
1796#endif
1797
1798 if (!rtnl_trylock()) {
1799 mutex_unlock(&fcoe_config_mutex);
1800 return -ERESTARTSYS;
1801 }
1802 1775
1776 rtnl_lock();
1803 fcoe = fcoe_hostlist_lookup_port(netdev); 1777 fcoe = fcoe_hostlist_lookup_port(netdev);
1804 rtnl_unlock(); 1778 rtnl_unlock();
1805 1779
@@ -1809,7 +1783,6 @@ static int fcoe_disable(struct net_device *netdev)
1809 } else 1783 } else
1810 rc = -ENODEV; 1784 rc = -ENODEV;
1811 1785
1812out_nodev:
1813 mutex_unlock(&fcoe_config_mutex); 1786 mutex_unlock(&fcoe_config_mutex);
1814 return rc; 1787 return rc;
1815} 1788}
@@ -1828,22 +1801,7 @@ static int fcoe_enable(struct net_device *netdev)
1828 int rc = 0; 1801 int rc = 0;
1829 1802
1830 mutex_lock(&fcoe_config_mutex); 1803 mutex_lock(&fcoe_config_mutex);
1831#ifdef CONFIG_FCOE_MODULE 1804 rtnl_lock();
1832 /*
1833 * Make sure the module has been initialized, and is not about to be
1834 * removed. Module parameter sysfs files are writable before the
1835 * module_init function is called and after module_exit.
1836 */
1837 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1838 rc = -ENODEV;
1839 goto out_nodev;
1840 }
1841#endif
1842 if (!rtnl_trylock()) {
1843 mutex_unlock(&fcoe_config_mutex);
1844 return -ERESTARTSYS;
1845 }
1846
1847 fcoe = fcoe_hostlist_lookup_port(netdev); 1805 fcoe = fcoe_hostlist_lookup_port(netdev);
1848 rtnl_unlock(); 1806 rtnl_unlock();
1849 1807
@@ -1852,7 +1810,6 @@ static int fcoe_enable(struct net_device *netdev)
1852 else if (!fcoe_link_ok(fcoe->ctlr.lp)) 1810 else if (!fcoe_link_ok(fcoe->ctlr.lp))
1853 fcoe_ctlr_link_up(&fcoe->ctlr); 1811 fcoe_ctlr_link_up(&fcoe->ctlr);
1854 1812
1855out_nodev:
1856 mutex_unlock(&fcoe_config_mutex); 1813 mutex_unlock(&fcoe_config_mutex);
1857 return rc; 1814 return rc;
1858} 1815}
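fcoe_disable() and fcoe_enable() previously grabbed RTNL with rtnl_trylock() and bounced contended writes back to userspace as -ERESTARTSYS; with the module-state check gone they can simply sleep in rtnl_lock(). The two idioms side by side, with cfg_mutex standing in for fcoe_config_mutex:

/* Old: fail fast under contention and let the syscall restart. */
mutex_lock(&cfg_mutex);
if (!rtnl_trylock()) {
	mutex_unlock(&cfg_mutex);
	return -ERESTARTSYS;
}

/* New: block until RTNL is free. Safe once nothing that runs
 * under RTNL can wait on cfg_mutex, i.e. no lock inversion.
 */
mutex_lock(&cfg_mutex);
rtnl_lock();
/* ... netdev lookup / list manipulation ... */
rtnl_unlock();
mutex_unlock(&cfg_mutex);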
@@ -1868,35 +1825,22 @@ out_nodev:
1868static int fcoe_destroy(struct net_device *netdev) 1825static int fcoe_destroy(struct net_device *netdev)
1869{ 1826{
1870 struct fcoe_interface *fcoe; 1827 struct fcoe_interface *fcoe;
1828 struct fc_lport *lport;
1871 int rc = 0; 1829 int rc = 0;
1872 1830
1873 mutex_lock(&fcoe_config_mutex); 1831 mutex_lock(&fcoe_config_mutex);
1874#ifdef CONFIG_FCOE_MODULE 1832 rtnl_lock();
1875 /*
1876 * Make sure the module has been initialized, and is not about to be
1877 * removed. Module parameter sysfs files are writable before the
1878 * module_init function is called and after module_exit.
1879 */
1880 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1881 rc = -ENODEV;
1882 goto out_nodev;
1883 }
1884#endif
1885 if (!rtnl_trylock()) {
1886 mutex_unlock(&fcoe_config_mutex);
1887 return -ERESTARTSYS;
1888 }
1889
1890 fcoe = fcoe_hostlist_lookup_port(netdev); 1833 fcoe = fcoe_hostlist_lookup_port(netdev);
1891 if (!fcoe) { 1834 if (!fcoe) {
1892 rtnl_unlock(); 1835 rtnl_unlock();
1893 rc = -ENODEV; 1836 rc = -ENODEV;
1894 goto out_nodev; 1837 goto out_nodev;
1895 } 1838 }
1896 fcoe_interface_cleanup(fcoe); 1839 lport = fcoe->ctlr.lp;
1897 list_del(&fcoe->list); 1840 list_del(&fcoe->list);
1898 /* RTNL mutex is dropped by fcoe_if_destroy */ 1841 fcoe_interface_cleanup(fcoe);
1899 fcoe_if_destroy(fcoe->ctlr.lp); 1842 rtnl_unlock();
1843 fcoe_if_destroy(lport);
1900out_nodev: 1844out_nodev:
1901 mutex_unlock(&fcoe_config_mutex); 1845 mutex_unlock(&fcoe_config_mutex);
1902 return rc; 1846 return rc;
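After this hunk the destroy path takes its locks in one fixed order and never holds RTNL while tearing down the lport, which is what previously forced the trylock dance. Condensed, the sequence is:

mutex_lock(&fcoe_config_mutex);
rtnl_lock();
fcoe = fcoe_hostlist_lookup_port(netdev);
if (fcoe) {
	lport = fcoe->ctlr.lp;
	list_del(&fcoe->list);
	fcoe_interface_cleanup(fcoe);	/* netdev-side teardown, may
					 * drop the last kref */
}
rtnl_unlock();				/* RTNL released first */
if (fcoe)
	fcoe_if_destroy(lport);		/* flushes per-CPU queues with
					 * no RTNL held */
mutex_unlock(&fcoe_config_mutex);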
@@ -1912,8 +1856,6 @@ static void fcoe_destroy_work(struct work_struct *work)
1912 1856
1913 port = container_of(work, struct fcoe_port, destroy_work); 1857 port = container_of(work, struct fcoe_port, destroy_work);
1914 mutex_lock(&fcoe_config_mutex); 1858 mutex_lock(&fcoe_config_mutex);
1915 rtnl_lock();
1916 /* RTNL mutex is dropped by fcoe_if_destroy */
1917 fcoe_if_destroy(port->lport); 1859 fcoe_if_destroy(port->lport);
1918 mutex_unlock(&fcoe_config_mutex); 1860 mutex_unlock(&fcoe_config_mutex);
1919} 1861}
@@ -1948,23 +1890,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
1948 struct fc_lport *lport; 1890 struct fc_lport *lport;
1949 1891
1950 mutex_lock(&fcoe_config_mutex); 1892 mutex_lock(&fcoe_config_mutex);
1951 1893 rtnl_lock();
1952 if (!rtnl_trylock()) {
1953 mutex_unlock(&fcoe_config_mutex);
1954 return -ERESTARTSYS;
1955 }
1956
1957#ifdef CONFIG_FCOE_MODULE
1958 /*
1959 * Make sure the module has been initialized, and is not about to be
1961 * removed. Module parameter sysfs files are writable before the
1961 * module_init function is called and after module_exit.
1962 */
1963 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1964 rc = -ENODEV;
1965 goto out_nodev;
1966 }
1967#endif
1968 1894
1969 /* look for existing lport */ 1895 /* look for existing lport */
1970 if (fcoe_hostlist_lookup(netdev)) { 1896 if (fcoe_hostlist_lookup(netdev)) {
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 9d38be2a41f9..229e4af5508a 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -978,10 +978,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
978 * the FCF that answers multicast solicitations, not the others that 978 * the FCF that answers multicast solicitations, not the others that
979 * are sending periodic multicast advertisements. 979 * are sending periodic multicast advertisements.
980 */ 980 */
981 if (mtu_valid) { 981 if (mtu_valid)
982 list_del(&fcf->list); 982 list_move(&fcf->list, &fip->fcfs);
983 list_add(&fcf->list, &fip->fcfs);
984 }
985 983
986 /* 984 /*
987 * If this is the first validated FCF, note the time and 985 * If this is the first validated FCF, note the time and
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 258684101bfd..f81f77c8569e 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -335,7 +335,7 @@ out_attach:
335EXPORT_SYMBOL(fcoe_transport_attach); 335EXPORT_SYMBOL(fcoe_transport_attach);
336 336
337/** 337/**
338 * fcoe_transport_attach - Detaches an FCoE transport 338 * fcoe_transport_detach - Detaches an FCoE transport
339 * @ft: The fcoe transport to be attached 339 * @ft: The fcoe transport to be attached
340 * 340 *
341 * Returns : 0 for success 341 * Returns : 0 for success
@@ -343,6 +343,7 @@ EXPORT_SYMBOL(fcoe_transport_attach);
343int fcoe_transport_detach(struct fcoe_transport *ft) 343int fcoe_transport_detach(struct fcoe_transport *ft)
344{ 344{
345 int rc = 0; 345 int rc = 0;
346 struct fcoe_netdev_mapping *nm = NULL, *tmp;
346 347
347 mutex_lock(&ft_mutex); 348 mutex_lock(&ft_mutex);
348 if (!ft->attached) { 349 if (!ft->attached) {
@@ -352,6 +353,19 @@ int fcoe_transport_detach(struct fcoe_transport *ft)
352 goto out_attach; 353 goto out_attach;
353 } 354 }
354 355
356 /* remove netdev mapping for this transport as it is going away */
357 mutex_lock(&fn_mutex);
358 list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
359 if (nm->ft == ft) {
360 LIBFCOE_TRANSPORT_DBG("transport %s going away, "
361 "remove its netdev mapping for %s\n",
362 ft->name, nm->netdev->name);
363 list_del(&nm->list);
364 kfree(nm);
365 }
366 }
367 mutex_unlock(&fn_mutex);
368
355 list_del(&ft->list); 369 list_del(&ft->list);
356 ft->attached = false; 370 ft->attached = false;
357 LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); 371 LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
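The detach hunk frees matching netdev mappings while still walking the list, which is only safe with the _safe iterator: it caches the next node before the loop body can list_del()/kfree() the current one. In isolation (nm_like is an illustrative stand-in for fcoe_netdev_mapping):

#include <linux/list.h>
#include <linux/slab.h>

struct nm_like {
	struct list_head list;
	void *owner;
};

static void drop_mappings_for(struct list_head *head, void *owner)
{
	struct nm_like *nm, *tmp;

	/* @tmp holds the successor, so deleting @nm mid-walk is safe. */
	list_for_each_entry_safe(nm, tmp, head, list) {
		if (nm->owner == owner) {
			list_del(&nm->list);
			kfree(nm);
		}
	}
}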
@@ -371,9 +385,9 @@ static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
371 i = j = sprintf(buffer, "Attached FCoE transports:"); 385 i = j = sprintf(buffer, "Attached FCoE transports:");
372 mutex_lock(&ft_mutex); 386 mutex_lock(&ft_mutex);
373 list_for_each_entry(ft, &fcoe_transports, list) { 387 list_for_each_entry(ft, &fcoe_transports, list) {
374 i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); 388 if (i >= PAGE_SIZE - IFNAMSIZ)
375 if (i >= PAGE_SIZE)
376 break; 389 break;
390 i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
377 } 391 }
378 mutex_unlock(&ft_mutex); 392 mutex_unlock(&ft_mutex);
379 if (i == j) 393 if (i == j)
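The fcoe_transport_show() change moves the bounds check before the write: snprintf() into &buffer[i] with size IFNAMSIZ can touch up to IFNAMSIZ bytes at that offset, so testing i against PAGE_SIZE afterwards detected the overflow only once it had already happened. Side by side:

/* Old order: write first, notice the overrun too late. */
i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
if (i >= PAGE_SIZE)
	break;

/* Fixed order: bail while a full IFNAMSIZ write still fits. */
if (i >= PAGE_SIZE - IFNAMSIZ)
	break;
i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);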
@@ -530,9 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
530 struct fcoe_transport *ft = NULL; 544 struct fcoe_transport *ft = NULL;
531 enum fip_state fip_mode = (enum fip_state)(long)kp->arg; 545 enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
532 546
533 if (!mutex_trylock(&ft_mutex))
534 return restart_syscall();
535
536#ifdef CONFIG_LIBFCOE_MODULE 547#ifdef CONFIG_LIBFCOE_MODULE
537 /* 548 /*
538 * Make sure the module has been initialized, and is not about to be 549 * Make sure the module has been initialized, and is not about to be
@@ -543,6 +554,8 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
543 goto out_nodev; 554 goto out_nodev;
544#endif 555#endif
545 556
557 mutex_lock(&ft_mutex);
558
546 netdev = fcoe_if_to_netdev(buffer); 559 netdev = fcoe_if_to_netdev(buffer);
547 if (!netdev) { 560 if (!netdev) {
548 LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); 561 LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
@@ -586,10 +599,7 @@ out_putdev:
586 dev_put(netdev); 599 dev_put(netdev);
587out_nodev: 600out_nodev:
588 mutex_unlock(&ft_mutex); 601 mutex_unlock(&ft_mutex);
589 if (rc == -ERESTARTSYS) 602 return rc;
590 return restart_syscall();
591 else
592 return rc;
593} 603}
594 604
595/** 605/**
@@ -608,9 +618,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
608 struct net_device *netdev = NULL; 618 struct net_device *netdev = NULL;
609 struct fcoe_transport *ft = NULL; 619 struct fcoe_transport *ft = NULL;
610 620
611 if (!mutex_trylock(&ft_mutex))
612 return restart_syscall();
613
614#ifdef CONFIG_LIBFCOE_MODULE 621#ifdef CONFIG_LIBFCOE_MODULE
615 /* 622 /*
616 * Make sure the module has been initialized, and is not about to be 623 * Make sure the module has been initialized, and is not about to be
@@ -621,6 +628,8 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
621 goto out_nodev; 628 goto out_nodev;
622#endif 629#endif
623 630
631 mutex_lock(&ft_mutex);
632
624 netdev = fcoe_if_to_netdev(buffer); 633 netdev = fcoe_if_to_netdev(buffer);
625 if (!netdev) { 634 if (!netdev) {
626 LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); 635 LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
@@ -645,11 +654,7 @@ out_putdev:
645 dev_put(netdev); 654 dev_put(netdev);
646out_nodev: 655out_nodev:
647 mutex_unlock(&ft_mutex); 656 mutex_unlock(&ft_mutex);
648 657 return rc;
649 if (rc == -ERESTARTSYS)
650 return restart_syscall();
651 else
652 return rc;
653} 658}
654 659
655/** 660/**
@@ -667,9 +672,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
667 struct net_device *netdev = NULL; 672 struct net_device *netdev = NULL;
668 struct fcoe_transport *ft = NULL; 673 struct fcoe_transport *ft = NULL;
669 674
670 if (!mutex_trylock(&ft_mutex))
671 return restart_syscall();
672
673#ifdef CONFIG_LIBFCOE_MODULE 675#ifdef CONFIG_LIBFCOE_MODULE
674 /* 676 /*
675 * Make sure the module has been initialized, and is not about to be 677 * Make sure the module has been initialized, and is not about to be
@@ -680,6 +682,8 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
680 goto out_nodev; 682 goto out_nodev;
681#endif 683#endif
682 684
685 mutex_lock(&ft_mutex);
686
683 netdev = fcoe_if_to_netdev(buffer); 687 netdev = fcoe_if_to_netdev(buffer);
684 if (!netdev) 688 if (!netdev)
685 goto out_nodev; 689 goto out_nodev;
@@ -716,9 +720,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
716 struct net_device *netdev = NULL; 720 struct net_device *netdev = NULL;
717 struct fcoe_transport *ft = NULL; 721 struct fcoe_transport *ft = NULL;
718 722
719 if (!mutex_trylock(&ft_mutex))
720 return restart_syscall();
721
722#ifdef CONFIG_LIBFCOE_MODULE 723#ifdef CONFIG_LIBFCOE_MODULE
723 /* 724 /*
724 * Make sure the module has been initialized, and is not about to be 725 * Make sure the module has been initialized, and is not about to be
@@ -729,6 +730,8 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
729 goto out_nodev; 730 goto out_nodev;
730#endif 731#endif
731 732
733 mutex_lock(&ft_mutex);
734
732 netdev = fcoe_if_to_netdev(buffer); 735 netdev = fcoe_if_to_netdev(buffer);
733 if (!netdev) 736 if (!netdev)
734 goto out_nodev; 737 goto out_nodev;
@@ -743,10 +746,7 @@ out_putdev:
743 dev_put(netdev); 746 dev_put(netdev);
744out_nodev: 747out_nodev:
745 mutex_unlock(&ft_mutex); 748 mutex_unlock(&ft_mutex);
746 if (rc == -ERESTARTSYS) 749 return rc;
747 return restart_syscall();
748 else
749 return rc;
750} 750}
751 751
752/** 752/**
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 415ad4fb50d4..c6c0434d8034 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -273,7 +273,7 @@ static ssize_t host_show_transport_mode(struct device *dev,
273 "performant" : "simple"); 273 "performant" : "simple");
274} 274}
275 275
276/* List of controllers which cannot be reset on kexec with reset_devices */ 276/* List of controllers which cannot be hard reset on kexec with reset_devices */
277static u32 unresettable_controller[] = { 277static u32 unresettable_controller[] = {
278 0x324a103C, /* Smart Array P712m */ 278 0x324a103C, /* Smart Array P712m */
279 0x324b103C, /* SmartArray P711m */ 279 0x324b103C, /* SmartArray P711m */
@@ -291,16 +291,45 @@ static u32 unresettable_controller[] = {
291 0x409D0E11, /* Smart Array 6400 EM */ 291 0x409D0E11, /* Smart Array 6400 EM */
292}; 292};
293 293
294static int ctlr_is_resettable(struct ctlr_info *h) 294/* List of controllers which cannot even be soft reset */
295static u32 soft_unresettable_controller[] = {
296 /* Exclude 640x boards. These are two pci devices in one slot
297 * which share a battery backed cache module. One controls the
298 * cache, the other accesses the cache through the one that controls
299 * it. If we reset the one controlling the cache, the other will
300 * likely not be happy. Just forbid resetting this conjoined mess.
301 * The 640x isn't really supported by hpsa anyway.
302 */
303 0x409C0E11, /* Smart Array 6400 */
304 0x409D0E11, /* Smart Array 6400 EM */
305};
306
307static int ctlr_is_hard_resettable(u32 board_id)
295{ 308{
296 int i; 309 int i;
297 310
298 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) 311 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
299 if (unresettable_controller[i] == h->board_id) 312 if (unresettable_controller[i] == board_id)
313 return 0;
314 return 1;
315}
316
317static int ctlr_is_soft_resettable(u32 board_id)
318{
319 int i;
320
321 for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
322 if (soft_unresettable_controller[i] == board_id)
300 return 0; 323 return 0;
301 return 1; 324 return 1;
302} 325}
303 326
327static int ctlr_is_resettable(u32 board_id)
328{
329 return ctlr_is_hard_resettable(board_id) ||
330 ctlr_is_soft_resettable(board_id);
331}
332
304static ssize_t host_show_resettable(struct device *dev, 333static ssize_t host_show_resettable(struct device *dev,
305 struct device_attribute *attr, char *buf) 334 struct device_attribute *attr, char *buf)
306{ 335{
@@ -308,7 +337,7 @@ static ssize_t host_show_resettable(struct device *dev,
308 struct Scsi_Host *shost = class_to_shost(dev); 337 struct Scsi_Host *shost = class_to_shost(dev);
309 338
310 h = shost_to_hba(shost); 339 h = shost_to_hba(shost);
311 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); 340 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
312} 341}
313 342
314static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) 343static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
@@ -929,13 +958,6 @@ static void hpsa_slave_destroy(struct scsi_device *sdev)
929 /* nothing to do. */ 958 /* nothing to do. */
930} 959}
931 960
932static void hpsa_scsi_setup(struct ctlr_info *h)
933{
934 h->ndevices = 0;
935 h->scsi_host = NULL;
936 spin_lock_init(&h->devlock);
937}
938
939static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) 961static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
940{ 962{
941 int i; 963 int i;
@@ -1006,8 +1028,7 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1006 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); 1028 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1007} 1029}
1008 1030
1009static void complete_scsi_command(struct CommandList *cp, 1031static void complete_scsi_command(struct CommandList *cp)
1010 int timeout, u32 tag)
1011{ 1032{
1012 struct scsi_cmnd *cmd; 1033 struct scsi_cmnd *cmd;
1013 struct ctlr_info *h; 1034 struct ctlr_info *h;
@@ -1308,7 +1329,7 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1308 int retry_count = 0; 1329 int retry_count = 0;
1309 1330
1310 do { 1331 do {
1311 memset(c->err_info, 0, sizeof(c->err_info)); 1332 memset(c->err_info, 0, sizeof(*c->err_info));
1312 hpsa_scsi_do_simple_cmd_core(h, c); 1333 hpsa_scsi_do_simple_cmd_core(h, c);
1313 retry_count++; 1334 retry_count++;
1314 } while (check_for_unit_attention(h, c) && retry_count <= 3); 1335 } while (check_for_unit_attention(h, c) && retry_count <= 3);
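The one-character hpsa fix above is the classic sizeof-of-a-pointer bug: c->err_info is a pointer, so sizeof(c->err_info) is the pointer's size (8 bytes on 64-bit) while sizeof(*c->err_info) is the size of the ErrorInfo structure it points to, meaning the old memset cleared only the first few bytes between retries. A standalone illustration:

#include <stdio.h>
#include <string.h>

struct err_info { char data[64]; };

int main(void)
{
	struct err_info e;
	struct err_info *p = &e;

	printf("sizeof(p)  = %zu\n", sizeof(p));	/* 8 on LP64 */
	printf("sizeof(*p) = %zu\n", sizeof(*p));	/* 64 */

	memset(p, 0, sizeof(*p));	/* clears the whole struct */
	return 0;
}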
@@ -1570,6 +1591,7 @@ static unsigned char *msa2xxx_model[] = {
1570 "MSA2024", 1591 "MSA2024",
1571 "MSA2312", 1592 "MSA2312",
1572 "MSA2324", 1593 "MSA2324",
1594 "P2000 G3 SAS",
1573 NULL, 1595 NULL,
1574}; 1596};
1575 1597
@@ -2751,6 +2773,26 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2751 } 2773 }
2752} 2774}
2753 2775
2776static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
2777 unsigned char *scsi3addr, u8 reset_type)
2778{
2779 struct CommandList *c;
2780
2781 c = cmd_alloc(h);
2782 if (!c)
2783 return -ENOMEM;
2784 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2785 RAID_CTLR_LUNID, TYPE_MSG);
2786 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
2787 c->waiting = NULL;
2788 enqueue_cmd_and_start_io(h, c);
2789 /* Don't wait for completion, the reset won't complete. Don't free
2790 * the command either. This is the last command we will send before
2791 * re-initializing everything, so it doesn't matter and won't leak.
2792 */
2793 return 0;
2794}
2795
2754static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 2796static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2755 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, 2797 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2756 int cmd_type) 2798 int cmd_type)
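hpsa_send_host_reset() is deliberately fire-and-forget: c->waiting is NULL so finish_cmd() has nothing to complete, and the command is never freed because the whole controller is about to be reinitialized anyway. A caller only needs the enqueue to succeed, roughly:

/* Usage sketch: RAID_CTLR_LUNID addresses the controller itself,
 * HPSA_RESET_TYPE_CONTROLLER selects a full controller reset.
 */
if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
			 HPSA_RESET_TYPE_CONTROLLER)) {
	dev_warn(&h->pdev->dev, "could not queue controller reset\n");
	return -EIO;
}
/* No completion will arrive; poll for the BOARD_NOT_READY then
 * BOARD_READY transitions instead, as hpsa_kdump_soft_reset() does.
 */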
@@ -2828,7 +2870,8 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2828 c->Request.Type.Attribute = ATTR_SIMPLE; 2870 c->Request.Type.Attribute = ATTR_SIMPLE;
2829 c->Request.Type.Direction = XFER_NONE; 2871 c->Request.Type.Direction = XFER_NONE;
2830 c->Request.Timeout = 0; /* Don't time out */ 2872 c->Request.Timeout = 0; /* Don't time out */
2831 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */ 2873 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2874 c->Request.CDB[0] = cmd;
2832 c->Request.CDB[1] = 0x03; /* Reset target above */ 2875 c->Request.CDB[1] = 0x03; /* Reset target above */
2833 /* If bytes 4-7 are zero, it means reset the */ 2876 /* If bytes 4-7 are zero, it means reset the */
2834 /* LunID device */ 2877 /* LunID device */
@@ -2936,7 +2979,7 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2936{ 2979{
2937 removeQ(c); 2980 removeQ(c);
2938 if (likely(c->cmd_type == CMD_SCSI)) 2981 if (likely(c->cmd_type == CMD_SCSI))
2939 complete_scsi_command(c, 0, raw_tag); 2982 complete_scsi_command(c);
2940 else if (c->cmd_type == CMD_IOCTL_PEND) 2983 else if (c->cmd_type == CMD_IOCTL_PEND)
2941 complete(c->waiting); 2984 complete(c->waiting);
2942} 2985}
@@ -2994,6 +3037,63 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2994 return next_command(h); 3037 return next_command(h);
2995} 3038}
2996 3039
3040/* Some controllers, like p400, will give us one interrupt
3041 * after a soft reset, even if we turned interrupts off.
3042 * Only need to check for this in the hpsa_xxx_discard_completions
3043 * functions.
3044 */
3045static int ignore_bogus_interrupt(struct ctlr_info *h)
3046{
3047 if (likely(!reset_devices))
3048 return 0;
3049
3050 if (likely(h->interrupts_enabled))
3051 return 0;
3052
3053 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3054 "(known firmware bug.) Ignoring.\n");
3055
3056 return 1;
3057}
3058
3059static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
3060{
3061 struct ctlr_info *h = dev_id;
3062 unsigned long flags;
3063 u32 raw_tag;
3064
3065 if (ignore_bogus_interrupt(h))
3066 return IRQ_NONE;
3067
3068 if (interrupt_not_for_us(h))
3069 return IRQ_NONE;
3070 spin_lock_irqsave(&h->lock, flags);
3071 while (interrupt_pending(h)) {
3072 raw_tag = get_next_completion(h);
3073 while (raw_tag != FIFO_EMPTY)
3074 raw_tag = next_command(h);
3075 }
3076 spin_unlock_irqrestore(&h->lock, flags);
3077 return IRQ_HANDLED;
3078}
3079
3080static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
3081{
3082 struct ctlr_info *h = dev_id;
3083 unsigned long flags;
3084 u32 raw_tag;
3085
3086 if (ignore_bogus_interrupt(h))
3087 return IRQ_NONE;
3088
3089 spin_lock_irqsave(&h->lock, flags);
3090 raw_tag = get_next_completion(h);
3091 while (raw_tag != FIFO_EMPTY)
3092 raw_tag = next_command(h);
3093 spin_unlock_irqrestore(&h->lock, flags);
3094 return IRQ_HANDLED;
3095}
3096
2997static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) 3097static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
2998{ 3098{
2999 struct ctlr_info *h = dev_id; 3099 struct ctlr_info *h = dev_id;
@@ -3132,11 +3232,10 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3132 return 0; 3232 return 0;
3133} 3233}
3134 3234
3135#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
3136#define hpsa_noop(p) hpsa_message(p, 3, 0) 3235#define hpsa_noop(p) hpsa_message(p, 3, 0)
3137 3236
3138static int hpsa_controller_hard_reset(struct pci_dev *pdev, 3237static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3139 void * __iomem vaddr, bool use_doorbell) 3238 void * __iomem vaddr, u32 use_doorbell)
3140{ 3239{
3141 u16 pmcsr; 3240 u16 pmcsr;
3142 int pos; 3241 int pos;
@@ -3147,8 +3246,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3147 * other way using the doorbell register. 3246 * other way using the doorbell register.
3148 */ 3247 */
3149 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 3248 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3150 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); 3249 writel(use_doorbell, vaddr + SA5_DOORBELL);
3151 msleep(1000);
3152 } else { /* Try to do it the PCI power state way */ 3250 } else { /* Try to do it the PCI power state way */
3153 3251
3154 /* Quoting from the Open CISS Specification: "The Power 3252 /* Quoting from the Open CISS Specification: "The Power
@@ -3179,12 +3277,63 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3179 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3277 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3180 pmcsr |= PCI_D0; 3278 pmcsr |= PCI_D0;
3181 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3279 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3182
3183 msleep(500);
3184 } 3280 }
3185 return 0; 3281 return 0;
3186} 3282}
3187 3283
3284static __devinit void init_driver_version(char *driver_version, int len)
3285{
3286 memset(driver_version, 0, len);
3287 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
3288}
3289
3290static __devinit int write_driver_ver_to_cfgtable(
3291 struct CfgTable __iomem *cfgtable)
3292{
3293 char *driver_version;
3294 int i, size = sizeof(cfgtable->driver_version);
3295
3296 driver_version = kmalloc(size, GFP_KERNEL);
3297 if (!driver_version)
3298 return -ENOMEM;
3299
3300 init_driver_version(driver_version, size);
3301 for (i = 0; i < size; i++)
3302 writeb(driver_version[i], &cfgtable->driver_version[i]);
3303 kfree(driver_version);
3304 return 0;
3305}
3306
3307static __devinit void read_driver_ver_from_cfgtable(
3308 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3309{
3310 int i;
3311
3312 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3313 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3314}
3315
3316static __devinit int controller_reset_failed(
3317 struct CfgTable __iomem *cfgtable)
3318{
3319
3320 char *driver_ver, *old_driver_ver;
3321 int rc, size = sizeof(cfgtable->driver_version);
3322
3323 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3324 if (!old_driver_ver)
3325 return -ENOMEM;
3326 driver_ver = old_driver_ver + size;
3327
3328 /* After a reset, the 32 bytes of "driver version" in the cfgtable
3329 * should have been changed, otherwise we know the reset failed.
3330 */
3331 init_driver_version(old_driver_ver, size);
3332 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3333 rc = !memcmp(driver_ver, old_driver_ver, size);
3334 kfree(old_driver_ver);
3335 return rc;
3336}
3188/* This does a hard reset of the controller using PCI power management 3337/* This does a hard reset of the controller using PCI power management
3189 * states or the using the doorbell register. 3338 * states or the using the doorbell register.
3190 */ 3339 */
@@ -3195,10 +3344,10 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3195 u64 cfg_base_addr_index; 3344 u64 cfg_base_addr_index;
3196 void __iomem *vaddr; 3345 void __iomem *vaddr;
3197 unsigned long paddr; 3346 unsigned long paddr;
3198 u32 misc_fw_support, active_transport; 3347 u32 misc_fw_support;
3199 int rc; 3348 int rc;
3200 struct CfgTable __iomem *cfgtable; 3349 struct CfgTable __iomem *cfgtable;
3201 bool use_doorbell; 3350 u32 use_doorbell;
3202 u32 board_id; 3351 u32 board_id;
3203 u16 command_register; 3352 u16 command_register;
3204 3353
@@ -3215,20 +3364,15 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3215 * using the doorbell register. 3364 * using the doorbell register.
3216 */ 3365 */
3217 3366
3218 /* Exclude 640x boards. These are two pci devices in one slot
3219 * which share a battery backed cache module. One controls the
3220 * cache, the other accesses the cache through the one that controls
3221 * it. If we reset the one controlling the cache, the other will
3222 * likely not be happy. Just forbid resetting this conjoined mess.
3223 * The 640x isn't really supported by hpsa anyway.
3224 */
3225 rc = hpsa_lookup_board_id(pdev, &board_id); 3367 rc = hpsa_lookup_board_id(pdev, &board_id);
3226 if (rc < 0) { 3368 if (rc < 0 || !ctlr_is_resettable(board_id)) {
3227 dev_warn(&pdev->dev, "Not resetting device.\n"); 3369 dev_warn(&pdev->dev, "Not resetting device.\n");
3228 return -ENODEV; 3370 return -ENODEV;
3229 } 3371 }
3230 if (board_id == 0x409C0E11 || board_id == 0x409D0E11) 3372
3231 return -ENOTSUPP; 3373 /* if controller is soft- but not hard resettable... */
3374 if (!ctlr_is_hard_resettable(board_id))
3375 return -ENOTSUPP; /* try soft reset later. */
3232 3376
3233 /* Save the PCI command register */ 3377 /* Save the PCI command register */
3234 pci_read_config_word(pdev, 4, &command_register); 3378 pci_read_config_word(pdev, 4, &command_register);
@@ -3257,10 +3401,28 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3257 rc = -ENOMEM; 3401 rc = -ENOMEM;
3258 goto unmap_vaddr; 3402 goto unmap_vaddr;
3259 } 3403 }
3404 rc = write_driver_ver_to_cfgtable(cfgtable);
3405 if (rc)
3406 goto unmap_vaddr;
3260 3407
3261 /* If reset via doorbell register is supported, use that. */ 3408 /* If reset via doorbell register is supported, use that.
3409 * There are two such methods. Favor the newest method.
3410 */
3262 misc_fw_support = readl(&cfgtable->misc_fw_support); 3411 misc_fw_support = readl(&cfgtable->misc_fw_support);
3263 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3412 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3413 if (use_doorbell) {
3414 use_doorbell = DOORBELL_CTLR_RESET2;
3415 } else {
3416 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3417 if (use_doorbell) {
3418 dev_warn(&pdev->dev, "Controller claims that "
3419 "'Bit 2 doorbell reset' is "
3420 "supported, but not 'bit 5 doorbell reset'. "
3421 "Firmware update is recommended.\n");
3422 rc = -ENOTSUPP; /* try soft reset */
3423 goto unmap_cfgtable;
3424 }
3425 }
3264 3426
3265 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3427 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3266 if (rc) 3428 if (rc)
@@ -3279,30 +3441,32 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3279 msleep(HPSA_POST_RESET_PAUSE_MSECS); 3441 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3280 3442
3281 /* Wait for board to become not ready, then ready. */ 3443 /* Wait for board to become not ready, then ready. */
3282 dev_info(&pdev->dev, "Waiting for board to become ready.\n"); 3444 dev_info(&pdev->dev, "Waiting for board to reset.\n");
3283 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); 3445 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3284 if (rc) 3446 if (rc) {
3285 dev_warn(&pdev->dev, 3447 dev_warn(&pdev->dev,
3286 "failed waiting for board to become not ready\n"); 3448 "failed waiting for board to reset."
3449 " Will try soft reset.\n");
3450 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
3451 goto unmap_cfgtable;
3452 }
3287 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 3453 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3288 if (rc) { 3454 if (rc) {
3289 dev_warn(&pdev->dev, 3455 dev_warn(&pdev->dev,
3290 "failed waiting for board to become ready\n"); 3456 "failed waiting for board to become ready "
3457 "after hard reset\n");
3291 goto unmap_cfgtable; 3458 goto unmap_cfgtable;
3292 } 3459 }
3293 dev_info(&pdev->dev, "board ready.\n");
3294 3460
3295 /* Controller should be in simple mode at this point. If it's not, 3461 rc = controller_reset_failed(vaddr);
3296 * It means we're on one of those controllers which doesn't support 3462 if (rc < 0)
3297 * the doorbell reset method and on which the PCI power management reset 3463 goto unmap_cfgtable;
3298 * method doesn't work (P800, for example.) 3464 if (rc) {
3299 * In those cases, don't try to proceed, as it generally doesn't work. 3465 dev_warn(&pdev->dev, "Unable to successfully reset "
3300 */ 3466 "controller. Will try soft reset.\n");
3301 active_transport = readl(&cfgtable->TransportActive); 3467 rc = -ENOTSUPP;
3302 if (active_transport & PERFORMANT_MODE) { 3468 } else {
3303 dev_warn(&pdev->dev, "Unable to successfully reset controller," 3469 dev_info(&pdev->dev, "board ready after hard reset.\n");
3304 " Ignoring controller.\n");
3305 rc = -ENODEV;
3306 } 3470 }
3307 3471
3308unmap_cfgtable: 3472unmap_cfgtable:
@@ -3543,6 +3707,9 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3543 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 3707 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3544 if (!h->cfgtable) 3708 if (!h->cfgtable)
3545 return -ENOMEM; 3709 return -ENOMEM;
3710 rc = write_driver_ver_to_cfgtable(h->cfgtable);
3711 if (rc)
3712 return rc;
3546 /* Find performant mode table. */ 3713 /* Find performant mode table. */
3547 trans_offset = readl(&h->cfgtable->TransMethodOffset); 3714 trans_offset = readl(&h->cfgtable->TransMethodOffset);
3548 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 3715 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
@@ -3777,11 +3944,12 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3777 * due to concerns about shared bbwc between 6402/6404 pair. 3944 * due to concerns about shared bbwc between 6402/6404 pair.
3778 */ 3945 */
3779 if (rc == -ENOTSUPP) 3946 if (rc == -ENOTSUPP)
3780 return 0; /* just try to do the kdump anyhow. */ 3947 return rc; /* just try to do the kdump anyhow. */
3781 if (rc) 3948 if (rc)
3782 return -ENODEV; 3949 return -ENODEV;
3783 3950
3784 /* Now try to get the controller to respond to a no-op */ 3951 /* Now try to get the controller to respond to a no-op */
3952 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
3785 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 3953 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3786 if (hpsa_noop(pdev) == 0) 3954 if (hpsa_noop(pdev) == 0)
3787 break; 3955 break;
@@ -3792,18 +3960,133 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3792 return 0; 3960 return 0;
3793} 3961}
3794 3962
3963static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
3964{
3965 h->cmd_pool_bits = kzalloc(
3966 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
3967 sizeof(unsigned long), GFP_KERNEL);
3968 h->cmd_pool = pci_alloc_consistent(h->pdev,
3969 h->nr_cmds * sizeof(*h->cmd_pool),
3970 &(h->cmd_pool_dhandle));
3971 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3972 h->nr_cmds * sizeof(*h->errinfo_pool),
3973 &(h->errinfo_pool_dhandle));
3974 if ((h->cmd_pool_bits == NULL)
3975 || (h->cmd_pool == NULL)
3976 || (h->errinfo_pool == NULL)) {
3977 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
3978 return -ENOMEM;
3979 }
3980 return 0;
3981}
3982
3983static void hpsa_free_cmd_pool(struct ctlr_info *h)
3984{
3985 kfree(h->cmd_pool_bits);
3986 if (h->cmd_pool)
3987 pci_free_consistent(h->pdev,
3988 h->nr_cmds * sizeof(struct CommandList),
3989 h->cmd_pool, h->cmd_pool_dhandle);
3990 if (h->errinfo_pool)
3991 pci_free_consistent(h->pdev,
3992 h->nr_cmds * sizeof(struct ErrorInfo),
3993 h->errinfo_pool,
3994 h->errinfo_pool_dhandle);
3995}
3996
3997static int hpsa_request_irq(struct ctlr_info *h,
3998 irqreturn_t (*msixhandler)(int, void *),
3999 irqreturn_t (*intxhandler)(int, void *))
4000{
4001 int rc;
4002
4003 if (h->msix_vector || h->msi_vector)
4004 rc = request_irq(h->intr[h->intr_mode], msixhandler,
4005 IRQF_DISABLED, h->devname, h);
4006 else
4007 rc = request_irq(h->intr[h->intr_mode], intxhandler,
4008 IRQF_DISABLED, h->devname, h);
4009 if (rc) {
4010 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
4011 h->intr[h->intr_mode], h->devname);
4012 return -ENODEV;
4013 }
4014 return 0;
4015}
4016
4017static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
4018{
4019 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
4020 HPSA_RESET_TYPE_CONTROLLER)) {
4021 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4022 return -EIO;
4023 }
4024
4025 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4026 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4027 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4028 return -1;
4029 }
4030
4031 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4032 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4033 dev_warn(&h->pdev->dev, "Board failed to become ready "
4034 "after soft reset.\n");
4035 return -1;
4036 }
4037
4038 return 0;
4039}
4040
4041static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4042{
4043 free_irq(h->intr[h->intr_mode], h);
4044#ifdef CONFIG_PCI_MSI
4045 if (h->msix_vector)
4046 pci_disable_msix(h->pdev);
4047 else if (h->msi_vector)
4048 pci_disable_msi(h->pdev);
4049#endif /* CONFIG_PCI_MSI */
4050 hpsa_free_sg_chain_blocks(h);
4051 hpsa_free_cmd_pool(h);
4052 kfree(h->blockFetchTable);
4053 pci_free_consistent(h->pdev, h->reply_pool_size,
4054 h->reply_pool, h->reply_pool_dhandle);
4055 if (h->vaddr)
4056 iounmap(h->vaddr);
4057 if (h->transtable)
4058 iounmap(h->transtable);
4059 if (h->cfgtable)
4060 iounmap(h->cfgtable);
4061 pci_release_regions(h->pdev);
4062 kfree(h);
4063}
4064
3795static int __devinit hpsa_init_one(struct pci_dev *pdev, 4065static int __devinit hpsa_init_one(struct pci_dev *pdev,
3796 const struct pci_device_id *ent) 4066 const struct pci_device_id *ent)
3797{ 4067{
3798 int dac, rc; 4068 int dac, rc;
3799 struct ctlr_info *h; 4069 struct ctlr_info *h;
4070 int try_soft_reset = 0;
4071 unsigned long flags;
3800 4072
3801 if (number_of_controllers == 0) 4073 if (number_of_controllers == 0)
3802 printk(KERN_INFO DRIVER_NAME "\n"); 4074 printk(KERN_INFO DRIVER_NAME "\n");
3803 4075
3804 rc = hpsa_init_reset_devices(pdev); 4076 rc = hpsa_init_reset_devices(pdev);
3805 if (rc) 4077 if (rc) {
3806 return rc; 4078 if (rc != -ENOTSUPP)
4079 return rc;
4080 /* If the reset fails in a particular way (it has no way to do
4081 * a proper hard reset, so returns -ENOTSUPP) we can try to do
4082 * a soft reset once we get the controller configured up to the
4083 * point that it can accept a command.
4084 */
4085 try_soft_reset = 1;
4086 rc = 0;
4087 }
4088
4089reinit_after_soft_reset:
3807 4090
3808 /* Command structures must be aligned on a 32-byte boundary because 4091 /* Command structures must be aligned on a 32-byte boundary because
3809 * the 5 lower bits of the address are used by the hardware. and by 4092 * the 5 lower bits of the address are used by the hardware. and by
@@ -3847,54 +4130,82 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3847 /* make sure the board interrupts are off */ 4130 /* make sure the board interrupts are off */
3848 h->access.set_intr_mask(h, HPSA_INTR_OFF); 4131 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3849 4132
3850 if (h->msix_vector || h->msi_vector) 4133 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
3851 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
3852 IRQF_DISABLED, h->devname, h);
3853 else
-	rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx,
-			IRQF_DISABLED, h->devname, h);
-	if (rc) {
-		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
-		       h->intr[h->intr_mode], h->devname);
 		goto clean2;
-	}
-
 	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
 	       h->devname, pdev->device,
 	       h->intr[h->intr_mode], dac ? "" : " not");
-
-	h->cmd_pool_bits =
-	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
-		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
-	h->cmd_pool = pci_alloc_consistent(h->pdev,
-		    h->nr_cmds * sizeof(*h->cmd_pool),
-		    &(h->cmd_pool_dhandle));
-	h->errinfo_pool = pci_alloc_consistent(h->pdev,
-		    h->nr_cmds * sizeof(*h->errinfo_pool),
-		    &(h->errinfo_pool_dhandle));
-	if ((h->cmd_pool_bits == NULL)
-	    || (h->cmd_pool == NULL)
-	    || (h->errinfo_pool == NULL)) {
-		dev_err(&pdev->dev, "out of memory");
-		rc = -ENOMEM;
+	if (hpsa_allocate_cmd_pool(h))
 		goto clean4;
-	}
 	if (hpsa_allocate_sg_chain_blocks(h))
 		goto clean4;
 	init_waitqueue_head(&h->scan_wait_queue);
 	h->scan_finished = 1; /* no scan currently in progress */
 
 	pci_set_drvdata(pdev, h);
-	memset(h->cmd_pool_bits, 0,
-	       ((h->nr_cmds + BITS_PER_LONG -
-		 1) / BITS_PER_LONG) * sizeof(unsigned long));
+	h->ndevices = 0;
+	h->scsi_host = NULL;
+	spin_lock_init(&h->devlock);
+	hpsa_put_ctlr_into_performant_mode(h);
+
+	/* At this point, the controller is ready to take commands.
+	 * Now, if reset_devices and the hard reset didn't work, try
+	 * the soft reset and see if that works.
+	 */
+	if (try_soft_reset) {
+
+		/* This is kind of gross.  We may or may not get a completion
+		 * from the soft reset command, and if we do, then the value
+		 * from the fifo may or may not be valid.  So, we wait 10 secs
+		 * after the reset throwing away any completions we get during
+		 * that time.  Unregister the interrupt handler and register
+		 * fake ones to scoop up any residual completions.
+		 */
+		spin_lock_irqsave(&h->lock, flags);
+		h->access.set_intr_mask(h, HPSA_INTR_OFF);
+		spin_unlock_irqrestore(&h->lock, flags);
+		free_irq(h->intr[h->intr_mode], h);
+		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
+					hpsa_intx_discard_completions);
+		if (rc) {
+			dev_warn(&h->pdev->dev, "Failed to request_irq after "
+				"soft reset.\n");
+			goto clean4;
+		}
+
+		rc = hpsa_kdump_soft_reset(h);
+		if (rc)
+			/* Neither hard nor soft reset worked, we're hosed. */
+			goto clean4;
+
+		dev_info(&h->pdev->dev, "Board READY.\n");
+		dev_info(&h->pdev->dev,
+			"Waiting for stale completions to drain.\n");
+		h->access.set_intr_mask(h, HPSA_INTR_ON);
+		msleep(10000);
+		h->access.set_intr_mask(h, HPSA_INTR_OFF);
+
+		rc = controller_reset_failed(h->cfgtable);
+		if (rc)
+			dev_info(&h->pdev->dev,
+				"Soft reset appears to have failed.\n");
+
+		/* since the controller's reset, we have to go back and re-init
+		 * everything.  Easiest to just forget what we've done and do it
+		 * all over again.
+		 */
+		hpsa_undo_allocations_after_kdump_soft_reset(h);
+		try_soft_reset = 0;
+		if (rc)
+			/* don't go to clean4, we already unallocated */
+			return -ENODEV;
 
-	hpsa_scsi_setup(h);
+		goto reinit_after_soft_reset;
+	}
 
 	/* Turn the interrupts on so we can service requests */
 	h->access.set_intr_mask(h, HPSA_INTR_ON);
 
-	hpsa_put_ctlr_into_performant_mode(h);
 	hpsa_hba_inquiry(h);
 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
 	h->busy_initializing = 0;
@@ -3902,16 +4213,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
 
 clean4:
 	hpsa_free_sg_chain_blocks(h);
-	kfree(h->cmd_pool_bits);
-	if (h->cmd_pool)
-		pci_free_consistent(h->pdev,
-			    h->nr_cmds * sizeof(struct CommandList),
-			    h->cmd_pool, h->cmd_pool_dhandle);
-	if (h->errinfo_pool)
-		pci_free_consistent(h->pdev,
-			    h->nr_cmds * sizeof(struct ErrorInfo),
-			    h->errinfo_pool,
-			    h->errinfo_pool_dhandle);
+	hpsa_free_cmd_pool(h);
 	free_irq(h->intr[h->intr_mode], h);
 clean2:
 clean1:
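The hunk above replaces the open-coded allocation of the command/error pools (and the matching inline cleanup under clean4:) with hpsa_allocate_cmd_pool()/hpsa_free_cmd_pool(). A plausible shape for the allocation helper, inferred purely from the inline code it replaces; the actual function in the patch may differ, and kzalloc here folds in the memset the old code did later:

/* Sketch only: reconstructed from the deleted inline code above. */
static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * sizeof(unsigned long),
		GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->cmd_pool), &h->cmd_pool_dhandle);
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->errinfo_pool), &h->errinfo_pool_dhandle);
	if (!h->cmd_pool_bits || !h->cmd_pool || !h->errinfo_pool) {
		dev_err(&h->pdev->dev, "out of memory");
		return -ENOMEM;	/* caller jumps to clean4 */
	}
	return 0;
}

Factoring the pool setup out lets both the normal init path and the soft-reset re-init path reuse one allocator and one symmetric free routine.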
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 621a1530054a..6d8dcd4dd06b 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -127,10 +127,12 @@ struct ctlr_info {
 };
 #define HPSA_ABORT_MSG 0
 #define HPSA_DEVICE_RESET_MSG 1
-#define HPSA_BUS_RESET_MSG 2
-#define HPSA_HOST_RESET_MSG 3
+#define HPSA_RESET_TYPE_CONTROLLER 0x00
+#define HPSA_RESET_TYPE_BUS 0x01
+#define HPSA_RESET_TYPE_TARGET 0x03
+#define HPSA_RESET_TYPE_LUN 0x04
 #define HPSA_MSG_SEND_RETRY_LIMIT 10
-#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
+#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
 
 /* Maximum time in seconds driver will wait for command completions
  * when polling before giving up.
@@ -155,7 +157,7 @@ struct ctlr_info {
  * HPSA_BOARD_READY_ITERATIONS are derived from those.
  */
 #define HPSA_BOARD_READY_WAIT_SECS (120)
-#define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
+#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
 #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define HPSA_BOARD_READY_POLL_INTERVAL \
 	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
@@ -212,6 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h,
 	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
 		c->Header.Tag.lower);
 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	(void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
 	h->commands_outstanding++;
 	if (h->commands_outstanding > h->max_outstanding)
 		h->max_outstanding = h->commands_outstanding;
@@ -227,10 +230,12 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
 	if (val) { /* Turn interrupts on */
 		h->interrupts_enabled = 1;
 		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
 	} else { /* Turn them off */
 		h->interrupts_enabled = 0;
 		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
 	}
 }
 
@@ -239,10 +244,12 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
 	if (val) { /* turn on interrupts */
 		h->interrupts_enabled = 1;
 		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
 	} else {
 		h->interrupts_enabled = 0;
 		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
 	}
 }
 
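Every writel() in these hunks gains a discarded readl() of the same register. PCI writes are posted: a bridge may buffer the write and complete it later, so command submission or interrupt masking could take effect after the CPU has moved on. A read from the same device cannot complete until prior posted writes have, which is the standard flush idiom. A minimal sketch of the pattern, kernel context assumed; the register name is a placeholder, not an hpsa symbol:

/* Posted-write flush sketch: force the MMIO write to reach the device
 * before returning.  "reg" is an illustrative __iomem pointer.
 */
static inline void reg_write_flushed(void __iomem *reg, u32 val)
{
	writel(val, reg);	/* may be posted (buffered) by the bridge */
	(void) readl(reg);	/* read forces the posted write to complete */
}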
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 18464900e761..55d741b019db 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -101,6 +101,7 @@
 #define CFGTBL_ChangeReq        0x00000001l
 #define CFGTBL_AccCmds          0x00000001l
 #define DOORBELL_CTLR_RESET     0x00000004l
+#define DOORBELL_CTLR_RESET2    0x00000020l
 
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Performant 0x00000004l
@@ -256,14 +257,6 @@ struct ErrorInfo {
 #define CMD_IOCTL_PEND  0x01
 #define CMD_SCSI	0x03
 
-/* This structure needs to be divisible by 32 for new
- * indexing method and performant mode.
- */
-#define PAD32 32
-#define PAD64DIFF 0
-#define USEEXTRA ((sizeof(void *) - 4)/4)
-#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
-
 #define DIRECT_LOOKUP_SHIFT 5
 #define DIRECT_LOOKUP_BIT 0x10
 #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
@@ -345,6 +338,8 @@ struct CfgTable {
 	u8		reserved[0x78 - 0x58];
 	u32		misc_fw_support; /* offset 0x78 */
 #define			MISC_FW_DOORBELL_RESET (0x02)
+#define			MISC_FW_DOORBELL_RESET2 (0x010)
+	u8		driver_version[32];
 };
 
 #define NUM_BLOCKFETCH_ENTRIES 8
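DOORBELL_CTLR_RESET2 pairs with the new MISC_FW_DOORBELL_RESET2 capability bit in the config table: firmware advertises which reset doorbell it supports, and the driver should pick accordingly. A hedged sketch of that selection, not code from this patch; field and macro names follow the hunks above:

/* Illustrative only: choose the newer doorbell reset when firmware
 * advertises it, else fall back to the original method.
 */
static u32 hpsa_choose_reset_doorbell(struct CfgTable __iomem *cfgtable)
{
	u32 support = readl(&cfgtable->misc_fw_support);

	if (support & MISC_FW_DOORBELL_RESET2)
		return DOORBELL_CTLR_RESET2;	/* newer reset method */
	return DOORBELL_CTLR_RESET;		/* original reset method */
}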
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 041958453e2a..3d391dc3f11f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1849,8 +1849,7 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
 		rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
 		if (!rc)
 			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
-		if (!rc)
-			rc = vio_enable_interrupts(to_vio_dev(hostdata->dev));
+		vio_enable_interrupts(to_vio_dev(hostdata->dev));
 	} else if (hostdata->reenable_crq) {
 		smp_rmb();
 		action = "enable";
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 6568aab745a0..92109b126391 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -343,7 +343,7 @@ static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
 	instance = cmd->device->host;
 	hostdata = (struct IN2000_hostdata *) instance->hostdata;
 
-	DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x-%ld(", cmd->cmnd[0], cmd->serial_number))
+	DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0]))
 
 /* Set up a few fields in the Scsi_Cmnd structure for our own use:
  *  - host_scribble is the pointer to the next cmd in the input queue
@@ -427,7 +427,7 @@ static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
 
 	in2000_execute(cmd->device->host);
 
-	DB(DB_QUEUE_COMMAND, printk(")Q-%ld ", cmd->serial_number))
+	DB(DB_QUEUE_COMMAND, printk(")Q "))
 	return 0;
 }
 
@@ -705,7 +705,7 @@ static void in2000_execute(struct Scsi_Host *instance)
 	 * to search the input_Q again...
 	 */
 
-	DB(DB_EXECUTE, printk("%s%ld)EX-2 ", (cmd->SCp.phase) ? "d:" : "", cmd->serial_number))
+	DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
 
 }
 
@@ -1149,7 +1149,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
 	case CSR_XFER_DONE | PHS_COMMAND:
 	case CSR_UNEXP | PHS_COMMAND:
 	case CSR_SRV_REQ | PHS_COMMAND:
-		DB(DB_INTR, printk("CMND-%02x,%ld", cmd->cmnd[0], cmd->serial_number))
+		DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
 		transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
 		hostdata->state = S_CONNECTED;
 		break;
@@ -1191,7 +1191,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
 		switch (msg) {
 
 		case COMMAND_COMPLETE:
-			DB(DB_INTR, printk("CCMP-%ld", cmd->serial_number))
+			DB(DB_INTR, printk("CCMP"))
 			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
 			hostdata->state = S_PRE_CMP_DISC;
 			break;
@@ -1329,7 +1329,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
 
 		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
 		if (phs == 0x60) {
-			DB(DB_INTR, printk("SX-DONE-%ld", cmd->serial_number))
+			DB(DB_INTR, printk("SX-DONE"))
 			cmd->SCp.Message = COMMAND_COMPLETE;
 			lun = read_3393(hostdata, WD_TARGET_LUN);
 			DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
@@ -1350,7 +1350,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
 
 			in2000_execute(instance);
 		} else {
-			printk("%02x:%02x:%02x-%ld: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs, cmd->serial_number);
+			printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
 		}
 		break;
 
@@ -1417,7 +1417,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
 				spin_unlock_irqrestore(instance->host_lock, flags);
 				return IRQ_HANDLED;
 			}
-			DB(DB_INTR, printk("UNEXP_DISC-%ld", cmd->serial_number))
+			DB(DB_INTR, printk("UNEXP_DISC"))
 			hostdata->connected = NULL;
 			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
 			hostdata->state = S_UNCONNECTED;
@@ -1442,7 +1442,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
 		 */
 
 		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
-		DB(DB_INTR, printk("DISC-%ld", cmd->serial_number))
+		DB(DB_INTR, printk("DISC"))
 		if (cmd == NULL) {
 			printk(" - Already disconnected! ");
 			hostdata->state = S_UNCONNECTED;
@@ -1575,7 +1575,6 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id)
 			} else
 				hostdata->state = S_CONNECTED;
 
-			DB(DB_INTR, printk("-%ld", cmd->serial_number))
 			break;
 
 		default:
@@ -1704,7 +1703,7 @@ static int __in2000_abort(Scsi_Cmnd * cmd)
 			prev->host_scribble = cmd->host_scribble;
 			cmd->host_scribble = NULL;
 			cmd->result = DID_ABORT << 16;
-			printk(KERN_WARNING "scsi%d: Abort - removing command %ld from input_Q. ", instance->host_no, cmd->serial_number);
+			printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
 			cmd->scsi_done(cmd);
 			return SUCCESS;
 		}
@@ -1725,7 +1724,7 @@ static int __in2000_abort(Scsi_Cmnd * cmd)
 
 	if (hostdata->connected == cmd) {
 
-		printk(KERN_WARNING "scsi%d: Aborting connected command %ld - ", instance->host_no, cmd->serial_number);
+		printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
 
 		printk("sending wd33c93 ABORT command - ");
 		write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
@@ -2270,7 +2269,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
 		strcat(bp, "\nconnected: ");
 		if (hd->connected) {
 			cmd = (Scsi_Cmnd *) hd->connected;
-			sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+			sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
 			strcat(bp, tbuf);
 		}
 	}
@@ -2278,7 +2277,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
 		strcat(bp, "\ninput_Q: ");
 		cmd = (Scsi_Cmnd *) hd->input_Q;
 		while (cmd) {
-			sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+			sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
 			strcat(bp, tbuf);
 			cmd = (Scsi_Cmnd *) cmd->host_scribble;
 		}
@@ -2287,7 +2286,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
 		strcat(bp, "\ndisconnected_Q:");
 		cmd = (Scsi_Cmnd *) hd->disconnected_Q;
 		while (cmd) {
-			sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+			sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
 			strcat(bp, tbuf);
 			cmd = (Scsi_Cmnd *) cmd->host_scribble;
 		}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0621238fac4a..12868ca46110 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -60,6 +60,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
@@ -2717,13 +2718,18 @@ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
 			unsigned long pci_address, u32 length)
 {
 	int bytes_copied = 0;
-	int cur_len, rc, rem_len, rem_page_len;
+	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
 	__be32 *page;
 	unsigned long lock_flags = 0;
 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
 
+	if (ioa_cfg->sis64)
+		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
+	else
+		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
+
 	while (bytes_copied < length &&
-	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
+	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
 		if (ioa_dump->page_offset >= PAGE_SIZE ||
 		    ioa_dump->page_offset == 0) {
 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
@@ -2885,8 +2891,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 	unsigned long lock_flags = 0;
 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
-	u32 num_entries, start_off, end_off;
-	u32 bytes_to_copy, bytes_copied, rc;
+	u32 num_entries, max_num_entries, start_off, end_off;
+	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
 	struct ipr_sdt *sdt;
 	int valid = 1;
 	int i;
@@ -2947,8 +2953,18 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 	   on entries in this table */
 	sdt = &ioa_dump->sdt;
 
+	if (ioa_cfg->sis64) {
+		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
+		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
+	} else {
+		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
+		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
+	}
+
+	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
+			(max_num_entries * sizeof(struct ipr_sdt_entry));
 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
-					sizeof(struct ipr_sdt) / sizeof(__be32));
+					bytes_to_copy / sizeof(__be32));
 
 	/* Smart Dump table is ready to use and the first entry is valid */
 	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
@@ -2964,13 +2980,20 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 
 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
 
-	if (num_entries > IPR_NUM_SDT_ENTRIES)
-		num_entries = IPR_NUM_SDT_ENTRIES;
+	if (num_entries > max_num_entries)
+		num_entries = max_num_entries;
+
+	/* Update dump length to the actual data to be copied */
+	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
+	if (ioa_cfg->sis64)
+		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
+	else
+		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
 	for (i = 0; i < num_entries; i++) {
-		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
+		if (ioa_dump->hdr.len > max_dump_size) {
 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
 			break;
 		}
@@ -2989,7 +3012,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 			valid = 0;
 		}
 		if (valid) {
-			if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
+			if (bytes_to_copy > max_dump_size) {
 				sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
 				continue;
 			}
@@ -3044,6 +3067,7 @@ static void ipr_release_dump(struct kref *kref)
 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
 
+	vfree(dump->ioa_dump.ioa_data);
 	kfree(dump);
 	LEAVE;
 }
@@ -3835,7 +3859,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 	struct ipr_dump *dump;
 	unsigned long lock_flags = 0;
 	char *src;
-	int len;
+	int len, sdt_end;
 	size_t rc = count;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -3875,9 +3899,17 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 
 	off -= sizeof(dump->driver_dump);
 
-	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
-		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
-			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
+	if (ioa_cfg->sis64)
+		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
+			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
+			   sizeof(struct ipr_sdt_entry));
+	else
+		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
+			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
+
+	if (count && off < sdt_end) {
+		if (off + count > sdt_end)
+			len = sdt_end - off;
 		else
 			len = count;
 		src = (u8 *)&dump->ioa_dump + off;
@@ -3887,7 +3919,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 		count -= len;
 	}
 
-	off -= offsetof(struct ipr_ioa_dump, ioa_data);
+	off -= sdt_end;
 
 	while (count) {
 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
@@ -3916,6 +3948,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_dump *dump;
+	__be32 **ioa_data;
 	unsigned long lock_flags = 0;
 
 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
@@ -3925,6 +3958,19 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 		return -ENOMEM;
 	}
 
+	if (ioa_cfg->sis64)
+		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+	else
+		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+
+	if (!ioa_data) {
+		ipr_err("Dump memory allocation failed\n");
+		kfree(dump);
+		return -ENOMEM;
+	}
+
+	dump->ioa_dump.ioa_data = ioa_data;
+
 	kref_init(&dump->kref);
 	dump->ioa_cfg = ioa_cfg;
 
@@ -3932,6 +3978,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 
 	if (INACTIVE != ioa_cfg->sdt_state) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		vfree(dump->ioa_dump.ioa_data);
 		kfree(dump);
 		return 0;
 	}
@@ -4953,9 +5000,35 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
  *	IRQ_NONE / IRQ_HANDLED
  **/
 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
-					      volatile u32 int_reg)
+					      u32 int_reg)
 {
 	irqreturn_t rc = IRQ_HANDLED;
+	u32 int_mask_reg;
+
+	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+	int_reg &= ~int_mask_reg;
+
+	/* If an interrupt on the adapter did not occur, ignore it.
+	 * Or in the case of SIS 64, check for a stage change interrupt.
+	 */
+	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
+		if (ioa_cfg->sis64) {
+			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
+
+				/* clear stage change */
+				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
+				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+				list_del(&ioa_cfg->reset_cmd->queue);
+				del_timer(&ioa_cfg->reset_cmd->timer);
+				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+				return IRQ_HANDLED;
+			}
+		}
+
+		return IRQ_NONE;
+	}
 
 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
 		/* Mask the interrupt */
@@ -4968,6 +5041,13 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
 		list_del(&ioa_cfg->reset_cmd->queue);
 		del_timer(&ioa_cfg->reset_cmd->timer);
 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
+		if (ipr_debug && printk_ratelimit())
+			dev_err(&ioa_cfg->pdev->dev,
+				"Spurious interrupt detected. 0x%08X\n", int_reg);
+		writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+		return IRQ_NONE;
 	} else {
 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
 			ioa_cfg->ioa_unit_checked = 1;
@@ -5016,10 +5096,11 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 {
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
 	unsigned long lock_flags = 0;
-	volatile u32 int_reg, int_mask_reg;
+	u32 int_reg = 0;
 	u32 ioasc;
 	u16 cmd_index;
 	int num_hrrq = 0;
+	int irq_none = 0;
 	struct ipr_cmnd *ipr_cmd;
 	irqreturn_t rc = IRQ_NONE;
 
@@ -5031,33 +5112,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 		return IRQ_NONE;
 	}
 
-	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
-	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
-
-	/* If an interrupt on the adapter did not occur, ignore it.
-	 * Or in the case of SIS 64, check for a stage change interrupt.
-	 */
-	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
-		if (ioa_cfg->sis64) {
-			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
-			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
-			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
-
-				/* clear stage change */
-				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
-				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
-				list_del(&ioa_cfg->reset_cmd->queue);
-				del_timer(&ioa_cfg->reset_cmd->timer);
-				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
-				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-				return IRQ_HANDLED;
-			}
-		}
-
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		return IRQ_NONE;
-	}
-
 	while (1) {
 		ipr_cmd = NULL;
 
@@ -5097,7 +5151,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 			/* Clear the PCI interrupt */
 			do {
 				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
-				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
+				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
 					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
 
@@ -5107,6 +5161,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 			return IRQ_HANDLED;
 		}
 
+	} else if (rc == IRQ_NONE && irq_none == 0) {
+		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+		irq_none++;
 	} else
 		break;
 	}
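The irq_none change above gives the handler one extra chance before declaring a spurious interrupt: on the first pass that finds no completion, it re-reads the interrupt register and re-checks the queue. A compressed sketch of just that idiom, under the assumption that pending_work() stands in for the real HRRQ toggle-bit check (it is not an ipr function):

/* Illustrative shape only, not the full ipr_isr() body. */
static irqreturn_t isr_with_one_retry(struct ipr_ioa_cfg *ioa_cfg)
{
	int irq_none = 0;
	irqreturn_t rc = IRQ_NONE;

	while (1) {
		if (pending_work(ioa_cfg)) {		/* hypothetical helper */
			rc = IRQ_HANDLED;		/* ... process completion ... */
		} else if (rc == IRQ_NONE && irq_none == 0) {
			/* re-sample once before reporting a spurious IRQ */
			(void) readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else
			break;
	}
	return rc;
}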
@@ -5143,7 +5200,8 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
 
 	nseg = scsi_dma_map(scsi_cmd);
 	if (nseg < 0) {
-		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+		if (printk_ratelimit())
+			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
 		return -1;
 	}
 
@@ -5773,7 +5831,8 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
 	}
 
 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
-	ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
+	if (ipr_is_gscsi(res))
+		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
 	ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
 	ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
 	}
@@ -7516,7 +7575,7 @@ static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-	volatile u32 int_reg;
+	u32 int_reg;
 
 	ENTER;
 	ioa_cfg->pdev->state_saved = true;
@@ -7555,7 +7614,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 	ipr_cmd->job_step = ipr_reset_enable_ioa;
 
 	if (GET_DUMP == ioa_cfg->sdt_state) {
-		ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
+		if (ioa_cfg->sis64)
+			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
+		else
+			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
 		ipr_cmd->job_step = ipr_reset_wait_for_dump;
 		schedule_work(&ioa_cfg->work_q);
 		return IPR_RC_JOB_RETURN;
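Throughout ipr.c the patch repeats the same ioa_cfg->sis64 test to pick FMT2 (SIS-32) or FMT3 (SIS-64) dump limits. The patch keeps the tests inline; a hypothetical consolidation (not in the patch) shows the pairings compactly, using the constants from the ipr.h hunks below:

/* Not from the patch: one place to see the FMT2 vs FMT3 limits.
 * FMT2: 4 MB dump, 511 SDT entries, 15 s dump timeout.
 * FMT3: 32 MB dump, 0xFFF SDT entries, 40 s dump timeout.
 */
static u32 ipr_max_dump_size(const struct ipr_ioa_cfg *ioa_cfg)
{
	return ioa_cfg->sis64 ? IPR_FMT3_MAX_IOA_DUMP_SIZE
			      : IPR_FMT2_MAX_IOA_DUMP_SIZE;
}

static u32 ipr_max_sdt_entries(const struct ipr_ioa_cfg *ioa_cfg)
{
	return ioa_cfg->sis64 ? IPR_FMT3_NUM_SDT_ENTRIES
			      : IPR_FMT2_NUM_SDT_ENTRIES;
}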
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 13f425fb8851..f93f8637c5a1 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -38,8 +38,8 @@
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.5.1"
-#define IPR_DRIVER_DATE "(August 10, 2010)"
+#define IPR_DRIVER_VERSION "2.5.2"
+#define IPR_DRIVER_DATE "(April 27, 2011)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -217,7 +217,8 @@
 #define IPR_CHECK_FOR_RESET_TIMEOUT		(HZ / 10)
 #define IPR_WAIT_FOR_BIST_TIMEOUT		(2 * HZ)
 #define IPR_PCI_RESET_TIMEOUT			(HZ / 2)
-#define IPR_DUMP_TIMEOUT			(15 * HZ)
+#define IPR_SIS32_DUMP_TIMEOUT			(15 * HZ)
+#define IPR_SIS64_DUMP_TIMEOUT			(40 * HZ)
 #define IPR_DUMP_DELAY_SECONDS			4
 #define IPR_DUMP_DELAY_TIMEOUT			(IPR_DUMP_DELAY_SECONDS * HZ)
 
@@ -285,9 +286,12 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
 /*
  * Dump literals
  */
-#define IPR_MAX_IOA_DUMP_SIZE			(4 * 1024 * 1024)
-#define IPR_NUM_SDT_ENTRIES			511
-#define IPR_MAX_NUM_DUMP_PAGES			((IPR_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
+#define IPR_FMT2_MAX_IOA_DUMP_SIZE		(4 * 1024 * 1024)
+#define IPR_FMT3_MAX_IOA_DUMP_SIZE		(32 * 1024 * 1024)
+#define IPR_FMT2_NUM_SDT_ENTRIES		511
+#define IPR_FMT3_NUM_SDT_ENTRIES		0xFFF
+#define IPR_FMT2_MAX_NUM_DUMP_PAGES	((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
+#define IPR_FMT3_MAX_NUM_DUMP_PAGES	((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
 
 /*
  * Misc literals
@@ -474,7 +478,7 @@ struct ipr_cmd_pkt {
 
 	u8 flags_lo;
 #define IPR_FLAGS_LO_ALIGNED_BFR		0x20
 #define IPR_FLAGS_LO_DELAY_AFTER_RST		0x10
 #define IPR_FLAGS_LO_UNTAGGED_TASK		0x00
 #define IPR_FLAGS_LO_SIMPLE_TASK		0x02
 #define IPR_FLAGS_LO_ORDERED_TASK		0x04
@@ -1164,7 +1168,7 @@ struct ipr_sdt_header {
 
 struct ipr_sdt {
 	struct ipr_sdt_header hdr;
-	struct ipr_sdt_entry entry[IPR_NUM_SDT_ENTRIES];
+	struct ipr_sdt_entry entry[IPR_FMT3_NUM_SDT_ENTRIES];
 }__attribute__((packed, aligned (4)));
 
 struct ipr_uc_sdt {
@@ -1608,7 +1612,7 @@ struct ipr_driver_dump {
 struct ipr_ioa_dump {
 	struct ipr_dump_entry_header hdr;
 	struct ipr_sdt sdt;
-	__be32 *ioa_data[IPR_MAX_NUM_DUMP_PAGES];
+	__be32 **ioa_data;
 	u32 reserved;
 	u32 next_page_index;
 	u32 page_offset;
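The ioa_data change explains the vmalloc() in ipr_alloc_dump(): with FMT3 the dump can span 32 MB, so on 4 KB pages the page-pointer table needs (32 * 1024 * 1024 / 4096) + 1 = 8193 entries, roughly 64 KB of pointers on a 64-bit kernel. That is too large to embed in the struct or to kmalloc reliably, hence a separately vmalloc'd array. An illustrative sizing only, using the constants above:

/* Illustrative: allocate the FMT3 page-pointer table.
 * 8193 pointers * 8 bytes ~= 64 KB, so vmalloc rather than kmalloc.
 */
static __be32 **alloc_dump_page_ptrs(void)
{
	return vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
}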
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5b799a37ad09..2a3a4720a771 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -57,9 +57,6 @@ static struct kmem_cache *scsi_pkt_cachep;
 #define FC_SRB_READ		(1 << 1)
 #define FC_SRB_WRITE		(1 << 0)
 
-/* constant added to e_d_tov timeout to get rec_tov value */
-#define REC_TOV_CONST		1
-
 /*
  * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
  */
@@ -248,7 +245,7 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
 /**
  * fc_fcp_timer_set() - Start a timer for a fcp_pkt
  * @fsp:   The FCP packet to start a timer for
- * @delay: The timeout period for the timer
+ * @delay: The timeout period in jiffies
  */
 static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
 {
@@ -335,22 +332,23 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
 /**
  * fc_fcp_can_queue_ramp_up() - increases can_queue
  * @lport: lport to ramp up can_queue
- *
- * Locking notes: Called with Scsi_Host lock held
  */
 static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
 {
 	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+	unsigned long flags;
 	int can_queue;
 
+	spin_lock_irqsave(lport->host->host_lock, flags);
+
 	if (si->last_can_queue_ramp_up_time &&
 	    (time_before(jiffies, si->last_can_queue_ramp_up_time +
 			 FC_CAN_QUEUE_PERIOD)))
-		return;
+		goto unlock;
 
 	if (time_before(jiffies, si->last_can_queue_ramp_down_time +
 			FC_CAN_QUEUE_PERIOD))
-		return;
+		goto unlock;
 
 	si->last_can_queue_ramp_up_time = jiffies;
 
@@ -362,6 +360,9 @@ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
 	lport->host->can_queue = can_queue;
 	shost_printk(KERN_ERR, lport->host, "libfc: increased "
 		     "can_queue to %d.\n", can_queue);
+
+unlock:
+	spin_unlock_irqrestore(lport->host->host_lock, flags);
 }
 
 /**
@@ -373,18 +374,19 @@ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
  * commands complete or timeout, then try again with a reduced
  * can_queue. Eventually we will hit the point where we run
  * on all reserved structs.
- *
- * Locking notes: Called with Scsi_Host lock held
  */
 static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
 {
 	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+	unsigned long flags;
 	int can_queue;
 
+	spin_lock_irqsave(lport->host->host_lock, flags);
+
 	if (si->last_can_queue_ramp_down_time &&
 	    (time_before(jiffies, si->last_can_queue_ramp_down_time +
 			 FC_CAN_QUEUE_PERIOD)))
-		return;
+		goto unlock;
 
 	si->last_can_queue_ramp_down_time = jiffies;
 
@@ -395,6 +397,9 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
 	lport->host->can_queue = can_queue;
 	shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
 		     "Reducing can_queue to %d.\n", can_queue);
+
+unlock:
+	spin_unlock_irqrestore(lport->host->host_lock, flags);
 }
 
 /*
@@ -409,16 +414,13 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
 					    size_t len)
 {
 	struct fc_frame *fp;
-	unsigned long flags;
 
 	fp = fc_frame_alloc(lport, len);
 	if (likely(fp))
 		return fp;
 
 	/* error case */
-	spin_lock_irqsave(lport->host->host_lock, flags);
 	fc_fcp_can_queue_ramp_down(lport);
-	spin_unlock_irqrestore(lport->host->host_lock, flags);
 	return NULL;
 }
 
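These hunks move the host_lock from the one caller (fc_fcp_frame_alloc) into the ramp functions themselves, so every caller gets consistent protection of can_queue, and the early returns become goto unlock so the lock is always released. A minimal sketch of the pattern; adjust_can_queue is illustrative, not a libfc function:

/* Take the lock inside the function; funnel every exit through one label. */
static void adjust_can_queue(struct fc_lport *lport, int delta)
{
	unsigned long flags;

	spin_lock_irqsave(lport->host->host_lock, flags);
	if (lport->host->can_queue + delta < 1)
		goto unlock;			/* refuse to drop below 1 */
	lport->host->can_queue += delta;
unlock:
	spin_unlock_irqrestore(lport->host->host_lock, flags);
}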
@@ -1093,16 +1095,14 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 /**
  * get_fsp_rec_tov() - Helper function to get REC_TOV
  * @fsp: the FCP packet
+ *
+ * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
  */
 static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
 {
-	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rpriv;
-
-	rport = fsp->rport;
-	rpriv = rport->dd_data;
+	struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
 
-	return rpriv->e_d_tov + REC_TOV_CONST;
+	return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
 }
 
 /**
@@ -1122,7 +1122,6 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 	struct fc_rport_libfc_priv *rpriv;
 	const size_t len = sizeof(fsp->cdb_cmd);
 	int rc = 0;
-	unsigned int rec_tov;
 
 	if (fc_fcp_lock_pkt(fsp))
 		return 0;
@@ -1153,12 +1152,9 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 	fsp->seq_ptr = seq;
 	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */
 
-	rec_tov = get_fsp_rec_tov(fsp);
-
 	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
-
 	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
-		fc_fcp_timer_set(fsp, rec_tov);
+		fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 
 unlock:
 	fc_fcp_unlock_pkt(fsp);
@@ -1235,16 +1231,14 @@ static void fc_lun_reset_send(unsigned long data)
 {
 	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
 	struct fc_lport *lport = fsp->lp;
-	unsigned int rec_tov;
 
 	if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
 		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
 			return;
 		if (fc_fcp_lock_pkt(fsp))
 			return;
-		rec_tov = get_fsp_rec_tov(fsp);
 		setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
-		fc_fcp_timer_set(fsp, rec_tov);
+		fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 		fc_fcp_unlock_pkt(fsp);
 	}
 }
@@ -1536,12 +1530,11 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		}
 		fc_fcp_srr(fsp, r_ctl, offset);
 	} else if (e_stat & ESB_ST_SEQ_INIT) {
-		unsigned int rec_tov = get_fsp_rec_tov(fsp);
 		/*
 		 * The remote port has the initiative, so just
 		 * keep waiting for it to complete.
 		 */
-		fc_fcp_timer_set(fsp, rec_tov);
+		fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 	} else {
 
 		/*
@@ -1705,7 +1698,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 {
 	struct fc_fcp_pkt *fsp = arg;
 	struct fc_frame_header *fh;
-	unsigned int rec_tov;
 
 	if (IS_ERR(fp)) {
 		fc_fcp_srr_error(fsp, fp);
@@ -1732,8 +1724,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	switch (fc_frame_payload_op(fp)) {
 	case ELS_LS_ACC:
 		fsp->recov_retry = 0;
-		rec_tov = get_fsp_rec_tov(fsp);
-		fc_fcp_timer_set(fsp, rec_tov);
+		fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 		break;
 	case ELS_LS_RJT:
 	default:
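The get_fsp_rec_tov() rewrite is a units fix as much as a cleanup: e_d_tov is carried in milliseconds, but fc_fcp_timer_set() (now documented as taking jiffies) fed the value straight to a timer, so the old "e_d_tov + REC_TOV_CONST" mixed units. The new form converts explicitly and adds one second as HZ. A worked check with example numbers only:

/* e_d_tov = 2000 ms, HZ = 250:
 * msecs_to_jiffies(2000) + HZ = 500 + 250 = 750 jiffies = 3 seconds.
 */
static unsigned int example_rec_tov(unsigned int e_d_tov_ms)
{
	return msecs_to_jiffies(e_d_tov_ms) + HZ;
}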
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 906bbcad0e2d..389ab80aef0a 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1590,7 +1590,6 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
  */
 int fc_lport_config(struct fc_lport *lport)
 {
-	INIT_LIST_HEAD(&lport->ema_list);
 	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
 	mutex_init(&lport->lp_mutex);
 
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 60e98a62f308..02d53d89534f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -805,6 +805,8 @@ struct lpfc_hba {
 	struct dentry *idiag_root;
 	struct dentry *idiag_pci_cfg;
 	struct dentry *idiag_que_info;
+	struct dentry *idiag_que_acc;
+	struct dentry *idiag_drb_acc;
 #endif
 
 	/* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 77b2871d96b7..37e2a1272f86 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2426,6 +2426,7 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
 	uint32_t size;
 	unsigned long flags;
 	uint8_t *to;
@@ -2469,9 +2470,8 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 		memcpy(to, from, size);
 	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
 		(pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
-		struct lpfc_mbx_nembed_cmd *nembed_sge =
-			(struct lpfc_mbx_nembed_cmd *)
-			&pmboxq->u.mb.un.varWords[0];
+		nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+			&pmboxq->u.mb.un.varWords[0];
 
 		from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
 			virt;
@@ -2496,16 +2496,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 					    job->reply_payload.sg_cnt,
 					    from, size);
 		job->reply->result = 0;
-
+		/* need to hold the lock until we set job->dd_data to NULL
+		 * to hold off the timeout handler returning to the mid-layer
+		 * while we are still processing the job.
+		 */
 		job->dd_data = NULL;
+		dd_data->context_un.mbox.set_job = NULL;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		job->job_done(job);
+	} else {
+		dd_data->context_un.mbox.set_job = NULL;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	}
-	dd_data->context_un.mbox.set_job = NULL;
-	/* need to hold the lock until we call job done to hold off
-	 * the timeout handler returning to the midlayer while
-	 * we are stillprocessing the job
-	 */
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 	kfree(dd_data->context_un.mbox.mb);
 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
@@ -2644,6 +2646,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	struct ulp_bde64 *rxbpl = NULL;
 	struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
+	struct READ_EVENT_LOG_VAR *rdEventLog;
+	uint32_t transmit_length, receive_length, mode;
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+	struct mbox_header *header;
+	struct ulp_bde64 *bde;
 	uint8_t *ext = NULL;
 	int rc = 0;
 	uint8_t *from;
@@ -2651,9 +2658,16 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
+	/* sanity check to protect driver */
+	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
+	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
+		rc = -ERANGE;
+		goto job_done;
+	}
+
 	/* check if requested extended data lengths are valid */
-	if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
-	    (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
+	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
+	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
 		rc = -ERANGE;
 		goto job_done;
 	}
@@ -2744,8 +2758,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	 * use ours
 	 */
 	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
-		uint32_t transmit_length = pmb->un.varWords[1];
-		uint32_t receive_length = pmb->un.varWords[4];
+		transmit_length = pmb->un.varWords[1];
+		receive_length = pmb->un.varWords[4];
 		/* transmit length cannot be greater than receive length or
 		 * mailbox extension size
 		 */
@@ -2795,10 +2809,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 		from += sizeof(MAILBOX_t);
 		memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
 	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
-		struct READ_EVENT_LOG_VAR *rdEventLog =
-			&pmb->un.varRdEventLog ;
-		uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
-		uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
+		rdEventLog = &pmb->un.varRdEventLog;
+		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
+		mode = bf_get(lpfc_event_log, rdEventLog);
 
 		/* receive length cannot be greater than mailbox
 		 * extension size
@@ -2843,7 +2856,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 		/* rebuild the command for sli4 using our own buffers
 		 * like we do for biu diags
 		 */
-		uint32_t receive_length = pmb->un.varWords[2];
+		receive_length = pmb->un.varWords[2];
 		/* receive length cannot be greater than mailbox
 		 * extension size
 		 */
@@ -2879,8 +2892,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 		pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
 	} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
 		pmb->un.varUpdateCfg.co) {
-		struct ulp_bde64 *bde =
-			(struct ulp_bde64 *)&pmb->un.varWords[4];
+		bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
 
 		/* bde size cannot be greater than mailbox ext size */
 		if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
@@ -2921,10 +2933,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 		memcpy((uint8_t *)dmp->dma.virt, from,
 			bde->tus.f.bdeSize);
 	} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
-		struct lpfc_mbx_nembed_cmd *nembed_sge;
-		struct mbox_header *header;
-		uint32_t receive_length;
-
 		/* rebuild the command for sli4 using our own buffers
 		 * like we do for biu diags
 		 */
@@ -3386,6 +3394,7 @@ no_dd_data:
 	job->dd_data = NULL;
 	return rc;
 }
+
 /**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
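The new -ERANGE checks bound both user-supplied payload lengths by BSG_MBOX_SIZE before any mailbox memory is touched, and the extended-length test now compares word counts against BSG_MBOX_SIZE/sizeof(uint32_t) instead of MAILBOX_EXT_SIZE. The generic shape of that defensive check, as a standalone sketch rather than the driver's exact code:

/* Validate untrusted lengths from userspace before sizing any buffer.
 * BSG_MBOX_SIZE is the driver's fixed pass-through buffer size.
 */
static int check_bsg_len(size_t request_len, size_t reply_len)
{
	if (request_len > BSG_MBOX_SIZE || reply_len > BSG_MBOX_SIZE)
		return -ERANGE;		/* reject outright, do not truncate */
	return 0;
}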
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index a2c33e7c9152..b542aca6f5ae 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -109,3 +109,133 @@ struct menlo_response {
109 uint32_t xri; /* return the xri of the iocb exchange */ 109 uint32_t xri; /* return the xri of the iocb exchange */
110}; 110};
111 111
112/*
113 * macros and data structures for handling sli-config mailbox command
114 * pass-through support, this header file is shared between user and
115 * kernel spaces, note the set of macros are duplicates from lpfc_hw4.h,
116 * with macro names prefixed with bsg_, as the macros defined in
117 * lpfc_hw4.h are not accessible from user space.
118 */
119
120/* Macros to deal with bit fields. Each bit field must have 3 #defines
121 * associated with it (_SHIFT, _MASK, and _WORD).
122 * EG. For a bit field that is in the 7th bit of the "field4" field of a
123 * structure and is 2 bits in size the following #defines must exist:
124 * struct temp {
125 * uint32_t field1;
126 * uint32_t field2;
127 * uint32_t field3;
128 * uint32_t field4;
129 * #define example_bit_field_SHIFT 7
130 * #define example_bit_field_MASK 0x03
131 * #define example_bit_field_WORD field4
132 * uint32_t field5;
133 * };
134 * Then the macros below may be used to get or set the value of that field.
135 * EG. To get the value of the bit field from the above example:
136 * struct temp t1;
137 * value = bsg_bf_get(example_bit_field, &t1);
138 * And then to set that bit field:
139 * bsg_bf_set(example_bit_field, &t1, 2);
140 * Or clear that bit field:
141 * bsg_bf_set(example_bit_field, &t1, 0);
142 */
143#define bsg_bf_get_le32(name, ptr) \
144 ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
145#define bsg_bf_get(name, ptr) \
146 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
147#define bsg_bf_set_le32(name, ptr, value) \
148 ((ptr)->name##_WORD = cpu_to_le32(((((value) & \
149 name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
150 ~(name##_MASK << name##_SHIFT)))))
151#define bsg_bf_set(name, ptr, value) \
152 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
153 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
154
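
As a quick aside, the hypothetical "example_bit_field" from the comment above can be written out and driven with these macros; a minimal, self-contained sketch (not part of the patch, names taken from the comment):

#include <stdint.h>

struct temp {
	uint32_t field1;
	uint32_t field2;
	uint32_t field3;
	uint32_t field4;
#define example_bit_field_SHIFT	7
#define example_bit_field_MASK	0x03
#define example_bit_field_WORD	field4
	uint32_t field5;
};

static uint32_t example_bit_field_demo(void)
{
	struct temp t1 = { 0 };
	uint32_t value;

	bsg_bf_set(example_bit_field, &t1, 2);		/* bits 8:7 of field4 = 2 */
	value = bsg_bf_get(example_bit_field, &t1);	/* reads back 2 */
	bsg_bf_set(example_bit_field, &t1, 0);		/* clears bits 8:7 */
	return value;
}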
155/*
156 * The sli_config structure specified here is based on the following
157 * restriction:
158 *
159 * -- SLI_CONFIG EMB=0, carrying MSEs, will carry subcommands without
160 * carrying HBD.
161 * -- SLI_CONFIG EMB=1, not carrying MSE, will carry subcommands with or
162 * without carrying HBDs.
163 */
164
165struct lpfc_sli_config_mse {
166 uint32_t pa_lo;
167 uint32_t pa_hi;
168 uint32_t buf_len;
169#define lpfc_mbox_sli_config_mse_len_SHIFT 0
170#define lpfc_mbox_sli_config_mse_len_MASK 0xffffff
171#define lpfc_mbox_sli_config_mse_len_WORD buf_len
172};
173
174struct lpfc_sli_config_subcmd_hbd {
175 uint32_t buf_len;
176#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
177#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
178#define lpfc_mbox_sli_config_ecmn_hbd_len_WORD buf_len
179 uint32_t pa_lo;
180 uint32_t pa_hi;
181};
182
183struct lpfc_sli_config_hdr {
184 uint32_t word1;
185#define lpfc_mbox_hdr_emb_SHIFT 0
186#define lpfc_mbox_hdr_emb_MASK 0x00000001
187#define lpfc_mbox_hdr_emb_WORD word1
188#define lpfc_mbox_hdr_mse_cnt_SHIFT 3
189#define lpfc_mbox_hdr_mse_cnt_MASK 0x0000001f
190#define lpfc_mbox_hdr_mse_cnt_WORD word1
191 uint32_t payload_length;
192 uint32_t tag_lo;
193 uint32_t tag_hi;
194 uint32_t reserved5;
195};
196
197struct lpfc_sli_config_generic {
198 struct lpfc_sli_config_hdr sli_config_hdr;
199#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
200 struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
201};
202
203struct lpfc_sli_config_subcmnd {
204 struct lpfc_sli_config_hdr sli_config_hdr;
205 uint32_t word6;
206#define lpfc_subcmnd_opcode_SHIFT 0
207#define lpfc_subcmnd_opcode_MASK 0xff
208#define lpfc_subcmnd_opcode_WORD word6
209#define lpfc_subcmnd_subsys_SHIFT 8
210#define lpfc_subcmnd_subsys_MASK 0xff
211#define lpfc_subcmnd_subsys_WORD word6
212 uint32_t timeout;
213 uint32_t request_length;
214 uint32_t word9;
215#define lpfc_subcmnd_version_SHIFT 0
216#define lpfc_subcmnd_version_MASK 0xff
217#define lpfc_subcmnd_version_WORD word9
218 uint32_t word10;
219#define lpfc_subcmnd_ask_rd_len_SHIFT 0
220#define lpfc_subcmnd_ask_rd_len_MASK 0xffffff
221#define lpfc_subcmnd_ask_rd_len_WORD word10
222 uint32_t rd_offset;
223 uint32_t obj_name[26];
224 uint32_t hbd_count;
225#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
226 struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
227};
228
229struct lpfc_sli_config_mbox {
230 uint32_t word0;
231#define lpfc_mqe_status_SHIFT 16
232#define lpfc_mqe_status_MASK 0x0000FFFF
233#define lpfc_mqe_status_WORD word0
234#define lpfc_mqe_command_SHIFT 8
235#define lpfc_mqe_command_MASK 0x000000FF
236#define lpfc_mqe_command_WORD word0
237 union {
238 struct lpfc_sli_config_generic sli_config_generic;
239 struct lpfc_sli_config_subcmnd sli_config_subcmnd;
240 } un;
241};
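
A hedged sketch of how these structures compose: given a received lpfc_sli_config_mbox, the EMB bit and MSE count can be pulled from its header with the bsg_ macros above. The function and variable names here are illustrative only:

static void bsg_sli_config_decode(struct lpfc_sli_config_mbox *sli_cfg)
{
	struct lpfc_sli_config_hdr *hdr =
		&sli_cfg->un.sli_config_generic.sli_config_hdr;
	uint32_t emb, mse_cnt;

	emb = bsg_bf_get(lpfc_mbox_hdr_emb, hdr);
	if (!emb) {
		/* EMB=0: subcommand carried via external MSEs */
		mse_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, hdr);
		/* walk sli_cfg->un.sli_config_generic.mse[0..mse_cnt-1] */
	} else {
		/* EMB=1: embedded subcommand, with or without HBDs */
	}
}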
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3d967741c708..c93fca058603 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1119,172 +1119,14 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
1119} 1119}
1120 1120
1121/* 1121/*
1122 * ---------------------------------
1122 * iDiag debugfs file access methods 1123 * iDiag debugfs file access methods
1123 */ 1124 * ---------------------------------
1124
1125/*
1126 * iDiag PCI config space register access methods:
1127 *
1128 * The PCI config space register accesses of read, write, read-modify-write
1129 * for set bits, and read-modify-write for clear bits to SLI4 PCI functions
1130 * are provided. In the proper SLI4 PCI function's debugfs iDiag directory,
1131 *
1132 * /sys/kernel/debug/lpfc/fn<#>/iDiag
1133 *
1134 * the access is through the debugfs entry pciCfg:
1135 *
1136 * 1. For PCI config space register read access, there are two read methods:
1137 * A) read a single PCI config space register in the size of a byte
1138 * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through
1139 * the 4K extended PCI config space.
1140 *
1141 * A) Read a single PCI config space register consists of two steps:
1142 *
1143 * Step-1: Set up PCI config space register read command, the command
1144 * syntax is,
1145 *
1146 * echo 1 <where> <count> > pciCfg
1147 *
1148 * where, 1 is the iDiag command for PCI config space read, <where> is the
1149 * offset from the beginning of the device's PCI config space to read from,
1150 * and <count> is the size of PCI config space register data to read back,
1151 * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits
1152 * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes).
1153 *
1154 * Step-2: Perform the debugfs read operation to execute the idiag command
1155 * set up in Step-1,
1156 *
1157 * cat pciCfg
1158 *
1159 * Examples:
1160 * To read PCI device's vendor-id and device-id from PCI config space,
1161 *
1162 * echo 1 0 4 > pciCfg
1163 * cat pciCfg
1164 *
1165 * To read PCI device's current command from config space,
1166 *
1167 * echo 1 4 2 > pciCfg
1168 * cat pciCfg
1169 *
1170 * B) Browse through the entire 4K extended PCI config space also consists
1171 * of two steps:
1172 *
1173 * Step-1: Set up PCI config space register browsing command, the command
1174 * syntax is,
1175 *
1176 * echo 1 0 4096 > pciCfg
1177 *
1178 * where, 1 is the iDiag command for PCI config space read, 0 must be used
1179 * as the offset for PCI config space register browse, and 4096 must be
1180 * used as the count for PCI config space register browse.
1181 *
1182 * Step-2: Repeatedly issue the debugfs read operation to browse through
1183 * the entire PCI config space registers:
1184 *
1185 * cat pciCfg
1186 * cat pciCfg
1187 * cat pciCfg
1188 * ...
1189 *
1190 * When browsing to the end of the 4K PCI config space, the browse method
1191 * shall wrap around to start reading from the beginning again, and again...
1192 *
1193 * 2. For PCI config space register write access, it supports a single PCI
1194 * config space register write in the size of a byte (8 bits), a word
1195 * (16 bits), or a dword (32 bits). The command syntax is,
1196 *
1197 * echo 2 <where> <count> <value> > pciCfg
1198 *
1199 * where, 2 is the iDiag command for PCI config space write, <where> is
1200 * the offset from the beginning of the device's PCI config space to write
1201 * into, <count> is the size of data to write into the PCI config space,
1202 * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits
1203 * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and <value>
1204 * is the data to be written into the PCI config space register at the
1205 * offset.
1206 *
1207 * Examples:
1208 * To disable PCI device's interrupt assertion,
1209 *
1210 * 1) Read in device's PCI config space register command field <cmd>:
1211 *
1212 * echo 1 4 2 > pciCfg
1213 * cat pciCfg
1214 *
1215 * 2) Set bit 10 (Interrupt Disable bit) in the <cmd>:
1216 *
1217 * <cmd> = <cmd> | (1 << 10)
1218 *
1219 * 3) Write the modified command back:
1220 *
1221 * echo 2 4 2 <cmd> > pciCfg
1222 *
1223 * 3. For PCI config space register set bits access, it supports a single PCI
1224 * config space register set bits in the size of a byte (8 bits), a word
1225 * (16 bits), or a dword (32 bits). The command syntax is,
1226 *
1227 * echo 3 <where> <count> <bitmask> > pciCfg
1228 *
1229 * where, 3 is the iDiag command for PCI config space set bits, <where> is
1230 * the offset from the beginning of the device's PCI config space to set
1231 * bits into, <count> is the size of the bitmask to set into the PCI config
1232 * space, it will be 1 for setting a byte (8 bits), 2 for setting a word
1233 * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and
1234 * <bitmask> is the bitmask, indicating the bits to be set into the PCI
1235 * config space register at the offset. The logic performed to the content
1236 * of the PCI config space register, regval, is,
1237 *
1238 * regval |= <bitmask>
1239 *
1240 * 4. For PCI config space register clear bits access, it supports a single
1241 * PCI config space register clear bits in the size of a byte (8 bits),
1242 * a word (16 bits), or a dword (32 bits). The command syntax is,
1243 *
1244 * echo 4 <where> <count> <bitmask> > pciCfg
1245 *
1246 * where, 4 is the iDiag command for PCI config space clear bits, <where>
1247 * is the offset from the beginning of the device's PCI config space to
1248 * clear bits from, <count> is the size of the bitmask to apply to the PCI
1249 * config space, it will be 1 for clearing a byte (8 bits), 2 for clearing
1250 * a word (16 bits or 2 bytes), or 4 for clearing a dword (32 bits or 4
1251 * bytes), and <bitmask> is the bitmask, indicating the bits to be cleared
1252 * from the PCI config space register at the offset. The logic performed
1253 * to the content of the PCI config space register, regval, is,
1254 *
1255 * regval &= ~<bitmask>
1256 *
1257 * Note, for all single register read, write, set bits, or clear bits access,
1258 * the offset (<where>) must be aligned with the size of the data:
1259 *
1260 * For data size of byte (8 bits), the offset must be aligned to the byte
1261 * boundary; for data size of word (16 bits), the offset must be aligned
1262 * to the word boundary; while for data size of dword (32 bits), the offset
1263 * must be aligned to the dword boundary. Otherwise, the interface will
1264 * return the error:
1265 * 1125 *
1266 * "-bash: echo: write error: Invalid argument". 1126 * All access methods are through the proper SLI4 PCI function's debugfs
1127 * iDiag directory:
1267 * 1128 *
1268 * For example: 1129 * /sys/kernel/debug/lpfc/fn<#>/iDiag
1269 *
1270 * echo 1 2 4 > pciCfg
1271 * -bash: echo: write error: Invalid argument
1272 *
1273 * Note also, all of the numbers in the command fields for all read, write,
1274 * set bits, and clear bits PCI config space register commands can be
1275 * either decimal or hex.
1276 *
1277 * For example,
1278 * echo 1 0 4096 > pciCfg
1279 *
1280 * will be the same as
1281 * echo 1 0 0x1000 > pciCfg
1282 *
1283 * And,
1284 * echo 2 155 1 10 > pciCfg
1285 *
1286 * will be the same as
1287 * echo 2 0x9b 1 0xa > pciCfg
1288 */ 1130 */
1289 1131
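
The removed step-by-step examples still describe the wire format: a write of "<opcode> <where> <count> [<value>]" arms a command, and a subsequent read of the same file executes it. A hedged user-space sketch of reading the first config space dword through pciCfg (the fn0 path component is an assumption; substitute the actual PCI function number):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char path[] = "/sys/kernel/debug/lpfc/fn0/iDiag/pciCfg";
	const char cmd[] = "1 0 4";	/* opcode 1: read 4 bytes at offset 0 */
	char out[64];
	ssize_t n;
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, cmd, sizeof(cmd) - 1) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, out, sizeof(out) - 1);
	close(fd);
	if (n <= 0)
		return 1;
	out[n] = '\0';
	printf("%s", out);	/* e.g. "000: <vendor/device ids>" */
	return 0;
}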
1290/** 1132/**
@@ -1331,10 +1173,10 @@ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
1331 for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { 1173 for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
1332 step_str = strsep(&pbuf, "\t "); 1174 step_str = strsep(&pbuf, "\t ");
1333 if (!step_str) 1175 if (!step_str)
1334 return 0; 1176 return i;
1335 idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); 1177 idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
1336 } 1178 }
1337 return 0; 1179 return i;
1338} 1180}
1339 1181
1340/** 1182/**
@@ -1403,7 +1245,7 @@ lpfc_idiag_release(struct inode *inode, struct file *file)
1403 * Description: 1245 * Description:
1404 * This routine frees the buffer that was allocated when the debugfs file 1246 * This routine frees the buffer that was allocated when the debugfs file
1405 * was opened. It also resets the fields in the idiag command struct in the 1247 * was opened. It also resets the fields in the idiag command struct in the
1406 * case the command is not continuous browsing of the data structure. 1248 * case of a write operation command.
1407 * 1249 *
1408 * Returns: 1250 * Returns:
1409 * This function returns zero. 1251 * This function returns zero.
@@ -1413,18 +1255,20 @@ lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
1413{ 1255{
1414 struct lpfc_debug *debug = file->private_data; 1256 struct lpfc_debug *debug = file->private_data;
1415 1257
1416 /* Read PCI config register, if not read all, clear command fields */ 1258 if (debug->op == LPFC_IDIAG_OP_WR) {
1417 if ((debug->op == LPFC_IDIAG_OP_RD) && 1259 switch (idiag.cmd.opcode) {
1418 (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD)) 1260 case LPFC_IDIAG_CMD_PCICFG_WR:
1419 if ((idiag.cmd.data[1] == sizeof(uint8_t)) || 1261 case LPFC_IDIAG_CMD_PCICFG_ST:
1420 (idiag.cmd.data[1] == sizeof(uint16_t)) || 1262 case LPFC_IDIAG_CMD_PCICFG_CL:
1421 (idiag.cmd.data[1] == sizeof(uint32_t))) 1263 case LPFC_IDIAG_CMD_QUEACC_WR:
1264 case LPFC_IDIAG_CMD_QUEACC_ST:
1265 case LPFC_IDIAG_CMD_QUEACC_CL:
1422 memset(&idiag, 0, sizeof(idiag)); 1266 memset(&idiag, 0, sizeof(idiag));
1423 1267 break;
1424 /* Write PCI config register, clear command fields */ 1268 default:
1425 if ((debug->op == LPFC_IDIAG_OP_WR) && 1269 break;
1426 (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)) 1270 }
1427 memset(&idiag, 0, sizeof(idiag)); 1271 }
1428 1272
1429 /* Free the buffers to the file operation */ 1273 /* Free the buffers to the file operation */
1430 kfree(debug->buffer); 1274 kfree(debug->buffer);
@@ -1504,7 +1348,7 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
1504 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, 1348 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
1505 "%03x: %08x\n", where, u32val); 1349 "%03x: %08x\n", where, u32val);
1506 break; 1350 break;
1507 case LPFC_PCI_CFG_SIZE: /* browse all */ 1351 case LPFC_PCI_CFG_BROWSE: /* browse all */
1508 goto pcicfg_browse; 1352 goto pcicfg_browse;
1509 break; 1353 break;
1510 default: 1354 default:
@@ -1586,16 +1430,21 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1586 debug->op = LPFC_IDIAG_OP_WR; 1430 debug->op = LPFC_IDIAG_OP_WR;
1587 1431
1588 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); 1432 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
1589 if (rc) 1433 if (rc < 0)
1590 return rc; 1434 return rc;
1591 1435
1592 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { 1436 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
1437 /* Sanity check on PCI config read command line arguments */
1438 if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
1439 goto error_out;
1593 /* Read command from PCI config space, set up command fields */ 1440 /* Read command from PCI config space, set up command fields */
1594 where = idiag.cmd.data[0]; 1441 where = idiag.cmd.data[0];
1595 count = idiag.cmd.data[1]; 1442 count = idiag.cmd.data[1];
1596 if (count == LPFC_PCI_CFG_SIZE) { 1443 if (count == LPFC_PCI_CFG_BROWSE) {
1597 if (where != 0) 1444 if (where % sizeof(uint32_t))
1598 goto error_out; 1445 goto error_out;
1446 /* Starting offset to browse */
1447 idiag.offset.last_rd = where;
1599 } else if ((count != sizeof(uint8_t)) && 1448 } else if ((count != sizeof(uint8_t)) &&
1600 (count != sizeof(uint16_t)) && 1449 (count != sizeof(uint16_t)) &&
1601 (count != sizeof(uint32_t))) 1450 (count != sizeof(uint32_t)))
@@ -1621,6 +1470,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1621 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || 1470 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
1622 idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || 1471 idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
1623 idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { 1472 idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
1473 /* Sanity check on PCI config write command line arguments */
1474 if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
1475 goto error_out;
1624 /* Write command to PCI config space, read-modify-write */ 1476 /* Write command to PCI config space, read-modify-write */
1625 where = idiag.cmd.data[0]; 1477 where = idiag.cmd.data[0];
1626 count = idiag.cmd.data[1]; 1478 count = idiag.cmd.data[1];
@@ -1753,10 +1605,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1753 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1605 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1754 "Slow-path EQ information:\n"); 1606 "Slow-path EQ information:\n");
1755 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1607 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1756 "\tID [%02d], EQE-COUNT [%04d], " 1608 "\tEQID[%02d], "
1757 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1609 "QE-COUNT[%04d], QE-SIZE[%04d], "
1610 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1758 phba->sli4_hba.sp_eq->queue_id, 1611 phba->sli4_hba.sp_eq->queue_id,
1759 phba->sli4_hba.sp_eq->entry_count, 1612 phba->sli4_hba.sp_eq->entry_count,
1613 phba->sli4_hba.sp_eq->entry_size,
1760 phba->sli4_hba.sp_eq->host_index, 1614 phba->sli4_hba.sp_eq->host_index,
1761 phba->sli4_hba.sp_eq->hba_index); 1615 phba->sli4_hba.sp_eq->hba_index);
1762 1616
@@ -1765,10 +1619,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1765 "Fast-path EQ information:\n"); 1619 "Fast-path EQ information:\n");
1766 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 1620 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
1767 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1621 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1768 "\tID [%02d], EQE-COUNT [%04d], " 1622 "\tEQID[%02d], "
1769 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1623 "QE-COUNT[%04d], QE-SIZE[%04d], "
1624 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1770 phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, 1625 phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
1771 phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, 1626 phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
1627 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
1772 phba->sli4_hba.fp_eq[fcp_qidx]->host_index, 1628 phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
1773 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); 1629 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
1774 } 1630 }
@@ -1776,89 +1632,101 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1776 1632
1777 /* Get mailbox complete queue information */ 1633 /* Get mailbox complete queue information */
1778 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1634 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1779 "Mailbox CQ information:\n"); 1635 "Slow-path MBX CQ information:\n");
1780 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1636 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1781 "\t\tAssociated EQ-ID [%02d]:\n", 1637 "Associated EQID[%02d]:\n",
1782 phba->sli4_hba.mbx_cq->assoc_qid); 1638 phba->sli4_hba.mbx_cq->assoc_qid);
1783 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1639 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1784 "\tID [%02d], CQE-COUNT [%04d], " 1640 "\tCQID[%02d], "
1785 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1641 "QE-COUNT[%04d], QE-SIZE[%04d], "
1642 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1786 phba->sli4_hba.mbx_cq->queue_id, 1643 phba->sli4_hba.mbx_cq->queue_id,
1787 phba->sli4_hba.mbx_cq->entry_count, 1644 phba->sli4_hba.mbx_cq->entry_count,
1645 phba->sli4_hba.mbx_cq->entry_size,
1788 phba->sli4_hba.mbx_cq->host_index, 1646 phba->sli4_hba.mbx_cq->host_index,
1789 phba->sli4_hba.mbx_cq->hba_index); 1647 phba->sli4_hba.mbx_cq->hba_index);
1790 1648
1791 /* Get slow-path complete queue information */ 1649 /* Get slow-path complete queue information */
1792 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1650 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1793 "Slow-path CQ information:\n"); 1651 "Slow-path ELS CQ information:\n");
1794 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1652 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1795 "\t\tAssociated EQ-ID [%02d]:\n", 1653 "Associated EQID[%02d]:\n",
1796 phba->sli4_hba.els_cq->assoc_qid); 1654 phba->sli4_hba.els_cq->assoc_qid);
1797 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1655 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1798 "\tID [%02d], CQE-COUNT [%04d], " 1656 "\tCQID [%02d], "
1799 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1657 "QE-COUNT[%04d], QE-SIZE[%04d], "
1658 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1800 phba->sli4_hba.els_cq->queue_id, 1659 phba->sli4_hba.els_cq->queue_id,
1801 phba->sli4_hba.els_cq->entry_count, 1660 phba->sli4_hba.els_cq->entry_count,
1661 phba->sli4_hba.els_cq->entry_size,
1802 phba->sli4_hba.els_cq->host_index, 1662 phba->sli4_hba.els_cq->host_index,
1803 phba->sli4_hba.els_cq->hba_index); 1663 phba->sli4_hba.els_cq->hba_index);
1804 1664
1805 /* Get fast-path complete queue information */ 1665 /* Get fast-path complete queue information */
1806 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1666 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1807 "Fast-path CQ information:\n"); 1667 "Fast-path FCP CQ information:\n");
1808 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 1668 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
1809 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1669 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1810 "\t\tAssociated EQ-ID [%02d]:\n", 1670 "Associated EQID[%02d]:\n",
1811 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); 1671 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
1812 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1672 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1813 "\tID [%02d], EQE-COUNT [%04d], " 1673 "\tCQID[%02d], "
1814 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1674 "QE-COUNT[%04d], QE-SIZE[%04d], "
1815 phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, 1675 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1816 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, 1676 phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
1817 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 1677 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
1818 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 1678 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
1679 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
1680 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
1819 } 1681 }
1820 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 1682 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
1821 1683
1822 /* Get mailbox queue information */ 1684 /* Get mailbox queue information */
1823 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1685 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1824 "Mailbox MQ information:\n"); 1686 "Slow-path MBX MQ information:\n");
1825 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1687 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1826 "\t\tAssociated CQ-ID [%02d]:\n", 1688 "Associated CQID[%02d]:\n",
1827 phba->sli4_hba.mbx_wq->assoc_qid); 1689 phba->sli4_hba.mbx_wq->assoc_qid);
1828 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1690 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1829 "\tID [%02d], MQE-COUNT [%04d], " 1691 "\tWQID[%02d], "
1830 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1692 "QE-COUNT[%04d], QE-SIZE[%04d], "
1693 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1831 phba->sli4_hba.mbx_wq->queue_id, 1694 phba->sli4_hba.mbx_wq->queue_id,
1832 phba->sli4_hba.mbx_wq->entry_count, 1695 phba->sli4_hba.mbx_wq->entry_count,
1696 phba->sli4_hba.mbx_wq->entry_size,
1833 phba->sli4_hba.mbx_wq->host_index, 1697 phba->sli4_hba.mbx_wq->host_index,
1834 phba->sli4_hba.mbx_wq->hba_index); 1698 phba->sli4_hba.mbx_wq->hba_index);
1835 1699
1836 /* Get slow-path work queue information */ 1700 /* Get slow-path work queue information */
1837 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1701 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1838 "Slow-path WQ information:\n"); 1702 "Slow-path ELS WQ information:\n");
1839 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1703 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1840 "\t\tAssociated CQ-ID [%02d]:\n", 1704 "Associated CQID[%02d]:\n",
1841 phba->sli4_hba.els_wq->assoc_qid); 1705 phba->sli4_hba.els_wq->assoc_qid);
1842 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1706 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1843 "\tID [%02d], WQE-COUNT [%04d], " 1707 "\tWQID[%02d], "
1844 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", 1708 "QE-COUNT[%04d], QE-SIZE[%04d], "
1709 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
1845 phba->sli4_hba.els_wq->queue_id, 1710 phba->sli4_hba.els_wq->queue_id,
1846 phba->sli4_hba.els_wq->entry_count, 1711 phba->sli4_hba.els_wq->entry_count,
1712 phba->sli4_hba.els_wq->entry_size,
1847 phba->sli4_hba.els_wq->host_index, 1713 phba->sli4_hba.els_wq->host_index,
1848 phba->sli4_hba.els_wq->hba_index); 1714 phba->sli4_hba.els_wq->hba_index);
1849 1715
1850 /* Get fast-path work queue information */ 1716 /* Get fast-path work queue information */
1851 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1717 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1852 "Fast-path WQ information:\n"); 1718 "Fast-path FCP WQ information:\n");
1853 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { 1719 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
1854 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1720 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1855 "\t\tAssociated CQ-ID [%02d]:\n", 1721 "Associated CQID[%02d]:\n",
1856 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); 1722 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
1857 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1723 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1858 "\tID [%02d], WQE-COUNT [%04d], " 1724 "\tWQID[%02d], "
1859 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1725 "QE-COUNT[%04d], WQE-SIZE[%04d], "
1726 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1860 phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id, 1727 phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
1861 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count, 1728 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
1729 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
1862 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, 1730 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
1863 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); 1731 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
1864 } 1732 }
@@ -1868,26 +1736,597 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1868 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1736 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1869 "Slow-path RQ information:\n"); 1737 "Slow-path RQ information:\n");
1870 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1738 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1871 "\t\tAssociated CQ-ID [%02d]:\n", 1739 "Associated CQID[%02d]:\n",
1872 phba->sli4_hba.hdr_rq->assoc_qid); 1740 phba->sli4_hba.hdr_rq->assoc_qid);
1873 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1741 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1874 "\tID [%02d], RHQE-COUNT [%04d], " 1742 "\tHQID[%02d], "
1875 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1743 "QE-COUNT[%04d], QE-SIZE[%04d], "
1744 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1876 phba->sli4_hba.hdr_rq->queue_id, 1745 phba->sli4_hba.hdr_rq->queue_id,
1877 phba->sli4_hba.hdr_rq->entry_count, 1746 phba->sli4_hba.hdr_rq->entry_count,
1747 phba->sli4_hba.hdr_rq->entry_size,
1878 phba->sli4_hba.hdr_rq->host_index, 1748 phba->sli4_hba.hdr_rq->host_index,
1879 phba->sli4_hba.hdr_rq->hba_index); 1749 phba->sli4_hba.hdr_rq->hba_index);
1880 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1750 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1881 "\tID [%02d], RDQE-COUNT [%04d], " 1751 "\tDQID[%02d], "
1882 "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", 1752 "QE-COUNT[%04d], QE-SIZE[%04d], "
1753 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
1883 phba->sli4_hba.dat_rq->queue_id, 1754 phba->sli4_hba.dat_rq->queue_id,
1884 phba->sli4_hba.dat_rq->entry_count, 1755 phba->sli4_hba.dat_rq->entry_count,
1756 phba->sli4_hba.dat_rq->entry_size,
1885 phba->sli4_hba.dat_rq->host_index, 1757 phba->sli4_hba.dat_rq->host_index,
1886 phba->sli4_hba.dat_rq->hba_index); 1758 phba->sli4_hba.dat_rq->hba_index);
1887 1759
1888 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 1760 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1889} 1761}
1890 1762
1763/**
1764 * lpfc_idiag_que_param_check - queue access command parameter sanity check
1765 * @q: The pointer to queue structure.
1766 * @index: The index of the queue entry to access.
1767 * @count: The number of queue entries to access.
1768 *
1769 * Description:
1770 * The routine performs a sanity check on the device queue access method commands.
1771 *
1772 * Returns:
1773 * This function returns -EINVAL when the sanity check fails; otherwise, it
1774 * returns 0.
1775 **/
1776static int
1777lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
1778{
1779 /* Only support single entry read or browsing */
1780 if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE))
1781 return -EINVAL;
1782 if (index > q->entry_count - 1)
1783 return -EINVAL;
1784 return 0;
1785}
1786
1787/**
1788 * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index
1789 * @pbuffer: The pointer to buffer to copy the read data into.
1790 * @pque: The pointer to the queue to be read.
1791 * @index: The index of the queue entry to read.
1792 *
1793 * Description:
1794 * This routine reads out a single entry from the given queue's index location
1795 * and copies it into the buffer provided.
1796 *
1797 * Returns:
1798 * This function returns 0 when it fails; otherwise, it returns the length of
1799 * the data read into the buffer provided.
1800 **/
1801static int
1802lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque,
1803 uint32_t index)
1804{
1805 int offset, esize;
1806 uint32_t *pentry;
1807
1808 if (!pbuffer || !pque)
1809 return 0;
1810
1811 esize = pque->entry_size;
1812 len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
1813 "QE-INDEX[%04d]:\n", index);
1814
1815 offset = 0;
1816 pentry = pque->qe[index].address;
1817 while (esize > 0) {
1818 len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
1819 "%08x ", *pentry);
1820 pentry++;
1821 offset += sizeof(uint32_t);
1822 esize -= sizeof(uint32_t);
1823 if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
1824 len += snprintf(pbuffer+len,
1825 LPFC_QUE_ACC_BUF_SIZE-len, "\n");
1826 }
1827 len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
1828
1829 return len;
1830}
1831
1832/**
1833 * lpfc_idiag_queacc_read - idiag debugfs read port queue
1834 * @file: The file pointer to read from.
1835 * @buf: The buffer to copy the data to.
1836 * @nbytes: The number of bytes to read.
1837 * @ppos: The position in the file to start reading from.
1838 *
1839 * Description:
1840 * This routine reads data from the @phba device queue memory according to the
1841 * idiag command, and copies to user @buf. Depending on the queue dump read
1842 * command setup, it does either a single queue entry read or browsing through
1843 * all entries of the queue.
1844 *
1845 * Returns:
1846 * This function returns the amount of data that was read (this could be less
1847 * than @nbytes if the end of the file was reached) or a negative error value.
1848 **/
1849static ssize_t
1850lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
1851 loff_t *ppos)
1852{
1853 struct lpfc_debug *debug = file->private_data;
1854 uint32_t last_index, index, count;
1855 struct lpfc_queue *pque = NULL;
1856 char *pbuffer;
1857 int len = 0;
1858
1859 /* This is a user read operation */
1860 debug->op = LPFC_IDIAG_OP_RD;
1861
1862 if (!debug->buffer)
1863 debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL);
1864 if (!debug->buffer)
1865 return 0;
1866 pbuffer = debug->buffer;
1867
1868 if (*ppos)
1869 return 0;
1870
1871 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
1872 index = idiag.cmd.data[2];
1873 count = idiag.cmd.data[3];
1874 pque = (struct lpfc_queue *)idiag.ptr_private;
1875 } else
1876 return 0;
1877
1878 /* Browse the queue starting from index */
1879 if (count == LPFC_QUE_ACC_BROWSE)
1880 goto que_browse;
1881
1882 /* Read a single entry from the queue */
1883 len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
1884
1885 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1886
1887que_browse:
1888
1889 /* Browse all entries from the queue */
1890 last_index = idiag.offset.last_rd;
1891 index = last_index;
1892
1893 while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) {
1894 len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
1895 index++;
1896 if (index > pque->entry_count - 1)
1897 break;
1898 }
1899
1900	/* Set up the offset for the next portion of the queue browse */
1901 if (index > pque->entry_count - 1)
1902 index = 0;
1903 idiag.offset.last_rd = index;
1904
1905 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1906}
1907
1908/**
1909 * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands
1910 * @file: The file pointer to read from.
1911 * @buf: The buffer to copy the user data from.
1912 * @nbytes: The number of bytes to get.
1913 * @ppos: The position in the file to start reading from.
1914 *
1915 * This routine gets the debugfs idiag command struct from user space and then
1916 * performs the syntax check for port queue read (dump) or write (set) command
1917 * accordingly. In the case of port queue read command, it sets up the command
1918 * in the idiag command struct for the following debugfs read operation. In
1919 * the case of port queue write operation, it executes the write operation
1920 * into the port queue entry accordingly.
1921 *
1922 * It returns the @nbytes passed in from debugfs user space when successful.
1923 * In case of error conditions, it returns proper error code back to the user
1924 * space.
1925 **/
1926static ssize_t
1927lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
1928 size_t nbytes, loff_t *ppos)
1929{
1930 struct lpfc_debug *debug = file->private_data;
1931 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
1932 uint32_t qidx, quetp, queid, index, count, offset, value;
1933 uint32_t *pentry;
1934 struct lpfc_queue *pque;
1935 int rc;
1936
1937 /* This is a user write operation */
1938 debug->op = LPFC_IDIAG_OP_WR;
1939
1940 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
1941 if (rc < 0)
1942 return rc;
1943
1944	/* Get and sanity-check the command fields */
1945 quetp = idiag.cmd.data[0];
1946 queid = idiag.cmd.data[1];
1947 index = idiag.cmd.data[2];
1948 count = idiag.cmd.data[3];
1949 offset = idiag.cmd.data[4];
1950 value = idiag.cmd.data[5];
1951
1952 /* Sanity check on command line arguments */
1953 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
1954 idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
1955 idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
1956 if (rc != LPFC_QUE_ACC_WR_CMD_ARG)
1957 goto error_out;
1958 if (count != 1)
1959 goto error_out;
1960 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
1961 if (rc != LPFC_QUE_ACC_RD_CMD_ARG)
1962 goto error_out;
1963 } else
1964 goto error_out;
1965
1966 switch (quetp) {
1967 case LPFC_IDIAG_EQ:
1968 /* Slow-path event queue */
1969 if (phba->sli4_hba.sp_eq->queue_id == queid) {
1970 /* Sanity check */
1971 rc = lpfc_idiag_que_param_check(
1972 phba->sli4_hba.sp_eq, index, count);
1973 if (rc)
1974 goto error_out;
1975 idiag.ptr_private = phba->sli4_hba.sp_eq;
1976 goto pass_check;
1977 }
1978 /* Fast-path event queue */
1979 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
1980 if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
1981 /* Sanity check */
1982 rc = lpfc_idiag_que_param_check(
1983 phba->sli4_hba.fp_eq[qidx],
1984 index, count);
1985 if (rc)
1986 goto error_out;
1987 idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
1988 goto pass_check;
1989 }
1990 }
1991 goto error_out;
1992 break;
1993 case LPFC_IDIAG_CQ:
1994 /* MBX complete queue */
1995 if (phba->sli4_hba.mbx_cq->queue_id == queid) {
1996 /* Sanity check */
1997 rc = lpfc_idiag_que_param_check(
1998 phba->sli4_hba.mbx_cq, index, count);
1999 if (rc)
2000 goto error_out;
2001 idiag.ptr_private = phba->sli4_hba.mbx_cq;
2002 goto pass_check;
2003 }
2004 /* ELS complete queue */
2005 if (phba->sli4_hba.els_cq->queue_id == queid) {
2006 /* Sanity check */
2007 rc = lpfc_idiag_que_param_check(
2008 phba->sli4_hba.els_cq, index, count);
2009 if (rc)
2010 goto error_out;
2011 idiag.ptr_private = phba->sli4_hba.els_cq;
2012 goto pass_check;
2013 }
2014 /* FCP complete queue */
2015 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
2016 if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
2017 /* Sanity check */
2018 rc = lpfc_idiag_que_param_check(
2019 phba->sli4_hba.fcp_cq[qidx],
2020 index, count);
2021 if (rc)
2022 goto error_out;
2023 idiag.ptr_private =
2024 phba->sli4_hba.fcp_cq[qidx];
2025 goto pass_check;
2026 }
2027 }
2028 goto error_out;
2029 break;
2030 case LPFC_IDIAG_MQ:
2031 /* MBX work queue */
2032 if (phba->sli4_hba.mbx_wq->queue_id == queid) {
2033 /* Sanity check */
2034 rc = lpfc_idiag_que_param_check(
2035 phba->sli4_hba.mbx_wq, index, count);
2036 if (rc)
2037 goto error_out;
2038 idiag.ptr_private = phba->sli4_hba.mbx_wq;
2039 goto pass_check;
2040 }
2041 break;
2042 case LPFC_IDIAG_WQ:
2043 /* ELS work queue */
2044 if (phba->sli4_hba.els_wq->queue_id == queid) {
2045 /* Sanity check */
2046 rc = lpfc_idiag_que_param_check(
2047 phba->sli4_hba.els_wq, index, count);
2048 if (rc)
2049 goto error_out;
2050 idiag.ptr_private = phba->sli4_hba.els_wq;
2051 goto pass_check;
2052 }
2053 /* FCP work queue */
2054 for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
2055 if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
2056 /* Sanity check */
2057 rc = lpfc_idiag_que_param_check(
2058 phba->sli4_hba.fcp_wq[qidx],
2059 index, count);
2060 if (rc)
2061 goto error_out;
2062 idiag.ptr_private =
2063 phba->sli4_hba.fcp_wq[qidx];
2064 goto pass_check;
2065 }
2066 }
2067 goto error_out;
2068 break;
2069 case LPFC_IDIAG_RQ:
2070 /* HDR queue */
2071 if (phba->sli4_hba.hdr_rq->queue_id == queid) {
2072 /* Sanity check */
2073 rc = lpfc_idiag_que_param_check(
2074 phba->sli4_hba.hdr_rq, index, count);
2075 if (rc)
2076 goto error_out;
2077 idiag.ptr_private = phba->sli4_hba.hdr_rq;
2078 goto pass_check;
2079 }
2080 /* DAT queue */
2081 if (phba->sli4_hba.dat_rq->queue_id == queid) {
2082 /* Sanity check */
2083 rc = lpfc_idiag_que_param_check(
2084 phba->sli4_hba.dat_rq, index, count);
2085 if (rc)
2086 goto error_out;
2087 idiag.ptr_private = phba->sli4_hba.dat_rq;
2088 goto pass_check;
2089 }
2090 goto error_out;
2091 break;
2092 default:
2093 goto error_out;
2094 break;
2095 }
2096
2097pass_check:
2098
2099 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
2100 if (count == LPFC_QUE_ACC_BROWSE)
2101 idiag.offset.last_rd = index;
2102 }
2103
2104 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
2105 idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
2106 idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
2107 /* Additional sanity checks on write operation */
2108 pque = (struct lpfc_queue *)idiag.ptr_private;
2109 if (offset > pque->entry_size/sizeof(uint32_t) - 1)
2110 goto error_out;
2111 pentry = pque->qe[index].address;
2112 pentry += offset;
2113 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
2114 *pentry = value;
2115 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST)
2116 *pentry |= value;
2117 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL)
2118 *pentry &= ~value;
2119 }
2120 return nbytes;
2121
2122error_out:
2123 /* Clean out command structure on command error out */
2124 memset(&idiag, 0, sizeof(idiag));
2125 return -EINVAL;
2126}
2127
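
Given the arity checks above (LPFC_QUE_ACC_RD_CMD_ARG == 4 data words, LPFC_QUE_ACC_WR_CMD_ARG == 6, and count forced to 1 for writes), a hedged sketch of composing well-formed queAcc command strings; the queue id, index, and value below are made up:

#include <stdio.h>

/* opcodes from lpfc_debugfs.h: 0x11 RD, 0x12 WR, 0x13 ST, 0x14 CL */
static void build_queacc_cmds(char *rd, char *wr, size_t sz)
{
	/* read one entry: opcode qtype qid index count */
	snprintf(rd, sz, "0x11 %d %d %d %d",
		 4 /* LPFC_IDIAG_WQ */, 10 /* qid */, 0 /* index */, 1);
	/* write one word: adds the word offset into the entry and the value */
	snprintf(wr, sz, "0x12 %d %d %d %d %d 0x%x",
		 4 /* LPFC_IDIAG_WQ */, 10, 0, 1, 0 /* offset */, 0);
}

Each string is then written to /sys/kernel/debug/lpfc/fn<#>/iDiag/queAcc; for the read command the entry contents are fetched by reading the same file afterwards (or repeatedly, with count 0xffff, to browse the whole queue).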
2128/**
2129 * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
2130 * @phba: The pointer to hba structure.
2131 * @pbuffer: The pointer to the buffer to copy the data to.
2132 * @len: The number of bytes already copied into @pbuffer.
2133 * @drbregid: The id to doorbell registers.
2134 *
2135 * Description:
2136 * This routine reads a doorbell register and copies its content to the
2137 * buffer pointed to by @pbuffer.
2138 *
2139 * Returns:
2140 * This function returns the amount of data that was copied into @pbuffer.
2141 **/
2142static int
2143lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
2144 int len, uint32_t drbregid)
2145{
2146
2147 if (!pbuffer)
2148 return 0;
2149
2150 switch (drbregid) {
2151 case LPFC_DRB_EQCQ:
2152 len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
2153 "EQCQ-DRB-REG: 0x%08x\n",
2154 readl(phba->sli4_hba.EQCQDBregaddr));
2155 break;
2156 case LPFC_DRB_MQ:
2157 len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
2158 "MQ-DRB-REG: 0x%08x\n",
2159 readl(phba->sli4_hba.MQDBregaddr));
2160 break;
2161 case LPFC_DRB_WQ:
2162 len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
2163 "WQ-DRB-REG: 0x%08x\n",
2164 readl(phba->sli4_hba.WQDBregaddr));
2165 break;
2166 case LPFC_DRB_RQ:
2167 len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
2168 "RQ-DRB-REG: 0x%08x\n",
2169 readl(phba->sli4_hba.RQDBregaddr));
2170 break;
2171 default:
2172 break;
2173 }
2174
2175 return len;
2176}
2177
2178/**
2179 * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell
2180 * @file: The file pointer to read from.
2181 * @buf: The buffer to copy the data to.
2182 * @nbytes: The number of bytes to read.
2183 * @ppos: The position in the file to start reading from.
2184 *
2185 * Description:
2186 * This routine reads data from the @phba device doorbell register according
2187 * to the idiag command, and copies to user @buf. Depending on the doorbell
2188 * register read command setup, it does either a single doorbell register
2189 * read or dump all doorbell registers.
2190 *
2191 * Returns:
2192 * This function returns the amount of data that was read (this could be less
2193 * than @nbytes if the end of the file was reached) or a negative error value.
2194 **/
2195static ssize_t
2196lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
2197 loff_t *ppos)
2198{
2199 struct lpfc_debug *debug = file->private_data;
2200 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2201 uint32_t drb_reg_id, i;
2202 char *pbuffer;
2203 int len = 0;
2204
2205 /* This is a user read operation */
2206 debug->op = LPFC_IDIAG_OP_RD;
2207
2208 if (!debug->buffer)
2209 debug->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL);
2210 if (!debug->buffer)
2211 return 0;
2212 pbuffer = debug->buffer;
2213
2214 if (*ppos)
2215 return 0;
2216
2217 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
2218 drb_reg_id = idiag.cmd.data[0];
2219 else
2220 return 0;
2221
2222 if (drb_reg_id == LPFC_DRB_ACC_ALL)
2223 for (i = 1; i <= LPFC_DRB_MAX; i++)
2224 len = lpfc_idiag_drbacc_read_reg(phba,
2225 pbuffer, len, i);
2226 else
2227 len = lpfc_idiag_drbacc_read_reg(phba,
2228 pbuffer, len, drb_reg_id);
2229
2230 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2231}
2232
2233/**
2234 * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands
2235 * @file: The file pointer to read from.
2236 * @buf: The buffer to copy the user data from.
2237 * @nbytes: The number of bytes to get.
2238 * @ppos: The position in the file to start writing from.
2239 *
2240 * This routine gets the debugfs idiag command struct from user space and then
2241 * performs the syntax check for port doorbell register read (dump) or write
2242 * (set) command accordingly. In the case of port queue read command, it sets
2243 * up the command in the idiag command struct for the following debugfs read
2244 * operation. In the case of port doorbell register write operation, it
2245 * executes the write operation into the port doorbell register accordingly.
2246 *
2247 * It returns the @nbytes passed in from debugfs user space when successful.
2248 * In case of error conditions, it returns proper error code back to the user
2249 * space.
2250 **/
2251static ssize_t
2252lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
2253 size_t nbytes, loff_t *ppos)
2254{
2255 struct lpfc_debug *debug = file->private_data;
2256 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2257 uint32_t drb_reg_id, value, reg_val;
2258 void __iomem *drb_reg;
2259 int rc;
2260
2261 /* This is a user write operation */
2262 debug->op = LPFC_IDIAG_OP_WR;
2263
2264 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
2265 if (rc < 0)
2266 return rc;
2267
2268 /* Sanity check on command line arguments */
2269 drb_reg_id = idiag.cmd.data[0];
2270 value = idiag.cmd.data[1];
2271
2272 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
2273 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
2274 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
2275 if (rc != LPFC_DRB_ACC_WR_CMD_ARG)
2276 goto error_out;
2277 if (drb_reg_id > LPFC_DRB_MAX)
2278 goto error_out;
2279 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) {
2280 if (rc != LPFC_DRB_ACC_RD_CMD_ARG)
2281 goto error_out;
2282 if ((drb_reg_id > LPFC_DRB_MAX) &&
2283 (drb_reg_id != LPFC_DRB_ACC_ALL))
2284 goto error_out;
2285 } else
2286 goto error_out;
2287
2288 /* Perform the write access operation */
2289 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
2290 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
2291 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
2292 switch (drb_reg_id) {
2293 case LPFC_DRB_EQCQ:
2294 drb_reg = phba->sli4_hba.EQCQDBregaddr;
2295 break;
2296 case LPFC_DRB_MQ:
2297 drb_reg = phba->sli4_hba.MQDBregaddr;
2298 break;
2299 case LPFC_DRB_WQ:
2300 drb_reg = phba->sli4_hba.WQDBregaddr;
2301 break;
2302 case LPFC_DRB_RQ:
2303 drb_reg = phba->sli4_hba.RQDBregaddr;
2304 break;
2305 default:
2306 goto error_out;
2307 }
2308
2309 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR)
2310 reg_val = value;
2311 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) {
2312 reg_val = readl(drb_reg);
2313 reg_val |= value;
2314 }
2315 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
2316 reg_val = readl(drb_reg);
2317 reg_val &= ~value;
2318 }
2319 writel(reg_val, drb_reg);
2320 readl(drb_reg); /* flush */
2321 }
2322 return nbytes;
2323
2324error_out:
2325 /* Clean out command structure on command error out */
2326 memset(&idiag, 0, sizeof(idiag));
2327 return -EINVAL;
2328}
2329
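
Similarly for drbAcc, the checks above admit one data word for reads (a register id 1-4, or 0xffff for all) and two for write/set/clear. A hedged list of command strings that would pass them; the value written is illustrative:

/* opcodes from lpfc_debugfs.h: 0x21 RD, 0x22 WR, 0x23 ST, 0x24 CL */
static const char * const drbacc_examples[] = {
	"0x21 0xffff",	/* dump all doorbell registers (LPFC_DRB_ACC_ALL) */
	"0x21 3",	/* read only the WQ doorbell (LPFC_DRB_WQ == 3) */
	"0x22 3 0x1",	/* write 0x1 to the WQ doorbell */
	"0x23 3 0x1",	/* set bit 0, read-modify-write OR */
	"0x24 3 0x1",	/* clear bit 0, read-modify-write AND-NOT */
};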
1891#undef lpfc_debugfs_op_disc_trc 2330#undef lpfc_debugfs_op_disc_trc
1892static const struct file_operations lpfc_debugfs_op_disc_trc = { 2331static const struct file_operations lpfc_debugfs_op_disc_trc = {
1893 .owner = THIS_MODULE, 2332 .owner = THIS_MODULE,
@@ -1986,6 +2425,26 @@ static const struct file_operations lpfc_idiag_op_queInfo = {
1986 .release = lpfc_idiag_release, 2425 .release = lpfc_idiag_release,
1987}; 2426};
1988 2427
2428#undef lpfc_idiag_op_queacc
2429static const struct file_operations lpfc_idiag_op_queAcc = {
2430 .owner = THIS_MODULE,
2431 .open = lpfc_idiag_open,
2432 .llseek = lpfc_debugfs_lseek,
2433 .read = lpfc_idiag_queacc_read,
2434 .write = lpfc_idiag_queacc_write,
2435 .release = lpfc_idiag_cmd_release,
2436};
2437
2438#undef lpfc_idiag_op_drbacc
2439static const struct file_operations lpfc_idiag_op_drbAcc = {
2440 .owner = THIS_MODULE,
2441 .open = lpfc_idiag_open,
2442 .llseek = lpfc_debugfs_lseek,
2443 .read = lpfc_idiag_drbacc_read,
2444 .write = lpfc_idiag_drbacc_write,
2445 .release = lpfc_idiag_cmd_release,
2446};
2447
1989#endif 2448#endif
1990 2449
1991/** 2450/**
@@ -2261,6 +2720,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2261 } 2720 }
2262 } 2721 }
2263 2722
2723 /* iDiag access PCI function queue */
2724 snprintf(name, sizeof(name), "queAcc");
2725 if (!phba->idiag_que_acc) {
2726 phba->idiag_que_acc =
2727 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
2728 phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
2729 if (!phba->idiag_que_acc) {
2730 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2731 "2926 Can't create idiag debugfs\n");
2732 goto debug_failed;
2733 }
2734 }
2735
2736 /* iDiag access PCI function doorbell registers */
2737 snprintf(name, sizeof(name), "drbAcc");
2738 if (!phba->idiag_drb_acc) {
2739 phba->idiag_drb_acc =
2740 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
2741 phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
2742 if (!phba->idiag_drb_acc) {
2743 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2744 "2927 Can't create idiag debugfs\n");
2745 goto debug_failed;
2746 }
2747 }
2748
2264debug_failed: 2749debug_failed:
2265 return; 2750 return;
2266#endif 2751#endif
@@ -2339,6 +2824,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2339 * iDiag release 2824 * iDiag release
2340 */ 2825 */
2341 if (phba->sli_rev == LPFC_SLI_REV4) { 2826 if (phba->sli_rev == LPFC_SLI_REV4) {
2827 if (phba->idiag_drb_acc) {
2828 /* iDiag drbAcc */
2829 debugfs_remove(phba->idiag_drb_acc);
2830 phba->idiag_drb_acc = NULL;
2831 }
2832 if (phba->idiag_que_acc) {
2833 /* iDiag queAcc */
2834 debugfs_remove(phba->idiag_que_acc);
2835 phba->idiag_que_acc = NULL;
2836 }
2342 if (phba->idiag_que_info) { 2837 if (phba->idiag_que_info) {
2343 /* iDiag queInfo */ 2838 /* iDiag queInfo */
2344 debugfs_remove(phba->idiag_que_info); 2839 debugfs_remove(phba->idiag_que_info);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 91b9a9427cda..6525a5e62d27 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -39,13 +39,42 @@
39/* hbqinfo output buffer size */ 39/* hbqinfo output buffer size */
40#define LPFC_HBQINFO_SIZE 8192 40#define LPFC_HBQINFO_SIZE 8192
41 41
42/* rdPciConf output buffer size */ 42/* pciConf */
43#define LPFC_PCI_CFG_BROWSE 0xffff
44#define LPFC_PCI_CFG_RD_CMD_ARG 2
45#define LPFC_PCI_CFG_WR_CMD_ARG 3
43#define LPFC_PCI_CFG_SIZE 4096 46#define LPFC_PCI_CFG_SIZE 4096
44#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2) 47#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
45#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) 48#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
46 49
47/* queue info output buffer size */ 50/* queue info */
48#define LPFC_QUE_INFO_GET_BUF_SIZE 2048 51#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
52
53/* queue acc */
54#define LPFC_QUE_ACC_BROWSE 0xffff
55#define LPFC_QUE_ACC_RD_CMD_ARG 4
56#define LPFC_QUE_ACC_WR_CMD_ARG 6
57#define LPFC_QUE_ACC_BUF_SIZE 4096
58#define LPFC_QUE_ACC_SIZE (LPFC_QUE_ACC_BUF_SIZE/2)
59
60#define LPFC_IDIAG_EQ 1
61#define LPFC_IDIAG_CQ 2
62#define LPFC_IDIAG_MQ 3
63#define LPFC_IDIAG_WQ 4
64#define LPFC_IDIAG_RQ 5
65
66/* doorbell acc */
67#define LPFC_DRB_ACC_ALL 0xffff
68#define LPFC_DRB_ACC_RD_CMD_ARG 1
69#define LPFC_DRB_ACC_WR_CMD_ARG 2
70#define LPFC_DRB_ACC_BUF_SIZE 256
71
72#define LPFC_DRB_EQCQ 1
73#define LPFC_DRB_MQ 2
74#define LPFC_DRB_WQ 3
75#define LPFC_DRB_RQ 4
76
77#define LPFC_DRB_MAX 4
49 78
50#define SIZE_U8 sizeof(uint8_t) 79#define SIZE_U8 sizeof(uint8_t)
51#define SIZE_U16 sizeof(uint16_t) 80#define SIZE_U16 sizeof(uint16_t)
@@ -73,13 +102,23 @@ struct lpfc_idiag_offset {
73 uint32_t last_rd; 102 uint32_t last_rd;
74}; 103};
75 104
76#define LPFC_IDIAG_CMD_DATA_SIZE 4 105#define LPFC_IDIAG_CMD_DATA_SIZE 8
77struct lpfc_idiag_cmd { 106struct lpfc_idiag_cmd {
78 uint32_t opcode; 107 uint32_t opcode;
79#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001 108#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
80#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002 109#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
81#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 110#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
82#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 111#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
112
113#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
114#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
115#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
116#define LPFC_IDIAG_CMD_QUEACC_CL 0x00000014
117
118#define LPFC_IDIAG_CMD_DRBACC_RD 0x00000021
119#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
120#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
121#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
83 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; 122 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
84}; 123};
85 124
@@ -87,6 +126,7 @@ struct lpfc_idiag {
87 uint32_t active; 126 uint32_t active;
88 struct lpfc_idiag_cmd cmd; 127 struct lpfc_idiag_cmd cmd;
89 struct lpfc_idiag_offset offset; 128 struct lpfc_idiag_offset offset;
129 void *ptr_private;
90}; 130};
91#endif 131#endif
92 132
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d34b69f9cdb1..e2c452467c8b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -670,6 +670,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
670 * Driver needs to re-reg VPI in order for f/w 670 * Driver needs to re-reg VPI in order for f/w
671 * to update the MAC address. 671 * to update the MAC address.
672 */ 672 */
673 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
673 lpfc_register_new_vport(phba, vport, ndlp); 674 lpfc_register_new_vport(phba, vport, ndlp);
674 return 0; 675 return 0;
675 } 676 }
@@ -869,8 +870,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
869 */ 870 */
870 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 871 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
871 (phba->fcf.fcf_flag & FCF_DISCOVERY) && 872 (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
872 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && 873 !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
873 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { 874 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
874 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 875 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
875 "2611 FLOGI failed on FCF (x%x), " 876 "2611 FLOGI failed on FCF (x%x), "
876 "status:x%x/x%x, tmo:x%x, perform " 877 "status:x%x/x%x, tmo:x%x, perform "
@@ -1085,14 +1086,15 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1085 if (sp->cmn.fcphHigh < FC_PH3) 1086 if (sp->cmn.fcphHigh < FC_PH3)
1086 sp->cmn.fcphHigh = FC_PH3; 1087 sp->cmn.fcphHigh = FC_PH3;
1087 1088
1088 if ((phba->sli_rev == LPFC_SLI_REV4) && 1089 if (phba->sli_rev == LPFC_SLI_REV4) {
1089 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1090 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1090 LPFC_SLI_INTF_IF_TYPE_0)) { 1091 LPFC_SLI_INTF_IF_TYPE_0) {
1091 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); 1092 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1092 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); 1093 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1093 /* FLOGI needs to be 3 for WQE FCFI */ 1094 /* FLOGI needs to be 3 for WQE FCFI */
1094 /* Set the fcfi to the fcfi we registered with */ 1095 /* Set the fcfi to the fcfi we registered with */
1095 elsiocb->iocb.ulpContext = phba->fcf.fcfi; 1096 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1097 }
1096 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1098 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1097 sp->cmn.request_multiple_Nport = 1; 1099 sp->cmn.request_multiple_Nport = 1;
1098 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1100 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
@@ -4107,13 +4109,13 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
4107 pcmd += sizeof(uint32_t); 4109 pcmd += sizeof(uint32_t);
4108 rrq = (struct RRQ *)pcmd; 4110 rrq = (struct RRQ *)pcmd;
4109 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 4111 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
4110 rxid = be16_to_cpu(bf_get(rrq_rxid, rrq)); 4112 rxid = bf_get(rrq_rxid, rrq);
4111 4113
4112 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4114 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4113 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 4115 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4114 " x%x x%x\n", 4116 " x%x x%x\n",
4115 be32_to_cpu(bf_get(rrq_did, rrq)), 4117 be32_to_cpu(bf_get(rrq_did, rrq)),
4116 be16_to_cpu(bf_get(rrq_oxid, rrq)), 4118 bf_get(rrq_oxid, rrq),
4117 rxid, 4119 rxid,
4118 iocb->iotag, iocb->iocb.ulpContext); 4120 iocb->iotag, iocb->iocb.ulpContext);
4119 4121
@@ -4121,7 +4123,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
4121 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 4123 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4122 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 4124 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
4123 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 4125 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
4124 xri = be16_to_cpu(bf_get(rrq_oxid, rrq)); 4126 xri = bf_get(rrq_oxid, rrq);
4125 else 4127 else
4126 xri = rxid; 4128 xri = rxid;
4127 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 4129 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
@@ -7290,8 +7292,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7290 struct lpfc_vport *vport = cmdiocb->vport; 7292 struct lpfc_vport *vport = cmdiocb->vport;
7291 IOCB_t *irsp; 7293 IOCB_t *irsp;
7292 struct lpfc_nodelist *ndlp; 7294 struct lpfc_nodelist *ndlp;
7293 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 7295 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7294 7296
7297 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
7295 irsp = &rspiocb->iocb; 7298 irsp = &rspiocb->iocb;
7296 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7299 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7297 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 7300 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
@@ -7302,6 +7305,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7302 7305
7303 /* Trigger the release of the ndlp after logo */ 7306 /* Trigger the release of the ndlp after logo */
7304 lpfc_nlp_put(ndlp); 7307 lpfc_nlp_put(ndlp);
7308
7309 /* NPIV LOGO completes to NPort <nlp_DID> */
7310 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7311 "2928 NPIV LOGO completes to NPort x%x "
7312 "Data: x%x x%x x%x x%x\n",
7313 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
7314 irsp->ulpTimeout, vport->num_disc_nodes);
7315
7316 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
7317 spin_lock_irq(shost->host_lock);
7318 vport->fc_flag &= ~FC_FABRIC;
7319 spin_unlock_irq(shost->host_lock);
7320 }
7305} 7321}
7306 7322
7307/** 7323/**
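Note on the first els.c hunk: it is a De Morgan fix. The old test `(ulpStatus != IOSTAT_LOCAL_REJECT) && (ulpWord[4] != IOERR_SLI_ABORTED)` suppressed the FCF failover path whenever *either* field matched, while the intent was to suppress it only for the exact LOCAL_REJECT/SLI_ABORTED pair. A minimal standalone sketch of the two predicates (the constant values below are illustrative, not the driver's):

#include <stdio.h>

/* Illustrative stand-ins for the driver's status codes. */
#define IOSTAT_LOCAL_REJECT 0x03
#define IOERR_SLI_ABORTED   0x09

/* Old predicate: skips the failover path when EITHER field matches. */
static int failover_old(unsigned status, unsigned word4)
{
    return (status != IOSTAT_LOCAL_REJECT) &&
           (word4 != IOERR_SLI_ABORTED);
}

/* New predicate: skips it only for the exact abort pair. */
static int failover_new(unsigned status, unsigned word4)
{
    return !((status == IOSTAT_LOCAL_REJECT) &&
             (word4 == IOERR_SLI_ABORTED));
}

int main(void)
{
    /* A LOCAL_REJECT with some other word4 reason: the old code
     * wrongly suppressed the FCF failover handling here. */
    unsigned status = IOSTAT_LOCAL_REJECT, word4 = 0x01;

    printf("old: %d, new: %d\n",
           failover_old(status, word4), failover_new(status, word4));
    return 0;
}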
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 301498301a8f..7a35df5e2038 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -3569,6 +3569,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3569 "rport add: did:x%x flg:x%x type x%x", 3569 "rport add: did:x%x flg:x%x type x%x",
3570 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3570 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3571 3571
3572 /* Don't add the remote port if unloading. */
3573 if (vport->load_flag & FC_UNLOADING)
3574 return;
3575
3572 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 3576 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
3573 if (!rport || !get_device(&rport->dev)) { 3577 if (!rport || !get_device(&rport->dev)) {
3574 dev_printk(KERN_WARNING, &phba->pcidev->dev, 3578 dev_printk(KERN_WARNING, &phba->pcidev->dev,
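The hbadisc.c hunk adds a standard teardown guard: once the vport is flagged FC_UNLOADING, registering a new remote port would only create an object the unload path immediately has to tear down again, or race against. A hedged sketch of the pattern, with hypothetical struct and function names:

#include <stdio.h>

#define FC_UNLOADING 0x1    /* illustrative flag bit */

struct vport {
    unsigned load_flag;
};

static void register_remote_port(struct vport *vp)
{
    /* Bail out early: don't create objects an in-flight unload
     * would immediately have to destroy (or race against). */
    if (vp->load_flag & FC_UNLOADING)
        return;

    printf("remote port registered\n");
}

int main(void)
{
    struct vport vp = { .load_flag = FC_UNLOADING };

    register_remote_port(&vp);  /* silently skipped */
    vp.load_flag = 0;
    register_remote_port(&vp);  /* proceeds */
    return 0;
}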
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 8433ac0d9fb4..4dff668ebdad 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1059,6 +1059,11 @@ struct rq_context {
1059#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ 1059#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */
1060#define lpfc_rq_context_rqe_size_MASK 0x0000000F 1060#define lpfc_rq_context_rqe_size_MASK 0x0000000F
1061#define lpfc_rq_context_rqe_size_WORD word0 1061#define lpfc_rq_context_rqe_size_WORD word0
1062#define LPFC_RQE_SIZE_8 2
1063#define LPFC_RQE_SIZE_16 3
1064#define LPFC_RQE_SIZE_32 4
1065#define LPFC_RQE_SIZE_64 5
1066#define LPFC_RQE_SIZE_128 6
1062#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ 1067#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
1063#define lpfc_rq_context_page_size_MASK 0x000000FF 1068#define lpfc_rq_context_page_size_MASK 0x000000FF
1064#define lpfc_rq_context_page_size_WORD word0 1069#define lpfc_rq_context_page_size_WORD word0
@@ -2108,6 +2113,8 @@ struct lpfc_mbx_pc_sli4_params {
2108#define sgl_pp_align_WORD word12 2113#define sgl_pp_align_WORD word12
2109 uint32_t rsvd_13_63[51]; 2114 uint32_t rsvd_13_63[51];
2110}; 2115};
2116#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
2117 &(~((SLI4_PAGE_SIZE)-1)))
2111 2118
2112struct lpfc_sli4_parameters { 2119struct lpfc_sli4_parameters {
2113 uint32_t word0; 2120 uint32_t word0;
@@ -2491,6 +2498,9 @@ struct wqe_common {
2491#define wqe_reqtag_SHIFT 0 2498#define wqe_reqtag_SHIFT 0
2492#define wqe_reqtag_MASK 0x0000FFFF 2499#define wqe_reqtag_MASK 0x0000FFFF
2493#define wqe_reqtag_WORD word9 2500#define wqe_reqtag_WORD word9
2501#define wqe_temp_rpi_SHIFT 16
2502#define wqe_temp_rpi_MASK 0x0000FFFF
2503#define wqe_temp_rpi_WORD word9
2494#define wqe_rcvoxid_SHIFT 16 2504#define wqe_rcvoxid_SHIFT 16
2495#define wqe_rcvoxid_MASK 0x0000FFFF 2505#define wqe_rcvoxid_MASK 0x0000FFFF
2496#define wqe_rcvoxid_WORD word9 2506#define wqe_rcvoxid_WORD word9
@@ -2524,7 +2534,7 @@ struct wqe_common {
2524#define wqe_wqes_WORD word10 2534#define wqe_wqes_WORD word10
2525/* Note that this field overlaps above fields */ 2535/* Note that this field overlaps above fields */
2526#define wqe_wqid_SHIFT 1 2536#define wqe_wqid_SHIFT 1
2527#define wqe_wqid_MASK 0x0000007f 2537#define wqe_wqid_MASK 0x00007fff
2528#define wqe_wqid_WORD word10 2538#define wqe_wqid_WORD word10
2529#define wqe_pri_SHIFT 16 2539#define wqe_pri_SHIFT 16
2530#define wqe_pri_MASK 0x00000007 2540#define wqe_pri_MASK 0x00000007
@@ -2621,7 +2631,11 @@ struct xmit_els_rsp64_wqe {
2621 uint32_t rsvd4; 2631 uint32_t rsvd4;
2622 struct wqe_did wqe_dest; 2632 struct wqe_did wqe_dest;
2623 struct wqe_common wqe_com; /* words 6-11 */ 2633 struct wqe_common wqe_com; /* words 6-11 */
2624 uint32_t rsvd_12_15[4]; 2634 uint32_t word12;
2635#define wqe_rsp_temp_rpi_SHIFT 0
2636#define wqe_rsp_temp_rpi_MASK 0x0000FFFF
2637#define wqe_rsp_temp_rpi_WORD word12
2638 uint32_t rsvd_13_15[3];
2625}; 2639};
2626 2640
2627struct xmit_bls_rsp64_wqe { 2641struct xmit_bls_rsp64_wqe {
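SLI4_PAGE_ALIGN, added above, is the standard round-up-to-power-of-two idiom: add (size - 1), then mask off the low bits. It exists because the SLI4 hardware page size is fixed at 4 KiB regardless of the kernel's PAGE_SIZE, so PAGE_ALIGN would compute the wrong boundary on architectures with larger pages. A small demonstration, assuming the 4096-byte SLI4 page:

#include <stdio.h>

#define SLI4_PAGE_SIZE 4096
#define SLI4_PAGE_ALIGN(addr) (((addr) + ((SLI4_PAGE_SIZE) - 1)) \
                               & (~((SLI4_PAGE_SIZE) - 1)))

int main(void)
{
    /* Already-aligned values are unchanged; everything else is
     * rounded up to the next 4 KiB boundary. */
    unsigned long samples[] = { 0, 1, 4095, 4096, 4097, 12288 };
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%5lu -> %5lu\n", samples[i],
               SLI4_PAGE_ALIGN(samples[i]));
    return 0;
}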
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 505f88443b5c..7dda036a1af3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3209,9 +3209,9 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3209 phba->sli4_hba.link_state.logical_speed = 3209 phba->sli4_hba.link_state.logical_speed =
3210 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3210 bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3211 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3211 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3212 "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x " 3212 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3213 "LA Type:x%x Port Type:%d Port Number:%d Logical " 3213 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3214 "speed:%dMbps Fault:%d\n", 3214 "Logical speed:%dMbps Fault:%d\n",
3215 phba->sli4_hba.link_state.speed, 3215 phba->sli4_hba.link_state.speed,
3216 phba->sli4_hba.link_state.topology, 3216 phba->sli4_hba.link_state.topology,
3217 phba->sli4_hba.link_state.status, 3217 phba->sli4_hba.link_state.status,
@@ -4906,6 +4906,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4906 uint16_t rpi_limit, curr_rpi_range; 4906 uint16_t rpi_limit, curr_rpi_range;
4907 struct lpfc_dmabuf *dmabuf; 4907 struct lpfc_dmabuf *dmabuf;
4908 struct lpfc_rpi_hdr *rpi_hdr; 4908 struct lpfc_rpi_hdr *rpi_hdr;
4909 uint32_t rpi_count;
4909 4910
4910 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4911 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4911 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4912 phba->sli4_hba.max_cfg_param.max_rpi - 1;
@@ -4920,7 +4921,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4920 * and to allow the full max_rpi range per port. 4921 * and to allow the full max_rpi range per port.
4921 */ 4922 */
4922 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4923 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4923 return NULL; 4924 rpi_count = rpi_limit - curr_rpi_range;
4925 else
4926 rpi_count = LPFC_RPI_HDR_COUNT;
4924 4927
4925 /* 4928 /*
4926 * First allocate the protocol header region for the port. The 4929 * First allocate the protocol header region for the port. The
@@ -4961,7 +4964,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4961 * The next_rpi stores the next module-64 rpi value to post 4964 * The next_rpi stores the next module-64 rpi value to post
4962 * in any subsequent rpi memory region postings. 4965 * in any subsequent rpi memory region postings.
4963 */ 4966 */
4964 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4967 phba->sli4_hba.next_rpi += rpi_count;
4965 spin_unlock_irq(&phba->hbalock); 4968 spin_unlock_irq(&phba->hbalock);
4966 return rpi_hdr; 4969 return rpi_hdr;
4967 4970
@@ -7004,7 +7007,8 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7004 lpfc_sli4_bar0_register_memmap(phba, if_type); 7007 lpfc_sli4_bar0_register_memmap(phba, if_type);
7005 } 7008 }
7006 7009
7007 if (pci_resource_start(pdev, 2)) { 7010 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7011 (pci_resource_start(pdev, 2))) {
7008 /* 7012 /*
7009 * Map SLI4 if type 0 HBA Control Register base to a kernel 7013 * Map SLI4 if type 0 HBA Control Register base to a kernel
7010 * virtual address and setup the registers. 7014 * virtual address and setup the registers.
@@ -7021,7 +7025,8 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7021 lpfc_sli4_bar1_register_memmap(phba); 7025 lpfc_sli4_bar1_register_memmap(phba);
7022 } 7026 }
7023 7027
7024 if (pci_resource_start(pdev, 4)) { 7028 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7029 (pci_resource_start(pdev, 4))) {
7025 /* 7030 /*
7026 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7031 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7027 * virtual address and setup the registers. 7032 * virtual address and setup the registers.
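The rpi header change in lpfc_init.c replaces a hard failure with a clamp: rather than refusing to post a header region when fewer than LPFC_RPI_HDR_COUNT rpis remain before rpi_limit, the driver now posts the partial tail and advances next_rpi by the actual count. A sketch of the clamp logic under illustrative numbers (the real code also allocates and posts DMA regions, omitted here):

#include <stdio.h>

#define LPFC_RPI_HDR_COUNT 64

/* Post headers for up to LPFC_RPI_HDR_COUNT rpis, clamped so the
 * final region may cover only the partial tail of the rpi range. */
static unsigned post_rpi_hdr(unsigned *next_rpi, unsigned rpi_limit)
{
    unsigned curr = *next_rpi;
    unsigned count;

    if (curr + (LPFC_RPI_HDR_COUNT - 1) > rpi_limit)
        count = rpi_limit - curr;   /* partial tail */
    else
        count = LPFC_RPI_HDR_COUNT;

    *next_rpi += count;
    return count;
}

int main(void)
{
    unsigned next_rpi = 0, rpi_limit = 150;  /* illustrative */
    unsigned n;

    while (next_rpi < rpi_limit &&
           (n = post_rpi_hdr(&next_rpi, rpi_limit)))
        printf("posted %u rpis, next_rpi=%u\n", n, next_rpi);
    return 0;
}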
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index fbab9734e9b4..e6ce9033f85e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1736,7 +1736,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1736 } 1736 }
1737 1737
1738 /* Setup for the non-embedded mbox command */ 1738 /* Setup for the non-embedded mbox command */
1739 pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE; 1739 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1740 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 1740 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1741 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; 1741 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1742 /* Allocate record for keeping SGE virtual addresses */ 1742 /* Allocate record for keeping SGE virtual addresses */
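The mbox.c fix is a direct consumer of the new macro: page counts for non-embedded mailbox SGEs must be computed against the 4 KiB SLI4 page rather than the kernel page, then capped at the SGE limit. A sketch of that computation (the LPFC_SLI4_MBX_SGE_MAX_PAGES value below is illustrative):

#include <stdio.h>

#define SLI4_PAGE_SIZE 4096
#define SLI4_PAGE_ALIGN(x) \
    (((x) + SLI4_PAGE_SIZE - 1) & ~(SLI4_PAGE_SIZE - 1))
#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19  /* illustrative cap */

int main(void)
{
    unsigned length = 20000;    /* mailbox payload in bytes */
    unsigned pcount = SLI4_PAGE_ALIGN(length) / SLI4_PAGE_SIZE;

    /* Cap at the maximum number of SGE pages the mailbox allows. */
    if (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES)
        pcount = LPFC_SLI4_MBX_SGE_MAX_PAGES;
    printf("length=%u -> %u SLI4 pages\n", length, pcount);
    return 0;
}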
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index fe7cc84e773b..84e4481b2406 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3238,9 +3238,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3238 if (!lpfc_cmd) { 3238 if (!lpfc_cmd) {
3239 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3239 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3240 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 3240 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
3241 "x%x ID %d " 3241 "x%x ID %d LUN %d\n",
3242 "LUN %d snum %#lx\n", ret, cmnd->device->id, 3242 ret, cmnd->device->id, cmnd->device->lun);
3243 cmnd->device->lun, cmnd->serial_number);
3244 return SUCCESS; 3243 return SUCCESS;
3245 } 3244 }
3246 3245
@@ -3318,16 +3317,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3318 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3317 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3319 "0748 abort handler timed out waiting " 3318 "0748 abort handler timed out waiting "
3320 "for abort to complete: ret %#x, ID %d, " 3319 "for abort to complete: ret %#x, ID %d, "
3321 "LUN %d, snum %#lx\n", 3320 "LUN %d\n",
3322 ret, cmnd->device->id, cmnd->device->lun, 3321 ret, cmnd->device->id, cmnd->device->lun);
3323 cmnd->serial_number);
3324 } 3322 }
3325 3323
3326 out: 3324 out:
3327 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3325 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3328 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 3326 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
3329 "LUN %d snum %#lx\n", ret, cmnd->device->id, 3327 "LUN %d\n", ret, cmnd->device->id,
3330 cmnd->device->lun, cmnd->serial_number); 3328 cmnd->device->lun);
3331 return ret; 3329 return ret;
3332} 3330}
3333 3331
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index dacabbe0a586..837d272cb2d6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4769,8 +4769,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4769 else 4769 else
4770 phba->hba_flag &= ~HBA_FIP_SUPPORT; 4770 phba->hba_flag &= ~HBA_FIP_SUPPORT;
4771 4771
4772 if (phba->sli_rev != LPFC_SLI_REV4 || 4772 if (phba->sli_rev != LPFC_SLI_REV4) {
4773 !(phba->hba_flag & HBA_FCOE_MODE)) {
4774 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4773 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4775 "0376 READ_REV Error. SLI Level %d " 4774 "0376 READ_REV Error. SLI Level %d "
4776 "FCoE enabled %d\n", 4775 "FCoE enabled %d\n",
@@ -5018,10 +5017,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5018 lpfc_reg_fcfi(phba, mboxq); 5017 lpfc_reg_fcfi(phba, mboxq);
5019 mboxq->vport = phba->pport; 5018 mboxq->vport = phba->pport;
5020 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5019 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5021 if (rc == MBX_SUCCESS) 5020 if (rc != MBX_SUCCESS)
5022 rc = 0;
5023 else
5024 goto out_unset_queue; 5021 goto out_unset_queue;
5022 rc = 0;
5023 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
5024 &mboxq->u.mqe.un.reg_fcfi);
5025 } 5025 }
5026 /* 5026 /*
5027 * The port is ready, set the host's link state to LINK_DOWN 5027 * The port is ready, set the host's link state to LINK_DOWN
@@ -6402,6 +6402,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6402 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 6402 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
6403 int numBdes, i; 6403 int numBdes, i;
6404 struct ulp_bde64 bde; 6404 struct ulp_bde64 bde;
6405 struct lpfc_nodelist *ndlp;
6405 6406
6406 fip = phba->hba_flag & HBA_FIP_SUPPORT; 6407 fip = phba->hba_flag & HBA_FIP_SUPPORT;
6407 /* The fcp commands will set command type */ 6408 /* The fcp commands will set command type */
@@ -6447,6 +6448,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6447 6448
6448 switch (iocbq->iocb.ulpCommand) { 6449 switch (iocbq->iocb.ulpCommand) {
6449 case CMD_ELS_REQUEST64_CR: 6450 case CMD_ELS_REQUEST64_CR:
6451 ndlp = (struct lpfc_nodelist *)iocbq->context1;
6450 if (!iocbq->iocb.ulpLe) { 6452 if (!iocbq->iocb.ulpLe) {
6451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6452 "2007 Only Limited Edition cmd Format" 6454 "2007 Only Limited Edition cmd Format"
@@ -6472,6 +6474,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6472 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 6474 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
6473 >> LPFC_FIP_ELS_ID_SHIFT); 6475 >> LPFC_FIP_ELS_ID_SHIFT);
6474 } 6476 }
6477 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi);
6475 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 6478 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
6476 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 6479 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
6477 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 6480 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6604,6 +6607,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6604 command_type = OTHER_COMMAND; 6607 command_type = OTHER_COMMAND;
6605 break; 6608 break;
6606 case CMD_XMIT_ELS_RSP64_CX: 6609 case CMD_XMIT_ELS_RSP64_CX:
6610 ndlp = (struct lpfc_nodelist *)iocbq->context1;
6607 /* words0-2 BDE memcpy */ 6611 /* words0-2 BDE memcpy */
6608 /* word3 iocb=iotag32 wqe=response_payload_len */ 6612 /* word3 iocb=iotag32 wqe=response_payload_len */
6609 wqe->xmit_els_rsp.response_payload_len = xmit_len; 6613 wqe->xmit_els_rsp.response_payload_len = xmit_len;
@@ -6626,6 +6630,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6626 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 6630 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
6627 LPFC_WQE_LENLOC_WORD3); 6631 LPFC_WQE_LENLOC_WORD3);
6628 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 6632 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6633 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi);
6629 command_type = OTHER_COMMAND; 6634 command_type = OTHER_COMMAND;
6630 break; 6635 break;
6631 case CMD_CLOSE_XRI_CN: 6636 case CMD_CLOSE_XRI_CN:
@@ -10522,8 +10527,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
10522 bf_set(lpfc_mbox_hdr_version, &shdr->request, 10527 bf_set(lpfc_mbox_hdr_version, &shdr->request,
10523 phba->sli4_hba.pc_sli4_params.cqv); 10528 phba->sli4_hba.pc_sli4_params.cqv);
10524 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 10529 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
10525 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 10530 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
10526 (PAGE_SIZE/SLI4_PAGE_SIZE)); 10531 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
10527 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 10532 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
10528 eq->queue_id); 10533 eq->queue_id);
10529 } else { 10534 } else {
@@ -10967,6 +10972,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10967 &rq_create->u.request.context, 10972 &rq_create->u.request.context,
10968 hrq->entry_count); 10973 hrq->entry_count);
10969 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 10974 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
10975 bf_set(lpfc_rq_context_rqe_size,
10976 &rq_create->u.request.context,
10977 LPFC_RQE_SIZE_8);
10978 bf_set(lpfc_rq_context_page_size,
10979 &rq_create->u.request.context,
10980 (PAGE_SIZE/SLI4_PAGE_SIZE));
10970 } else { 10981 } else {
10971 switch (hrq->entry_count) { 10982 switch (hrq->entry_count) {
10972 default: 10983 default:
@@ -11042,9 +11053,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
11042 phba->sli4_hba.pc_sli4_params.rqv); 11053 phba->sli4_hba.pc_sli4_params.rqv);
11043 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 11054 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
11044 bf_set(lpfc_rq_context_rqe_count_1, 11055 bf_set(lpfc_rq_context_rqe_count_1,
11045 &rq_create->u.request.context, 11056 &rq_create->u.request.context, hrq->entry_count);
11046 hrq->entry_count);
11047 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 11057 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
11058 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
11059 LPFC_RQE_SIZE_8);
11060 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
11061 (PAGE_SIZE/SLI4_PAGE_SIZE));
11048 } else { 11062 } else {
11049 switch (drq->entry_count) { 11063 switch (drq->entry_count) {
11050 default: 11064 default:
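Several sli.c hunks store new fields (wqe_temp_rpi, wqe_rsp_temp_rpi) through the driver's bf_set/bf_get accessors, where each field is described by a _SHIFT/_MASK/_WORD triple, and the hw4.h change simply widens wqe_wqid_MASK from 7 bits to 15. A minimal reimplementation of the idiom against a raw 32-bit word (the real macros operate on a named word member inside a struct, so this is a simplified sketch):

#include <stdio.h>
#include <stdint.h>

/* Field descriptor in the lpfc style: right-justified mask + shift. */
#define wqe_wqid_SHIFT 1
#define wqe_wqid_MASK  0x00007fff   /* widened from 0x0000007f */

#define bf_set(name, wordp, val)                                  \
    (*(wordp) = (*(wordp) & ~(name##_MASK << name##_SHIFT)) |     \
                (((val) & name##_MASK) << name##_SHIFT))
#define bf_get(name, wordp)                                       \
    ((*(wordp) >> name##_SHIFT) & name##_MASK)

int main(void)
{
    uint32_t word10 = 0;

    /* WQ ids above 127 were silently truncated by the old 7-bit
     * mask; the 15-bit mask preserves them. */
    bf_set(wqe_wqid, &word10, 300);
    printf("wqid = %u\n", bf_get(wqe_wqid, &word10));
    return 0;
}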
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 2404d1d65563..c03921b1232c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.22" 21#define LPFC_DRIVER_VERSION "8.3.23"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index f2684dd09ed0..5c1776406c96 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1469,8 +1469,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1469 if( scb->state & SCB_ABORT ) { 1469 if( scb->state & SCB_ABORT ) {
1470 1470
1471 printk(KERN_WARNING 1471 printk(KERN_WARNING
1472 "megaraid: aborted cmd %lx[%x] complete.\n", 1472 "megaraid: aborted cmd [%x] complete.\n",
1473 scb->cmd->serial_number, scb->idx); 1473 scb->idx);
1474 1474
1475 scb->cmd->result = (DID_ABORT << 16); 1475 scb->cmd->result = (DID_ABORT << 16);
1476 1476
@@ -1488,8 +1488,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1488 if( scb->state & SCB_RESET ) { 1488 if( scb->state & SCB_RESET ) {
1489 1489
1490 printk(KERN_WARNING 1490 printk(KERN_WARNING
1491 "megaraid: reset cmd %lx[%x] complete.\n", 1491 "megaraid: reset cmd [%x] complete.\n",
1492 scb->cmd->serial_number, scb->idx); 1492 scb->idx);
1493 1493
1494 scb->cmd->result = (DID_RESET << 16); 1494 scb->cmd->result = (DID_RESET << 16);
1495 1495
@@ -1958,8 +1958,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1958 struct list_head *pos, *next; 1958 struct list_head *pos, *next;
1959 scb_t *scb; 1959 scb_t *scb;
1960 1960
1961 printk(KERN_WARNING "megaraid: %s-%lx cmd=%x <c=%d t=%d l=%d>\n", 1961 printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n",
1962 (aor == SCB_ABORT)? "ABORTING":"RESET", cmd->serial_number, 1962 (aor == SCB_ABORT)? "ABORTING":"RESET",
1963 cmd->cmnd[0], cmd->device->channel, 1963 cmd->cmnd[0], cmd->device->channel,
1964 cmd->device->id, cmd->device->lun); 1964 cmd->device->id, cmd->device->lun);
1965 1965
@@ -1983,9 +1983,9 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1983 if( scb->state & SCB_ISSUED ) { 1983 if( scb->state & SCB_ISSUED ) {
1984 1984
1985 printk(KERN_WARNING 1985 printk(KERN_WARNING
1986 "megaraid: %s-%lx[%x], fw owner.\n", 1986 "megaraid: %s[%x], fw owner.\n",
1987 (aor==SCB_ABORT) ? "ABORTING":"RESET", 1987 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1988 cmd->serial_number, scb->idx); 1988 scb->idx);
1989 1989
1990 return FALSE; 1990 return FALSE;
1991 } 1991 }
@@ -1996,9 +1996,9 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1996 * list 1996 * list
1997 */ 1997 */
1998 printk(KERN_WARNING 1998 printk(KERN_WARNING
1999 "megaraid: %s-%lx[%x], driver owner.\n", 1999 "megaraid: %s-[%x], driver owner.\n",
2000 (aor==SCB_ABORT) ? "ABORTING":"RESET", 2000 (aor==SCB_ABORT) ? "ABORTING":"RESET",
2001 cmd->serial_number, scb->idx); 2001 scb->idx);
2002 2002
2003 mega_free_scb(adapter, scb); 2003 mega_free_scb(adapter, scb);
2004 2004
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 1dba32870b4c..2e6619eff3ea 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -2315,8 +2315,8 @@ megaraid_mbox_dpc(unsigned long devp)
2315 // Was an abort issued for this command earlier 2315 // Was an abort issued for this command earlier
2316 if (scb->state & SCB_ABORT) { 2316 if (scb->state & SCB_ABORT) {
2317 con_log(CL_ANN, (KERN_NOTICE 2317 con_log(CL_ANN, (KERN_NOTICE
2318 "megaraid: aborted cmd %lx[%x] completed\n", 2318 "megaraid: aborted cmd [%x] completed\n",
2319 scp->serial_number, scb->sno)); 2319 scb->sno));
2320 } 2320 }
2321 2321
2322 /* 2322 /*
@@ -2472,8 +2472,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2472 raid_dev = ADAP2RAIDDEV(adapter); 2472 raid_dev = ADAP2RAIDDEV(adapter);
2473 2473
2474 con_log(CL_ANN, (KERN_WARNING 2474 con_log(CL_ANN, (KERN_WARNING
2475 "megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n", 2475 "megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
2476 scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp), 2476 scp->cmnd[0], SCP2CHANNEL(scp),
2477 SCP2TARGET(scp), SCP2LUN(scp))); 2477 SCP2TARGET(scp), SCP2LUN(scp)));
2478 2478
2479 // If FW has stopped responding, simply return failure 2479 // If FW has stopped responding, simply return failure
@@ -2496,9 +2496,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2496 list_del_init(&scb->list); // from completed list 2496 list_del_init(&scb->list); // from completed list
2497 2497
2498 con_log(CL_ANN, (KERN_WARNING 2498 con_log(CL_ANN, (KERN_WARNING
2499 "megaraid: %ld:%d[%d:%d], abort from completed list\n", 2499 "megaraid: %d[%d:%d], abort from completed list\n",
2500 scp->serial_number, scb->sno, 2500 scb->sno, scb->dev_channel, scb->dev_target));
2501 scb->dev_channel, scb->dev_target));
2502 2501
2503 scp->result = (DID_ABORT << 16); 2502 scp->result = (DID_ABORT << 16);
2504 scp->scsi_done(scp); 2503 scp->scsi_done(scp);
@@ -2527,9 +2526,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2527 ASSERT(!(scb->state & SCB_ISSUED)); 2526 ASSERT(!(scb->state & SCB_ISSUED));
2528 2527
2529 con_log(CL_ANN, (KERN_WARNING 2528 con_log(CL_ANN, (KERN_WARNING
2530 "megaraid abort: %ld[%d:%d], driver owner\n", 2529 "megaraid abort: [%d:%d], driver owner\n",
2531 scp->serial_number, scb->dev_channel, 2530 scb->dev_channel, scb->dev_target));
2532 scb->dev_target));
2533 2531
2534 scp->result = (DID_ABORT << 16); 2532 scp->result = (DID_ABORT << 16);
2535 scp->scsi_done(scp); 2533 scp->scsi_done(scp);
@@ -2560,25 +2558,21 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2560 2558
2561 if (!(scb->state & SCB_ISSUED)) { 2559 if (!(scb->state & SCB_ISSUED)) {
2562 con_log(CL_ANN, (KERN_WARNING 2560 con_log(CL_ANN, (KERN_WARNING
2563 "megaraid abort: %ld%d[%d:%d], invalid state\n", 2561 "megaraid abort: %d[%d:%d], invalid state\n",
2564 scp->serial_number, scb->sno, scb->dev_channel, 2562 scb->sno, scb->dev_channel, scb->dev_target));
2565 scb->dev_target));
2566 BUG(); 2563 BUG();
2567 } 2564 }
2568 else { 2565 else {
2569 con_log(CL_ANN, (KERN_WARNING 2566 con_log(CL_ANN, (KERN_WARNING
2570 "megaraid abort: %ld:%d[%d:%d], fw owner\n", 2567 "megaraid abort: %d[%d:%d], fw owner\n",
2571 scp->serial_number, scb->sno, scb->dev_channel, 2568 scb->sno, scb->dev_channel, scb->dev_target));
2572 scb->dev_target));
2573 } 2569 }
2574 } 2570 }
2575 } 2571 }
2576 spin_unlock_irq(&adapter->lock); 2572 spin_unlock_irq(&adapter->lock);
2577 2573
2578 if (!found) { 2574 if (!found) {
2579 con_log(CL_ANN, (KERN_WARNING 2575 con_log(CL_ANN, (KERN_WARNING "megaraid abort: do not own\n"));
2580 "megaraid abort: scsi cmd:%ld, do now own\n",
2581 scp->serial_number));
2582 2576
2583 // FIXME: Should there be a callback for this command? 2577 // FIXME: Should there be a callback for this command?
2584 return SUCCESS; 2578 return SUCCESS;
@@ -2649,9 +2643,8 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2649 } else { 2643 } else {
2650 if (scb->scp == scp) { // Found command 2644 if (scb->scp == scp) { // Found command
2651 con_log(CL_ANN, (KERN_WARNING 2645 con_log(CL_ANN, (KERN_WARNING
2652 "megaraid: %ld:%d[%d:%d], reset from pending list\n", 2646 "megaraid: %d[%d:%d], reset from pending list\n",
2653 scp->serial_number, scb->sno, 2647 scb->sno, scb->dev_channel, scb->dev_target));
2654 scb->dev_channel, scb->dev_target));
2655 } else { 2648 } else {
2656 con_log(CL_ANN, (KERN_WARNING 2649 con_log(CL_ANN, (KERN_WARNING
2657 "megaraid: IO packet with %d[%d:%d] being reset\n", 2650 "megaraid: IO packet with %d[%d:%d] being reset\n",
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 66d4cea4df98..89c623ebadbc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1751,10 +1751,9 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1751 list_del_init(&reset_cmd->list); 1751 list_del_init(&reset_cmd->list);
1752 if (reset_cmd->scmd) { 1752 if (reset_cmd->scmd) {
1753 reset_cmd->scmd->result = DID_RESET << 16; 1753 reset_cmd->scmd->result = DID_RESET << 16;
1754 printk(KERN_NOTICE "%d:%p reset [%02x], %#lx\n", 1754 printk(KERN_NOTICE "%d:%p reset [%02x]\n",
1755 reset_index, reset_cmd, 1755 reset_index, reset_cmd,
1756 reset_cmd->scmd->cmnd[0], 1756 reset_cmd->scmd->cmnd[0]);
1757 reset_cmd->scmd->serial_number);
1758 1757
1759 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 1758 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
1760 megasas_return_cmd(instance, reset_cmd); 1759 megasas_return_cmd(instance, reset_cmd);
@@ -1879,8 +1878,8 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1879 1878
1880 instance = (struct megasas_instance *)scmd->device->host->hostdata; 1879 instance = (struct megasas_instance *)scmd->device->host->hostdata;
1881 1880
1882 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n", 1881 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
1883 scmd->serial_number, scmd->cmnd[0], scmd->retries); 1882 scmd->cmnd[0], scmd->retries);
1884 1883
1885 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 1884 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
1886 printk(KERN_ERR "megasas: cannot recover from previous reset " 1885 printk(KERN_ERR "megasas: cannot recover from previous reset "
@@ -2349,9 +2348,9 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
2349 cmd->frame_phys_addr , 2348 cmd->frame_phys_addr ,
2350 0, instance->reg_set); 2349 0, instance->reg_set);
2351 } else if (cmd->scmd) { 2350 } else if (cmd->scmd) {
2352 printk(KERN_NOTICE "megasas: %p scsi cmd [%02x],%#lx" 2351 printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
2353 "detected on the internal queue, issue again.\n", 2352 "detected on the internal queue, issue again.\n",
2354 cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number); 2353 cmd, cmd->scmd->cmnd[0]);
2355 2354
2356 atomic_inc(&instance->fw_outstanding); 2355 atomic_inc(&instance->fw_outstanding);
2357 instance->instancet->fire_cmd(instance, 2356 instance->instancet->fire_cmd(instance,
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 197aa1b3f0f3..494474779532 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -415,8 +415,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
415#if 1 415#if 1
416 if (DEBUG_TARGET(cmd)) { 416 if (DEBUG_TARGET(cmd)) {
417 int i; 417 int i;
418 printk(KERN_DEBUG "mesh_start: %p ser=%lu tgt=%d cmd=", 418 printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id);
419 cmd, cmd->serial_number, id);
420 for (i = 0; i < cmd->cmd_len; ++i) 419 for (i = 0; i < cmd->cmd_len; ++i)
421 printk(" %x", cmd->cmnd[i]); 420 printk(" %x", cmd->cmnd[i]);
422 printk(" use_sg=%d buffer=%p bufflen=%u\n", 421 printk(" use_sg=%d buffer=%p bufflen=%u\n",
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 3346357031e9..efa0255491c2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -522,7 +522,8 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
522 desc = "Device Status Change"; 522 desc = "Device Status Change";
523 break; 523 break;
524 case MPI2_EVENT_IR_OPERATION_STATUS: 524 case MPI2_EVENT_IR_OPERATION_STATUS:
525 desc = "IR Operation Status"; 525 if (!ioc->hide_ir_msg)
526 desc = "IR Operation Status";
526 break; 527 break;
527 case MPI2_EVENT_SAS_DISCOVERY: 528 case MPI2_EVENT_SAS_DISCOVERY:
528 { 529 {
@@ -553,16 +554,20 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
553 desc = "SAS Enclosure Device Status Change"; 554 desc = "SAS Enclosure Device Status Change";
554 break; 555 break;
555 case MPI2_EVENT_IR_VOLUME: 556 case MPI2_EVENT_IR_VOLUME:
556 desc = "IR Volume"; 557 if (!ioc->hide_ir_msg)
558 desc = "IR Volume";
557 break; 559 break;
558 case MPI2_EVENT_IR_PHYSICAL_DISK: 560 case MPI2_EVENT_IR_PHYSICAL_DISK:
559 desc = "IR Physical Disk"; 561 if (!ioc->hide_ir_msg)
562 desc = "IR Physical Disk";
560 break; 563 break;
561 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 564 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
562 desc = "IR Configuration Change List"; 565 if (!ioc->hide_ir_msg)
566 desc = "IR Configuration Change List";
563 break; 567 break;
564 case MPI2_EVENT_LOG_ENTRY_ADDED: 568 case MPI2_EVENT_LOG_ENTRY_ADDED:
565 desc = "Log Entry Added"; 569 if (!ioc->hide_ir_msg)
570 desc = "Log Entry Added";
566 break; 571 break;
567 } 572 }
568 573
@@ -616,7 +621,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
616 originator_str = "PL"; 621 originator_str = "PL";
617 break; 622 break;
618 case 2: 623 case 2:
619 originator_str = "IR"; 624 if (!ioc->hide_ir_msg)
625 originator_str = "IR";
626 else
627 originator_str = "WarpDrive";
620 break; 628 break;
621 } 629 }
622 630
@@ -1508,6 +1516,7 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1508 } 1516 }
1509 ioc->scsi_lookup[i].cb_idx = 0xFF; 1517 ioc->scsi_lookup[i].cb_idx = 0xFF;
1510 ioc->scsi_lookup[i].scmd = NULL; 1518 ioc->scsi_lookup[i].scmd = NULL;
1519 ioc->scsi_lookup[i].direct_io = 0;
1511 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 1520 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1512 &ioc->free_list); 1521 &ioc->free_list);
1513 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1522 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -1844,10 +1853,12 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1844 printk("), "); 1853 printk("), ");
1845 printk("Capabilities=("); 1854 printk("Capabilities=(");
1846 1855
1847 if (ioc->facts.IOCCapabilities & 1856 if (!ioc->hide_ir_msg) {
1848 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { 1857 if (ioc->facts.IOCCapabilities &
1849 printk("Raid"); 1858 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
1850 i++; 1859 printk("Raid");
1860 i++;
1861 }
1851 } 1862 }
1852 1863
1853 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { 1864 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
@@ -3680,6 +3691,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3680 u32 reply_address; 3691 u32 reply_address;
3681 u16 smid; 3692 u16 smid;
3682 struct _tr_list *delayed_tr, *delayed_tr_next; 3693 struct _tr_list *delayed_tr, *delayed_tr_next;
3694 u8 hide_flag;
3683 3695
3684 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3696 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3685 __func__)); 3697 __func__));
@@ -3706,6 +3718,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3706 ioc->scsi_lookup[i].cb_idx = 0xFF; 3718 ioc->scsi_lookup[i].cb_idx = 0xFF;
3707 ioc->scsi_lookup[i].smid = smid; 3719 ioc->scsi_lookup[i].smid = smid;
3708 ioc->scsi_lookup[i].scmd = NULL; 3720 ioc->scsi_lookup[i].scmd = NULL;
3721 ioc->scsi_lookup[i].direct_io = 0;
3709 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 3722 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
3710 &ioc->free_list); 3723 &ioc->free_list);
3711 } 3724 }
@@ -3766,6 +3779,15 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3766 if (sleep_flag == CAN_SLEEP) 3779 if (sleep_flag == CAN_SLEEP)
3767 _base_static_config_pages(ioc); 3780 _base_static_config_pages(ioc);
3768 3781
3782 if (ioc->wait_for_port_enable_to_complete && ioc->is_warpdrive) {
3783 if (ioc->manu_pg10.OEMIdentifier == 0x80) {
3784 hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
3785 MFG_PAGE10_HIDE_SSDS_MASK);
3786 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
3787 ioc->mfg_pg10_hide_flag = hide_flag;
3788 }
3789 }
3790
3769 if (ioc->wait_for_port_enable_to_complete) { 3791 if (ioc->wait_for_port_enable_to_complete) {
3770 if (diag_buffer_enable != 0) 3792 if (diag_buffer_enable != 0)
3771 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); 3793 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
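The new mpt2sas probe logic reads a vendor policy out of Manufacturing Page 10: only when the OEM identifier matches (0x80) does it mask out the two-bit hide-SSDS policy and cache it, treating the all-ones encoding as invalid. A hedged sketch of that extraction, using the constants added in the header hunk below but with a deliberately simplified page layout:

#include <stdio.h>
#include <stdint.h>

#define MFG_PAGE10_HIDE_SSDS_MASK      (0x00000003)
#define MFG_PAGE10_HIDE_ALL_DISKS      (0x00)
#define MFG_PAGE10_EXPOSE_ALL_DISKS    (0x01)
#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02)

/* Simplified stand-in for Manufacturing Page 10. */
struct manu_pg10 {
    uint8_t  OEMIdentifier;
    uint32_t OEMSpecificFlags0;
};

static int parse_hide_flag(const struct manu_pg10 *pg, uint8_t *out)
{
    uint8_t hide;

    if (pg->OEMIdentifier != 0x80)  /* not the WarpDrive OEM */
        return -1;
    hide = (uint8_t)(pg->OEMSpecificFlags0 & MFG_PAGE10_HIDE_SSDS_MASK);
    if (hide == MFG_PAGE10_HIDE_SSDS_MASK)  /* reserved encoding */
        return -1;
    *out = hide;
    return 0;
}

int main(void)
{
    struct manu_pg10 pg = { 0x80, MFG_PAGE10_HIDE_IF_VOL_PRESENT };
    uint8_t hide;

    if (!parse_hide_flag(&pg, &hide))
        printf("hide policy = %u\n", hide);
    return 0;
}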
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 500328245f61..2a3c05f6db8b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "08.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "08.100.00.01"
73#define MPT2SAS_MAJOR_VERSION 08 73#define MPT2SAS_MAJOR_VERSION 08
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 01
77 77
78/* 78/*
79 * Set MPT2SAS_SG_DEPTH value based on user input. 79 * Set MPT2SAS_SG_DEPTH value based on user input.
@@ -189,6 +189,16 @@
189#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 189#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046
190 190
191/* 191/*
192 * WarpDrive Specific Log codes
193 */
194
195#define MPT2_WARPDRIVE_LOGENTRY (0x8002)
196#define MPT2_WARPDRIVE_LC_SSDT (0x41)
197#define MPT2_WARPDRIVE_LC_SSDLW (0x43)
198#define MPT2_WARPDRIVE_LC_SSDLF (0x44)
199#define MPT2_WARPDRIVE_LC_BRMF (0x4D)
200
201/*
192 * per target private data 202 * per target private data
193 */ 203 */
194#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01 204#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
@@ -199,6 +209,7 @@
199 * struct MPT2SAS_TARGET - starget private hostdata 209 * struct MPT2SAS_TARGET - starget private hostdata
200 * @starget: starget object 210 * @starget: starget object
201 * @sas_address: target sas address 211 * @sas_address: target sas address
212 * @raid_device: raid_device pointer to access volume data
202 * @handle: device handle 213 * @handle: device handle
203 * @num_luns: number luns 214 * @num_luns: number luns
204 * @flags: MPT_TARGET_FLAGS_XXX flags 215 * @flags: MPT_TARGET_FLAGS_XXX flags
@@ -208,6 +219,7 @@
208struct MPT2SAS_TARGET { 219struct MPT2SAS_TARGET {
209 struct scsi_target *starget; 220 struct scsi_target *starget;
210 u64 sas_address; 221 u64 sas_address;
222 struct _raid_device *raid_device;
211 u16 handle; 223 u16 handle;
212 int num_luns; 224 int num_luns;
213 u32 flags; 225 u32 flags;
@@ -215,6 +227,7 @@ struct MPT2SAS_TARGET {
215 u8 tm_busy; 227 u8 tm_busy;
216}; 228};
217 229
230
218/* 231/*
219 * per device private data 232 * per device private data
220 */ 233 */
@@ -262,6 +275,12 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_10 {
262 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10, 275 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10,
263 Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t; 276 Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t;
264 277
278#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
279#define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
280#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01)
281#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02)
282
283
265struct MPT2SAS_DEVICE { 284struct MPT2SAS_DEVICE {
266 struct MPT2SAS_TARGET *sas_target; 285 struct MPT2SAS_TARGET *sas_target;
267 unsigned int lun; 286 unsigned int lun;
@@ -341,6 +360,7 @@ struct _sas_device {
341 * @sdev: scsi device struct (volumes are single lun) 360 * @sdev: scsi device struct (volumes are single lun)
342 * @wwid: unique identifier for the volume 361 * @wwid: unique identifier for the volume
343 * @handle: device handle 362 * @handle: device handle
363 * @block_size: Block size of the volume
344 * @id: target id 364 * @id: target id
345 * @channel: target channel 365 * @channel: target channel
346 * @volume_type: the raid level 366 * @volume_type: the raid level
@@ -348,20 +368,33 @@ struct _sas_device {
348 * @num_pds: number of hidden raid components 368 * @num_pds: number of hidden raid components
349 * @responding: used in _scsih_raid_device_mark_responding 369 * @responding: used in _scsih_raid_device_mark_responding
350 * @percent_complete: resync percent complete 370 * @percent_complete: resync percent complete
371 * @direct_io_enabled: Whether direct I/O to PDs is allowed or not
372 * @stripe_exponent: X where 2^X is the stripe size in blocks
373 * @max_lba: Maximum number of LBA in the volume
374 * @stripe_sz: Stripe Size of the volume
375 * @device_info: Device info of the volume member disk
376 * @pd_handle: Array of handles of the physical drives for direct I/O in le16
351 */ 377 */
378#define MPT_MAX_WARPDRIVE_PDS 8
352struct _raid_device { 379struct _raid_device {
353 struct list_head list; 380 struct list_head list;
354 struct scsi_target *starget; 381 struct scsi_target *starget;
355 struct scsi_device *sdev; 382 struct scsi_device *sdev;
356 u64 wwid; 383 u64 wwid;
357 u16 handle; 384 u16 handle;
385 u16 block_sz;
358 int id; 386 int id;
359 int channel; 387 int channel;
360 u8 volume_type; 388 u8 volume_type;
361 u32 device_info;
362 u8 num_pds; 389 u8 num_pds;
363 u8 responding; 390 u8 responding;
364 u8 percent_complete; 391 u8 percent_complete;
392 u8 direct_io_enabled;
393 u8 stripe_exponent;
394 u64 max_lba;
395 u32 stripe_sz;
396 u32 device_info;
397 u16 pd_handle[MPT_MAX_WARPDRIVE_PDS];
365}; 398};
366 399
367/** 400/**
@@ -470,6 +503,7 @@ struct chain_tracker {
470 * @smid: system message id 503 * @smid: system message id
471 * @scmd: scsi request pointer 504 * @scmd: scsi request pointer
472 * @cb_idx: callback index 505 * @cb_idx: callback index
506 * @direct_io: To indicate whether I/O is direct (WARPDRIVE)
473 * @chain_list: list of chains associated to this IO 507 * @chain_list: list of chains associated to this IO
474 * @tracker_list: list of free request (ioc->free_list) 508 * @tracker_list: list of free request (ioc->free_list)
475 */ 509 */
@@ -477,14 +511,14 @@ struct scsiio_tracker {
477 u16 smid; 511 u16 smid;
478 struct scsi_cmnd *scmd; 512 struct scsi_cmnd *scmd;
479 u8 cb_idx; 513 u8 cb_idx;
514 u8 direct_io;
480 struct list_head chain_list; 515 struct list_head chain_list;
481 struct list_head tracker_list; 516 struct list_head tracker_list;
482}; 517};
483 518
484/** 519/**
485 * struct request_tracker - misc mf request tracker 520 * struct request_tracker - firmware request tracker
486 * @smid: system message id 521 * @smid: system message id
487 * @scmd: scsi request pointer
488 * @cb_idx: callback index 522 * @cb_idx: callback index
489 * @tracker_list: list of free request (ioc->free_list) 523 * @tracker_list: list of free request (ioc->free_list)
490 */ 524 */
@@ -832,6 +866,11 @@ struct MPT2SAS_ADAPTER {
832 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; 866 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
833 u32 ring_buffer_offset; 867 u32 ring_buffer_offset;
834 u32 ring_buffer_sz; 868 u32 ring_buffer_sz;
869 u8 is_warpdrive;
870 u8 hide_ir_msg;
871 u8 mfg_pg10_hide_flag;
872 u8 hide_drives;
873
835}; 874};
836 875
837typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 876typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
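The base.h hunks add a per-request direct_io flag to struct scsiio_tracker; the base.c hunks above clear it in both the free path and the init path, since a recycled smid must never inherit a stale flag from the previous I/O. A small sketch of that reset discipline (tracker struct reduced to the fields the patch touches):

#include <stdio.h>

/* Simplified scsiio_tracker: only the fields this patch touches. */
struct scsiio_tracker {
    unsigned short smid;
    void *scmd;
    unsigned char cb_idx;
    unsigned char direct_io;    /* new: WarpDrive direct-I/O flag */
};

/* Both the init path and the free path reset direct_io, so a
 * recycled smid cannot carry the previous request's setting. */
static void tracker_reset(struct scsiio_tracker *st)
{
    st->cb_idx = 0xFF;
    st->scmd = NULL;
    st->direct_io = 0;
}

int main(void)
{
    struct scsiio_tracker st = { .smid = 1, .direct_io = 1 };

    tracker_reset(&st);
    printf("smid %u direct_io=%u\n", st.smid, st.direct_io);
    return 0;
}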
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index d72f1f2b1392..437c2d94c45a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1041,7 +1041,10 @@ _ctl_getiocinfo(void __user *arg)
1041 __func__)); 1041 __func__));
1042 1042
1043 memset(&karg, 0 , sizeof(karg)); 1043 memset(&karg, 0 , sizeof(karg));
1044 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; 1044 if (ioc->is_warpdrive)
1045 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1046 else
1047 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1045 if (ioc->pfacts) 1048 if (ioc->pfacts)
1046 karg.port_number = ioc->pfacts[0].PortNumber; 1049 karg.port_number = ioc->pfacts[0].PortNumber;
1047 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); 1050 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 69916e46e04f..11ff1d5fb8f0 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -133,6 +133,7 @@ struct mpt2_ioctl_pci_info {
133#define MPT2_IOCTL_INTERFACE_FC_IP (0x02) 133#define MPT2_IOCTL_INTERFACE_FC_IP (0x02)
134#define MPT2_IOCTL_INTERFACE_SAS (0x03) 134#define MPT2_IOCTL_INTERFACE_SAS (0x03)
135#define MPT2_IOCTL_INTERFACE_SAS2 (0x04) 135#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
136#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05)
136#define MPT2_IOCTL_VERSION_LENGTH (32) 137#define MPT2_IOCTL_VERSION_LENGTH (32)
137 138
138/** 139/**
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index d2064a0533ae..f12e02358d6d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -233,6 +233,9 @@ static struct pci_device_id scsih_pci_table[] = {
233 PCI_ANY_ID, PCI_ANY_ID }, 233 PCI_ANY_ID, PCI_ANY_ID },
234 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, 234 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
235 PCI_ANY_ID, PCI_ANY_ID }, 235 PCI_ANY_ID, PCI_ANY_ID },
236 /* SSS6200 */
237 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
238 PCI_ANY_ID, PCI_ANY_ID },
236 {0} /* Terminating entry */ 239 {0} /* Terminating entry */
237}; 240};
238MODULE_DEVICE_TABLE(pci, scsih_pci_table); 241MODULE_DEVICE_TABLE(pci, scsih_pci_table);
@@ -1256,6 +1259,7 @@ _scsih_target_alloc(struct scsi_target *starget)
1256 sas_target_priv_data->handle = raid_device->handle; 1259 sas_target_priv_data->handle = raid_device->handle;
1257 sas_target_priv_data->sas_address = raid_device->wwid; 1260 sas_target_priv_data->sas_address = raid_device->wwid;
1258 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; 1261 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1262 sas_target_priv_data->raid_device = raid_device;
1259 raid_device->starget = starget; 1263 raid_device->starget = starget;
1260 } 1264 }
1261 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1265 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
@@ -1455,7 +1459,10 @@ static int
1455_scsih_is_raid(struct device *dev) 1459_scsih_is_raid(struct device *dev)
1456{ 1460{
1457 struct scsi_device *sdev = to_scsi_device(dev); 1461 struct scsi_device *sdev = to_scsi_device(dev);
1462 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
1458 1463
1464 if (ioc->is_warpdrive)
1465 return 0;
1459 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 1466 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1460} 1467}
1461 1468
@@ -1480,7 +1487,7 @@ _scsih_get_resync(struct device *dev)
1480 sdev->channel); 1487 sdev->channel);
1481 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1488 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1482 1489
1483 if (!raid_device) 1490 if (!raid_device || ioc->is_warpdrive)
1484 goto out; 1491 goto out;
1485 1492
1486 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 1493 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
@@ -1640,6 +1647,212 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1640 1647
1641 kfree(vol_pg0); 1648 kfree(vol_pg0);
1642} 1649}
1650/**
1651 * _scsih_disable_ddio - Disable direct I/O for all the volumes
1652 * @ioc: per adapter object
1653 */
1654static void
1655_scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc)
1656{
1657 Mpi2RaidVolPage1_t vol_pg1;
1658 Mpi2ConfigReply_t mpi_reply;
1659 struct _raid_device *raid_device;
1660 u16 handle;
1661 u16 ioc_status;
1662
1663 handle = 0xFFFF;
1664 while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
1665 &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
1666 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1667 MPI2_IOCSTATUS_MASK;
1668 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1669 break;
1670 handle = le16_to_cpu(vol_pg1.DevHandle);
1671 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
1672 if (raid_device)
1673 raid_device->direct_io_enabled = 0;
1674 }
1675 return;
1676}
1677
1678
1679/**
1680 * _scsih_get_num_volumes - Get number of volumes in the ioc
1681 * @ioc: per adapter object
1682 */
1683static u8
1684_scsih_get_num_volumes(struct MPT2SAS_ADAPTER *ioc)
1685{
1686 Mpi2RaidVolPage1_t vol_pg1;
1687 Mpi2ConfigReply_t mpi_reply;
1688 u16 handle;
1689 u8 vol_cnt = 0;
1690 u16 ioc_status;
1691
1692 handle = 0xFFFF;
1693 while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
1694 &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
1695 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1696 MPI2_IOCSTATUS_MASK;
1697 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1698 break;
1699 vol_cnt++;
1700 handle = le16_to_cpu(vol_pg1.DevHandle);
1701 }
1702 return vol_cnt;
1703}
1704
1705
1706/**
1707 * _scsih_init_warpdrive_properties - Set properties for warpdrive direct I/O.
1708 * @ioc: per adapter object
1709 * @raid_device: the raid_device object
1710 */
1711static void
1712_scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
1713 struct _raid_device *raid_device)
1714{
1715 Mpi2RaidVolPage0_t *vol_pg0;
1716 Mpi2RaidPhysDiskPage0_t pd_pg0;
1717 Mpi2ConfigReply_t mpi_reply;
1718 u16 sz;
1719 u8 num_pds, count;
1720 u64 mb = 1024 * 1024;
1721 u64 tb_2 = 2 * mb * mb;
1722 u64 capacity;
1723 u32 stripe_sz;
1724 u8 i, stripe_exp;
1725
1726 if (!ioc->is_warpdrive)
1727 return;
1728
1729 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
1730 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1731 "globally as drives are exposed\n", ioc->name);
1732 return;
1733 }
1734 if (_scsih_get_num_volumes(ioc) > 1) {
1735 _scsih_disable_ddio(ioc);
1736 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1737 "globally as number of drives > 1\n", ioc->name);
1738 return;
1739 }
1740 if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
1741 &num_pds)) || !num_pds) {
1742 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1743 "Failure in computing number of drives\n", ioc->name);
1744 return;
1745 }
1746
1747 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
1748 sizeof(Mpi2RaidVol0PhysDisk_t));
1749 vol_pg0 = kzalloc(sz, GFP_KERNEL);
1750 if (!vol_pg0) {
1751 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1752 "Memory allocation failure for RVPG0\n", ioc->name);
1753 return;
1754 }
1755
1756 if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
1757 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
1758 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1759 "Failure in retrieving RVPG0\n", ioc->name);
1760 kfree(vol_pg0);
1761 return;
1762 }
1763
1764 /*
1765 * WARPDRIVE: If the number of physical disks in a volume exceeds the max pds
1766 * assumed for WARPDRIVE, disable direct I/O
1767 */
1768 if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
1769 printk(MPT2SAS_WARN_FMT "WarpDrive : Direct IO is disabled "
1770 "for the drive with handle(0x%04x): num_mem=%d, "
1771 "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
1772 num_pds, MPT_MAX_WARPDRIVE_PDS);
1773 kfree(vol_pg0);
1774 return;
1775 }
1776 for (count = 0; count < num_pds; count++) {
1777 if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
1778 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
1779 vol_pg0->PhysDisk[count].PhysDiskNum) ||
1780 pd_pg0.DevHandle == MPT2SAS_INVALID_DEVICE_HANDLE) {
1781 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
1782 "disabled for the drive with handle(0x%04x) member"
1783 "handle retrieval failed for member number=%d\n",
1784 ioc->name, raid_device->handle,
1785 vol_pg0->PhysDisk[count].PhysDiskNum);
1786 goto out_error;
1787 }
1788 raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
1789 }
1790
1791 /*
1792 * Assumption for WD: Direct I/O is not supported if the volume is
1793 * not RAID0, if the stripe size is not 64KB, if the block size is
1794 * not 512 and if the volume size is >2TB
1795 */
1796 if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 ||
1797 le16_to_cpu(vol_pg0->BlockSize) != 512) {
1798 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1799 "for the drive with handle(0x%04x): type=%d, "
1800 "s_sz=%uK, blk_size=%u\n", ioc->name,
1801 raid_device->handle, raid_device->volume_type,
1802 le32_to_cpu(vol_pg0->StripeSize)/2,
1803 le16_to_cpu(vol_pg0->BlockSize));
1804 goto out_error;
1805 }
1806
1807 capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) *
1808 (le64_to_cpu(vol_pg0->MaxLBA) + 1);
1809
1810 if (capacity > tb_2) {
1811 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1812 "for the drive with handle(0x%04x) since drive sz > 2TB\n",
1813 ioc->name, raid_device->handle);
1814 goto out_error;
1815 }
1816
1817 stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
1818 stripe_exp = 0;
1819 for (i = 0; i < 32; i++) {
1820 if (stripe_sz & 1)
1821 break;
1822 stripe_exp++;
1823 stripe_sz >>= 1;
1824 }
1825 if (i == 32) {
1826 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
1827 "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
1828 ioc->name, raid_device->handle,
1829 le32_to_cpu(vol_pg0->StripeSize)/2);
1830 goto out_error;
1831 }
1832 raid_device->stripe_exponent = stripe_exp;
1833 raid_device->direct_io_enabled = 1;
1834
1835 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
1836 " with handle(0x%04x)\n", ioc->name, raid_device->handle);
1837 /*
1838 * WARPDRIVE: Though the following fields are not used for direct IO,
1839 * they are stored for future use:
1840 */
1841 raid_device->max_lba = le64_to_cpu(vol_pg0->MaxLBA);
1842 raid_device->stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
1843 raid_device->block_sz = le16_to_cpu(vol_pg0->BlockSize);
1844
1845
1846 kfree(vol_pg0);
1847 return;
1848
1849out_error:
1850 raid_device->direct_io_enabled = 0;
1851 for (count = 0; count < num_pds; count++)
1852 raid_device->pd_handle[count] = 0;
1853 kfree(vol_pg0);
1854 return;
1855}
1643 1856
1644/** 1857/**
1645 * _scsih_enable_tlr - setting TLR flags 1858 * _scsih_enable_tlr - setting TLR flags
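The stripe_exponent loop in _scsih_init_warpdrive_properties is a count-trailing-zeros: it shifts the stripe size (in blocks) until the low set bit appears, and if no set bit is found in 32 iterations the stripe size was zero, so direct I/O is refused. The cached exponent lets the I/O path turn divide/modulo by the stripe size into shift/mask. A standalone sketch:

#include <stdio.h>
#include <stdint.h>

/* Count trailing zeros the way the driver does. Returns -1 for a
 * zero stripe size, in which case direct I/O must stay disabled. */
static int stripe_exponent(uint32_t stripe_sz)
{
    int exp;

    for (exp = 0; exp < 32; exp++) {
        if (stripe_sz & 1)
            return exp;
        stripe_sz >>= 1;
    }
    return -1;  /* stripe_sz was 0 */
}

int main(void)
{
    /* 64 KiB stripe / 512-byte blocks = 128 blocks = 2^7 */
    uint32_t stripe_blocks = 128;
    int exp = stripe_exponent(stripe_blocks);

    if (exp >= 0)
        printf("stripe exponent = %d (lba >> %d gives stripe unit)\n",
               exp, exp);
    return 0;
}

Note that the function additionally requires RAID0, a 512-byte block size, and a volume no larger than 2 TB before enabling direct I/O; the exponent check is only the last gate.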
@@ -1710,6 +1923,11 @@ _scsih_slave_configure(struct scsi_device *sdev)
1710 1923
1711 _scsih_get_volume_capabilities(ioc, raid_device); 1924 _scsih_get_volume_capabilities(ioc, raid_device);
1712 1925
1926 /*
1927 * WARPDRIVE: Initialize the required data for Direct IO
1928 */
1929 _scsih_init_warpdrive_properties(ioc, raid_device);
1930
1713 /* RAID Queue Depth Support 1931 /* RAID Queue Depth Support
1714 * IS volume = underlying qdepth of drive type, either 1932 * IS volume = underlying qdepth of drive type, either
1715 * MPT2SAS_SAS_QUEUE_DEPTH or MPT2SAS_SATA_QUEUE_DEPTH 1933 * MPT2SAS_SAS_QUEUE_DEPTH or MPT2SAS_SATA_QUEUE_DEPTH
@@ -1757,14 +1975,16 @@ _scsih_slave_configure(struct scsi_device *sdev)
1757 break; 1975 break;
1758 } 1976 }
1759 1977
1760 sdev_printk(KERN_INFO, sdev, "%s: " 1978 if (!ioc->hide_ir_msg)
1761 "handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n", 1979 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
1762 r_level, raid_device->handle, 1980 "wwid(0x%016llx), pd_count(%d), type(%s)\n",
1763 (unsigned long long)raid_device->wwid, 1981 r_level, raid_device->handle,
1764 raid_device->num_pds, ds); 1982 (unsigned long long)raid_device->wwid,
1983 raid_device->num_pds, ds);
1765 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 1984 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
1766 /* raid transport support */ 1985 /* raid transport support */
1767 _scsih_set_level(sdev, raid_device); 1986 if (!ioc->is_warpdrive)
1987 _scsih_set_level(sdev, raid_device);
1768 return 0; 1988 return 0;
1769 } 1989 }
1770 1990
@@ -2133,8 +2353,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2133 switch (type) { 2353 switch (type) {
2134 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 2354 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2135 scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task); 2355 scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task);
2136 if (scmd_lookup && (scmd_lookup->serial_number == 2356 if (scmd_lookup)
2137 scmd->serial_number))
2138 rc = FAILED; 2357 rc = FAILED;
2139 else 2358 else
2140 rc = SUCCESS; 2359 rc = SUCCESS;
@@ -2182,16 +2401,20 @@ _scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2182 struct MPT2SAS_TARGET *priv_target = starget->hostdata; 2401 struct MPT2SAS_TARGET *priv_target = starget->hostdata;
2183 struct _sas_device *sas_device = NULL; 2402 struct _sas_device *sas_device = NULL;
2184 unsigned long flags; 2403 unsigned long flags;
2404 char *device_str = NULL;
2185 2405
2186 if (!priv_target) 2406 if (!priv_target)
2187 return; 2407 return;
2408 if (ioc->hide_ir_msg)
2409 device_str = "WarpDrive";
2410 else
2411 device_str = "volume";
2188 2412
2189 scsi_print_command(scmd); 2413 scsi_print_command(scmd);
2190 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { 2414 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2191 starget_printk(KERN_INFO, starget, "volume handle(0x%04x), " 2415 starget_printk(KERN_INFO, starget, "%s handle(0x%04x), "
2192 "volume wwid(0x%016llx)\n", 2416 "%s wwid(0x%016llx)\n", device_str, priv_target->handle,
2193 priv_target->handle, 2417 device_str, (unsigned long long)priv_target->sas_address);
2194 (unsigned long long)priv_target->sas_address);
2195 } else { 2418 } else {
2196 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2419 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2197 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 2420 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
@@ -3130,6 +3353,9 @@ _scsih_check_ir_config_unhide_events(struct MPT2SAS_ADAPTER *ioc,
3130 a = 0; 3353 a = 0;
3131 b = 0; 3354 b = 0;
3132 3355
3356 if (ioc->is_warpdrive)
3357 return;
3358
3133 /* Volume Resets for Deleted or Removed */ 3359 /* Volume Resets for Deleted or Removed */
3134 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 3360 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
3135 for (i = 0; i < event_data->NumElements; i++, element++) { 3361 for (i = 0; i < event_data->NumElements; i++, element++) {
@@ -3347,6 +3573,105 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
3347} 3573}
3348 3574
3349/** 3575/**
3576 * _scsih_scsi_direct_io_get - returns direct io flag
3577 * @ioc: per adapter object
3578 * @smid: system request message index
3579 *
 3580 * Returns the direct_io flag stored for the smid.
3581 */
3582static inline u8
3583_scsih_scsi_direct_io_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
3584{
3585 return ioc->scsi_lookup[smid - 1].direct_io;
3586}
3587
3588/**
3589 * _scsih_scsi_direct_io_set - sets direct io flag
3590 * @ioc: per adapter object
3591 * @smid: system request message index
3592 * @direct_io: Zero or non-zero value to set in the direct_io flag
3593 *
3594 * Returns Nothing.
3595 */
3596static inline void
3597_scsih_scsi_direct_io_set(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
3598{
3599 ioc->scsi_lookup[smid - 1].direct_io = direct_io;
3600}
3601
3602
3603/**
3604 * _scsih_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
3605 * @ioc: per adapter object
3606 * @scmd: pointer to scsi command object
3607 * @raid_device: pointer to raid device data structure
 3608 * @mpi_request: pointer to the SCSI_IO request message frame
3609 * @smid: system request message index
3610 *
3611 * Returns nothing
3612 */
3613static void
3614_scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3615 struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
3616 u16 smid)
3617{
3618 u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
3619 u32 stripe_sz, stripe_exp;
3620 u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2;
3621 u8 cdb0 = scmd->cmnd[0];
3622
3623 /*
 3624 * Try Direct I/O to RAID member disks
3625 */
3626 if (cdb0 == READ_16 || cdb0 == READ_10 ||
3627 cdb0 == WRITE_16 || cdb0 == WRITE_10) {
3628 cdb_ptr = mpi_request->CDB.CDB32;
3629
3630 if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
3631 | cdb_ptr[5])) {
3632 io_size = scsi_bufflen(scmd) >> 9;
3633 /* get virtual lba */
3634 lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ? &cdb_ptr[2] :
3635 &cdb_ptr[6];
3636 tmp_ptr = (u8 *)&v_lba + 3;
3637 *tmp_ptr-- = *lba_ptr1++;
3638 *tmp_ptr-- = *lba_ptr1++;
3639 *tmp_ptr-- = *lba_ptr1++;
3640 *tmp_ptr = *lba_ptr1;
3641
3642 if (((u64)v_lba + (u64)io_size - 1) <=
3643 (u32)raid_device->max_lba) {
3644 stripe_sz = raid_device->stripe_sz;
3645 stripe_exp = raid_device->stripe_exponent;
3646 stripe_off = v_lba & (stripe_sz - 1);
3647
3648 /* Check whether IO falls within a stripe */
3649 if ((stripe_off + io_size) <= stripe_sz) {
3650 num_pds = raid_device->num_pds;
3651 p_lba = v_lba >> stripe_exp;
3652 stripe_unit = p_lba / num_pds;
3653 column = p_lba % num_pds;
3654 p_lba = (stripe_unit << stripe_exp) +
3655 stripe_off;
3656 mpi_request->DevHandle =
3657 cpu_to_le16(raid_device->
3658 pd_handle[column]);
3659 tmp_ptr = (u8 *)&p_lba + 3;
3660 *lba_ptr2++ = *tmp_ptr--;
3661 *lba_ptr2++ = *tmp_ptr--;
3662 *lba_ptr2++ = *tmp_ptr--;
3663 *lba_ptr2 = *tmp_ptr;
3664 /*
 3665 * WD: To indicate this I/O is direct I/O
3666 */
3667 _scsih_scsi_direct_io_set(ioc, smid, 1);
3668 }
3669 }
3670 }
3671 }
3672}
3673
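
The byte loops above swizzle the big-endian CDB LBA field into a host-order u32 and back; the arithmetic in between is plain RAID-0 address translation. A self-contained sketch with hypothetical numbers (a stripe of 128 blocks, four member disks, virtual LBA 1000 -- none of these values come from the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned int stripe_sz = 128, stripe_exp = 7, num_pds = 4;
		unsigned int v_lba = 1000;
		unsigned int stripe_off, stripe_unit, column, p_lba;

		stripe_off  = v_lba & (stripe_sz - 1);	/* 1000 % 128 = 104 */
		p_lba       = v_lba >> stripe_exp;	/* stripe number 7 */
		stripe_unit = p_lba / num_pds;		/* row 1 on each disk */
		column      = p_lba % num_pds;		/* member disk 3 */
		p_lba       = (stripe_unit << stripe_exp) + stripe_off;

		/* prints "disk 3, lba 232" */
		printf("disk %u, lba %u\n", column, p_lba);
		return 0;
	}
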
3674/**
3350 * _scsih_qcmd - main scsi request entry point 3675 * _scsih_qcmd - main scsi request entry point
3351 * @scmd: pointer to scsi command object 3676 * @scmd: pointer to scsi command object
3352 * @done: function pointer to be invoked on completion 3677 * @done: function pointer to be invoked on completion
@@ -3363,6 +3688,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3363 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3688 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3364 struct MPT2SAS_DEVICE *sas_device_priv_data; 3689 struct MPT2SAS_DEVICE *sas_device_priv_data;
3365 struct MPT2SAS_TARGET *sas_target_priv_data; 3690 struct MPT2SAS_TARGET *sas_target_priv_data;
3691 struct _raid_device *raid_device;
3366 Mpi2SCSIIORequest_t *mpi_request; 3692 Mpi2SCSIIORequest_t *mpi_request;
3367 u32 mpi_control; 3693 u32 mpi_control;
3368 u16 smid; 3694 u16 smid;
@@ -3424,8 +3750,10 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3424 3750
3425 } else 3751 } else
3426 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 3752 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3427 /* Make sure Device is not raid volume */ 3753 /* Make sure Device is not raid volume.
3428 if (!_scsih_is_raid(&scmd->device->sdev_gendev) && 3754 * We do not expose raid functionality to upper layer for warpdrive.
3755 */
3756 if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) &&
3429 sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) 3757 sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
3430 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 3758 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
3431 3759
@@ -3473,9 +3801,14 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3473 } 3801 }
3474 } 3802 }
3475 3803
3804 raid_device = sas_target_priv_data->raid_device;
3805 if (raid_device && raid_device->direct_io_enabled)
3806 _scsih_setup_direct_io(ioc, scmd, raid_device, mpi_request,
3807 smid);
3808
3476 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) 3809 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
3477 mpt2sas_base_put_smid_scsi_io(ioc, smid, 3810 mpt2sas_base_put_smid_scsi_io(ioc, smid,
3478 sas_device_priv_data->sas_target->handle); 3811 le16_to_cpu(mpi_request->DevHandle));
3479 else 3812 else
3480 mpt2sas_base_put_smid_default(ioc, smid); 3813 mpt2sas_base_put_smid_default(ioc, smid);
3481 return 0; 3814 return 0;
@@ -3540,10 +3873,16 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3540 unsigned long flags; 3873 unsigned long flags;
3541 struct scsi_target *starget = scmd->device->sdev_target; 3874 struct scsi_target *starget = scmd->device->sdev_target;
3542 struct MPT2SAS_TARGET *priv_target = starget->hostdata; 3875 struct MPT2SAS_TARGET *priv_target = starget->hostdata;
3876 char *device_str = NULL;
3543 3877
3544 if (!priv_target) 3878 if (!priv_target)
3545 return; 3879 return;
3546 3880
3881 if (ioc->hide_ir_msg)
3882 device_str = "WarpDrive";
3883 else
3884 device_str = "volume";
3885
3547 if (log_info == 0x31170000) 3886 if (log_info == 0x31170000)
3548 return; 3887 return;
3549 3888
@@ -3660,8 +3999,8 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3660 scsi_print_command(scmd); 3999 scsi_print_command(scmd);
3661 4000
3662 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { 4001 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3663 printk(MPT2SAS_WARN_FMT "\tvolume wwid(0x%016llx)\n", ioc->name, 4002 printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
3664 (unsigned long long)priv_target->sas_address); 4003 device_str, (unsigned long long)priv_target->sas_address);
3665 } else { 4004 } else {
3666 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4005 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3667 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 4006 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
@@ -3840,6 +4179,20 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3840 scmd->result = DID_NO_CONNECT << 16; 4179 scmd->result = DID_NO_CONNECT << 16;
3841 goto out; 4180 goto out;
3842 } 4181 }
4182 /*
 4183 * WARPDRIVE: If direct_io is set, this was a direct I/O;
 4184 * the failed direct I/O should be redirected to the volume
4185 */
4186 if (_scsih_scsi_direct_io_get(ioc, smid)) {
4187 _scsih_scsi_direct_io_set(ioc, smid, 0);
4188 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4189 mpi_request->DevHandle =
4190 cpu_to_le16(sas_device_priv_data->sas_target->handle);
4191 mpt2sas_base_put_smid_scsi_io(ioc, smid,
4192 sas_device_priv_data->sas_target->handle);
4193 return 0;
4194 }
4195
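
Clearing the flag before the resend guarantees a request is redirected at most once: if the retry through the volume also fails, it completes through the normal error path. Clean fast-path I/Os complete in the surrounding driver without a full reply frame, so they do not reach this block. The pattern in isolation (illustrative types, not driver code):

	struct request_ctx {
		int direct_io;		/* 1 while on the fast path */
	};

	static int complete_or_redirect(struct request_ctx *ctx,
			int (*resend_to_volume)(struct request_ctx *))
	{
		if (ctx->direct_io) {
			ctx->direct_io = 0;	/* at most one redirect */
			return resend_to_volume(ctx);
		}
		return 0;			/* normal completion */
	}
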
3843 4196
3844 /* turning off TLR */ 4197 /* turning off TLR */
3845 scsi_state = mpi_reply->SCSIState; 4198 scsi_state = mpi_reply->SCSIState;
@@ -3848,7 +4201,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3848 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; 4201 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
3849 if (!sas_device_priv_data->tlr_snoop_check) { 4202 if (!sas_device_priv_data->tlr_snoop_check) {
3850 sas_device_priv_data->tlr_snoop_check++; 4203 sas_device_priv_data->tlr_snoop_check++;
3851 if (!_scsih_is_raid(&scmd->device->sdev_gendev) && 4204 /* Make sure Device is not raid volume.
4205 * We do not expose raid functionality to upper layer for warpdrive.
4206 */
4207 if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) &&
3852 sas_is_tlr_enabled(scmd->device) && 4208 sas_is_tlr_enabled(scmd->device) &&
3853 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { 4209 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
3854 sas_disable_tlr(scmd->device); 4210 sas_disable_tlr(scmd->device);
@@ -4681,8 +5037,10 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
4681 5037
4682 _scsih_ublock_io_device(ioc, sas_device_backup.handle); 5038 _scsih_ublock_io_device(ioc, sas_device_backup.handle);
4683 5039
4684 mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address, 5040 if (!ioc->hide_drives)
4685 sas_device_backup.sas_address_parent); 5041 mpt2sas_transport_port_remove(ioc,
5042 sas_device_backup.sas_address,
5043 sas_device_backup.sas_address_parent);
4686 5044
4687 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr" 5045 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
4688 "(0x%016llx)\n", ioc->name, sas_device_backup.handle, 5046 "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
@@ -5413,6 +5771,7 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
5413 &sas_device->volume_wwid); 5771 &sas_device->volume_wwid);
5414 set_bit(handle, ioc->pd_handles); 5772 set_bit(handle, ioc->pd_handles);
5415 _scsih_reprobe_target(sas_device->starget, 1); 5773 _scsih_reprobe_target(sas_device->starget, 1);
5774
5416} 5775}
5417 5776
5418/** 5777/**
@@ -5591,7 +5950,8 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
5591 Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; 5950 Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
5592 5951
5593#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5952#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
5594 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 5953 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
5954 && !ioc->hide_ir_msg)
5595 _scsih_sas_ir_config_change_event_debug(ioc, event_data); 5955 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
5596 5956
5597#endif 5957#endif
@@ -5614,16 +5974,20 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
5614 le16_to_cpu(element->VolDevHandle)); 5974 le16_to_cpu(element->VolDevHandle));
5615 break; 5975 break;
5616 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 5976 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
5617 _scsih_sas_pd_hide(ioc, element); 5977 if (!ioc->is_warpdrive)
5978 _scsih_sas_pd_hide(ioc, element);
5618 break; 5979 break;
5619 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: 5980 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
5620 _scsih_sas_pd_expose(ioc, element); 5981 if (!ioc->is_warpdrive)
5982 _scsih_sas_pd_expose(ioc, element);
5621 break; 5983 break;
5622 case MPI2_EVENT_IR_CHANGE_RC_HIDE: 5984 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
5623 _scsih_sas_pd_add(ioc, element); 5985 if (!ioc->is_warpdrive)
5986 _scsih_sas_pd_add(ioc, element);
5624 break; 5987 break;
5625 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: 5988 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
5626 _scsih_sas_pd_delete(ioc, element); 5989 if (!ioc->is_warpdrive)
5990 _scsih_sas_pd_delete(ioc, element);
5627 break; 5991 break;
5628 } 5992 }
5629 } 5993 }
@@ -5654,9 +6018,10 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
5654 6018
5655 handle = le16_to_cpu(event_data->VolDevHandle); 6019 handle = le16_to_cpu(event_data->VolDevHandle);
5656 state = le32_to_cpu(event_data->NewValue); 6020 state = le32_to_cpu(event_data->NewValue);
5657 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " 6021 if (!ioc->hide_ir_msg)
5658 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, 6022 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), "
5659 le32_to_cpu(event_data->PreviousValue), state)); 6023 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
6024 le32_to_cpu(event_data->PreviousValue), state));
5660 6025
5661 switch (state) { 6026 switch (state) {
5662 case MPI2_RAID_VOL_STATE_MISSING: 6027 case MPI2_RAID_VOL_STATE_MISSING:
@@ -5736,9 +6101,10 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
5736 handle = le16_to_cpu(event_data->PhysDiskDevHandle); 6101 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
5737 state = le32_to_cpu(event_data->NewValue); 6102 state = le32_to_cpu(event_data->NewValue);
5738 6103
5739 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " 6104 if (!ioc->hide_ir_msg)
5740 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, 6105 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), "
5741 le32_to_cpu(event_data->PreviousValue), state)); 6106 "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
6107 le32_to_cpu(event_data->PreviousValue), state));
5742 6108
5743 switch (state) { 6109 switch (state) {
5744 case MPI2_RAID_PD_STATE_ONLINE: 6110 case MPI2_RAID_PD_STATE_ONLINE:
@@ -5747,7 +6113,8 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
5747 case MPI2_RAID_PD_STATE_OPTIMAL: 6113 case MPI2_RAID_PD_STATE_OPTIMAL:
5748 case MPI2_RAID_PD_STATE_HOT_SPARE: 6114 case MPI2_RAID_PD_STATE_HOT_SPARE:
5749 6115
5750 set_bit(handle, ioc->pd_handles); 6116 if (!ioc->is_warpdrive)
6117 set_bit(handle, ioc->pd_handles);
5751 6118
5752 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6119 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5753 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6120 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
@@ -5851,7 +6218,8 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
5851 u16 handle; 6218 u16 handle;
5852 6219
5853#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 6220#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
5854 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 6221 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6222 && !ioc->hide_ir_msg)
5855 _scsih_sas_ir_operation_status_event_debug(ioc, 6223 _scsih_sas_ir_operation_status_event_debug(ioc,
5856 event_data); 6224 event_data);
5857#endif 6225#endif
@@ -5910,7 +6278,7 @@ static void
5910_scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, 6278_scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5911 u16 slot, u16 handle) 6279 u16 slot, u16 handle)
5912{ 6280{
5913 struct MPT2SAS_TARGET *sas_target_priv_data; 6281 struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
5914 struct scsi_target *starget; 6282 struct scsi_target *starget;
5915 struct _sas_device *sas_device; 6283 struct _sas_device *sas_device;
5916 unsigned long flags; 6284 unsigned long flags;
@@ -5918,7 +6286,7 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5918 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6286 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5919 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 6287 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
5920 if (sas_device->sas_address == sas_address && 6288 if (sas_device->sas_address == sas_address &&
5921 sas_device->slot == slot && sas_device->starget) { 6289 sas_device->slot == slot) {
5922 sas_device->responding = 1; 6290 sas_device->responding = 1;
5923 starget = sas_device->starget; 6291 starget = sas_device->starget;
5924 if (starget && starget->hostdata) { 6292 if (starget && starget->hostdata) {
@@ -5927,13 +6295,15 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5927 sas_target_priv_data->deleted = 0; 6295 sas_target_priv_data->deleted = 0;
5928 } else 6296 } else
5929 sas_target_priv_data = NULL; 6297 sas_target_priv_data = NULL;
5930 starget_printk(KERN_INFO, sas_device->starget, 6298 if (starget)
5931 "handle(0x%04x), sas_addr(0x%016llx), enclosure " 6299 starget_printk(KERN_INFO, starget,
5932 "logical id(0x%016llx), slot(%d)\n", handle, 6300 "handle(0x%04x), sas_addr(0x%016llx), "
5933 (unsigned long long)sas_device->sas_address, 6301 "enclosure logical id(0x%016llx), "
5934 (unsigned long long) 6302 "slot(%d)\n", handle,
5935 sas_device->enclosure_logical_id, 6303 (unsigned long long)sas_device->sas_address,
5936 sas_device->slot); 6304 (unsigned long long)
6305 sas_device->enclosure_logical_id,
6306 sas_device->slot);
5937 if (sas_device->handle == handle) 6307 if (sas_device->handle == handle)
5938 goto out; 6308 goto out;
5939 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 6309 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
@@ -6025,6 +6395,12 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
6025 starget_printk(KERN_INFO, raid_device->starget, 6395 starget_printk(KERN_INFO, raid_device->starget,
6026 "handle(0x%04x), wwid(0x%016llx)\n", handle, 6396 "handle(0x%04x), wwid(0x%016llx)\n", handle,
6027 (unsigned long long)raid_device->wwid); 6397 (unsigned long long)raid_device->wwid);
6398 /*
6399 * WARPDRIVE: The handles of the PDs might have changed
6400 * across the host reset so re-initialize the
6401 * required data for Direct IO
6402 */
6403 _scsih_init_warpdrive_properties(ioc, raid_device);
6028 if (raid_device->handle == handle) 6404 if (raid_device->handle == handle)
6029 goto out; 6405 goto out;
6030 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 6406 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
@@ -6086,18 +6462,20 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
6086 } 6462 }
6087 6463
6088 /* refresh the pd_handles */ 6464 /* refresh the pd_handles */
6089 phys_disk_num = 0xFF; 6465 if (!ioc->is_warpdrive) {
6090 memset(ioc->pd_handles, 0, ioc->pd_handles_sz); 6466 phys_disk_num = 0xFF;
6091 while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 6467 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
6092 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, 6468 while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
6093 phys_disk_num))) { 6469 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
6094 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6470 phys_disk_num))) {
6095 MPI2_IOCSTATUS_MASK; 6471 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6096 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 6472 MPI2_IOCSTATUS_MASK;
6097 break; 6473 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
6098 phys_disk_num = pd_pg0.PhysDiskNum; 6474 break;
6099 handle = le16_to_cpu(pd_pg0.DevHandle); 6475 phys_disk_num = pd_pg0.PhysDiskNum;
6100 set_bit(handle, ioc->pd_handles); 6476 handle = le16_to_cpu(pd_pg0.DevHandle);
6477 set_bit(handle, ioc->pd_handles);
6478 }
6101 } 6479 }
6102} 6480}
6103 6481
@@ -6243,6 +6621,50 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
6243} 6621}
6244 6622
6245/** 6623/**
6624 * _scsih_hide_unhide_sas_devices - add/remove device to/from OS
6625 * @ioc: per adapter object
6626 *
6627 * Return nothing.
6628 */
6629static void
6630_scsih_hide_unhide_sas_devices(struct MPT2SAS_ADAPTER *ioc)
6631{
6632 struct _sas_device *sas_device, *sas_device_next;
6633
6634 if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag !=
6635 MFG_PAGE10_HIDE_IF_VOL_PRESENT)
6636 return;
6637
6638 if (ioc->hide_drives) {
6639 if (_scsih_get_num_volumes(ioc))
6640 return;
6641 ioc->hide_drives = 0;
6642 list_for_each_entry_safe(sas_device, sas_device_next,
6643 &ioc->sas_device_list, list) {
6644 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
6645 sas_device->sas_address_parent)) {
6646 _scsih_sas_device_remove(ioc, sas_device);
6647 } else if (!sas_device->starget) {
6648 mpt2sas_transport_port_remove(ioc,
6649 sas_device->sas_address,
6650 sas_device->sas_address_parent);
6651 _scsih_sas_device_remove(ioc, sas_device);
6652 }
6653 }
6654 } else {
6655 if (!_scsih_get_num_volumes(ioc))
6656 return;
6657 ioc->hide_drives = 1;
6658 list_for_each_entry_safe(sas_device, sas_device_next,
6659 &ioc->sas_device_list, list) {
6660 mpt2sas_transport_port_remove(ioc,
6661 sas_device->sas_address,
6662 sas_device->sas_address_parent);
6663 }
6664 }
6665}
6666
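
The function is effectively a two-state machine keyed on the volume count: member disks stay hidden exactly while at least one volume exists, and each transition adds or removes the corresponding transport ports. The policy in isolation (illustrative helper, assuming the MFG_PAGE10_HIDE_IF_VOL_PRESENT semantics shown above):

	static int update_hide_state(int hide_drives, int num_volumes)
	{
		if (hide_drives && num_volumes == 0)
			return 0;	/* last volume gone: expose disks */
		if (!hide_drives && num_volumes > 0)
			return 1;	/* first volume created: hide disks */
		return hide_drives;	/* no transition */
	}
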
6667/**
6246 * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) 6668 * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
6247 * @ioc: per adapter object 6669 * @ioc: per adapter object
6248 * @reset_phase: phase 6670 * @reset_phase: phase
@@ -6326,6 +6748,7 @@ _firmware_event_work(struct work_struct *work)
6326 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, 6748 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
6327 flags); 6749 flags);
6328 _scsih_remove_unresponding_sas_devices(ioc); 6750 _scsih_remove_unresponding_sas_devices(ioc);
6751 _scsih_hide_unhide_sas_devices(ioc);
6329 return; 6752 return;
6330 } 6753 }
6331 6754
@@ -6425,6 +6848,53 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
6425 (Mpi2EventDataIrVolume_t *) 6848 (Mpi2EventDataIrVolume_t *)
6426 mpi_reply->EventData); 6849 mpi_reply->EventData);
6427 break; 6850 break;
6851 case MPI2_EVENT_LOG_ENTRY_ADDED:
6852 {
6853 Mpi2EventDataLogEntryAdded_t *log_entry;
6854 u32 *log_code;
6855
6856 if (!ioc->is_warpdrive)
6857 break;
6858
6859 log_entry = (Mpi2EventDataLogEntryAdded_t *)
6860 mpi_reply->EventData;
6861 log_code = (u32 *)log_entry->LogData;
6862
6863 if (le16_to_cpu(log_entry->LogEntryQualifier)
6864 != MPT2_WARPDRIVE_LOGENTRY)
6865 break;
6866
6867 switch (le32_to_cpu(*log_code)) {
6868 case MPT2_WARPDRIVE_LC_SSDT:
6869 printk(MPT2SAS_WARN_FMT "WarpDrive Warning: "
6870 "IO Throttling has occurred in the WarpDrive "
6871 "subsystem. Check WarpDrive documentation for "
6872 "additional details.\n", ioc->name);
6873 break;
6874 case MPT2_WARPDRIVE_LC_SSDLW:
6875 printk(MPT2SAS_WARN_FMT "WarpDrive Warning: "
6876 "Program/Erase Cycles for the WarpDrive subsystem "
6877 "in degraded range. Check WarpDrive documentation "
6878 "for additional details.\n", ioc->name);
6879 break;
6880 case MPT2_WARPDRIVE_LC_SSDLF:
6881 printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: "
6882 "There are no Program/Erase Cycles for the "
6883 "WarpDrive subsystem. The storage device will be "
6884 "in read-only mode. Check WarpDrive documentation "
6885 "for additional details.\n", ioc->name);
6886 break;
6887 case MPT2_WARPDRIVE_LC_BRMF:
6888 printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: "
6889 "The Backup Rail Monitor has failed on the "
6890 "WarpDrive subsystem. Check WarpDrive "
6891 "documentation for additional details.\n",
6892 ioc->name);
6893 break;
6894 }
6895
6896 break;
6897 }
6428 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 6898 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
6429 case MPI2_EVENT_IR_OPERATION_STATUS: 6899 case MPI2_EVENT_IR_OPERATION_STATUS:
6430 case MPI2_EVENT_SAS_DISCOVERY: 6900 case MPI2_EVENT_SAS_DISCOVERY:
@@ -6583,7 +7053,8 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
6583 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; 7053 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
6584 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; 7054 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
6585 7055
6586 printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name); 7056 if (!ioc->hide_ir_msg)
7057 printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name);
6587 init_completion(&ioc->scsih_cmds.done); 7058 init_completion(&ioc->scsih_cmds.done);
6588 mpt2sas_base_put_smid_default(ioc, smid); 7059 mpt2sas_base_put_smid_default(ioc, smid);
6589 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); 7060 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
@@ -6597,10 +7068,11 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
6597 if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) { 7068 if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) {
6598 mpi_reply = ioc->scsih_cmds.reply; 7069 mpi_reply = ioc->scsih_cmds.reply;
6599 7070
6600 printk(MPT2SAS_INFO_FMT "IR shutdown (complete): " 7071 if (!ioc->hide_ir_msg)
6601 "ioc_status(0x%04x), loginfo(0x%08x)\n", 7072 printk(MPT2SAS_INFO_FMT "IR shutdown (complete): "
6602 ioc->name, le16_to_cpu(mpi_reply->IOCStatus), 7073 "ioc_status(0x%04x), loginfo(0x%08x)\n",
6603 le32_to_cpu(mpi_reply->IOCLogInfo)); 7074 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
7075 le32_to_cpu(mpi_reply->IOCLogInfo));
6604 } 7076 }
6605 7077
6606 out: 7078 out:
@@ -6759,6 +7231,9 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
6759 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7231 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6760 list_move_tail(&sas_device->list, &ioc->sas_device_list); 7232 list_move_tail(&sas_device->list, &ioc->sas_device_list);
6761 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7233 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7234
7235 if (ioc->hide_drives)
7236 return;
6762 if (!mpt2sas_transport_port_add(ioc, sas_device->handle, 7237 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
6763 sas_device->sas_address_parent)) { 7238 sas_device->sas_address_parent)) {
6764 _scsih_sas_device_remove(ioc, sas_device); 7239 _scsih_sas_device_remove(ioc, sas_device);
@@ -6812,6 +7287,9 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
6812 list_move_tail(&sas_device->list, &ioc->sas_device_list); 7287 list_move_tail(&sas_device->list, &ioc->sas_device_list);
6813 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7288 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6814 7289
7290 if (ioc->hide_drives)
7291 continue;
7292
6815 if (!mpt2sas_transport_port_add(ioc, sas_device->handle, 7293 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
6816 sas_device->sas_address_parent)) { 7294 sas_device->sas_address_parent)) {
6817 _scsih_sas_device_remove(ioc, sas_device); 7295 _scsih_sas_device_remove(ioc, sas_device);
@@ -6882,6 +7360,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6882 ioc->id = mpt_ids++; 7360 ioc->id = mpt_ids++;
6883 sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id); 7361 sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id);
6884 ioc->pdev = pdev; 7362 ioc->pdev = pdev;
7363 if (id->device == MPI2_MFGPAGE_DEVID_SSS6200) {
7364 ioc->is_warpdrive = 1;
7365 ioc->hide_ir_msg = 1;
7366 } else
7367 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
6885 ioc->scsi_io_cb_idx = scsi_io_cb_idx; 7368 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
6886 ioc->tm_cb_idx = tm_cb_idx; 7369 ioc->tm_cb_idx = tm_cb_idx;
6887 ioc->ctl_cb_idx = ctl_cb_idx; 7370 ioc->ctl_cb_idx = ctl_cb_idx;
@@ -6947,6 +7430,20 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6947 } 7430 }
6948 7431
6949 ioc->wait_for_port_enable_to_complete = 0; 7432 ioc->wait_for_port_enable_to_complete = 0;
7433 if (ioc->is_warpdrive) {
7434 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
7435 ioc->hide_drives = 0;
7436 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
7437 ioc->hide_drives = 1;
7438 else {
7439 if (_scsih_get_num_volumes(ioc))
7440 ioc->hide_drives = 1;
7441 else
7442 ioc->hide_drives = 0;
7443 }
7444 } else
7445 ioc->hide_drives = 0;
7446
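
The probe-time decision reduces to a three-way policy on the manufacturing page 10 flag. A sketch of the same logic (the enum values here are placeholders for illustration; the real constants come from the driver headers):

	enum {
		MFG_PAGE10_EXPOSE_ALL_DISKS = 0,
		MFG_PAGE10_HIDE_ALL_DISKS,
		MFG_PAGE10_HIDE_IF_VOL_PRESENT,
	};

	static int initial_hide_drives(int hide_flag, int num_volumes)
	{
		switch (hide_flag) {
		case MFG_PAGE10_EXPOSE_ALL_DISKS:
			return 0;
		case MFG_PAGE10_HIDE_ALL_DISKS:
			return 1;
		default:	/* MFG_PAGE10_HIDE_IF_VOL_PRESENT */
			return num_volumes ? 1 : 0;
		}
	}
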
6950 _scsih_probe_devices(ioc); 7447 _scsih_probe_devices(ioc);
6951 return 0; 7448 return 0;
6952 7449
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index 6de7af27e507..c82b012aba37 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
 6# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6# 7#
7# This file is licensed under GPLv2. 8# This file is licensed under GPLv2.
8# 9#
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
index ffbf759e46f1..87b231a5bd5e 100644
--- a/drivers/scsi/mvsas/Makefile
+++ b/drivers/scsi/mvsas/Makefile
@@ -3,6 +3,7 @@
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
6# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6# 7#
7# This file is licensed under GPLv2. 8# This file is licensed under GPLv2.
8# 9#
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index afc7f6f3a13e..13c960481391 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
index 42e947d9795e..545889bd9753 100644
--- a/drivers/scsi/mvsas/mv_64xx.h
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index eed4c5c72013..78162c3c36e6 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 23ed9b164669..8835befe2c0e 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index a67e1c4172f9..1753a6fc42d0 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index 1849da1f030d..bc00c940743c 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
@@ -34,6 +35,8 @@ enum chip_flavors {
34 chip_6485, 35 chip_6485,
35 chip_9480, 36 chip_9480,
36 chip_9180, 37 chip_9180,
38 chip_9445,
39 chip_9485,
37 chip_1300, 40 chip_1300,
38 chip_1320 41 chip_1320
39}; 42};
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 938d045e4180..90b636611cde 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
@@ -25,13 +26,24 @@
25 26
26#include "mv_sas.h" 27#include "mv_sas.h"
27 28
29static int lldd_max_execute_num = 1;
30module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
31MODULE_PARM_DESC(collector, "\n"
32 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
33 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
34 "\tThe mvsas SAS LLDD supports both modes.\n"
35 "\tDefault: 1 (Direct Mode).\n");
36
28static struct scsi_transport_template *mvs_stt; 37static struct scsi_transport_template *mvs_stt;
38struct kmem_cache *mvs_task_list_cache;
29static const struct mvs_chip_info mvs_chips[] = { 39static const struct mvs_chip_info mvs_chips[] = {
30 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 40 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
31 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 41 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, 42 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 43 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 44 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
45 [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
46 [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
35 [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 47 [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
36 [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 48 [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
37}; 49};
@@ -107,7 +119,6 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
107 119
108static void mvs_free(struct mvs_info *mvi) 120static void mvs_free(struct mvs_info *mvi)
109{ 121{
110 int i;
111 struct mvs_wq *mwq; 122 struct mvs_wq *mwq;
112 int slot_nr; 123 int slot_nr;
113 124
@@ -119,12 +130,8 @@ static void mvs_free(struct mvs_info *mvi)
119 else 130 else
120 slot_nr = MVS_SLOTS; 131 slot_nr = MVS_SLOTS;
121 132
122 for (i = 0; i < mvi->tags_num; i++) { 133 if (mvi->dma_pool)
123 struct mvs_slot_info *slot = &mvi->slot_info[i]; 134 pci_pool_destroy(mvi->dma_pool);
124 if (slot->buf)
125 dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
126 slot->buf, slot->buf_dma);
127 }
128 135
129 if (mvi->tx) 136 if (mvi->tx)
130 dma_free_coherent(mvi->dev, 137 dma_free_coherent(mvi->dev,
@@ -213,6 +220,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
213static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) 220static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
214{ 221{
215 int i = 0, slot_nr; 222 int i = 0, slot_nr;
223 char pool_name[32];
216 224
217 if (mvi->flags & MVF_FLAG_SOC) 225 if (mvi->flags & MVF_FLAG_SOC)
218 slot_nr = MVS_SOC_SLOTS; 226 slot_nr = MVS_SOC_SLOTS;
@@ -272,18 +280,14 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
272 if (!mvi->bulk_buffer) 280 if (!mvi->bulk_buffer)
273 goto err_out; 281 goto err_out;
274#endif 282#endif
275 for (i = 0; i < slot_nr; i++) { 283 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
276 struct mvs_slot_info *slot = &mvi->slot_info[i]; 284 mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
277 285 if (!mvi->dma_pool) {
278 slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ, 286 printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name);
279 &slot->buf_dma, GFP_KERNEL);
280 if (!slot->buf) {
281 printk(KERN_DEBUG"failed to allocate slot->buf.\n");
282 goto err_out; 287 goto err_out;
283 }
284 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
285 ++mvi->tags_num;
286 } 288 }
289 mvi->tags_num = slot_nr;
290
287 /* Initialize tags */ 291 /* Initialize tags */
288 mvs_tag_init(mvi); 292 mvs_tag_init(mvi);
289 return 0; 293 return 0;
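
Slot buffers thus move from slot_nr up-front coherent allocations to a per-HBA pool drawn from per command, so DMA memory scales with commands in flight rather than with the slot table. The lifecycle, condensed from this patch (the pci_pool_* calls are that era's wrappers around dma_pool_*; error handling trimmed):

	pool = pci_pool_create(pool_name, pdev, MVS_SLOT_BUF_SZ, 16, 0); /* probe */
	buf = pci_pool_alloc(pool, GFP_ATOMIC, &buf_dma); /* mvs_task_prep() */
	pci_pool_free(pool, buf, buf_dma);                /* mvs_slot_task_free() */
	pci_pool_destroy(pool);                           /* mvs_free() */
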
@@ -484,7 +488,7 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
484 488
485 sha->num_phys = nr_core * chip_info->n_phy; 489 sha->num_phys = nr_core * chip_info->n_phy;
486 490
487 sha->lldd_max_execute_num = 1; 491 sha->lldd_max_execute_num = lldd_max_execute_num;
488 492
489 if (mvi->flags & MVF_FLAG_SOC) 493 if (mvi->flags & MVF_FLAG_SOC)
490 can_queue = MVS_SOC_CAN_QUEUE; 494 can_queue = MVS_SOC_CAN_QUEUE;
@@ -670,6 +674,24 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
670 { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, 674 { PCI_VDEVICE(TTI, 0x2740), chip_9480 },
671 { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, 675 { PCI_VDEVICE(TTI, 0x2744), chip_9480 },
672 { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, 676 { PCI_VDEVICE(TTI, 0x2760), chip_9480 },
677 {
678 .vendor = 0x1b4b,
679 .device = 0x9445,
680 .subvendor = PCI_ANY_ID,
681 .subdevice = 0x9480,
682 .class = 0,
683 .class_mask = 0,
684 .driver_data = chip_9445,
685 },
686 {
687 .vendor = 0x1b4b,
688 .device = 0x9485,
689 .subvendor = PCI_ANY_ID,
690 .subdevice = 0x9480,
691 .class = 0,
692 .class_mask = 0,
693 .driver_data = chip_9485,
694 },
673 695
674 { } /* terminate list */ 696 { } /* terminate list */
675}; 697};
@@ -690,6 +712,14 @@ static int __init mvs_init(void)
690 if (!mvs_stt) 712 if (!mvs_stt)
691 return -ENOMEM; 713 return -ENOMEM;
692 714
715 mvs_task_list_cache = kmem_cache_create("mvs_task_list", sizeof(struct mvs_task_list),
716 0, SLAB_HWCACHE_ALIGN, NULL);
717 if (!mvs_task_list_cache) {
718 rc = -ENOMEM;
 719 mv_printk("%s: mvs_task_list_cache alloc failed!\n", __func__);
720 goto err_out;
721 }
722
693 rc = pci_register_driver(&mvs_pci_driver); 723 rc = pci_register_driver(&mvs_pci_driver);
694 724
695 if (rc) 725 if (rc)
@@ -706,6 +736,7 @@ static void __exit mvs_exit(void)
706{ 736{
707 pci_unregister_driver(&mvs_pci_driver); 737 pci_unregister_driver(&mvs_pci_driver);
708 sas_release_transport(mvs_stt); 738 sas_release_transport(mvs_stt);
739 kmem_cache_destroy(mvs_task_list_cache);
709} 740}
710 741
711module_init(mvs_init); 742module_init(mvs_init);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index adedaa916ecb..0ef27425c447 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
@@ -862,178 +863,286 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
862} 863}
863 864
864#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) 865#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
865static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, 866static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
866 struct completion *completion,int is_tmf, 867 struct mvs_tmf_task *tmf, int *pass)
867 struct mvs_tmf_task *tmf)
868{ 868{
869 struct domain_device *dev = task->dev; 869 struct domain_device *dev = task->dev;
870 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; 870 struct mvs_device *mvi_dev = dev->lldd_dev;
871 struct mvs_info *mvi = mvi_dev->mvi_info;
872 struct mvs_task_exec_info tei; 871 struct mvs_task_exec_info tei;
873 struct sas_task *t = task;
874 struct mvs_slot_info *slot; 872 struct mvs_slot_info *slot;
875 u32 tag = 0xdeadbeef, rc, n_elem = 0; 873 u32 tag = 0xdeadbeef, n_elem = 0;
876 u32 n = num, pass = 0; 874 int rc = 0;
877 unsigned long flags = 0, flags_libsas = 0;
878 875
879 if (!dev->port) { 876 if (!dev->port) {
880 struct task_status_struct *tsm = &t->task_status; 877 struct task_status_struct *tsm = &task->task_status;
881 878
882 tsm->resp = SAS_TASK_UNDELIVERED; 879 tsm->resp = SAS_TASK_UNDELIVERED;
883 tsm->stat = SAS_PHY_DOWN; 880 tsm->stat = SAS_PHY_DOWN;
881 /*
882 * libsas will use dev->port, should
883 * not call task_done for sata
884 */
884 if (dev->dev_type != SATA_DEV) 885 if (dev->dev_type != SATA_DEV)
885 t->task_done(t); 886 task->task_done(task);
886 return 0; 887 return rc;
887 } 888 }
888 889
889 spin_lock_irqsave(&mvi->lock, flags); 890 if (DEV_IS_GONE(mvi_dev)) {
890 do { 891 if (mvi_dev)
891 dev = t->dev; 892 mv_dprintk("device %d not ready.\n",
892 mvi_dev = dev->lldd_dev; 893 mvi_dev->device_id);
893 if (DEV_IS_GONE(mvi_dev)) { 894 else
894 if (mvi_dev) 895 mv_dprintk("device %016llx not ready.\n",
895 mv_dprintk("device %d not ready.\n", 896 SAS_ADDR(dev->sas_addr));
896 mvi_dev->device_id);
897 else
898 mv_dprintk("device %016llx not ready.\n",
899 SAS_ADDR(dev->sas_addr));
900 897
901 rc = SAS_PHY_DOWN; 898 rc = SAS_PHY_DOWN;
902 goto out_done; 899 return rc;
903 } 900 }
901 tei.port = dev->port->lldd_port;
902 if (tei.port && !tei.port->port_attached && !tmf) {
903 if (sas_protocol_ata(task->task_proto)) {
904 struct task_status_struct *ts = &task->task_status;
 905 mv_dprintk("SATA/STP port %d does not attach "
906 "device.\n", dev->port->id);
907 ts->resp = SAS_TASK_COMPLETE;
908 ts->stat = SAS_PHY_DOWN;
904 909
905 if (dev->port->id >= mvi->chip->n_phy) 910 task->task_done(task);
906 tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
907 else
908 tei.port = &mvi->port[dev->port->id];
909
910 if (tei.port && !tei.port->port_attached) {
911 if (sas_protocol_ata(t->task_proto)) {
912 struct task_status_struct *ts = &t->task_status;
913
914 mv_dprintk("port %d does not"
915 "attached device.\n", dev->port->id);
916 ts->stat = SAS_PROTO_RESPONSE;
917 ts->stat = SAS_PHY_DOWN;
918 spin_unlock_irqrestore(dev->sata_dev.ap->lock,
919 flags_libsas);
920 spin_unlock_irqrestore(&mvi->lock, flags);
921 t->task_done(t);
922 spin_lock_irqsave(&mvi->lock, flags);
923 spin_lock_irqsave(dev->sata_dev.ap->lock,
924 flags_libsas);
925 if (n > 1)
926 t = list_entry(t->list.next,
927 struct sas_task, list);
928 continue;
929 } else {
930 struct task_status_struct *ts = &t->task_status;
931 ts->resp = SAS_TASK_UNDELIVERED;
932 ts->stat = SAS_PHY_DOWN;
933 t->task_done(t);
934 if (n > 1)
935 t = list_entry(t->list.next,
936 struct sas_task, list);
937 continue;
938 }
939 }
940 911
941 if (!sas_protocol_ata(t->task_proto)) {
942 if (t->num_scatter) {
943 n_elem = dma_map_sg(mvi->dev,
944 t->scatter,
945 t->num_scatter,
946 t->data_dir);
947 if (!n_elem) {
948 rc = -ENOMEM;
949 goto err_out;
950 }
951 }
952 } else { 912 } else {
953 n_elem = t->num_scatter; 913 struct task_status_struct *ts = &task->task_status;
 914 mv_dprintk("SAS port %d does not attach "
915 "device.\n", dev->port->id);
916 ts->resp = SAS_TASK_UNDELIVERED;
917 ts->stat = SAS_PHY_DOWN;
918 task->task_done(task);
954 } 919 }
920 return rc;
921 }
955 922
956 rc = mvs_tag_alloc(mvi, &tag); 923 if (!sas_protocol_ata(task->task_proto)) {
957 if (rc) 924 if (task->num_scatter) {
958 goto err_out; 925 n_elem = dma_map_sg(mvi->dev,
926 task->scatter,
927 task->num_scatter,
928 task->data_dir);
929 if (!n_elem) {
930 rc = -ENOMEM;
931 goto prep_out;
932 }
933 }
934 } else {
935 n_elem = task->num_scatter;
936 }
959 937
960 slot = &mvi->slot_info[tag]; 938 rc = mvs_tag_alloc(mvi, &tag);
939 if (rc)
940 goto err_out;
961 941
942 slot = &mvi->slot_info[tag];
962 943
963 t->lldd_task = NULL; 944 task->lldd_task = NULL;
964 slot->n_elem = n_elem; 945 slot->n_elem = n_elem;
965 slot->slot_tag = tag; 946 slot->slot_tag = tag;
966 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 947
948 slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
949 if (!slot->buf)
950 goto err_out_tag;
951 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
952
953 tei.task = task;
954 tei.hdr = &mvi->slot[tag];
955 tei.tag = tag;
956 tei.n_elem = n_elem;
957 switch (task->task_proto) {
958 case SAS_PROTOCOL_SMP:
959 rc = mvs_task_prep_smp(mvi, &tei);
960 break;
961 case SAS_PROTOCOL_SSP:
962 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
963 break;
964 case SAS_PROTOCOL_SATA:
965 case SAS_PROTOCOL_STP:
966 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
967 rc = mvs_task_prep_ata(mvi, &tei);
968 break;
969 default:
970 dev_printk(KERN_ERR, mvi->dev,
971 "unknown sas_task proto: 0x%x\n",
972 task->task_proto);
973 rc = -EINVAL;
974 break;
975 }
967 976
968 tei.task = t; 977 if (rc) {
969 tei.hdr = &mvi->slot[tag]; 978 mv_dprintk("rc is %x\n", rc);
970 tei.tag = tag; 979 goto err_out_slot_buf;
971 tei.n_elem = n_elem; 980 }
972 switch (t->task_proto) { 981 slot->task = task;
973 case SAS_PROTOCOL_SMP: 982 slot->port = tei.port;
974 rc = mvs_task_prep_smp(mvi, &tei); 983 task->lldd_task = slot;
975 break; 984 list_add_tail(&slot->entry, &tei.port->list);
976 case SAS_PROTOCOL_SSP: 985 spin_lock(&task->task_state_lock);
977 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); 986 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
978 break; 987 spin_unlock(&task->task_state_lock);
979 case SAS_PROTOCOL_SATA:
980 case SAS_PROTOCOL_STP:
981 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
982 rc = mvs_task_prep_ata(mvi, &tei);
983 break;
984 default:
985 dev_printk(KERN_ERR, mvi->dev,
986 "unknown sas_task proto: 0x%x\n",
987 t->task_proto);
988 rc = -EINVAL;
989 break;
990 }
991 988
992 if (rc) { 989 mvs_hba_memory_dump(mvi, tag, task->task_proto);
993 mv_dprintk("rc is %x\n", rc); 990 mvi_dev->running_req++;
994 goto err_out_tag; 991 ++(*pass);
995 } 992 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
996 slot->task = t;
997 slot->port = tei.port;
998 t->lldd_task = slot;
999 list_add_tail(&slot->entry, &tei.port->list);
1000 /* TODO: select normal or high priority */
1001 spin_lock(&t->task_state_lock);
1002 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
1003 spin_unlock(&t->task_state_lock);
1004
1005 mvs_hba_memory_dump(mvi, tag, t->task_proto);
1006 mvi_dev->running_req++;
1007 ++pass;
1008 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1009 if (n > 1)
1010 t = list_entry(t->list.next, struct sas_task, list);
1011 if (likely(pass))
1012 MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
1013 (MVS_CHIP_SLOT_SZ - 1));
1014 993
1015 } while (--n); 994 return rc;
1016 rc = 0;
1017 goto out_done;
1018 995
996err_out_slot_buf:
997 pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
1019err_out_tag: 998err_out_tag:
1020 mvs_tag_free(mvi, tag); 999 mvs_tag_free(mvi, tag);
1021err_out: 1000err_out:
1022 1001
1023 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); 1002 dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
1024 if (!sas_protocol_ata(t->task_proto)) 1003 if (!sas_protocol_ata(task->task_proto))
1025 if (n_elem) 1004 if (n_elem)
1026 dma_unmap_sg(mvi->dev, t->scatter, n_elem, 1005 dma_unmap_sg(mvi->dev, task->scatter, n_elem,
1027 t->data_dir); 1006 task->data_dir);
1028out_done: 1007prep_out:
1008 return rc;
1009}
1010
1011static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
1012{
1013 struct mvs_task_list *first = NULL;
1014
1015 for (; *num > 0; --*num) {
1016 struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
1017
1018 if (!mvs_list)
1019 break;
1020
1021 INIT_LIST_HEAD(&mvs_list->list);
1022 if (!first)
1023 first = mvs_list;
1024 else
1025 list_add_tail(&mvs_list->list, &first->list);
1026
1027 }
1028
1029 return first;
1030}
1031
1032static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
1033{
1034 LIST_HEAD(list);
1035 struct list_head *pos, *a;
1036 struct mvs_task_list *mlist = NULL;
1037
1038 __list_add(&list, mvs_list->list.prev, &mvs_list->list);
1039
1040 list_for_each_safe(pos, a, &list) {
1041 list_del_init(pos);
1042 mlist = list_entry(pos, struct mvs_task_list, list);
1043 kmem_cache_free(mvs_task_list_cache, mlist);
1044 }
1045}
1046
1047static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
1048 struct completion *completion, int is_tmf,
1049 struct mvs_tmf_task *tmf)
1050{
1051 struct domain_device *dev = task->dev;
1052 struct mvs_info *mvi = NULL;
1053 u32 rc = 0;
1054 u32 pass = 0;
1055 unsigned long flags = 0;
1056
1057 mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
1058
1059 if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
1060 spin_unlock_irq(dev->sata_dev.ap->lock);
1061
1062 spin_lock_irqsave(&mvi->lock, flags);
1063 rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
1064 if (rc)
1065 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1066
1067 if (likely(pass))
1068 MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
1069 (MVS_CHIP_SLOT_SZ - 1));
1029 spin_unlock_irqrestore(&mvi->lock, flags); 1070 spin_unlock_irqrestore(&mvi->lock, flags);
1071
1072 if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
1073 spin_lock_irq(dev->sata_dev.ap->lock);
1074
1075 return rc;
1076}
1077
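
One reading of the ap->lock dance in mvs_task_exec() (an inference from the code, not stated in the patch): libsas can enter with the libata port lock held for SATA devices, while the completion side takes mvi->lock before calling back into libsas/libata, so holding both here risks an inversion. The shape of the pattern:

	spin_unlock_irq(ap->lock);		/* drop caller-held lock */
	spin_lock_irqsave(&mvi->lock, flags);
	/* prepare and deliver the command */
	spin_unlock_irqrestore(&mvi->lock, flags);
	spin_lock_irq(ap->lock);		/* restore caller's state */
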
1078static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
1079 struct completion *completion, int is_tmf,
1080 struct mvs_tmf_task *tmf)
1081{
1082 struct domain_device *dev = task->dev;
1083 struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
1084 struct mvs_info *mvi = NULL;
1085 struct sas_task *t = task;
1086 struct mvs_task_list *mvs_list = NULL, *a;
1087 LIST_HEAD(q);
1088 int pass[2] = {0};
1089 u32 rc = 0;
1090 u32 n = num;
1091 unsigned long flags = 0;
1092
1093 mvs_list = mvs_task_alloc_list(&n, gfp_flags);
1094 if (n) {
1095 printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
1096 rc = -ENOMEM;
1097 goto free_list;
1098 }
1099
1100 __list_add(&q, mvs_list->list.prev, &mvs_list->list);
1101
1102 list_for_each_entry(a, &q, list) {
1103 a->task = t;
1104 t = list_entry(t->list.next, struct sas_task, list);
1105 }
1106
1107 list_for_each_entry(a, &q , list) {
1108
1109 t = a->task;
1110 mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
1111
1112 spin_lock_irqsave(&mvi->lock, flags);
1113 rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
1114 if (rc)
1115 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1116 spin_unlock_irqrestore(&mvi->lock, flags);
1117 }
1118
1119 if (likely(pass[0]))
1120 MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
1121 (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1122
1123 if (likely(pass[1]))
1124 MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
1125 (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1126
1127 list_del_init(&q);
1128
1129free_list:
1130 if (mvs_list)
1131 mvs_task_free_list(mvs_list);
1132
1030 return rc; 1133 return rc;
1031} 1134}
1032 1135
1033int mvs_queue_command(struct sas_task *task, const int num, 1136int mvs_queue_command(struct sas_task *task, const int num,
1034 gfp_t gfp_flags) 1137 gfp_t gfp_flags)
1035{ 1138{
1036 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); 1139 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1140 struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
1141
1142 if (sas->lldd_max_execute_num < 2)
1143 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1144 else
1145 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1037} 1146}
1038 1147
1039static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 1148static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
@@ -1067,6 +1176,11 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1067 /* do nothing */ 1176 /* do nothing */
1068 break; 1177 break;
1069 } 1178 }
1179
1180 if (slot->buf) {
1181 pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
1182 slot->buf = NULL;
1183 }
1070 list_del_init(&slot->entry); 1184 list_del_init(&slot->entry);
1071 task->lldd_task = NULL; 1185 task->lldd_task = NULL;
1072 slot->task = NULL; 1186 slot->task = NULL;
@@ -1255,6 +1369,7 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1255 spin_lock_irqsave(&mvi->lock, flags); 1369 spin_lock_irqsave(&mvi->lock, flags);
1256 port->port_attached = 1; 1370 port->port_attached = 1;
1257 phy->port = port; 1371 phy->port = port;
1372 sas_port->lldd_port = port;
1258 if (phy->phy_type & PORT_TYPE_SAS) { 1373 if (phy->phy_type & PORT_TYPE_SAS) {
1259 port->wide_port_phymap = sas_port->phy_mask; 1374 port->wide_port_phymap = sas_port->phy_mask;
1260 mv_printk("set wide port phy map %x\n", sas_port->phy_mask); 1375 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 77ddc7c1e5f2..1367d8b9350d 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2007 Red Hat, Inc. 4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com> 5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
6 * 7 *
7 * This file is licensed under GPLv2. 8 * This file is licensed under GPLv2.
8 * 9 *
@@ -67,6 +68,7 @@ extern struct mvs_tgt_initiator mvs_tgt;
67extern struct mvs_info *tgt_mvi; 68extern struct mvs_info *tgt_mvi;
68extern const struct mvs_dispatch mvs_64xx_dispatch; 69extern const struct mvs_dispatch mvs_64xx_dispatch;
69extern const struct mvs_dispatch mvs_94xx_dispatch; 70extern const struct mvs_dispatch mvs_94xx_dispatch;
71extern struct kmem_cache *mvs_task_list_cache;
70 72
71#define DEV_IS_EXPANDER(type) \ 73#define DEV_IS_EXPANDER(type) \
72 ((type == EDGE_DEV) || (type == FANOUT_DEV)) 74 ((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -341,6 +343,7 @@ struct mvs_info {
341 dma_addr_t bulk_buffer_dma; 343 dma_addr_t bulk_buffer_dma;
342#define TRASH_BUCKET_SIZE 0x20000 344#define TRASH_BUCKET_SIZE 0x20000
343#endif 345#endif
346 void *dma_pool;
344 struct mvs_slot_info slot_info[0]; 347 struct mvs_slot_info slot_info[0];
345}; 348};
346 349
@@ -367,6 +370,11 @@ struct mvs_task_exec_info {
367 int n_elem; 370 int n_elem;
368}; 371};
369 372
373struct mvs_task_list {
374 struct sas_task *task;
375 struct list_head list;
376};
377
370 378
371/******************** function prototype *********************/ 379/******************** function prototype *********************/
372void mvs_get_sas_addr(void *buf, u32 buflen); 380void mvs_get_sas_addr(void *buf, u32 buflen);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 835d8d66e696..4b3b4755945c 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8147,7 +8147,7 @@ static int ncr53c8xx_abort(struct scsi_cmnd *cmd)
8147 unsigned long flags; 8147 unsigned long flags;
8148 struct scsi_cmnd *done_list; 8148 struct scsi_cmnd *done_list;
8149 8149
8150 printk("ncr53c8xx_abort: command pid %lu\n", cmd->serial_number); 8150 printk("ncr53c8xx_abort\n");
8151 8151
8152 NCR_LOCK_NCB(np, flags); 8152 NCR_LOCK_NCB(np, flags);
8153 8153
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8ba5744c267e..d838205ab169 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4066,7 +4066,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4066 } */ 4066 } */
4067 printk(" tag=%d, transfersize=0x%x \n", 4067 printk(" tag=%d, transfersize=0x%x \n",
4068 cmd->tag, cmd->transfersize); 4068 cmd->tag, cmd->transfersize);
4069 printk(" Pid=%li, SP=0x%p\n", cmd->serial_number, CMD_SP(cmd)); 4069 printk(" SP=0x%p\n", CMD_SP(cmd));
4070 printk(" underflow size = 0x%x, direction=0x%x\n", 4070 printk(" underflow size = 0x%x, direction=0x%x\n",
4071 cmd->underflow, cmd->sc_data_direction); 4071 cmd->underflow, cmd->sc_data_direction);
4072} 4072}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d3e58d763b43..532313e0725e 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -496,8 +496,8 @@ do_read:
496 offset = 0; 496 offset = 0;
497 } 497 }
498 498
499 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset, 499 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
500 SFP_BLOCK_SIZE); 500 addr, offset, SFP_BLOCK_SIZE, 0);
501 if (rval != QLA_SUCCESS) { 501 if (rval != QLA_SUCCESS) {
502 qla_printk(KERN_WARNING, ha, 502 qla_printk(KERN_WARNING, ha,
503 "Unable to read SFP data (%x/%x/%x).\n", rval, 503 "Unable to read SFP data (%x/%x/%x).\n", rval,
@@ -628,12 +628,12 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
628 628
629 memcpy(ha->edc_data, &buf[8], len); 629 memcpy(ha->edc_data, &buf[8], len);
630 630
631 rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma, 631 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
632 ha->edc_data, len, opt); 632 dev, adr, len, opt);
633 if (rval != QLA_SUCCESS) { 633 if (rval != QLA_SUCCESS) {
634 DEBUG2(qla_printk(KERN_INFO, ha, 634 DEBUG2(qla_printk(KERN_INFO, ha,
635 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n", 635 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
636 rval, dev, adr, opt, len, *buf)); 636 rval, dev, adr, opt, len, buf[8]));
637 return 0; 637 return 0;
638 } 638 }
639 639
@@ -685,8 +685,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
685 return -EINVAL; 685 return -EINVAL;
686 686
687 memset(ha->edc_data, 0, len); 687 memset(ha->edc_data, 0, len);
688 rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma, 688 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
689 ha->edc_data, len, opt); 689 dev, adr, len, opt);
690 if (rval != QLA_SUCCESS) { 690 if (rval != QLA_SUCCESS) {
691 DEBUG2(qla_printk(KERN_INFO, ha, 691 DEBUG2(qla_printk(KERN_INFO, ha,
692 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n", 692 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
@@ -1568,7 +1568,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1568 1568
1569 /* Now that the rport has been deleted, set the fcport state to 1569 /* Now that the rport has been deleted, set the fcport state to
1570 FCS_DEVICE_DEAD */ 1570 FCS_DEVICE_DEAD */
1571 atomic_set(&fcport->state, FCS_DEVICE_DEAD); 1571 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1572 1572
1573 /* 1573 /*
1574 * Transport has effectively 'deleted' the rport, clear 1574 * Transport has effectively 'deleted' the rport, clear
@@ -1877,14 +1877,15 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1877 1877
1878 scsi_remove_host(vha->host); 1878 scsi_remove_host(vha->host);
1879 1879
 1880 /* Allow timer to run to drain queued items when removing a vp */
1881 qla24xx_deallocate_vp_id(vha);
1882
1880 if (vha->timer_active) { 1883 if (vha->timer_active) {
1881 qla2x00_vp_stop_timer(vha); 1884 qla2x00_vp_stop_timer(vha);
1882 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" 1885 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
1883 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); 1886 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
1884 } 1887 }
1885 1888
1886 qla24xx_deallocate_vp_id(vha);
1887
1888 /* No pending activities shall be there on the vha now */ 1889 /* No pending activities shall be there on the vha now */
1889 DEBUG(msleep(random32()%10)); /* Just to see if something falls on 1890 DEBUG(msleep(random32()%10)); /* Just to see if something falls on
1890 * the net we have placed below */ 1891 * the net we have placed below */
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 903b0586ded3..8c10e2c4928e 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 074a999c7017..0f0f54e35f06 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 096141148257..c53719a9a747 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index b74e6b5743dc..930414541ec6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ee20353c8550..cc5a79259d33 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1717,6 +1717,14 @@ typedef struct fc_port {
1717#define FCS_DEVICE_LOST 3 1717#define FCS_DEVICE_LOST 3
1718#define FCS_ONLINE 4 1718#define FCS_ONLINE 4
1719 1719
1720static const char * const port_state_str[] = {
1721 "Unknown",
1722 "UNCONFIGURED",
1723 "DEAD",
1724 "LOST",
1725 "ONLINE"
1726};
1727
1720/* 1728/*
1721 * FC port flags. 1729 * FC port flags.
1722 */ 1730 */
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 6271353e8c51..a5a4e1275bf2 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index f5ba09c8a663..691783abfb69 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -416,8 +416,7 @@ struct cmd_type_6 {
416 uint8_t vp_index; 416 uint8_t vp_index;
417 417
418 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */ 418 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
419 uint16_t fcp_data_dseg_len; /* Data segment length. */ 419 uint32_t fcp_data_dseg_len; /* Data segment length. */
420 uint16_t reserved_1; /* MUST be set to 0. */
421}; 420};
422 421
423#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */ 422#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */
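Annotation: widening fcp_data_dseg_len folds the old 16-bit length and its mandatory-zero pad word into a single 32-bit little-endian field; the matching writer change in the qla_nx.c hunk further down now byte-swaps the length explicitly. In sketch form:

	/* tail of struct cmd_type_6 after the change: one 32-bit LE length */
	uint32_t fcp_data_dseg_address[2];	/* Data segment address. */
	uint32_t fcp_data_dseg_len;		/* Data segment length. */

	/* writer side (mirrors the qla_nx.c hunk below): every dword is
	 * swapped with cpu_to_le32() now that the pad word is gone */
	dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
	*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
	*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
	*dsd_seg++ = cpu_to_le32(dsd_list_len);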
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d48326ee3f61..0b381224ae4b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -39,6 +39,8 @@ extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
39extern int qla2x00_perform_loop_resync(scsi_qla_host_t *); 39extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
40extern int qla2x00_loop_resync(scsi_qla_host_t *); 40extern int qla2x00_loop_resync(scsi_qla_host_t *);
41 41
42extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
43
42extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); 44extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
43extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); 45extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
44 46
@@ -100,6 +102,8 @@ extern int ql2xgffidenable;
100extern int ql2xenabledif; 102extern int ql2xenabledif;
101extern int ql2xenablehba_err_chk; 103extern int ql2xenablehba_err_chk;
102extern int ql2xtargetreset; 104extern int ql2xtargetreset;
105extern int ql2xdontresethba;
106extern unsigned int ql2xmaxlun;
103 107
104extern int qla2x00_loop_reset(scsi_qla_host_t *); 108extern int qla2x00_loop_reset(scsi_qla_host_t *);
105extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 109extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -319,15 +323,12 @@ extern int
319qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); 323qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
320 324
321extern int 325extern int
322qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t); 326qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
323 327 uint16_t, uint16_t, uint16_t, uint16_t);
324extern int
325qla2x00_read_edc(scsi_qla_host_t *, uint16_t, uint16_t, dma_addr_t,
326 uint8_t *, uint16_t, uint16_t);
327 328
328extern int 329extern int
329qla2x00_write_edc(scsi_qla_host_t *, uint16_t, uint16_t, dma_addr_t, 330qla2x00_write_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
330 uint8_t *, uint16_t, uint16_t); 331 uint16_t, uint16_t, uint16_t, uint16_t);
331 332
332extern int 333extern int
333qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); 334qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
@@ -549,7 +550,6 @@ extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
549extern int qla82xx_rd_32(struct qla_hw_data *, ulong); 550extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
550extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); 551extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
551extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int); 552extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
552extern void qla82xx_rom_unlock(struct qla_hw_data *);
553 553
554/* ISP 8021 IDC */ 554/* ISP 8021 IDC */
555extern void qla82xx_clear_drv_active(struct qla_hw_data *); 555extern void qla82xx_clear_drv_active(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 74a91b6dfc68..8cd9066ad906 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8575808dbae0..920b76bfbb93 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -35,8 +35,6 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
35 35
36static int qla2x00_restart_isp(scsi_qla_host_t *); 36static int qla2x00_restart_isp(scsi_qla_host_t *);
37 37
38static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39
40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); 38static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41static int qla84xx_init_chip(scsi_qla_host_t *); 39static int qla84xx_init_chip(scsi_qla_host_t *);
42static int qla25xx_init_queues(struct qla_hw_data *); 40static int qla25xx_init_queues(struct qla_hw_data *);
@@ -385,8 +383,18 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
385 383
386 switch (data[0]) { 384 switch (data[0]) {
387 case MBS_COMMAND_COMPLETE: 385 case MBS_COMMAND_COMPLETE:
386 /*
 387 * Driver must validate login state - if PRLI not complete,
388 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
389 * requests.
390 */
391 rval = qla2x00_get_port_database(vha, fcport, 0);
392 if (rval != QLA_SUCCESS) {
393 qla2x00_post_async_logout_work(vha, fcport, NULL);
394 qla2x00_post_async_login_work(vha, fcport, NULL);
395 break;
396 }
388 if (fcport->flags & FCF_FCP2_DEVICE) { 397 if (fcport->flags & FCF_FCP2_DEVICE) {
389 fcport->flags |= FCF_ASYNC_SENT;
390 qla2x00_post_async_adisc_work(vha, fcport, data); 398 qla2x00_post_async_adisc_work(vha, fcport, data);
391 break; 399 break;
392 } 400 }
@@ -397,7 +405,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
397 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 405 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
398 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 406 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
399 else 407 else
400 qla2x00_mark_device_lost(vha, fcport, 1, 1); 408 qla2x00_mark_device_lost(vha, fcport, 1, 0);
401 break; 409 break;
402 case MBS_PORT_ID_USED: 410 case MBS_PORT_ID_USED:
403 fcport->loop_id = data[1]; 411 fcport->loop_id = data[1];
@@ -409,7 +417,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
409 rval = qla2x00_find_new_loop_id(vha, fcport); 417 rval = qla2x00_find_new_loop_id(vha, fcport);
410 if (rval != QLA_SUCCESS) { 418 if (rval != QLA_SUCCESS) {
411 fcport->flags &= ~FCF_ASYNC_SENT; 419 fcport->flags &= ~FCF_ASYNC_SENT;
412 qla2x00_mark_device_lost(vha, fcport, 1, 1); 420 qla2x00_mark_device_lost(vha, fcport, 1, 0);
413 break; 421 break;
414 } 422 }
415 qla2x00_post_async_login_work(vha, fcport, NULL); 423 qla2x00_post_async_login_work(vha, fcport, NULL);
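Annotation: on MBS_COMMAND_COMPLETE the driver no longer takes the firmware's word for it. It re-reads the port database, and if PRLI never finished it forces an implicit LOGO followed by a fresh PLOGI/PRLI rather than running I/O over a half-formed session. The control flow as a sketch; helper names are as in the hunk, the wrapper function is hypothetical:

	/* Sketch: validate the firmware's view of the login before using it. */
	static void login_done(struct scsi_qla_host *vha, fc_port_t *fcport)
	{
		if (qla2x00_get_port_database(vha, fcport, 0) != QLA_SUCCESS) {
			/* PRLI incomplete: implicit LOGO, then redo PLOGI/PRLI */
			qla2x00_post_async_logout_work(vha, fcport, NULL);
			qla2x00_post_async_login_work(vha, fcport, NULL);
			return;
		}
		/* session verified; continue with ADISC/rport registration */
	}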
@@ -441,7 +449,7 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
441 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 449 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
442 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 450 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
443 else 451 else
444 qla2x00_mark_device_lost(vha, fcport, 1, 1); 452 qla2x00_mark_device_lost(vha, fcport, 1, 0);
445 453
446 return; 454 return;
447} 455}
@@ -2536,7 +2544,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2536 fcport->vp_idx = vha->vp_idx; 2544 fcport->vp_idx = vha->vp_idx;
2537 fcport->port_type = FCT_UNKNOWN; 2545 fcport->port_type = FCT_UNKNOWN;
2538 fcport->loop_id = FC_NO_LOOP_ID; 2546 fcport->loop_id = FC_NO_LOOP_ID;
2539 atomic_set(&fcport->state, FCS_UNCONFIGURED); 2547 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2540 fcport->supported_classes = FC_COS_UNSPECIFIED; 2548 fcport->supported_classes = FC_COS_UNSPECIFIED;
2541 2549
2542 return fcport; 2550 return fcport;
@@ -2722,7 +2730,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2722 "loop_id=0x%04x\n", 2730 "loop_id=0x%04x\n",
2723 vha->host_no, fcport->loop_id)); 2731 vha->host_no, fcport->loop_id));
2724 2732
2725 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2733 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2726 } 2734 }
2727 } 2735 }
2728 2736
@@ -2934,7 +2942,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2934 qla2x00_iidma_fcport(vha, fcport); 2942 qla2x00_iidma_fcport(vha, fcport);
2935 qla24xx_update_fcport_fcp_prio(vha, fcport); 2943 qla24xx_update_fcport_fcp_prio(vha, fcport);
2936 qla2x00_reg_remote_port(vha, fcport); 2944 qla2x00_reg_remote_port(vha, fcport);
2937 atomic_set(&fcport->state, FCS_ONLINE); 2945 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
2938} 2946}
2939 2947
2940/* 2948/*
@@ -3391,7 +3399,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3391 * Context: 3399 * Context:
3392 * Kernel context. 3400 * Kernel context.
3393 */ 3401 */
3394static int 3402int
3395qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 3403qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3396{ 3404{
3397 int rval; 3405 int rval;
@@ -5202,7 +5210,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5202 } 5210 }
5203 5211
5204 /* Reset Initialization control block */ 5212 /* Reset Initialization control block */
5205 memset(icb, 0, sizeof(struct init_cb_81xx)); 5213 memset(icb, 0, ha->init_cb_size);
5206 5214
5207 /* Copy 1st segment. */ 5215 /* Copy 1st segment. */
5208 dptr1 = (uint8_t *)icb; 5216 dptr1 = (uint8_t *)icb;
@@ -5427,6 +5435,13 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5427 ha->isp_abort_cnt = 0; 5435 ha->isp_abort_cnt = 0;
5428 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 5436 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5429 5437
5438 /* Update the firmware version */
5439 qla2x00_get_fw_version(vha, &ha->fw_major_version,
5440 &ha->fw_minor_version, &ha->fw_subminor_version,
5441 &ha->fw_attributes, &ha->fw_memory_size,
5442 ha->mpi_version, &ha->mpi_capabilities,
5443 ha->phy_version);
5444
5430 if (ha->fce) { 5445 if (ha->fce) {
5431 ha->flags.fce_enabled = 1; 5446 ha->flags.fce_enabled = 1;
5432 memset(ha->fce, 0, 5447 memset(ha->fce, 0,
@@ -5508,26 +5523,26 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
5508 * 5523 *
5509 * Return: 5524 * Return:
5510 * non-zero (if found) 5525 * non-zero (if found)
5511 * 0 (if not found) 5526 * -1 (if not found)
5512 * 5527 *
5513 * Context: 5528 * Context:
5514 * Kernel context 5529 * Kernel context
5515 */ 5530 */
5516uint8_t 5531static int
5517qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 5532qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5518{ 5533{
5519 int i, entries; 5534 int i, entries;
5520 uint8_t pid_match, wwn_match; 5535 uint8_t pid_match, wwn_match;
5521 uint8_t priority; 5536 int priority;
5522 uint32_t pid1, pid2; 5537 uint32_t pid1, pid2;
5523 uint64_t wwn1, wwn2; 5538 uint64_t wwn1, wwn2;
5524 struct qla_fcp_prio_entry *pri_entry; 5539 struct qla_fcp_prio_entry *pri_entry;
5525 struct qla_hw_data *ha = vha->hw; 5540 struct qla_hw_data *ha = vha->hw;
5526 5541
5527 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) 5542 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5528 return 0; 5543 return -1;
5529 5544
5530 priority = 0; 5545 priority = -1;
5531 entries = ha->fcp_prio_cfg->num_entries; 5546 entries = ha->fcp_prio_cfg->num_entries;
5532 pri_entry = &ha->fcp_prio_cfg->entry[0]; 5547 pri_entry = &ha->fcp_prio_cfg->entry[0];
5533 5548
@@ -5610,7 +5625,7 @@ int
5610qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 5625qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5611{ 5626{
5612 int ret; 5627 int ret;
5613 uint8_t priority; 5628 int priority;
5614 uint16_t mb[5]; 5629 uint16_t mb[5];
5615 5630
5616 if (fcport->port_type != FCT_TARGET || 5631 if (fcport->port_type != FCT_TARGET ||
@@ -5618,6 +5633,9 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5618 return QLA_FUNCTION_FAILED; 5633 return QLA_FUNCTION_FAILED;
5619 5634
5620 priority = qla24xx_get_fcp_prio(vha, fcport); 5635 priority = qla24xx_get_fcp_prio(vha, fcport);
5636 if (priority < 0)
5637 return QLA_FUNCTION_FAILED;
5638
5621 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); 5639 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
5622 if (ret == QLA_SUCCESS) 5640 if (ret == QLA_SUCCESS)
5623 fcport->fcp_prio = priority; 5641 fcport->fcp_prio = priority;
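Annotation: returning int instead of uint8_t lets qla24xx_get_fcp_prio() use -1 for "no matching entry", so priority 0 stays a legal value and qla24xx_update_fcport_fcp_prio() can bail out before issuing the set-priority mailbox command. The sentinel idiom in isolation, with illustrative names:

	/* -1 means "not found"; every value >= 0 is a real priority. */
	static int lookup_prio(const uint8_t *table, int entries, int idx)
	{
		if (!table || idx < 0 || idx >= entries)
			return -1;
		return table[idx];	/* 0 is valid, unlike the old uint8_t API */
	}

	/* caller mirrors qla24xx_update_fcport_fcp_prio() */
	int priority = lookup_prio(table, n, i);
	if (priority < 0)
		return QLA_FUNCTION_FAILED;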
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 48f97a92e33d..4c8167e11f69 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -83,3 +83,22 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
83 } 83 }
84 INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list); 84 INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
85} 85}
86
87static inline void
88qla2x00_set_fcport_state(fc_port_t *fcport, int state)
89{
90 int old_state;
91
92 old_state = atomic_read(&fcport->state);
93 atomic_set(&fcport->state, state);
94
95 /* Don't print state transitions during initial allocation of fcport */
96 if (old_state && old_state != state) {
97 DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw,
98 "scsi(%ld): FCPort state transitioned from %s to %s - "
99 "portid=%02x%02x%02x.\n", fcport->vha->host_no,
100 port_state_str[old_state], port_state_str[state],
101 fcport->d_id.b.domain, fcport->d_id.b.area,
102 fcport->d_id.b.al_pa));
103 }
104}
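Annotation: qla2x00_set_fcport_state() funnels every fcport state change through one place so each transition is logged exactly once, with port_state_str (added to qla_def.h above) supplying readable names; the old_state check keeps the first set on a zero-initialized fcport quiet. The same wrap-the-setter pattern in miniature - note this sketch tightens the read-then-set into a single atomic_xchg(), which the driver's version does not do:

	#include <linux/atomic.h>
	#include <linux/printk.h>

	static const char * const state_str[] = { "Unknown", "CONFIGURED", "ONLINE" };

	static inline void set_state_logged(atomic_t *state, int new)
	{
		int old = atomic_xchg(state, new);  /* fetch and store in one step */

		/* stay quiet on first assignment and on no-op writes */
		if (old && old != new)
			pr_debug("state %s -> %s\n", state_str[old], state_str[new]);
	}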
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d78d5896fc33..7bac3cd109d6 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 712518d05128..9c0f0e3389eb 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -843,7 +843,10 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
843 qla_printk(KERN_WARNING, ha, 843 qla_printk(KERN_WARNING, ha,
844 "Invalid SCSI completion handle %d.\n", index); 844 "Invalid SCSI completion handle %d.\n", index);
845 845
846 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 846 if (IS_QLA82XX(ha))
847 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
848 else
849 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
847 return; 850 return;
848 } 851 }
849 852
@@ -861,7 +864,10 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
861 qla_printk(KERN_WARNING, ha, 864 qla_printk(KERN_WARNING, ha,
862 "Invalid ISP SCSI completion handle\n"); 865 "Invalid ISP SCSI completion handle\n");
863 866
864 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 867 if (IS_QLA82XX(ha))
868 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
869 else
870 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
865 } 871 }
866} 872}
867 873
@@ -878,7 +884,10 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
878 if (index >= MAX_OUTSTANDING_COMMANDS) { 884 if (index >= MAX_OUTSTANDING_COMMANDS) {
879 qla_printk(KERN_WARNING, ha, 885 qla_printk(KERN_WARNING, ha,
880 "%s: Invalid completion handle (%x).\n", func, index); 886 "%s: Invalid completion handle (%x).\n", func, index);
881 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 887 if (IS_QLA82XX(ha))
888 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
889 else
890 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
882 goto done; 891 goto done;
883 } 892 }
884 sp = req->outstanding_cmds[index]; 893 sp = req->outstanding_cmds[index];
@@ -1564,7 +1573,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1564 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, 1573 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
1565 sts->handle); 1574 sts->handle);
1566 1575
1567 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1576 if (IS_QLA82XX(ha))
1577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1578 else
1579 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1568 qla2xxx_wake_dpc(vha); 1580 qla2xxx_wake_dpc(vha);
1569 return; 1581 return;
1570 } 1582 }
@@ -1794,12 +1806,13 @@ out:
1794 if (logit) 1806 if (logit)
1795 DEBUG2(qla_printk(KERN_INFO, ha, 1807 DEBUG2(qla_printk(KERN_INFO, ha,
1796 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " 1808 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
1797 "oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1809 "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1798 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, 1810 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
1799 cp->device->id, cp->device->lun, comp_status, scsi_status, 1811 cp->device->id, cp->device->lun, comp_status, scsi_status,
1800 cp->result, ox_id, cp->cmnd[0], 1812 cp->result, fcport->d_id.b.domain, fcport->d_id.b.area,
1801 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, 1813 fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1],
1802 resid_len, fw_resid_len)); 1814 cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
1815 fw_resid_len));
1803 1816
1804 if (rsp->status_srb == NULL) 1817 if (rsp->status_srb == NULL)
1805 qla2x00_sp_compl(ha, sp); 1818 qla2x00_sp_compl(ha, sp);
@@ -1908,13 +1921,17 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1908 qla2x00_sp_compl(ha, sp); 1921 qla2x00_sp_compl(ha, sp);
1909 1922
1910 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1923 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1911 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { 1924 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1925 || pkt->entry_type == COMMAND_TYPE_6) {
1912 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1926 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1913 vha->host_no)); 1927 vha->host_no));
1914 qla_printk(KERN_WARNING, ha, 1928 qla_printk(KERN_WARNING, ha,
1915 "Error entry - invalid handle\n"); 1929 "Error entry - invalid handle\n");
1916 1930
1917 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1931 if (IS_QLA82XX(ha))
1932 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1933 else
1934 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1918 qla2xxx_wake_dpc(vha); 1935 qla2xxx_wake_dpc(vha);
1919 } 1936 }
1920} 1937}
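Annotation: the same four-line branch now guards four error paths in qla_isr.c, because ISP82xx parts recover through an FCoE context reset rather than a full ISP abort. The patch open-codes it each time; a purely hypothetical helper (not in the patch) that would state the rule once:

	static inline void qla2xxx_flag_reset(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;

		if (IS_QLA82XX(ha))
			/* 82xx: firmware context reset is the recovery path */
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			/* everything else: request a full ISP abort */
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}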
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 34893397ac84..c26f0acdfecc 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1261,11 +1261,12 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1261 /* Check for logged in state. */ 1261 /* Check for logged in state. */
1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE && 1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1263 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1263 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1264 DEBUG2(printk("%s(%ld): Unable to verify " 1264 DEBUG2(qla_printk(KERN_WARNING, ha,
1265 "login-state (%x/%x) for loop_id %x\n", 1265 "scsi(%ld): Unable to verify login-state (%x/%x) "
1266 __func__, vha->host_no, 1266 " - portid=%02x%02x%02x.\n", vha->host_no,
1267 pd24->current_login_state, 1267 pd24->current_login_state, pd24->last_login_state,
1268 pd24->last_login_state, fcport->loop_id)); 1268 fcport->d_id.b.domain, fcport->d_id.b.area,
1269 fcport->d_id.b.al_pa));
1269 rval = QLA_FUNCTION_FAILED; 1270 rval = QLA_FUNCTION_FAILED;
1270 goto gpd_error_out; 1271 goto gpd_error_out;
1271 } 1272 }
@@ -1289,6 +1290,12 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1289 /* Check for logged in state. */ 1290 /* Check for logged in state. */
1290 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1291 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1291 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1292 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1293 DEBUG2(qla_printk(KERN_WARNING, ha,
1294 "scsi(%ld): Unable to verify login-state (%x/%x) "
1295 " - portid=%02x%02x%02x.\n", vha->host_no,
1296 pd->master_state, pd->slave_state,
1297 fcport->d_id.b.domain, fcport->d_id.b.area,
1298 fcport->d_id.b.al_pa));
1292 rval = QLA_FUNCTION_FAILED; 1299 rval = QLA_FUNCTION_FAILED;
1293 goto gpd_error_out; 1300 goto gpd_error_out;
1294 } 1301 }
@@ -1883,7 +1890,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1883 lg->handle = MAKE_HANDLE(req->id, lg->handle); 1890 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1884 lg->nport_handle = cpu_to_le16(loop_id); 1891 lg->nport_handle = cpu_to_le16(loop_id);
1885 lg->control_flags = 1892 lg->control_flags =
1886 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1893 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
1894 LCF_FREE_NPORT);
1887 lg->port_id[0] = al_pa; 1895 lg->port_id[0] = al_pa;
1888 lg->port_id[1] = area; 1896 lg->port_id[1] = area;
1889 lg->port_id[2] = domain; 1897 lg->port_id[2] = domain;
@@ -2362,7 +2370,7 @@ qla24xx_abort_command(srb_t *sp)
2362 abt->entry_count = 1; 2370 abt->entry_count = 1;
2363 abt->handle = MAKE_HANDLE(req->id, abt->handle); 2371 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2364 abt->nport_handle = cpu_to_le16(fcport->loop_id); 2372 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2365 abt->handle_to_abort = handle; 2373 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2366 abt->port_id[0] = fcport->d_id.b.al_pa; 2374 abt->port_id[0] = fcport->d_id.b.al_pa;
2367 abt->port_id[1] = fcport->d_id.b.area; 2375 abt->port_id[1] = fcport->d_id.b.area;
2368 abt->port_id[2] = fcport->d_id.b.domain; 2376 abt->port_id[2] = fcport->d_id.b.domain;
@@ -2779,44 +2787,6 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2779} 2787}
2780 2788
2781int 2789int
2782qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
2783 uint16_t off, uint16_t count)
2784{
2785 int rval;
2786 mbx_cmd_t mc;
2787 mbx_cmd_t *mcp = &mc;
2788
2789 if (!IS_FWI2_CAPABLE(vha->hw))
2790 return QLA_FUNCTION_FAILED;
2791
2792 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2793
2794 mcp->mb[0] = MBC_READ_SFP;
2795 mcp->mb[1] = addr;
2796 mcp->mb[2] = MSW(sfp_dma);
2797 mcp->mb[3] = LSW(sfp_dma);
2798 mcp->mb[6] = MSW(MSD(sfp_dma));
2799 mcp->mb[7] = LSW(MSD(sfp_dma));
2800 mcp->mb[8] = count;
2801 mcp->mb[9] = off;
2802 mcp->mb[10] = 0;
2803 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2804 mcp->in_mb = MBX_0;
2805 mcp->tov = MBX_TOV_SECONDS;
2806 mcp->flags = 0;
2807 rval = qla2x00_mailbox_command(vha, mcp);
2808
2809 if (rval != QLA_SUCCESS) {
2810 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2811 vha->host_no, rval, mcp->mb[0]));
2812 } else {
2813 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2814 }
2815
2816 return rval;
2817}
2818
2819int
2820qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 2790qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2821 uint16_t *port_speed, uint16_t *mb) 2791 uint16_t *port_speed, uint16_t *mb)
2822{ 2792{
@@ -3581,15 +3551,22 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3581} 3551}
3582 3552
3583int 3553int
3584qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, 3554qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3585 dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt) 3555 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3586{ 3556{
3587 int rval; 3557 int rval;
3588 mbx_cmd_t mc; 3558 mbx_cmd_t mc;
3589 mbx_cmd_t *mcp = &mc; 3559 mbx_cmd_t *mcp = &mc;
3560 struct qla_hw_data *ha = vha->hw;
3561
3562 if (!IS_FWI2_CAPABLE(ha))
3563 return QLA_FUNCTION_FAILED;
3590 3564
3591 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3565 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3592 3566
3567 if (len == 1)
3568 opt |= BIT_0;
3569
3593 mcp->mb[0] = MBC_READ_SFP; 3570 mcp->mb[0] = MBC_READ_SFP;
3594 mcp->mb[1] = dev; 3571 mcp->mb[1] = dev;
3595 mcp->mb[2] = MSW(sfp_dma); 3572 mcp->mb[2] = MSW(sfp_dma);
@@ -3597,17 +3574,16 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3597 mcp->mb[6] = MSW(MSD(sfp_dma)); 3574 mcp->mb[6] = MSW(MSD(sfp_dma));
3598 mcp->mb[7] = LSW(MSD(sfp_dma)); 3575 mcp->mb[7] = LSW(MSD(sfp_dma));
3599 mcp->mb[8] = len; 3576 mcp->mb[8] = len;
3600 mcp->mb[9] = adr; 3577 mcp->mb[9] = off;
3601 mcp->mb[10] = opt; 3578 mcp->mb[10] = opt;
3602 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 3579 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3603 mcp->in_mb = MBX_0; 3580 mcp->in_mb = MBX_1|MBX_0;
3604 mcp->tov = MBX_TOV_SECONDS; 3581 mcp->tov = MBX_TOV_SECONDS;
3605 mcp->flags = 0; 3582 mcp->flags = 0;
3606 rval = qla2x00_mailbox_command(vha, mcp); 3583 rval = qla2x00_mailbox_command(vha, mcp);
3607 3584
3608 if (opt & BIT_0) 3585 if (opt & BIT_0)
3609 if (sfp) 3586 *sfp = mcp->mb[1];
3610 *sfp = mcp->mb[8];
3611 3587
3612 if (rval != QLA_SUCCESS) { 3588 if (rval != QLA_SUCCESS) {
3613 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3589 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
@@ -3620,18 +3596,24 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3620} 3596}
3621 3597
3622int 3598int
3623qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, 3599qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3624 dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt) 3600 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3625{ 3601{
3626 int rval; 3602 int rval;
3627 mbx_cmd_t mc; 3603 mbx_cmd_t mc;
3628 mbx_cmd_t *mcp = &mc; 3604 mbx_cmd_t *mcp = &mc;
3605 struct qla_hw_data *ha = vha->hw;
3606
3607 if (!IS_FWI2_CAPABLE(ha))
3608 return QLA_FUNCTION_FAILED;
3629 3609
3630 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3610 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3631 3611
3612 if (len == 1)
3613 opt |= BIT_0;
3614
3632 if (opt & BIT_0) 3615 if (opt & BIT_0)
3633 if (sfp) 3616 len = *sfp;
3634 len = *sfp;
3635 3617
3636 mcp->mb[0] = MBC_WRITE_SFP; 3618 mcp->mb[0] = MBC_WRITE_SFP;
3637 mcp->mb[1] = dev; 3619 mcp->mb[1] = dev;
@@ -3640,10 +3622,10 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3640 mcp->mb[6] = MSW(MSD(sfp_dma)); 3622 mcp->mb[6] = MSW(MSD(sfp_dma));
3641 mcp->mb[7] = LSW(MSD(sfp_dma)); 3623 mcp->mb[7] = LSW(MSD(sfp_dma));
3642 mcp->mb[8] = len; 3624 mcp->mb[8] = len;
3643 mcp->mb[9] = adr; 3625 mcp->mb[9] = off;
3644 mcp->mb[10] = opt; 3626 mcp->mb[10] = opt;
3645 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 3627 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3646 mcp->in_mb = MBX_0; 3628 mcp->in_mb = MBX_1|MBX_0;
3647 mcp->tov = MBX_TOV_SECONDS; 3629 mcp->tov = MBX_TOV_SECONDS;
3648 mcp->flags = 0; 3630 mcp->flags = 0;
3649 rval = qla2x00_mailbox_command(vha, mcp); 3631 rval = qla2x00_mailbox_command(vha, mcp);
@@ -4160,63 +4142,32 @@ int
4160qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) 4142qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4161{ 4143{
4162 int rval; 4144 int rval;
4163 mbx_cmd_t mc; 4145 uint8_t byte;
4164 mbx_cmd_t *mcp = &mc;
4165 struct qla_hw_data *ha = vha->hw; 4146 struct qla_hw_data *ha = vha->hw;
4166 4147
4167 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, ha->host_no)); 4148 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
4168 4149
4169 /* High bits. */ 4150 /* Integer part */
4170 mcp->mb[0] = MBC_READ_SFP; 4151 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4171 mcp->mb[1] = 0x98;
4172 mcp->mb[2] = 0;
4173 mcp->mb[3] = 0;
4174 mcp->mb[6] = 0;
4175 mcp->mb[7] = 0;
4176 mcp->mb[8] = 1;
4177 mcp->mb[9] = 0x01;
4178 mcp->mb[10] = BIT_13|BIT_0;
4179 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4180 mcp->in_mb = MBX_1|MBX_0;
4181 mcp->tov = MBX_TOV_SECONDS;
4182 mcp->flags = 0;
4183 rval = qla2x00_mailbox_command(vha, mcp);
4184 if (rval != QLA_SUCCESS) { 4152 if (rval != QLA_SUCCESS) {
4185 DEBUG2_3_11(printk(KERN_WARNING 4153 DEBUG2_3_11(printk(KERN_WARNING
4186 "%s(%ld): failed=%x (%x).\n", __func__, 4154 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4187 vha->host_no, rval, mcp->mb[0]));
4188 ha->flags.thermal_supported = 0; 4155 ha->flags.thermal_supported = 0;
4189 goto fail; 4156 goto fail;
4190 } 4157 }
4191 *temp = mcp->mb[1] & 0xFF; 4158 *temp = byte;
4192 4159
4193 /* Low bits. */ 4160 /* Fraction part */
4194 mcp->mb[0] = MBC_READ_SFP; 4161 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4195 mcp->mb[1] = 0x98;
4196 mcp->mb[2] = 0;
4197 mcp->mb[3] = 0;
4198 mcp->mb[6] = 0;
4199 mcp->mb[7] = 0;
4200 mcp->mb[8] = 1;
4201 mcp->mb[9] = 0x10;
4202 mcp->mb[10] = BIT_13|BIT_0;
4203 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4204 mcp->in_mb = MBX_1|MBX_0;
4205 mcp->tov = MBX_TOV_SECONDS;
4206 mcp->flags = 0;
4207 rval = qla2x00_mailbox_command(vha, mcp);
4208 if (rval != QLA_SUCCESS) { 4162 if (rval != QLA_SUCCESS) {
4209 DEBUG2_3_11(printk(KERN_WARNING 4163 DEBUG2_3_11(printk(KERN_WARNING
4210 "%s(%ld): failed=%x (%x).\n", __func__, 4164 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4211 vha->host_no, rval, mcp->mb[0]));
4212 ha->flags.thermal_supported = 0; 4165 ha->flags.thermal_supported = 0;
4213 goto fail; 4166 goto fail;
4214 } 4167 }
4215 *frac = ((mcp->mb[1] & 0xFF) >> 6) * 25; 4168 *frac = (byte >> 6) * 25;
4216 4169
4217 if (rval == QLA_SUCCESS) 4170 DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no));
4218 DEBUG11(printk(KERN_INFO
4219 "%s(%ld): done.\n", __func__, ha->host_no));
4220fail: 4171fail:
4221 return rval; 4172 return rval;
4222} 4173}
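Annotation: with qla2x00_read_edc()/qla2x00_write_edc() gone, one mailbox wrapper covers SFP, EDC and thermal traffic: dev picks the I2C device, off the register, and BIT_0 in opt selects single-byte mode, where the datum rides in a mailbox register and no DMA buffer is needed - which is how the rewritten thermal reader pulls one byte with a zero sfp_dma. Usage, mirroring the calls visible in this diff (surrounding variables assumed from context):

	uint8_t byte;
	int rval;

	/* single byte via BIT_0: 0x98/BIT_13 address the thermal sensor */
	rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
	if (rval == QLA_SUCCESS)
		*temp = byte;

	/* bulk read: data lands in the pre-mapped coherent buffer */
	rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
	    addr, offset, SFP_BLOCK_SIZE, 0);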
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 2b69392a71a1..5e343919acad 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -136,7 +136,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
136 vha->host_no, fcport->loop_id, fcport->vp_idx)); 136 vha->host_no, fcport->loop_id, fcport->vp_idx));
137 137
138 qla2x00_mark_device_lost(vha, fcport, 0, 0); 138 qla2x00_mark_device_lost(vha, fcport, 0, 0);
139 atomic_set(&fcport->state, FCS_UNCONFIGURED); 139 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
140 } 140 }
141} 141}
142 142
@@ -456,7 +456,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
456 else 456 else
457 host->max_cmd_len = MAX_CMDSZ; 457 host->max_cmd_len = MAX_CMDSZ;
458 host->max_channel = MAX_BUSES - 1; 458 host->max_channel = MAX_BUSES - 1;
459 host->max_lun = MAX_LUNS; 459 host->max_lun = ql2xmaxlun;
460 host->unique_id = host->host_no; 460 host->unique_id = host->host_no;
461 host->max_id = MAX_TARGETS_2200; 461 host->max_id = MAX_TARGETS_2200;
462 host->transportt = qla2xxx_transport_vport_template; 462 host->transportt = qla2xxx_transport_vport_template;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 455fe134d31d..e1138bcc834c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -844,6 +844,12 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
844 return 0; 844 return 0;
845} 845}
846 846
847static void
848qla82xx_rom_unlock(struct qla_hw_data *ha)
849{
850 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
851}
852
847static int 853static int
848qla82xx_wait_rom_busy(struct qla_hw_data *ha) 854qla82xx_wait_rom_busy(struct qla_hw_data *ha)
849{ 855{
@@ -924,7 +930,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
924 return -1; 930 return -1;
925 } 931 }
926 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 932 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
927 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 933 qla82xx_rom_unlock(ha);
928 return ret; 934 return ret;
929} 935}
930 936
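Annotation: qla82xx_rom_unlock() names the PCIE_SEM2_UNLOCK register read that half a dozen call sites previously open-coded; reading that register is what releases the hardware semaphore taken by qla82xx_rom_lock(). The pairing every flash/ROM path follows, sketched as a hypothetical wrapper:

	static int rom_read_locked(struct qla_hw_data *ha, int addr, int *valp)
	{
		int ret;

		if (qla82xx_rom_lock(ha) != 0)
			return -1;		/* SEM2 not acquired */
		ret = qla82xx_do_rom_fast_read(ha, addr, valp);
		qla82xx_rom_unlock(ha);		/* the read releases SEM2 */
		return ret;
	}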
@@ -1056,7 +1062,7 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1056 ret = qla82xx_flash_wait_write_finish(ha); 1062 ret = qla82xx_flash_wait_write_finish(ha);
1057 1063
1058done_write: 1064done_write:
1059 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 1065 qla82xx_rom_unlock(ha);
1060 return ret; 1066 return ret;
1061} 1067}
1062 1068
@@ -1081,12 +1087,26 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
 1081 /* Halt all the individual PEGs and other blocks of the ISP */ 1087 /* Halt all the individual PEGs and other blocks of the ISP */
1082 qla82xx_rom_lock(ha); 1088 qla82xx_rom_lock(ha);
1083 1089
1084 /* mask all niu interrupts */ 1090 /* disable all I2Q */
1091 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
1092 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
1093 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
1094 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
1095 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
1096 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
1097
1098 /* disable all niu interrupts */
1085 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); 1099 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
1086 /* disable xge rx/tx */ 1100 /* disable xge rx/tx */
1087 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); 1101 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
1088 /* disable xg1 rx/tx */ 1102 /* disable xg1 rx/tx */
1089 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); 1103 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
1104 /* disable sideband mac */
1105 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
1106 /* disable ap0 mac */
1107 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
1108 /* disable ap1 mac */
1109 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
1090 1110
1091 /* halt sre */ 1111 /* halt sre */
1092 val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); 1112 val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -1101,6 +1121,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1101 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); 1121 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1102 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); 1122 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1103 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); 1123 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1124 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1104 1125
1105 /* halt pegs */ 1126 /* halt pegs */
1106 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); 1127 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -1108,9 +1129,9 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1108 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); 1129 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1109 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); 1130 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1110 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); 1131 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1132 msleep(20);
1111 1133
1112 /* big hammer */ 1134 /* big hammer */
1113 msleep(1000);
1114 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 1135 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1115 /* don't reset CAM block on reset */ 1136 /* don't reset CAM block on reset */
1116 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 1137 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
@@ -1129,7 +1150,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1129 qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); 1150 qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1130 msleep(20); 1151 msleep(20);
1131 1152
1132 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 1153 qla82xx_rom_unlock(ha);
1133 1154
1134 /* Read the signature value from the flash. 1155 /* Read the signature value from the flash.
1135 * Offset 0: Contain signature (0xcafecafe) 1156 * Offset 0: Contain signature (0xcafecafe)
@@ -2395,9 +2416,13 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2395 2416
2396 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2417 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2397 qla_printk(KERN_ERR, ha, 2418 qla_printk(KERN_ERR, ha,
2398 "Firmware loaded successfully from flash\n"); 2419 "Firmware loaded successfully from flash\n");
2399 return QLA_SUCCESS; 2420 return QLA_SUCCESS;
2421 } else {
2422 qla_printk(KERN_ERR, ha,
2423 "Firmware load from flash failed\n");
2400 } 2424 }
2425
2401try_blob_fw: 2426try_blob_fw:
2402 qla_printk(KERN_INFO, ha, 2427 qla_printk(KERN_INFO, ha,
2403 "Attempting to load firmware from blob\n"); 2428 "Attempting to load firmware from blob\n");
@@ -2548,11 +2573,11 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2548 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 2573 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2549 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2574 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2550 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2575 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2551 cmd_pkt->fcp_data_dseg_len = dsd_list_len; 2576 *dsd_seg++ = cpu_to_le32(dsd_list_len);
2552 } else { 2577 } else {
2553 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2578 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2554 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2579 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2555 *cur_dsd++ = dsd_list_len; 2580 *cur_dsd++ = cpu_to_le32(dsd_list_len);
2556 } 2581 }
2557 cur_dsd = (uint32_t *)next_dsd; 2582 cur_dsd = (uint32_t *)next_dsd;
2558 while (avail_dsds) { 2583 while (avail_dsds) {
@@ -2991,7 +3016,7 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
2991 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3016 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2992 3017
2993done_unprotect: 3018done_unprotect:
2994 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3019 qla82xx_rom_unlock(ha);
2995 return ret; 3020 return ret;
2996} 3021}
2997 3022
@@ -3020,7 +3045,7 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3020 if (qla82xx_write_disable_flash(ha) != 0) 3045 if (qla82xx_write_disable_flash(ha) != 0)
3021 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3046 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
3022done_protect: 3047done_protect:
3023 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3048 qla82xx_rom_unlock(ha);
3024 return ret; 3049 return ret;
3025} 3050}
3026 3051
@@ -3048,7 +3073,7 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3048 } 3073 }
3049 ret = qla82xx_flash_wait_write_finish(ha); 3074 ret = qla82xx_flash_wait_write_finish(ha);
3050done: 3075done:
3051 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3076 qla82xx_rom_unlock(ha);
3052 return ret; 3077 return ret;
3053} 3078}
3054 3079
@@ -3228,7 +3253,7 @@ void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3228 * else died while holding it. 3253 * else died while holding it.
3229 * In either case, unlock. 3254 * In either case, unlock.
3230 */ 3255 */
3231 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3256 qla82xx_rom_unlock(ha);
3232} 3257}
3233 3258
3234/* 3259/*
@@ -3528,15 +3553,18 @@ int
3528qla82xx_device_state_handler(scsi_qla_host_t *vha) 3553qla82xx_device_state_handler(scsi_qla_host_t *vha)
3529{ 3554{
3530 uint32_t dev_state; 3555 uint32_t dev_state;
3556 uint32_t old_dev_state;
3531 int rval = QLA_SUCCESS; 3557 int rval = QLA_SUCCESS;
3532 unsigned long dev_init_timeout; 3558 unsigned long dev_init_timeout;
3533 struct qla_hw_data *ha = vha->hw; 3559 struct qla_hw_data *ha = vha->hw;
3560 int loopcount = 0;
3534 3561
3535 qla82xx_idc_lock(ha); 3562 qla82xx_idc_lock(ha);
3536 if (!vha->flags.init_done) 3563 if (!vha->flags.init_done)
3537 qla82xx_set_drv_active(vha); 3564 qla82xx_set_drv_active(vha);
3538 3565
3539 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3566 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3567 old_dev_state = dev_state;
3540 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 3568 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
3541 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3542 3570
@@ -3553,10 +3581,16 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3553 break; 3581 break;
3554 } 3582 }
3555 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3583 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3556 qla_printk(KERN_INFO, ha, 3584 if (old_dev_state != dev_state) {
3557 "2:Device state is 0x%x = %s\n", dev_state, 3585 loopcount = 0;
3558 dev_state < MAX_STATES ? 3586 old_dev_state = dev_state;
3559 qdev_state[dev_state] : "Unknown"); 3587 }
3588 if (loopcount < 5) {
3589 qla_printk(KERN_INFO, ha,
3590 "2:Device state is 0x%x = %s\n", dev_state,
3591 dev_state < MAX_STATES ?
3592 qdev_state[dev_state] : "Unknown");
3593 }
3560 3594
3561 switch (dev_state) { 3595 switch (dev_state) {
3562 case QLA82XX_DEV_READY: 3596 case QLA82XX_DEV_READY:
@@ -3570,6 +3604,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3570 qla82xx_idc_lock(ha); 3604 qla82xx_idc_lock(ha);
3571 break; 3605 break;
3572 case QLA82XX_DEV_NEED_RESET: 3606 case QLA82XX_DEV_NEED_RESET:
3607 if (!ql2xdontresethba)
3573 qla82xx_need_reset_handler(vha); 3608 qla82xx_need_reset_handler(vha);
3574 dev_init_timeout = jiffies + 3609 dev_init_timeout = jiffies +
3575 (ha->nx_dev_init_timeout * HZ); 3610 (ha->nx_dev_init_timeout * HZ);
@@ -3604,6 +3639,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3604 msleep(1000); 3639 msleep(1000);
3605 qla82xx_idc_lock(ha); 3640 qla82xx_idc_lock(ha);
3606 } 3641 }
3642 loopcount++;
3607 } 3643 }
3608exit: 3644exit:
3609 qla82xx_idc_unlock(ha); 3645 qla82xx_idc_unlock(ha);
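Annotation: the loopcount/old_dev_state pair stops the init poll loop from flooding the log while the device idles in one state: the "2:Device state" line is emitted at most five times per state, and the budget rearms whenever the state actually moves. The idiom in isolation; read_state() is a stand-in for the CRB register read:

	uint32_t old = read_state(), cur;
	int loopcount = 0;

	while (!done) {
		cur = read_state();
		if (cur != old) {
			loopcount = 0;		/* new state: log it again */
			old = cur;
		}
		if (loopcount < 5)
			pr_info("Device state is 0x%x\n", cur);
		/* ... act on cur, sleep, check the init timeout ... */
		loopcount++;
	}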
@@ -3621,7 +3657,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3621 if (dev_state == QLA82XX_DEV_NEED_RESET && 3657 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3622 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3658 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3623 qla_printk(KERN_WARNING, ha, 3659 qla_printk(KERN_WARNING, ha,
3624 "%s(): Adapter reset needed!\n", __func__); 3660 "scsi(%ld) %s: Adapter reset needed!\n",
3661 vha->host_no, __func__);
3625 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3662 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3626 qla2xxx_wake_dpc(vha); 3663 qla2xxx_wake_dpc(vha);
3627 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3664 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
@@ -3632,10 +3669,27 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3632 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3669 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3633 qla2xxx_wake_dpc(vha); 3670 qla2xxx_wake_dpc(vha);
3634 } else { 3671 } else {
3635 qla82xx_check_fw_alive(vha);
3636 if (qla82xx_check_fw_alive(vha)) { 3672 if (qla82xx_check_fw_alive(vha)) {
3637 halt_status = qla82xx_rd_32(ha, 3673 halt_status = qla82xx_rd_32(ha,
3638 QLA82XX_PEG_HALT_STATUS1); 3674 QLA82XX_PEG_HALT_STATUS1);
3675 qla_printk(KERN_INFO, ha,
3676 "scsi(%ld): %s, Dumping hw/fw registers:\n "
3677 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n "
3678 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n "
3679 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n "
3680 " PEG_NET_4_PC: 0x%x\n",
3681 vha->host_no, __func__, halt_status,
3682 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3683 qla82xx_rd_32(ha,
3684 QLA82XX_CRB_PEG_NET_0 + 0x3c),
3685 qla82xx_rd_32(ha,
3686 QLA82XX_CRB_PEG_NET_1 + 0x3c),
3687 qla82xx_rd_32(ha,
3688 QLA82XX_CRB_PEG_NET_2 + 0x3c),
3689 qla82xx_rd_32(ha,
3690 QLA82XX_CRB_PEG_NET_3 + 0x3c),
3691 qla82xx_rd_32(ha,
3692 QLA82XX_CRB_PEG_NET_4 + 0x3c));
3639 if (halt_status & HALT_STATUS_UNRECOVERABLE) { 3693 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3640 set_bit(ISP_UNRECOVERABLE, 3694 set_bit(ISP_UNRECOVERABLE,
3641 &vha->dpc_flags); 3695 &vha->dpc_flags);
@@ -3651,8 +3705,9 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3651 if (ha->flags.mbox_busy) { 3705 if (ha->flags.mbox_busy) {
3652 ha->flags.mbox_int = 1; 3706 ha->flags.mbox_int = 1;
3653 DEBUG2(qla_printk(KERN_ERR, ha, 3707 DEBUG2(qla_printk(KERN_ERR, ha,
3654 "Due to fw hung, doing premature " 3708 "scsi(%ld) Due to fw hung, doing "
3655 "completion of mbx command\n")); 3709 "premature completion of mbx "
3710 "command\n", vha->host_no));
3656 if (test_bit(MBX_INTR_WAIT, 3711 if (test_bit(MBX_INTR_WAIT,
3657 &ha->mbx_cmd_flags)) 3712 &ha->mbx_cmd_flags))
3658 complete(&ha->mbx_intr_comp); 3713 complete(&ha->mbx_intr_comp);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index ed5883f1778a..8a21832c6693 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index aa7747529165..f461925a9dfc 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -164,6 +164,20 @@ module_param(ql2xasynctmfenable, int, S_IRUGO);
164MODULE_PARM_DESC(ql2xasynctmfenable, 164MODULE_PARM_DESC(ql2xasynctmfenable,
165 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 165 "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
166 "Default is 0 - Issue TM IOCBs via mailbox mechanism."); 166 "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
167
168int ql2xdontresethba;
169module_param(ql2xdontresethba, int, S_IRUGO);
170MODULE_PARM_DESC(ql2xdontresethba,
171 "Option to specify reset behaviour\n"
172 " 0 (Default) -- Reset on failure.\n"
173 " 1 -- Do not reset on failure.\n");
174
175uint ql2xmaxlun = MAX_LUNS;
176module_param(ql2xmaxlun, uint, S_IRUGO);
177MODULE_PARM_DESC(ql2xmaxlun,
178 "Defines the maximum LU number to register with the SCSI "
179 "midlayer. Default is 65535.");
180
167/* 181/*
168 * SCSI host template entry points 182 * SCSI host template entry points
169 */ 183 */
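
The two new parameters above use the stock module_param()/MODULE_PARM_DESC() idiom. A self-contained sketch of the same pattern (example_max_lun is a hypothetical name, not a qla2xxx parameter):

    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/stat.h>

    /* Hypothetical parameter; mirrors the ql2xmaxlun/ql2xdontresethba pattern. */
    static uint example_max_lun = 65535;
    module_param(example_max_lun, uint, S_IRUGO);   /* read-only via sysfs */
    MODULE_PARM_DESC(example_max_lun,
            "Maximum LU number to register. Default is 65535.");

S_IRUGO exposes the value read-only under /sys/module/<module>/parameters/, and the driver consumes it later at probe time, as the hunk below does with host->max_lun = ql2xmaxlun.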
@@ -528,7 +542,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
528static int 542static int
529qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 543qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
530{ 544{
531 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 545 scsi_qla_host_t *vha = shost_priv(host);
532 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 546 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
533 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 547 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
534 struct qla_hw_data *ha = vha->hw; 548 struct qla_hw_data *ha = vha->hw;
@@ -2128,7 +2142,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2128 else 2142 else
2129 host->max_cmd_len = MAX_CMDSZ; 2143 host->max_cmd_len = MAX_CMDSZ;
2130 host->max_channel = MAX_BUSES - 1; 2144 host->max_channel = MAX_BUSES - 1;
2131 host->max_lun = MAX_LUNS; 2145 host->max_lun = ql2xmaxlun;
2132 host->transportt = qla2xxx_transport_template; 2146 host->transportt = qla2xxx_transport_template;
2133 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2147 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2134 2148
@@ -2360,21 +2374,26 @@ qla2x00_remove_one(struct pci_dev *pdev)
2360 base_vha = pci_get_drvdata(pdev); 2374 base_vha = pci_get_drvdata(pdev);
2361 ha = base_vha->hw; 2375 ha = base_vha->hw;
2362 2376
2363 spin_lock_irqsave(&ha->vport_slock, flags); 2377 mutex_lock(&ha->vport_lock);
2364 list_for_each_entry(vha, &ha->vp_list, list) { 2378 while (ha->cur_vport_count) {
2365 atomic_inc(&vha->vref_count); 2379 struct Scsi_Host *scsi_host;
2366 2380
2367 if (vha->fc_vport) { 2381 spin_lock_irqsave(&ha->vport_slock, flags);
2368 spin_unlock_irqrestore(&ha->vport_slock, flags);
2369 2382
2370 fc_vport_terminate(vha->fc_vport); 2383 BUG_ON(base_vha->list.next == &ha->vp_list);
2384 /* This assumes first entry in ha->vp_list is always base vha */
2385 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
2386 scsi_host = scsi_host_get(vha->host);
2371 2387
2372 spin_lock_irqsave(&ha->vport_slock, flags); 2388 spin_unlock_irqrestore(&ha->vport_slock, flags);
2373 } 2389 mutex_unlock(&ha->vport_lock);
2390
2391 fc_vport_terminate(vha->fc_vport);
2392 scsi_host_put(vha->host);
2374 2393
2375 atomic_dec(&vha->vref_count); 2394 mutex_lock(&ha->vport_lock);
2376 } 2395 }
2377 spin_unlock_irqrestore(&ha->vport_slock, flags); 2396 mutex_unlock(&ha->vport_lock);
2378 2397
2379 set_bit(UNLOADING, &base_vha->dpc_flags); 2398 set_bit(UNLOADING, &base_vha->dpc_flags);
2380 2399
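
The remove-path rework above replaces an iterate-under-spinlock loop with a drain loop: pin one entry under the lock, drop every lock, do the sleeping teardown, then re-acquire and re-check. A sketch of that shape with hypothetical types and helpers (get_child()/destroy_child()/put_child() stand in for scsi_host_get()/fc_vport_terminate()/scsi_host_put()):

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    struct child { struct list_head node; /* refcount elided */ };

    static void get_child(struct child *c);        /* pin a reference */
    static void put_child(struct child *c);        /* drop it */
    static void destroy_child(struct child *c);    /* may sleep; unlinks c */

    static DEFINE_MUTEX(bus_lock);          /* plays the role of vport_lock */
    static DEFINE_SPINLOCK(list_lock);      /* plays the role of vport_slock */
    static LIST_HEAD(children);

    static void teardown_all(void)
    {
            unsigned long flags;

            mutex_lock(&bus_lock);
            while (!list_empty(&children)) {
                    struct child *c;

                    spin_lock_irqsave(&list_lock, flags);
                    c = list_first_entry(&children, struct child, node);
                    get_child(c);                   /* pin across the unlock */
                    spin_unlock_irqrestore(&list_lock, flags);
                    mutex_unlock(&bus_lock);        /* destroy may sleep */

                    destroy_child(c);
                    put_child(c);

                    mutex_lock(&bus_lock);          /* re-check list state */
            }
            mutex_unlock(&bus_lock);
    }

Re-taking the mutex and re-reading the list on every pass is what makes it safe for the teardown call to sleep while entries are being removed.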
@@ -2544,7 +2563,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2544{ 2563{
2545 if (atomic_read(&fcport->state) == FCS_ONLINE && 2564 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2546 vha->vp_idx == fcport->vp_idx) { 2565 vha->vp_idx == fcport->vp_idx) {
2547 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2566 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2548 qla2x00_schedule_rport_del(vha, fcport, defer); 2567 qla2x00_schedule_rport_del(vha, fcport, defer);
2549 } 2568 }
2550 /* 2569 /*
@@ -2552,7 +2571,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2552 * port but do the retries. 2571 * port but do the retries.
2553 */ 2572 */
2554 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 2573 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2555 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2574 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2556 2575
2557 if (!do_login) 2576 if (!do_login)
2558 return; 2577 return;
@@ -2607,7 +2626,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2607 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 2626 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2608 continue; 2627 continue;
2609 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2628 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2610 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2629 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2611 if (defer) 2630 if (defer)
2612 qla2x00_schedule_rport_del(vha, fcport, defer); 2631 qla2x00_schedule_rport_del(vha, fcport, defer);
2613 else if (vha->vp_idx == fcport->vp_idx) 2632 else if (vha->vp_idx == fcport->vp_idx)
@@ -3214,6 +3233,17 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3214 fcport->d_id.b.area, 3233 fcport->d_id.b.area,
3215 fcport->d_id.b.al_pa); 3234 fcport->d_id.b.al_pa);
3216 3235
3236 if (fcport->loop_id == FC_NO_LOOP_ID) {
3237 fcport->loop_id = next_loopid =
3238 ha->min_external_loopid;
3239 status = qla2x00_find_new_loop_id(
3240 vha, fcport);
3241 if (status != QLA_SUCCESS) {
3242 /* Ran out of IDs to use */
3243 break;
3244 }
3245 }
3246
3217 if (IS_ALOGIO_CAPABLE(ha)) { 3247 if (IS_ALOGIO_CAPABLE(ha)) {
3218 fcport->flags |= FCF_ASYNC_SENT; 3248 fcport->flags |= FCF_ASYNC_SENT;
3219 data[0] = 0; 3249 data[0] = 0;
@@ -3604,7 +3634,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3604 if (!pci_channel_offline(ha->pdev)) 3634 if (!pci_channel_offline(ha->pdev))
3605 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 3635 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3606 3636
3607 if (IS_QLA82XX(ha)) { 3637 /* Make sure qla82xx_watchdog is run only for physical port */
3638 if (!vha->vp_idx && IS_QLA82XX(ha)) {
3608 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 3639 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
3609 start_dpc++; 3640 start_dpc++;
3610 qla82xx_watchdog(vha); 3641 qla82xx_watchdog(vha);
@@ -3612,7 +3643,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3612 3643
3613 /* Loop down handler. */ 3644 /* Loop down handler. */
3614 if (atomic_read(&vha->loop_down_timer) > 0 && 3645 if (atomic_read(&vha->loop_down_timer) > 0 &&
3615 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 3646 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
3647 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
3616 && vha->flags.online) { 3648 && vha->flags.online) {
3617 3649
3618 if (atomic_read(&vha->loop_down_timer) == 3650 if (atomic_read(&vha->loop_down_timer) ==
@@ -3648,7 +3680,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
3648 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 3680 if (!(sfcp->flags & FCF_FCP2_DEVICE))
3649 continue; 3681 continue;
3650 3682
3651 set_bit(ISP_ABORT_NEEDED, 3683 if (IS_QLA82XX(ha))
3684 set_bit(FCOE_CTX_RESET_NEEDED,
3685 &vha->dpc_flags);
3686 else
3687 set_bit(ISP_ABORT_NEEDED,
3652 &vha->dpc_flags); 3688 &vha->dpc_flags);
3653 break; 3689 break;
3654 } 3690 }
@@ -3667,7 +3703,12 @@ qla2x00_timer(scsi_qla_host_t *vha)
3667 qla_printk(KERN_WARNING, ha, 3703 qla_printk(KERN_WARNING, ha,
3668 "Loop down - aborting ISP.\n"); 3704 "Loop down - aborting ISP.\n");
3669 3705
3670 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3706 if (IS_QLA82XX(ha))
3707 set_bit(FCOE_CTX_RESET_NEEDED,
3708 &vha->dpc_flags);
3709 else
3710 set_bit(ISP_ABORT_NEEDED,
3711 &vha->dpc_flags);
3671 } 3712 }
3672 } 3713 }
3673 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 3714 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
@@ -3675,8 +3716,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3675 atomic_read(&vha->loop_down_timer))); 3716 atomic_read(&vha->loop_down_timer)));
3676 } 3717 }
3677 3718
3678 /* Check if beacon LED needs to be blinked */ 3719 /* Check if beacon LED needs to be blinked for physical host only */
3679 if (ha->beacon_blink_led == 1) { 3720 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
3680 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 3721 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3681 start_dpc++; 3722 start_dpc++;
3682 } 3723 }
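
The same two-way choice between FCOE_CTX_RESET_NEEDED and ISP_ABORT_NEEDED now appears twice in qla2x00_timer(). A hypothetical helper, not part of this patch, that would express the decision once:

    /* Hypothetical: folds the repeated if/else from the hunks above. */
    static inline void qla_sched_isp_recovery(scsi_qla_host_t *vha)
    {
            if (IS_QLA82XX(vha->hw))
                    set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
            else
                    set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    }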
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index f0b2b9986a55..d70f03008981 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 22070621206c..693647661ed1 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3a260c3f055a..062c97bf62f5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,15 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.07.00" 10#define QLA2XXX_VERSION "8.03.07.03-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 7 14#define QLA_DRIVER_PATCH_VER 7
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 230ba097d28c..c22f2a764d9d 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2068,15 +2068,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
2068 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 2068 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
2069 unsigned int id = cmd->device->id; 2069 unsigned int id = cmd->device->id;
2070 unsigned int lun = cmd->device->lun; 2070 unsigned int lun = cmd->device->lun;
2071 unsigned long serial = cmd->serial_number;
2072 unsigned long flags; 2071 unsigned long flags;
2073 struct srb *srb = NULL; 2072 struct srb *srb = NULL;
2074 int ret = SUCCESS; 2073 int ret = SUCCESS;
2075 int wait = 0; 2074 int wait = 0;
2076 2075
2077 ql4_printk(KERN_INFO, ha, 2076 ql4_printk(KERN_INFO, ha,
2078 "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n", 2077 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
2079 ha->host_no, id, lun, cmd, serial); 2078 ha->host_no, id, lun, cmd);
2080 2079
2081 spin_lock_irqsave(&ha->hardware_lock, flags); 2080 spin_lock_irqsave(&ha->hardware_lock, flags);
2082 srb = (struct srb *) CMD_SP(cmd); 2081 srb = (struct srb *) CMD_SP(cmd);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 633c2395a92a..abea2cf05c2e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -321,6 +321,12 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
321 "changed. The Linux SCSI layer does not " 321 "changed. The Linux SCSI layer does not "
322 "automatically adjust these parameters.\n"); 322 "automatically adjust these parameters.\n");
323 323
324 if (sshdr.asc == 0x38 && sshdr.ascq == 0x07)
325 scmd_printk(KERN_WARNING, scmd,
326 "Warning! Received an indication that the "
327 "LUN reached a thin provisioning soft "
328 "threshold.\n");
329
324 /* 330 /*
325 * Pass the UA upwards for a determination in the completion 331 * Pass the UA upwards for a determination in the completion
326 * functions. 332 * functions.
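
The new warning keys off the decoded sense header already available in scsi_check_sense(). For reference, a sketch of matching an ASC/ASCQ pair starting from raw sense bytes (the function name is illustrative; scsi_normalize_sense() is the existing helper that fills a struct scsi_sense_hdr):

    #include <linux/types.h>
    #include <scsi/scsi_eh.h>

    /* Sketch: decode raw sense bytes and match an ASC/ASCQ pair. */
    static bool sense_is_thin_threshold(const u8 *sense, int len)
    {
            struct scsi_sense_hdr sshdr;

            if (!scsi_normalize_sense(sense, len, &sshdr))
                    return false;   /* no valid sense data */
            /* 0x38/0x07: thin provisioning soft threshold reached */
            return sshdr.asc == 0x38 && sshdr.ascq == 0x07;
    }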
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c99da926fdac..f46855cd853d 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -386,13 +386,59 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
386 * @s: output goes here 386 * @s: output goes here
387 * @p: not used 387 * @p: not used
388 */ 388 */
389static int proc_scsi_show(struct seq_file *s, void *p) 389static int always_match(struct device *dev, void *data)
390{ 390{
391 seq_printf(s, "Attached devices:\n"); 391 return 1;
392 bus_for_each_dev(&scsi_bus_type, NULL, s, proc_print_scsidevice); 392}
393 return 0; 393
394static inline struct device *next_scsi_device(struct device *start)
395{
396 struct device *next = bus_find_device(&scsi_bus_type, start, NULL,
397 always_match);
398 put_device(start);
399 return next;
394} 400}
395 401
402static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos)
403{
404 struct device *dev = NULL;
405 loff_t n = *pos;
406
407 while ((dev = next_scsi_device(dev))) {
408 if (!n--)
409 break;
410 sfile->private++;
411 }
412 return dev;
413}
414
415static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
416{
417 (*pos)++;
418 sfile->private++;
419 return next_scsi_device(v);
420}
421
422static void scsi_seq_stop(struct seq_file *sfile, void *v)
423{
424 put_device(v);
425}
426
427static int scsi_seq_show(struct seq_file *sfile, void *dev)
428{
429 if (!sfile->private)
430 seq_puts(sfile, "Attached devices:\n");
431
432 return proc_print_scsidevice(dev, sfile);
433}
434
435static const struct seq_operations scsi_seq_ops = {
436 .start = scsi_seq_start,
437 .next = scsi_seq_next,
438 .stop = scsi_seq_stop,
439 .show = scsi_seq_show
440};
441
396/** 442/**
397 * proc_scsi_open - glue function 443 * proc_scsi_open - glue function
398 * @inode: not used 444 * @inode: not used
@@ -406,7 +452,7 @@ static int proc_scsi_open(struct inode *inode, struct file *file)
406 * We don't really need this for the write case but it doesn't 452 * We don't really need this for the write case but it doesn't
407 * harm either. 453 * harm either.
408 */ 454 */
409 return single_open(file, proc_scsi_show, NULL); 455 return seq_open(file, &scsi_seq_ops);
410} 456}
411 457
412static const struct file_operations proc_scsi_operations = { 458static const struct file_operations proc_scsi_operations = {
@@ -415,7 +461,7 @@ static const struct file_operations proc_scsi_operations = {
415 .read = seq_read, 461 .read = seq_read,
416 .write = proc_scsi_write, 462 .write = proc_scsi_write,
417 .llseek = seq_lseek, 463 .llseek = seq_lseek,
418 .release = single_release, 464 .release = seq_release,
419}; 465};
420 466
421/** 467/**
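
The conversion above swaps single_open(), which rendered the whole device list into one buffer via bus_for_each_dev(), for a true seq_file iterator that visits one device per ->show() call and can restart at any offset. A minimal self-contained example of the start/next/stop/show contract, iterating a static table instead of the SCSI bus:

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    static const char *names[] = { "alpha", "beta", "gamma" };

    static void *ex_start(struct seq_file *m, loff_t *pos)
    {
            if (*pos >= ARRAY_SIZE(names))
                    return NULL;            /* end of sequence */
            return (void *)&names[*pos];
    }

    static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
    {
            ++*pos;
            return ex_start(m, pos);
    }

    static void ex_stop(struct seq_file *m, void *v) { }

    static int ex_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "%s\n", *(const char **)v);
            return 0;
    }

    static const struct seq_operations ex_ops = {
            .start = ex_start, .next = ex_next,
            .stop  = ex_stop,  .show = ex_show,
    };

seq_open()/seq_release() then replace single_open()/single_release() in the file_operations, exactly as the tail of the hunk does.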
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 8bca8c25ba69..84a1fdf67864 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -275,10 +275,8 @@ void scsi_tgt_free_queue(struct Scsi_Host *shost)
275 275
276 for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) { 276 for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
277 list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i], 277 list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
278 hash_list) { 278 hash_list)
279 list_del(&tcmd->hash_list); 279 list_move(&tcmd->hash_list, &cmds);
280 list_add(&tcmd->hash_list, &cmds);
281 }
282 } 280 }
283 281
284 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); 282 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
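
list_move(entry, head) is exactly list_del(entry) followed by list_add(entry, head): the node leaves its current list and is spliced in right after head, which is why the two-statement body, and its braces, collapse to one line above. A tiny illustration (drain_to() is a made-up helper):

    #include <linux/list.h>

    /* Move every node from src onto dst, safe against removal mid-walk. */
    static void drain_to(struct list_head *src, struct list_head *dst)
    {
            struct list_head *pos, *n;

            list_for_each_safe(pos, n, src)
                    list_move(pos, dst);
    }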
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 815069d13f9b..1b214910b714 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -422,8 +422,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
422 422
423 snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), 423 snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
424 "fc_wq_%d", shost->host_no); 424 "fc_wq_%d", shost->host_no);
425 fc_host->work_q = create_singlethread_workqueue( 425 fc_host->work_q = alloc_workqueue(fc_host->work_q_name, 0, 0);
426 fc_host->work_q_name);
427 if (!fc_host->work_q) 426 if (!fc_host->work_q)
428 return -ENOMEM; 427 return -ENOMEM;
429 428
@@ -431,8 +430,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
431 snprintf(fc_host->devloss_work_q_name, 430 snprintf(fc_host->devloss_work_q_name,
432 sizeof(fc_host->devloss_work_q_name), 431 sizeof(fc_host->devloss_work_q_name),
433 "fc_dl_%d", shost->host_no); 432 "fc_dl_%d", shost->host_no);
434 fc_host->devloss_work_q = create_singlethread_workqueue( 433 fc_host->devloss_work_q =
435 fc_host->devloss_work_q_name); 434 alloc_workqueue(fc_host->devloss_work_q_name, 0, 0);
436 if (!fc_host->devloss_work_q) { 435 if (!fc_host->devloss_work_q) {
437 destroy_workqueue(fc_host->work_q); 436 destroy_workqueue(fc_host->work_q);
438 fc_host->work_q = NULL; 437 fc_host->work_q = NULL;
@@ -2489,6 +2488,8 @@ fc_rport_final_delete(struct work_struct *work)
2489 unsigned long flags; 2488 unsigned long flags;
2490 int do_callback = 0; 2489 int do_callback = 0;
2491 2490
2491 fc_terminate_rport_io(rport);
2492
2492 /* 2493 /*
2493 * if a scan is pending, flush the SCSI Host work_q so that 2494 * if a scan is pending, flush the SCSI Host work_q so that
2494 * that we can reclaim the rport scan work element. 2495 * that we can reclaim the rport scan work element.
@@ -2496,8 +2497,6 @@ fc_rport_final_delete(struct work_struct *work)
2496 if (rport->flags & FC_RPORT_SCAN_PENDING) 2497 if (rport->flags & FC_RPORT_SCAN_PENDING)
2497 scsi_flush_work(shost); 2498 scsi_flush_work(shost);
2498 2499
2499 fc_terminate_rport_io(rport);
2500
2501 /* 2500 /*
2502 * Cancel any outstanding timers. These should really exist 2501 * Cancel any outstanding timers. These should really exist
2503 * only when rmmod'ing the LLDD and we're asking for 2502 * only when rmmod'ing the LLDD and we're asking for
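
fc_host_setup() now calls alloc_workqueue(name, flags, max_active); with flags and max_active both 0 the result is an ordinary concurrency-managed queue rather than a dedicated single kernel thread, which is presumably acceptable for these host work items. A self-contained usage sketch (all names hypothetical):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *ex_wq;
    static struct work_struct ex_work;

    static void ex_fn(struct work_struct *w)
    {
            /* deferred work runs here in process context */
    }

    static int ex_setup(void)
    {
            /* flags = 0, max_active = 0: default concurrency-managed queue */
            ex_wq = alloc_workqueue("ex_wq", 0, 0);
            if (!ex_wq)
                    return -ENOMEM;
            INIT_WORK(&ex_work, ex_fn);
            queue_work(ex_wq, &ex_work);
            return 0;
    }

    static void ex_teardown(void)
    {
            destroy_workqueue(ex_wq);       /* flushes pending work first */
    }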
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index a124a28f2ccb..a1baccce05f0 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -565,12 +565,12 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
565 pDCB->TagMask |= 1 << tag[1]; 565 pDCB->TagMask |= 1 << tag[1];
566 pSRB->TagNumber = tag[1]; 566 pSRB->TagNumber = tag[1];
567 DC390_write8(ScsiFifo, tag[1]); 567 DC390_write8(ScsiFifo, tag[1]);
568 DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for Cmd %li (SRB %p), block tag %02x\n", scmd->serial_number, pSRB, tag[1])); 568 DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for SRB %p, block tag %02x\n", pSRB, tag[1]));
569 cmd = SEL_W_ATN3; 569 cmd = SEL_W_ATN3;
570 } else { 570 } else {
571 /* No TagQ */ 571 /* No TagQ */
572//no_tag: 572//no_tag:
573 DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for Cmd %li (SRB %p), No TagQ\n", disc_allowed ? "" : "o", scmd->serial_number, pSRB)); 573 DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for SRB %p, No TagQ\n", disc_allowed ? "" : "o", pSRB));
574 } 574 }
575 575
576 pSRB->SRBState = SRB_START_; 576 pSRB->SRBState = SRB_START_;
@@ -620,8 +620,8 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
620 if (DC390_read8 (Scsi_Status) & INTERRUPT) 620 if (DC390_read8 (Scsi_Status) & INTERRUPT)
621 { 621 {
622 dc390_freetag (pDCB, pSRB); 622 dc390_freetag (pDCB, pSRB);
623 DEBUG0(printk ("DC390: Interrupt during Start SCSI (pid %li, target %02i-%02i)\n", 623 DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n",
624 scmd->serial_number, scmd->device->id, scmd->device->lun)); 624 scmd->device->id, scmd->device->lun));
625 pSRB->SRBState = SRB_READY; 625 pSRB->SRBState = SRB_READY;
626 //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); 626 //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
627 pACB->SelLost++; 627 pACB->SelLost++;
@@ -1705,8 +1705,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1705 1705
1706 status = pSRB->TargetStatus; 1706 status = pSRB->TargetStatus;
1707 1707
1708 DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p, pid %li\n", status, pcmd->result,\ 1708 DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p\n", status, pcmd->result, pSRB));
1709 pSRB, pcmd->serial_number));
1710 if(pSRB->SRBFlag & AUTO_REQSENSE) 1709 if(pSRB->SRBFlag & AUTO_REQSENSE)
1711 { /* Last command was a Request Sense */ 1710 { /* Last command was a Request Sense */
1712 pSRB->SRBFlag &= ~AUTO_REQSENSE; 1711 pSRB->SRBFlag &= ~AUTO_REQSENSE;
@@ -1727,7 +1726,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1727 } else { 1726 } else {
1728 SET_RES_DRV(pcmd->result, DRIVER_SENSE); 1727 SET_RES_DRV(pcmd->result, DRIVER_SENSE);
1729 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); 1728 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
1730 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->serial_number, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); 1729 DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
1731 pSRB->TotalXferredLen = 0; 1730 pSRB->TotalXferredLen = 0;
1732 SET_RES_DID(pcmd->result, DID_SOFT_ERROR); 1731 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1733 } 1732 }
@@ -1747,7 +1746,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1747 else if (status == SAM_STAT_TASK_SET_FULL) 1746 else if (status == SAM_STAT_TASK_SET_FULL)
1748 { 1747 {
1749 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); 1748 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
1750 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->serial_number, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); 1749 DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
1751 pSRB->TotalXferredLen = 0; 1750 pSRB->TotalXferredLen = 0;
1752 SET_RES_DID(pcmd->result, DID_SOFT_ERROR); 1751 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1753 } 1752 }
@@ -1801,7 +1800,7 @@ cmd_done:
1801 /* Add to free list */ 1800 /* Add to free list */
1802 dc390_Free_insert (pACB, pSRB); 1801 dc390_Free_insert (pACB, pSRB);
1803 1802
1804 DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done pid %li\n", pcmd->serial_number)); 1803 DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done\n"));
1805 pcmd->scsi_done (pcmd); 1804 pcmd->scsi_done (pcmd);
1806 1805
1807 return; 1806 return;
@@ -1997,8 +1996,7 @@ static int DC390_abort(struct scsi_cmnd *cmd)
1997 struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata; 1996 struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata;
1998 struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata; 1997 struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata;
1999 1998
2000 scmd_printk(KERN_WARNING, cmd, 1999 scmd_printk(KERN_WARNING, cmd, "DC390: Abort command\n");
2001 "DC390: Abort command (pid %li)\n", cmd->serial_number);
2002 2000
2003 /* abort() is too stupid for already sent commands at the moment. 2001 /* abort() is too stupid for already sent commands at the moment.
2004 * If it's called we are in trouble anyway, so let's dump some info 2002 * If it's called we are in trouble anyway, so let's dump some info
@@ -2006,7 +2004,7 @@ static int DC390_abort(struct scsi_cmnd *cmd)
2006 dc390_dumpinfo(pACB, pDCB, NULL); 2004 dc390_dumpinfo(pACB, pDCB, NULL);
2007 2005
2008 pDCB->DCBFlag |= ABORT_DEV_; 2006 pDCB->DCBFlag |= ABORT_DEV_;
2009 printk(KERN_INFO "DC390: Aborted pid %li\n", cmd->serial_number); 2007 printk(KERN_INFO "DC390: Aborted.\n");
2010 2008
2011 return FAILED; 2009 return FAILED;
2012} 2010}
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index edfc5da8be4c..90e104d6b558 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1256,8 +1256,8 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct
1256 j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number; 1256 j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number;
1257 1257
1258 if (SCpnt->host_scribble) 1258 if (SCpnt->host_scribble)
1259 panic("%s: qcomm, pid %ld, SCpnt %p already active.\n", 1259 panic("%s: qcomm, SCpnt %p already active.\n",
1260 BN(j), SCpnt->serial_number, SCpnt); 1260 BN(j), SCpnt);
1261 1261
1262 /* i is the mailbox number, look for the first free mailbox 1262 /* i is the mailbox number, look for the first free mailbox
1263 starting from last_cp_used */ 1263 starting from last_cp_used */
@@ -1286,9 +1286,9 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct
1286 cpp->cpp_index = i; 1286 cpp->cpp_index = i;
1287 SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index; 1287 SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;
1288 1288
1289 if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n", 1289 if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d.\n",
1290 BN(j), i, SCpnt->device->channel, SCpnt->device->id, 1290 BN(j), i, SCpnt->device->channel, SCpnt->device->id,
1291 SCpnt->device->lun, SCpnt->serial_number); 1291 SCpnt->device->lun);
1292 1292
1293 cpp->opcode = OP_SCSI; 1293 cpp->opcode = OP_SCSI;
1294 cpp->channel = SCpnt->device->channel; 1294 cpp->channel = SCpnt->device->channel;
@@ -1315,7 +1315,7 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct
1315 unmap_dma(i, j); 1315 unmap_dma(i, j);
1316 SCpnt->host_scribble = NULL; 1316 SCpnt->host_scribble = NULL;
1317 scmd_printk(KERN_INFO, SCpnt, 1317 scmd_printk(KERN_INFO, SCpnt,
1318 "qcomm, pid %ld, adapter busy.\n", SCpnt->serial_number); 1318 "qcomm, adapter busy.\n");
1319 return 1; 1319 return 1;
1320 } 1320 }
1321 1321
@@ -1337,14 +1337,12 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
1337 j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; 1337 j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
1338 1338
1339 if (SCarg->host_scribble == NULL) { 1339 if (SCarg->host_scribble == NULL) {
1340 scmd_printk(KERN_INFO, SCarg, "abort, pid %ld inactive.\n", 1340 scmd_printk(KERN_INFO, SCarg, "abort, command inactive.\n");
1341 SCarg->serial_number);
1342 return SUCCESS; 1341 return SUCCESS;
1343 } 1342 }
1344 1343
1345 i = *(unsigned int *)SCarg->host_scribble; 1344 i = *(unsigned int *)SCarg->host_scribble;
1346 scmd_printk(KERN_INFO, SCarg, "abort, mbox %d, pid %ld.\n", 1345 scmd_printk(KERN_INFO, SCarg, "abort, mbox %d.\n", i);
1347 i, SCarg->serial_number);
1348 1346
1349 if (i >= sh[j]->can_queue) 1347 if (i >= sh[j]->can_queue)
1350 panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j)); 1348 panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
@@ -1387,8 +1385,7 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
1387 SCarg->result = DID_ABORT << 16; 1385 SCarg->result = DID_ABORT << 16;
1388 SCarg->host_scribble = NULL; 1386 SCarg->host_scribble = NULL;
1389 HD(j)->cp_stat[i] = FREE; 1387 HD(j)->cp_stat[i] = FREE;
1390 printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n", 1388 printk("%s, abort, mbox %d ready, DID_ABORT, done.\n", BN(j), i);
1391 BN(j), i, SCarg->serial_number);
1392 SCarg->scsi_done(SCarg); 1389 SCarg->scsi_done(SCarg);
1393 return SUCCESS; 1390 return SUCCESS;
1394 } 1391 }
@@ -1403,12 +1400,12 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1403 struct scsi_cmnd *SCpnt; 1400 struct scsi_cmnd *SCpnt;
1404 1401
1405 j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; 1402 j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
1406 scmd_printk(KERN_INFO, SCarg, "reset, enter, pid %ld.\n", SCarg->serial_number); 1403 scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");
1407 1404
1408 spin_lock_irq(sh[j]->host_lock); 1405 spin_lock_irq(sh[j]->host_lock);
1409 1406
1410 if (SCarg->host_scribble == NULL) 1407 if (SCarg->host_scribble == NULL)
1411 printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->serial_number); 1408 printk("%s: reset, inactive.\n", BN(j));
1412 1409
1413 if (HD(j)->in_reset) { 1410 if (HD(j)->in_reset) {
1414 printk("%s: reset, exit, already in reset.\n", BN(j)); 1411 printk("%s: reset, exit, already in reset.\n", BN(j));
@@ -1445,14 +1442,12 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1445 1442
1446 if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) { 1443 if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
1447 HD(j)->cp_stat[i] = ABORTING; 1444 HD(j)->cp_stat[i] = ABORTING;
1448 printk("%s: reset, mbox %d aborting, pid %ld.\n", 1445 printk("%s: reset, mbox %d aborting.\n", BN(j), i);
1449 BN(j), i, SCpnt->serial_number);
1450 } 1446 }
1451 1447
1452 else { 1448 else {
1453 HD(j)->cp_stat[i] = IN_RESET; 1449 HD(j)->cp_stat[i] = IN_RESET;
1454 printk("%s: reset, mbox %d in reset, pid %ld.\n", 1450 printk("%s: reset, mbox %d in reset.\n", BN(j), i);
1455 BN(j), i, SCpnt->serial_number);
1456 } 1451 }
1457 1452
1458 if (SCpnt->host_scribble == NULL) 1453 if (SCpnt->host_scribble == NULL)
@@ -1500,8 +1495,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1500 /* This mailbox is still waiting for its interrupt */ 1495 /* This mailbox is still waiting for its interrupt */
1501 HD(j)->cp_stat[i] = LOCKED; 1496 HD(j)->cp_stat[i] = LOCKED;
1502 1497
1503 printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n", 1498 printk("%s, reset, mbox %d locked, DID_RESET, done.\n", BN(j), i);
1504 BN(j), i, SCpnt->serial_number);
1505 } 1499 }
1506 1500
1507 else if (HD(j)->cp_stat[i] == ABORTING) { 1501 else if (HD(j)->cp_stat[i] == ABORTING) {
@@ -1513,8 +1507,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1513 /* This mailbox was never queued to the adapter */ 1507 /* This mailbox was never queued to the adapter */
1514 HD(j)->cp_stat[i] = FREE; 1508 HD(j)->cp_stat[i] = FREE;
1515 1509
1516 printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n", 1510 printk("%s, reset, mbox %d aborting, DID_RESET, done.\n", BN(j), i);
1517 BN(j), i, SCpnt->serial_number);
1518 } 1511 }
1519 1512
1520 else 1513 else
@@ -1528,7 +1521,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1528 HD(j)->in_reset = FALSE; 1521 HD(j)->in_reset = FALSE;
1529 do_trace = FALSE; 1522 do_trace = FALSE;
1530 1523
1531 if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->serial_number); 1524 if (arg_done) printk("%s: reset, exit, done.\n", BN(j));
1532 else printk("%s: reset, exit.\n", BN(j)); 1525 else printk("%s: reset, exit.\n", BN(j));
1533 1526
1534 spin_unlock_irq(sh[j]->host_lock); 1527 spin_unlock_irq(sh[j]->host_lock);
@@ -1671,10 +1664,10 @@ static int reorder(unsigned int j, unsigned long cursec,
1671 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1664 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1672 for (n = 0; n < n_ready; n++) { 1665 for (n = 0; n < n_ready; n++) {
1673 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1666 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1674 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\ 1667 printk("%s %d.%d:%d mb %d fc %d nr %d sec %ld ns %u"\
1675 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1668 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1676 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1669 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1677 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1670 SCpnt->lun, k, flushcount, n_ready,
1678 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), 1671 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1679 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1672 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1680 YESNO(overlap), cpp->xdir); 1673 YESNO(overlap), cpp->xdir);
@@ -1709,9 +1702,9 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned in
1709 1702
1710 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { 1703 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
1711 scmd_printk(KERN_INFO, SCpnt, 1704 scmd_printk(KERN_INFO, SCpnt,
1712 "%s, pid %ld, mbox %d, adapter" 1705 "%s, mbox %d, adapter"
1713 " busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"), 1706 " busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"),
1714 SCpnt->serial_number, k); 1707 k);
1715 HD(j)->cp_stat[k] = ABORTING; 1708 HD(j)->cp_stat[k] = ABORTING;
1716 continue; 1709 continue;
1717 } 1710 }
@@ -1793,12 +1786,12 @@ static irqreturn_t ihdlr(unsigned int j)
1793 if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i); 1786 if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
1794 1787
1795 if (SCpnt->host_scribble == NULL) 1788 if (SCpnt->host_scribble == NULL)
1796 panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i, 1789 panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", BN(j), i,
1797 SCpnt->serial_number, SCpnt); 1790 SCpnt);
1798 1791
1799 if (*(unsigned int *)SCpnt->host_scribble != i) 1792 if (*(unsigned int *)SCpnt->host_scribble != i)
1800 panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n", 1793 panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
1801 BN(j), i, SCpnt->serial_number, *(unsigned int *)SCpnt->host_scribble); 1794 BN(j), i, *(unsigned int *)SCpnt->host_scribble);
1802 1795
1803 sync_dma(i, j); 1796 sync_dma(i, j);
1804 1797
@@ -1841,8 +1834,8 @@ static irqreturn_t ihdlr(unsigned int j)
1841 (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 && 1834 (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 &&
1842 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY))) 1835 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
1843 scmd_printk(KERN_INFO, SCpnt, 1836 scmd_printk(KERN_INFO, SCpnt,
1844 "ihdlr, pid %ld, target_status 0x%x, sense key 0x%x.\n", 1837 "ihdlr, target_status 0x%x, sense key 0x%x.\n",
1845 SCpnt->serial_number, spp->target_status, 1838 spp->target_status,
1846 SCpnt->sense_buffer[2]); 1839 SCpnt->sense_buffer[2]);
1847 1840
1848 HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0; 1841 HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0;
@@ -1913,8 +1906,8 @@ static irqreturn_t ihdlr(unsigned int j)
1913 do_trace || msg_byte(spp->target_status)) 1906 do_trace || msg_byte(spp->target_status))
1914#endif 1907#endif
1915 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\ 1908 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\
1916 " pid %ld, reg 0x%x, count %d.\n", 1909 " reg 0x%x, count %d.\n",
1917 i, spp->adapter_status, spp->target_status, SCpnt->serial_number, 1910 i, spp->adapter_status, spp->target_status,
1918 reg, HD(j)->iocount); 1911 reg, HD(j)->iocount);
1919 1912
1920 unmap_dma(i, j); 1913 unmap_dma(i, j);
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 4468ae3610f7..97ae716134d0 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -381,7 +381,7 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
381 hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; 381 hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata;
382 382
383 DB(DB_QUEUE_COMMAND, 383 DB(DB_QUEUE_COMMAND,
384 printk("Q-%d-%02x-%ld( ", cmd->device->id, cmd->cmnd[0], cmd->serial_number)) 384 printk("Q-%d-%02x( ", cmd->device->id, cmd->cmnd[0]))
385 385
386/* Set up a few fields in the scsi_cmnd structure for our own use: 386/* Set up a few fields in the scsi_cmnd structure for our own use:
387 * - host_scribble is the pointer to the next cmd in the input queue 387 * - host_scribble is the pointer to the next cmd in the input queue
@@ -462,7 +462,7 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
462 462
463 wd33c93_execute(cmd->device->host); 463 wd33c93_execute(cmd->device->host);
464 464
465 DB(DB_QUEUE_COMMAND, printk(")Q-%ld ", cmd->serial_number)) 465 DB(DB_QUEUE_COMMAND, printk(")Q "))
466 466
467 spin_unlock_irq(&hostdata->lock); 467 spin_unlock_irq(&hostdata->lock);
468 return 0; 468 return 0;
@@ -687,7 +687,7 @@ wd33c93_execute(struct Scsi_Host *instance)
687 */ 687 */
688 688
689 DB(DB_EXECUTE, 689 DB(DB_EXECUTE,
690 printk("%s%ld)EX-2 ", (cmd->SCp.phase) ? "d:" : "", cmd->serial_number)) 690 printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
691} 691}
692 692
693static void 693static void
@@ -963,7 +963,7 @@ wd33c93_intr(struct Scsi_Host *instance)
963 case CSR_XFER_DONE | PHS_COMMAND: 963 case CSR_XFER_DONE | PHS_COMMAND:
964 case CSR_UNEXP | PHS_COMMAND: 964 case CSR_UNEXP | PHS_COMMAND:
965 case CSR_SRV_REQ | PHS_COMMAND: 965 case CSR_SRV_REQ | PHS_COMMAND:
966 DB(DB_INTR, printk("CMND-%02x,%ld", cmd->cmnd[0], cmd->serial_number)) 966 DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
967 transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, 967 transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR,
968 hostdata); 968 hostdata);
969 hostdata->state = S_CONNECTED; 969 hostdata->state = S_CONNECTED;
@@ -1007,7 +1007,7 @@ wd33c93_intr(struct Scsi_Host *instance)
1007 switch (msg) { 1007 switch (msg) {
1008 1008
1009 case COMMAND_COMPLETE: 1009 case COMMAND_COMPLETE:
1010 DB(DB_INTR, printk("CCMP-%ld", cmd->serial_number)) 1010 DB(DB_INTR, printk("CCMP"))
1011 write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); 1011 write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
1012 hostdata->state = S_PRE_CMP_DISC; 1012 hostdata->state = S_PRE_CMP_DISC;
1013 break; 1013 break;
@@ -1174,7 +1174,7 @@ wd33c93_intr(struct Scsi_Host *instance)
1174 1174
1175 write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); 1175 write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
1176 if (phs == 0x60) { 1176 if (phs == 0x60) {
1177 DB(DB_INTR, printk("SX-DONE-%ld", cmd->serial_number)) 1177 DB(DB_INTR, printk("SX-DONE"))
1178 cmd->SCp.Message = COMMAND_COMPLETE; 1178 cmd->SCp.Message = COMMAND_COMPLETE;
1179 lun = read_wd33c93(regs, WD_TARGET_LUN); 1179 lun = read_wd33c93(regs, WD_TARGET_LUN);
1180 DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) 1180 DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
@@ -1200,8 +1200,8 @@ wd33c93_intr(struct Scsi_Host *instance)
1200 wd33c93_execute(instance); 1200 wd33c93_execute(instance);
1201 } else { 1201 } else {
1202 printk 1202 printk
1203 ("%02x:%02x:%02x-%ld: Unknown SEL_XFER_DONE phase!!---", 1203 ("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---",
1204 asr, sr, phs, cmd->serial_number); 1204 asr, sr, phs);
1205 spin_unlock_irqrestore(&hostdata->lock, flags); 1205 spin_unlock_irqrestore(&hostdata->lock, flags);
1206 } 1206 }
1207 break; 1207 break;
@@ -1266,7 +1266,7 @@ wd33c93_intr(struct Scsi_Host *instance)
1266 spin_unlock_irqrestore(&hostdata->lock, flags); 1266 spin_unlock_irqrestore(&hostdata->lock, flags);
1267 return; 1267 return;
1268 } 1268 }
1269 DB(DB_INTR, printk("UNEXP_DISC-%ld", cmd->serial_number)) 1269 DB(DB_INTR, printk("UNEXP_DISC"))
1270 hostdata->connected = NULL; 1270 hostdata->connected = NULL;
1271 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 1271 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
1272 hostdata->state = S_UNCONNECTED; 1272 hostdata->state = S_UNCONNECTED;
@@ -1292,7 +1292,7 @@ wd33c93_intr(struct Scsi_Host *instance)
1292 */ 1292 */
1293 1293
1294 write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); 1294 write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
1295 DB(DB_INTR, printk("DISC-%ld", cmd->serial_number)) 1295 DB(DB_INTR, printk("DISC"))
1296 if (cmd == NULL) { 1296 if (cmd == NULL) {
1297 printk(" - Already disconnected! "); 1297 printk(" - Already disconnected! ");
1298 hostdata->state = S_UNCONNECTED; 1298 hostdata->state = S_UNCONNECTED;
@@ -1491,7 +1491,6 @@ wd33c93_intr(struct Scsi_Host *instance)
1491 } else 1491 } else
1492 hostdata->state = S_CONNECTED; 1492 hostdata->state = S_CONNECTED;
1493 1493
1494 DB(DB_INTR, printk("-%ld", cmd->serial_number))
1495 spin_unlock_irqrestore(&hostdata->lock, flags); 1494 spin_unlock_irqrestore(&hostdata->lock, flags);
1496 break; 1495 break;
1497 1496
@@ -1637,8 +1636,8 @@ wd33c93_abort(struct scsi_cmnd * cmd)
1637 cmd->host_scribble = NULL; 1636 cmd->host_scribble = NULL;
1638 cmd->result = DID_ABORT << 16; 1637 cmd->result = DID_ABORT << 16;
1639 printk 1638 printk
1640 ("scsi%d: Abort - removing command %ld from input_Q. ", 1639 ("scsi%d: Abort - removing command from input_Q. ",
1641 instance->host_no, cmd->serial_number); 1640 instance->host_no);
1642 enable_irq(cmd->device->host->irq); 1641 enable_irq(cmd->device->host->irq);
1643 cmd->scsi_done(cmd); 1642 cmd->scsi_done(cmd);
1644 return SUCCESS; 1643 return SUCCESS;
@@ -1662,8 +1661,8 @@ wd33c93_abort(struct scsi_cmnd * cmd)
1662 uchar sr, asr; 1661 uchar sr, asr;
1663 unsigned long timeout; 1662 unsigned long timeout;
1664 1663
1665 printk("scsi%d: Aborting connected command %ld - ", 1664 printk("scsi%d: Aborting connected command - ",
1666 instance->host_no, cmd->serial_number); 1665 instance->host_no);
1667 1666
1668 printk("stopping DMA - "); 1667 printk("stopping DMA - ");
1669 if (hostdata->dma == D_DMA_RUNNING) { 1668 if (hostdata->dma == D_DMA_RUNNING) {
@@ -1729,8 +1728,8 @@ wd33c93_abort(struct scsi_cmnd * cmd)
1729 while (tmp) { 1728 while (tmp) {
1730 if (tmp == cmd) { 1729 if (tmp == cmd) {
1731 printk 1730 printk
1732 ("scsi%d: Abort - command %ld found on disconnected_Q - ", 1731 ("scsi%d: Abort - command found on disconnected_Q - ",
1733 instance->host_no, cmd->serial_number); 1732 instance->host_no);
1734 printk("Abort SNOOZE. "); 1733 printk("Abort SNOOZE. ");
1735 enable_irq(cmd->device->host->irq); 1734 enable_irq(cmd->device->host->irq);
1736 return FAILED; 1735 return FAILED;
@@ -2180,8 +2179,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2180 strcat(bp, "\nconnected: "); 2179 strcat(bp, "\nconnected: ");
2181 if (hd->connected) { 2180 if (hd->connected) {
2182 cmd = (struct scsi_cmnd *) hd->connected; 2181 cmd = (struct scsi_cmnd *) hd->connected;
2183 sprintf(tbuf, " %ld-%d:%d(%02x)", 2182 sprintf(tbuf, " %d:%d(%02x)",
2184 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2183 cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2185 strcat(bp, tbuf); 2184 strcat(bp, tbuf);
2186 } 2185 }
2187 } 2186 }
@@ -2189,8 +2188,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2189 strcat(bp, "\ninput_Q: "); 2188 strcat(bp, "\ninput_Q: ");
2190 cmd = (struct scsi_cmnd *) hd->input_Q; 2189 cmd = (struct scsi_cmnd *) hd->input_Q;
2191 while (cmd) { 2190 while (cmd) {
2192 sprintf(tbuf, " %ld-%d:%d(%02x)", 2191 sprintf(tbuf, " %d:%d(%02x)",
2193 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2192 cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2194 strcat(bp, tbuf); 2193 strcat(bp, tbuf);
2195 cmd = (struct scsi_cmnd *) cmd->host_scribble; 2194 cmd = (struct scsi_cmnd *) cmd->host_scribble;
2196 } 2195 }
@@ -2199,8 +2198,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2199 strcat(bp, "\ndisconnected_Q:"); 2198 strcat(bp, "\ndisconnected_Q:");
2200 cmd = (struct scsi_cmnd *) hd->disconnected_Q; 2199 cmd = (struct scsi_cmnd *) hd->disconnected_Q;
2201 while (cmd) { 2200 while (cmd) {
2202 sprintf(tbuf, " %ld-%d:%d(%02x)", 2201 sprintf(tbuf, " %d:%d(%02x)",
2203 cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); 2202 cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
2204 strcat(bp, tbuf); 2203 strcat(bp, tbuf);
2205 cmd = (struct scsi_cmnd *) cmd->host_scribble; 2204 cmd = (struct scsi_cmnd *) cmd->host_scribble;
2206 } 2205 }
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 6f34963b3c64..7ad48585c5e6 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -662,7 +662,6 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
662static int ssb_pci_sprom_get(struct ssb_bus *bus, 662static int ssb_pci_sprom_get(struct ssb_bus *bus,
663 struct ssb_sprom *sprom) 663 struct ssb_sprom *sprom)
664{ 664{
665 const struct ssb_sprom *fallback;
666 int err; 665 int err;
667 u16 *buf; 666 u16 *buf;
668 667
@@ -707,10 +706,17 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
707 if (err) { 706 if (err) {
708 /* All CRC attempts failed. 707 /* All CRC attempts failed.
709 * Maybe there is no SPROM on the device? 708 * Maybe there is no SPROM on the device?
710 * If we have a fallback, use that. */ 709 * Now we ask the arch code if there is some sprom
711 fallback = ssb_get_fallback_sprom(); 710 * available for this device in some other storage */
712 if (fallback) { 711 err = ssb_fill_sprom_with_fallback(bus, sprom);
713 memcpy(sprom, fallback, sizeof(*sprom)); 712 if (err) {
713 ssb_printk(KERN_WARNING PFX "WARNING: Using"
714 " fallback SPROM failed (err %d)\n",
715 err);
716 } else {
717 ssb_dprintk(KERN_DEBUG PFX "Using SPROM"
718 " revision %d provided by"
719 " platform.\n", sprom->revision);
714 err = 0; 720 err = 0;
715 goto out_free; 721 goto out_free;
716 } 722 }
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 5f34d7a3e3a5..45ff0e3a3828 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -17,7 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18 18
19 19
20static const struct ssb_sprom *fallback_sprom; 20static int(*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out);
21 21
22 22
23static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, 23static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
@@ -145,36 +145,43 @@ out:
145} 145}
146 146
147/** 147/**
148 * ssb_arch_set_fallback_sprom - Set a fallback SPROM for use if no SPROM is found. 148 * ssb_arch_register_fallback_sprom - Registers a method providing a
149 * fallback SPROM if no SPROM is found.
149 * 150 *
150 * @sprom: The SPROM data structure to register. 151 * @sprom_callback: The callback function.
151 * 152 *
152 * With this function the architecture implementation may register a fallback 153 * With this function the architecture implementation may register a
153 * SPROM data structure. The fallback is only used for PCI based SSB devices, 154 * callback handler which fills the SPROM data structure. The fallback is
154 * where no valid SPROM can be found in the shadow registers. 155 * only used for PCI based SSB devices, where no valid SPROM can be found
156 * in the shadow registers.
155 * 157 *
156 * This function is useful for weird architectures that have a half-assed SSB device 158 * This function is useful for weird architectures that have a half-assed
157 * hardwired to their PCI bus. 159 * SSB device hardwired to their PCI bus.
158 * 160 *
159 * Note that it does only work with PCI attached SSB devices. PCMCIA devices currently 161 * Note that it does only work with PCI attached SSB devices. PCMCIA
160 * don't use this fallback. 162 * devices currently don't use this fallback.
161 * Architectures must provide the SPROM for native SSB devices anyway, 163 * Architectures must provide the SPROM for native SSB devices anyway, so
162 * so the fallback also isn't used for native devices. 164 * the fallback also isn't used for native devices.
163 * 165 *
164 * This function is available for architecture code, only. So it is not exported. 166 * This function is available for architecture code, only. So it is not
167 * exported.
165 */ 168 */
166int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom) 169int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus,
170 struct ssb_sprom *out))
167{ 171{
168 if (fallback_sprom) 172 if (get_fallback_sprom)
169 return -EEXIST; 173 return -EEXIST;
170 fallback_sprom = sprom; 174 get_fallback_sprom = sprom_callback;
171 175
172 return 0; 176 return 0;
173} 177}
174 178
175const struct ssb_sprom *ssb_get_fallback_sprom(void) 179int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out)
176{ 180{
177 return fallback_sprom; 181 if (!get_fallback_sprom)
182 return -ENOENT;
183
184 return get_fallback_sprom(bus, out);
178} 185}
179 186
180/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ 187/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
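
The sprom.c change above replaces a registered data blob with a registered provider function, so the architecture can compose SPROM contents per device (the callback receives the ssb_bus) instead of handing over one static structure. The shape of that pattern in isolation (register_provider()/fill_from_provider() are illustrative names):

    #include <linux/errno.h>

    /* Sketch of the register-a-provider pattern. */
    static int (*provider)(int id, char *out);

    int register_provider(int (*cb)(int id, char *out))
    {
            if (provider)
                    return -EEXIST;         /* only one provider allowed */
            provider = cb;
            return 0;
    }

    int fill_from_provider(int id, char *out)
    {
            if (!provider)
                    return -ENOENT;         /* nothing registered */
            return provider(id, out);       /* delegate to the arch code */
    }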
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 0331139a726f..77653014db0b 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -171,7 +171,8 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
171 const char *buf, size_t count, 171 const char *buf, size_t count,
172 int (*sprom_check_crc)(const u16 *sprom, size_t size), 172 int (*sprom_check_crc)(const u16 *sprom, size_t size),
173 int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); 173 int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom));
174extern const struct ssb_sprom *ssb_get_fallback_sprom(void); 174extern int ssb_fill_sprom_with_fallback(struct ssb_bus *bus,
175 struct ssb_sprom *out);
175 176
176 177
177/* core.c */ 178/* core.c */
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index c93ef207b0b4..c0f0ac7c1cdb 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/statfs.h> 30#include <linux/statfs.h>
31#include <linux/writeback.h> 31#include <linux/writeback.h>
32#include <linux/prefetch.h>
32 33
33#include "netfs.h" 34#include "netfs.h"
34 35
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 9ef2dbbfa62b..5cb0f0ef6af0 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -30,5 +30,6 @@ config TCM_PSCSI
30 passthrough access to Linux/SCSI device 30 passthrough access to Linux/SCSI device
31 31
32source "drivers/target/loopback/Kconfig" 32source "drivers/target/loopback/Kconfig"
33source "drivers/target/tcm_fc/Kconfig"
33 34
34endif 35endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 1178bbfc68fe..21df808a992c 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -24,3 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
24 24
25# Fabric modules 25# Fabric modules
26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ 26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
27
28obj-$(CONFIG_TCM_FC) += tcm_fc/
diff --git a/drivers/target/tcm_fc/Kconfig b/drivers/target/tcm_fc/Kconfig
new file mode 100644
index 000000000000..40caf458e89e
--- /dev/null
+++ b/drivers/target/tcm_fc/Kconfig
@@ -0,0 +1,5 @@
1config TCM_FC
2 tristate "TCM_FC fabric Plugin"
3 depends on LIBFC
4 help
5 Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
new file mode 100644
index 000000000000..7a5c2b64cf65
--- /dev/null
+++ b/drivers/target/tcm_fc/Makefile
@@ -0,0 +1,15 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
2 -I$(srctree)/drivers/scsi/ \
3 -I$(srctree)/include/scsi/ \
4 -I$(srctree)/drivers/target/tcm_fc/
5
6tcm_fc-y += tfc_cmd.o \
7 tfc_conf.o \
8 tfc_io.o \
9 tfc_sess.o
10
11obj-$(CONFIG_TCM_FC) += tcm_fc.o
12
13ifdef CONFIGFS_TCM_FC_DEBUG
14EXTRA_CFLAGS += -DTCM_FC_DEBUG
15endif
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
new file mode 100644
index 000000000000..defff32b7880
--- /dev/null
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17#ifndef __TCM_FC_H__
18#define __TCM_FC_H__
19
20#define FT_VERSION "0.3"
21
22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
23#define FT_TPG_NAMELEN 32 /* max length of TPG name */
24#define FT_LUN_NAMELEN 32 /* max length of LUN name */
25
26/*
27 * Debug options.
28 */
29#define FT_DEBUG_CONF 0x01 /* configuration messages */
30#define FT_DEBUG_SESS 0x02 /* session messages */
31#define FT_DEBUG_TM 0x04 /* TM operations */
32#define FT_DEBUG_IO 0x08 /* I/O commands */
33#define FT_DEBUG_DATA 0x10 /* Data transfer */
34
35extern unsigned int ft_debug_logging; /* debug options */
36
37#define FT_DEBUG(mask, fmt, args...) \
38 do { \
39 if (ft_debug_logging & (mask)) \
40 printk(KERN_INFO "tcm_fc: %s: " fmt, \
41 __func__, ##args); \
42 } while (0)
43
44#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
45#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
46#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
47#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
48#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
49
50struct ft_transport_id {
51 __u8 format;
52 __u8 __resvd1[7];
53 __u8 wwpn[8];
54 __u8 __resvd2[8];
55} __attribute__((__packed__));
56
57/*
58 * Session (remote port).
59 */
60struct ft_sess {
61 u32 port_id; /* for hash lookup use only */
62 u32 params;
63 u16 max_frame; /* maximum frame size */
64 u64 port_name; /* port name for transport ID */
65 struct ft_tport *tport;
66 struct se_session *se_sess;
67 struct hlist_node hash; /* linkage in ft_sess_hash table */
68 struct rcu_head rcu;
69 struct kref kref; /* ref for hash and outstanding I/Os */
70};
71
72/*
73 * Hash table of sessions per local port.
74 * Hash lookup by remote port FC_ID.
75 */
76#define FT_SESS_HASH_BITS 6
77#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS)
78
79/*
80 * Per local port data.
81 * This is created only after a TPG exists that allows target function
82 * for the local port. If the TPG exists, this is allocated when
83 * we're notified that the local port has been created, or when
84 * the first PRLI provider callback is received.
85 */
86struct ft_tport {
87 struct fc_lport *lport;
88 struct ft_tpg *tpg; /* NULL if TPG deleted before tport */
89 u32 sess_count; /* number of sessions in hash */
90 struct rcu_head rcu;
91 struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */
92};
93
94/*
95 * Node ID and authentication.
96 */
97struct ft_node_auth {
98 u64 port_name;
99 u64 node_name;
100};
101
102/*
103 * Node ACL for FC remote port session.
104 */
105struct ft_node_acl {
106 struct ft_node_auth node_auth;
107 struct se_node_acl se_node_acl;
108};
109
110struct ft_lun {
111 u32 index;
112 char name[FT_LUN_NAMELEN];
113};
114
115/*
116 * Target portal group (local port).
117 */
118struct ft_tpg {
119 u32 index;
120 struct ft_lport_acl *lport_acl;
121 struct ft_tport *tport; /* active tport or NULL */
122 struct list_head list; /* linkage in ft_lport_acl tpg_list */
123 struct list_head lun_list; /* head of LUNs */
124 struct se_portal_group se_tpg;
125 struct task_struct *thread; /* processing thread */
126 struct se_queue_obj qobj; /* queue for processing thread */
127};
128
129struct ft_lport_acl {
130 u64 wwpn;
131 char name[FT_NAMELEN];
132 struct list_head list;
133 struct list_head tpg_list;
134 struct se_wwn fc_lport_wwn;
135};
136
137enum ft_cmd_state {
138 FC_CMD_ST_NEW = 0,
139 FC_CMD_ST_REJ
140};
141
142/*
143 * Commands
144 */
145struct ft_cmd {
146 enum ft_cmd_state state;
147 u16 lun; /* LUN from request */
148 struct ft_sess *sess; /* session held for cmd */
149 struct fc_seq *seq; /* sequence in exchange mgr */
150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */
151 struct fc_frame *req_frame;
152 unsigned char *cdb; /* pointer to CDB inside frame */
153 u32 write_data_len; /* data received on writes */
154 struct se_queue_req se_req;
155 /* Local sense buffer */
156 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
157 u32 was_ddp_setup:1; /* Set only if ddp is setup */
158 struct scatterlist *sg; /* Set only if DDP is setup */
159 u32 sg_cnt; /* No. of items in scatterlist */
160};
161
162extern struct list_head ft_lport_list;
163extern struct mutex ft_lport_lock;
164extern struct fc4_prov ft_prov;
165extern struct target_fabric_configfs *ft_configfs;
166
167/*
168 * Fabric methods.
169 */
170
171/*
172 * Session ops.
173 */
174void ft_sess_put(struct ft_sess *);
175int ft_sess_shutdown(struct se_session *);
176void ft_sess_close(struct se_session *);
177void ft_sess_stop(struct se_session *, int, int);
178int ft_sess_logged_in(struct se_session *);
179u32 ft_sess_get_index(struct se_session *);
180u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
181void ft_sess_set_erl0(struct se_session *);
182
183void ft_lport_add(struct fc_lport *, void *);
184void ft_lport_del(struct fc_lport *, void *);
185int ft_lport_notify(struct notifier_block *, unsigned long, void *);
186
187/*
188 * IO methods.
189 */
190void ft_check_stop_free(struct se_cmd *);
191void ft_release_cmd(struct se_cmd *);
192int ft_queue_status(struct se_cmd *);
193int ft_queue_data_in(struct se_cmd *);
194int ft_write_pending(struct se_cmd *);
195int ft_write_pending_status(struct se_cmd *);
196u32 ft_get_task_tag(struct se_cmd *);
197int ft_get_cmd_state(struct se_cmd *);
198void ft_new_cmd_failure(struct se_cmd *);
199int ft_queue_tm_resp(struct se_cmd *);
200int ft_is_state_remove(struct se_cmd *);
201
202/*
203 * other internal functions.
204 */
205int ft_thread(void *);
206void ft_recv_req(struct ft_sess *, struct fc_frame *);
207struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
208struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
209
210void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
211void ft_dump_cmd(struct ft_cmd *, const char *caller);
212
213ssize_t ft_format_wwn(char *, size_t, u64);
214
215#endif /* __TCM_FC_H__ */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
new file mode 100644
index 000000000000..49e51778f733
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -0,0 +1,696 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <asm/unaligned.h>
34#include <scsi/scsi.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_cmnd.h>
38#include <scsi/libfc.h>
39#include <scsi/fc_encode.h>
40
41#include <target/target_core_base.h>
42#include <target/target_core_transport.h>
43#include <target/target_core_fabric_ops.h>
44#include <target/target_core_device.h>
45#include <target/target_core_tpg.h>
46#include <target/target_core_configfs.h>
47#include <target/target_core_base.h>
48#include <target/target_core_tmr.h>
49#include <target/configfs_macros.h>
50
51#include "tcm_fc.h"
52
53/*
54 * Dump cmd state for debugging.
55 */
56void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
57{
58 struct fc_exch *ep;
59 struct fc_seq *sp;
60 struct se_cmd *se_cmd;
61 struct se_mem *mem;
62 struct se_transport_task *task;
63
64 if (!(ft_debug_logging & FT_DEBUG_IO))
65 return;
66
67 se_cmd = &cmd->se_cmd;
68 printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
69 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
70 printk(KERN_INFO "%s: cmd %p cdb %p\n",
71 caller, cmd, cmd->cdb);
72 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
73
74 task = T_TASK(se_cmd);
75 printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
76 caller, cmd, task, task->t_tasks_se_num,
77 task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
78 if (task->t_mem_list)
79 list_for_each_entry(mem, task->t_mem_list, se_list)
80 printk(KERN_INFO "%s: cmd %p mem %p page %p "
81 "len 0x%x off 0x%x\n",
82 caller, cmd, mem,
83 mem->se_page, mem->se_len, mem->se_off);
84 sp = cmd->seq;
85 if (sp) {
86 ep = fc_seq_exch(sp);
87 printk(KERN_INFO "%s: cmd %p sid %x did %x "
88 "ox_id %x rx_id %x seq_id %x e_stat %x\n",
89 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
90 sp->id, ep->esb_stat);
91 }
92 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
93 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
94}
95
96/*
97 * Get LUN from CDB.
98 */
99static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
100{
101 u64 lun;
102
103 lun = lunp[1];
104 switch (lunp[0] >> 6) {
105 case 0:
106 break;
107 case 1:
108 lun |= (lunp[0] & 0x3f) << 8;
109 break;
110 default:
111 return -1;
112 }
113 if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
114 return -1;
115 cmd->lun = lun;
116 return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
117}
118
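A worked example of the first-level FCP LUN decode above (illustrative, not part of the patch):

/*
 * lunp = {0x00, 0x05, ...}: address method 0 (peripheral),
 *         lun = lunp[1] = 0x05
 * lunp = {0x41, 0x02, ...}: address method 1 (flat space),
 *         lun = ((0x41 & 0x3f) << 8) | 0x02 = 0x102
 * lunp[0] >> 6 == 2 or 3: higher-level methods, rejected (-1),
 *         as is any lun >= TRANSPORT_MAX_LUNS_PER_TPG
 */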
119static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
120{
121 struct se_queue_obj *qobj;
122 unsigned long flags;
123
124 qobj = &sess->tport->tpg->qobj;
125 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
126 list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
127 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
128 atomic_inc(&qobj->queue_cnt);
129 wake_up_interruptible(&qobj->thread_wq);
130}
131
132static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
133{
134 unsigned long flags;
135 struct se_queue_req *qr;
136
137 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
138 if (list_empty(&qobj->qobj_list)) {
139 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
140 return NULL;
141 }
142 qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
143 list_del(&qr->qr_list);
144 atomic_dec(&qobj->queue_cnt);
145 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
146 return container_of(qr, struct ft_cmd, se_req);
147}
148
149static void ft_free_cmd(struct ft_cmd *cmd)
150{
151 struct fc_frame *fp;
152 struct fc_lport *lport;
153
154 if (!cmd)
155 return;
156 fp = cmd->req_frame;
157 lport = fr_dev(fp);
158 if (fr_seq(fp))
159 lport->tt.seq_release(fr_seq(fp));
160 fc_frame_free(fp);
161 ft_sess_put(cmd->sess); /* undo get from lookup at recv */
162 kfree(cmd);
163}
164
165void ft_release_cmd(struct se_cmd *se_cmd)
166{
167 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
168
169 ft_free_cmd(cmd);
170}
171
172void ft_check_stop_free(struct se_cmd *se_cmd)
173{
174 transport_generic_free_cmd(se_cmd, 0, 1, 0);
175}
176
177/*
178 * Send response.
179 */
180int ft_queue_status(struct se_cmd *se_cmd)
181{
182 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
183 struct fc_frame *fp;
184 struct fcp_resp_with_ext *fcp;
185 struct fc_lport *lport;
186 struct fc_exch *ep;
187 size_t len;
188
189 ft_dump_cmd(cmd, __func__);
190 ep = fc_seq_exch(cmd->seq);
191 lport = ep->lp;
192 len = sizeof(*fcp) + se_cmd->scsi_sense_length;
193 fp = fc_frame_alloc(lport, len);
194 if (!fp) {
195 /* XXX shouldn't just drop it - requeue and retry? */
196 return 0;
197 }
198 fcp = fc_frame_payload_get(fp, len);
199 memset(fcp, 0, len);
200 fcp->resp.fr_status = se_cmd->scsi_status;
201
202 len = se_cmd->scsi_sense_length;
203 if (len) {
204 fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
205 fcp->ext.fr_sns_len = htonl(len);
206 memcpy((fcp + 1), se_cmd->sense_buffer, len);
207 }
208
209 /*
210 * Test underflow and overflow with one mask. Usually both are off.
211 * Bidirectional commands are not handled yet.
212 */
213 if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
214 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
215 fcp->resp.fr_flags |= FCP_RESID_OVER;
216 else
217 fcp->resp.fr_flags |= FCP_RESID_UNDER;
218 fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
219 }
220
221 /*
222 * Send response.
223 */
224 cmd->seq = lport->tt.seq_start_next(cmd->seq);
225 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
226 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
227
228 lport->tt.seq_send(lport, cmd->seq, fp);
229 lport->tt.exch_done(cmd->seq);
230 return 0;
231}
232
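To make the residual handling above concrete (an illustrative sketch, not part of the patch):

/*
 * Example: the initiator asked for 4096 bytes but the command
 * returned only 512. The underflow path above then sets
 * FCP_RESID_UNDER in fr_flags and fr_resid = cpu_to_be32(3584),
 * i.e. the 4096 - 512 byte shortfall.
 */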
233int ft_write_pending_status(struct se_cmd *se_cmd)
234{
235 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
236
237 return cmd->write_data_len != se_cmd->data_length;
238}
239
240/*
241 * Send TX_RDY (transfer ready).
242 */
243int ft_write_pending(struct se_cmd *se_cmd)
244{
245 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
246 struct fc_frame *fp;
247 struct fcp_txrdy *txrdy;
248 struct fc_lport *lport;
249 struct fc_exch *ep;
250 struct fc_frame_header *fh;
251 u32 f_ctl;
252
253 ft_dump_cmd(cmd, __func__);
254
255 ep = fc_seq_exch(cmd->seq);
256 lport = ep->lp;
257 fp = fc_frame_alloc(lport, sizeof(*txrdy));
258 if (!fp)
259 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
260
261 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
262 memset(txrdy, 0, sizeof(*txrdy));
263 txrdy->ft_burst_len = htonl(se_cmd->data_length);
264
265 cmd->seq = lport->tt.seq_start_next(cmd->seq);
266 fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
267 FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
268
269 fh = fc_frame_header_get(fp);
270 f_ctl = ntoh24(fh->fh_f_ctl);
271
272 /* Only if it is 'Exchange Responder' */
273 if (f_ctl & FC_FC_EX_CTX) {
274 /* The target is the exchange responder and is sending
275 * XFER_RDY to the exchange initiator
276 */
277 if ((ep->xid <= lport->lro_xid) &&
278 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
279 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
280 /*
281 * Map the se_mem list to a scatterlist so that
282 * DDP can be set up; the DDP setup function
283 * requires a scatterlist. The se_mem list is
284 * internal to the TCM/LIO target.
285 */
286 transport_do_task_sg_chain(se_cmd);
287 cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
288 cmd->sg_cnt =
289 T_TASK(se_cmd)->t_tasks_sg_chained_no;
290 }
291 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
292 cmd->sg, cmd->sg_cnt))
293 cmd->was_ddp_setup = 1;
294 }
295 }
296 lport->tt.seq_send(lport, cmd->seq, fp);
297 return 0;
298}
299
300u32 ft_get_task_tag(struct se_cmd *se_cmd)
301{
302 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
303
304 return fc_seq_exch(cmd->seq)->rxid;
305}
306
307int ft_get_cmd_state(struct se_cmd *se_cmd)
308{
309 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
310
311 return cmd->state;
312}
313
314int ft_is_state_remove(struct se_cmd *se_cmd)
315{
316 return 0; /* XXX TBD */
317}
318
319void ft_new_cmd_failure(struct se_cmd *se_cmd)
320{
321 /* XXX TBD */
322 printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
323}
324
325/*
326 * FC sequence response handler for follow-on sequences (data) and aborts.
327 */
328static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
329{
330 struct ft_cmd *cmd = arg;
331 struct fc_frame_header *fh;
332
333 if (IS_ERR(fp)) {
334 /* XXX need to find cmd if queued */
335 cmd->se_cmd.t_state = TRANSPORT_REMOVE;
336 cmd->seq = NULL;
337 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
338 return;
339 }
340
341 fh = fc_frame_header_get(fp);
342
343 switch (fh->fh_r_ctl) {
344 case FC_RCTL_DD_SOL_DATA: /* write data */
345 ft_recv_write_data(cmd, fp);
346 break;
347 case FC_RCTL_DD_UNSOL_CTL: /* command */
348 case FC_RCTL_DD_SOL_CTL: /* transfer ready */
349 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
350 default:
351 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
352 __func__, fh->fh_r_ctl);
353 fc_frame_free(fp);
354 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
355 break;
356 }
357}
358
359/*
360 * Send a FCP response including SCSI status and optional FCP rsp_code.
361 * status is SAM_STAT_GOOD (zero) iff code is valid.
362 * This is used in error cases, such as allocation failures.
363 */
364static void ft_send_resp_status(struct fc_lport *lport,
365 const struct fc_frame *rx_fp,
366 u32 status, enum fcp_resp_rsp_codes code)
367{
368 struct fc_frame *fp;
369 struct fc_seq *sp;
370 const struct fc_frame_header *fh;
371 size_t len;
372 struct fcp_resp_with_ext *fcp;
373 struct fcp_resp_rsp_info *info;
374
375 fh = fc_frame_header_get(rx_fp);
376 FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
377 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
378 len = sizeof(*fcp);
379 if (status == SAM_STAT_GOOD)
380 len += sizeof(*info);
381 fp = fc_frame_alloc(lport, len);
382 if (!fp)
383 return;
384 fcp = fc_frame_payload_get(fp, len);
385 memset(fcp, 0, len);
386 fcp->resp.fr_status = status;
387 if (status == SAM_STAT_GOOD) {
388 fcp->ext.fr_rsp_len = htonl(sizeof(*info));
389 fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
390 info = (struct fcp_resp_rsp_info *)(fcp + 1);
391 info->rsp_code = code;
392 }
393
394 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
395 sp = fr_seq(fp);
396 if (sp)
397 lport->tt.seq_send(lport, sp, fp);
398 else
399 lport->tt.frame_send(lport, fp);
400}
401
402/*
403 * Send error or task management response.
404 * Always frees the cmd and associated state.
405 */
406static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
407{
408 ft_send_resp_status(cmd->sess->tport->lport,
409 cmd->req_frame, SAM_STAT_GOOD, code);
410 ft_free_cmd(cmd);
411}
412
413/*
414 * Handle Task Management Request.
415 */
416static void ft_send_tm(struct ft_cmd *cmd)
417{
418 struct se_tmr_req *tmr;
419 struct fcp_cmnd *fcp;
420 u8 tm_func;
421
422 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
423
424 switch (fcp->fc_tm_flags) {
425 case FCP_TMF_LUN_RESET:
426 tm_func = TMR_LUN_RESET;
427 if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
428 ft_dump_cmd(cmd, __func__);
429 transport_send_check_condition_and_sense(&cmd->se_cmd,
430 cmd->se_cmd.scsi_sense_reason, 0);
431 ft_sess_put(cmd->sess);
432 return;
433 }
434 break;
435 case FCP_TMF_TGT_RESET:
436 tm_func = TMR_TARGET_WARM_RESET;
437 break;
438 case FCP_TMF_CLR_TASK_SET:
439 tm_func = TMR_CLEAR_TASK_SET;
440 break;
441 case FCP_TMF_ABT_TASK_SET:
442 tm_func = TMR_ABORT_TASK_SET;
443 break;
444 case FCP_TMF_CLR_ACA:
445 tm_func = TMR_CLEAR_ACA;
446 break;
447 default:
448 /*
449 * FCP-4 r01 indicates that setting a combination
450 * of tm_flags is invalid.
451 */
452 FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
453 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
454 return;
455 }
456
457 FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
458 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
459 if (!tmr) {
460 FT_TM_DBG("alloc failed\n");
461 ft_send_resp_code(cmd, FCP_TMF_FAILED);
462 return;
463 }
464 cmd->se_cmd.se_tmr_req = tmr;
465 transport_generic_handle_tmr(&cmd->se_cmd);
466}
467
468/*
469 * Send status from completed task management request.
470 */
471int ft_queue_tm_resp(struct se_cmd *se_cmd)
472{
473 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
474 struct se_tmr_req *tmr = se_cmd->se_tmr_req;
475 enum fcp_resp_rsp_codes code;
476
477 switch (tmr->response) {
478 case TMR_FUNCTION_COMPLETE:
479 code = FCP_TMF_CMPL;
480 break;
481 case TMR_LUN_DOES_NOT_EXIST:
482 code = FCP_TMF_INVALID_LUN;
483 break;
484 case TMR_FUNCTION_REJECTED:
485 code = FCP_TMF_REJECTED;
486 break;
487 case TMR_TASK_DOES_NOT_EXIST:
488 case TMR_TASK_STILL_ALLEGIANT:
489 case TMR_TASK_FAILOVER_NOT_SUPPORTED:
490 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
491 case TMR_FUNCTION_AUTHORIZATION_FAILED:
492 default:
493 code = FCP_TMF_FAILED;
494 break;
495 }
496 FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
497 tmr->function, tmr->response, code);
498 ft_send_resp_code(cmd, code);
499 return 0;
500}
501
502/*
503 * Handle incoming FCP command.
504 */
505static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
506{
507 struct ft_cmd *cmd;
508 struct fc_lport *lport = sess->tport->lport;
509
510 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
511 if (!cmd)
512 goto busy;
513 cmd->sess = sess;
514 cmd->seq = lport->tt.seq_assign(lport, fp);
515 if (!cmd->seq) {
516 kfree(cmd);
517 goto busy;
518 }
519 cmd->req_frame = fp; /* hold frame during cmd */
520 ft_queue_cmd(sess, cmd);
521 return;
522
523busy:
524 FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
525 ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
526 fc_frame_free(fp);
527 ft_sess_put(sess); /* undo get from lookup */
528}
529
530
531/*
532 * Handle incoming FCP frame.
533 * Caller has verified that the frame is type FCP.
534 */
535void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
536{
537 struct fc_frame_header *fh = fc_frame_header_get(fp);
538
539 switch (fh->fh_r_ctl) {
540 case FC_RCTL_DD_UNSOL_CMD: /* command */
541 ft_recv_cmd(sess, fp);
542 break;
543 case FC_RCTL_DD_SOL_DATA: /* write data */
544 case FC_RCTL_DD_UNSOL_CTL:
545 case FC_RCTL_DD_SOL_CTL:
546 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
547 case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
548 default:
549 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
550 __func__, fh->fh_r_ctl);
551 fc_frame_free(fp);
552 ft_sess_put(sess); /* undo get from lookup */
553 break;
554 }
555}
556
557/*
558 * Send new command to target.
559 */
560static void ft_send_cmd(struct ft_cmd *cmd)
561{
562 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
563 struct se_cmd *se_cmd;
564 struct fcp_cmnd *fcp;
565 int data_dir;
566 u32 data_len;
567 int task_attr;
568 int ret;
569
570 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
571 if (!fcp)
572 goto err;
573
574 if (fcp->fc_flags & FCP_CFL_LEN_MASK)
575 goto err; /* not handling longer CDBs yet */
576
577 if (fcp->fc_tm_flags) {
578 task_attr = FCP_PTA_SIMPLE;
579 data_dir = DMA_NONE;
580 data_len = 0;
581 } else {
582 switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
583 case 0:
584 data_dir = DMA_NONE;
585 break;
586 case FCP_CFL_RDDATA:
587 data_dir = DMA_FROM_DEVICE;
588 break;
589 case FCP_CFL_WRDATA:
590 data_dir = DMA_TO_DEVICE;
591 break;
592 case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
593 goto err; /* TBD not supported by tcm_fc yet */
594 }
595
596 /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
597 task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
598 data_len = ntohl(fcp->fc_dl);
599 cmd->cdb = fcp->fc_cdb;
600 }
601
602 se_cmd = &cmd->se_cmd;
603 /*
604 * Initialize struct se_cmd descriptor from target_core_mod
605 * infrastructure
606 */
607 transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
608 data_len, data_dir, task_attr,
609 &cmd->ft_sense_buffer[0]);
610 /*
611 * Check for FCP task management flags
612 */
613 if (fcp->fc_tm_flags) {
614 ft_send_tm(cmd);
615 return;
616 }
617
618 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
619
620 ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
621 if (ret < 0) {
622 ft_dump_cmd(cmd, __func__);
623 transport_send_check_condition_and_sense(&cmd->se_cmd,
624 cmd->se_cmd.scsi_sense_reason, 0);
625 return;
626 }
627
628 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
629
630 FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
631 ft_dump_cmd(cmd, __func__);
632
633 if (ret == -1) {
634 transport_send_check_condition_and_sense(se_cmd,
635 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
636 transport_generic_free_cmd(se_cmd, 0, 1, 0);
637 return;
638 }
639 if (ret == -2) {
640 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
641 ft_queue_status(se_cmd);
642 else
643 transport_send_check_condition_and_sense(se_cmd,
644 se_cmd->scsi_sense_reason, 0);
645 transport_generic_free_cmd(se_cmd, 0, 1, 0);
646 return;
647 }
648 transport_generic_handle_cdb(se_cmd);
649 return;
650
651err:
652 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
653 return;
654}
655
656/*
657 * Handle request in the command thread.
658 */
659static void ft_exec_req(struct ft_cmd *cmd)
660{
661 FT_IO_DBG("cmd state %x\n", cmd->state);
662 switch (cmd->state) {
663 case FC_CMD_ST_NEW:
664 ft_send_cmd(cmd);
665 break;
666 default:
667 break;
668 }
669}
670
671/*
672 * Processing thread.
673 * Currently one thread per tpg.
674 */
675int ft_thread(void *arg)
676{
677 struct ft_tpg *tpg = arg;
678 struct se_queue_obj *qobj = &tpg->qobj;
679 struct ft_cmd *cmd;
680 int ret;
681
682 set_user_nice(current, -20);
683
684 while (!kthread_should_stop()) {
685 ret = wait_event_interruptible(qobj->thread_wq,
686 atomic_read(&qobj->queue_cnt) || kthread_should_stop());
687 if (ret < 0 || kthread_should_stop())
688 goto out;
689 cmd = ft_dequeue_cmd(qobj);
690 if (cmd)
691 ft_exec_req(cmd);
692 }
693
694out:
695 return 0;
696}
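The queueing helpers earlier in this file and ft_thread() above form a simple producer/consumer; an outline of the pairing (illustrative, not part of the patch):

/*
 * Producer, ft_queue_cmd() (called from frame receive):
 *     list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
 *     atomic_inc(&qobj->queue_cnt);
 *     wake_up_interruptible(&qobj->thread_wq);
 *
 * Consumer, ft_thread() (one kthread per tpg):
 *     wait_event_interruptible(qobj->thread_wq,
 *         atomic_read(&qobj->queue_cnt) || kthread_should_stop());
 *     cmd = ft_dequeue_cmd(qobj);    which takes cmd_queue_lock
 *     ft_exec_req(cmd);              FC_CMD_ST_NEW -> ft_send_cmd()
 */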
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
new file mode 100644
index 000000000000..fcdbbffe88cc
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -0,0 +1,677 @@
1/*******************************************************************************
2 * Filename: tfc_conf.c
3 *
4 * This file contains the configfs implementation for TCM_fc fabric node.
5 * Based on tcm_loop_configfs.c
6 *
7 * Copyright (c) 2010 Cisco Systems, Inc.
8 * Copyright (c) 2009,2010 Rising Tide, Inc.
9 * Copyright (c) 2009,2010 Linux-iSCSI.org
10 *
11 * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <generated/utsrelease.h>
28#include <linux/utsname.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/kthread.h>
32#include <linux/types.h>
33#include <linux/string.h>
34#include <linux/configfs.h>
35#include <linux/ctype.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_fabric_configfs.h>
47#include <target/target_core_fabric_lib.h>
48#include <target/target_core_device.h>
49#include <target/target_core_tpg.h>
50#include <target/target_core_configfs.h>
51#include <target/target_core_base.h>
52#include <target/configfs_macros.h>
53
54#include "tcm_fc.h"
55
56struct target_fabric_configfs *ft_configfs;
57
58LIST_HEAD(ft_lport_list);
59DEFINE_MUTEX(ft_lport_lock);
60
61unsigned int ft_debug_logging;
62module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
63MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
64
65/*
66 * Parse WWN.
67 * If strict, we require lower-case hex and colon separators to be sure
68 * the name is the same as what would be generated by ft_format_wwn()
69 * so the name and wwn are mapped one-to-one.
70 */
71static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
72{
73 const char *cp;
74 char c;
75 u32 nibble;
76 u32 byte = 0;
77 u32 pos = 0;
78 u32 err;
79
80 *wwn = 0;
81 for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
82 c = *cp;
83 if (c == '\n' && cp[1] == '\0')
84 continue;
85 if (strict && pos++ == 2 && byte++ < 7) {
86 pos = 0;
87 if (c == ':')
88 continue;
89 err = 1;
90 goto fail;
91 }
92 if (c == '\0') {
93 err = 2;
94 if (strict && byte != 8)
95 goto fail;
96 return cp - name;
97 }
98 err = 3;
99 if (isdigit(c))
100 nibble = c - '0';
101 else if (isxdigit(c) && (islower(c) || !strict))
102 nibble = tolower(c) - 'a' + 10;
103 else
104 goto fail;
105 *wwn = (*wwn << 4) | nibble;
106 }
107 err = 4;
108fail:
109 FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
110 err, cp - name, pos, byte);
111 return -1;
112}
113
114ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
115{
116 u8 b[8];
117
118 put_unaligned_be64(wwn, b);
119 return snprintf(buf, len,
120 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
121 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
122}
123
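A round trip of the two helpers above (illustrative, not part of the patch); strict parsing accepts exactly the form ft_format_wwn() emits, which is what keeps configfs names and WWNs one-to-one:

u64 wwn;
char buf[FT_NAMELEN];

if (ft_parse_wwn("20:00:00:11:22:33:44:55", &wwn, 1) > 0) {
	/* wwn == 0x2000001122334455ULL */
	ft_format_wwn(buf, sizeof(buf), wwn);
	/* buf holds "20:00:00:11:22:33:44:55" again */
}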
124static ssize_t ft_wwn_show(void *arg, char *buf)
125{
126 u64 *wwn = arg;
127 ssize_t len;
128
129 len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
130 buf[len++] = '\n';
131 return len;
132}
133
134static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
135{
136 ssize_t ret;
137 u64 wwn;
138
139 ret = ft_parse_wwn(buf, &wwn, 0);
140 if (ret > 0)
141 *(u64 *)arg = wwn;
142 return ret;
143}
144
145/*
146 * ACL auth ops.
147 */
148
149static ssize_t ft_nacl_show_port_name(
150 struct se_node_acl *se_nacl,
151 char *page)
152{
153 struct ft_node_acl *acl = container_of(se_nacl,
154 struct ft_node_acl, se_node_acl);
155
156 return ft_wwn_show(&acl->node_auth.port_name, page);
157}
158
159static ssize_t ft_nacl_store_port_name(
160 struct se_node_acl *se_nacl,
161 const char *page,
162 size_t count)
163{
164 struct ft_node_acl *acl = container_of(se_nacl,
165 struct ft_node_acl, se_node_acl);
166
167 return ft_wwn_store(&acl->node_auth.port_name, page, count);
168}
169
170TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
171
172static ssize_t ft_nacl_show_node_name(
173 struct se_node_acl *se_nacl,
174 char *page)
175{
176 struct ft_node_acl *acl = container_of(se_nacl,
177 struct ft_node_acl, se_node_acl);
178
179 return ft_wwn_show(&acl->node_auth.node_name, page);
180}
181
182static ssize_t ft_nacl_store_node_name(
183 struct se_node_acl *se_nacl,
184 const char *page,
185 size_t count)
186{
187 struct ft_node_acl *acl = container_of(se_nacl,
188 struct ft_node_acl, se_node_acl);
189
190 return ft_wwn_store(&acl->node_auth.node_name, page, count);
191}
192
193TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
194
195static struct configfs_attribute *ft_nacl_base_attrs[] = {
196 &ft_nacl_port_name.attr,
197 &ft_nacl_node_name.attr,
198 NULL,
199};
200
201/*
202 * ACL ops.
203 */
204
205/*
206 * Add ACL for an initiator. The ACL is named arbitrarily.
207 * The port_name and/or node_name are attributes.
208 */
209static struct se_node_acl *ft_add_acl(
210 struct se_portal_group *se_tpg,
211 struct config_group *group,
212 const char *name)
213{
214 struct ft_node_acl *acl;
215 struct ft_tpg *tpg;
216 u64 wwpn;
217 u32 q_depth;
218
219 FT_CONF_DBG("add acl %s\n", name);
220 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
221
222 if (ft_parse_wwn(name, &wwpn, 1) < 0)
223 return ERR_PTR(-EINVAL);
224
225 acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
226 if (!acl)
227 return ERR_PTR(-ENOMEM);
228 acl->node_auth.port_name = wwpn;
229
230 q_depth = 32; /* XXX bogus default - get from tpg? */
231 return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
232 &acl->se_node_acl, name, q_depth);
233}
234
235static void ft_del_acl(struct se_node_acl *se_acl)
236{
237 struct se_portal_group *se_tpg = se_acl->se_tpg;
238 struct ft_tpg *tpg;
239 struct ft_node_acl *acl = container_of(se_acl,
240 struct ft_node_acl, se_node_acl);
241
242 FT_CONF_DBG("del acl %s\n",
243 config_item_name(&se_acl->acl_group.cg_item));
244
245 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
246 FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
247 acl, se_acl, tpg, &tpg->se_tpg);
248
249 core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
250 kfree(acl);
251}
252
253struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
254{
255 struct ft_node_acl *found = NULL;
256 struct ft_node_acl *acl;
257 struct se_portal_group *se_tpg = &tpg->se_tpg;
258 struct se_node_acl *se_acl;
259
260 spin_lock_bh(&se_tpg->acl_node_lock);
261 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
262 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
263 FT_CONF_DBG("acl %p port_name %llx\n",
264 acl, (unsigned long long)acl->node_auth.port_name);
265 if (acl->node_auth.port_name == rdata->ids.port_name ||
266 acl->node_auth.node_name == rdata->ids.node_name) {
267 FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
268 (unsigned long long)rdata->ids.port_name);
269 found = acl;
270 /* XXX need to hold onto ACL */
271 break;
272 }
273 }
274 spin_unlock_bh(&se_tpg->acl_node_lock);
275 return found;
276}
277
278struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
279{
280 struct ft_node_acl *acl;
281
282 acl = kzalloc(sizeof(*acl), GFP_KERNEL);
283 if (!acl) {
284 printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
285 return NULL;
286 }
287 FT_CONF_DBG("acl %p\n", acl);
288 return &acl->se_node_acl;
289}
290
291static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
292 struct se_node_acl *se_acl)
293{
294 struct ft_node_acl *acl = container_of(se_acl,
295 struct ft_node_acl, se_node_acl);
296
297 FT_CONF_DBG("acl %p\n", acl);
298 kfree(acl);
299}
300
301/*
302 * local_port port_group (tpg) ops.
303 */
304static struct se_portal_group *ft_add_tpg(
305 struct se_wwn *wwn,
306 struct config_group *group,
307 const char *name)
308{
309 struct ft_lport_acl *lacl;
310 struct ft_tpg *tpg;
311 unsigned long index;
312 int ret;
313
314 FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
315
316 /*
317 * Name must be "tpgt_" followed by the index.
318 */
319 if (strstr(name, "tpgt_") != name)
320 return NULL;
321 if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
322 return NULL;
323
324 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
325 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
326 if (!tpg)
327 return NULL;
328 tpg->index = index;
329 tpg->lport_acl = lacl;
330 INIT_LIST_HEAD(&tpg->lun_list);
331 transport_init_queue_obj(&tpg->qobj);
332
333 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
334 (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
335 if (ret < 0) {
336 kfree(tpg);
337 return NULL;
338 }
339
340 tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
341 if (IS_ERR(tpg->thread)) {
342 kfree(tpg);
343 return NULL;
344 }
345
346 mutex_lock(&ft_lport_lock);
347 list_add_tail(&tpg->list, &lacl->tpg_list);
348 mutex_unlock(&ft_lport_lock);
349
350 return &tpg->se_tpg;
351}
352
353static void ft_del_tpg(struct se_portal_group *se_tpg)
354{
355 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
356
357 FT_CONF_DBG("del tpg %s\n",
358 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
359
360 kthread_stop(tpg->thread);
361
362 /* Wait for sessions to be freed thru RCU, for BUG_ON below */
363 synchronize_rcu();
364
365 mutex_lock(&ft_lport_lock);
366 list_del(&tpg->list);
367 if (tpg->tport) {
368 tpg->tport->tpg = NULL;
369 tpg->tport = NULL;
370 }
371 mutex_unlock(&ft_lport_lock);
372
373 core_tpg_deregister(se_tpg);
374 kfree(tpg);
375}
376
377/*
378 * Verify that an lport is configured to use the tcm_fc module, and return
379 * the target port group that should be used.
380 *
381 * The caller holds ft_lport_lock.
382 */
383struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
384{
385 struct ft_lport_acl *lacl;
386 struct ft_tpg *tpg;
387
388 list_for_each_entry(lacl, &ft_lport_list, list) {
389 if (lacl->wwpn == lport->wwpn) {
390 list_for_each_entry(tpg, &lacl->tpg_list, list)
391 return tpg; /* XXX for now return first entry */
392 return NULL;
393 }
394 }
395 return NULL;
396}
397
398/*
399 * target config instance ops.
400 */
401
402/*
403 * Add lport to allowed config.
404 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
405 */
406static struct se_wwn *ft_add_lport(
407 struct target_fabric_configfs *tf,
408 struct config_group *group,
409 const char *name)
410{
411 struct ft_lport_acl *lacl;
412 struct ft_lport_acl *old_lacl;
413 u64 wwpn;
414
415 FT_CONF_DBG("add lport %s\n", name);
416 if (ft_parse_wwn(name, &wwpn, 1) < 0)
417 return NULL;
418 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
419 if (!lacl)
420 return NULL;
421 lacl->wwpn = wwpn;
422 INIT_LIST_HEAD(&lacl->tpg_list);
423
424 mutex_lock(&ft_lport_lock);
425 list_for_each_entry(old_lacl, &ft_lport_list, list) {
426 if (old_lacl->wwpn == wwpn) {
427 mutex_unlock(&ft_lport_lock);
428 kfree(lacl);
429 return NULL;
430 }
431 }
432 list_add_tail(&lacl->list, &ft_lport_list);
433 ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
434 mutex_unlock(&ft_lport_lock);
435
436 return &lacl->fc_lport_wwn;
437}
438
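Together with ft_add_tpg() above, this defines the configfs hierarchy an administrator drives; a hypothetical session (paths assume configfs mounted at /sys/kernel/config and the example WWPN):

/*
 * mkdir /sys/kernel/config/target/fc/20:00:00:11:22:33:44:55
 *     -> ft_add_lport("20:00:00:11:22:33:44:55")
 * mkdir /sys/kernel/config/target/fc/20:00:00:11:22:33:44:55/tpgt_1
 *     -> ft_add_tpg(..., "tpgt_1"), which also starts the
 *        "ft_tpg1" processing kthread
 * rmdir of the same directories invokes ft_del_tpg()/ft_del_lport().
 */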
439static void ft_del_lport(struct se_wwn *wwn)
440{
441 struct ft_lport_acl *lacl = container_of(wwn,
442 struct ft_lport_acl, fc_lport_wwn);
443
444 FT_CONF_DBG("del lport %s\n",
445 config_item_name(&wwn->wwn_group.cg_item));
446 mutex_lock(&ft_lport_lock);
447 list_del(&lacl->list);
448 mutex_unlock(&ft_lport_lock);
449
450 kfree(lacl);
451}
452
453static ssize_t ft_wwn_show_attr_version(
454 struct target_fabric_configfs *tf,
455 char *page)
456{
457 return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
458 ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
459}
460
461TF_WWN_ATTR_RO(ft, version);
462
463static struct configfs_attribute *ft_wwn_attrs[] = {
464 &ft_wwn_version.attr,
465 NULL,
466};
467
468static char *ft_get_fabric_name(void)
469{
470 return "fc";
471}
472
473static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
474{
475 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
476
477 return tpg->lport_acl->name;
478}
479
480static u16 ft_get_tag(struct se_portal_group *se_tpg)
481{
482 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
483
484 /*
485 * This tag is used when forming the SCSI Name identifier in the
486 * INQUIRY EVPD=1 page 0x83 to represent the SCSI Target Port.
487 */
488 return tpg->index;
489}
490
491static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
492{
493 return 1;
494}
495
496static int ft_check_false(struct se_portal_group *se_tpg)
497{
498 return 0;
499}
500
501static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
502{
503}
504
505static u16 ft_get_fabric_sense_len(void)
506{
507 return 0;
508}
509
510static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
511{
512 return 0;
513}
514
515static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
516{
517 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
518
519 return tpg->index;
520}
521
522static u64 ft_pack_lun(unsigned int index)
523{
524 WARN_ON(index >= 256);
525 /* Caller wants this byte-swapped */
526 return cpu_to_le64((index & 0xff) << 8);
527}
528
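A worked example of the packing above (illustrative, not part of the patch), showing where the index lands in the 8-byte SCSI LUN field:

/*
 * index = 5: (5 & 0xff) << 8 = 0x0500, and cpu_to_le64(0x0500)
 * lays out bytes 00 05 00 00 00 00 00 00, putting 0x05 in byte 1.
 * That matches peripheral addressing (method 0) as decoded by
 * ft_get_lun_for_cmd() in tfc_cmd.c.
 */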
529static struct target_core_fabric_ops ft_fabric_ops = {
530 .get_fabric_name = ft_get_fabric_name,
531 .get_fabric_proto_ident = fc_get_fabric_proto_ident,
532 .tpg_get_wwn = ft_get_fabric_wwn,
533 .tpg_get_tag = ft_get_tag,
534 .tpg_get_default_depth = ft_get_default_depth,
535 .tpg_get_pr_transport_id = fc_get_pr_transport_id,
536 .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len,
537 .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
538 .tpg_check_demo_mode = ft_check_false,
539 .tpg_check_demo_mode_cache = ft_check_false,
540 .tpg_check_demo_mode_write_protect = ft_check_false,
541 .tpg_check_prod_mode_write_protect = ft_check_false,
542 .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl,
543 .tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
544 .tpg_get_inst_index = ft_tpg_get_inst_index,
545 .check_stop_free = ft_check_stop_free,
546 .release_cmd_to_pool = ft_release_cmd,
547 .release_cmd_direct = ft_release_cmd,
548 .shutdown_session = ft_sess_shutdown,
549 .close_session = ft_sess_close,
550 .stop_session = ft_sess_stop,
551 .fall_back_to_erl0 = ft_sess_set_erl0,
552 .sess_logged_in = ft_sess_logged_in,
553 .sess_get_index = ft_sess_get_index,
554 .sess_get_initiator_sid = NULL,
555 .write_pending = ft_write_pending,
556 .write_pending_status = ft_write_pending_status,
557 .set_default_node_attributes = ft_set_default_node_attr,
558 .get_task_tag = ft_get_task_tag,
559 .get_cmd_state = ft_get_cmd_state,
560 .new_cmd_failure = ft_new_cmd_failure,
561 .queue_data_in = ft_queue_data_in,
562 .queue_status = ft_queue_status,
563 .queue_tm_rsp = ft_queue_tm_resp,
564 .get_fabric_sense_len = ft_get_fabric_sense_len,
565 .set_fabric_sense_len = ft_set_fabric_sense_len,
566 .is_state_remove = ft_is_state_remove,
567 .pack_lun = ft_pack_lun,
568 /*
569 * Setup function pointers for generic logic in
570 * target_core_fabric_configfs.c
571 */
572 .fabric_make_wwn = &ft_add_lport,
573 .fabric_drop_wwn = &ft_del_lport,
574 .fabric_make_tpg = &ft_add_tpg,
575 .fabric_drop_tpg = &ft_del_tpg,
576 .fabric_post_link = NULL,
577 .fabric_pre_unlink = NULL,
578 .fabric_make_np = NULL,
579 .fabric_drop_np = NULL,
580 .fabric_make_nodeacl = &ft_add_acl,
581 .fabric_drop_nodeacl = &ft_del_acl,
582};
583
584int ft_register_configfs(void)
585{
586 struct target_fabric_configfs *fabric;
587 int ret;
588
589 /*
590 * Register the top level struct config_item_type with TCM core
591 */
592 fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
593 if (!fabric) {
594 printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
595 __func__);
596 return -1;
597 }
598 fabric->tf_ops = ft_fabric_ops;
599
600 /* Allowing support for task_sg_chaining */
601 fabric->tf_ops.task_sg_chaining = 1;
602
603 /*
604 * Setup default attribute lists for various fabric->tf_cit_tmpl
605 */
606 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
607 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
608 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
609 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
610 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
611 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
612 ft_nacl_base_attrs;
613 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
614 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
615 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
616 /*
617 * register the fabric for use within TCM
618 */
619 ret = target_fabric_configfs_register(fabric);
620 if (ret < 0) {
621 FT_CONF_DBG("target_fabric_configfs_register() for"
622 " FC Target failed!\n");
623 printk(KERN_INFO
624 "%s: target_fabric_configfs_register() failed!\n",
625 __func__);
626 target_fabric_configfs_free(fabric);
627 return -1;
628 }
629
630 /*
631 * Setup our local pointer to *fabric.
632 */
633 ft_configfs = fabric;
634 return 0;
635}
636
637void ft_deregister_configfs(void)
638{
639 if (!ft_configfs)
640 return;
641 target_fabric_configfs_deregister(ft_configfs);
642 ft_configfs = NULL;
643}
644
645static struct notifier_block ft_notifier = {
646 .notifier_call = ft_lport_notify
647};
648
649static int __init ft_init(void)
650{
651 if (ft_register_configfs())
652 return -1;
653 if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
654 ft_deregister_configfs();
655 return -1;
656 }
657 blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
658 fc_lport_iterate(ft_lport_add, NULL);
659 return 0;
660}
661
662static void __exit ft_exit(void)
663{
664 blocking_notifier_chain_unregister(&fc_lport_notifier_head,
665 &ft_notifier);
666 fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
667 fc_lport_iterate(ft_lport_del, NULL);
668 ft_deregister_configfs();
669 synchronize_rcu();
670}
671
672#ifdef MODULE
673MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
674MODULE_LICENSE("GPL");
675module_init(ft_init);
676module_exit(ft_exit);
677#endif /* MODULE */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
new file mode 100644
index 000000000000..4c3c0efbe13f
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -0,0 +1,374 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
5 *
6 * Copyright (c) 2007 Intel Corporation. All rights reserved.
7 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
8 * Copyright (c) 2008 Mike Christie
9 * Copyright (c) 2009 Rising Tide, Inc.
10 * Copyright (c) 2009 Linux-iSCSI.org
11 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms and conditions of the GNU General Public License,
15 * version 2, as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along with
23 * this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27/* XXX TBD some includes may be extraneous */
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/version.h>
32#include <generated/utsrelease.h>
33#include <linux/utsname.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
41#include <linux/hash.h>
42#include <asm/unaligned.h>
43#include <scsi/scsi.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_device.h>
46#include <scsi/scsi_cmnd.h>
47#include <scsi/libfc.h>
48#include <scsi/fc_encode.h>
49
50#include <target/target_core_base.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_device.h>
54#include <target/target_core_tpg.h>
55#include <target/target_core_configfs.h>
56#include <target/target_core_base.h>
57#include <target/configfs_macros.h>
58
59#include "tcm_fc.h"
60
61/*
62 * Deliver read data back to initiator.
63 * XXX TBD handle resource problems later.
64 */
65int ft_queue_data_in(struct se_cmd *se_cmd)
66{
67 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
68 struct se_transport_task *task;
69 struct fc_frame *fp = NULL;
70 struct fc_exch *ep;
71 struct fc_lport *lport;
72 struct se_mem *mem;
73 size_t remaining;
74 u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
75 u32 mem_off;
76 u32 fh_off = 0;
77 u32 frame_off = 0;
78 size_t frame_len = 0;
79 size_t mem_len;
80 size_t tlen;
81 size_t off_in_page;
82 struct page *page;
83 int use_sg;
84 int error;
85 void *page_addr;
86 void *from;
87 void *to = NULL;
88
89 ep = fc_seq_exch(cmd->seq);
90 lport = ep->lp;
91 cmd->seq = lport->tt.seq_start_next(cmd->seq);
92
93 task = T_TASK(se_cmd);
94 BUG_ON(!task);
95 remaining = se_cmd->data_length;
96
97 /*
98 * Setup to use first mem list entry if any.
99 */
100 if (task->t_tasks_se_num) {
101 mem = list_first_entry(task->t_mem_list,
102 struct se_mem, se_list);
103 mem_len = mem->se_len;
104 mem_off = mem->se_off;
105 page = mem->se_page;
106 } else {
107 mem = NULL;
108 mem_len = remaining;
109 mem_off = 0;
110 page = NULL;
111 }
112
113 /* no scatter/gather in skb for odd word length due to fc_seq_send() */
114 use_sg = !(remaining % 4);
115
116 while (remaining) {
117 if (!mem_len) {
118 BUG_ON(!mem);
119 mem = list_entry(mem->se_list.next,
120 struct se_mem, se_list);
121 mem_len = min((size_t)mem->se_len, remaining);
122 mem_off = mem->se_off;
123 page = mem->se_page;
124 }
125 if (!frame_len) {
126 /*
127 * If the lport has the Large Send Offload (LSO)
128 * capability, allow 'frame_len' to be as big as
129 * 'lso_max'; it is capped to 'remaining' below.
130 */
131 frame_len = (lport->seq_offload) ? lport->lso_max :
132 cmd->sess->max_frame;
133 frame_len = min(frame_len, remaining);
134 fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
135 if (!fp)
136 return -ENOMEM;
137 to = fc_frame_payload_get(fp, 0);
138 fh_off = frame_off;
139 frame_off += frame_len;
140 /*
141 * Set up the frame's max payload, which the base
142 * driver uses to tell the HW the max frame size, so
143 * that the HW can fragment appropriately based on
144 * the "gso_max_size" of the underlying netdev.
145 */
146 fr_max_payload(fp) = cmd->sess->max_frame;
147 }
148 tlen = min(mem_len, frame_len);
149
150 if (use_sg) {
151 if (!mem) {
152 BUG_ON(!task->t_task_buf);
153 page_addr = task->t_task_buf + mem_off;
154 /*
155 * In this case, offset is 'offset_in_page' of
156 * (t_task_buf + mem_off) instead of 'mem_off'.
157 */
158 off_in_page = offset_in_page(page_addr);
159 page = virt_to_page(page_addr);
160 tlen = min(tlen, PAGE_SIZE - off_in_page);
161 } else
162 off_in_page = mem_off;
163 BUG_ON(!page);
164 get_page(page);
165 skb_fill_page_desc(fp_skb(fp),
166 skb_shinfo(fp_skb(fp))->nr_frags,
167 page, off_in_page, tlen);
168 fr_len(fp) += tlen;
169 fp_skb(fp)->data_len += tlen;
170 fp_skb(fp)->truesize +=
171 PAGE_SIZE << compound_order(page);
172 } else if (mem) {
173 BUG_ON(!page);
174 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
175 KM_SOFTIRQ0);
176 page_addr = from;
177 from += mem_off & ~PAGE_MASK;
178 tlen = min(tlen, (size_t)(PAGE_SIZE -
179 (mem_off & ~PAGE_MASK)));
180 memcpy(to, from, tlen);
181 kunmap_atomic(page_addr, KM_SOFTIRQ0);
182 to += tlen;
183 } else {
184 from = task->t_task_buf + mem_off;
185 memcpy(to, from, tlen);
186 to += tlen;
187 }
188
189 mem_off += tlen;
190 mem_len -= tlen;
191 frame_len -= tlen;
192 remaining -= tlen;
193
194 if (frame_len &&
195 (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
196 continue;
197 if (!remaining)
198 f_ctl |= FC_FC_END_SEQ;
199 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
200 FC_TYPE_FCP, f_ctl, fh_off);
201 error = lport->tt.seq_send(lport, cmd->seq, fp);
202 if (error) {
203 /* XXX For now, initiator will retry */
204 if (printk_ratelimit())
205 printk(KERN_ERR "%s: Failed to send frame %p, "
206 "xid <0x%x>, remaining <0x%x>, "
207 "lso_max <0x%x>\n",
208 __func__, fp, ep->xid,
209 remaining, lport->lso_max);
210 }
211 }
212 return ft_queue_status(se_cmd);
213}
214
215/*
216 * Receive write data frame.
217 */
218void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
219{
220 struct se_cmd *se_cmd = &cmd->se_cmd;
221 struct fc_seq *seq = cmd->seq;
222 struct fc_exch *ep;
223 struct fc_lport *lport;
224 struct se_transport_task *task;
225 struct fc_frame_header *fh;
226 struct se_mem *mem;
227 u32 mem_off;
228 u32 rel_off;
229 size_t frame_len;
230 size_t mem_len;
231 size_t tlen;
232 struct page *page;
233 void *page_addr;
234 void *from;
235 void *to;
236 u32 f_ctl;
237 void *buf;
238
239 task = T_TASK(se_cmd);
240 BUG_ON(!task);
241
242 fh = fc_frame_header_get(fp);
243 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
244 goto drop;
245
246 /*
247 * We don't expect even a single byte of payload here:
248 * with the DDP (large receive offload) feature, the
249 * payload is copied directly to the user buffers,
250 * hence the BUG_ON if buf is non-NULL.
251 */
252 buf = fc_frame_payload_get(fp, 1);
253 if (cmd->was_ddp_setup && buf) {
254 printk(KERN_INFO "%s: when DDP was set up, did not expect to "
255 "receive a frame with payload; the payload should be "
256 "copied directly to the buffer instead of coming "
257 "via the legacy receive queues\n", __func__);
258 BUG_ON(buf);
259 }
260
261 /*
262 * If DDP was set up for this ft_cmd, only the last frame
263 * should arrive here, and it should have the TSI bit set.
264 * A data frame without the TSI bit indicates an error
265 * condition. In both cases release the DDP context
266 * (ddp_done); in the error case also initiate recovery.
267 */
268 ep = fc_seq_exch(seq);
269 if (cmd->was_ddp_setup) {
270 BUG_ON(!ep);
271 lport = ep->lp;
272 BUG_ON(!lport);
273 }
274 if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
275 f_ctl = ntoh24(fh->fh_f_ctl);
276 /*
277 * If the TSI bit is set in f_ctl, the last write data
278 * frame was received successfully: its payload was posted
279 * directly to the user buffer and only this frame's header
280 * arrives via the legacy receive queue.
281 */
282 if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
283 cmd->write_data_len = lport->tt.ddp_done(lport,
284 ep->xid);
285 goto last_frame;
286 } else {
287 /*
288 * Updating write_data_len may be meaningless at this
289 * point, but record it anyway in case it is needed
290 * later for debugging or any other purpose.
291 */
292 printk(KERN_ERR "%s: Received frame with TSI bit not"
293 " being SET, dropping the frame, "
294 "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
295 __func__, cmd->sg, cmd->sg_cnt);
296 cmd->write_data_len = lport->tt.ddp_done(lport,
297 ep->xid);
298 lport->tt.seq_exch_abort(cmd->seq, 0);
299 goto drop;
300 }
301 }
302
303 rel_off = ntohl(fh->fh_parm_offset);
304 frame_len = fr_len(fp);
305 if (frame_len <= sizeof(*fh))
306 goto drop;
307 frame_len -= sizeof(*fh);
308 from = fc_frame_payload_get(fp, 0);
309 if (rel_off >= se_cmd->data_length)
310 goto drop;
311 if (frame_len + rel_off > se_cmd->data_length)
312 frame_len = se_cmd->data_length - rel_off;
313
314 /*
315 * Setup to use first mem list entry if any.
316 */
317 if (task->t_tasks_se_num) {
318 mem = list_first_entry(task->t_mem_list,
319 struct se_mem, se_list);
320 mem_len = mem->se_len;
321 mem_off = mem->se_off;
322 page = mem->se_page;
323 } else {
324 mem = NULL;
325 page = NULL;
326 mem_off = 0;
327 mem_len = frame_len;
328 }
329
330 while (frame_len) {
331 if (!mem_len) {
332 BUG_ON(!mem);
333 mem = list_entry(mem->se_list.next,
334 struct se_mem, se_list);
335 mem_len = mem->se_len;
336 mem_off = mem->se_off;
337 page = mem->se_page;
338 }
339 if (rel_off >= mem_len) {
340 rel_off -= mem_len;
341 mem_len = 0;
342 continue;
343 }
344 mem_off += rel_off;
345 mem_len -= rel_off;
346 rel_off = 0;
347
348 tlen = min(mem_len, frame_len);
349
350 if (mem) {
351 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
352 KM_SOFTIRQ0);
353 page_addr = to;
354 to += mem_off & ~PAGE_MASK;
355 tlen = min(tlen, (size_t)(PAGE_SIZE -
356 (mem_off & ~PAGE_MASK)));
357 memcpy(to, from, tlen);
358 kunmap_atomic(page_addr, KM_SOFTIRQ0);
359 } else {
360 to = task->t_task_buf + mem_off;
361 memcpy(to, from, tlen);
362 }
363 from += tlen;
364 frame_len -= tlen;
365 mem_off += tlen;
366 mem_len -= tlen;
367 cmd->write_data_len += tlen;
368 }
369last_frame:
370 if (cmd->write_data_len == se_cmd->data_length)
371 transport_generic_handle_data(se_cmd);
372drop:
373 fc_frame_free(fp);
374}
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
new file mode 100644
index 000000000000..a3bd57f2ea32
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -0,0 +1,541 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <linux/rcupdate.h>
34#include <linux/rculist.h>
35#include <linux/kref.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_device.h>
47#include <target/target_core_tpg.h>
48#include <target/target_core_configfs.h>
49#include <target/target_core_base.h>
50#include <target/configfs_macros.h>
51
52#include <scsi/libfc.h>
53#include "tcm_fc.h"
54
55static void ft_sess_delete_all(struct ft_tport *);
56
57/*
58 * Lookup or allocate target local port.
59 * Caller holds ft_lport_lock.
60 */
61static struct ft_tport *ft_tport_create(struct fc_lport *lport)
62{
63 struct ft_tpg *tpg;
64 struct ft_tport *tport;
65 int i;
66
67 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
68 if (tport && tport->tpg)
69 return tport;
70
71 tpg = ft_lport_find_tpg(lport);
72 if (!tpg)
73 return NULL;
74
75 if (tport) {
76 tport->tpg = tpg;
77 return tport;
78 }
79
80 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
81 if (!tport)
82 return NULL;
83
84 tport->lport = lport;
85 tport->tpg = tpg;
86 tpg->tport = tport;
87 for (i = 0; i < FT_SESS_HASH_SIZE; i++)
88 INIT_HLIST_HEAD(&tport->hash[i]);
89
90 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
91 return tport;
92}
93
94/*
95 * Free tport via RCU.
96 */
97static void ft_tport_rcu_free(struct rcu_head *rcu)
98{
99 struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
100
101 kfree(tport);
102}
103
104/*
105 * Delete a target local port.
106 * Caller holds ft_lport_lock.
107 */
108static void ft_tport_delete(struct ft_tport *tport)
109{
110 struct fc_lport *lport;
111 struct ft_tpg *tpg;
112
113 ft_sess_delete_all(tport);
114 lport = tport->lport;
115 BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
116 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
117
118 tpg = tport->tpg;
119 if (tpg) {
120 tpg->tport = NULL;
121 tport->tpg = NULL;
122 }
123 call_rcu(&tport->rcu, ft_tport_rcu_free);
124}
125
126/*
127 * Add local port.
128 * Called thru fc_lport_iterate().
129 */
130void ft_lport_add(struct fc_lport *lport, void *arg)
131{
132 mutex_lock(&ft_lport_lock);
133 ft_tport_create(lport);
134 mutex_unlock(&ft_lport_lock);
135}
136
137/*
138 * Delete local port.
139 * Called thru fc_lport_iterate().
140 */
141void ft_lport_del(struct fc_lport *lport, void *arg)
142{
143 struct ft_tport *tport;
144
145 mutex_lock(&ft_lport_lock);
146 tport = lport->prov[FC_TYPE_FCP];
147 if (tport)
148 ft_tport_delete(tport);
149 mutex_unlock(&ft_lport_lock);
150}
151
152/*
153 * Notification of local port change from libfc.
154 * Create or delete local port and associated tport.
155 */
156int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
157{
158 struct fc_lport *lport = arg;
159
160 switch (event) {
161 case FC_LPORT_EV_ADD:
162 ft_lport_add(lport, NULL);
163 break;
164 case FC_LPORT_EV_DEL:
165 ft_lport_del(lport, NULL);
166 break;
167 }
168 return NOTIFY_DONE;
169}
170
171/*
172 * Hash function for FC_IDs.
173 */
174static u32 ft_sess_hash(u32 port_id)
175{
176 return hash_32(port_id, FT_SESS_HASH_BITS);
177}
178
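A brief note on the sizing (illustrative, not part of the patch): hash_32() multiplies by the 32-bit golden-ratio constant and keeps the top FT_SESS_HASH_BITS bits, so each 24-bit FC_ID maps to one of the 64 buckets declared in tcm_fc.h:

/*
 * head = &tport->hash[ft_sess_hash(port_id)];
 * ft_sess_hash(0x010203) is in [0, FT_SESS_HASH_SIZE - 1], i.e. [0, 63].
 */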
179/*
180 * Find session in local port.
181 * Sessions and hash lists are RCU-protected.
182 * A reference is taken which must be eventually freed.
183 */
184static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
185{
186 struct ft_tport *tport;
187 struct hlist_head *head;
188 struct hlist_node *pos;
189 struct ft_sess *sess;
190
191 rcu_read_lock();
192 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
193 if (!tport)
194 goto out;
195
196 head = &tport->hash[ft_sess_hash(port_id)];
197 hlist_for_each_entry_rcu(sess, pos, head, hash) {
198 if (sess->port_id == port_id) {
199 kref_get(&sess->kref);
200 rcu_read_unlock();
201 FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
202 return sess;
203 }
204 }
205out:
206 rcu_read_unlock();
207 FT_SESS_DBG("port_id %x not found\n", port_id);
208 return NULL;
209}
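
The lookup above is the classic RCU-plus-kref pattern: readers walk the hash
under rcu_read_lock() and pin the object with kref_get() before leaving the
read-side critical section, so a concurrent unhash/call_rcu() cannot free it
out from under them. A minimal sketch of the same idiom, with a hypothetical
struct obj standing in for ft_sess:

struct obj {
	struct hlist_node hash;
	struct kref kref;
	struct rcu_head rcu;
	u32 id;
};

static struct obj *obj_get(struct hlist_head *head, u32 id)
{
	struct obj *o;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(o, pos, head, hash) {
		if (o->id == id) {
			kref_get(&o->kref);	/* pin before dropping RCU */
			rcu_read_unlock();
			return o;
		}
	}
	rcu_read_unlock();
	return NULL;
}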
210
211/*
212 * Allocate session and enter it in the hash for the local port.
213 * Caller holds ft_lport_lock.
214 */
215static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
216 struct ft_node_acl *acl)
217{
218 struct ft_sess *sess;
219 struct hlist_head *head;
220 struct hlist_node *pos;
221
222 head = &tport->hash[ft_sess_hash(port_id)];
223 hlist_for_each_entry_rcu(sess, pos, head, hash)
224 if (sess->port_id == port_id)
225 return sess;
226
227 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
228 if (!sess)
229 return NULL;
230
231 sess->se_sess = transport_init_session();
232 if (!sess->se_sess) {
233 kfree(sess);
234 return NULL;
235 }
236 sess->se_sess->se_node_acl = &acl->se_node_acl;
237 sess->tport = tport;
238 sess->port_id = port_id;
239 kref_init(&sess->kref); /* ref for table entry */
240 hlist_add_head_rcu(&sess->hash, head);
241 tport->sess_count++;
242
243 FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
244
245 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
246 sess->se_sess, sess);
247 return sess;
248}
249
250/*
251 * Unhash the session.
252 * Caller holds ft_lport_lock.
253 */
254static void ft_sess_unhash(struct ft_sess *sess)
255{
256 struct ft_tport *tport = sess->tport;
257
258 hlist_del_rcu(&sess->hash);
259 BUG_ON(!tport->sess_count);
260 tport->sess_count--;
261 sess->port_id = -1;
262 sess->params = 0;
263}
264
265/*
266 * Delete session from hash.
267 * Caller holds ft_lport_lock.
268 */
269static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
270{
271 struct hlist_head *head;
272 struct hlist_node *pos;
273 struct ft_sess *sess;
274
275 head = &tport->hash[ft_sess_hash(port_id)];
276 hlist_for_each_entry_rcu(sess, pos, head, hash) {
277 if (sess->port_id == port_id) {
278 ft_sess_unhash(sess);
279 return sess;
280 }
281 }
282 return NULL;
283}
284
285/*
286 * Delete all sessions from tport.
287 * Caller holds ft_lport_lock.
288 */
289static void ft_sess_delete_all(struct ft_tport *tport)
290{
291 struct hlist_head *head;
292 struct hlist_node *pos;
293 struct ft_sess *sess;
294
295 for (head = tport->hash;
296 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
297 hlist_for_each_entry_rcu(sess, pos, head, hash) {
298 ft_sess_unhash(sess);
299 transport_deregister_session_configfs(sess->se_sess);
300 ft_sess_put(sess); /* release from table */
301 }
302 }
303}
304
305/*
306 * TCM ops for sessions.
307 */
308
309/*
310 * Determine whether the session is allowed to be shut down in the current context.
311 * Returns non-zero if the session should be shut down.
312 */
313int ft_sess_shutdown(struct se_session *se_sess)
314{
315 struct ft_sess *sess = se_sess->fabric_sess_ptr;
316
317 FT_SESS_DBG("port_id %x\n", sess->port_id);
318 return 1;
319}
320
321/*
322 * Remove session and send PRLO.
323 * This is called when the ACL is being deleted or queue depth is changing.
324 */
325void ft_sess_close(struct se_session *se_sess)
326{
327 struct ft_sess *sess = se_sess->fabric_sess_ptr;
328 struct fc_lport *lport;
329 u32 port_id;
330
331 mutex_lock(&ft_lport_lock);
332 lport = sess->tport->lport;
333 port_id = sess->port_id;
334 if (port_id == -1) {
335	mutex_unlock(&ft_lport_lock);
336 return;
337 }
338 FT_SESS_DBG("port_id %x\n", port_id);
339 ft_sess_unhash(sess);
340 mutex_unlock(&ft_lport_lock);
341 transport_deregister_session_configfs(se_sess);
342 ft_sess_put(sess);
343 /* XXX Send LOGO or PRLO */
344 synchronize_rcu(); /* let transport deregister happen */
345}
346
347void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
348{
349 struct ft_sess *sess = se_sess->fabric_sess_ptr;
350
351 FT_SESS_DBG("port_id %x\n", sess->port_id);
352}
353
354int ft_sess_logged_in(struct se_session *se_sess)
355{
356 struct ft_sess *sess = se_sess->fabric_sess_ptr;
357
358 return sess->port_id != -1;
359}
360
361u32 ft_sess_get_index(struct se_session *se_sess)
362{
363 struct ft_sess *sess = se_sess->fabric_sess_ptr;
364
365 return sess->port_id; /* XXX TBD probably not what is needed */
366}
367
368u32 ft_sess_get_port_name(struct se_session *se_sess,
369 unsigned char *buf, u32 len)
370{
371 struct ft_sess *sess = se_sess->fabric_sess_ptr;
372
373 return ft_format_wwn(buf, len, sess->port_name);
374}
375
376void ft_sess_set_erl0(struct se_session *se_sess)
377{
378 /* XXX TBD called when out of memory */
379}
380
381/*
382 * libfc ops involving sessions.
383 */
384
385static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
386 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
387{
388 struct ft_tport *tport;
389 struct ft_sess *sess;
390 struct ft_node_acl *acl;
391 u32 fcp_parm;
392
393 tport = ft_tport_create(rdata->local_port);
394 if (!tport)
395 return 0; /* not a target for this local port */
396
397 acl = ft_acl_get(tport->tpg, rdata);
398 if (!acl)
399 return 0;
400
401 if (!rspp)
402 goto fill;
403
404 if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
405 return FC_SPP_RESP_NO_PA;
406
407 /*
408 * If both target and initiator bits are off, the SPP is invalid.
409 */
410 fcp_parm = ntohl(rspp->spp_params);
411 if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
412 return FC_SPP_RESP_INVL;
413
414 /*
415 * Create session (image pair) only if requested by
416 * EST_IMG_PAIR flag and if the requestor is an initiator.
417 */
418 if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
419 spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
420 if (!(fcp_parm & FCP_SPPF_INIT_FCN))
421 return FC_SPP_RESP_CONF;
422 sess = ft_sess_create(tport, rdata->ids.port_id, acl);
423 if (!sess)
424 return FC_SPP_RESP_RES;
425 if (!sess->params)
426 rdata->prli_count++;
427 sess->params = fcp_parm;
428 sess->port_name = rdata->ids.port_name;
429 sess->max_frame = rdata->maxframe_size;
430
431 /* XXX TBD - clearing actions. unit attn, see 4.10 */
432 }
433
434 /*
435 * OR our service parameters into those of the other provider (initiator), if any.
436 * TBD XXX - indicate RETRY capability?
437 */
438fill:
439 fcp_parm = ntohl(spp->spp_params);
440 spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
441 return FC_SPP_RESP_ACK;
442}
443
444/**
445 * tcm_fcp_prli() - Handle incoming or outgoing PRLI for the FCP target
446 * @rdata: remote port private
447 * @spp_len: service parameter page length
448 * @rspp: received service parameter page (NULL for outgoing PRLI)
449 * @spp: response service parameter page
450 *
451 * Returns spp response code.
452 */
453static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
454 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
455{
456 int ret;
457
458 mutex_lock(&ft_lport_lock);
459 ret = ft_prli_locked(rdata, spp_len, rspp, spp);
460 mutex_unlock(&ft_lport_lock);
461 FT_SESS_DBG("port_id %x flags %x ret %x\n",
462 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
463 return ret;
464}
465
466static void ft_sess_rcu_free(struct rcu_head *rcu)
467{
468 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
469
470 transport_deregister_session(sess->se_sess);
471 kfree(sess);
472}
473
474static void ft_sess_free(struct kref *kref)
475{
476 struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
477
478 call_rcu(&sess->rcu, ft_sess_rcu_free);
479}
480
481void ft_sess_put(struct ft_sess *sess)
482{
483 int sess_held = atomic_read(&sess->kref.refcount);
484
485 BUG_ON(!sess_held);
486 kref_put(&sess->kref, ft_sess_free);
487}
488
489static void ft_prlo(struct fc_rport_priv *rdata)
490{
491 struct ft_sess *sess;
492 struct ft_tport *tport;
493
494 mutex_lock(&ft_lport_lock);
495 tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
496 if (!tport) {
497 mutex_unlock(&ft_lport_lock);
498 return;
499 }
500 sess = ft_sess_delete(tport, rdata->ids.port_id);
501 if (!sess) {
502 mutex_unlock(&ft_lport_lock);
503 return;
504 }
505 mutex_unlock(&ft_lport_lock);
506 transport_deregister_session_configfs(sess->se_sess);
507 ft_sess_put(sess); /* release from table */
508 rdata->prli_count--;
509 /* XXX TBD - clearing actions. unit attn, see 4.10 */
510}
511
512/*
513 * Handle incoming FCP request.
514 * Caller has verified that the frame is type FCP.
515 */
516static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
517{
518 struct ft_sess *sess;
519 u32 sid = fc_frame_sid(fp);
520
521 FT_SESS_DBG("sid %x\n", sid);
522
523 sess = ft_sess_get(lport, sid);
524 if (!sess) {
525 FT_SESS_DBG("sid %x sess lookup failed\n", sid);
526 /* TBD XXX - if FCP_CMND, send PRLO */
527 fc_frame_free(fp);
528 return;
529 }
530 ft_recv_req(sess, fp); /* must do ft_sess_put() */
531}
532
533/*
534 * Provider ops for libfc.
535 */
536struct fc4_prov ft_prov = {
537 .prli = ft_prli,
538 .prlo = ft_prlo,
539 .recv = ft_recv,
540 .module = THIS_MODULE,
541};
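
This table is the glue between libfc and the target core: libfc invokes
.prli/.prlo as PRLI/PRLO ELS frames arrive and .recv for incoming FCP frames.
The registration itself is not part of this hunk; assuming the libfc fc4
provider API of this era, the module init path would look roughly like:

static int __init ft_register_prov(void)
{
	/* hand the FCP provider table to libfc (sketch, not shown above) */
	return fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
}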
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 80484af781e1..b1f0f83b870d 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1391,6 +1391,14 @@ config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
 	help
 	  Support for Console on the NWP serial ports.
 
+config SERIAL_LANTIQ
+	bool "Lantiq serial driver"
+	depends on LANTIQ
+	select SERIAL_CORE
+	select SERIAL_CORE_CONSOLE
+	help
+	  Support for console and UART on Lantiq SoCs.
+
 config SERIAL_QE
 	tristate "Freescale QUICC Engine serial port support"
 	depends on QUICC_ENGINE
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index fee0690ef8e3..35276043d9d1 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
 obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
 obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
 obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
+obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
new file mode 100644
index 000000000000..58cf279ed879
--- /dev/null
+++ b/drivers/tty/serial/lantiq.c
@@ -0,0 +1,756 @@
1/*
2 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published
6 * by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 * Copyright (C) 2004 Infineon IFAP DC COM CPE
18 * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
19 * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
20 * Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com>
21 */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/ioport.h>
26#include <linux/init.h>
27#include <linux/console.h>
28#include <linux/sysrq.h>
29#include <linux/device.h>
30#include <linux/tty.h>
31#include <linux/tty_flip.h>
32#include <linux/serial_core.h>
33#include <linux/serial.h>
34#include <linux/platform_device.h>
35#include <linux/io.h>
36#include <linux/clk.h>
37
38#include <lantiq_soc.h>
39
40#define PORT_LTQ_ASC 111
41#define MAXPORTS 2
42#define UART_DUMMY_UER_RX 1
43#define DRVNAME "ltq_asc"
44#ifdef __BIG_ENDIAN
45#define LTQ_ASC_TBUF (0x0020 + 3)
46#define LTQ_ASC_RBUF (0x0024 + 3)
47#else
48#define LTQ_ASC_TBUF 0x0020
49#define LTQ_ASC_RBUF 0x0024
50#endif
51#define LTQ_ASC_FSTAT 0x0048
52#define LTQ_ASC_WHBSTATE 0x0018
53#define LTQ_ASC_STATE 0x0014
54#define LTQ_ASC_IRNCR 0x00F8
55#define LTQ_ASC_CLC 0x0000
56#define LTQ_ASC_ID 0x0008
57#define LTQ_ASC_PISEL 0x0004
58#define LTQ_ASC_TXFCON 0x0044
59#define LTQ_ASC_RXFCON 0x0040
60#define LTQ_ASC_CON 0x0010
61#define LTQ_ASC_BG 0x0050
62#define LTQ_ASC_IRNREN 0x00F4
63
64#define ASC_IRNREN_TX 0x1
65#define ASC_IRNREN_RX 0x2
66#define ASC_IRNREN_ERR 0x4
67#define ASC_IRNREN_TX_BUF 0x8
68#define ASC_IRNCR_TIR 0x1
69#define ASC_IRNCR_RIR 0x2
70#define ASC_IRNCR_EIR 0x4
71
72#define ASCOPT_CSIZE 0x3
73#define TXFIFO_FL 1
74#define RXFIFO_FL 1
75#define ASCCLC_DISS 0x2
76#define ASCCLC_RMCMASK 0x0000FF00
77#define ASCCLC_RMCOFFSET 8
78#define ASCCON_M_8ASYNC 0x0
79#define ASCCON_M_7ASYNC 0x2
80#define ASCCON_ODD 0x00000020
81#define ASCCON_STP 0x00000080
82#define ASCCON_BRS 0x00000100
83#define ASCCON_FDE 0x00000200
84#define ASCCON_R 0x00008000
85#define ASCCON_FEN 0x00020000
86#define ASCCON_ROEN 0x00080000
87#define ASCCON_TOEN 0x00100000
88#define ASCSTATE_PE 0x00010000
89#define ASCSTATE_FE 0x00020000
90#define ASCSTATE_ROE 0x00080000
91#define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE)
92#define ASCWHBSTATE_CLRREN 0x00000001
93#define ASCWHBSTATE_SETREN 0x00000002
94#define ASCWHBSTATE_CLRPE 0x00000004
95#define ASCWHBSTATE_CLRFE 0x00000008
96#define ASCWHBSTATE_CLRROE 0x00000020
97#define ASCTXFCON_TXFEN 0x0001
98#define ASCTXFCON_TXFFLU 0x0002
99#define ASCTXFCON_TXFITLMASK 0x3F00
100#define ASCTXFCON_TXFITLOFF 8
101#define ASCRXFCON_RXFEN 0x0001
102#define ASCRXFCON_RXFFLU 0x0002
103#define ASCRXFCON_RXFITLMASK 0x3F00
104#define ASCRXFCON_RXFITLOFF 8
105#define ASCFSTAT_RXFFLMASK 0x003F
106#define ASCFSTAT_TXFFLMASK 0x3F00
107#define ASCFSTAT_TXFREEMASK 0x3F000000
108#define ASCFSTAT_TXFREEOFF 24
109
110static void lqasc_tx_chars(struct uart_port *port);
111static struct ltq_uart_port *lqasc_port[MAXPORTS];
112static struct uart_driver lqasc_reg;
113static DEFINE_SPINLOCK(ltq_asc_lock);
114
115struct ltq_uart_port {
116 struct uart_port port;
117 struct clk *clk;
118 unsigned int tx_irq;
119 unsigned int rx_irq;
120 unsigned int err_irq;
121};
122
123static inline struct ltq_uart_port *
124to_ltq_uart_port(struct uart_port *port)
125{
126 return container_of(port, struct ltq_uart_port, port);
127}
128
129static void
130lqasc_stop_tx(struct uart_port *port)
131{
132 return;
133}
134
135static void
136lqasc_start_tx(struct uart_port *port)
137{
138 unsigned long flags;
139 spin_lock_irqsave(&ltq_asc_lock, flags);
140 lqasc_tx_chars(port);
141 spin_unlock_irqrestore(&ltq_asc_lock, flags);
142 return;
143}
144
145static void
146lqasc_stop_rx(struct uart_port *port)
147{
148 ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
149}
150
151static void
152lqasc_enable_ms(struct uart_port *port)
153{
154}
155
156static int
157lqasc_rx_chars(struct uart_port *port)
158{
159 struct tty_struct *tty = tty_port_tty_get(&port->state->port);
160 unsigned int ch = 0, rsr = 0, fifocnt;
161
162 if (!tty) {
163		dev_dbg(port->dev, "%s: tty is busy now\n", __func__);
164 return -EBUSY;
165 }
166 fifocnt =
167 ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
168 while (fifocnt--) {
169 u8 flag = TTY_NORMAL;
170 ch = ltq_r8(port->membase + LTQ_ASC_RBUF);
171 rsr = (ltq_r32(port->membase + LTQ_ASC_STATE)
172 & ASCSTATE_ANY) | UART_DUMMY_UER_RX;
173 tty_flip_buffer_push(tty);
174 port->icount.rx++;
175
176 /*
177 * Note that the error handling code is
178 * out of the main execution path
179 */
180 if (rsr & ASCSTATE_ANY) {
181 if (rsr & ASCSTATE_PE) {
182 port->icount.parity++;
183 ltq_w32_mask(0, ASCWHBSTATE_CLRPE,
184 port->membase + LTQ_ASC_WHBSTATE);
185 } else if (rsr & ASCSTATE_FE) {
186 port->icount.frame++;
187 ltq_w32_mask(0, ASCWHBSTATE_CLRFE,
188 port->membase + LTQ_ASC_WHBSTATE);
189 }
190 if (rsr & ASCSTATE_ROE) {
191 port->icount.overrun++;
192 ltq_w32_mask(0, ASCWHBSTATE_CLRROE,
193 port->membase + LTQ_ASC_WHBSTATE);
194 }
195
196 rsr &= port->read_status_mask;
197
198 if (rsr & ASCSTATE_PE)
199 flag = TTY_PARITY;
200 else if (rsr & ASCSTATE_FE)
201 flag = TTY_FRAME;
202 }
203
204 if ((rsr & port->ignore_status_mask) == 0)
205 tty_insert_flip_char(tty, ch, flag);
206
207 if (rsr & ASCSTATE_ROE)
208 /*
209 * Overrun is special, since it's reported
210 * immediately, and doesn't affect the current
211 * character
212 */
213 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
214 }
215 if (ch != 0)
216 tty_flip_buffer_push(tty);
217 tty_kref_put(tty);
218 return 0;
219}
220
221static void
222lqasc_tx_chars(struct uart_port *port)
223{
224 struct circ_buf *xmit = &port->state->xmit;
225 if (uart_tx_stopped(port)) {
226 lqasc_stop_tx(port);
227 return;
228 }
229
230 while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) &
231 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
232 if (port->x_char) {
233 ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF);
234 port->icount.tx++;
235 port->x_char = 0;
236 continue;
237 }
238
239 if (uart_circ_empty(xmit))
240 break;
241
242 ltq_w8(port->state->xmit.buf[port->state->xmit.tail],
243 port->membase + LTQ_ASC_TBUF);
244 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
245 port->icount.tx++;
246 }
247
248 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
249 uart_write_wakeup(port);
250}
251
252static irqreturn_t
253lqasc_tx_int(int irq, void *_port)
254{
255 unsigned long flags;
256 struct uart_port *port = (struct uart_port *)_port;
257 spin_lock_irqsave(&ltq_asc_lock, flags);
258 ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
259 spin_unlock_irqrestore(&ltq_asc_lock, flags);
260 lqasc_start_tx(port);
261 return IRQ_HANDLED;
262}
263
264static irqreturn_t
265lqasc_err_int(int irq, void *_port)
266{
267 unsigned long flags;
268 struct uart_port *port = (struct uart_port *)_port;
269 spin_lock_irqsave(&ltq_asc_lock, flags);
270 /* clear any pending interrupts */
271 ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
272 ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
273 spin_unlock_irqrestore(&ltq_asc_lock, flags);
274 return IRQ_HANDLED;
275}
276
277static irqreturn_t
278lqasc_rx_int(int irq, void *_port)
279{
280 unsigned long flags;
281 struct uart_port *port = (struct uart_port *)_port;
282 spin_lock_irqsave(&ltq_asc_lock, flags);
283 ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
284 lqasc_rx_chars(port);
285 spin_unlock_irqrestore(&ltq_asc_lock, flags);
286 return IRQ_HANDLED;
287}
288
289static unsigned int
290lqasc_tx_empty(struct uart_port *port)
291{
292 int status;
293 status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK;
294 return status ? 0 : TIOCSER_TEMT;
295}
296
297static unsigned int
298lqasc_get_mctrl(struct uart_port *port)
299{
300 return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR;
301}
302
303static void
304lqasc_set_mctrl(struct uart_port *port, u_int mctrl)
305{
306}
307
308static void
309lqasc_break_ctl(struct uart_port *port, int break_state)
310{
311}
312
313static int
314lqasc_startup(struct uart_port *port)
315{
316 struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
317 int retval;
318
319 port->uartclk = clk_get_rate(ltq_port->clk);
320
321 ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
322 port->membase + LTQ_ASC_CLC);
323
324 ltq_w32(0, port->membase + LTQ_ASC_PISEL);
325 ltq_w32(
326 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) |
327 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU,
328 port->membase + LTQ_ASC_TXFCON);
329 ltq_w32(
330 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK)
331 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU,
332 port->membase + LTQ_ASC_RXFCON);
333 /* make sure other settings are written to hardware before
334 * setting enable bits
335 */
336 wmb();
337 ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN |
338 ASCCON_ROEN, port->membase + LTQ_ASC_CON);
339
340 retval = request_irq(ltq_port->tx_irq, lqasc_tx_int,
341 IRQF_DISABLED, "asc_tx", port);
342 if (retval) {
343 pr_err("failed to request lqasc_tx_int\n");
344 return retval;
345 }
346
347 retval = request_irq(ltq_port->rx_irq, lqasc_rx_int,
348 IRQF_DISABLED, "asc_rx", port);
349 if (retval) {
350 pr_err("failed to request lqasc_rx_int\n");
351 goto err1;
352 }
353
354 retval = request_irq(ltq_port->err_irq, lqasc_err_int,
355 IRQF_DISABLED, "asc_err", port);
356 if (retval) {
357 pr_err("failed to request lqasc_err_int\n");
358 goto err2;
359 }
360
361 ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
362 port->membase + LTQ_ASC_IRNREN);
363 return 0;
364
365err2:
366 free_irq(ltq_port->rx_irq, port);
367err1:
368 free_irq(ltq_port->tx_irq, port);
369 return retval;
370}
371
372static void
373lqasc_shutdown(struct uart_port *port)
374{
375 struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
376 free_irq(ltq_port->tx_irq, port);
377 free_irq(ltq_port->rx_irq, port);
378 free_irq(ltq_port->err_irq, port);
379
380 ltq_w32(0, port->membase + LTQ_ASC_CON);
381 ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
382 port->membase + LTQ_ASC_RXFCON);
383 ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
384 port->membase + LTQ_ASC_TXFCON);
385}
386
387static void
388lqasc_set_termios(struct uart_port *port,
389 struct ktermios *new, struct ktermios *old)
390{
391 unsigned int cflag;
392 unsigned int iflag;
393 unsigned int divisor;
394 unsigned int baud;
395 unsigned int con = 0;
396 unsigned long flags;
397
398 cflag = new->c_cflag;
399 iflag = new->c_iflag;
400
401 switch (cflag & CSIZE) {
402 case CS7:
403 con = ASCCON_M_7ASYNC;
404 break;
405
406 case CS5:
407 case CS6:
408 default:
409		new->c_cflag &= ~CSIZE;
410 new->c_cflag |= CS8;
411 con = ASCCON_M_8ASYNC;
412 break;
413 }
414
415 cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
416
417 if (cflag & CSTOPB)
418 con |= ASCCON_STP;
419
420 if (cflag & PARENB) {
421 if (!(cflag & PARODD))
422 con &= ~ASCCON_ODD;
423 else
424 con |= ASCCON_ODD;
425 }
426
427 port->read_status_mask = ASCSTATE_ROE;
428 if (iflag & INPCK)
429 port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE;
430
431 port->ignore_status_mask = 0;
432 if (iflag & IGNPAR)
433 port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE;
434
435 if (iflag & IGNBRK) {
436 /*
437 * If we're ignoring parity and break indicators,
438 * ignore overruns too (for real raw support).
439 */
440 if (iflag & IGNPAR)
441 port->ignore_status_mask |= ASCSTATE_ROE;
442 }
443
444 if ((cflag & CREAD) == 0)
445 port->ignore_status_mask |= UART_DUMMY_UER_RX;
446
447 /* set error signals - framing, parity and overrun, enable receiver */
448 con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN;
449
450 spin_lock_irqsave(&ltq_asc_lock, flags);
451
452 /* set up CON */
453 ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON);
454
455 /* Set baud rate - take a divider of 2 into account */
456 baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
457 divisor = uart_get_divisor(port, baud);
458 divisor = divisor / 2 - 1;
459
460 /* disable the baudrate generator */
461 ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON);
462
463 /* make sure the fractional divider is off */
464 ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON);
465
466 /* set up to use divisor of 2 */
467 ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON);
468
469 /* now we can write the new baudrate into the register */
470 ltq_w32(divisor, port->membase + LTQ_ASC_BG);
471
472 /* turn the baudrate generator back on */
473 ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON);
474
475 /* enable rx */
476 ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
477
478 spin_unlock_irqrestore(&ltq_asc_lock, flags);
479
480 /* Don't rewrite B0 */
481 if (tty_termios_baud_rate(new))
482 tty_termios_encode_baud_rate(new, baud, baud);
483}
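
A worked example of the divisor math above, assuming a 100 MHz FPI clock
(the real rate comes from clk_get_rate() at runtime) and 115200 baud:

/*
 * uart_get_divisor():  100000000 / (16 * 115200) ~= 54
 * value written to BG: 54 / 2 - 1 = 26
 * effective baud rate: 100000000 / (16 * 2 * (26 + 1)) ~= 115741, ~0.5% high
 */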
484
485static const char *
486lqasc_type(struct uart_port *port)
487{
488 if (port->type == PORT_LTQ_ASC)
489 return DRVNAME;
490 else
491 return NULL;
492}
493
494static void
495lqasc_release_port(struct uart_port *port)
496{
497 if (port->flags & UPF_IOREMAP) {
498 iounmap(port->membase);
499 port->membase = NULL;
500 }
501}
502
503static int
504lqasc_request_port(struct uart_port *port)
505{
506 struct platform_device *pdev = to_platform_device(port->dev);
507 struct resource *res;
508 int size;
509
510 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
511 if (!res) {
512 dev_err(&pdev->dev, "cannot obtain I/O memory region");
513 return -ENODEV;
514 }
515 size = resource_size(res);
516
517 res = devm_request_mem_region(&pdev->dev, res->start,
518 size, dev_name(&pdev->dev));
519 if (!res) {
520 dev_err(&pdev->dev, "cannot request I/O memory region");
521 return -EBUSY;
522 }
523
524 if (port->flags & UPF_IOREMAP) {
525 port->membase = devm_ioremap_nocache(&pdev->dev,
526 port->mapbase, size);
527 if (port->membase == NULL)
528 return -ENOMEM;
529 }
530 return 0;
531}
532
533static void
534lqasc_config_port(struct uart_port *port, int flags)
535{
536 if (flags & UART_CONFIG_TYPE) {
537 port->type = PORT_LTQ_ASC;
538 lqasc_request_port(port);
539 }
540}
541
542static int
543lqasc_verify_port(struct uart_port *port,
544 struct serial_struct *ser)
545{
546 int ret = 0;
547 if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC)
548 ret = -EINVAL;
549 if (ser->irq < 0 || ser->irq >= NR_IRQS)
550 ret = -EINVAL;
551 if (ser->baud_base < 9600)
552 ret = -EINVAL;
553 return ret;
554}
555
556static struct uart_ops lqasc_pops = {
557 .tx_empty = lqasc_tx_empty,
558 .set_mctrl = lqasc_set_mctrl,
559 .get_mctrl = lqasc_get_mctrl,
560 .stop_tx = lqasc_stop_tx,
561 .start_tx = lqasc_start_tx,
562 .stop_rx = lqasc_stop_rx,
563 .enable_ms = lqasc_enable_ms,
564 .break_ctl = lqasc_break_ctl,
565 .startup = lqasc_startup,
566 .shutdown = lqasc_shutdown,
567 .set_termios = lqasc_set_termios,
568 .type = lqasc_type,
569 .release_port = lqasc_release_port,
570 .request_port = lqasc_request_port,
571 .config_port = lqasc_config_port,
572 .verify_port = lqasc_verify_port,
573};
574
575static void
576lqasc_console_putchar(struct uart_port *port, int ch)
577{
578 int fifofree;
579
580 if (!port->membase)
581 return;
582
583 do {
584 fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT)
585 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
586 } while (fifofree == 0);
587 ltq_w8(ch, port->membase + LTQ_ASC_TBUF);
588}
589
590
591static void
592lqasc_console_write(struct console *co, const char *s, u_int count)
593{
594 struct ltq_uart_port *ltq_port;
595 struct uart_port *port;
596 unsigned long flags;
597
598 if (co->index >= MAXPORTS)
599 return;
600
601 ltq_port = lqasc_port[co->index];
602 if (!ltq_port)
603 return;
604
605 port = &ltq_port->port;
606
607 spin_lock_irqsave(&ltq_asc_lock, flags);
608 uart_console_write(port, s, count, lqasc_console_putchar);
609 spin_unlock_irqrestore(&ltq_asc_lock, flags);
610}
611
612static int __init
613lqasc_console_setup(struct console *co, char *options)
614{
615 struct ltq_uart_port *ltq_port;
616 struct uart_port *port;
617 int baud = 115200;
618 int bits = 8;
619 int parity = 'n';
620 int flow = 'n';
621
622 if (co->index >= MAXPORTS)
623 return -ENODEV;
624
625 ltq_port = lqasc_port[co->index];
626 if (!ltq_port)
627 return -ENODEV;
628
629 port = &ltq_port->port;
630
631 port->uartclk = clk_get_rate(ltq_port->clk);
632
633 if (options)
634 uart_parse_options(options, &baud, &parity, &bits, &flow);
635 return uart_set_options(port, co, baud, parity, bits, flow);
636}
637
638static struct console lqasc_console = {
639 .name = "ttyLTQ",
640 .write = lqasc_console_write,
641 .device = uart_console_device,
642 .setup = lqasc_console_setup,
643 .flags = CON_PRINTBUFFER,
644 .index = -1,
645 .data = &lqasc_reg,
646};
647
648static int __init
649lqasc_console_init(void)
650{
651 register_console(&lqasc_console);
652 return 0;
653}
654console_initcall(lqasc_console_init);
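
With the console registered above, it would presumably be selected at boot
through the usual kernel command-line syntax (index and settings illustrative):

	console=ttyLTQ0,115200n8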
655
656static struct uart_driver lqasc_reg = {
657 .owner = THIS_MODULE,
658 .driver_name = DRVNAME,
659 .dev_name = "ttyLTQ",
660 .major = 0,
661 .minor = 0,
662 .nr = MAXPORTS,
663 .cons = &lqasc_console,
664};
665
666static int __init
667lqasc_probe(struct platform_device *pdev)
668{
669 struct ltq_uart_port *ltq_port;
670 struct uart_port *port;
671 struct resource *mmres, *irqres;
672 int tx_irq, rx_irq, err_irq;
673 struct clk *clk;
674 int ret;
675
676 mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
677 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
678 if (!mmres || !irqres)
679 return -ENODEV;
680
681 if (pdev->id >= MAXPORTS)
682 return -EBUSY;
683
684 if (lqasc_port[pdev->id] != NULL)
685 return -EBUSY;
686
687 clk = clk_get(&pdev->dev, "fpi");
688 if (IS_ERR(clk)) {
689 pr_err("failed to get fpi clk\n");
690 return -ENOENT;
691 }
692
693 tx_irq = platform_get_irq_byname(pdev, "tx");
694 rx_irq = platform_get_irq_byname(pdev, "rx");
695 err_irq = platform_get_irq_byname(pdev, "err");
696	if ((tx_irq < 0) || (rx_irq < 0) || (err_irq < 0))
697 return -ENODEV;
698
699 ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
700 if (!ltq_port)
701 return -ENOMEM;
702
703 port = &ltq_port->port;
704
705 port->iotype = SERIAL_IO_MEM;
706 port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP;
707 port->ops = &lqasc_pops;
708 port->fifosize = 16;
709	port->type = PORT_LTQ_ASC;
710 port->line = pdev->id;
711 port->dev = &pdev->dev;
712
713	port->irq = tx_irq; /* unused, kept only for backward compatibility */
714 port->mapbase = mmres->start;
715
716 ltq_port->clk = clk;
717
718 ltq_port->tx_irq = tx_irq;
719 ltq_port->rx_irq = rx_irq;
720 ltq_port->err_irq = err_irq;
721
722 lqasc_port[pdev->id] = ltq_port;
723 platform_set_drvdata(pdev, ltq_port);
724
725 ret = uart_add_one_port(&lqasc_reg, port);
726
727 return ret;
728}
729
730static struct platform_driver lqasc_driver = {
731 .driver = {
732 .name = DRVNAME,
733 .owner = THIS_MODULE,
734 },
735};
736
737int __init
738init_lqasc(void)
739{
740 int ret;
741
742 ret = uart_register_driver(&lqasc_reg);
743 if (ret != 0)
744 return ret;
745
746 ret = platform_driver_probe(&lqasc_driver, lqasc_probe);
747 if (ret != 0)
748 uart_unregister_driver(&lqasc_reg);
749
750 return ret;
751}
752
753module_init(init_lqasc);
754
755MODULE_DESCRIPTION("Lantiq serial port driver");
756MODULE_LICENSE("GPL");
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 51fe1795d5a8..d2efe823c20d 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -381,7 +381,13 @@ static int uio_get_minor(struct uio_device *idev)
 		retval = -ENOMEM;
 		goto exit;
 	}
-	idev->minor = id & MAX_ID_MASK;
+	if (id < UIO_MAX_DEVICES) {
+		idev->minor = id;
+	} else {
+		dev_err(idev->dev, "too many uio devices\n");
+		retval = -EINVAL;
+		idr_remove(&uio_idr, id);
+	}
 exit:
 	mutex_unlock(&minor_lock);
 	return retval;
@@ -587,14 +593,12 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
 
 static int uio_find_mem_index(struct vm_area_struct *vma)
 {
-	int mi;
 	struct uio_device *idev = vma->vm_private_data;
 
-	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
-		if (idev->info->mem[mi].size == 0)
+	if (vma->vm_pgoff < MAX_UIO_MAPS) {
+		if (idev->info->mem[vma->vm_pgoff].size == 0)
 			return -1;
-		if (vma->vm_pgoff == mi)
-			return mi;
+		return (int)vma->vm_pgoff;
 	}
 	return -1;
 }
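
The rewritten lookup leans on the standard UIO convention that mapping i of
a /dev/uioN device is selected by an mmap offset of i pages. A hypothetical
userspace sketch of that convention:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_uio_region(const char *dev, unsigned int i, size_t len)
{
	int fd = open(dev, O_RDWR);
	void *p;

	if (fd < 0)
		return NULL;
	/* the offset selects the mapping index, not a byte offset */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, (off_t)i * sysconf(_SC_PAGESIZE));
	close(fd);
	return p == MAP_FAILED ? NULL : p;
}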
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c
index 5ffdb483b015..a879fd5741f8 100644
--- a/drivers/uio/uio_netx.c
+++ b/drivers/uio/uio_netx.c
@@ -18,6 +18,9 @@
 
 #define PCI_VENDOR_ID_HILSCHER		0x15CF
 #define PCI_DEVICE_ID_HILSCHER_NETX	0x0000
+#define PCI_DEVICE_ID_HILSCHER_NETPLC	0x0010
+#define PCI_SUBDEVICE_ID_NETPLC_RAM	0x0000
+#define PCI_SUBDEVICE_ID_NETPLC_FLASH	0x0001
 #define PCI_SUBDEVICE_ID_NXSB_PCA	0x3235
 #define PCI_SUBDEVICE_ID_NXPCA		0x3335
 
@@ -66,6 +69,10 @@ static int __devinit netx_pci_probe(struct pci_dev *dev,
 		bar = 0;
 		info->name = "netx";
 		break;
+	case PCI_DEVICE_ID_HILSCHER_NETPLC:
+		bar = 0;
+		info->name = "netplc";
+		break;
 	default:
 		bar = 2;
 		info->name = "netx_plx";
@@ -134,6 +141,18 @@ static struct pci_device_id netx_pci_ids[] = {
 		.subdevice	= 0,
 	},
 	{
+		.vendor		= PCI_VENDOR_ID_HILSCHER,
+		.device		= PCI_DEVICE_ID_HILSCHER_NETPLC,
+		.subvendor	= PCI_VENDOR_ID_HILSCHER,
+		.subdevice	= PCI_SUBDEVICE_ID_NETPLC_RAM,
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_HILSCHER,
+		.device		= PCI_DEVICE_ID_HILSCHER_NETPLC,
+		.subvendor	= PCI_VENDOR_ID_HILSCHER,
+		.subdevice	= PCI_SUBDEVICE_ID_NETPLC_FLASH,
+	},
+	{
 		.vendor		= PCI_VENDOR_ID_PLX,
 		.device		= PCI_DEVICE_ID_PLX_9030,
 		.subvendor	= PCI_VENDOR_ID_PLX,
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 7174d518b8a6..0f424af7f109 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -189,6 +189,10 @@ static int uio_pdrv_genirq_remove(struct platform_device *pdev)
 
 	uio_unregister_device(priv->uioinfo);
 	pm_runtime_disable(&pdev->dev);
+
+	priv->uioinfo->handler = NULL;
+	priv->uioinfo->irqcontrol = NULL;
+
 	kfree(priv);
 	return 0;
 }
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 48a760220baf..bf6e11c758d5 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -38,6 +38,7 @@
 #include <linux/device.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 5408186afc35..ade40066decf 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index cb5cd422f3f5..82fd24935332 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -44,6 +44,7 @@
 #include <linux/usb/otg.h>
 #include <linux/dma-mapping.h>
 #include <linux/clk.h>
+#include <linux/prefetch.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 444b60aa15e9..365c02fc25fc 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -46,6 +46,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/io.h>
+#include <linux/prefetch.h>
 
 #include <asm/byteorder.h>
 #include <asm/dma.h>
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 78a39a41547d..57607696735c 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -32,6 +32,7 @@
 #include <linux/irq.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #include <asm/byteorder.h>
 #include <mach/hardware.h>
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index f97570a847ca..9c37dad3e816 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -81,6 +81,7 @@
 #include <linux/pm.h>
 #include <linux/io.h>
 #include <linux/bitmap.h>
+#include <linux/prefetch.h>
 
 #include <asm/irq.h>
 #include <asm/system.h>
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 18b7099a8125..fafccc2fd331 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -47,6 +47,7 @@
 #include <linux/usb/sl811.h>
 #include <linux/usb/hcd.h>
 #include <linux/platform_device.h>
+#include <linux/prefetch.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 09e52ba47ddf..ffc4193e9505 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -499,7 +499,6 @@ static int isd200_action( struct us_data *us, int action,
 	memset(&ata, 0, sizeof(ata));
 	srb->cmnd = info->cmnd;
 	srb->device = &srb_dev;
-	++srb->serial_number;
 
 	ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
 	ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 2ab291241635..7aa4eea930f1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -4,7 +4,7 @@
  * Author: Michael S. Tsirkin <mst@redhat.com>
  *
  * Inspiration, some code, and most witty comments come from
- * Documentation/lguest/lguest.c, by Rusty Russell
+ * Documentation/virtual/lguest/lguest.c, by Rusty Russell
  *
  * This work is licensed under the terms of the GNU GPL, version 2.
  *
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 5b2b5ef4edba..64e41f5448c4 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -3117,7 +3117,7 @@ int __init atafb_init(void)
 		atafb_ops.fb_setcolreg = &falcon_setcolreg;
 		error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher,
 				    IRQ_TYPE_PRIO,
-				    "framebuffer/modeswitch",
+				    "framebuffer:modeswitch",
 				    falcon_vbl_switcher);
 		if (error)
 			return error;
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 68041d9dc260..695066b5b2e6 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -27,6 +27,7 @@
 #include <linux/fb.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <linux/delay.h>
 #include <video/udlfb.h>
 #include "edid.h"
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1b0f98bc51b5..022f9eb0b7bf 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -990,6 +990,12 @@ config BCM63XX_WDT
 	  To compile this driver as a loadable module, choose M here.
 	  The module will be called bcm63xx_wdt.
 
+config LANTIQ_WDT
+	tristate "Lantiq SoC watchdog"
+	depends on LANTIQ
+	help
+	  Hardware driver for the Lantiq SoC Watchdog Timer.
+
 # PARISC Architecture
 
 # POWERPC Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 3f8608b922a7..ed26f7094e47 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -123,6 +123,7 @@ obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
 obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
 obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
 octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
+obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
 
 # PARISC Architecture
 
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
new file mode 100644
index 000000000000..7d82adac1cb2
--- /dev/null
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -0,0 +1,261 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 * Based on EP93xx wdt driver
8 */
9
10#include <linux/module.h>
11#include <linux/fs.h>
12#include <linux/miscdevice.h>
13#include <linux/watchdog.h>
14#include <linux/platform_device.h>
15#include <linux/uaccess.h>
16#include <linux/clk.h>
17#include <linux/io.h>
18
19#include <lantiq.h>
20
21/* Section 3.4 of the datasheet
22 * The password sequence protects the WDT control register from unintended
23 * write actions, which might cause malfunction of the WDT.
24 *
25 * Essentially, the following two magic passwords need to be written to
26 * allow I/O access to the WDT core.
27 */
28#define LTQ_WDT_PW1 0x00BE0000
29#define LTQ_WDT_PW2 0x00DC0000
30
31#define LTQ_WDT_CR 0x0 /* watchdog control register */
32#define LTQ_WDT_SR 0x8 /* watchdog status register */
33
34#define LTQ_WDT_SR_EN (0x1 << 31) /* enable bit */
35#define LTQ_WDT_SR_PWD (0x3 << 26) /* turn on power */
36#define LTQ_WDT_SR_CLKDIV (0x3 << 24) /* turn on clock and set */
37 /* divider to 0x40000 */
38#define LTQ_WDT_DIVIDER 0x40000
39#define LTQ_MAX_TIMEOUT ((1 << 16) - 1) /* the reload field is 16 bit */
40
41static int nowayout = WATCHDOG_NOWAYOUT;
42
43static void __iomem *ltq_wdt_membase;
44static unsigned long ltq_io_region_clk_rate;
45
46static unsigned long ltq_wdt_bootstatus;
47static unsigned long ltq_wdt_in_use;
48static int ltq_wdt_timeout = 30;
49static int ltq_wdt_ok_to_close;
50
51static void
52ltq_wdt_enable(void)
53{
54	unsigned long timeout = ltq_wdt_timeout *
55		(ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
56	if (timeout > LTQ_MAX_TIMEOUT)
57		timeout = LTQ_MAX_TIMEOUT;
58
59	/* write the first password magic */
60	ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
61	/* write the second magic plus the configuration and new timeout */
62	ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
63		LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
64}
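
The reload value is scaled from seconds by the fixed 0x40000 clock divider;
keeping the scaled result in a local (as above) stops repeated keepalives
from compounding it and keeps ltq_wdt_timeout in seconds for the ioctls.
Worked numbers, assuming a 100 MHz io clock (the actual rate is read from
the "io" clock at probe time):

/*
 * ticks per second: 100000000 / 0x40000 ~= 381
 * 30 s timeout:     30 * 381 + 0x1000 = 15526  (< LTQ_MAX_TIMEOUT 65535)
 * longest timeout:  (0xFFFF - 0x1000) / 381 ~= 161 s before clamping
 */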
65
66static void
67ltq_wdt_disable(void)
68{
69 /* write the first password magic */
70 ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
71 /* write the second password magic with no config
72 * this turns the watchdog off
73 */
74 ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
75}
76
77static ssize_t
78ltq_wdt_write(struct file *file, const char __user *data,
79 size_t len, loff_t *ppos)
80{
81 if (len) {
82 if (!nowayout) {
83 size_t i;
84
85 ltq_wdt_ok_to_close = 0;
86 for (i = 0; i != len; i++) {
87 char c;
88
89 if (get_user(c, data + i))
90 return -EFAULT;
91 if (c == 'V')
92 ltq_wdt_ok_to_close = 1;
93 else
94 ltq_wdt_ok_to_close = 0;
95 }
96 }
97 ltq_wdt_enable();
98 }
99
100 return len;
101}
102
103static struct watchdog_info ident = {
104 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
105 WDIOF_CARDRESET,
106 .identity = "ltq_wdt",
107};
108
109static long
110ltq_wdt_ioctl(struct file *file,
111 unsigned int cmd, unsigned long arg)
112{
113 int ret = -ENOTTY;
114
115 switch (cmd) {
116 case WDIOC_GETSUPPORT:
117 ret = copy_to_user((struct watchdog_info __user *)arg, &ident,
118 sizeof(ident)) ? -EFAULT : 0;
119 break;
120
121 case WDIOC_GETBOOTSTATUS:
122 ret = put_user(ltq_wdt_bootstatus, (int __user *)arg);
123 break;
124
125 case WDIOC_GETSTATUS:
126 ret = put_user(0, (int __user *)arg);
127 break;
128
129 case WDIOC_SETTIMEOUT:
130 ret = get_user(ltq_wdt_timeout, (int __user *)arg);
131 if (!ret)
132 ltq_wdt_enable();
133		/* intentional fall through */
134 case WDIOC_GETTIMEOUT:
135 ret = put_user(ltq_wdt_timeout, (int __user *)arg);
136 break;
137
138 case WDIOC_KEEPALIVE:
139 ltq_wdt_enable();
140 ret = 0;
141 break;
142 }
143 return ret;
144}
145
146static int
147ltq_wdt_open(struct inode *inode, struct file *file)
148{
149 if (test_and_set_bit(0, &ltq_wdt_in_use))
150 return -EBUSY;
151	/* bit 0 of ltq_wdt_in_use is already set by test_and_set_bit() */
152	ltq_wdt_enable();
153
154 return nonseekable_open(inode, file);
155}
156
157static int
158ltq_wdt_release(struct inode *inode, struct file *file)
159{
160 if (ltq_wdt_ok_to_close)
161 ltq_wdt_disable();
162 else
163 pr_err("ltq_wdt: watchdog closed without warning\n");
164 ltq_wdt_ok_to_close = 0;
165 clear_bit(0, &ltq_wdt_in_use);
166
167 return 0;
168}
169
170static const struct file_operations ltq_wdt_fops = {
171 .owner = THIS_MODULE,
172 .write = ltq_wdt_write,
173 .unlocked_ioctl = ltq_wdt_ioctl,
174 .open = ltq_wdt_open,
175 .release = ltq_wdt_release,
176 .llseek = no_llseek,
177};
178
179static struct miscdevice ltq_wdt_miscdev = {
180 .minor = WATCHDOG_MINOR,
181 .name = "watchdog",
182 .fops = &ltq_wdt_fops,
183};
184
185static int __init
186ltq_wdt_probe(struct platform_device *pdev)
187{
188 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
189 struct clk *clk;
190
191 if (!res) {
192 dev_err(&pdev->dev, "cannot obtain I/O memory region");
193 return -ENOENT;
194 }
195 res = devm_request_mem_region(&pdev->dev, res->start,
196 resource_size(res), dev_name(&pdev->dev));
197 if (!res) {
198 dev_err(&pdev->dev, "cannot request I/O memory region");
199 return -EBUSY;
200 }
201 ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
202 resource_size(res));
203 if (!ltq_wdt_membase) {
204 dev_err(&pdev->dev, "cannot remap I/O memory region\n");
205 return -ENOMEM;
206 }
207
208 /* we do not need to enable the clock as it is always running */
209 clk = clk_get(&pdev->dev, "io");
210	WARN_ON(IS_ERR(clk));
211 ltq_io_region_clk_rate = clk_get_rate(clk);
212 clk_put(clk);
213
214 if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
215 ltq_wdt_bootstatus = WDIOF_CARDRESET;
216
217 return misc_register(&ltq_wdt_miscdev);
218}
219
220static int __devexit
221ltq_wdt_remove(struct platform_device *pdev)
222{
223 misc_deregister(&ltq_wdt_miscdev);
224
225	/* ltq_wdt_membase was mapped with devm_ioremap_nocache() and is
226	 * released automatically; no explicit iounmap is needed here */
227
228 return 0;
229}
230
231
232static struct platform_driver ltq_wdt_driver = {
233 .remove = __devexit_p(ltq_wdt_remove),
234 .driver = {
235 .name = "ltq_wdt",
236 .owner = THIS_MODULE,
237 },
238};
239
240static int __init
241init_ltq_wdt(void)
242{
243 return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
244}
245
246static void __exit
247exit_ltq_wdt(void)
248{
249 return platform_driver_unregister(&ltq_wdt_driver);
250}
251
252module_init(init_ltq_wdt);
253module_exit(exit_ltq_wdt);
254
255module_param(nowayout, int, 0);
256MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
257
258MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
259MODULE_DESCRIPTION("Lantiq SoC Watchdog");
260MODULE_LICENSE("GPL");
261MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
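
A minimal userspace sketch of driving this driver through the standard
/dev/watchdog interface; the device path and 30 s timeout are illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 30;

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* also restarts the timer */
	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ping well inside the timeout */
		sleep(10);
	}
	/* unreachable; writing 'V' before close() would permit a clean stop */
}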
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 5ec5ac1f7878..1479dc4d6129 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -66,6 +66,7 @@ static struct {
 	int default_ticks;
 	unsigned long inuse;
 	unsigned gpio;
+	int gstate;
 } mtx1_wdt_device;
 
 static void mtx1_wdt_trigger(unsigned long unused)
@@ -75,13 +76,13 @@ static void mtx1_wdt_trigger(unsigned long unused)
 	spin_lock(&mtx1_wdt_device.lock);
 	if (mtx1_wdt_device.running)
 		ticks--;
-	/*
-	 * toggle GPIO2_15
-	 */
-	tmp = au_readl(GPIO2_DIR);
-	tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) |
-	      ((~tmp) & (1 << mtx1_wdt_device.gpio));
-	au_writel(tmp, GPIO2_DIR);
+
+	/* toggle wdt gpio */
+	mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate;
+	if (mtx1_wdt_device.gstate)
+		gpio_direction_output(mtx1_wdt_device.gpio, 1);
+	else
+		gpio_direction_input(mtx1_wdt_device.gpio);
 
 	if (mtx1_wdt_device.queue && ticks)
 		mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
@@ -103,7 +104,8 @@ static void mtx1_wdt_start(void)
 	spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
 	if (!mtx1_wdt_device.queue) {
 		mtx1_wdt_device.queue = 1;
-		gpio_set_value(mtx1_wdt_device.gpio, 1);
+		mtx1_wdt_device.gstate = 1;
+		gpio_direction_output(mtx1_wdt_device.gpio, 1);
 		mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
 	}
 	mtx1_wdt_device.running++;
@@ -117,7 +119,8 @@ static int mtx1_wdt_stop(void)
 	spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
 	if (mtx1_wdt_device.queue) {
 		mtx1_wdt_device.queue = 0;
-		gpio_set_value(mtx1_wdt_device.gpio, 0);
+		mtx1_wdt_device.gstate = 0;
+		gpio_direction_output(mtx1_wdt_device.gpio, 0);
 	}
 	ticks = mtx1_wdt_device.default_ticks;
 	spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index f420f1ff7f13..4781f806701d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -4,21 +4,21 @@ obj-y += xenbus/
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_features.o := $(nostackp)
 
 obj-$(CONFIG_BLOCK) += biomerge.o
 obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
 obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
 obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
 obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
 obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
 obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
 obj-$(CONFIG_XENFS) += xenfs/
 obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
 obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
 obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
 obj-$(CONFIG_XEN_DOM0) += pci.o
 
 xen-evtchn-y := evtchn.o
 xen-gntdev-y := gntdev.o
 xen-gntalloc-y := gntalloc.o
 
 xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 043af8ad6b60..f54290baa3db 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -114,7 +114,6 @@ static void __balloon_append(struct page *page)
 	if (PageHighMem(page)) {
 		list_add_tail(&page->lru, &ballooned_pages);
 		balloon_stats.balloon_high++;
-		dec_totalhigh_pages();
 	} else {
 		list_add(&page->lru, &ballooned_pages);
 		balloon_stats.balloon_low++;
@@ -124,6 +123,8 @@ static void __balloon_append(struct page *page)
 static void balloon_append(struct page *page)
 {
 	__balloon_append(page);
+	if (PageHighMem(page))
+		dec_totalhigh_pages();
 	totalram_pages--;
 }
 
@@ -193,7 +194,7 @@ static enum bp_state update_schedule(enum bp_state state)
 		return BP_EAGAIN;
 }
 
-static unsigned long current_target(void)
+static long current_credit(void)
 {
 	unsigned long target = balloon_stats.target_pages;
 
@@ -202,7 +203,7 @@ static unsigned long current_target(void)
 		     balloon_stats.balloon_low +
 		     balloon_stats.balloon_high);
 
-	return target;
+	return target - balloon_stats.current_pages;
 }
 
 static enum bp_state increase_reservation(unsigned long nr_pages)
@@ -246,7 +247,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		set_phys_to_machine(pfn, frame_list[i]);
 
 		/* Link back into the page tables if not highmem. */
-		if (!xen_hvm_domain() && pfn < max_low_pfn) {
+		if (xen_pv_domain() && !PageHighMem(page)) {
 			int ret;
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)__va(pfn << PAGE_SHIFT),
@@ -293,7 +294,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 		scrub_page(page);
 
-		if (!xen_hvm_domain() && !PageHighMem(page)) {
+		if (xen_pv_domain() && !PageHighMem(page)) {
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)__va(pfn << PAGE_SHIFT),
 				__pte_ma(0), 0);
@@ -337,7 +338,7 @@ static void balloon_process(struct work_struct *work)
 	mutex_lock(&balloon_mutex);
 
 	do {
-		credit = current_target() - balloon_stats.current_pages;
+		credit = current_credit();
 
 		if (credit > 0)
 			state = increase_reservation(credit);
@@ -420,7 +421,7 @@ void free_xenballooned_pages(int nr_pages, struct page** pages)
 	}
 
 	/* The balloon may be too large now. Shrink it if needed. */
-	if (current_target() != balloon_stats.current_pages)
+	if (current_credit())
 		schedule_delayed_work(&balloon_worker, 0);
 
 	mutex_unlock(&balloon_mutex);
@@ -429,7 +430,7 @@ EXPORT_SYMBOL(free_xenballooned_pages);
 
 static int __init balloon_init(void)
 {
-	unsigned long pfn, nr_pages, extra_pfn_end;
+	unsigned long pfn, extra_pfn_end;
 	struct page *page;
 
 	if (!xen_domain())
@@ -437,11 +438,7 @@ static int __init balloon_init(void)
 
 	pr_info("xen/balloon: Initialising balloon driver.\n");
 
-	if (xen_pv_domain())
-		nr_pages = xen_start_info->nr_pages;
-	else
-		nr_pages = max_pfn;
-	balloon_stats.current_pages = min(nr_pages, max_pfn);
+	balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn;
 	balloon_stats.target_pages = balloon_stats.current_pages;
 	balloon_stats.balloon_low = 0;
 	balloon_stats.balloon_high = 0;
@@ -466,7 +463,7 @@ static int __init balloon_init(void)
466 pfn < extra_pfn_end; 463 pfn < extra_pfn_end;
467 pfn++) { 464 pfn++) {
468 page = pfn_to_page(pfn); 465 page = pfn_to_page(pfn);
469 /* totalram_pages doesn't include the boot-time 466 /* totalram_pages and totalhigh_pages do not include the boot-time
470 balloon extension, so don't subtract from it. */ 467 balloon extension, so don't subtract from it. */
471 __balloon_append(page); 468 __balloon_append(page);
472 } 469 }
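
/*
 * [Editor's illustration -- not part of the patch] The balloon.c hunks above
 * replace "compare target against current_pages at every call site" with one
 * signed credit value.  A minimal self-contained sketch of that idiom, with
 * stand-in names (sketch_*) for the driver's internals:
 */
struct sketch_balloon_stats {
	unsigned long current_pages;	/* pages the guest holds right now */
	unsigned long target_pages;	/* pages the toolstack asked for */
};

static struct sketch_balloon_stats sketch_stats;

/* Signed delta: positive means inflate the reservation, negative deflate. */
static long sketch_current_credit(void)
{
	return (long)sketch_stats.target_pages -
	       (long)sketch_stats.current_pages;
}

static void sketch_inflate(unsigned long n) { (void)n; /* increase_reservation() in the driver */ }
static void sketch_deflate(unsigned long n) { (void)n; /* decrease_reservation() in the driver */ }

/* One pass of the worker: a single signed value drives both directions,
 * and "credit != 0" is the whole rescheduling test. */
static void sketch_balloon_step(void)
{
	long credit = sketch_current_credit();

	if (credit > 0)
		sketch_inflate(credit);
	else if (credit < 0)
		sketch_deflate(-credit);
}
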
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 33167b43ac7e..3ff822b48145 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -101,6 +101,7 @@ struct irq_info
 			unsigned short gsi;
 			unsigned char vector;
 			unsigned char flags;
+			uint16_t domid;
 		} pirq;
 	} u;
 };
@@ -118,6 +119,8 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
 static struct irq_chip xen_dynamic_chip;
 static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;
+static void enable_dynirq(struct irq_data *data);
+static void disable_dynirq(struct irq_data *data);
 
 /* Get info for IRQ */
 static struct irq_info *info_for_irq(unsigned irq)
@@ -184,6 +187,7 @@ static void xen_irq_info_pirq_init(unsigned irq,
 				   unsigned short pirq,
 				   unsigned short gsi,
 				   unsigned short vector,
+				   uint16_t domid,
 				   unsigned char flags)
 {
 	struct irq_info *info = info_for_irq(irq);
@@ -193,6 +197,7 @@ static void xen_irq_info_pirq_init(unsigned irq,
 	info->u.pirq.pirq = pirq;
 	info->u.pirq.gsi = gsi;
 	info->u.pirq.vector = vector;
+	info->u.pirq.domid = domid;
 	info->u.pirq.flags = flags;
 }
 
@@ -473,16 +478,6 @@ static void xen_free_irq(unsigned irq)
 	irq_free_desc(irq);
 }
 
-static void pirq_unmask_notify(int irq)
-{
-	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };
-
-	if (unlikely(pirq_needs_eoi(irq))) {
-		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
-		WARN_ON(rc);
-	}
-}
-
 static void pirq_query_unmask(int irq)
 {
 	struct physdev_irq_status_query irq_status;
@@ -506,6 +501,29 @@ static bool probing_irq(int irq)
 	return desc && desc->action == NULL;
 }
 
+static void eoi_pirq(struct irq_data *data)
+{
+	int evtchn = evtchn_from_irq(data->irq);
+	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+	int rc = 0;
+
+	irq_move_irq(data);
+
+	if (VALID_EVTCHN(evtchn))
+		clear_evtchn(evtchn);
+
+	if (pirq_needs_eoi(data->irq)) {
+		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+		WARN_ON(rc);
+	}
+}
+
+static void mask_ack_pirq(struct irq_data *data)
+{
+	disable_dynirq(data);
+	eoi_pirq(data);
+}
+
 static unsigned int __startup_pirq(unsigned int irq)
 {
 	struct evtchn_bind_pirq bind_pirq;
@@ -539,7 +557,7 @@ static unsigned int __startup_pirq(unsigned int irq)
 
 out:
 	unmask_evtchn(evtchn);
-	pirq_unmask_notify(irq);
+	eoi_pirq(irq_get_irq_data(irq));
 
 	return 0;
 }
@@ -579,18 +597,7 @@ static void enable_pirq(struct irq_data *data)
 
 static void disable_pirq(struct irq_data *data)
 {
-}
-
-static void ack_pirq(struct irq_data *data)
-{
-	int evtchn = evtchn_from_irq(data->irq);
-
-	irq_move_irq(data);
-
-	if (VALID_EVTCHN(evtchn)) {
-		mask_evtchn(evtchn);
-		clear_evtchn(evtchn);
-	}
+	disable_dynirq(data);
 }
 
 static int find_irq_by_gsi(unsigned gsi)
@@ -639,9 +646,6 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 	if (irq < 0)
 		goto out;
 
-	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
-				      name);
-
 	irq_op.irq = irq;
 	irq_op.vector = 0;
 
@@ -655,9 +659,35 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 		goto out;
 	}
 
-	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
+	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
 			       shareable ? PIRQ_SHAREABLE : 0);
 
+	pirq_query_unmask(irq);
+	/* We try to use the handler with the appropriate semantic for the
+	 * type of interrupt: if the interrupt doesn't need an eoi
+	 * (pirq_needs_eoi returns false), we treat it like an edge
+	 * triggered interrupt so we use handle_edge_irq.
+	 * As a matter of fact this only happens when the corresponding
+	 * physical interrupt is edge triggered or an MSI.
+	 *
+	 * On the other hand if the interrupt needs an eoi (pirq_needs_eoi
+	 * returns true) we treat it like a level triggered interrupt so we
+	 * use handle_fasteoi_irq like the native code does for this kind of
+	 * interrupt.
+	 * Depending on the Xen version, pirq_needs_eoi might return true
+	 * not only for level triggered interrupts but for edge triggered
+	 * interrupts too. In any case Xen always honors the eoi mechanism,
+	 * not injecting any more pirqs of the same kind if the first one
+	 * hasn't received an eoi yet. Therefore using the fasteoi handler
+	 * is the right choice either way.
+	 */
+	if (pirq_needs_eoi(irq))
+		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+				handle_fasteoi_irq, name);
+	else
+		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+				handle_edge_irq, name);
+
 out:
 	spin_unlock(&irq_mapping_update_lock);
 
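
/*
 * [Editor's illustration -- not part of the patch] The comment above boils
 * down to one decision per PIRQ; a hypothetical helper making that decision
 * explicit.  handle_edge_irq acks (clears) the event before the handler runs,
 * so a new edge arriving mid-handler is latched and replayed, while
 * handle_fasteoi_irq runs the handler first and issues a single ->irq_eoi
 * afterwards, matching Xen's eoi protocol for level-like lines.
 */
#include <linux/irq.h>

static void sketch_wire_pirq(unsigned int irq, struct irq_chip *chip,
			     bool needs_eoi, const char *name)
{
	if (needs_eoi)	/* level-like: one EOI after the handler */
		irq_set_chip_and_handler_name(irq, chip,
					      handle_fasteoi_irq, name);
	else		/* edge-like: ack first so no edge is lost */
		irq_set_chip_and_handler_name(irq, chip,
					      handle_edge_irq, name);
}
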
@@ -680,7 +710,8 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
 }
 
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-			     int pirq, int vector, const char *name)
+			     int pirq, int vector, const char *name,
+			     domid_t domid)
 {
 	int irq, ret;
 
@@ -690,10 +721,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	if (irq == -1)
 		goto out;
 
-	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
+	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
 				      name);
 
-	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
+	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
 	ret = irq_set_msi_desc(irq, msidesc);
 	if (ret < 0)
 		goto error_irq;
@@ -722,9 +753,16 @@ int xen_destroy_irq(int irq)
 
 	if (xen_initial_domain()) {
 		unmap_irq.pirq = info->u.pirq.pirq;
-		unmap_irq.domid = DOMID_SELF;
+		unmap_irq.domid = info->u.pirq.domid;
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
-		if (rc) {
+		/* If another domain quits without making the pci_disable_msix
+		 * call, the Xen hypervisor takes care of freeing the PIRQs
+		 * (free_domain_pirqs).
+		 */
+		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
+			printk(KERN_INFO "domain %d does not have %d anymore\n",
+				info->u.pirq.domid, info->u.pirq.pirq);
+		else if (rc) {
 			printk(KERN_WARNING "unmap irq failed %d\n", rc);
 			goto out;
 		}
@@ -759,6 +797,12 @@ out:
 	return irq;
 }
 
+
+int xen_pirq_from_irq(unsigned irq)
+{
+	return pirq_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
 int bind_evtchn_to_irq(unsigned int evtchn)
 {
 	int irq;
@@ -773,7 +817,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 			goto out;
 
 		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
-					      handle_fasteoi_irq, "event");
+					      handle_edge_irq, "event");
 
 		xen_irq_info_evtchn_init(irq, evtchn);
 	}
@@ -1179,9 +1223,6 @@ static void __xen_evtchn_do_upcall(void)
 			port = (word_idx * BITS_PER_LONG) + bit_idx;
 			irq = evtchn_to_irq[port];
 
-			mask_evtchn(port);
-			clear_evtchn(port);
-
 			if (irq != -1) {
 				desc = irq_to_desc(irq);
 				if (desc)
@@ -1337,10 +1378,16 @@ static void ack_dynirq(struct irq_data *data)
 {
 	int evtchn = evtchn_from_irq(data->irq);
 
-	irq_move_masked_irq(data);
+	irq_move_irq(data);
 
 	if (VALID_EVTCHN(evtchn))
-		unmask_evtchn(evtchn);
+		clear_evtchn(evtchn);
+}
+
+static void mask_ack_dynirq(struct irq_data *data)
+{
+	disable_dynirq(data);
+	ack_dynirq(data);
 }
 
 static int retrigger_dynirq(struct irq_data *data)
@@ -1502,6 +1549,18 @@ void xen_poll_irq(int irq)
 	xen_poll_irq_timeout(irq, 0 /* no timeout */);
 }
 
+/* Check whether the IRQ line is shared with other guests. */
+int xen_test_irq_shared(int irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+	struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
+
+	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+		return 0;
+	return !(irq_status.flags & XENIRQSTAT_shared);
+}
+EXPORT_SYMBOL_GPL(xen_test_irq_shared);
+
 void xen_irq_resume(void)
 {
 	unsigned int cpu, evtchn;
@@ -1535,7 +1594,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.irq_mask		= disable_dynirq,
 	.irq_unmask		= enable_dynirq,
 
-	.irq_eoi		= ack_dynirq,
+	.irq_ack		= ack_dynirq,
+	.irq_mask_ack		= mask_ack_dynirq,
+
 	.irq_set_affinity	= set_affinity_irq,
 	.irq_retrigger		= retrigger_dynirq,
 };
@@ -1545,14 +1606,15 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
 
 	.irq_startup		= startup_pirq,
 	.irq_shutdown		= shutdown_pirq,
-
 	.irq_enable		= enable_pirq,
-	.irq_unmask		= enable_pirq,
-
 	.irq_disable		= disable_pirq,
-	.irq_mask		= disable_pirq,
 
-	.irq_ack		= ack_pirq,
+	.irq_mask		= disable_dynirq,
+	.irq_unmask		= enable_dynirq,
+
+	.irq_ack		= eoi_pirq,
+	.irq_eoi		= eoi_pirq,
+	.irq_mask_ack		= mask_ack_pirq,
 
 	.irq_set_affinity	= set_affinity_irq,
 
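
/*
 * [Editor's illustration -- not part of the patch] Because either flow
 * handler may now be installed on the same chip, the chip must provide the
 * callback each one uses: handle_edge_irq calls ->irq_ack (or ->irq_mask_ack
 * when the line is disabled), handle_fasteoi_irq calls ->irq_eoi.  Routing
 * all of them through one EOI routine, as the hunk above does for
 * xen_pirq_chip, keeps the semantics identical whichever handler runs.
 * Names below are hypothetical stand-ins.
 */
#include <linux/irq.h>

static void sketch_eoi(struct irq_data *d)  { /* clear event + PHYSDEVOP_eoi */ }
static void sketch_mask(struct irq_data *d) { /* mask the event channel */ }

static void sketch_mask_ack(struct irq_data *d)
{
	sketch_mask(d);
	sketch_eoi(d);
}

static struct irq_chip sketch_pirq_chip = {
	.name		= "sketch-pirq",
	.irq_mask	= sketch_mask,
	.irq_ack	= sketch_eoi,		/* used by handle_edge_irq */
	.irq_eoi	= sketch_eoi,		/* used by handle_fasteoi_irq */
	.irq_mask_ack	= sketch_mask_ack,	/* masked-ack variant */
};
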
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index a7ffdfe19fc9..f6832f46aea4 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -427,6 +427,17 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
 	return 0;
 }
 
+static void gntalloc_vma_open(struct vm_area_struct *vma)
+{
+	struct gntalloc_gref *gref = vma->vm_private_data;
+	if (!gref)
+		return;
+
+	spin_lock(&gref_lock);
+	gref->users++;
+	spin_unlock(&gref_lock);
+}
+
 static void gntalloc_vma_close(struct vm_area_struct *vma)
 {
 	struct gntalloc_gref *gref = vma->vm_private_data;
@@ -441,6 +452,7 @@ static void gntalloc_vma_close(struct vm_area_struct *vma)
 }
 
 static struct vm_operations_struct gntalloc_vmops = {
+	.open = gntalloc_vma_open,
 	.close = gntalloc_vma_close,
 };
 
@@ -471,8 +483,6 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_private_data = gref;
 
 	vma->vm_flags |= VM_RESERVED;
-	vma->vm_flags |= VM_DONTCOPY;
-	vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP;
 
 	vma->vm_ops = &gntalloc_vmops;
 
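
/*
 * [Editor's illustration -- not part of the patch] Why gntalloc (and gntdev
 * below) grow a .open handler: the mm core calls vm_ops->open() every time a
 * VMA is duplicated (fork) or split, and ->close() on every teardown, so a
 * reference taken at mmap time must be re-taken in open() to stay balanced.
 * A minimal sketch with hypothetical names:
 */
#include <linux/mm.h>
#include <linux/spinlock.h>

struct sketch_gref {
	int users;
	spinlock_t lock;
};

static void sketch_vma_open(struct vm_area_struct *vma)
{
	struct sketch_gref *gref = vma->vm_private_data;

	if (!gref)
		return;
	spin_lock(&gref->lock);
	gref->users++;		/* one more VMA references gref */
	spin_unlock(&gref->lock);
}

static void sketch_vma_close(struct vm_area_struct *vma)
{
	struct sketch_gref *gref = vma->vm_private_data;

	if (!gref)
		return;
	spin_lock(&gref->lock);
	gref->users--;		/* balanced with open(); free at zero */
	spin_unlock(&gref->lock);
}

static const struct vm_operations_struct sketch_vmops = {
	.open  = sketch_vma_open,
	.close = sketch_vma_close,
};
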
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index b0f9e8fb0052..f914b26cf0c2 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -330,17 +330,26 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
 
 /* ------------------------------------------------------------------ */
 
+static void gntdev_vma_open(struct vm_area_struct *vma)
+{
+	struct grant_map *map = vma->vm_private_data;
+
+	pr_debug("gntdev_vma_open %p\n", vma);
+	atomic_inc(&map->users);
+}
+
 static void gntdev_vma_close(struct vm_area_struct *vma)
 {
 	struct grant_map *map = vma->vm_private_data;
 
-	pr_debug("close %p\n", vma);
+	pr_debug("gntdev_vma_close %p\n", vma);
 	map->vma = NULL;
 	vma->vm_private_data = NULL;
 	gntdev_put_map(map);
 }
 
 static struct vm_operations_struct gntdev_vmops = {
+	.open = gntdev_vma_open,
 	.close = gntdev_vma_close,
 };
 
@@ -652,7 +661,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 
 	vma->vm_ops = &gntdev_vmops;
 
-	vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
+	vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
+
+	if (use_ptemod)
+		vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP;
 
 	vma->vm_private_data = map;
 
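
/*
 * [Editor's illustration -- not part of the patch] The gntdev hunk above
 * splits the VMA flags: VM_DONTCOPY/VM_PFNMAP only make sense when the
 * mapping rewrites PTEs in place (use_ptemod), and dropping them otherwise
 * is what lets such a mapping survive fork() -- which in turn is why the
 * new gntdev_vma_open() reference is needed.  As a sketch:
 */
#include <linux/mm.h>

static void sketch_set_grant_vma_flags(struct vm_area_struct *vma, bool ptemod)
{
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;	/* never expand or swap */
	if (ptemod)	/* PTE-rewriting mappings must not be copied on fork */
		vma->vm_flags |= VM_DONTCOPY | VM_PFNMAP;
}
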
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 3745a318defc..fd725cde6ad1 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -466,13 +466,30 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		if (map_ops[i].status)
 			continue;
 
-		/* m2p override only supported for GNTMAP_contains_pte mappings */
-		if (!(map_ops[i].flags & GNTMAP_contains_pte))
-			continue;
-		pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
+		if (map_ops[i].flags & GNTMAP_contains_pte) {
+			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
 				(map_ops[i].host_addr & ~PAGE_MASK));
-		mfn = pte_mfn(*pte);
-		ret = m2p_add_override(mfn, pages[i]);
+			mfn = pte_mfn(*pte);
+		} else {
+			/* If you really wanted to do this:
+			 * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
+			 *
+			 * The reason we do not implement it is because on the
+			 * unmap path (gnttab_unmap_refs) we have no means of
+			 * checking whether the page is !GNTMAP_contains_pte.
+			 *
+			 * That is without some extra data-structure to carry
+			 * the struct page, bool clear_pte, and list_head next
+			 * tuples and deal with allocation/deallocation, etc.
+			 *
+			 * The users of this API set the GNTMAP_contains_pte
+			 * flag, so let's just return not supported until it
+			 * becomes necessary to implement.
+			 */
+			return -EOPNOTSUPP;
+		}
+		ret = m2p_add_override(mfn, pages[i],
+				       map_ops[i].flags & GNTMAP_contains_pte);
 		if (ret)
 			return ret;
 	}
@@ -494,7 +511,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 		return ret;
 
 	for (i = 0; i < count; i++) {
-		ret = m2p_remove_override(pages[i]);
+		ret = m2p_remove_override(pages[i], true /* clear the PTE */);
 		if (ret)
 			return ret;
 	}
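
/*
 * [Editor's illustration -- not part of the patch] The gnttab_map_refs hunk
 * turns a silent "skip what we can't handle" into an explicit capability
 * check: only the mapping flavour the unmap path can undo is accepted, and
 * anything else fails loudly.  The helper below is a hypothetical reduction
 * of that pattern:
 */
#include <linux/errno.h>

static int sketch_add_override(bool contains_pte)
{
	if (!contains_pte)
		return -EOPNOTSUPP;	/* unmap side cannot distinguish this case */
	/* ... the PTE-based m2p override would happen here ... */
	return 0;
}
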
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index a2eee574784e..0b5366b5be20 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -70,12 +70,7 @@ static int xen_suspend(void *data)
 
 	BUG_ON(!irqs_disabled());
 
-	err = sysdev_suspend(PMSG_FREEZE);
-	if (!err) {
-		err = syscore_suspend();
-		if (err)
-			sysdev_resume();
-	}
+	err = syscore_suspend();
 	if (err) {
 		printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n",
 			err);
@@ -102,7 +97,6 @@ static int xen_suspend(void *data)
 	}
 
 	syscore_resume();
-	sysdev_resume();
 
 	return 0;
 }
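
/*
 * [Editor's illustration -- not part of the patch] With the sysdev layer
 * gone, syscore_suspend() is the single late-suspend step and
 * syscore_resume() its only counterpart, so the nested unwinding the hunk
 * removes is no longer needed.  The resulting shape, as a sketch:
 */
#include <linux/syscore_ops.h>

static int sketch_suspend_cycle(void)
{
	int err = syscore_suspend();	/* last step, IRQs already disabled */

	if (err)
		return err;		/* nothing extra left to unwind */

	/* ... the hypervisor suspend hypercall would go here ... */

	syscore_resume();		/* the one mirror of syscore_suspend() */
	return 0;
}
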
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 60f1827a32cb..1e0fe01eb670 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -215,7 +215,7 @@ static struct attribute_group xen_compilation_group = {
 	.attrs = xen_compile_attrs,
 };
 
-int __init static xen_compilation_init(void)
+static int __init xen_compilation_init(void)
 {
 	return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
 }