Diffstat (limited to 'drivers')
 drivers/acpi/pci_bind.c                  |   24
 drivers/acpi/processor_idle.c            |    8
 drivers/acpi/processor_perflib.c         |   12
 drivers/acpi/processor_throttling.c      |    2
 drivers/acpi/video.c                     |   18
 drivers/base/bus.c                       |    4
 drivers/base/core.c                      |    5
 drivers/base/driver.c                    |    4
 drivers/base/power/main.c                |    4
 drivers/block/xen-blkfront.c             |   10
 drivers/cdrom/viocd.c                    |    2
 drivers/char/ipmi/ipmi_msghandler.c      |   13
 drivers/char/random.c                    |    2
 drivers/char/tpm/tpm_bios.c              |    3
 drivers/cpufreq/cpufreq.c                |    4
 drivers/cpufreq/cpufreq_conservative.c   |    5
 drivers/cpufreq/cpufreq_ondemand.c       |    5
 drivers/crypto/ixp4xx_crypto.c           |   33
 drivers/crypto/padlock-aes.c             |    2
 drivers/dma/fsldma.c                     |   71
 drivers/dma/ioat_dma.c                   |    2
 drivers/edac/Kconfig                     |    8
 drivers/edac/Makefile                    |    2
 drivers/edac/amd8111_edac.c              |    4
 drivers/edac/amd8131_edac.c              |    2
 drivers/gpu/drm/Kconfig                  |   14
 drivers/gpu/drm/drm_bufs.c               |    8
 drivers/gpu/drm/drm_drv.c                |    2
 drivers/gpu/drm/i915/i915_drv.h          |    3
 drivers/gpu/drm/i915/i915_gem.c          |   65
 drivers/gpu/drm/i915/i915_gem_tiling.c   |   14
 drivers/gpu/drm/i915/i915_reg.h          |   20
 drivers/gpu/drm/i915/intel_bios.c        |  102
 drivers/gpu/drm/i915/intel_bios.h        |   17
 drivers/gpu/drm/i915/intel_crt.c         |  149
 drivers/gpu/drm/i915/intel_display.c     |   26
 drivers/gpu/drm/i915/intel_lvds.c        |    4
 drivers/gpu/drm/i915/intel_sdvo.c        |  137
 drivers/hwmon/lm78.c                     |    2
 drivers/ide/ide-io.c                     |    4
 drivers/ide/ide-iops.c                   |   21
 drivers/ide/ide-lib.c                    |   27
 drivers/ide/ide-pci-generic.c            |   11
 drivers/ide/ide-probe.c                  |    9
 drivers/ide/via82cxxx.c                  |    2
 drivers/idle/i7300_idle.c                |    6
 drivers/input/input.c                    |    1
 drivers/input/serio/ambakmi.c            |    2
 drivers/input/serio/libps2.c             |    2
 drivers/input/touchscreen/ucb1400_ts.c   |    2
 drivers/lguest/x86/core.c                |   19
 drivers/md/bitmap.c                      |   13
 drivers/md/md.c                          |   31
 drivers/md/raid5.c                       |    6
 drivers/mfd/pcf50633-core.c              |    2
 drivers/mfd/wm8350-core.c                |    8
 drivers/misc/enclosure.c                 |    6
 drivers/mmc/host/mmci.c                  |    2
 drivers/mtd/devices/mtd_dataflash.c      |    2
 drivers/mtd/nand/mxc_nand.c              |   43
 drivers/oprofile/cpu_buffer.c            |    8
 drivers/parport/parport_gsc.c            |    4
 drivers/pci/hotplug/acpiphp.h            |    1
 drivers/pci/hotplug/acpiphp_glue.c       |   63
 drivers/pci/pci.c                        |    3
 drivers/regulator/da903x.c               |    2
 drivers/rtc/rtc-pl030.c                  |    2
 drivers/rtc/rtc-pl031.c                  |    2
 drivers/scsi/3w-9xxx.c                   |    3
 drivers/scsi/3w-xxxx.c                   |    5
 drivers/scsi/3w-xxxx.h                   |    2
 drivers/scsi/Kconfig                     |   11
 drivers/scsi/Makefile                    |    1
 drivers/scsi/fnic/Makefile               |   15
 drivers/scsi/fnic/cq_desc.h              |   78
 drivers/scsi/fnic/cq_enet_desc.h         |  167
 drivers/scsi/fnic/cq_exch_desc.h         |  182
 drivers/scsi/fnic/fcpio.h                |  780
 drivers/scsi/fnic/fnic.h                 |  265
 drivers/scsi/fnic/fnic_attrs.c           |   56
 drivers/scsi/fnic/fnic_fcs.c             |  742
 drivers/scsi/fnic/fnic_io.h              |   67
 drivers/scsi/fnic/fnic_isr.c             |  332
 drivers/scsi/fnic/fnic_main.c            |  942
 drivers/scsi/fnic/fnic_res.c             |  444
 drivers/scsi/fnic/fnic_res.h             |  197
 drivers/scsi/fnic/fnic_scsi.c            | 1850
 drivers/scsi/fnic/rq_enet_desc.h         |   58
 drivers/scsi/fnic/vnic_cq.c              |   85
 drivers/scsi/fnic/vnic_cq.h              |  121
 drivers/scsi/fnic/vnic_cq_copy.h         |   62
 drivers/scsi/fnic/vnic_dev.c             |  690
 drivers/scsi/fnic/vnic_dev.h             |  161
 drivers/scsi/fnic/vnic_devcmd.h          |  281
 drivers/scsi/fnic/vnic_intr.c            |   60
 drivers/scsi/fnic/vnic_intr.h            |  118
 drivers/scsi/fnic/vnic_nic.h             |   69
 drivers/scsi/fnic/vnic_resource.h        |   61
 drivers/scsi/fnic/vnic_rq.c              |  196
 drivers/scsi/fnic/vnic_rq.h              |  235
 drivers/scsi/fnic/vnic_scsi.h            |   99
 drivers/scsi/fnic/vnic_stats.h           |   68
 drivers/scsi/fnic/vnic_wq.c              |  182
 drivers/scsi/fnic/vnic_wq.h              |  175
 drivers/scsi/fnic/vnic_wq_copy.c         |  117
 drivers/scsi/fnic/vnic_wq_copy.h         |  128
 drivers/scsi/fnic/wq_enet_desc.h         |   96
 drivers/scsi/mpt2sas/mpt2sas_base.h      |    2
 drivers/scsi/scsi_scan.c                 |    1
 drivers/scsi/scsi_transport_iscsi.c      |    2
 drivers/serial/8250.c                    |   15
 drivers/serial/8250_gsc.c                |    4
 drivers/serial/amba-pl010.c              |    2
 drivers/serial/amba-pl011.c              |    2
 drivers/serial/icom.c                    |    2
 drivers/serial/mpc52xx_uart.c            |    2
 drivers/usb/Makefile                     |    1
 drivers/usb/class/cdc-acm.c              |    3
 drivers/usb/gadget/atmel_usba_udc.c      |    5
 drivers/usb/host/isp1760-hcd.c           |   24
 drivers/usb/serial/ftdi_sio.c            |    9
 drivers/usb/serial/usb-serial.c          |    1
 drivers/video/amba-clcd.c                |    2
 drivers/video/atmel_lcdfb.c              |   10
 drivers/video/omap/dispc.c               |   14
 drivers/video/omap/rfbi.c                |    8
 drivers/video/s3c-fb.c                   |   12
 drivers/video/sh_mobile_lcdcfb.c         |    7
 128 files changed, 10079 insertions(+), 366 deletions(-)
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 95650f83ce2e..bc46de3d967f 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -116,9 +116,6 @@ int acpi_pci_bind(struct acpi_device *device)
 	struct acpi_pci_data *pdata;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	acpi_handle handle;
-	struct pci_dev *dev;
-	struct pci_bus *bus;
-
 
 	if (!device || !device->parent)
 		return -EINVAL;
@@ -176,20 +173,9 @@ int acpi_pci_bind(struct acpi_device *device)
 	 * Locate matching device in PCI namespace. If it doesn't exist
 	 * this typically means that the device isn't currently inserted
 	 * (e.g. docking station, port replicator, etc.).
-	 * We cannot simply search the global pci device list, since
-	 * PCI devices are added to the global pci list when the root
-	 * bridge start ops are run, which may not have happened yet.
 	 */
-	bus = pci_find_bus(data->id.segment, data->id.bus);
-	if (bus) {
-		list_for_each_entry(dev, &bus->devices, bus_list) {
-			if (dev->devfn == PCI_DEVFN(data->id.device,
-					data->id.function)) {
-				data->dev = dev;
-				break;
-			}
-		}
-	}
+	data->dev = pci_get_slot(pdata->bus,
+				PCI_DEVFN(data->id.device, data->id.function));
 	if (!data->dev) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Device %04x:%02x:%02x.%d not present in PCI namespace\n",
@@ -259,9 +245,10 @@ int acpi_pci_bind(struct acpi_device *device)
 
 end:
 	kfree(buffer.pointer);
-	if (result)
+	if (result) {
+		pci_dev_put(data->dev);
 		kfree(data);
-
+	}
 	return result;
 }
 
@@ -303,6 +290,7 @@ static int acpi_pci_unbind(struct acpi_device *device)
 	if (data->dev->subordinate) {
 		acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
 	}
+	pci_dev_put(data->dev);
 	kfree(data);
 
 end:
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 72069ba5f1ed..10a2d913635a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -148,6 +148,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
 	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
 		return;
 
+	if (boot_cpu_has(X86_FEATURE_AMDC1E))
+		type = ACPI_STATE_C1;
+
 	/*
 	 * Check, if one of the previous states already marked the lapic
 	 * unstable
@@ -611,6 +614,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 		switch (cx->type) {
 		case ACPI_STATE_C1:
 			cx->valid = 1;
+			acpi_timer_check_state(i, pr, cx);
 			break;
 
 		case ACPI_STATE_C2:
@@ -830,11 +834,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
 	/* Do not access any ACPI IO ports in suspend path */
 	if (acpi_idle_suspend) {
-		acpi_safe_halt();
 		local_irq_enable();
+		cpu_relax();
 		return 0;
 	}
 
+	acpi_state_timer_broadcast(pr, cx, 1);
 	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
 	kt2 = ktime_get_real();
@@ -842,6 +847,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
 	local_irq_enable();
 	cx->usage++;
+	acpi_state_timer_broadcast(pr, cx, 0);
 
 	return idle_time;
 }
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cafb41000f6b..60e543d3234e 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -309,9 +309,15 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 				  (u32) px->bus_master_latency,
 				  (u32) px->control, (u32) px->status));
 
-		if (!px->core_frequency) {
-			printk(KERN_ERR PREFIX
-				    "Invalid _PSS data: freq is zero\n");
+		/*
+		 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
+		 */
+		if (!px->core_frequency ||
+		    ((u32)(px->core_frequency * 1000) !=
+		     (px->core_frequency * 1000))) {
+			printk(KERN_ERR FW_BUG PREFIX
+			       "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
+			       px->core_frequency);
 			result = -EFAULT;
 			kfree(pr->performance->states);
 			goto end;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 7f16f5f8e7d3..227543789ba9 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -840,7 +840,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 		state = acpi_get_throttling_state(pr, value);
 		if (state == -1) {
 			ACPI_WARNING((AE_INFO,
-				"Invalid throttling state, reset\n"));
+				"Invalid throttling state, reset"));
 			state = 0;
 			ret = acpi_processor_set_throttling(pr, state);
 			if (ret)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 810cca90ca7f..1bdfb37377e3 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -570,6 +570,22 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710Z"),
 		},
 	},
+	{
+	 .callback = video_set_bqc_offset,
+	 .ident = "eMachines E510",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "EMACHINES"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "eMachines E510"),
+		},
+	},
+	{
+	 .callback = video_set_bqc_offset,
+	 .ident = "Acer Aspire 5315",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
+		},
+	},
 	{}
 };
 
@@ -2334,7 +2350,7 @@ static int __init acpi_video_init(void)
 	return acpi_video_register();
 }
 
-void __exit acpi_video_exit(void)
+void acpi_video_exit(void)
 {
 
 	acpi_bus_unregister_driver(&acpi_video_bus);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index dc030f1f00f1..c6599618523e 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -700,8 +700,10 @@ int bus_add_driver(struct device_driver *drv)
 	}
 
 	kobject_uevent(&priv->kobj, KOBJ_ADD);
-	return error;
+	return 0;
 out_unregister:
+	kfree(drv->p);
+	drv->p = NULL;
 	kobject_put(&priv->kobj);
 out_put_bus:
 	bus_put(bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4aa527b8a913..1977d4beb89e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -879,7 +879,7 @@ int device_add(struct device *dev)
 	}
 
 	if (!dev_name(dev))
-		goto done;
+		goto name_error;
 
 	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
 
@@ -978,6 +978,9 @@ done:
 	cleanup_device_parent(dev);
 	if (parent)
 		put_device(parent);
+name_error:
+	kfree(dev->p);
+	dev->p = NULL;
 	goto done;
 }
 
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index c51f11bb29ae..8ae0f63602e0 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -257,6 +257,10 @@ EXPORT_SYMBOL_GPL(driver_register);
  */
 void driver_unregister(struct device_driver *drv)
 {
+	if (!drv || !drv->p) {
+		WARN(1, "Unexpected driver unregister!\n");
+		return;
+	}
 	driver_remove_groups(drv, drv->groups);
 	bus_remove_driver(drv);
 }
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 69b4ddb7de3b..3e4bc699bc0f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -357,6 +357,7 @@ static void dpm_power_up(pm_message_t state)
 {
 	struct device *dev;
 
+	mutex_lock(&dpm_list_mtx);
 	list_for_each_entry(dev, &dpm_list, power.entry)
 		if (dev->power.status > DPM_OFF) {
 			int error;
@@ -366,6 +367,7 @@ static void dpm_power_up(pm_message_t state)
 			if (error)
 				pm_dev_err(dev, state, " early", error);
 		}
+	mutex_unlock(&dpm_list_mtx);
 }
 
 /**
@@ -614,6 +616,7 @@ int device_power_down(pm_message_t state)
 	int error = 0;
 
 	suspend_device_irqs();
+	mutex_lock(&dpm_list_mtx);
 	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
 		error = suspend_device_noirq(dev, state);
 		if (error) {
@@ -622,6 +625,7 @@ int device_power_down(pm_message_t state)
 		}
 		dev->power.status = DPM_OFF_IRQ;
 	}
+	mutex_unlock(&dpm_list_mtx);
 	if (error)
 		device_power_up(resume_event(state));
 	return error;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8f905089b72b..a6cbf7b808e6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -934,8 +934,6 @@ static void blkfront_closing(struct xenbus_device *dev)
 
 	spin_lock_irqsave(&blkif_io_lock, flags);
 
-	del_gendisk(info->gd);
-
 	/* No more blkif_request(). */
 	blk_stop_queue(info->rq);
 
@@ -949,6 +947,8 @@ static void blkfront_closing(struct xenbus_device *dev)
 	blk_cleanup_queue(info->rq);
 	info->rq = NULL;
 
+	del_gendisk(info->gd);
+
  out:
 	xenbus_frontend_closed(dev);
 }
@@ -977,8 +977,10 @@ static void backend_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateClosing:
-		if (info->gd == NULL)
-			xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
+		if (info->gd == NULL) {
+			xenbus_frontend_closed(dev);
+			break;
+		}
 		bd = bdget_disk(info->gd, 0);
 		if (bd == NULL)
 			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 13929356135c..9b1624e0ddeb 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -587,7 +587,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct device_node *node = vdev->dev.archdata.of_node;
 
 	deviceno = vdev->unit_address;
-	if (deviceno > VIOCD_MAX_CD)
+	if (deviceno >= VIOCD_MAX_CD)
 		return -ENODEV;
 	if (!node)
 		return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index aa83a0865ec1..09050797c76a 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2856,6 +2856,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		/* Assume a single IPMB channel at zero. */
 		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
 		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+		intf->curr_channel = IPMI_MAX_CHANNELS;
 	}
 
 	if (rv == 0)
@@ -3648,13 +3649,13 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
 	}
 
 	/*
-	** We need to make sure the channels have been initialized.
-	** The channel_handler routine will set the "curr_channel"
-	** equal to or greater than IPMI_MAX_CHANNELS when all the
-	** channels for this interface have been initialized.
+	 * We need to make sure the channels have been initialized.
+	 * The channel_handler routine will set the "curr_channel"
+	 * equal to or greater than IPMI_MAX_CHANNELS when all the
+	 * channels for this interface have been initialized.
 	 */
 	if (intf->curr_channel < IPMI_MAX_CHANNELS) {
-		requeue = 1; /* Just put the message back for now */
+		requeue = 0; /* Throw the message away */
 		goto out;
 	}
 
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b2ced39d76b2..8c7444857a4b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1673,7 +1673,7 @@ unsigned int get_random_int(void)
 	int ret;
 
 	keyptr = get_keyptr();
-	hash[0] += current->pid + jiffies + get_cycles() + (int)(long)&ret;
+	hash[0] += current->pid + jiffies + get_cycles();
 
 	ret = half_md4_transform(hash, keyptr->secret);
 	put_cpu_var(get_random_int_hash);
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index ed306eb1057f..0c2f55a38b95 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -212,7 +212,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
 			  unsigned char * event_entry)
 {
 	const char *name = "";
-	char data[40] = "";
+	/* 41 so there is room for 40 data and 1 nul */
+	char data[41] = "";
 	int i, n_len = 0, d_len = 0;
 	struct tcpa_pc_event *pc_event;
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d270e8eb3e67..47d2ad0ae079 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1070,11 +1070,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
+	unlock_policy_rwsem_write(cpu);
+
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
-	unlock_policy_rwsem_write(cpu);
-
 	kobject_put(&data->kobj);
 
 	/* we need to make sure that the underlying kobj is actually
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 2ecd95e4ab1a..7a74d175287b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -91,6 +91,9 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
+ * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
+ * would deadlock with cancel_delayed_work_sync(), which is needed for proper
+ * raceless workqueue teardown.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -542,7 +545,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
 	dbs_info->enable = 0;
-	cancel_delayed_work(&dbs_info->work);
+	cancel_delayed_work_sync(&dbs_info->work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 338f428a15b7..e741c339df76 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -98,6 +98,9 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
+ * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
+ * would deadlock with cancel_delayed_work_sync(), which is needed for proper
+ * raceless workqueue teardown.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -562,7 +565,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
 	dbs_info->enable = 0;
-	cancel_delayed_work(&dbs_info->work);
+	cancel_delayed_work_sync(&dbs_info->work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f9f05d7a707d..6c6656d3b1e2 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -415,6 +415,7 @@ static void crypto_done_action(unsigned long arg)
 static int init_ixp_crypto(void)
 {
 	int ret = -ENODEV;
+	u32 msg[2] = { 0, 0 };
 
 	if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
 				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
@@ -426,9 +427,35 @@ static int init_ixp_crypto(void)
 		return ret;
 
 	if (!npe_running(npe_c)) {
-		npe_load_firmware(npe_c, npe_name(npe_c), dev);
+		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
+		if (ret) {
+			return ret;
+		}
+		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
+	} else {
+		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
+
+		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
 	}
 
+	switch ((msg[1]>>16) & 0xff) {
+	case 3:
+		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
+			npe_name(npe_c));
+		support_aes = 0;
+		break;
+	case 4:
+	case 5:
+		support_aes = 1;
+		break;
+	default:
+		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
+			npe_name(npe_c));
+		return -ENODEV;
+	}
 	/* buffer_pool will also be used to sometimes store the hmac,
 	 * so assure it is large enough
 	 */
@@ -459,6 +486,10 @@ static int init_ixp_crypto(void)
 
 	qmgr_enable_irq(RECV_QID);
 	return 0;
+
+npe_error:
+	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
+	ret = -EIO;
 err:
 	if (ctx_pool)
 		dma_pool_destroy(ctx_pool);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 3f0fdd18255d..856b3cc25583 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes-all");
+MODULE_ALIAS("aes");
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index da8a8ed9e411..f18d1bde0439 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -179,9 +179,14 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
 static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
 			struct fsl_desc_sw *desc)
 {
+	u64 snoop_bits;
+
+	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+		? FSL_DMA_SNEN : 0;
+
 	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
-		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
-		64);
+		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+			| snoop_bits, 64);
 }
 
 static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
@@ -313,8 +318,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+	struct fsl_desc_sw *desc;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
@@ -322,14 +327,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	cookie = fsl_chan->common.cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	desc->async_tx.cookie = cookie;
-	fsl_chan->common.cookie = desc->async_tx.cookie;
+	list_for_each_entry(desc, &tx->tx_list, node) {
+		cookie++;
+		if (cookie < 0)
+			cookie = 1;
 
-	append_ld_queue(fsl_chan, desc);
-	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+		desc->async_tx.cookie = cookie;
+	}
+
+	fsl_chan->common.cookie = cookie;
+	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
+	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 
@@ -454,8 +462,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 {
 	struct fsl_dma_chan *fsl_chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
+	struct list_head *list;
 	size_t copy;
-	LIST_HEAD(link_chain);
 
 	if (!chan)
 		return NULL;
@@ -472,7 +480,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		if (!new) {
 			dev_err(fsl_chan->dev,
 					"No free memory for link descriptor\n");
-			return NULL;
+			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
 		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
@@ -507,7 +515,19 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
 
-	return first ? &first->async_tx : NULL;
+	return &first->async_tx;
+
+fail:
+	if (!first)
+		return NULL;
+
+	list = &first->async_tx.tx_list;
+	list_for_each_entry_safe_reverse(new, prev, list, node) {
+		list_del(&new->node);
+		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+	}
+
+	return NULL;
 }
 
 /**
@@ -598,15 +618,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 	dma_addr_t next_dest_addr;
 	unsigned long flags;
 
+	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
 	if (!dma_is_idle(fsl_chan))
-		return;
+		goto out_unlock;
 
 	dma_halt(fsl_chan);
 
 	/* If there are some link descriptors
 	 * not transfered in queue. We need to start it.
 	 */
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	/* Find the first un-transfer desciptor */
 	for (ld_node = fsl_chan->ld_queue.next;
@@ -617,19 +638,20 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 			fsl_chan->common.cookie) == DMA_SUCCESS);
 		ld_node = ld_node->next);
 
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-
 	if (ld_node != &fsl_chan->ld_queue) {
 		/* Get the ld start address from ld_queue */
 		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
-				(void *)next_dest_addr);
+		dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
+				(unsigned long long)next_dest_addr);
 		set_cdar(fsl_chan, next_dest_addr);
 		dma_start(fsl_chan);
 	} else {
 		set_cdar(fsl_chan, 0);
 		set_ndar(fsl_chan, 0);
 	}
+
+out_unlock:
+	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 }
 
 /**
@@ -734,8 +756,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
 		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
-			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
+		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+			(unsigned long long)get_cdar(fsl_chan),
+			(unsigned long long)get_ndar(fsl_chan));
 		stat &= ~FSL_DMA_SR_EOSI;
 		update_cookie = 1;
 	}
@@ -830,7 +853,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
 	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
-	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
+	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
 		dev_err(fdev->dev, "There is no %d channel!\n",
 				new_fsl_chan->id);
 		err = -EINVAL;
@@ -925,8 +948,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	}
 
 	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
-			"controller at %p...\n",
-			match->compatible, (void *)fdev->reg.start);
+			"controller at 0x%llx...\n",
+			match->compatible, (unsigned long long)fdev->reg.start);
 	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
 			- fdev->reg.start + 1);
 
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 1955ee8d6d20..a600fc0f7962 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -173,7 +173,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
 
 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL) == 0) {
+	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
 		device->common.chancnt--;
 	}
 #endif
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index e5f5c5a8ba6c..956982f8739b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -192,16 +192,20 @@ config EDAC_PPC4XX
 
 config EDAC_AMD8131
 	tristate "AMD8131 HyperTransport PCI-X Tunnel"
-	depends on EDAC_MM_EDAC && PCI
+	depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
 	help
 	  Support for error detection and correction on the
 	  AMD8131 HyperTransport PCI-X Tunnel chip.
+	  Note, add more Kconfig dependency if it's adopted
+	  on some machine other than Maple.
 
 config EDAC_AMD8111
 	tristate "AMD8111 HyperTransport I/O Hub"
-	depends on EDAC_MM_EDAC && PCI
+	depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
 	help
 	  Support for error detection and correction on the
 	  AMD8111 HyperTransport I/O Hub chip.
+	  Note, add more Kconfig dependency if it's adopted
+	  on some machine other than Maple.
 
 endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index a5fdcf02f591..59076819135d 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -35,3 +35,5 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
 obj-$(CONFIG_EDAC_MV64X60)		+= mv64x60_edac.o
 obj-$(CONFIG_EDAC_CELL)			+= cell_edac.o
 obj-$(CONFIG_EDAC_PPC4XX)		+= ppc4xx_edac.o
+obj-$(CONFIG_EDAC_AMD8111)		+= amd8111_edac.o
+obj-$(CONFIG_EDAC_AMD8131)		+= amd8131_edac.o
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 614692181120..2cb58ef743e0 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -389,7 +389,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
 	dev_info->edac_dev->dev = &dev_info->dev->dev;
 	dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
 	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
-	dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+	dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
 
 	if (edac_op_state == EDAC_OPSTATE_POLL)
 		dev_info->edac_dev->edac_check = dev_info->check;
@@ -473,7 +473,7 @@ static int amd8111_pci_probe(struct pci_dev *dev,
 	pci_info->edac_dev->dev = &pci_info->dev->dev;
 	pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
 	pci_info->edac_dev->ctl_name = pci_info->ctl_name;
-	pci_info->edac_dev->dev_name = pci_info->dev->dev.bus_id;
+	pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
 
 	if (edac_op_state == EDAC_OPSTATE_POLL)
 		pci_info->edac_dev->edac_check = pci_info->check;
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index c083b31cac5a..b432d60c622a 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -287,7 +287,7 @@ static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	dev_info->edac_dev->dev = &dev_info->dev->dev;
 	dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
 	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
-	dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+	dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
 
 	if (edac_op_state == EDAC_OPSTATE_POLL)
 		dev_info->edac_dev->edac_check = amd8131_chipset.check;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 4cd35d8fd799..f5d46e7199d4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -67,12 +67,18 @@ config DRM_I830
 	  will load the correct one.
 
 config DRM_I915
+	tristate "i915 driver"
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EMBEDDED
-	tristate "i915 driver"
+	# i915 depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you have a system that has Intel 830M, 845G,
 	  852GM, 855GM 865G or 915G integrated graphics.  If M is selected, the
@@ -84,12 +90,6 @@ config DRM_I915
 config DRM_I915_KMS
 	bool "Enable modesetting on intel by default"
 	depends on DRM_I915
-	# i915 KMS depends on ACPI_VIDEO when ACPI is enabled
-	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
-	select BACKLIGHT_CLASS_DEVICE if ACPI
-	select INPUT if ACPI
-	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you want kernel modesetting enabled by default,
 	  and you have a new enough userspace to support this. Running old
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 6d80d17f1e96..0411d912d82a 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -170,6 +170,14 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	}
 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
 		  (unsigned long long)map->offset, map->size, map->type);
+
+	/* page-align _DRM_SHM maps. They are allocated here so there is no security
+	 * hole created by that and it works around various broken drivers that use
+	 * a non-aligned quantity to map the SAREA. --BenH
+	 */
+	if (map->type == _DRM_SHM)
+		map->size = PAGE_ALIGN(map->size);
+
 	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index f01def16a669..019b7c578236 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -481,7 +481,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 	}
 	retcode = func(dev, kdata, file_priv);
 
-	if ((retcode == 0) && (cmd & IOC_OUT)) {
+	if (cmd & IOC_OUT) {
 		if (copy_to_user((void __user *)arg, kdata,
 				 _IOC_SIZE(cmd)) != 0)
 			retcode = -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9b149fe824c3..c431fa54bbb5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -180,7 +180,8 @@ typedef struct drm_i915_private {
 	int backlight_duty_cycle;  /* restore backlight to this value */
 	bool panel_wants_dither;
 	struct drm_display_mode *panel_fixed_mode;
-	struct drm_display_mode *vbt_mode; /* if any */
+	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
 	/* Feature bits from the VBIOS */
 	unsigned int int_tv_support:1;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b189b49c7602..670d12881468 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -349,7 +349,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -429,7 +429,7 @@ fail_put_user_pages:
 		SetPageDirty(user_pages[i]);
 		page_cache_release(user_pages[i]);
 	}
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -649,7 +649,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -719,7 +719,7 @@ out_unlock:
 out_unpin_pages:
 	for (i = 0; i < pinned_pages; i++)
 		page_cache_release(user_pages[i]);
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -824,7 +824,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -902,7 +902,7 @@ fail_unlock:
 fail_put_user_pages:
 	for (i = 0; i < pinned_pages; i++)
 		page_cache_release(user_pages[i]);
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -1145,7 +1145,14 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			mutex_unlock(&dev->struct_mutex);
 			return VM_FAULT_SIGBUS;
 		}
-		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, write);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return VM_FAULT_SIGBUS;
+		}
+
+		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 	}
 
 	/* Need a new fence register? */
@@ -1375,7 +1382,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 			mutex_unlock(&dev->struct_mutex);
 			return ret;
 		}
-		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 	}
 
 	drm_gem_object_unreference(obj);
@@ -1408,9 +1415,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	}
 	obj_priv->dirty = 0;
 
-	drm_free(obj_priv->pages,
-		 page_count * sizeof(struct page *),
-		 DRM_MEM_DRIVER);
+	drm_free_large(obj_priv->pages);
 	obj_priv->pages = NULL;
 }
 
@@ -2024,8 +2029,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	 */
 	page_count = obj->size / PAGE_SIZE;
 	BUG_ON(obj_priv->pages != NULL);
-	obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
-				     DRM_MEM_DRIVER);
+	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
 	if (obj_priv->pages == NULL) {
 		DRM_ERROR("Faled to allocate page list\n");
 		obj_priv->pages_refcount--;
@@ -2131,8 +2135,10 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 		return;
 	}
 
-	pitch_val = (obj_priv->stride / 128) - 1;
-	WARN_ON(pitch_val & ~0x0000000f);
+	pitch_val = obj_priv->stride / 128;
+	pitch_val = ffs(pitch_val) - 1;
+	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+
 	val = obj_priv->gtt_offset;
 	if (obj_priv->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2424,6 +2430,16 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	if (obj_priv->pages == NULL)
 		return;
 
+	/* XXX: The 865 in particular appears to be weird in how it handles
+	 * cache flushing.  We haven't figured it out, but the
+	 * clflush+agp_chipset_flush doesn't appear to successfully get the
+	 * data visible to the PGU, while wbinvd + agp_chipset_flush does.
+	 */
+	if (IS_I865G(obj->dev)) {
+		wbinvd();
+		return;
+	}
+
 	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
 
@@ -3111,7 +3127,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 		reloc_count += exec_list[i].relocation_count;
 	}
 
-	*relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
+	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
 	if (*relocs == NULL)
 		return -ENOMEM;
 
@@ -3125,8 +3141,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 					     exec_list[i].relocation_count *
 					     sizeof(**relocs));
 		if (ret != 0) {
-			drm_free(*relocs, reloc_count * sizeof(**relocs),
-				 DRM_MEM_DRIVER);
+			drm_free_large(*relocs);
 			*relocs = NULL;
 			return -EFAULT;
 		}
@@ -3165,7 +3180,7 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
 	}
 
 err:
-	drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
+	drm_free_large(relocs);
 
 	return ret;
 }
@@ -3198,10 +3213,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 	/* Copy in the exec list from userland */
-	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
-			       DRM_MEM_DRIVER);
-	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
-				 DRM_MEM_DRIVER);
+	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
+	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
 	if (exec_list == NULL || object_list == NULL) {
 		DRM_ERROR("Failed to allocate exec or object list "
 			  "for %d buffers\n",
@@ -3462,10 +3475,8 @@ err:
 	}
 
 pre_mutex_err:
-	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
-		 DRM_MEM_DRIVER);
-	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
-		 DRM_MEM_DRIVER);
+	drm_free_large(object_list);
+	drm_free_large(exec_list);
 	drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
 		 DRM_MEM_DRIVER);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 52a059354e83..540dd336e6ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -213,7 +213,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+	if (!IS_I9XX(dev) ||
+	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
 		tile_width = 128;
 	else
 		tile_width = 512;
@@ -225,11 +226,18 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
 			return false;
 	} else if (IS_I9XX(dev)) {
-		if (stride / tile_width > I830_FENCE_MAX_PITCH_VAL ||
+		uint32_t pitch_val = ffs(stride / tile_width) - 1;
+
+		/* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
+		 * instead of 4 (2KB) on 945s.
+		 */
+		if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
 		    size > (I830_FENCE_MAX_SIZE_VAL << 20))
 			return false;
 	} else {
-		if (stride / 128 > I830_FENCE_MAX_PITCH_VAL ||
+		uint32_t pitch_val = ffs(stride / tile_width) - 1;
+
+		if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
 		    size > (I830_FENCE_MAX_SIZE_VAL << 19))
 			return false;
 	}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15da44cf21b1..375569d01d01 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -190,7 +190,8 @@
 #define   I830_FENCE_SIZE_BITS(size)	((ffs((size) >> 19) - 1) << 8)
 #define   I830_FENCE_PITCH_SHIFT	4
 #define   I830_FENCE_REG_VALID		(1<<0)
-#define   I830_FENCE_MAX_PITCH_VAL	0x10
+#define   I915_FENCE_MAX_PITCH_VAL	0x10
+#define   I830_FENCE_MAX_PITCH_VAL	6
 #define   I830_FENCE_MAX_SIZE_VAL	(1<<8)
 
 #define   I915_FENCE_START_MASK	0x0ff00000
@@ -1410,9 +1411,25 @@
 
 /* Cursor A & B regs */
 #define CURACNTR		0x70080
+/* Old style CUR*CNTR flags (desktop 8xx) */
+#define   CURSOR_ENABLE		0x80000000
+#define   CURSOR_GAMMA_ENABLE	0x40000000
+#define   CURSOR_STRIDE_MASK	0x30000000
+#define   CURSOR_FORMAT_SHIFT	24
+#define   CURSOR_FORMAT_MASK	(0x07 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_2C	(0x00 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_3C	(0x01 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_4C	(0x02 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_ARGB	(0x04 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_XRGB	(0x05 << CURSOR_FORMAT_SHIFT)
+/* New style CUR*CNTR flags */
+#define   CURSOR_MODE		0x27
 #define   CURSOR_MODE_DISABLE	0x00
 #define   CURSOR_MODE_64_32B_AX	0x07
 #define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_PIPE_SELECT	(1 << 28)
+#define   MCURSOR_PIPE_A	0x00
+#define   MCURSOR_PIPE_B	(1 << 28)
 #define   MCURSOR_GAMMA_ENABLE	(1 << 26)
 #define CURABASE		0x70084
 #define CURAPOS			0x70088
@@ -1420,6 +1437,7 @@
 #define   CURSOR_POS_SIGN	0x8000
 #define   CURSOR_X_SHIFT	0
 #define   CURSOR_Y_SHIFT	16
+#define CURSIZE			0x700a0
 #define CURBCNTR		0x700c0
 #define CURBBASE		0x700c4
 #define CURBPOS			0x700c8
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fc28e2bbd542..9d78cff33b24 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -57,9 +57,43 @@ find_section(struct bdb_header *bdb, int section_id)
57 return NULL; 57 return NULL;
58} 58}
59 59
60/* Try to find panel data */
61static void 60static void
62parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) 61fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
62 struct lvds_dvo_timing *dvo_timing)
63{
64 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
65 dvo_timing->hactive_lo;
66 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
67 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
68 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
69 dvo_timing->hsync_pulse_width;
70 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
71 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
72
73 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
74 dvo_timing->vactive_lo;
75 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
76 dvo_timing->vsync_off;
77 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
78 dvo_timing->vsync_pulse_width;
79 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
80 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
81 panel_fixed_mode->clock = dvo_timing->clock * 10;
82 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
83
84 /* Some VBTs have bogus h/vtotal values */
85 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
86 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
87 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
88 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
89
90 drm_mode_set_name(panel_fixed_mode);
91}
92
93/* Try to find integrated panel data */
94static void
95parse_lfp_panel_data(struct drm_i915_private *dev_priv,
96 struct bdb_header *bdb)
63{ 97{
64 struct bdb_lvds_options *lvds_options; 98 struct bdb_lvds_options *lvds_options;
65 struct bdb_lvds_lfp_data *lvds_lfp_data; 99 struct bdb_lvds_lfp_data *lvds_lfp_data;
@@ -91,38 +125,45 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
91 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), 125 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
92 DRM_MEM_DRIVER); 126 DRM_MEM_DRIVER);
93 127
94 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | 128 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
95 dvo_timing->hactive_lo;
96 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
97 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
98 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
99 dvo_timing->hsync_pulse_width;
100 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
101 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
102 129
103 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | 130 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
104 dvo_timing->vactive_lo;
105 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
106 dvo_timing->vsync_off;
107 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
108 dvo_timing->vsync_pulse_width;
109 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
110 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
111 panel_fixed_mode->clock = dvo_timing->clock * 10;
112 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
113 131
114 /* Some VBTs have bogus h/vtotal values */ 132 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
115 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 133 drm_mode_debug_printmodeline(panel_fixed_mode);
116 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
117 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
118 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
119 134
120 drm_mode_set_name(panel_fixed_mode); 135 return;
136}
137
138/* Try to find sdvo panel data */
139static void
140parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
141 struct bdb_header *bdb)
142{
143 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
144 struct lvds_dvo_timing *dvo_timing;
145 struct drm_display_mode *panel_fixed_mode;
121 146
122 dev_priv->vbt_mode = panel_fixed_mode; 147 dev_priv->sdvo_lvds_vbt_mode = NULL;
123 148
124 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 149 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
125 drm_mode_debug_printmodeline(panel_fixed_mode); 150 if (!sdvo_lvds_options)
151 return;
152
153 dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
154 if (!dvo_timing)
155 return;
156
157 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
158 DRM_MEM_DRIVER);
159
160 if (!panel_fixed_mode)
161 return;
162
163 fill_detail_timing_data(panel_fixed_mode,
164 dvo_timing + sdvo_lvds_options->panel_type);
165
166 dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
126 167
127 return; 168 return;
128} 169}
@@ -199,7 +240,8 @@ intel_init_bios(struct drm_device *dev)
199 240
200 /* Grab useful general definitions */ 241 /* Grab useful general definitions */
201 parse_general_features(dev_priv, bdb); 242 parse_general_features(dev_priv, bdb);
202 parse_panel_data(dev_priv, bdb); 243 parse_lfp_panel_data(dev_priv, bdb);
244 parse_sdvo_panel_data(dev_priv, bdb);
203 245
204 pci_unmap_rom(pdev, bios); 246 pci_unmap_rom(pdev, bios);
205 247
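
fill_detail_timing_data() above factors the VBT DTD unpacking out so the LFP and SDVO panel paths can share it; each horizontal field arrives split across a low byte and a high byte. A sketch of the hi/lo reassembly (the struct here is an illustrative subset, not the real lvds_dvo_timing layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative subset of the VBT DVO timing fields, not the real layout. */
    struct dtd_subset {
        uint8_t hactive_lo, hactive_hi;
        uint8_t hblank_lo, hblank_hi;
    };

    int main(void)
    {
        struct dtd_subset t = { 0x00, 0x05, 0xa0, 0x01 }; /* 1280 active, 416 blank */

        int hdisplay = (t.hactive_hi << 8) | t.hactive_lo;
        int htotal   = hdisplay + ((t.hblank_hi << 8) | t.hblank_lo);

        printf("hdisplay=%d htotal=%d\n", hdisplay, htotal); /* 1280 1696 */
        return 0;
    }
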
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index de621aad85b5..8ca2cde15804 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -279,6 +279,23 @@ struct vch_bdb_22 {
279 struct vch_panel_data panels[16]; 279 struct vch_panel_data panels[16];
280} __attribute__((packed)); 280} __attribute__((packed));
281 281
282struct bdb_sdvo_lvds_options {
283 u8 panel_backlight;
284 u8 h40_set_panel_type;
285 u8 panel_type;
286 u8 ssc_clk_freq;
287 u16 als_low_trip;
288 u16 als_high_trip;
289 u8 sclalarcoeff_tab_row_num;
290 u8 sclalarcoeff_tab_row_size;
291 u8 coefficient[8];
292 u8 panel_misc_bits_1;
293 u8 panel_misc_bits_2;
294 u8 panel_misc_bits_3;
295 u8 panel_misc_bits_4;
296} __attribute__((packed));
297
298
282bool intel_init_bios(struct drm_device *dev); 299bool intel_init_bios(struct drm_device *dev);
283 300
284/* 301/*
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 19148c3df637..640f5158effc 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -198,9 +198,142 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
198 return intel_ddc_probe(intel_output); 198 return intel_ddc_probe(intel_output);
199} 199}
200 200
201static enum drm_connector_status
202intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
203{
204 struct drm_encoder *encoder = &intel_output->enc;
205 struct drm_device *dev = encoder->dev;
206 struct drm_i915_private *dev_priv = dev->dev_private;
207 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
208 uint32_t pipe = intel_crtc->pipe;
209 uint32_t save_bclrpat;
210 uint32_t save_vtotal;
211 uint32_t vtotal, vactive;
212 uint32_t vsample;
213 uint32_t vblank, vblank_start, vblank_end;
214 uint32_t dsl;
215 uint32_t bclrpat_reg;
216 uint32_t vtotal_reg;
217 uint32_t vblank_reg;
218 uint32_t vsync_reg;
219 uint32_t pipeconf_reg;
220 uint32_t pipe_dsl_reg;
221 uint8_t st00;
222 enum drm_connector_status status;
223
224 if (pipe == 0) {
225 bclrpat_reg = BCLRPAT_A;
226 vtotal_reg = VTOTAL_A;
227 vblank_reg = VBLANK_A;
228 vsync_reg = VSYNC_A;
229 pipeconf_reg = PIPEACONF;
230 pipe_dsl_reg = PIPEADSL;
231 } else {
232 bclrpat_reg = BCLRPAT_B;
233 vtotal_reg = VTOTAL_B;
234 vblank_reg = VBLANK_B;
235 vsync_reg = VSYNC_B;
236 pipeconf_reg = PIPEBCONF;
237 pipe_dsl_reg = PIPEBDSL;
238 }
239
240 save_bclrpat = I915_READ(bclrpat_reg);
241 save_vtotal = I915_READ(vtotal_reg);
242 vblank = I915_READ(vblank_reg);
243
244 vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
245 vactive = (save_vtotal & 0x7ff) + 1;
246
247 vblank_start = (vblank & 0xfff) + 1;
248 vblank_end = ((vblank >> 16) & 0xfff) + 1;
249
250 /* Set the border color to purple. */
251 I915_WRITE(bclrpat_reg, 0x500050);
252
253 if (IS_I9XX(dev)) {
254 uint32_t pipeconf = I915_READ(pipeconf_reg);
255 I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
 256 /* Wait for the next vblank to substitute
 257 * the border color for the color info */
258 intel_wait_for_vblank(dev);
259 st00 = I915_READ8(VGA_MSR_WRITE);
260 status = ((st00 & (1 << 4)) != 0) ?
261 connector_status_connected :
262 connector_status_disconnected;
263
264 I915_WRITE(pipeconf_reg, pipeconf);
265 } else {
266 bool restore_vblank = false;
267 int count, detect;
268
269 /*
270 * If there isn't any border, add some.
271 * Yes, this will flicker
272 */
273 if (vblank_start <= vactive && vblank_end >= vtotal) {
274 uint32_t vsync = I915_READ(vsync_reg);
275 uint32_t vsync_start = (vsync & 0xffff) + 1;
276
277 vblank_start = vsync_start;
278 I915_WRITE(vblank_reg,
279 (vblank_start - 1) |
280 ((vblank_end - 1) << 16));
281 restore_vblank = true;
282 }
283 /* sample in the vertical border, selecting the larger one */
284 if (vblank_start - vactive >= vtotal - vblank_end)
285 vsample = (vblank_start + vactive) >> 1;
286 else
287 vsample = (vtotal + vblank_end) >> 1;
288
289 /*
290 * Wait for the border to be displayed
291 */
292 while (I915_READ(pipe_dsl_reg) >= vactive)
293 ;
294 while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
295 ;
296 /*
297 * Watch ST00 for an entire scanline
298 */
299 detect = 0;
300 count = 0;
301 do {
302 count++;
303 /* Read the ST00 VGA status register */
304 st00 = I915_READ8(VGA_MSR_WRITE);
305 if (st00 & (1 << 4))
306 detect++;
307 } while ((I915_READ(pipe_dsl_reg) == dsl));
308
309 /* restore vblank if necessary */
310 if (restore_vblank)
311 I915_WRITE(vblank_reg, vblank);
312 /*
313 * If more than 3/4 of the scanline detected a monitor,
314 * then it is assumed to be present. This works even on i830,
315 * where there isn't any way to force the border color across
316 * the screen
317 */
318 status = detect * 4 > count * 3 ?
319 connector_status_connected :
320 connector_status_disconnected;
321 }
322
323 /* Restore previous settings */
324 I915_WRITE(bclrpat_reg, save_bclrpat);
325
326 return status;
327}
328
201static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 329static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
202{ 330{
203 struct drm_device *dev = connector->dev; 331 struct drm_device *dev = connector->dev;
332 struct intel_output *intel_output = to_intel_output(connector);
333 struct drm_encoder *encoder = &intel_output->enc;
334 struct drm_crtc *crtc;
335 int dpms_mode;
336 enum drm_connector_status status;
204 337
205 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { 338 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
206 if (intel_crt_detect_hotplug(connector)) 339 if (intel_crt_detect_hotplug(connector))
@@ -212,8 +345,20 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
212 if (intel_crt_detect_ddc(connector)) 345 if (intel_crt_detect_ddc(connector))
213 return connector_status_connected; 346 return connector_status_connected;
214 347
215 /* TODO use load detect */ 348 /* for pre-945g platforms use load detect */
216 return connector_status_unknown; 349 if (encoder->crtc && encoder->crtc->enabled) {
350 status = intel_crt_load_detect(encoder->crtc, intel_output);
351 } else {
352 crtc = intel_get_load_detect_pipe(intel_output,
353 NULL, &dpms_mode);
354 if (crtc) {
355 status = intel_crt_load_detect(crtc, intel_output);
356 intel_release_load_detect_pipe(intel_output, dpms_mode);
357 } else
358 status = connector_status_unknown;
359 }
360
361 return status;
217} 362}
218 363
219static void intel_crt_destroy(struct drm_connector *connector) 364static void intel_crt_destroy(struct drm_connector *connector)
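
In the non-9xx branch of intel_crt_load_detect() above, the ST00 sense bit is sampled once per loop iteration for a full scanline, and the monitor is reported present when more than 3/4 of the samples saw it; detect * 4 > count * 3 expresses that ratio in integer math. A small standalone check of the comparison:

    #include <stdbool.h>
    #include <stdio.h>

    /* True when detect/count > 3/4, kept in integer arithmetic. */
    static bool monitor_present(unsigned detect, unsigned count)
    {
        return detect * 4 > count * 3;
    }

    int main(void)
    {
        printf("%d\n", monitor_present(76, 100));   /* 1: 76 of 100 is over 3/4 */
        printf("%d\n", monitor_present(75, 100));   /* 0: exactly 3/4 fails */
        return 0;
    }
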
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3387cf32f385..c9d6f10ba92e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1357,7 +1357,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1357 int pipe = intel_crtc->pipe; 1357 int pipe = intel_crtc->pipe;
1358 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; 1358 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
1359 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; 1359 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
1360 uint32_t temp; 1360 uint32_t temp = I915_READ(control);
1361 size_t addr; 1361 size_t addr;
1362 int ret; 1362 int ret;
1363 1363
@@ -1366,7 +1366,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1366 /* if we want to turn off the cursor ignore width and height */ 1366 /* if we want to turn off the cursor ignore width and height */
1367 if (!handle) { 1367 if (!handle) {
1368 DRM_DEBUG("cursor off\n"); 1368 DRM_DEBUG("cursor off\n");
1369 temp = CURSOR_MODE_DISABLE; 1369 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
1370 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
1371 temp |= CURSOR_MODE_DISABLE;
1372 } else {
1373 temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
1374 }
1370 addr = 0; 1375 addr = 0;
1371 bo = NULL; 1376 bo = NULL;
1372 mutex_lock(&dev->struct_mutex); 1377 mutex_lock(&dev->struct_mutex);
@@ -1409,10 +1414,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1409 addr = obj_priv->phys_obj->handle->busaddr; 1414 addr = obj_priv->phys_obj->handle->busaddr;
1410 } 1415 }
1411 1416
1412 temp = 0; 1417 if (!IS_I9XX(dev))
1413 /* set the pipe for the cursor */ 1418 I915_WRITE(CURSIZE, (height << 12) | width);
1414 temp |= (pipe << 28); 1419
1415 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 1420 /* Hooray for CUR*CNTR differences */
1421 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
1422 temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
1423 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1424 temp |= (pipe << 28); /* Connect to correct pipe */
1425 } else {
1426 temp &= ~(CURSOR_FORMAT_MASK);
1427 temp |= CURSOR_ENABLE;
1428 temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
1429 }
1416 1430
1417 finish: 1431 finish:
1418 I915_WRITE(control, temp); 1432 I915_WRITE(control, temp);
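
The cursor hunk switches CUR*CNTR handling to a read-modify-write: temp now starts from I915_READ(control), and only the generation-specific mode, format, and pipe-select fields are cleared before the new ones are set, so unrelated bits survive. A tiny sketch of the masking, using the register values from i915_reg.h above (the readback value is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define CURSOR_MODE           0x27u
    #define CURSOR_MODE_64_32B_AX 0x07u
    #define MCURSOR_GAMMA_ENABLE  (1u << 26)
    #define MCURSOR_PIPE_SELECT   (1u << 28)

    int main(void)
    {
        uint32_t temp = 0x84000003u;    /* invented CURACNTR readback */
        uint32_t pipe = 1;              /* pipe B */

        /* Clear only the fields this path owns, keep everything else. */
        temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
        temp |= CURSOR_MODE_64_32B_AX | MCURSOR_GAMMA_ENABLE;
        temp |= pipe << 28;

        printf("0x%08x\n", temp);       /* 0x94000007 */
        return 0;
    }
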
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 439a86514993..53731f0ffcb5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -511,10 +511,10 @@ void intel_lvds_init(struct drm_device *dev)
511 } 511 }
512 512
513 /* Failed to get EDID, what about VBT? */ 513 /* Failed to get EDID, what about VBT? */
514 if (dev_priv->vbt_mode) { 514 if (dev_priv->lfp_lvds_vbt_mode) {
515 mutex_lock(&dev->mode_config.mutex); 515 mutex_lock(&dev->mode_config.mutex);
516 dev_priv->panel_fixed_mode = 516 dev_priv->panel_fixed_mode =
517 drm_mode_duplicate(dev, dev_priv->vbt_mode); 517 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
518 mutex_unlock(&dev->mode_config.mutex); 518 mutex_unlock(&dev->mode_config.mutex);
519 if (dev_priv->panel_fixed_mode) { 519 if (dev_priv->panel_fixed_mode) {
520 dev_priv->panel_fixed_mode->type |= 520 dev_priv->panel_fixed_mode->type |=
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9913651c1e17..f3ef6bfd8ffc 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -69,6 +69,10 @@ struct intel_sdvo_priv {
69 * This is set if we treat the device as HDMI, instead of DVI. 69 * This is set if we treat the device as HDMI, instead of DVI.
70 */ 70 */
71 bool is_hdmi; 71 bool is_hdmi;
72 /**
 73 * This is set if we detect the output of the SDVO device as LVDS.
74 */
75 bool is_lvds;
72 76
73 /** 77 /**
74 * Returned SDTV resolutions allowed for the current format, if the 78 * Returned SDTV resolutions allowed for the current format, if the
@@ -1398,10 +1402,8 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1398static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1402static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1399{ 1403{
1400 struct intel_output *intel_output = to_intel_output(connector); 1404 struct intel_output *intel_output = to_intel_output(connector);
1401 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1402 1405
1403 /* set the bus switch and get the modes */ 1406 /* set the bus switch and get the modes */
1404 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1405 intel_ddc_get_modes(intel_output); 1407 intel_ddc_get_modes(intel_output);
1406 1408
1407#if 0 1409#if 0
@@ -1543,6 +1545,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1543 } 1545 }
1544} 1546}
1545 1547
1548static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1549{
1550 struct intel_output *intel_output = to_intel_output(connector);
1551 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1552 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1553
1554 /*
1555 * Attempt to get the mode list from DDC.
1556 * Assume that the preferred modes are
1557 * arranged in priority order.
1558 */
1559 /* set the bus switch and get the modes */
1560 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1561 intel_ddc_get_modes(intel_output);
 1562 if (!list_empty(&connector->probed_modes))
1563 return;
1564
1565 /* Fetch modes from VBT */
1566 if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
1567 struct drm_display_mode *newmode;
1568 newmode = drm_mode_duplicate(connector->dev,
1569 dev_priv->sdvo_lvds_vbt_mode);
1570 if (newmode != NULL) {
1571 /* Guarantee the mode is preferred */
1572 newmode->type = (DRM_MODE_TYPE_PREFERRED |
1573 DRM_MODE_TYPE_DRIVER);
1574 drm_mode_probed_add(connector, newmode);
1575 }
1576 }
1577}
1578
1546static int intel_sdvo_get_modes(struct drm_connector *connector) 1579static int intel_sdvo_get_modes(struct drm_connector *connector)
1547{ 1580{
1548 struct intel_output *output = to_intel_output(connector); 1581 struct intel_output *output = to_intel_output(connector);
@@ -1550,6 +1583,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1550 1583
1551 if (sdvo_priv->is_tv) 1584 if (sdvo_priv->is_tv)
1552 intel_sdvo_get_tv_modes(connector); 1585 intel_sdvo_get_tv_modes(connector);
 1586 else if (sdvo_priv->is_lvds)
1587 intel_sdvo_get_lvds_modes(connector);
1553 else 1588 else
1554 intel_sdvo_get_ddc_modes(connector); 1589 intel_sdvo_get_ddc_modes(connector);
1555 1590
@@ -1564,6 +1599,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1564 1599
1565 if (intel_output->i2c_bus) 1600 if (intel_output->i2c_bus)
1566 intel_i2c_destroy(intel_output->i2c_bus); 1601 intel_i2c_destroy(intel_output->i2c_bus);
1602 if (intel_output->ddc_bus)
1603 intel_i2c_destroy(intel_output->ddc_bus);
1604
1567 drm_sysfs_connector_remove(connector); 1605 drm_sysfs_connector_remove(connector);
1568 drm_connector_cleanup(connector); 1606 drm_connector_cleanup(connector);
1569 kfree(intel_output); 1607 kfree(intel_output);
@@ -1660,12 +1698,56 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
1660 return true; 1698 return true;
1661} 1699}
1662 1700
1701static struct intel_output *
1702intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
1703{
1704 struct drm_device *dev = chan->drm_dev;
1705 struct drm_connector *connector;
1706 struct intel_output *intel_output = NULL;
1707
1708 list_for_each_entry(connector,
1709 &dev->mode_config.connector_list, head) {
1710 if (to_intel_output(connector)->ddc_bus == chan) {
1711 intel_output = to_intel_output(connector);
1712 break;
1713 }
1714 }
1715 return intel_output;
1716}
1717
1718static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
1719 struct i2c_msg msgs[], int num)
1720{
1721 struct intel_output *intel_output;
1722 struct intel_sdvo_priv *sdvo_priv;
1723 struct i2c_algo_bit_data *algo_data;
1724 struct i2c_algorithm *algo;
1725
1726 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
1727 intel_output =
1728 intel_sdvo_chan_to_intel_output(
1729 (struct intel_i2c_chan *)(algo_data->data));
1730 if (intel_output == NULL)
1731 return -EINVAL;
1732
1733 sdvo_priv = intel_output->dev_priv;
1734 algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo;
1735
1736 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1737 return algo->master_xfer(i2c_adap, msgs, num);
1738}
1739
1740static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
1741 .master_xfer = intel_sdvo_master_xfer,
1742};
1743
1663bool intel_sdvo_init(struct drm_device *dev, int output_device) 1744bool intel_sdvo_init(struct drm_device *dev, int output_device)
1664{ 1745{
1665 struct drm_connector *connector; 1746 struct drm_connector *connector;
1666 struct intel_output *intel_output; 1747 struct intel_output *intel_output;
1667 struct intel_sdvo_priv *sdvo_priv; 1748 struct intel_sdvo_priv *sdvo_priv;
1668 struct intel_i2c_chan *i2cbus = NULL; 1749 struct intel_i2c_chan *i2cbus = NULL;
1750 struct intel_i2c_chan *ddcbus = NULL;
1669 int connector_type; 1751 int connector_type;
1670 u8 ch[0x40]; 1752 u8 ch[0x40];
1671 int i; 1753 int i;
@@ -1676,17 +1758,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1676 return false; 1758 return false;
1677 } 1759 }
1678 1760
1679 connector = &intel_output->base;
1680
1681 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
1682 DRM_MODE_CONNECTOR_Unknown);
1683 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
1684 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 1761 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
1685 intel_output->type = INTEL_OUTPUT_SDVO; 1762 intel_output->type = INTEL_OUTPUT_SDVO;
1686 1763
1687 connector->interlace_allowed = 0;
1688 connector->doublescan_allowed = 0;
1689
1690 /* setup the DDC bus. */ 1764 /* setup the DDC bus. */
1691 if (output_device == SDVOB) 1765 if (output_device == SDVOB)
1692 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 1766 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
@@ -1694,7 +1768,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1694 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 1768 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
1695 1769
1696 if (!i2cbus) 1770 if (!i2cbus)
1697 goto err_connector; 1771 goto err_inteloutput;
1698 1772
1699 sdvo_priv->i2c_bus = i2cbus; 1773 sdvo_priv->i2c_bus = i2cbus;
1700 1774
@@ -1710,7 +1784,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1710 intel_output->i2c_bus = i2cbus; 1784 intel_output->i2c_bus = i2cbus;
1711 intel_output->dev_priv = sdvo_priv; 1785 intel_output->dev_priv = sdvo_priv;
1712 1786
1713
1714 /* Read the regs to test if we can talk to the device */ 1787 /* Read the regs to test if we can talk to the device */
1715 for (i = 0; i < 0x40; i++) { 1788 for (i = 0; i < 0x40; i++) {
1716 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { 1789 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
@@ -1720,6 +1793,22 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1720 } 1793 }
1721 } 1794 }
1722 1795
1796 /* setup the DDC bus. */
1797 if (output_device == SDVOB)
1798 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
1799 else
1800 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
1801
1802 if (ddcbus == NULL)
1803 goto err_i2c;
1804
1805 intel_sdvo_i2c_bit_algo.functionality =
1806 intel_output->i2c_bus->adapter.algo->functionality;
1807 ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
1808 intel_output->ddc_bus = ddcbus;
1809
 1810 /* By default, assume the SDVO output is not LVDS */
1811 sdvo_priv->is_lvds = false;
1723 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 1812 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
1724 1813
1725 if (sdvo_priv->caps.output_flags & 1814 if (sdvo_priv->caps.output_flags &
@@ -1729,7 +1818,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1729 else 1818 else
1730 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; 1819 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
1731 1820
1732 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1733 encoder_type = DRM_MODE_ENCODER_TMDS; 1821 encoder_type = DRM_MODE_ENCODER_TMDS;
1734 connector_type = DRM_MODE_CONNECTOR_DVID; 1822 connector_type = DRM_MODE_CONNECTOR_DVID;
1735 1823
@@ -1747,7 +1835,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1747 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) 1835 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
1748 { 1836 {
1749 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; 1837 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
1750 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1751 encoder_type = DRM_MODE_ENCODER_TVDAC; 1838 encoder_type = DRM_MODE_ENCODER_TVDAC;
1752 connector_type = DRM_MODE_CONNECTOR_SVIDEO; 1839 connector_type = DRM_MODE_CONNECTOR_SVIDEO;
1753 sdvo_priv->is_tv = true; 1840 sdvo_priv->is_tv = true;
@@ -1756,30 +1843,28 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1756 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) 1843 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
1757 { 1844 {
1758 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; 1845 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
1759 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1760 encoder_type = DRM_MODE_ENCODER_DAC; 1846 encoder_type = DRM_MODE_ENCODER_DAC;
1761 connector_type = DRM_MODE_CONNECTOR_VGA; 1847 connector_type = DRM_MODE_CONNECTOR_VGA;
1762 } 1848 }
1763 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) 1849 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
1764 { 1850 {
1765 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 1851 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
1766 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1767 encoder_type = DRM_MODE_ENCODER_DAC; 1852 encoder_type = DRM_MODE_ENCODER_DAC;
1768 connector_type = DRM_MODE_CONNECTOR_VGA; 1853 connector_type = DRM_MODE_CONNECTOR_VGA;
1769 } 1854 }
1770 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) 1855 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
1771 { 1856 {
1772 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 1857 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
1773 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1774 encoder_type = DRM_MODE_ENCODER_LVDS; 1858 encoder_type = DRM_MODE_ENCODER_LVDS;
1775 connector_type = DRM_MODE_CONNECTOR_LVDS; 1859 connector_type = DRM_MODE_CONNECTOR_LVDS;
1860 sdvo_priv->is_lvds = true;
1776 } 1861 }
1777 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) 1862 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
1778 { 1863 {
1779 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; 1864 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
1780 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1781 encoder_type = DRM_MODE_ENCODER_LVDS; 1865 encoder_type = DRM_MODE_ENCODER_LVDS;
1782 connector_type = DRM_MODE_CONNECTOR_LVDS; 1866 connector_type = DRM_MODE_CONNECTOR_LVDS;
1867 sdvo_priv->is_lvds = true;
1783 } 1868 }
1784 else 1869 else
1785 { 1870 {
@@ -1795,9 +1880,16 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1795 goto err_i2c; 1880 goto err_i2c;
1796 } 1881 }
1797 1882
1883 connector = &intel_output->base;
1884 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
1885 connector_type);
1886 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
1887 connector->interlace_allowed = 0;
1888 connector->doublescan_allowed = 0;
1889 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1890
1798 drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); 1891 drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type);
1799 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); 1892 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
1800 connector->connector_type = connector_type;
1801 1893
1802 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1894 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1803 drm_sysfs_connector_add(connector); 1895 drm_sysfs_connector_add(connector);
@@ -1829,14 +1921,13 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1829 sdvo_priv->caps.output_flags & 1921 sdvo_priv->caps.output_flags &
1830 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); 1922 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
1831 1923
1832 intel_output->ddc_bus = i2cbus;
1833
1834 return true; 1924 return true;
1835 1925
1836err_i2c: 1926err_i2c:
1927 if (ddcbus != NULL)
1928 intel_i2c_destroy(intel_output->ddc_bus);
1837 intel_i2c_destroy(intel_output->i2c_bus); 1929 intel_i2c_destroy(intel_output->i2c_bus);
1838err_connector: 1930err_inteloutput:
1839 drm_connector_cleanup(connector);
1840 kfree(intel_output); 1931 kfree(intel_output);
1841 1932
1842 return false; 1933 return false;
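
The SDVO rework gives each output a private DDC adapter whose master_xfer programs the control-bus switch before delegating to the underlying bit-banging algorithm, which is why intel_sdvo_get_ddc_modes() no longer switches the bus itself. A minimal sketch of that wrap-and-delegate pattern (plain function-pointer C, not the kernel's i2c_algorithm types):

    #include <stdio.h>

    struct xfer_ops {
        int (*master_xfer)(void *bus, const char *msg);
    };

    static int real_xfer(void *bus, const char *msg)
    {
        printf("xfer on %s: %s\n", (const char *)bus, msg);
        return 0;
    }

    static struct xfer_ops real_ops = { real_xfer };

    static int switching_xfer(void *bus, const char *msg)
    {
        /* Stand-in for the SDVO control-bus switch side effect. */
        printf("select DDC bus first\n");
        return real_ops.master_xfer(bus, msg);
    }

    static struct xfer_ops switching_ops = { switching_xfer };

    int main(void)
    {
        return switching_ops.master_xfer((void *)"ddc0", "read EDID");
    }
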
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index b5e3b2851698..a1787fdf5b9f 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -182,7 +182,7 @@ static struct platform_driver lm78_isa_driver = {
182 .name = "lm78", 182 .name = "lm78",
183 }, 183 },
184 .probe = lm78_isa_probe, 184 .probe = lm78_isa_probe,
185 .remove = lm78_isa_remove, 185 .remove = __devexit_p(lm78_isa_remove),
186}; 186};
187 187
188 188
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 35dc38d3b2c5..6415a2e2ba87 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -696,7 +696,7 @@ void ide_timer_expiry (unsigned long data)
696 } 696 }
697 spin_lock_irq(&hwif->lock); 697 spin_lock_irq(&hwif->lock);
698 enable_irq(hwif->irq); 698 enable_irq(hwif->irq);
699 if (startstop == ide_stopped) { 699 if (startstop == ide_stopped && hwif->polling == 0) {
700 ide_unlock_port(hwif); 700 ide_unlock_port(hwif);
701 plug_device = 1; 701 plug_device = 1;
702 } 702 }
@@ -868,7 +868,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
868 * same irq as is currently being serviced here, and Linux 868 * same irq as is currently being serviced here, and Linux
869 * won't allow another of the same (on any CPU) until we return. 869 * won't allow another of the same (on any CPU) until we return.
870 */ 870 */
871 if (startstop == ide_stopped) { 871 if (startstop == ide_stopped && hwif->polling == 0) {
872 BUG_ON(hwif->handler); 872 BUG_ON(hwif->handler);
873 ide_unlock_port(hwif); 873 ide_unlock_port(hwif);
874 plug_device = 1; 874 plug_device = 1;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c19a221b1e18..06fe002116ec 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -206,8 +206,6 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
206 206
207/* 207/*
208 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid. 208 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
209 * We list them here and depend on the device side cable detection for them.
210 *
211 * Some optical devices with buggy firmware have the same problem. 209 * Some optical devices with buggy firmware have the same problem.
212 */ 210 */
213static const struct drive_list_entry ivb_list[] = { 211static const struct drive_list_entry ivb_list[] = {
@@ -251,10 +249,25 @@ u8 eighty_ninty_three(ide_drive_t *drive)
251 * - force bit13 (80c cable present) check also for !ivb devices 249 * - force bit13 (80c cable present) check also for !ivb devices
252 * (unless the slave device is pre-ATA3) 250 * (unless the slave device is pre-ATA3)
253 */ 251 */
254 if ((id[ATA_ID_HW_CONFIG] & 0x4000) || 252 if (id[ATA_ID_HW_CONFIG] & 0x4000)
255 (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
256 return 1; 253 return 1;
257 254
255 if (ivb) {
256 const char *model = (char *)&id[ATA_ID_PROD];
257
258 if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
259 /*
260 * These ATAPI devices always report 80c cable
261 * so we have to depend on the host in this case.
262 */
263 if (hwif->cbl == ATA_CBL_PATA80)
264 return 1;
265 } else {
266 /* Depend on the device side cable detection. */
267 if (id[ATA_ID_HW_CONFIG] & 0x2000)
268 return 1;
269 }
270 }
258no_80w: 271no_80w:
259 if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED) 272 if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
260 return 0; 273 return 0;
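
The eighty_ninty_three() change splits the IVB handling: most devices are still trusted via bit 13 of the HW-config word, but TSSTcorp SH-S202 drives always claim an 80-conductor cable, so only host-side detection counts for them. A compact userspace restatement of the decision (bit values copied from the hunk; the model strings are examples):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define HWCFG_BIT14 0x4000  /* valid word / 80c detected */
    #define HWCFG_BIT13 0x2000  /* device-side 80c detection */

    static bool eighty_wire(unsigned hwcfg, bool ivb, const char *model,
                            bool host_sees_80c)
    {
        if (hwcfg & HWCFG_BIT14)
            return true;
        if (ivb) {
            if (strstr(model, "TSSTcorp CDDVDW SH-S202"))
                return host_sees_80c;           /* device report is bogus */
            return (hwcfg & HWCFG_BIT13) != 0;  /* trust the device */
        }
        return false;
    }

    int main(void)
    {
        printf("%d\n", eighty_wire(HWCFG_BIT13, true, "SOME DRIVE", false));
        printf("%d\n", eighty_wire(HWCFG_BIT13, true,
                                   "TSSTcorp CDDVDW SH-S202J", false)); /* 0 */
        return 0;
    }
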
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 56ff8c46c7d1..2148df836ce7 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -31,24 +31,6 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
31 blk_queue_bounce_limit(drive->queue, addr); 31 blk_queue_bounce_limit(drive->queue, addr);
32} 32}
33 33
34static void ide_dump_opcode(ide_drive_t *drive)
35{
36 struct request *rq = drive->hwif->rq;
37 struct ide_cmd *cmd = NULL;
38
39 if (!rq)
40 return;
41
42 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
43 cmd = rq->special;
44
45 printk(KERN_ERR "ide: failed opcode was: ");
46 if (cmd == NULL)
47 printk(KERN_CONT "unknown\n");
48 else
49 printk(KERN_CONT "0x%02x\n", cmd->tf.command);
50}
51
52u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48) 34u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48)
53{ 35{
54 struct ide_taskfile *tf = &cmd->tf; 36 struct ide_taskfile *tf = &cmd->tf;
@@ -91,7 +73,7 @@ static void ide_dump_sector(ide_drive_t *drive)
91 73
92static void ide_dump_ata_error(ide_drive_t *drive, u8 err) 74static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
93{ 75{
94 printk(KERN_ERR "{ "); 76 printk(KERN_CONT "{ ");
95 if (err & ATA_ABORTED) 77 if (err & ATA_ABORTED)
96 printk(KERN_CONT "DriveStatusError "); 78 printk(KERN_CONT "DriveStatusError ");
97 if (err & ATA_ICRC) 79 if (err & ATA_ICRC)
@@ -121,7 +103,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
121 103
122static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) 104static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
123{ 105{
124 printk(KERN_ERR "{ "); 106 printk(KERN_CONT "{ ");
125 if (err & ATAPI_ILI) 107 if (err & ATAPI_ILI)
126 printk(KERN_CONT "IllegalLengthIndication "); 108 printk(KERN_CONT "IllegalLengthIndication ");
127 if (err & ATAPI_EOM) 109 if (err & ATAPI_EOM)
@@ -179,7 +161,10 @@ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
179 else 161 else
180 ide_dump_atapi_error(drive, err); 162 ide_dump_atapi_error(drive, err);
181 } 163 }
182 ide_dump_opcode(drive); 164
165 printk(KERN_ERR "%s: possibly failed opcode: 0x%02x\n",
166 drive->name, drive->hwif->cmd.tf.command);
167
183 return err; 168 return err;
184} 169}
185EXPORT_SYMBOL(ide_dump_status); 170EXPORT_SYMBOL(ide_dump_status);
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index 61111fd27130..39d4e01f5c9c 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -33,6 +33,16 @@ static int ide_generic_all; /* Set to claim all devices */
33module_param_named(all_generic_ide, ide_generic_all, bool, 0444); 33module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
34MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers."); 34MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
35 35
36static void netcell_quirkproc(ide_drive_t *drive)
37{
38 /* mark words 85-87 as valid */
39 drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000;
40}
41
42static const struct ide_port_ops netcell_port_ops = {
43 .quirkproc = netcell_quirkproc,
44};
45
36#define DECLARE_GENERIC_PCI_DEV(extra_flags) \ 46#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
37 { \ 47 { \
38 .name = DRV_NAME, \ 48 .name = DRV_NAME, \
@@ -74,6 +84,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
74 84
75 { /* 6: Revolution */ 85 { /* 6: Revolution */
76 .name = DRV_NAME, 86 .name = DRV_NAME,
87 .port_ops = &netcell_port_ops,
77 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | 88 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
78 IDE_HFLAG_TRUST_BIOS_FOR_DMA | 89 IDE_HFLAG_TRUST_BIOS_FOR_DMA |
79 IDE_HFLAG_OFF_BOARD, 90 IDE_HFLAG_OFF_BOARD,
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 7f264ed1141b..c895ed52b2e8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -295,7 +295,7 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id)
295 295
296 timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; 296 timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
297 297
298 if (ide_busy_sleep(hwif, timeout, use_altstatus)) 298 if (ide_busy_sleep(drive, timeout, use_altstatus))
299 return 1; 299 return 1;
300 300
301 /* wait for IRQ and ATA_DRQ */ 301 /* wait for IRQ and ATA_DRQ */
@@ -316,8 +316,9 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id)
316 return rc; 316 return rc;
317} 317}
318 318
319int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus) 319int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus)
320{ 320{
321 ide_hwif_t *hwif = drive->hwif;
321 u8 stat; 322 u8 stat;
322 323
323 timeout += jiffies; 324 timeout += jiffies;
@@ -330,6 +331,8 @@ int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
330 return 0; 331 return 0;
331 } while (time_before(jiffies, timeout)); 332 } while (time_before(jiffies, timeout));
332 333
334 printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__);
335
333 return 1; /* drive timed-out */ 336 return 1; /* drive timed-out */
334} 337}
335 338
@@ -420,7 +423,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
420 tp_ops->dev_select(drive); 423 tp_ops->dev_select(drive);
421 msleep(50); 424 msleep(50);
422 tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET); 425 tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
423 (void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0); 426 (void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0);
424 rc = ide_dev_read_id(drive, cmd, id); 427 rc = ide_dev_read_id(drive, cmd, id);
425 } 428 }
426 429
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 3ff7231e4858..028de26a25fe 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -67,6 +67,7 @@ static struct via_isa_bridge {
67 u8 udma_mask; 67 u8 udma_mask;
68 u8 flags; 68 u8 flags;
69} via_isa_bridges[] = { 69} via_isa_bridges[] = {
70 { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
70 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 71 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
71 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 72 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
72 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 73 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
@@ -474,6 +475,7 @@ static const struct pci_device_id via_pci_tbl[] = {
474 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 }, 475 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
475 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 }, 476 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
476 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 }, 477 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
478 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 },
477 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 }, 479 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
478 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 }, 480 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
479 { 0, }, 481 { 0, },
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index bf740394d704..949c97ff57e3 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -41,6 +41,10 @@ static int debug;
41module_param_named(debug, debug, uint, 0644); 41module_param_named(debug, debug, uint, 0644);
42MODULE_PARM_DESC(debug, "Enable debug printks in this driver"); 42MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
43 43
44static int forceload;
45module_param_named(forceload, forceload, uint, 0644);
 46MODULE_PARM_DESC(forceload, "Enable driver testing on unvalidated i5000");
47
44#define dprintk(fmt, arg...) \ 48#define dprintk(fmt, arg...) \
45 do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0) 49 do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
46 50
@@ -552,7 +556,7 @@ static int __init i7300_idle_init(void)
552 cpus_clear(idle_cpumask); 556 cpus_clear(idle_cpumask);
553 total_us = 0; 557 total_us = 0;
554 558
555 if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev)) 559 if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
556 return -ENODEV; 560 return -ENODEV;
557 561
558 if (i7300_idle_thrt_save()) 562 if (i7300_idle_thrt_save())
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e54e002665b0..5d445f48789b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -42,6 +42,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = {
42 ABS_MT_POSITION_Y, 42 ABS_MT_POSITION_Y,
43 ABS_MT_TOOL_TYPE, 43 ABS_MT_TOOL_TYPE,
44 ABS_MT_BLOB_ID, 44 ABS_MT_BLOB_ID,
45 ABS_MT_TRACKING_ID,
45 0 46 0
46}; 47};
47static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)]; 48static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index e29cdc13a199..a28c06d686e1 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
107 clk_disable(kmi->clk); 107 clk_disable(kmi->clk);
108} 108}
109 109
110static int amba_kmi_probe(struct amba_device *dev, void *id) 110static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
111{ 111{
112 struct amba_kmi_port *kmi; 112 struct amba_kmi_port *kmi;
113 struct serio *io; 113 struct serio *io;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 67248c31e19a..be5bbbb8ae4e 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -210,7 +210,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
210 timeout = wait_event_timeout(ps2dev->wait, 210 timeout = wait_event_timeout(ps2dev->wait,
211 !(ps2dev->flags & PS2_FLAG_CMD1), timeout); 211 !(ps2dev->flags & PS2_FLAG_CMD1), timeout);
212 212
213 if (ps2dev->cmdcnt && timeout > 0) { 213 if (ps2dev->cmdcnt && !(ps2dev->flags & PS2_FLAG_CMD1)) {
214 214
215 timeout = ps2_adjust_timeout(ps2dev, command, timeout); 215 timeout = ps2_adjust_timeout(ps2dev, command, timeout);
216 wait_event_timeout(ps2dev->wait, 216 wait_event_timeout(ps2dev->wait,
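
The libps2 fix stops inferring completion of the first command byte from wait_event_timeout()'s return value: the remaining time can be 0 even though the wakeup condition became true at the last tick, so re-testing PS2_FLAG_CMD1 itself is the race-free check. A toy illustration of the hazard (the wait call below is a stand-in, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for wait_event_timeout(): returns the remaining time, which
     * can be 0 even when the condition became true at the last tick. */
    static long fake_wait_event_timeout(bool cond, long remaining)
    {
        return cond ? remaining : 0;
    }

    int main(void)
    {
        bool cmd1_pending = false;  /* the first byte did complete... */
        long timeout = fake_wait_event_timeout(!cmd1_pending, 0); /* ...late */

        if (timeout > 0)            /* old, racy check */
            printf("old check: proceed\n");
        else
            printf("old check: spurious failure\n");

        if (!cmd1_pending)          /* new check: test the flag directly */
            printf("new check: proceed\n");
        return 0;
    }
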
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f100c7f4c1db..6954f5500108 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -419,7 +419,7 @@ static int ucb1400_ts_remove(struct platform_device *dev)
419#ifdef CONFIG_PM 419#ifdef CONFIG_PM
420static int ucb1400_ts_resume(struct platform_device *dev) 420static int ucb1400_ts_resume(struct platform_device *dev)
421{ 421{
422 struct ucb1400_ts *ucb = platform_get_drvdata(dev); 422 struct ucb1400_ts *ucb = dev->dev.platform_data;
423 423
424 if (ucb->ts_task) { 424 if (ucb->ts_task) {
425 /* 425 /*
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 1a83910f674f..eaf722fe309a 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -358,6 +358,16 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
358 if (emulate_insn(cpu)) 358 if (emulate_insn(cpu))
359 return; 359 return;
360 } 360 }
361 /* If KVM is active, the vmcall instruction triggers a
362 * General Protection Fault. Normally it triggers an
363 * invalid opcode fault (6): */
364 case 6:
365 /* We need to check if ring == GUEST_PL and
366 * faulting instruction == vmcall. */
367 if (is_hypercall(cpu)) {
368 rewrite_hypercall(cpu);
369 return;
370 }
361 break; 371 break;
362 case 14: /* We've intercepted a Page Fault. */ 372 case 14: /* We've intercepted a Page Fault. */
363 /* The Guest accessed a virtual address that wasn't mapped. 373 /* The Guest accessed a virtual address that wasn't mapped.
@@ -403,15 +413,6 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
403 * up the pointer now to indicate a hypercall is pending. */ 413 * up the pointer now to indicate a hypercall is pending. */
404 cpu->hcall = (struct hcall_args *)cpu->regs; 414 cpu->hcall = (struct hcall_args *)cpu->regs;
405 return; 415 return;
406 case 6:
407 /* kvm hypercalls trigger an invalid opcode fault (6).
408 * We need to check if ring == GUEST_PL and
409 * faulting instruction == vmcall. */
410 if (is_hypercall(cpu)) {
411 rewrite_hypercall(cpu);
412 return;
413 }
414 break;
415 } 416 }
416 417
417 /* We didn't handle the trap, so it needs to go to the Guest. */ 418 /* We didn't handle the trap, so it needs to go to the Guest. */
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 47c68bc75a17..56df1cee8fb3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1097,14 +1097,12 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1097 } 1097 }
1098 bitmap->allclean = 1; 1098 bitmap->allclean = 1;
1099 1099
1100 spin_lock_irqsave(&bitmap->lock, flags);
1100 for (j = 0; j < bitmap->chunks; j++) { 1101 for (j = 0; j < bitmap->chunks; j++) {
1101 bitmap_counter_t *bmc; 1102 bitmap_counter_t *bmc;
1102 spin_lock_irqsave(&bitmap->lock, flags); 1103 if (!bitmap->filemap)
1103 if (!bitmap->filemap) {
1104 /* error or shutdown */ 1104 /* error or shutdown */
1105 spin_unlock_irqrestore(&bitmap->lock, flags);
1106 break; 1105 break;
1107 }
1108 1106
1109 page = filemap_get_page(bitmap, j); 1107 page = filemap_get_page(bitmap, j);
1110 1108
@@ -1121,6 +1119,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1121 write_page(bitmap, page, 0); 1119 write_page(bitmap, page, 0);
1122 bitmap->allclean = 0; 1120 bitmap->allclean = 0;
1123 } 1121 }
1122 spin_lock_irqsave(&bitmap->lock, flags);
1123 j |= (PAGE_BITS - 1);
1124 continue; 1124 continue;
1125 } 1125 }
1126 1126
@@ -1181,9 +1181,10 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1181 ext2_clear_bit(file_page_offset(j), paddr); 1181 ext2_clear_bit(file_page_offset(j), paddr);
1182 kunmap_atomic(paddr, KM_USER0); 1182 kunmap_atomic(paddr, KM_USER0);
1183 } 1183 }
1184 } 1184 } else
1185 spin_unlock_irqrestore(&bitmap->lock, flags); 1185 j |= PAGE_COUNTER_MASK;
1186 } 1186 }
1187 spin_unlock_irqrestore(&bitmap->lock, flags);
1187 1188
1188 /* now sync the final page */ 1189 /* now sync the final page */
1189 if (lastpage != NULL) { 1190 if (lastpage != NULL) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fccc8343a250..641b211fe3fe 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1375,6 +1375,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1375 1375
1376 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1376 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1377 sb->size = cpu_to_le64(mddev->dev_sectors); 1377 sb->size = cpu_to_le64(mddev->dev_sectors);
1378 sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9);
1379 sb->level = cpu_to_le32(mddev->level);
1380 sb->layout = cpu_to_le32(mddev->layout);
1378 1381
1379 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1382 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1380 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1383 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
@@ -3303,7 +3306,9 @@ static ssize_t
3303action_show(mddev_t *mddev, char *page) 3306action_show(mddev_t *mddev, char *page)
3304{ 3307{
3305 char *type = "idle"; 3308 char *type = "idle";
3306 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3309 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3310 type = "frozen";
3311 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3307 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 3312 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3308 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3313 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3309 type = "reshape"; 3314 type = "reshape";
@@ -3326,7 +3331,12 @@ action_store(mddev_t *mddev, const char *page, size_t len)
3326 if (!mddev->pers || !mddev->pers->sync_request) 3331 if (!mddev->pers || !mddev->pers->sync_request)
3327 return -EINVAL; 3332 return -EINVAL;
3328 3333
3329 if (cmd_match(page, "idle")) { 3334 if (cmd_match(page, "frozen"))
3335 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3336 else
3337 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3338
3339 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3330 if (mddev->sync_thread) { 3340 if (mddev->sync_thread) {
3331 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3341 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3332 md_unregister_thread(mddev->sync_thread); 3342 md_unregister_thread(mddev->sync_thread);
@@ -3680,7 +3690,7 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
3680 if (strict_blocks_to_sectors(buf, &sectors) < 0) 3690 if (strict_blocks_to_sectors(buf, &sectors) < 0)
3681 return -EINVAL; 3691 return -EINVAL;
3682 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 3692 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
3683 return -EINVAL; 3693 return -E2BIG;
3684 3694
3685 mddev->external_size = 1; 3695 mddev->external_size = 1;
3686 } 3696 }
@@ -5557,7 +5567,7 @@ static struct block_device_operations md_fops =
5557 .owner = THIS_MODULE, 5567 .owner = THIS_MODULE,
5558 .open = md_open, 5568 .open = md_open,
5559 .release = md_release, 5569 .release = md_release,
5560 .locked_ioctl = md_ioctl, 5570 .ioctl = md_ioctl,
5561 .getgeo = md_getgeo, 5571 .getgeo = md_getgeo,
5562 .media_changed = md_media_changed, 5572 .media_changed = md_media_changed,
5563 .revalidate_disk= md_revalidate, 5573 .revalidate_disk= md_revalidate,
@@ -6352,12 +6362,13 @@ void md_do_sync(mddev_t *mddev)
6352 6362
6353 skipped = 0; 6363 skipped = 0;
6354 6364
6355 if ((mddev->curr_resync > mddev->curr_resync_completed && 6365 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6356 (mddev->curr_resync - mddev->curr_resync_completed) 6366 ((mddev->curr_resync > mddev->curr_resync_completed &&
6357 > (max_sectors >> 4)) || 6367 (mddev->curr_resync - mddev->curr_resync_completed)
6358 (j - mddev->curr_resync_completed)*2 6368 > (max_sectors >> 4)) ||
6359 >= mddev->resync_max - mddev->curr_resync_completed 6369 (j - mddev->curr_resync_completed)*2
6360 ) { 6370 >= mddev->resync_max - mddev->curr_resync_completed
6371 )) {
6361 /* time to update curr_resync_completed */ 6372 /* time to update curr_resync_completed */
6362 blk_unplug(mddev->queue); 6373 blk_unplug(mddev->queue);
6363 wait_event(mddev->recovery_wait, 6374 wait_event(mddev->recovery_wait,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4616bc3a6e71..5d400aef8d9b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3811,13 +3811,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3811 safepos = conf->reshape_safe; 3811 safepos = conf->reshape_safe;
3812 sector_div(safepos, data_disks); 3812 sector_div(safepos, data_disks);
3813 if (mddev->delta_disks < 0) { 3813 if (mddev->delta_disks < 0) {
3814 writepos -= reshape_sectors; 3814 writepos -= min_t(sector_t, reshape_sectors, writepos);
3815 readpos += reshape_sectors; 3815 readpos += reshape_sectors;
3816 safepos += reshape_sectors; 3816 safepos += reshape_sectors;
3817 } else { 3817 } else {
3818 writepos += reshape_sectors; 3818 writepos += reshape_sectors;
3819 readpos -= reshape_sectors; 3819 readpos -= min_t(sector_t, reshape_sectors, readpos);
3820 safepos -= reshape_sectors; 3820 safepos -= min_t(sector_t, reshape_sectors, safepos);
3821 } 3821 }
3822 3822
3823 /* 'writepos' is the most advanced device address we might write. 3823 /* 'writepos' is the most advanced device address we might write.
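
reshape_request() clamps with min_t() because sector_t is unsigned: subtracting reshape_sectors from a position smaller than it would wrap around rather than go negative. Two lines of arithmetic show the failure mode:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        sector_t writepos = 100, reshape_sectors = 256;

        printf("unclamped: %" PRIu64 "\n", writepos - reshape_sectors); /* wraps */

        writepos -= min_t(sector_t, reshape_sectors, writepos); /* as in the fix */
        printf("clamped:   %" PRIu64 "\n", writepos);           /* 0 */
        return 0;
    }
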
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 7793932a513b..11a6248cc1c1 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -443,7 +443,7 @@ static irqreturn_t pcf50633_irq(int irq, void *data)
443 dev_dbg(pcf->dev, "pcf50633_irq\n"); 443 dev_dbg(pcf->dev, "pcf50633_irq\n");
444 444
445 get_device(pcf->dev); 445 get_device(pcf->dev);
446 disable_irq(pcf->irq); 446 disable_irq_nosync(pcf->irq);
447 schedule_work(&pcf->irq_work); 447 schedule_work(&pcf->irq_work);
448 448
449 return IRQ_HANDLED; 449 return IRQ_HANDLED;
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index c2be3088e2e1..fe24079387c5 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -79,10 +79,6 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
79 /* Cache is CPU endian */ 79 /* Cache is CPU endian */
80 dest[i - reg] = be16_to_cpu(dest[i - reg]); 80 dest[i - reg] = be16_to_cpu(dest[i - reg]);
81 81
82 /* Satisfy non-volatile bits from cache */
83 dest[i - reg] &= wm8350_reg_io_map[i].vol;
84 dest[i - reg] |= wm8350->reg_cache[i];
85
86 /* Mask out non-readable bits */ 82 /* Mask out non-readable bits */
87 dest[i - reg] &= wm8350_reg_io_map[i].readable; 83 dest[i - reg] &= wm8350_reg_io_map[i].readable;
88 } 84 }
@@ -182,9 +178,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
182 (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable) 178 (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
183 | src[i - reg]; 179 | src[i - reg];
184 180
185 /* Don't store volatile bits */
186 wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol;
187
188 src[i - reg] = cpu_to_be16(src[i - reg]); 181 src[i - reg] = cpu_to_be16(src[i - reg]);
189 } 182 }
190 183
@@ -1261,7 +1254,6 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
1261 (i < WM8350_CLOCK_CONTROL_1 || i > WM8350_AIF_TEST)) { 1254 (i < WM8350_CLOCK_CONTROL_1 || i > WM8350_AIF_TEST)) {
1262 value = be16_to_cpu(wm8350->reg_cache[i]); 1255 value = be16_to_cpu(wm8350->reg_cache[i]);
1263 value &= wm8350_reg_io_map[i].readable; 1256 value &= wm8350_reg_io_map[i].readable;
1264 value &= ~wm8350_reg_io_map[i].vol;
1265 wm8350->reg_cache[i] = value; 1257 wm8350->reg_cache[i] = value;
1266 } else 1258 } else
1267 wm8350->reg_cache[i] = reg_map[i]; 1259 wm8350->reg_cache[i] = reg_map[i];
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 3cf61ece71d7..348443bdb23b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -119,7 +119,7 @@ enclosure_register(struct device *dev, const char *name, int components,
119 edev->edev.class = &enclosure_class; 119 edev->edev.class = &enclosure_class;
120 edev->edev.parent = get_device(dev); 120 edev->edev.parent = get_device(dev);
121 edev->cb = cb; 121 edev->cb = cb;
122 dev_set_name(&edev->edev, name); 122 dev_set_name(&edev->edev, "%s", name);
123 err = device_register(&edev->edev); 123 err = device_register(&edev->edev);
124 if (err) 124 if (err)
125 goto err; 125 goto err;
@@ -255,8 +255,8 @@ enclosure_component_register(struct enclosure_device *edev,
255 ecomp->number = number; 255 ecomp->number = number;
256 cdev = &ecomp->cdev; 256 cdev = &ecomp->cdev;
257 cdev->parent = get_device(&edev->edev); 257 cdev->parent = get_device(&edev->edev);
258 if (name) 258 if (name && name[0])
259 dev_set_name(cdev, name); 259 dev_set_name(cdev, "%s", name);
260 else 260 else
261 dev_set_name(cdev, "%u", number); 261 dev_set_name(cdev, "%u", number);
262 262
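Both enclosure hunks close the same hole: dev_set_name() takes a printf-style format, so passing a caller-supplied name directly lets any '%' in it be parsed as a conversion. A minimal userspace analogue of the bug and the fix:

#include <stdio.h>

int main(void)
{
    const char *name = "bay-%s-%n";   /* hostile or merely unlucky '%' */

    /* BAD:  printf(name) parses the conversions -> undefined behaviour */
    /* GOOD: the string is plain data, exactly as the hunk arranges */
    printf("%s\n", name);
    return 0;
}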
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 36875dcfa492..7d4febdab286 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -490,7 +490,7 @@ static void mmci_check_status(unsigned long data)
490 mod_timer(&host->timer, jiffies + HZ); 490 mod_timer(&host->timer, jiffies + HZ);
491} 491}
492 492
493static int __devinit mmci_probe(struct amba_device *dev, void *id) 493static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
494{ 494{
495 struct mmc_platform_data *plat = dev->dev.platform_data; 495 struct mmc_platform_data *plat = dev->dev.platform_data;
496 struct mmci_host *host; 496 struct mmci_host *host;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 62dee54af0a5..43976aa4dbb1 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -178,7 +178,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
178 /* Calculate flash page address; use block erase (for speed) if 178 /* Calculate flash page address; use block erase (for speed) if
179 * we're at a block boundary and need to erase the whole block. 179 * we're at a block boundary and need to erase the whole block.
180 */ 180 */
181 pageaddr = div_u64(instr->len, priv->page_size); 181 pageaddr = div_u64(instr->addr, priv->page_size);
182 do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize; 182 do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
183 pageaddr = pageaddr << priv->page_offset; 183 pageaddr = pageaddr << priv->page_offset;
184 184
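The one-character dataflash change is a real bug fix: the erase path computed the page address from the erase length instead of the erase address, so every erase landed on the wrong page. A userspace sketch of the corrected math (sizes illustrative; a DataFlash block here is 8 pages):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0x21000, len = 0x8400;  /* erase request */
    unsigned page_size = 0x210;             /* 528-byte DataFlash page */

    uint64_t pageaddr = addr / page_size;   /* was: len / page_size */
    int do_block = (pageaddr & 0x7) == 0 && len >= 8ULL * page_size;

    printf("page %llu, block erase: %d\n",
           (unsigned long long)pageaddr, do_block);
    return 0;
}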
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index f3548d048014..40c26080ecda 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -831,6 +831,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
831 break; 831 break;
832 832
833 case NAND_CMD_READID: 833 case NAND_CMD_READID:
834 host->col_addr = 0;
834 send_read_id(host); 835 send_read_id(host);
835 break; 836 break;
836 837
@@ -867,6 +868,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
867 mtd->priv = this; 868 mtd->priv = this;
868 mtd->owner = THIS_MODULE; 869 mtd->owner = THIS_MODULE;
869 mtd->dev.parent = &pdev->dev; 870 mtd->dev.parent = &pdev->dev;
871 mtd->name = "mxc_nand";
870 872
871 /* 50 us command delay time */ 873 /* 50 us command delay time */
872 this->chip_delay = 5; 874 this->chip_delay = 5;
@@ -882,8 +884,10 @@ static int __init mxcnd_probe(struct platform_device *pdev)
882 this->verify_buf = mxc_nand_verify_buf; 884 this->verify_buf = mxc_nand_verify_buf;
883 885
884 host->clk = clk_get(&pdev->dev, "nfc"); 886 host->clk = clk_get(&pdev->dev, "nfc");
885 if (IS_ERR(host->clk)) 887 if (IS_ERR(host->clk)) {
888 err = PTR_ERR(host->clk);
886 goto eclk; 889 goto eclk;
890 }
887 891
888 clk_enable(host->clk); 892 clk_enable(host->clk);
889 host->clk_act = 1; 893 host->clk_act = 1;
@@ -896,7 +900,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
896 900
897 host->regs = ioremap(res->start, res->end - res->start + 1); 901 host->regs = ioremap(res->start, res->end - res->start + 1);
898 if (!host->regs) { 902 if (!host->regs) {
899 err = -EIO; 903 err = -ENOMEM;
900 goto eres; 904 goto eres;
901 } 905 }
902 906
@@ -1011,30 +1015,35 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
1011#ifdef CONFIG_PM 1015#ifdef CONFIG_PM
1012static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state) 1016static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
1013{ 1017{
1014 struct mtd_info *info = platform_get_drvdata(pdev); 1018 struct mtd_info *mtd = platform_get_drvdata(pdev);
1019 struct nand_chip *nand_chip = mtd->priv;
1020 struct mxc_nand_host *host = nand_chip->priv;
1015 int ret = 0; 1021 int ret = 0;
1016 1022
1017 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); 1023 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
1018 if (info) 1024 if (mtd) {
1019 ret = info->suspend(info); 1025 ret = mtd->suspend(mtd);
1020 1026 /* Disable the NFC clock */
1021 /* Disable the NFC clock */ 1027 clk_disable(host->clk);
1022 clk_disable(nfc_clk); /* FIXME */ 1028 }
1023 1029
1024 return ret; 1030 return ret;
1025} 1031}
1026 1032
1027static int mxcnd_resume(struct platform_device *pdev) 1033static int mxcnd_resume(struct platform_device *pdev)
1028{ 1034{
1029 struct mtd_info *info = platform_get_drvdata(pdev); 1035 struct mtd_info *mtd = platform_get_drvdata(pdev);
1036 struct nand_chip *nand_chip = mtd->priv;
1037 struct mxc_nand_host *host = nand_chip->priv;
1030 int ret = 0; 1038 int ret = 0;
1031 1039
1032 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); 1040 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
1033 /* Enable the NFC clock */
1034 clk_enable(nfc_clk); /* FIXME */
1035 1041
1036 if (info) 1042 if (mtd) {
1037 info->resume(info); 1043 /* Enable the NFC clock */
1044 clk_enable(host->clk);
1045 mtd->resume(mtd);
1046 }
1038 1047
1039 return ret; 1048 return ret;
1040} 1049}
@@ -1055,13 +1064,7 @@ static struct platform_driver mxcnd_driver = {
1055 1064
1056static int __init mxc_nd_init(void) 1065static int __init mxc_nd_init(void)
1057{ 1066{
1058 /* Register the device driver structure. */ 1067 return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
1059 pr_info("MXC MTD nand Driver\n");
1060 if (platform_driver_probe(&mxcnd_driver, mxcnd_probe) != 0) {
1061 printk(KERN_ERR "Driver register failed for mxcnd_driver\n");
1062 return -ENODEV;
1063 }
1064 return 0;
1065} 1068}
1066 1069
1067static void __exit mxc_nd_cleanup(void) 1070static void __exit mxc_nd_cleanup(void)
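Besides propagating PTR_ERR() from clk_get() and returning -ENOMEM for a failed ioremap(), the mxc_nand cleanup lets platform_driver_probe() report its own error instead of flattening every failure to -ENODEV. A sketch of the idiom (the example_* names are placeholders):

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev);
static struct platform_driver example_driver;

static int __init example_init(void)
{
    /* pass the real error code straight through to the caller */
    return platform_driver_probe(&example_driver, example_probe);
}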
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index f0e99d4c066b..242257b19441 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -78,16 +78,20 @@ void free_cpu_buffers(void)
78 op_ring_buffer_write = NULL; 78 op_ring_buffer_write = NULL;
79} 79}
80 80
81#define RB_EVENT_HDR_SIZE 4
82
81int alloc_cpu_buffers(void) 83int alloc_cpu_buffers(void)
82{ 84{
83 int i; 85 int i;
84 86
85 unsigned long buffer_size = oprofile_cpu_buffer_size; 87 unsigned long buffer_size = oprofile_cpu_buffer_size;
88 unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
89 RB_EVENT_HDR_SIZE);
86 90
87 op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); 91 op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
88 if (!op_ring_buffer_read) 92 if (!op_ring_buffer_read)
89 goto fail; 93 goto fail;
90 op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); 94 op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
91 if (!op_ring_buffer_write) 95 if (!op_ring_buffer_write)
92 goto fail; 96 goto fail;
93 97
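ring_buffer_alloc() takes a size in bytes, not in entries, so the oprofile hunk converts its per-CPU entry count, padding each entry for the ring buffer's per-event header. A sketch of the conversion:

#include <stddef.h>

#define RB_EVENT_HDR_SIZE 4   /* per-event header overhead, as in the hunk */

static unsigned long op_buffer_bytes(unsigned long entries, size_t sample_size)
{
    return entries * (sample_size + RB_EVENT_HDR_SIZE);
}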
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index e6a7e847ee80..ea31a452b153 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -352,8 +352,8 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
352 unsigned long port; 352 unsigned long port;
353 353
354 if (!dev->irq) { 354 if (!dev->irq) {
355 printk(KERN_WARNING "IRQ not found for parallel device at 0x%lx\n", 355 printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n",
356 dev->hpa.start); 356 (unsigned long long)dev->hpa.start);
357 return -ENODEV; 357 return -ENODEV;
358 } 358 }
359 359
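resource_size_t can be 32 or 64 bits wide depending on the kernel configuration, so the portable way to print it is to widen explicitly and use %llx, as the hunk does. A userspace sketch (the value is illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t resource_size_t;   /* width is config-dependent in-kernel */

int main(void)
{
    resource_size_t hpa = 0xf05b0000ULL;

    printf("device at 0x%llx\n", (unsigned long long)hpa);
    return 0;
}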
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 4fc168b70095..e68d5f20ffb3 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -129,7 +129,6 @@ struct acpiphp_func {
129 struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */ 129 struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
130 130
131 struct list_head sibling; 131 struct list_head sibling;
132 struct pci_dev *pci_dev;
133 struct notifier_block nb; 132 struct notifier_block nb;
134 acpi_handle handle; 133 acpi_handle handle;
135 134
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a33794d9e0dc..3a6064bce561 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -32,9 +32,6 @@
32 32
33/* 33/*
34 * Lifetime rules for pci_dev: 34 * Lifetime rules for pci_dev:
35 * - The one in acpiphp_func has its refcount elevated by pci_get_slot()
36 * when the driver is loaded or when an insertion event occurs. It loses
37 * a refcount when its ejected or the driver unloads.
38 * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot() 35 * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot()
39 * when the bridge is scanned and it loses a refcount when the bridge 36 * when the bridge is scanned and it loses a refcount when the bridge
40 * is removed. 37 * is removed.
@@ -130,6 +127,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
130 unsigned long long adr, sun; 127 unsigned long long adr, sun;
131 int device, function, retval; 128 int device, function, retval;
132 struct pci_bus *pbus = bridge->pci_bus; 129 struct pci_bus *pbus = bridge->pci_bus;
130 struct pci_dev *pdev;
133 131
134 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) 132 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
135 return AE_OK; 133 return AE_OK;
@@ -213,10 +211,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
213 newfunc->slot = slot; 211 newfunc->slot = slot;
214 list_add_tail(&newfunc->sibling, &slot->funcs); 212 list_add_tail(&newfunc->sibling, &slot->funcs);
215 213
216 /* associate corresponding pci_dev */ 214 pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
217 newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function)); 215 if (pdev) {
218 if (newfunc->pci_dev) {
219 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); 216 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
217 pci_dev_put(pdev);
220 } 218 }
221 219
222 if (is_dock_device(handle)) { 220 if (is_dock_device(handle)) {
@@ -617,7 +615,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
617 if (ACPI_FAILURE(status)) 615 if (ACPI_FAILURE(status))
618 err("failed to remove notify handler\n"); 616 err("failed to remove notify handler\n");
619 } 617 }
620 pci_dev_put(func->pci_dev);
621 list_del(list); 618 list_del(list);
622 kfree(func); 619 kfree(func);
623 } 620 }
@@ -1101,22 +1098,24 @@ static int __ref enable_device(struct acpiphp_slot *slot)
1101 pci_enable_bridges(bus); 1098 pci_enable_bridges(bus);
1102 pci_bus_add_devices(bus); 1099 pci_bus_add_devices(bus);
1103 1100
1104 /* associate pci_dev to our representation */
1105 list_for_each (l, &slot->funcs) { 1101 list_for_each (l, &slot->funcs) {
1106 func = list_entry(l, struct acpiphp_func, sibling); 1102 func = list_entry(l, struct acpiphp_func, sibling);
1107 func->pci_dev = pci_get_slot(bus, PCI_DEVFN(slot->device, 1103 dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
1108 func->function)); 1104 func->function));
1109 if (!func->pci_dev) 1105 if (!dev)
1110 continue; 1106 continue;
1111 1107
1112 if (func->pci_dev->hdr_type != PCI_HEADER_TYPE_BRIDGE && 1108 if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
1113 func->pci_dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) 1109 dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
1110 pci_dev_put(dev);
1114 continue; 1111 continue;
1112 }
1115 1113
1116 status = find_p2p_bridge(func->handle, (u32)1, bus, NULL); 1114 status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
1117 if (ACPI_FAILURE(status)) 1115 if (ACPI_FAILURE(status))
1118 warn("find_p2p_bridge failed (error code = 0x%x)\n", 1116 warn("find_p2p_bridge failed (error code = 0x%x)\n",
1119 status); 1117 status);
1118 pci_dev_put(dev);
1120 } 1119 }
1121 1120
1122 slot->flags |= SLOT_ENABLED; 1121 slot->flags |= SLOT_ENABLED;
@@ -1142,17 +1141,14 @@ static void disable_bridges(struct pci_bus *bus)
1142 */ 1141 */
1143static int disable_device(struct acpiphp_slot *slot) 1142static int disable_device(struct acpiphp_slot *slot)
1144{ 1143{
1145 int retval = 0;
1146 struct acpiphp_func *func; 1144 struct acpiphp_func *func;
1147 struct list_head *l; 1145 struct pci_dev *pdev;
1148 1146
1149 /* is this slot already disabled? */ 1147 /* is this slot already disabled? */
1150 if (!(slot->flags & SLOT_ENABLED)) 1148 if (!(slot->flags & SLOT_ENABLED))
1151 goto err_exit; 1149 goto err_exit;
1152 1150
1153 list_for_each (l, &slot->funcs) { 1151 list_for_each_entry(func, &slot->funcs, sibling) {
1154 func = list_entry(l, struct acpiphp_func, sibling);
1155
1156 if (func->bridge) { 1152 if (func->bridge) {
1157 /* cleanup p2p bridges under this P2P bridge */ 1153 /* cleanup p2p bridges under this P2P bridge */
1158 cleanup_p2p_bridge(func->bridge->handle, 1154 cleanup_p2p_bridge(func->bridge->handle,
@@ -1160,35 +1156,28 @@ static int disable_device(struct acpiphp_slot *slot)
1160 func->bridge = NULL; 1156 func->bridge = NULL;
1161 } 1157 }
1162 1158
1163 if (func->pci_dev) { 1159 pdev = pci_get_slot(slot->bridge->pci_bus,
1164 pci_stop_bus_device(func->pci_dev); 1160 PCI_DEVFN(slot->device, func->function));
1165 if (func->pci_dev->subordinate) { 1161 if (pdev) {
1166 disable_bridges(func->pci_dev->subordinate); 1162 pci_stop_bus_device(pdev);
1167 pci_disable_device(func->pci_dev); 1163 if (pdev->subordinate) {
1164 disable_bridges(pdev->subordinate);
1165 pci_disable_device(pdev);
1168 } 1166 }
1167 pci_remove_bus_device(pdev);
1168 pci_dev_put(pdev);
1169 } 1169 }
1170 } 1170 }
1171 1171
1172 list_for_each (l, &slot->funcs) { 1172 list_for_each_entry(func, &slot->funcs, sibling) {
1173 func = list_entry(l, struct acpiphp_func, sibling);
1174
1175 acpiphp_unconfigure_ioapics(func->handle); 1173 acpiphp_unconfigure_ioapics(func->handle);
1176 acpiphp_bus_trim(func->handle); 1174 acpiphp_bus_trim(func->handle);
1177 /* try to remove anyway.
1178 * acpiphp_bus_add might have been failed */
1179
1180 if (!func->pci_dev)
1181 continue;
1182
1183 pci_remove_bus_device(func->pci_dev);
1184 pci_dev_put(func->pci_dev);
1185 func->pci_dev = NULL;
1186 } 1175 }
1187 1176
1188 slot->flags &= (~SLOT_ENABLED); 1177 slot->flags &= (~SLOT_ENABLED);
1189 1178
1190 err_exit: 1179err_exit:
1191 return retval; 1180 return 0;
1192} 1181}
1193 1182
1194 1183
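The acpiphp hunks replace the long-lived func->pci_dev pointer with a lookup-use-put discipline: pci_get_slot() returns a referenced pci_dev (or NULL), and every successful lookup must be balanced by pci_dev_put(). A sketch of the pattern (function name illustrative):

#include <linux/pci.h>

static bool slot_is_populated(struct pci_bus *bus, unsigned int devfn)
{
    struct pci_dev *pdev = pci_get_slot(bus, devfn);

    if (!pdev)
        return false;
    pci_dev_put(pdev);   /* done with the reference immediately */
    return true;
}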
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 34bf0fdf5047..1a91bf9687af 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -557,7 +557,8 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
557 } else { 557 } else {
558 error = -ENODEV; 558 error = -ENODEV;
559 /* Fall back to PCI_D0 if native PM is not supported */ 559 /* Fall back to PCI_D0 if native PM is not supported */
560 pci_update_current_state(dev, PCI_D0); 560 if (!dev->pm_cap)
561 dev->current_state = PCI_D0;
561 } 562 }
562 563
563 return error; 564 return error;
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 72b15495183c..c6628f5a0af7 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -497,7 +497,7 @@ static struct platform_driver da903x_regulator_driver = {
497 .owner = THIS_MODULE, 497 .owner = THIS_MODULE,
498 }, 498 },
499 .probe = da903x_regulator_probe, 499 .probe = da903x_regulator_probe,
500 .remove = da903x_regulator_remove, 500 .remove = __devexit_p(da903x_regulator_remove),
501}; 501};
502 502
503static int __init da903x_regulator_init(void) 503static int __init da903x_regulator_init(void)
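Wrapping the remove callback in __devexit_p() keeps the pointer valid when __devexit functions are discarded from the image. A simplified sketch of the macro's definition in kernels of this era:

#if defined(MODULE) || defined(CONFIG_HOTPLUG)
#define __devexit_p(x) x      /* remove path kept: use the function */
#else
#define __devexit_p(x) NULL   /* remove path discarded: avoid a dangling ref */
#endif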
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 826153552157..aaf1f75fa293 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -102,7 +102,7 @@ static const struct rtc_class_ops pl030_ops = {
102 .set_alarm = pl030_set_alarm, 102 .set_alarm = pl030_set_alarm,
103}; 103};
104 104
105static int pl030_probe(struct amba_device *dev, void *id) 105static int pl030_probe(struct amba_device *dev, struct amba_id *id)
106{ 106{
107 struct pl030_rtc *rtc; 107 struct pl030_rtc *rtc;
108 int ret; 108 int ret;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 333eec689d2f..451fc13784d1 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -127,7 +127,7 @@ static int pl031_remove(struct amba_device *adev)
127 return 0; 127 return 0;
128} 128}
129 129
130static int pl031_probe(struct amba_device *adev, void *id) 130static int pl031_probe(struct amba_device *adev, struct amba_id *id)
131{ 131{
132 int ret; 132 int ret;
133 struct pl031_local *ldata; 133 struct pl031_local *ldata;
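The mmci, pl030, and pl031 hunks all track the same AMBA bus change: probe now receives a typed struct amba_id rather than a void pointer. A sketch of the updated signature (example_probe is a placeholder):

#include <linux/amba/bus.h>

static int example_probe(struct amba_device *dev, struct amba_id *id)
{
    /* id->id and id->mask identify the matched peripheral */
    return 0;
}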
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 8b7983aba8f7..36c21b19e5d7 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1978,7 +1978,8 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1978{ 1978{
1979 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1979 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1980 1980
1981 scsi_dma_unmap(cmd); 1981 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1982 scsi_dma_unmap(cmd);
1982} /* End twa_unmap_scsi_data() */ 1983} /* End twa_unmap_scsi_data() */
1983 1984
1984/* scsi_host_template initializer */ 1985/* scsi_host_template initializer */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c03f1d2c9e2e..faa0fcfed71e 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -6,7 +6,7 @@
6 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 Brad Strand <linux@3ware.com> 7 Brad Strand <linux@3ware.com>
8 8
9 Copyright (C) 1999-2007 3ware Inc. 9 Copyright (C) 1999-2009 3ware Inc.
10 10
 11 Kernel compatibility By: Andre Hedrick <andre@suse.com> 11 Kernel compatibility By: Andre Hedrick <andre@suse.com>

12 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> 12 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1294{ 1294{
1295 dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); 1295 dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
1296 1296
1297 scsi_dma_unmap(cmd); 1297 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1298 scsi_dma_unmap(cmd);
1298} /* End tw_unmap_scsi_data() */ 1299} /* End tw_unmap_scsi_data() */
1299 1300
1300/* This function will reset a device extension */ 1301/* This function will reset a device extension */
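Both 3ware hunks add the same guard: only commands whose data buffers were actually DMA-mapped (the drivers set SCp.phase to TW_PHASE_SGLIST when mapping) may be unmapped, which prevents a bogus scsi_dma_unmap() on commands that never reached that phase. The pattern, isolated:

#include <scsi/scsi_cmnd.h>

static void example_unmap(struct scsi_cmnd *cmd)
{
    if (cmd->SCp.phase == TW_PHASE_SGLIST)   /* set only when mapped */
        scsi_dma_unmap(cmd);
}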
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 8e71e5e122b3..a5a2ba2561d9 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -6,7 +6,7 @@
6 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 Brad Strand <linux@3ware.com> 7 Brad Strand <linux@3ware.com>
8 8
9 Copyright (C) 1999-2007 3ware Inc. 9 Copyright (C) 1999-2009 3ware Inc.
10 10
 11 Kernel compatibility By: Andre Hedrick <andre@suse.com> 11 Kernel compatibility By: Andre Hedrick <andre@suse.com>
12 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> 12 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8ed2990c826e..fb2740789b68 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -628,6 +628,17 @@ config FCOE
628 ---help--- 628 ---help---
629 Fibre Channel over Ethernet module 629 Fibre Channel over Ethernet module
630 630
631config FCOE_FNIC
632 tristate "Cisco FNIC Driver"
633 depends on PCI && X86
634 select LIBFC
635 help
636 This is support for the Cisco PCI-Express FCoE HBA.
637
638 To compile this driver as a module, choose M here and read
639 <file:Documentation/scsi/scsi.txt>.
640 The module will be called fnic.
641
631config SCSI_DMX3191D 642config SCSI_DMX3191D
632 tristate "DMX3191D SCSI support" 643 tristate "DMX3191D SCSI support"
633 depends on PCI && SCSI 644 depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e7c861ac417d..a5049cfb40ed 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SCSI_DH) += device_handler/
39obj-$(CONFIG_LIBFC) += libfc/ 39obj-$(CONFIG_LIBFC) += libfc/
40obj-$(CONFIG_LIBFCOE) += fcoe/ 40obj-$(CONFIG_LIBFCOE) += fcoe/
41obj-$(CONFIG_FCOE) += fcoe/ 41obj-$(CONFIG_FCOE) += fcoe/
42obj-$(CONFIG_FCOE_FNIC) += fnic/
42obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o 43obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
43obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o 44obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
44obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o 45obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
new file mode 100644
index 000000000000..37c3440bc17c
--- /dev/null
+++ b/drivers/scsi/fnic/Makefile
@@ -0,0 +1,15 @@
1obj-$(CONFIG_FCOE_FNIC) += fnic.o
2
3fnic-y := \
4 fnic_attrs.o \
5 fnic_isr.o \
6 fnic_main.o \
7 fnic_res.o \
8 fnic_fcs.o \
9 fnic_scsi.o \
10 vnic_cq.o \
11 vnic_dev.o \
12 vnic_intr.o \
13 vnic_rq.o \
14 vnic_wq_copy.o \
15 vnic_wq.o
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
new file mode 100644
index 000000000000..d1225cf6320e
--- /dev/null
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_DESC_H_
19#define _CQ_DESC_H_
20
21/*
22 * Completion queue descriptor types
23 */
24enum cq_desc_types {
25 CQ_DESC_TYPE_WQ_ENET = 0,
26 CQ_DESC_TYPE_DESC_COPY = 1,
27 CQ_DESC_TYPE_WQ_EXCH = 2,
28 CQ_DESC_TYPE_RQ_ENET = 3,
29 CQ_DESC_TYPE_RQ_FCP = 4,
30};
31
32/* Completion queue descriptor: 16B
33 *
34 * All completion queues have this basic layout. The
 35 * type-specific area is unique for each completion
36 * queue type.
37 */
38struct cq_desc {
39 __le16 completed_index;
40 __le16 q_number;
41 u8 type_specfic[11];
42 u8 type_color;
43};
44
45#define CQ_DESC_TYPE_BITS 4
46#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
47#define CQ_DESC_COLOR_MASK 1
48#define CQ_DESC_COLOR_SHIFT 7
49#define CQ_DESC_Q_NUM_BITS 10
50#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
51#define CQ_DESC_COMP_NDX_BITS 12
52#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
53
54static inline void cq_desc_dec(const struct cq_desc *desc_arg,
55 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
56{
57 const struct cq_desc *desc = desc_arg;
58 const u8 type_color = desc->type_color;
59
60 *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
61
62 /*
63 * Make sure color bit is read from desc *before* other fields
64 * are read from desc. Hardware guarantees color bit is last
65 * bit (byte) written. Adding the rmb() prevents the compiler
66 * and/or CPU from reordering the reads which would potentially
67 * result in reading stale values.
68 */
69
70 rmb();
71
72 *type = type_color & CQ_DESC_TYPE_MASK;
73 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
74 *completed_index = le16_to_cpu(desc->completed_index) &
75 CQ_DESC_COMP_NDX_MASK;
76}
77
78#endif /* _CQ_DESC_H_ */
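A sketch of how a consumer would typically drive cq_desc_dec(): the hardware toggles the color bit each pass around the ring, so a descriptor belongs to the current pass only when its color matches the expected one (function name illustrative):

#include "cq_desc.h"

static int cq_service_one(struct cq_desc *desc, u8 expected_color)
{
    u8 type, color;
    u16 q_number, completed_index;

    cq_desc_dec(desc, &type, &color, &q_number, &completed_index);
    if (color != expected_color)
        return 0;   /* hardware has not written this slot yet */
    /* ... dispatch on type and completed_index ... */
    return 1;
}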
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
new file mode 100644
index 000000000000..a9fa26f82ddd
--- /dev/null
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_ENET_DESC_H_
19#define _CQ_ENET_DESC_H_
20
21#include "cq_desc.h"
22
23/* Ethernet completion queue descriptor: 16B */
24struct cq_enet_wq_desc {
25 __le16 completed_index;
26 __le16 q_number;
27 u8 reserved[11];
28 u8 type_color;
29};
30
31static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
32 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
33{
34 cq_desc_dec((struct cq_desc *)desc, type,
35 color, q_number, completed_index);
36}
37
38/* Completion queue descriptor: Ethernet receive queue, 16B */
39struct cq_enet_rq_desc {
40 __le16 completed_index_flags;
41 __le16 q_number_rss_type_flags;
42 __le32 rss_hash;
43 __le16 bytes_written_flags;
44 __le16 vlan;
45 __le16 checksum_fcoe;
46 u8 flags;
47 u8 type_color;
48};
49
50#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
51#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
52#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
53#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
54
55#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
56#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
57 ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
58#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
59#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
60#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
61#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
62#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
63#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
64#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
65
66#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
67
68#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
69#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
70 ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
71#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
72#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
73
74#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
75#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
76 ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
77#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
78#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
79 ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
80#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
81
82#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
83#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
84#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
85#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
86#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
87#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
88#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
89#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
90#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
91#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
92
93static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
94 u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
95 u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
96 u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
97 u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
98 u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
99 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
100 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
101{
102 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
103 u16 q_number_rss_type_flags =
104 le16_to_cpu(desc->q_number_rss_type_flags);
105 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
106
107 cq_desc_dec((struct cq_desc *)desc, type,
108 color, q_number, completed_index);
109
110 *ingress_port = (completed_index_flags &
111 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
112 *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
113 1 : 0;
114 *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
115 1 : 0;
116 *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
117 1 : 0;
118
119 *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
120 CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
121 *csum_not_calc = (q_number_rss_type_flags &
122 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
123
124 *rss_hash = le32_to_cpu(desc->rss_hash);
125
126 *bytes_written = bytes_written_flags &
127 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
128 *packet_error = (bytes_written_flags &
129 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
130 *vlan_stripped = (bytes_written_flags &
131 CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
132
133 *vlan = le16_to_cpu(desc->vlan);
134
135 if (*fcoe) {
136 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
137 CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
138 *fcoe_fc_crc_ok = (desc->flags &
139 CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
140 *fcoe_enc_error = (desc->flags &
141 CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
142 *fcoe_eof = (u8)((desc->checksum_fcoe >>
143 CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
144 CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
145 *checksum = 0;
146 } else {
147 *fcoe_sof = 0;
148 *fcoe_fc_crc_ok = 0;
149 *fcoe_enc_error = 0;
150 *fcoe_eof = 0;
151 *checksum = le16_to_cpu(desc->checksum_fcoe);
152 }
153
154 *tcp_udp_csum_ok =
155 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
156 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
157 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
158 *ipv4_csum_ok =
159 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
160 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
161 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
162 *ipv4_fragment =
163 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
164 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
165}
166
167#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
new file mode 100644
index 000000000000..501660cfe228
--- /dev/null
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_EXCH_DESC_H_
19#define _CQ_EXCH_DESC_H_
20
21#include "cq_desc.h"
22
23/* Exchange completion queue descriptor: 16B */
24struct cq_exch_wq_desc {
25 u16 completed_index;
26 u16 q_number;
27 u16 exchange_id;
28 u8 tmpl;
29 u8 reserved0;
30 u32 reserved1;
31 u8 exch_status;
32 u8 reserved2[2];
33 u8 type_color;
34};
35
36#define CQ_EXCH_WQ_STATUS_BITS 2
37#define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)
38
39enum cq_exch_status_types {
40 CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0,
41 CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1,
42 CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2,
43 CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3,
44};
45
46static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr,
47 u8 *type,
48 u8 *color,
49 u16 *q_number,
50 u16 *completed_index,
51 u8 *exch_status)
52{
53 cq_desc_dec((struct cq_desc *)desc_ptr, type,
54 color, q_number, completed_index);
55 *exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK;
56}
57
58struct cq_fcp_rq_desc {
59 u16 completed_index_eop_sop_prt;
60 u16 q_number;
61 u16 exchange_id;
62 u16 tmpl;
63 u16 bytes_written;
64 u16 vlan;
65 u8 sof;
66 u8 eof;
67 u8 fcs_fer_fck;
68 u8 type_color;
69};
70
71#define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15)
72#define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14)
73#define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12)
74#define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f
75#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff
76#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14
77#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
78#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15
79#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
80#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1
81#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1
82#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
83#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7
84#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)
85
86static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
87 u8 *type,
88 u8 *color,
89 u16 *q_number,
90 u16 *completed_index,
91 u8 *eop,
92 u8 *sop,
93 u8 *fck,
94 u16 *exchange_id,
95 u16 *tmpl,
96 u32 *bytes_written,
97 u8 *sof,
98 u8 *eof,
99 u8 *ingress_port,
100 u8 *packet_err,
101 u8 *fcoe_err,
102 u8 *fcs_ok,
103 u8 *vlan_stripped,
104 u16 *vlan)
105{
106 cq_desc_dec((struct cq_desc *)desc_ptr, type,
107 color, q_number, completed_index);
108 *eop = (desc_ptr->completed_index_eop_sop_prt &
109 CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
110 *sop = (desc_ptr->completed_index_eop_sop_prt &
111 CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
112 *ingress_port =
113 (desc_ptr->completed_index_eop_sop_prt &
114 CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
115 *exchange_id = desc_ptr->exchange_id;
116 *tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
117 *bytes_written =
118 desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
119 *packet_err =
120 (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
121 CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
122 *vlan_stripped =
123 (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
124 CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
125 *vlan = desc_ptr->vlan;
126 *sof = desc_ptr->sof;
127 *fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
128 *fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
129 CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
130 *eof = desc_ptr->eof;
131 *fcs_ok =
132 (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
133 CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
134}
135
136struct cq_sgl_desc {
137 u16 exchange_id;
138 u16 q_number;
139 u32 active_burst_offset;
140 u32 tot_data_bytes;
141 u16 tmpl;
142 u8 sgl_err;
143 u8 type_color;
144};
145
146enum cq_sgl_err_types {
147 CQ_SGL_ERR_NO_ERROR = 0,
148 CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */
149 CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/
150 CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */
151 CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */
152 CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */
153 CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */
154 CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */
155 CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */
156 CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */
157};
158
159#define CQ_SGL_SGL_ERR_MASK 0x1f
160#define CQ_SGL_TMPL_MASK 0x1f
161
162static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr,
163 u8 *type,
164 u8 *color,
165 u16 *q_number,
166 u16 *exchange_id,
167 u32 *active_burst_offset,
168 u32 *tot_data_bytes,
169 u16 *tmpl,
170 u8 *sgl_err)
171{
172 /* Cheat a little by assuming exchange_id is the same as completed
173 index */
174 cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number,
175 exchange_id);
176 *active_burst_offset = desc_ptr->active_burst_offset;
177 *tot_data_bytes = desc_ptr->tot_data_bytes;
178 *tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK;
179 *sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK;
180}
181
182#endif /* _CQ_EXCH_DESC_H_ */
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
new file mode 100644
index 000000000000..12d770d885c5
--- /dev/null
+++ b/drivers/scsi/fnic/fcpio.h
@@ -0,0 +1,780 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FCPIO_H_
19#define _FCPIO_H_
20
21#include <linux/if_ether.h>
22
23/*
24 * This header file includes all of the data structures used for
 25 * communication between the host driver and the fcp firmware.
26 */
27
28/*
29 * Exchange and sequence id space allocated to the host driver
30 */
31#define FCPIO_HOST_EXCH_RANGE_START 0x1000
32#define FCPIO_HOST_EXCH_RANGE_END 0x1fff
33#define FCPIO_HOST_SEQ_ID_RANGE_START 0x80
34#define FCPIO_HOST_SEQ_ID_RANGE_END 0xff
35
36/*
37 * Command entry type
38 */
39enum fcpio_type {
40 /*
41 * Initiator request types
42 */
43 FCPIO_ICMND_16 = 0x1,
44 FCPIO_ICMND_32,
45 FCPIO_ICMND_CMPL,
46 FCPIO_ITMF,
47 FCPIO_ITMF_CMPL,
48
49 /*
50 * Target request types
51 */
52 FCPIO_TCMND_16 = 0x11,
53 FCPIO_TCMND_32,
54 FCPIO_TDATA,
55 FCPIO_TXRDY,
56 FCPIO_TRSP,
57 FCPIO_TDRSP_CMPL,
58 FCPIO_TTMF,
59 FCPIO_TTMF_ACK,
60 FCPIO_TABORT,
61 FCPIO_TABORT_CMPL,
62
63 /*
64 * Misc request types
65 */
66 FCPIO_ACK = 0x20,
67 FCPIO_RESET,
68 FCPIO_RESET_CMPL,
69 FCPIO_FLOGI_REG,
70 FCPIO_FLOGI_REG_CMPL,
71 FCPIO_ECHO,
72 FCPIO_ECHO_CMPL,
73 FCPIO_LUNMAP_CHNG,
74 FCPIO_LUNMAP_REQ,
75 FCPIO_LUNMAP_REQ_CMPL,
76 FCPIO_FLOGI_FIP_REG,
77 FCPIO_FLOGI_FIP_REG_CMPL,
78};
79
80/*
81 * Header status codes from the firmware
82 */
83enum fcpio_status {
84 FCPIO_SUCCESS = 0, /* request was successful */
85
86 /*
87 * If a request to the firmware is rejected, the original request
88 * header will be returned with the status set to one of the following:
89 */
90 FCPIO_INVALID_HEADER, /* header contains invalid data */
91 FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */
92 FCPIO_INVALID_PARAM, /* some parameter in request is invalid */
93 FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */
94 FCPIO_IO_NOT_FOUND, /* requested I/O was not found */
95
96 /*
97 * Once a request is processed, the firmware will usually return
98 * a cmpl message type. In cases where errors occurred,
99 * the header status field would be filled in with one of the following:
100 */
101 FCPIO_ABORTED = 0x41, /* request was aborted */
102 FCPIO_TIMEOUT, /* request was timed out */
103 FCPIO_SGL_INVALID, /* request was aborted due to sgl error */
104 FCPIO_MSS_INVALID, /* request was aborted due to mss error */
105 FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */
106 FCPIO_FW_ERR, /* request was terminated due to fw error */
107 FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */
108 FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */
109 FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */
110 FCPIO_CMND_REJECTED, /* request was invalid and rejected */
 111 FCPIO_NO_PATH_AVAIL, /* no path to the LUN was available */
112 FCPIO_PATH_FAILED, /* i/o sent to current path failed */
113 FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */
114};
115
116/*
117 * The header command tag. All host requests will use the "tag" field
118 * to mark commands with a unique tag. When the firmware responds to
119 * a host request, it will copy the tag field into the response.
120 *
121 * The only firmware requests that will use the rx_id/ox_id fields instead
122 * of the tag field will be the target command and target task management
123 * requests. These two requests do not have corresponding host requests
124 * since they come directly from the FC initiator on the network.
125 */
126struct fcpio_tag {
127 union {
128 u32 req_id;
129 struct {
130 u16 rx_id;
131 u16 ox_id;
132 } ex_id;
133 } u;
134};
135
136static inline void
137fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id)
138{
139 tag->u.req_id = id;
140}
141
142static inline void
143fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id)
144{
145 *id = tag->u.req_id;
146}
147
148static inline void
149fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id)
150{
151 tag->u.ex_id.rx_id = rx_id;
152 tag->u.ex_id.ox_id = ox_id;
153}
154
155static inline void
156fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id)
157{
158 *rx_id = tag->u.ex_id.rx_id;
159 *ox_id = tag->u.ex_id.ox_id;
160}
161
162/*
163 * The header for an fcpio request, whether from the firmware or from the
164 * host driver
165 */
166struct fcpio_header {
167 u8 type; /* enum fcpio_type */
168 u8 status; /* header status entry */
169 u16 _resvd; /* reserved */
170 struct fcpio_tag tag; /* header tag */
171};
172
173static inline void
174fcpio_header_enc(struct fcpio_header *hdr,
175 u8 type, u8 status,
176 struct fcpio_tag tag)
177{
178 hdr->type = type;
179 hdr->status = status;
180 hdr->_resvd = 0;
181 hdr->tag = tag;
182}
183
184static inline void
185fcpio_header_dec(struct fcpio_header *hdr,
186 u8 *type, u8 *status,
187 struct fcpio_tag *tag)
188{
189 *type = hdr->type;
190 *status = hdr->status;
191 *tag = hdr->tag;
192}
193
194#define CDB_16 16
195#define CDB_32 32
196#define LUN_ADDRESS 8
197
198/*
199 * fcpio_icmnd_16: host -> firmware request
200 *
201 * used for sending out an initiator SCSI 16-byte command
202 */
203struct fcpio_icmnd_16 {
204 u32 lunmap_id; /* index into lunmap table */
205 u8 special_req_flags; /* special exchange request flags */
206 u8 _resvd0[3]; /* reserved */
207 u32 sgl_cnt; /* scatter-gather list count */
208 u32 sense_len; /* sense buffer length */
209 u64 sgl_addr; /* scatter-gather list addr */
210 u64 sense_addr; /* sense buffer address */
211 u8 crn; /* SCSI Command Reference No. */
212 u8 pri_ta; /* SCSI Priority and Task attribute */
213 u8 _resvd1; /* reserved: should be 0 */
214 u8 flags; /* command flags */
215 u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
216 u32 data_len; /* length of data expected */
217 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
218 u8 _resvd2; /* reserved */
219 u8 d_id[3]; /* FC vNIC only: Target D_ID */
220 u16 mss; /* FC vNIC only: max burst */
221 u16 _resvd3; /* reserved */
222 u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
223 u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */
224};
225
226/*
227 * Special request flags
228 */
229#define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */
230
231/*
232 * Priority/Task Attribute settings
233 */
234#define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */
235#define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */
236#define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */
237#define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */
238#define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
239
240/*
241 * Command flags
242 */
243#define FCPIO_ICMND_RDDATA 0x02 /* read data */
244#define FCPIO_ICMND_WRDATA 0x01 /* write data */
245
246/*
247 * fcpio_icmnd_32: host -> firmware request
248 *
249 * used for sending out an initiator SCSI 32-byte command
250 */
251struct fcpio_icmnd_32 {
252 u32 lunmap_id; /* index into lunmap table */
253 u8 special_req_flags; /* special exchange request flags */
254 u8 _resvd0[3]; /* reserved */
255 u32 sgl_cnt; /* scatter-gather list count */
256 u32 sense_len; /* sense buffer length */
257 u64 sgl_addr; /* scatter-gather list addr */
258 u64 sense_addr; /* sense buffer address */
259 u8 crn; /* SCSI Command Reference No. */
260 u8 pri_ta; /* SCSI Priority and Task attribute */
261 u8 _resvd1; /* reserved: should be 0 */
262 u8 flags; /* command flags */
263 u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
264 u32 data_len; /* length of data expected */
265 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
266 u8 _resvd2; /* reserved */
267 u8 d_id[3]; /* FC vNIC only: Target D_ID */
268 u16 mss; /* FC vNIC only: max burst */
269 u16 _resvd3; /* reserved */
270 u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
271 u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */
272};
273
274/*
275 * fcpio_itmf: host -> firmware request
276 *
277 * used for requesting the firmware to abort a request and/or send out
278 * a task management function
279 *
280 * The t_tag field is only needed when the request type is ABT_TASK.
281 */
282struct fcpio_itmf {
283 u32 lunmap_id; /* index into lunmap table */
284 u32 tm_req; /* SCSI Task Management request */
285 u32 t_tag; /* header tag of fcpio to be aborted */
 286 u32 _resvd; /* reserved */
287 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
288 u8 _resvd1; /* reserved */
289 u8 d_id[3]; /* FC vNIC only: Target D_ID */
290 u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */
291 u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */
292};
293
294/*
295 * Task Management request
296 */
297enum fcpio_itmf_tm_req_type {
298 FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */
299 FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */
300 FCPIO_ITMF_ABT_TASK_SET, /* abort task set */
301 FCPIO_ITMF_CLR_TASK_SET, /* clear task set */
302 FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */
303 FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */
304};
305
306/*
307 * fcpio_tdata: host -> firmware request
308 *
309 * used for requesting the firmware to send out a read data transfer for a
310 * target command
311 */
312struct fcpio_tdata {
313 u16 rx_id; /* FC rx_id of target command */
314 u16 flags; /* command flags */
315 u32 rel_offset; /* data sequence relative offset */
316 u32 sgl_cnt; /* scatter-gather list count */
317 u32 data_len; /* length of data expected to send */
318 u64 sgl_addr; /* scatter-gather list address */
319};
320
321/*
322 * Command flags
323 */
324#define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. after last frame */
325
326/*
327 * fcpio_txrdy: host -> firmware request
328 *
329 * used for requesting the firmware to send out a write data transfer for a
330 * target command
331 */
332struct fcpio_txrdy {
333 u16 rx_id; /* FC rx_id of target command */
334 u16 _resvd0; /* reserved */
335 u32 rel_offset; /* data sequence relative offset */
336 u32 sgl_cnt; /* scatter-gather list count */
337 u32 data_len; /* length of data expected to send */
338 u64 sgl_addr; /* scatter-gather list address */
339};
340
341/*
342 * fcpio_trsp: host -> firmware request
343 *
344 * used for requesting the firmware to send out a response for a target
345 * command
346 */
347struct fcpio_trsp {
348 u16 rx_id; /* FC rx_id of target command */
349 u16 _resvd0; /* reserved */
350 u32 sense_len; /* sense data buffer length */
351 u64 sense_addr; /* sense data buffer address */
352 u16 _resvd1; /* reserved */
353 u8 flags; /* response request flags */
354 u8 scsi_status; /* SCSI status */
355 u32 residual; /* SCSI data residual value of I/O */
356};
357
358/*
 359 * response request flags
360 */
361#define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */
362#define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */
363
364/*
365 * fcpio_ttmf_ack: host -> firmware response
366 *
367 * used by the host to indicate to the firmware it has received and processed
368 * the target tmf request
369 */
370struct fcpio_ttmf_ack {
371 u16 rx_id; /* FC rx_id of target command */
372 u16 _resvd0; /* reserved */
373 u32 tmf_status; /* SCSI task management status */
374};
375
376/*
377 * fcpio_tabort: host -> firmware request
378 *
379 * used by the host to request the firmware to abort a target request that was
380 * received by the firmware
381 */
382struct fcpio_tabort {
383 u16 rx_id; /* rx_id of the target request */
384};
385
386/*
387 * fcpio_reset: host -> firmware request
388 *
389 * used by the host to signal a reset of the driver to the firmware
390 * and to request firmware to clean up all outstanding I/O
391 */
392struct fcpio_reset {
393 u32 _resvd;
394};
395
396enum fcpio_flogi_reg_format_type {
397 FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */
398 FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */
399};
400
401/*
402 * fcpio_flogi_reg: host -> firmware request
403 *
404 * fc vnic only
405 * used by the host to notify the firmware of the lif's s_id
406 * and destination mac address format
407 */
408struct fcpio_flogi_reg {
409 u8 format;
410 u8 s_id[3]; /* FC vNIC only: Source S_ID */
411 u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */
412 u16 _resvd;
413 u32 r_a_tov; /* R_A_TOV in msec */
414 u32 e_d_tov; /* E_D_TOV in msec */
415};
416
417/*
418 * fcpio_echo: host -> firmware request
419 *
420 * sends a heartbeat echo request to the firmware
421 */
422struct fcpio_echo {
423 u32 _resvd;
424};
425
426/*
427 * fcpio_lunmap_req: host -> firmware request
428 *
429 * scsi vnic only
430 * sends a request to retrieve the lunmap table for scsi vnics
431 */
432struct fcpio_lunmap_req {
433 u64 addr; /* address of the buffer */
434 u32 len; /* len of the buffer */
435};
436
437/*
438 * fcpio_flogi_fip_reg: host -> firmware request
439 *
440 * fc vnic only
441 * used by the host to notify the firmware of the lif's s_id
442 * and destination mac address format
443 */
444struct fcpio_flogi_fip_reg {
445 u8 _resvd0;
446 u8 s_id[3]; /* FC vNIC only: Source S_ID */
447 u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */
448 u16 _resvd1;
449 u32 r_a_tov; /* R_A_TOV in msec */
450 u32 e_d_tov; /* E_D_TOV in msec */
451 u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */
452 u16 _resvd2;
453};
454
455/*
456 * Basic structure for all fcpio structures that are sent from the host to the
457 * firmware. They are 128 bytes per structure.
458 */
459#define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */
460
461struct fcpio_host_req {
462 struct fcpio_header hdr;
463
464 union {
465 /*
466 * Defines space needed for request
467 */
468 u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)];
469
470 /*
471 * Initiator host requests
472 */
473 struct fcpio_icmnd_16 icmnd_16;
474 struct fcpio_icmnd_32 icmnd_32;
475 struct fcpio_itmf itmf;
476
477 /*
478 * Target host requests
479 */
480 struct fcpio_tdata tdata;
481 struct fcpio_txrdy txrdy;
482 struct fcpio_trsp trsp;
483 struct fcpio_ttmf_ack ttmf_ack;
484 struct fcpio_tabort tabort;
485
486 /*
487 * Misc requests
488 */
489 struct fcpio_reset reset;
490 struct fcpio_flogi_reg flogi_reg;
491 struct fcpio_echo echo;
492 struct fcpio_lunmap_req lunmap_req;
493 struct fcpio_flogi_fip_reg flogi_fip_reg;
494 } u;
495};
496
497/*
498 * fcpio_icmnd_cmpl: firmware -> host response
499 *
500 * used for sending the host a response to an initiator command
501 */
502struct fcpio_icmnd_cmpl {
503 u8 _resvd0[6]; /* reserved */
504 u8 flags; /* response flags */
505 u8 scsi_status; /* SCSI status */
506 u32 residual; /* SCSI data residual length */
507 u32 sense_len; /* SCSI sense length */
508};
509
510/*
511 * response flags
512 */
513#define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */
514#define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */
515
516/*
517 * fcpio_itmf_cmpl: firmware -> host response
518 *
519 * used for sending the host a response for a itmf request
520 */
521struct fcpio_itmf_cmpl {
522 u32 _resvd; /* reserved */
523};
524
525/*
526 * fcpio_tcmnd_16: firmware -> host request
527 *
528 * used by the firmware to notify the host of an incoming target SCSI 16-Byte
529 * request
530 */
531struct fcpio_tcmnd_16 {
532 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
533 u8 crn; /* SCSI Command Reference No. */
534 u8 pri_ta; /* SCSI Priority and Task attribute */
535 u8 _resvd2; /* reserved: should be 0 */
536 u8 flags; /* command flags */
537 u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
538 u32 data_len; /* length of data expected */
539 u8 _resvd1; /* reserved */
540 u8 s_id[3]; /* FC vNIC only: Source S_ID */
541};
542
543/*
544 * Priority/Task Attribute settings
545 */
546#define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */
547#define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */
548#define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */
549#define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */
550#define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
551
552/*
553 * Command flags
554 */
555#define FCPIO_TCMND_RDDATA 0x02 /* read data */
556#define FCPIO_TCMND_WRDATA 0x01 /* write data */
557
558/*
559 * fcpio_tcmnd_32: firmware -> host request
560 *
561 * used by the firmware to notify the host of an incoming target SCSI 32-Byte
562 * request
563 */
564struct fcpio_tcmnd_32 {
565 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
566 u8 crn; /* SCSI Command Reference No. */
567 u8 pri_ta; /* SCSI Priority and Task attribute */
568 u8 _resvd2; /* reserved: should be 0 */
569 u8 flags; /* command flags */
570 u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
571 u32 data_len; /* length of data expected */
572 u8 _resvd0; /* reserved */
573 u8 s_id[3]; /* FC vNIC only: Source S_ID */
574};
575
576/*
577 * fcpio_tdrsp_cmpl: firmware -> host response
578 *
579 * used by the firmware to notify the host of a response to a host target
580 * command
581 */
582struct fcpio_tdrsp_cmpl {
583 u16 rx_id; /* rx_id of the target request */
584 u16 _resvd0; /* reserved */
585};
586
587/*
588 * fcpio_ttmf: firmware -> host request
589 *
590 * used by the firmware to notify the host of an incoming task management
591 * function request
592 */
593struct fcpio_ttmf {
594 u8 _resvd0; /* reserved */
595 u8 s_id[3]; /* FC vNIC only: Source S_ID */
596 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
597 u8 crn; /* SCSI Command Reference No. */
598 u8 _resvd2[3]; /* reserved */
599 u32 tmf_type; /* task management request type */
600};
601
602/*
603 * Task Management request
604 */
605#define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */
606#define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */
607#define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */
608#define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */
609#define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */
610
611/*
612 * fcpio_tabort_cmpl: firmware -> host response
613 *
614 * used by the firmware to respond to a host's tabort request
615 */
616struct fcpio_tabort_cmpl {
617 u16 rx_id; /* rx_id of the target request */
618 u16 _resvd0; /* reserved */
619};
620
621/*
622 * fcpio_ack: firmware -> host response
623 *
624 * used by firmware to notify the host of the last work request received
625 */
626struct fcpio_ack {
627 u16 request_out; /* last host entry received */
628 u16 _resvd;
629};
630
631/*
632 * fcpio_reset_cmpl: firmware -> host response
633 *
634 * use by firmware to respond to the host's reset request
635 */
636struct fcpio_reset_cmpl {
637 u16 vnic_id;
638};
639
640/*
641 * fcpio_flogi_reg_cmpl: firmware -> host response
642 *
643 * fc vnic only
644 * response to the fcpio_flogi_reg request
645 */
646struct fcpio_flogi_reg_cmpl {
647 u32 _resvd;
648};
649
650/*
651 * fcpio_echo_cmpl: firmware -> host response
652 *
653 * response to the fcpio_echo request
654 */
655struct fcpio_echo_cmpl {
656 u32 _resvd;
657};
658
659/*
660 * fcpio_lunmap_chng: firmware -> host notification
661 *
662 * scsi vnic only
663 * notifies the host that the lunmap tables have changed
664 */
665struct fcpio_lunmap_chng {
666 u32 _resvd;
667};
668
669/*
670 * fcpio_lunmap_req_cmpl: firmware -> host response
671 *
672 * scsi vnic only
673 * response for lunmap table request from the host
674 */
675struct fcpio_lunmap_req_cmpl {
676 u32 _resvd;
677};
678
679/*
680 * Basic structure for all fcpio structures that are sent from the firmware to
681 * the host. They are 64 bytes per structure.
682 */
683#define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */
684struct fcpio_fw_req {
685 struct fcpio_header hdr;
686
687 union {
688 /*
689 * Defines space needed for request
690 */
691 u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)];
692
693 /*
694 * Initiator firmware responses
695 */
696 struct fcpio_icmnd_cmpl icmnd_cmpl;
697 struct fcpio_itmf_cmpl itmf_cmpl;
698
699 /*
700 * Target firmware new requests
701 */
702 struct fcpio_tcmnd_16 tcmnd_16;
703 struct fcpio_tcmnd_32 tcmnd_32;
704
705 /*
706 * Target firmware responses
707 */
708 struct fcpio_tdrsp_cmpl tdrsp_cmpl;
709 struct fcpio_ttmf ttmf;
710 struct fcpio_tabort_cmpl tabort_cmpl;
711
712 /*
713 * Firmware response to work received
714 */
715 struct fcpio_ack ack;
716
717 /*
718 * Misc requests
719 */
720 struct fcpio_reset_cmpl reset_cmpl;
721 struct fcpio_flogi_reg_cmpl flogi_reg_cmpl;
722 struct fcpio_echo_cmpl echo_cmpl;
723 struct fcpio_lunmap_chng lunmap_chng;
724 struct fcpio_lunmap_req_cmpl lunmap_req_cmpl;
725 } u;
726};
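Correctness downstream depends on the buf[] member padding every firmware request out to exactly FCPIO_FW_REQ_LEN bytes. A hedged sketch of a compile-time check (the driver itself does not carry this assertion):

/* Fails to compile if fcpio_fw_req ever deviates from 64 bytes */
typedef char fcpio_fw_req_size_check
	[(sizeof(struct fcpio_fw_req) == FCPIO_FW_REQ_LEN) ? 1 : -1];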
727
728/*
729 * Access routines to encode and decode the color bit, which is the most
730 * significant bit of the MSB of the structure
731 */
732static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color)
733{
734 u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
735
736 if (color)
737 *c |= 0x80;
738 else
739 *c &= ~0x80;
740}
741
742static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color)
743{
744 u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
745
746 *color = *c >> 7;
747
748 /*
749 * Make sure color bit is read from desc *before* other fields
 750 * are read from desc. Hardware guarantees the color bit is in
 751 * the last byte written. Adding the rmb() prevents the compiler
752 * and/or CPU from reordering the reads which would potentially
753 * result in reading stale values.
754 */
755
756 rmb();
757
758}
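The color bit lets the host detect fresh completions without a shared producer index: the firmware flips the color it writes on each pass around the ring, so an entry is new only while its color matches the pass the host is currently on. A standalone sketch of that consumer loop, using a 4-entry ring and simplified descriptors (assumed sizes, not the driver's types):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DESC_LEN	64		/* matches FCPIO_FW_REQ_LEN */
#define RING_SIZE	4		/* assumed, for illustration */

struct desc { uint8_t raw[DESC_LEN]; };

static int desc_color(const struct desc *d)
{
	return d->raw[DESC_LEN - 1] >> 7;	/* MSB of the last byte */
}

int main(void)
{
	struct desc ring[RING_SIZE];
	unsigned int idx = 0;
	int expected = 1;			/* color of the current pass */

	memset(ring, 0, sizeof(ring));		/* initial color is 0 */
	ring[0].raw[DESC_LEN - 1] |= 0x80;	/* "firmware" posts one entry */

	/* Service entries until the color stops matching; a real consumer
	 * would also issue rmb() here, as fcpio_color_dec() does. */
	while (desc_color(&ring[idx]) == expected) {
		printf("servicing descriptor %u\n", idx);
		if (++idx == RING_SIZE) {	/* wrapped: expect a flip */
			idx = 0;
			expected ^= 1;
		}
	}
	return 0;
}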
759
760/*
761 * Lunmap table entry for scsi vnics
762 */
763#define FCPIO_LUNMAP_TABLE_SIZE 256
764#define FCPIO_FLAGS_LUNMAP_VALID 0x80
765#define FCPIO_FLAGS_BOOT 0x01
766struct fcpio_lunmap_entry {
767 u8 bus;
768 u8 target;
769 u8 lun;
770 u8 path_cnt;
771 u16 flags;
772 u16 update_cnt;
773};
774
775struct fcpio_lunmap_tbl {
776 u32 update_cnt;
777 struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE];
778};
779
780#endif /* _FCPIO_H_ */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
new file mode 100644
index 000000000000..e4c0a3d7d87b
--- /dev/null
+++ b/drivers/scsi/fnic/fnic.h
@@ -0,0 +1,265 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_H_
19#define _FNIC_H_
20
21#include <linux/interrupt.h>
22#include <linux/netdevice.h>
23#include <linux/workqueue.h>
24#include <scsi/libfc.h>
25#include "fnic_io.h"
26#include "fnic_res.h"
27#include "vnic_dev.h"
28#include "vnic_wq.h"
29#include "vnic_rq.h"
30#include "vnic_cq.h"
31#include "vnic_wq_copy.h"
32#include "vnic_intr.h"
33#include "vnic_stats.h"
34#include "vnic_scsi.h"
35
36#define DRV_NAME "fnic"
37#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
38#define DRV_VERSION "1.0.0.1121"
39#define PFX DRV_NAME ": "
40#define DFX DRV_NAME "%d: "
41
42#define DESC_CLEAN_LOW_WATERMARK 8
43#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
44#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
45#define FNIC_DFLT_QUEUE_DEPTH 32
46#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
47
48/*
49 * Tag bits used for special requests.
50 */
51#define BIT(nr) (1UL << (nr))
52#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */
53#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */
54#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */
55#define FNIC_NO_TAG -1
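A standalone sketch of composing and decomposing a special tag with these bits; the tag value is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define FNIC_TAG_ABORT	BIT(30)		/* tag bit indicating abort */
#define FNIC_TAG_MASK	(BIT(24) - 1)	/* mask for lookup */

int main(void)
{
	unsigned long tag = 0x1234;		/* arbitrary IO tag */
	unsigned long abort_tag = tag | FNIC_TAG_ABORT;

	/* the low 24 bits index the IO, the high bits say why */
	printf("abort=%d index=0x%lx\n",
	       !!(abort_tag & FNIC_TAG_ABORT),
	       abort_tag & FNIC_TAG_MASK);
	return 0;
}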
56
57/*
58 * Usage of the scsi_cmnd scratchpad.
59 * These fields are locked by the hashed io_req_lock.
60 */
61#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
62#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
63#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
64#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
65#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
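FNIC_IO_LOCKS is a power of two, so a command's tag can be hashed onto one of the spinlocks with a simple mask. A sketch of such a helper (the name and exact hash are assumptions, not part of this header):

static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}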
66
67#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
68
69#define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */
70#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
71#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
72#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
73
74#define FNIC_MAX_FCP_TARGET 256
75
76extern unsigned int fnic_log_level;
77
78#define FNIC_MAIN_LOGGING 0x01
79#define FNIC_FCS_LOGGING 0x02
80#define FNIC_SCSI_LOGGING 0x04
81#define FNIC_ISR_LOGGING 0x08
82
83#define FNIC_CHECK_LOGGING(LEVEL, CMD) \
84do { \
85 if (unlikely(fnic_log_level & LEVEL)) \
86 do { \
87 CMD; \
88 } while (0); \
89} while (0)
90
91#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
92 FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
93 shost_printk(kern_level, host, fmt, ##args);)
94
95#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
96 FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
97 shost_printk(kern_level, host, fmt, ##args);)
98
99#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
100 FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
101 shost_printk(kern_level, host, fmt, ##args);)
102
103#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
104 FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
105 shost_printk(kern_level, host, fmt, ##args);)
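Usage is then a one-liner at each call site; the branch is compiled in but taken only when the matching bit is set in the fnic_log_level module parameter (e.g. loading with fnic_log_level=0x04 enables only the SCSI path). An illustrative call, assuming a scsi_cmnd *sc in scope:

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "tag %d: issuing abort\n", sc->request->tag);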
106
107extern const char *fnic_state_str[];
108
109enum fnic_intx_intr_index {
110 FNIC_INTX_WQ_RQ_COPYWQ,
111 FNIC_INTX_ERR,
112 FNIC_INTX_NOTIFY,
113 FNIC_INTX_INTR_MAX,
114};
115
116enum fnic_msix_intr_index {
117 FNIC_MSIX_RQ,
118 FNIC_MSIX_WQ,
119 FNIC_MSIX_WQ_COPY,
120 FNIC_MSIX_ERR_NOTIFY,
121 FNIC_MSIX_INTR_MAX,
122};
123
124struct fnic_msix_entry {
125 int requested;
126 char devname[IFNAMSIZ];
127 irqreturn_t (*isr)(int, void *);
128 void *devid;
129};
130
131enum fnic_state {
132 FNIC_IN_FC_MODE = 0,
133 FNIC_IN_FC_TRANS_ETH_MODE,
134 FNIC_IN_ETH_MODE,
135 FNIC_IN_ETH_TRANS_FC_MODE,
136};
137
138#define FNIC_WQ_COPY_MAX 1
139#define FNIC_WQ_MAX 1
140#define FNIC_RQ_MAX 1
141#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
142
143struct mempool;
144
145/* Per-instance private data structure */
146struct fnic {
147 struct fc_lport *lport;
148 struct vnic_dev_bar bar0;
149
150 struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
151 struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
152
153 struct vnic_stats *stats;
154 unsigned long stats_time; /* time of stats update */
155 struct vnic_nic_cfg *nic_cfg;
156 char name[IFNAMSIZ];
157 struct timer_list notify_timer; /* used for MSI interrupts */
158
159 unsigned int err_intr_offset;
160 unsigned int link_intr_offset;
161
162 unsigned int wq_count;
163 unsigned int cq_count;
164
165 u32 fcoui_mode:1; /* use fcoui address*/
166 u32 vlan_hw_insert:1; /* let hw insert the tag */
167 u32 in_remove:1; /* fnic device in removal */
168 u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
169
170 struct completion *remove_wait; /* device remove thread blocks */
171
172 struct fc_frame *flogi;
173 struct fc_frame *flogi_resp;
174 u16 flogi_oxid;
175 unsigned long s_id;
176 enum fnic_state state;
177 spinlock_t fnic_lock;
178
179 u16 vlan_id; /* VLAN tag including priority */
180 u8 mac_addr[ETH_ALEN];
181 u8 dest_addr[ETH_ALEN];
182 u8 data_src_addr[ETH_ALEN];
183 u64 fcp_input_bytes; /* internal statistic */
184 u64 fcp_output_bytes; /* internal statistic */
185 u32 link_down_cnt;
186 int link_status;
187
188 struct list_head list;
189 struct pci_dev *pdev;
190 struct vnic_fc_config config;
191 struct vnic_dev *vdev;
192 unsigned int raw_wq_count;
193 unsigned int wq_copy_count;
194 unsigned int rq_count;
195 int fw_ack_index[FNIC_WQ_COPY_MAX];
196 unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX];
197 unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX];
198 unsigned int intr_count;
199 u32 __iomem *legacy_pba;
200 struct fnic_host_tag *tags;
201 mempool_t *io_req_pool;
202 mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
203 spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
204
205 struct work_struct link_work;
206 struct work_struct frame_work;
207 struct sk_buff_head frame_queue;
208
209 /* copy work queue cache line section */
210 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
211 /* completion queue cache line section */
212 ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
213
214 spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX];
215
216 /* work queue cache line section */
217 ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];
218 spinlock_t wq_lock[FNIC_WQ_MAX];
219
220 /* receive queue cache line section */
221 ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
222
223 /* interrupt resource cache line section */
224 ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
225};
226
227extern struct workqueue_struct *fnic_event_queue;
228extern struct device_attribute *fnic_attrs[];
229
230void fnic_clear_intr_mode(struct fnic *fnic);
231int fnic_set_intr_mode(struct fnic *fnic);
232void fnic_free_intr(struct fnic *fnic);
233int fnic_request_intr(struct fnic *fnic);
234
235int fnic_send(struct fc_lport *, struct fc_frame *);
236void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
237void fnic_handle_frame(struct work_struct *work);
238void fnic_handle_link(struct work_struct *work);
239int fnic_rq_cmpl_handler(struct fnic *fnic, int);
240int fnic_alloc_rq_frame(struct vnic_rq *rq);
241void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
242int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp);
243
244int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
245int fnic_abort_cmd(struct scsi_cmnd *);
246int fnic_device_reset(struct scsi_cmnd *);
247int fnic_host_reset(struct scsi_cmnd *);
248int fnic_reset(struct Scsi_Host *);
249void fnic_scsi_cleanup(struct fc_lport *);
250void fnic_scsi_abort_io(struct fc_lport *);
251void fnic_empty_scsi_cleanup(struct fc_lport *);
252void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
253int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
254int fnic_wq_cmpl_handler(struct fnic *fnic, int);
255int fnic_flogi_reg_handler(struct fnic *fnic);
256void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
257 struct fcpio_host_req *desc);
258int fnic_fw_reset_handler(struct fnic *fnic);
259void fnic_terminate_rport_io(struct fc_rport *);
260const char *fnic_state_to_str(unsigned int state);
261
262void fnic_log_q_error(struct fnic *fnic);
263void fnic_handle_link_event(struct fnic *fnic);
264
265#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
new file mode 100644
index 000000000000..aea0c3becfd4
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/string.h>
19#include <linux/device.h>
20#include <scsi/scsi_host.h>
21#include "fnic.h"
22
23static ssize_t fnic_show_state(struct device *dev,
24 struct device_attribute *attr, char *buf)
25{
26 struct fc_lport *lp = shost_priv(class_to_shost(dev));
27 struct fnic *fnic = lport_priv(lp);
28
29 return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
30}
31
32static ssize_t fnic_show_drv_version(struct device *dev,
33 struct device_attribute *attr, char *buf)
34{
35 return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
36}
37
38static ssize_t fnic_show_link_state(struct device *dev,
39 struct device_attribute *attr, char *buf)
40{
41 struct fc_lport *lp = shost_priv(class_to_shost(dev));
42
43 return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
44 ? "Link Up" : "Link Down");
45}
46
47static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
48static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
49static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
50
51struct device_attribute *fnic_attrs[] = {
52 &dev_attr_fnic_state,
53 &dev_attr_drv_version,
54 &dev_attr_link_state,
55 NULL,
56};
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
new file mode 100644
index 000000000000..07e6eedb83ce
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -0,0 +1,742 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/pci.h>
20#include <linux/skbuff.h>
21#include <linux/interrupt.h>
22#include <linux/spinlock.h>
23#include <linux/if_ether.h>
24#include <linux/if_vlan.h>
25#include <linux/workqueue.h>
26#include <scsi/fc/fc_els.h>
27#include <scsi/fc/fc_fcoe.h>
28#include <scsi/fc_frame.h>
29#include <scsi/libfc.h>
30#include "fnic_io.h"
31#include "fnic.h"
32#include "cq_enet_desc.h"
33#include "cq_exch_desc.h"
34
35struct workqueue_struct *fnic_event_queue;
36
37void fnic_handle_link(struct work_struct *work)
38{
39 struct fnic *fnic = container_of(work, struct fnic, link_work);
40 unsigned long flags;
41 int old_link_status;
42 u32 old_link_down_cnt;
43
44 spin_lock_irqsave(&fnic->fnic_lock, flags);
45
46 if (fnic->stop_rx_link_events) {
47 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
48 return;
49 }
50
51 old_link_down_cnt = fnic->link_down_cnt;
52 old_link_status = fnic->link_status;
53 fnic->link_status = vnic_dev_link_status(fnic->vdev);
54 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
55
56 if (old_link_status == fnic->link_status) {
57 if (!fnic->link_status)
58 /* DOWN -> DOWN */
59 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
60 else {
61 if (old_link_down_cnt != fnic->link_down_cnt) {
62 /* UP -> DOWN -> UP */
63 fnic->lport->host_stats.link_failure_count++;
64 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
65 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
66 "link down\n");
67 fc_linkdown(fnic->lport);
68 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
69 "link up\n");
70 fc_linkup(fnic->lport);
71 } else
72 /* UP -> UP */
73 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
74 }
75 } else if (fnic->link_status) {
76 /* DOWN -> UP */
77 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
78 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
79 fc_linkup(fnic->lport);
80 } else {
81 /* UP -> DOWN */
82 fnic->lport->host_stats.link_failure_count++;
83 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
84 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
85 fc_linkdown(fnic->lport);
86 }
87
88}
89
90/*
91 * This function passes incoming fabric frames to libFC
92 */
93void fnic_handle_frame(struct work_struct *work)
94{
95 struct fnic *fnic = container_of(work, struct fnic, frame_work);
96 struct fc_lport *lp = fnic->lport;
97 unsigned long flags;
98 struct sk_buff *skb;
99 struct fc_frame *fp;
100
101 while ((skb = skb_dequeue(&fnic->frame_queue))) {
102
103 spin_lock_irqsave(&fnic->fnic_lock, flags);
104 if (fnic->stop_rx_link_events) {
105 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
106 dev_kfree_skb(skb);
107 return;
108 }
109 fp = (struct fc_frame *)skb;
110 /* if Flogi resp frame, register the address */
111 if (fr_flags(fp)) {
112 vnic_dev_add_addr(fnic->vdev,
113 fnic->data_src_addr);
114 fr_flags(fp) = 0;
115 }
116 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
117
118 fc_exch_recv(lp, lp->emp, fp);
119 }
120
121}
122
123static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
124 u32 len, u8 sof, u8 eof)
125{
126 struct fc_frame *fp = (struct fc_frame *)skb;
127
128 skb_trim(skb, len);
129 fr_eof(fp) = eof;
130 fr_sof(fp) = sof;
131}
132
133
134static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
135{
136 struct fc_frame *fp;
137 struct ethhdr *eh;
138 struct vlan_ethhdr *vh;
139 struct fcoe_hdr *fcoe_hdr;
140 struct fcoe_crc_eof *ft;
141 u32 transport_len = 0;
142
143 eh = (struct ethhdr *)skb->data;
144 vh = (struct vlan_ethhdr *)skb->data;
145 if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
146 vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
147 skb_pull(skb, sizeof(struct vlan_ethhdr));
148 transport_len += sizeof(struct vlan_ethhdr);
149 } else if (eh->h_proto == htons(ETH_P_FCOE)) {
150 transport_len += sizeof(struct ethhdr);
151 skb_pull(skb, sizeof(struct ethhdr));
152 } else
153 return -1;
154
155 fcoe_hdr = (struct fcoe_hdr *)skb->data;
156 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
157 return -1;
158
159 fp = (struct fc_frame *)skb;
160 fc_frame_init(fp);
161 fr_sof(fp) = fcoe_hdr->fcoe_sof;
162 skb_pull(skb, sizeof(struct fcoe_hdr));
163 transport_len += sizeof(struct fcoe_hdr);
164
165 ft = (struct fcoe_crc_eof *)(skb->data + len -
166 transport_len - sizeof(*ft));
167 fr_eof(fp) = ft->fcoe_eof;
168 skb_trim(skb, len - transport_len - sizeof(*ft));
169 return 0;
170}
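The one subtle step above is locating the CRC/EOF trailer: len counts the bytes originally received, but skb->data has already advanced past transport_len bytes of headers, so both have to be subtracted. A standalone sketch of that arithmetic, with assumed header and trailer sizes:

#include <stdint.h>
#include <stdio.h>

struct crc_eof { uint32_t crc32; uint8_t eof; uint8_t resvd[3]; };

int main(void)
{
	uint8_t frame[128] = { 0 };
	uint32_t len = sizeof(frame);		/* bytes received on the wire */
	uint32_t transport_len = 18 + 14;	/* assumed VLAN + FCoE headers */
	uint8_t *data = frame + transport_len;	/* as after skb_pull() */

	struct crc_eof *ft = (struct crc_eof *)
		(data + len - transport_len - sizeof(*ft));

	printf("FC payload bytes: %u, eof at offset %td\n",
	       (unsigned)(len - transport_len - sizeof(*ft)),
	       (uint8_t *)&ft->eof - frame);
	return 0;
}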
171
172static inline int fnic_handle_flogi_resp(struct fnic *fnic,
173 struct fc_frame *fp)
174{
175 u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
176 struct ethhdr *eth_hdr;
177 struct fc_frame_header *fh;
178 int ret = 0;
179 unsigned long flags;
180 struct fc_frame *old_flogi_resp = NULL;
181
182 fh = (struct fc_frame_header *)fr_hdr(fp);
183
184 spin_lock_irqsave(&fnic->fnic_lock, flags);
185
186 if (fnic->state == FNIC_IN_ETH_MODE) {
187
188 /*
189 * Check if oxid matches on taking the lock. A new Flogi
190 * issued by libFC might have changed the fnic cached oxid
191 */
192 if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
193 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
194 "Flogi response oxid not"
195 " matching cached oxid, dropping frame"
196 "\n");
197 ret = -1;
198 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
199 dev_kfree_skb_irq(fp_skb(fp));
200 goto handle_flogi_resp_end;
201 }
202
203 /* Drop older cached flogi response frame, cache this frame */
204 old_flogi_resp = fnic->flogi_resp;
205 fnic->flogi_resp = fp;
206 fnic->flogi_oxid = FC_XID_UNKNOWN;
207
208 /*
 209 * This frame is part of FLOGI: get the source MAC address
 210 * from it. If the source MAC is FC-OUI based, set the address
 211 * mode flag to use FC-OUI based destination MACs; otherwise
 212 * store the FCoE gateway address as the destination
213 */
214 eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
215 memcpy(mac, eth_hdr->h_source, ETH_ALEN);
216
217 if (ntoh24(mac) == FC_FCOE_OUI)
218 fnic->fcoui_mode = 1;
219 else {
220 fnic->fcoui_mode = 0;
221 memcpy(fnic->dest_addr, mac, ETH_ALEN);
222 }
223
224 /*
225 * Except for Flogi frame, all outbound frames from us have the
 226 * Eth Src address as FC_FCOE_OUI concatenated with our S_ID. The
 227 * Flogi frame uses the vnic MAC address as the Eth Src address
228 */
229 fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
230
231 /* We get our s_id from the d_id of the flogi resp frame */
232 fnic->s_id = ntoh24(fh->fh_d_id);
233
234 /* Change state to reflect transition from Eth to FC mode */
235 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
236
237 } else {
238 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
239 "Unexpected fnic state %s while"
240 " processing flogi resp\n",
241 fnic_state_to_str(fnic->state));
242 ret = -1;
243 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
244 dev_kfree_skb_irq(fp_skb(fp));
245 goto handle_flogi_resp_end;
246 }
247
248 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
249
250 /* Drop older cached frame */
251 if (old_flogi_resp)
252 dev_kfree_skb_irq(fp_skb(old_flogi_resp));
253
254 /*
 255 * Send flogi reg request to firmware; this will put the fnic
 256 * in FC mode
257 */
258 ret = fnic_flogi_reg_handler(fnic);
259
260 if (ret < 0) {
261 int free_fp = 1;
262 spin_lock_irqsave(&fnic->fnic_lock, flags);
263 /*
 264 * free the frame if no other thread is
 265 * pointing to it
266 */
267 if (fnic->flogi_resp != fp)
268 free_fp = 0;
269 else
270 fnic->flogi_resp = NULL;
271
272 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
273 fnic->state = FNIC_IN_ETH_MODE;
274 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
275 if (free_fp)
276 dev_kfree_skb_irq(fp_skb(fp));
277 }
278
279 handle_flogi_resp_end:
280 return ret;
281}
282
283/* Returns 1 for a response that matches cached flogi oxid */
284static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
285 struct fc_frame *fp)
286{
287 struct fc_frame_header *fh;
288 int ret = 0;
289 u32 f_ctl;
290
291 fh = fc_frame_header_get(fp);
292 f_ctl = ntoh24(fh->fh_f_ctl);
293
294 if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
295 fh->fh_r_ctl == FC_RCTL_ELS_REP &&
296 (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
297 fh->fh_type == FC_TYPE_ELS)
298 ret = 1;
299
300 return ret;
301}
302
303static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
304 *cq_desc, struct vnic_rq_buf *buf,
305 int skipped __attribute__((unused)),
306 void *opaque)
307{
308 struct fnic *fnic = vnic_dev_priv(rq->vdev);
309 struct sk_buff *skb;
310 struct fc_frame *fp;
311 unsigned int eth_hdrs_stripped;
312 u8 type, color, eop, sop, ingress_port, vlan_stripped;
313 u8 fcoe = 0, fcoe_sof, fcoe_eof;
314 u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
315 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
316 u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
317 u8 fcs_ok = 1, packet_error = 0;
318 u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
319 u32 rss_hash;
320 u16 exchange_id, tmpl;
321 u8 sof = 0;
322 u8 eof = 0;
323 u32 fcp_bytes_written = 0;
324 unsigned long flags;
325
326 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
327 PCI_DMA_FROMDEVICE);
328 skb = buf->os_buf;
329 buf->os_buf = NULL;
330
331 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
332 if (type == CQ_DESC_TYPE_RQ_FCP) {
333 cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
334 &type, &color, &q_number, &completed_index,
335 &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
336 &tmpl, &fcp_bytes_written, &sof, &eof,
337 &ingress_port, &packet_error,
338 &fcoe_enc_error, &fcs_ok, &vlan_stripped,
339 &vlan);
340 eth_hdrs_stripped = 1;
341
342 } else if (type == CQ_DESC_TYPE_RQ_ENET) {
343 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
344 &type, &color, &q_number, &completed_index,
345 &ingress_port, &fcoe, &eop, &sop,
346 &rss_type, &csum_not_calc, &rss_hash,
347 &bytes_written, &packet_error,
348 &vlan_stripped, &vlan, &checksum,
349 &fcoe_sof, &fcoe_fc_crc_ok,
350 &fcoe_enc_error, &fcoe_eof,
351 &tcp_udp_csum_ok, &udp, &tcp,
352 &ipv4_csum_ok, &ipv6, &ipv4,
353 &ipv4_fragment, &fcs_ok);
354 eth_hdrs_stripped = 0;
355
356 } else {
357 /* wrong CQ type*/
358 shost_printk(KERN_ERR, fnic->lport->host,
359 "fnic rq_cmpl wrong cq type x%x\n", type);
360 goto drop;
361 }
362
363 if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
364 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
365 "fnic rq_cmpl fcoe x%x fcsok x%x"
366 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
367 " x%x\n",
368 fcoe, fcs_ok, packet_error,
369 fcoe_fc_crc_ok, fcoe_enc_error);
370 goto drop;
371 }
372
373 if (eth_hdrs_stripped)
374 fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
375 else if (fnic_import_rq_eth_pkt(skb, bytes_written))
376 goto drop;
377
378 fp = (struct fc_frame *)skb;
379
380 /*
381 * If frame is an ELS response that matches the cached FLOGI OX_ID,
 382 * and is an accept, issue a flogi_reg_request copy wq request to firmware
383 * to register the S_ID and determine whether FC_OUI mode or GW mode.
384 */
385 if (is_matching_flogi_resp_frame(fnic, fp)) {
386 if (!eth_hdrs_stripped) {
387 if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
388 fnic_handle_flogi_resp(fnic, fp);
389 return;
390 }
391 /*
392 * Recd. Flogi reject. No point registering
393 * with fw, but forward to libFC
394 */
395 goto forward;
396 }
397 goto drop;
398 }
399 if (!eth_hdrs_stripped)
400 goto drop;
401
402forward:
403 spin_lock_irqsave(&fnic->fnic_lock, flags);
404 if (fnic->stop_rx_link_events) {
405 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
406 goto drop;
407 }
 408 /* Use fr_flags to indicate whether this is a successful flogi resp */
409 fr_flags(fp) = 0;
410 fr_dev(fp) = fnic->lport;
411 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
412
413 skb_queue_tail(&fnic->frame_queue, skb);
414 queue_work(fnic_event_queue, &fnic->frame_work);
415
416 return;
417drop:
418 dev_kfree_skb_irq(skb);
419}
420
421static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
422 struct cq_desc *cq_desc, u8 type,
423 u16 q_number, u16 completed_index,
424 void *opaque)
425{
426 struct fnic *fnic = vnic_dev_priv(vdev);
427
428 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
429 VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
430 NULL);
431 return 0;
432}
433
434int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
435{
436 unsigned int tot_rq_work_done = 0, cur_work_done;
437 unsigned int i;
438 int err;
439
440 for (i = 0; i < fnic->rq_count; i++) {
441 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
442 fnic_rq_cmpl_handler_cont,
443 NULL);
444 if (cur_work_done) {
445 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
446 if (err)
447 shost_printk(KERN_ERR, fnic->lport->host,
448 "fnic_alloc_rq_frame cant alloc"
449 " frame\n");
450 }
451 tot_rq_work_done += cur_work_done;
452 }
453
454 return tot_rq_work_done;
455}
456
457/*
458 * This function is called once at init time to allocate and fill RQ
459 * buffers. Subsequently, it is called in the interrupt context after RQ
460 * buffer processing to replenish the buffers in the RQ
461 */
462int fnic_alloc_rq_frame(struct vnic_rq *rq)
463{
464 struct fnic *fnic = vnic_dev_priv(rq->vdev);
465 struct sk_buff *skb;
466 u16 len;
467 dma_addr_t pa;
468
469 len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
470 skb = dev_alloc_skb(len);
471 if (!skb) {
472 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
473 "Unable to allocate RQ sk_buff\n");
474 return -ENOMEM;
475 }
476 skb_reset_mac_header(skb);
477 skb_reset_transport_header(skb);
478 skb_reset_network_header(skb);
479 skb_put(skb, len);
480 pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
481 fnic_queue_rq_desc(rq, skb, pa, len);
482 return 0;
483}
484
485void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
486{
487 struct fc_frame *fp = buf->os_buf;
488 struct fnic *fnic = vnic_dev_priv(rq->vdev);
489
490 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
491 PCI_DMA_FROMDEVICE);
492
493 dev_kfree_skb(fp_skb(fp));
494 buf->os_buf = NULL;
495}
496
497static inline int is_flogi_frame(struct fc_frame_header *fh)
498{
499 return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
500}
501
502int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
503{
504 struct vnic_wq *wq = &fnic->wq[0];
505 struct sk_buff *skb;
506 dma_addr_t pa;
507 struct ethhdr *eth_hdr;
508 struct vlan_ethhdr *vlan_hdr;
509 struct fcoe_hdr *fcoe_hdr;
510 struct fc_frame_header *fh;
511 u32 tot_len, eth_hdr_len;
512 int ret = 0;
513 unsigned long flags;
514
515 fh = fc_frame_header_get(fp);
516 skb = fp_skb(fp);
517
518 if (!fnic->vlan_hw_insert) {
519 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
520 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
521 eth_hdr = (struct ethhdr *)vlan_hdr;
522 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
523 vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
524 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
525 fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
526 } else {
527 eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
528 eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
529 eth_hdr->h_proto = htons(ETH_P_FCOE);
530 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
531 }
532
533 if (is_flogi_frame(fh)) {
534 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
535 memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
536 } else {
537 if (fnic->fcoui_mode)
538 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
539 else
540 memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
541 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
542 }
543
544 tot_len = skb->len;
545 BUG_ON(tot_len % 4);
546
547 memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
548 fcoe_hdr->fcoe_sof = fr_sof(fp);
549 if (FC_FCOE_VER)
550 FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
551
552 pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
553
554 spin_lock_irqsave(&fnic->wq_lock[0], flags);
555
556 if (!vnic_wq_desc_avail(wq)) {
557 pci_unmap_single(fnic->pdev, pa,
558 tot_len, PCI_DMA_TODEVICE);
559 ret = -1;
560 goto fnic_send_frame_end;
561 }
562
563 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
564 fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
565fnic_send_frame_end:
566 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
567
568 if (ret)
569 dev_kfree_skb_any(fp_skb(fp));
570
571 return ret;
572}
573
574/*
575 * fnic_send
576 * Routine to send a raw frame
577 */
578int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
579{
580 struct fnic *fnic = lport_priv(lp);
581 struct fc_frame_header *fh;
582 int ret = 0;
583 enum fnic_state old_state;
584 unsigned long flags;
585 struct fc_frame *old_flogi = NULL;
586 struct fc_frame *old_flogi_resp = NULL;
587
588 if (fnic->in_remove) {
589 dev_kfree_skb(fp_skb(fp));
590 ret = -1;
591 goto fnic_send_end;
592 }
593
594 fh = fc_frame_header_get(fp);
595 /* if not an Flogi frame, send it out, this is the common case */
596 if (!is_flogi_frame(fh))
597 return fnic_send_frame(fnic, fp);
598
599 /* Flogi frame, now enter the state machine */
600
601 spin_lock_irqsave(&fnic->fnic_lock, flags);
602again:
603 /* Get any old cached frames, free them after dropping lock */
604 old_flogi = fnic->flogi;
605 fnic->flogi = NULL;
606 old_flogi_resp = fnic->flogi_resp;
607 fnic->flogi_resp = NULL;
608
609 fnic->flogi_oxid = FC_XID_UNKNOWN;
610
611 old_state = fnic->state;
612 switch (old_state) {
613 case FNIC_IN_FC_MODE:
614 case FNIC_IN_ETH_TRANS_FC_MODE:
615 default:
616 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
617 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
618 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
619
620 if (old_flogi) {
621 dev_kfree_skb(fp_skb(old_flogi));
622 old_flogi = NULL;
623 }
624 if (old_flogi_resp) {
625 dev_kfree_skb(fp_skb(old_flogi_resp));
626 old_flogi_resp = NULL;
627 }
628
629 ret = fnic_fw_reset_handler(fnic);
630
631 spin_lock_irqsave(&fnic->fnic_lock, flags);
632 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
633 goto again;
634 if (ret) {
635 fnic->state = old_state;
636 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
637 dev_kfree_skb(fp_skb(fp));
638 goto fnic_send_end;
639 }
640 old_flogi = fnic->flogi;
641 fnic->flogi = fp;
642 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
643 old_flogi_resp = fnic->flogi_resp;
644 fnic->flogi_resp = NULL;
645 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
646 break;
647
648 case FNIC_IN_FC_TRANS_ETH_MODE:
649 /*
650 * A reset is pending with the firmware. Store the flogi
651 * and its oxid. The transition out of this state happens
 652 * only when the firmware completes the reset, either with
 653 * success or failure. On success, transition to
 654 * FNIC_IN_ETH_MODE; on failure, transition to
655 * FNIC_IN_FC_MODE
656 */
657 fnic->flogi = fp;
658 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
659 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
660 break;
661
662 case FNIC_IN_ETH_MODE:
663 /*
664 * The fw/hw is already in eth mode. Store the oxid,
665 * and send the flogi frame out. The transition out of this
 666 * state happens only when we receive a flogi response from the
667 * network, and the oxid matches the cached oxid when the
668 * flogi frame was sent out. If they match, then we issue
669 * a flogi_reg request and transition to state
670 * FNIC_IN_ETH_TRANS_FC_MODE
671 */
672 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
673 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
674 ret = fnic_send_frame(fnic, fp);
675 break;
676 }
677
678fnic_send_end:
679 if (old_flogi)
680 dev_kfree_skb(fp_skb(old_flogi));
681 if (old_flogi_resp)
682 dev_kfree_skb(fp_skb(old_flogi_resp));
683 return ret;
684}
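For reference, the FLOGI path walks the four states like this; a compact summary of the transitions described in the comments above (the two completion-driven arcs are finished by the firmware response handlers, not by fnic_send() itself):

/*
 * FC_MODE / ETH_TRANS_FC_MODE --(FLOGI sent, fw reset issued)--> FC_TRANS_ETH_MODE
 * FC_TRANS_ETH_MODE ----------(fw reset completes, success)----> ETH_MODE
 * ETH_MODE -----------------(matching FLOGI accept received)---> ETH_TRANS_FC_MODE
 * ETH_TRANS_FC_MODE ---------------(flogi_reg completes)-------> FC_MODE
 */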
685
686static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
687 struct cq_desc *cq_desc,
688 struct vnic_wq_buf *buf, void *opaque)
689{
690 struct sk_buff *skb = buf->os_buf;
691 struct fc_frame *fp = (struct fc_frame *)skb;
692 struct fnic *fnic = vnic_dev_priv(wq->vdev);
693
694 pci_unmap_single(fnic->pdev, buf->dma_addr,
695 buf->len, PCI_DMA_TODEVICE);
696 dev_kfree_skb_irq(fp_skb(fp));
697 buf->os_buf = NULL;
698}
699
700static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
701 struct cq_desc *cq_desc, u8 type,
702 u16 q_number, u16 completed_index,
703 void *opaque)
704{
705 struct fnic *fnic = vnic_dev_priv(vdev);
706 unsigned long flags;
707
708 spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
709 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
710 fnic_wq_complete_frame_send, NULL);
711 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
712
713 return 0;
714}
715
716int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
717{
718 unsigned int wq_work_done = 0;
719 unsigned int i;
720
721 for (i = 0; i < fnic->raw_wq_count; i++) {
722 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
723 work_to_do,
724 fnic_wq_cmpl_handler_cont,
725 NULL);
726 }
727
728 return wq_work_done;
729}
730
731
732void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
733{
734 struct fc_frame *fp = buf->os_buf;
735 struct fnic *fnic = vnic_dev_priv(wq->vdev);
736
737 pci_unmap_single(fnic->pdev, buf->dma_addr,
738 buf->len, PCI_DMA_TODEVICE);
739
740 dev_kfree_skb(fp_skb(fp));
741 buf->os_buf = NULL;
742}
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
new file mode 100644
index 000000000000..f0b896988cd5
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_IO_H_
19#define _FNIC_IO_H_
20
21#include <scsi/fc/fc_fcp.h>
22
23#define FNIC_DFLT_SG_DESC_CNT 32
24#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
25#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
26
27struct host_sg_desc {
28 __le64 addr;
29 __le32 len;
30 u32 _resvd;
31};
32
33struct fnic_dflt_sgl_list {
34 struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT];
35};
36
37struct fnic_sgl_list {
38 struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT];
39};
40
41enum fnic_sgl_list_type {
42 FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */
43 FNIC_SGL_CACHE_MAX, /* cache with max size sgl */
44 FNIC_SGL_NUM_CACHES /* number of sgl caches */
45};
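A sketch of how an I/O might be steered between the two caches (assumed logic, following the enum's intent rather than quoting the driver):

/* small commands draw a 32-entry list from the default cache,
 * anything larger falls back to the max-size cache */
static inline enum fnic_sgl_list_type fnic_sgl_type(unsigned int sg_count)
{
	return (sg_count <= FNIC_DFLT_SG_DESC_CNT) ?
		FNIC_SGL_CACHE_DFLT : FNIC_SGL_CACHE_MAX;
}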
46
47enum fnic_ioreq_state {
48 FNIC_IOREQ_CMD_PENDING = 0,
49 FNIC_IOREQ_ABTS_PENDING,
50 FNIC_IOREQ_ABTS_COMPLETE,
51 FNIC_IOREQ_CMD_COMPLETE,
52};
53
54struct fnic_io_req {
55 struct host_sg_desc *sgl_list; /* sgl list */
56 void *sgl_list_alloc; /* sgl list address used for free */
57 dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
58 dma_addr_t sgl_list_pa; /* dma address for sgl list */
59 u16 sgl_cnt;
60 u8 sgl_type; /* device DMA descriptor list type */
61 u8 io_completed:1; /* set to 1 when fw completes IO */
62 u32 port_id; /* remote port DID */
63 struct completion *abts_done; /* completion for abts */
64 struct completion *dr_done; /* completion for device reset */
65};
66
67#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
new file mode 100644
index 000000000000..2b3064828aea
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/string.h>
19#include <linux/errno.h>
20#include <linux/pci.h>
21#include <linux/interrupt.h>
22#include <scsi/libfc.h>
23#include <scsi/fc_frame.h>
24#include "vnic_dev.h"
25#include "vnic_intr.h"
26#include "vnic_stats.h"
27#include "fnic_io.h"
28#include "fnic.h"
29
30static irqreturn_t fnic_isr_legacy(int irq, void *data)
31{
32 struct fnic *fnic = data;
33 u32 pba;
34 unsigned long work_done = 0;
35
36 pba = vnic_intr_legacy_pba(fnic->legacy_pba);
37 if (!pba)
38 return IRQ_NONE;
39
40 if (pba & (1 << FNIC_INTX_NOTIFY)) {
41 vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
42 fnic_handle_link_event(fnic);
43 }
44
45 if (pba & (1 << FNIC_INTX_ERR)) {
46 vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
47 fnic_log_q_error(fnic);
48 }
49
50 if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
51 work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
52 work_done += fnic_wq_cmpl_handler(fnic, 4);
53 work_done += fnic_rq_cmpl_handler(fnic, 4);
54
55 vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
56 work_done,
57 1 /* unmask intr */,
58 1 /* reset intr timer */);
59 }
60
61 return IRQ_HANDLED;
62}
63
64static irqreturn_t fnic_isr_msi(int irq, void *data)
65{
66 struct fnic *fnic = data;
67 unsigned long work_done = 0;
68
69 work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
70 work_done += fnic_wq_cmpl_handler(fnic, 4);
71 work_done += fnic_rq_cmpl_handler(fnic, 4);
72
73 vnic_intr_return_credits(&fnic->intr[0],
74 work_done,
75 1 /* unmask intr */,
76 1 /* reset intr timer */);
77
78 return IRQ_HANDLED;
79}
80
81static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
82{
83 struct fnic *fnic = data;
84 unsigned long rq_work_done = 0;
85
86 rq_work_done = fnic_rq_cmpl_handler(fnic, 4);
87 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
88 rq_work_done,
89 1 /* unmask intr */,
90 1 /* reset intr timer */);
91
92 return IRQ_HANDLED;
93}
94
95static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
96{
97 struct fnic *fnic = data;
98 unsigned long wq_work_done = 0;
99
100 wq_work_done = fnic_wq_cmpl_handler(fnic, 4);
101 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
102 wq_work_done,
103 1 /* unmask intr */,
104 1 /* reset intr timer */);
105 return IRQ_HANDLED;
106}
107
108static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
109{
110 struct fnic *fnic = data;
111 unsigned long wq_copy_work_done = 0;
112
113 wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8);
114 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
115 wq_copy_work_done,
116 1 /* unmask intr */,
117 1 /* reset intr timer */);
118 return IRQ_HANDLED;
119}
120
121static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
122{
123 struct fnic *fnic = data;
124
125 vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
126 fnic_log_q_error(fnic);
127 fnic_handle_link_event(fnic);
128
129 return IRQ_HANDLED;
130}
131
132void fnic_free_intr(struct fnic *fnic)
133{
134 int i;
135
136 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
137 case VNIC_DEV_INTR_MODE_INTX:
138 case VNIC_DEV_INTR_MODE_MSI:
139 free_irq(fnic->pdev->irq, fnic);
140 break;
141
142 case VNIC_DEV_INTR_MODE_MSIX:
143 for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
144 if (fnic->msix[i].requested)
145 free_irq(fnic->msix_entry[i].vector,
146 fnic->msix[i].devid);
147 break;
148
149 default:
150 break;
151 }
152}
153
154int fnic_request_intr(struct fnic *fnic)
155{
156 int err = 0;
157 int i;
158
159 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
160
161 case VNIC_DEV_INTR_MODE_INTX:
162 err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
163 IRQF_SHARED, DRV_NAME, fnic);
164 break;
165
166 case VNIC_DEV_INTR_MODE_MSI:
167 err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
168 0, fnic->name, fnic);
169 break;
170
171 case VNIC_DEV_INTR_MODE_MSIX:
172
173 sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
174 "%.11s-fcs-rq", fnic->name);
175 fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
176 fnic->msix[FNIC_MSIX_RQ].devid = fnic;
177
178 sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
179 "%.11s-fcs-wq", fnic->name);
180 fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
181 fnic->msix[FNIC_MSIX_WQ].devid = fnic;
182
183 sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
184 "%.11s-scsi-wq", fnic->name);
185 fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
186 fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
187
188 sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
189 "%.11s-err-notify", fnic->name);
190 fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
191 fnic_isr_msix_err_notify;
192 fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
193
194 for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
195 err = request_irq(fnic->msix_entry[i].vector,
196 fnic->msix[i].isr, 0,
197 fnic->msix[i].devname,
198 fnic->msix[i].devid);
199 if (err) {
200 shost_printk(KERN_ERR, fnic->lport->host,
201 "MSIX: request_irq"
202 " failed %d\n", err);
203 fnic_free_intr(fnic);
204 break;
205 }
206 fnic->msix[i].requested = 1;
207 }
208 break;
209
210 default:
211 break;
212 }
213
214 return err;
215}
216
217int fnic_set_intr_mode(struct fnic *fnic)
218{
219 unsigned int n = ARRAY_SIZE(fnic->rq);
220 unsigned int m = ARRAY_SIZE(fnic->wq);
221 unsigned int o = ARRAY_SIZE(fnic->wq_copy);
222 unsigned int i;
223
224 /*
 225 * Set interrupt mode (INTx, MSI, MSI-X) depending on
 226 * the system capabilities.
227 *
228 * Try MSI-X first
229 *
230 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
231 * (last INTR is used for WQ/RQ errors and notification area)
232 */
233
234 BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
235 for (i = 0; i < n + m + o + 1; i++)
236 fnic->msix_entry[i].entry = i;
237
238 if (fnic->rq_count >= n &&
239 fnic->raw_wq_count >= m &&
240 fnic->wq_copy_count >= o &&
241 fnic->cq_count >= n + m + o) {
242 if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
243 n + m + o + 1)) {
244 fnic->rq_count = n;
245 fnic->raw_wq_count = m;
246 fnic->wq_copy_count = o;
247 fnic->wq_count = m + o;
248 fnic->cq_count = n + m + o;
249 fnic->intr_count = n + m + o + 1;
250 fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
251
252 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
253 "Using MSI-X Interrupts\n");
254 vnic_dev_set_intr_mode(fnic->vdev,
255 VNIC_DEV_INTR_MODE_MSIX);
256 return 0;
257 }
258 }
259
260 /*
261 * Next try MSI
262 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
263 */
264 if (fnic->rq_count >= 1 &&
265 fnic->raw_wq_count >= 1 &&
266 fnic->wq_copy_count >= 1 &&
267 fnic->cq_count >= 3 &&
268 fnic->intr_count >= 1 &&
269 !pci_enable_msi(fnic->pdev)) {
270
271 fnic->rq_count = 1;
272 fnic->raw_wq_count = 1;
273 fnic->wq_copy_count = 1;
274 fnic->wq_count = 2;
275 fnic->cq_count = 3;
276 fnic->intr_count = 1;
277 fnic->err_intr_offset = 0;
278
279 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
280 "Using MSI Interrupts\n");
281 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
282
283 return 0;
284 }
285
286 /*
287 * Next try INTx
288 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
289 * 1 INTR is used for all 3 queues, 1 INTR for queue errors
290 * 1 INTR for notification area
291 */
292
293 if (fnic->rq_count >= 1 &&
294 fnic->raw_wq_count >= 1 &&
295 fnic->wq_copy_count >= 1 &&
296 fnic->cq_count >= 3 &&
297 fnic->intr_count >= 3) {
298
299 fnic->rq_count = 1;
300 fnic->raw_wq_count = 1;
301 fnic->wq_copy_count = 1;
302 fnic->cq_count = 3;
303 fnic->intr_count = 3;
304
305 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
306 "Using Legacy Interrupts\n");
307 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
308
309 return 0;
310 }
311
312 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
313
314 return -EINVAL;
315}
316
317void fnic_clear_intr_mode(struct fnic *fnic)
318{
319 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
320 case VNIC_DEV_INTR_MODE_MSIX:
321 pci_disable_msix(fnic->pdev);
322 break;
323 case VNIC_DEV_INTR_MODE_MSI:
324 pci_disable_msi(fnic->pdev);
325 break;
326 default:
327 break;
328 }
329
330 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
331}
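Taken together, a caller (typically the probe path) would use these entry points roughly as follows; this is a fragment sketch and the error label is hypothetical:

	err = fnic_set_intr_mode(fnic);	/* picks MSI-X, then MSI, then INTx */
	if (err)
		goto err_out;		/* hypothetical probe error label */

	err = fnic_request_intr(fnic);	/* wires up the matching handlers */
	if (err) {
		fnic_clear_intr_mode(fnic);
		goto err_out;
	}
	/* teardown order: fnic_free_intr(), then fnic_clear_intr_mode() */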
332
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
new file mode 100644
index 000000000000..32ef6b87d895
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -0,0 +1,942 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/module.h>
19#include <linux/mempool.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/pci.h>
24#include <linux/skbuff.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/workqueue.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport.h>
30#include <scsi/scsi_transport_fc.h>
31#include <scsi/scsi_tcq.h>
32#include <scsi/libfc.h>
33#include <scsi/fc_frame.h>
34
35#include "vnic_dev.h"
36#include "vnic_intr.h"
37#include "vnic_stats.h"
38#include "fnic_io.h"
39#include "fnic.h"
40
41#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
42
43/* Timer to poll notification area for events. Used for MSI interrupts */
44#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ)
45
46static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
47static struct kmem_cache *fnic_io_req_cache;
48LIST_HEAD(fnic_list);
49DEFINE_SPINLOCK(fnic_list_lock);
50
51/* Devices supported by the fnic module */
52static struct pci_device_id fnic_id_table[] = {
53 { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
54 { 0, }
55};
56
57MODULE_DESCRIPTION(DRV_DESCRIPTION);
58MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
59 "Joseph R. Eykholt <jeykholt@cisco.com>");
60MODULE_LICENSE("GPL v2");
61MODULE_VERSION(DRV_VERSION);
62MODULE_DEVICE_TABLE(pci, fnic_id_table);
63
64unsigned int fnic_log_level;
65module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
66MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
67
68
69static struct libfc_function_template fnic_transport_template = {
70 .frame_send = fnic_send,
71 .fcp_abort_io = fnic_empty_scsi_cleanup,
72 .fcp_cleanup = fnic_empty_scsi_cleanup,
73 .exch_mgr_reset = fnic_exch_mgr_reset
74};
75
76static int fnic_slave_alloc(struct scsi_device *sdev)
77{
78 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
79 struct fc_lport *lp = shost_priv(sdev->host);
80 struct fnic *fnic = lport_priv(lp);
81
82 sdev->tagged_supported = 1;
83
84 if (!rport || fc_remote_port_chkready(rport))
85 return -ENXIO;
86
87 scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
88 rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000;
89
90 return 0;
91}
92
93static struct scsi_host_template fnic_host_template = {
94 .module = THIS_MODULE,
95 .name = DRV_NAME,
96 .queuecommand = fnic_queuecommand,
97 .eh_abort_handler = fnic_abort_cmd,
98 .eh_device_reset_handler = fnic_device_reset,
99 .eh_host_reset_handler = fnic_host_reset,
100 .slave_alloc = fnic_slave_alloc,
101 .change_queue_depth = fc_change_queue_depth,
102 .change_queue_type = fc_change_queue_type,
103 .this_id = -1,
104 .cmd_per_lun = 3,
105 .can_queue = FNIC_MAX_IO_REQ,
106 .use_clustering = ENABLE_CLUSTERING,
107 .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
108 .max_sectors = 0xffff,
109 .shost_attrs = fnic_attrs,
110};
111
112static void fnic_get_host_speed(struct Scsi_Host *shost);
113static struct scsi_transport_template *fnic_fc_transport;
114static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
115
116static struct fc_function_template fnic_fc_functions = {
117
118 .show_host_node_name = 1,
119 .show_host_port_name = 1,
120 .show_host_supported_classes = 1,
121 .show_host_supported_fc4s = 1,
122 .show_host_active_fc4s = 1,
123 .show_host_maxframe_size = 1,
124 .show_host_port_id = 1,
125 .show_host_supported_speeds = 1,
126 .get_host_speed = fnic_get_host_speed,
127 .show_host_speed = 1,
128 .show_host_port_type = 1,
129 .get_host_port_state = fc_get_host_port_state,
130 .show_host_port_state = 1,
131 .show_host_symbolic_name = 1,
132 .show_rport_maxframe_size = 1,
133 .show_rport_supported_classes = 1,
134 .show_host_fabric_name = 1,
135 .show_starget_node_name = 1,
136 .show_starget_port_name = 1,
137 .show_starget_port_id = 1,
138 .show_rport_dev_loss_tmo = 1,
139 .issue_fc_host_lip = fnic_reset,
140 .get_fc_host_stats = fnic_get_stats,
141 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
142 .terminate_rport_io = fnic_terminate_rport_io,
143};
144
145static void fnic_get_host_speed(struct Scsi_Host *shost)
146{
147 struct fc_lport *lp = shost_priv(shost);
148 struct fnic *fnic = lport_priv(lp);
149 u32 port_speed = vnic_dev_port_speed(fnic->vdev);
150
151 /* Add in other values as they get defined in fw */
152 switch (port_speed) {
153 case 10000:
154 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
155 break;
156 default:
157 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
158 break;
159 }
160}
161
162static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
163{
164 int ret;
165 struct fc_lport *lp = shost_priv(host);
166 struct fnic *fnic = lport_priv(lp);
167 struct fc_host_statistics *stats = &lp->host_stats;
168 struct vnic_stats *vs;
169 unsigned long flags;
170
171 if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
172 return stats;
173 fnic->stats_time = jiffies;
174
175 spin_lock_irqsave(&fnic->fnic_lock, flags);
176 ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
177 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
178
179 if (ret) {
180 FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
181 "fnic: Get vnic stats failed"
182 " 0x%x", ret);
183 return stats;
184 }
185 vs = fnic->stats;
186 stats->tx_frames = vs->tx.tx_unicast_frames_ok;
187 stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
188 stats->rx_frames = vs->rx.rx_unicast_frames_ok;
189 stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
190 stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
191 stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
192 stats->invalid_crc_count = vs->rx.rx_crc_errors;
193 stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
194 stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
195 stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
196
197 return stats;
198}
199
200void fnic_log_q_error(struct fnic *fnic)
201{
202 unsigned int i;
203 u32 error_status;
204
205 for (i = 0; i < fnic->raw_wq_count; i++) {
206 error_status = ioread32(&fnic->wq[i].ctrl->error_status);
207 if (error_status)
208 shost_printk(KERN_ERR, fnic->lport->host,
209 "WQ[%d] error_status"
210 " %d\n", i, error_status);
211 }
212
213 for (i = 0; i < fnic->rq_count; i++) {
214 error_status = ioread32(&fnic->rq[i].ctrl->error_status);
215 if (error_status)
216 shost_printk(KERN_ERR, fnic->lport->host,
217 "RQ[%d] error_status"
218 " %d\n", i, error_status);
219 }
220
221 for (i = 0; i < fnic->wq_copy_count; i++) {
222 error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
223 if (error_status)
224 shost_printk(KERN_ERR, fnic->lport->host,
225 "CWQ[%d] error_status"
226 " %d\n", i, error_status);
227 }
228}
229
230void fnic_handle_link_event(struct fnic *fnic)
231{
232 unsigned long flags;
233
234 spin_lock_irqsave(&fnic->fnic_lock, flags);
235 if (fnic->stop_rx_link_events) {
236 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
237 return;
238 }
239 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
240
241 queue_work(fnic_event_queue, &fnic->link_work);
242
243}
244
245static int fnic_notify_set(struct fnic *fnic)
246{
247 int err;
248
249 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
250 case VNIC_DEV_INTR_MODE_INTX:
251 err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
252 break;
253 case VNIC_DEV_INTR_MODE_MSI:
254 err = vnic_dev_notify_set(fnic->vdev, -1);
255 break;
256 case VNIC_DEV_INTR_MODE_MSIX:
257 err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
258 break;
259 default:
260 shost_printk(KERN_ERR, fnic->lport->host,
261 "Interrupt mode should be set up"
262 " before devcmd notify set %d\n",
263 vnic_dev_get_intr_mode(fnic->vdev));
264 err = -1;
265 break;
266 }
267
268 return err;
269}
270
271static void fnic_notify_timer(unsigned long data)
272{
273 struct fnic *fnic = (struct fnic *)data;
274
275 fnic_handle_link_event(fnic);
276 mod_timer(&fnic->notify_timer,
277 round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
278}
279
280static void fnic_notify_timer_start(struct fnic *fnic)
281{
282 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
283 case VNIC_DEV_INTR_MODE_MSI:
284 /*
285	 * Schedule the first timeout immediately. The driver is
286	 * initialized and ready to look for the link up notification.
287 */
288 mod_timer(&fnic->notify_timer, jiffies);
289 break;
290 default:
291 /* Using intr for notification for INTx/MSI-X */
292 break;
293	}
294}
295
296static int fnic_dev_wait(struct vnic_dev *vdev,
297 int (*start)(struct vnic_dev *, int),
298 int (*finished)(struct vnic_dev *, int *),
299 int arg)
300{
301 unsigned long time;
302 int done;
303 int err;
304
305 err = start(vdev, arg);
306 if (err)
307 return err;
308
309 /* Wait for func to complete...2 seconds max */
310 time = jiffies + (HZ * 2);
311 do {
312 err = finished(vdev, &done);
313 if (err)
314 return err;
315 if (done)
316 return 0;
317 schedule_timeout_uninterruptible(HZ / 10);
318 } while (time_after(time, jiffies));
319
320 return -ETIMEDOUT;
321}
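/*
 * Rough timing sketch for the poll loop above: with a 2 * HZ budget
 * and schedule_timeout_uninterruptible(HZ / 10) per iteration, the
 * finished() callback is polled roughly every 100ms, i.e. at most
 * about 20 times before giving up with -ETIMEDOUT.
 */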
322
323static int fnic_cleanup(struct fnic *fnic)
324{
325 unsigned int i;
326 int err;
327 unsigned long flags;
328 struct fc_frame *flogi = NULL;
329 struct fc_frame *flogi_resp = NULL;
330
331 vnic_dev_disable(fnic->vdev);
332 for (i = 0; i < fnic->intr_count; i++)
333 vnic_intr_mask(&fnic->intr[i]);
334
335 for (i = 0; i < fnic->rq_count; i++) {
336 err = vnic_rq_disable(&fnic->rq[i]);
337 if (err)
338 return err;
339 }
340 for (i = 0; i < fnic->raw_wq_count; i++) {
341 err = vnic_wq_disable(&fnic->wq[i]);
342 if (err)
343 return err;
344 }
345 for (i = 0; i < fnic->wq_copy_count; i++) {
346 err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
347 if (err)
348 return err;
349 }
350
351 /* Clean up completed IOs and FCS frames */
352 fnic_wq_copy_cmpl_handler(fnic, -1);
353 fnic_wq_cmpl_handler(fnic, -1);
354 fnic_rq_cmpl_handler(fnic, -1);
355
356 /* Clean up the IOs and FCS frames that have not completed */
357 for (i = 0; i < fnic->raw_wq_count; i++)
358 vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
359 for (i = 0; i < fnic->rq_count; i++)
360 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
361 for (i = 0; i < fnic->wq_copy_count; i++)
362 vnic_wq_copy_clean(&fnic->wq_copy[i],
363 fnic_wq_copy_cleanup_handler);
364
365 for (i = 0; i < fnic->cq_count; i++)
366 vnic_cq_clean(&fnic->cq[i]);
367 for (i = 0; i < fnic->intr_count; i++)
368 vnic_intr_clean(&fnic->intr[i]);
369
370 /*
371 * Remove cached flogi and flogi resp frames if any
372 * These frames are not in any queue, and therefore queue
373 * cleanup does not clean them. So clean them explicitly
374 */
375 spin_lock_irqsave(&fnic->fnic_lock, flags);
376 flogi = fnic->flogi;
377 fnic->flogi = NULL;
378 flogi_resp = fnic->flogi_resp;
379 fnic->flogi_resp = NULL;
380 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
381
382 if (flogi)
383 dev_kfree_skb(fp_skb(flogi));
384
385 if (flogi_resp)
386 dev_kfree_skb(fp_skb(flogi_resp));
387
388 mempool_destroy(fnic->io_req_pool);
389 for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
390 mempool_destroy(fnic->io_sgl_pool[i]);
391
392 return 0;
393}
394
395static void fnic_iounmap(struct fnic *fnic)
396{
397 if (fnic->bar0.vaddr)
398 iounmap(fnic->bar0.vaddr);
399}
400
401/*
402 * Allocate elements for mempools that require the GFP_DMA flag;
403 * otherwise the checks in kmem_flagcheck() hit a BUG_ON().
404 */
405static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
406{
407 struct kmem_cache *mem = pool_data;
408
409 return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
410}
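/*
 * This helper is meant to be used as a mempool alloc callback,
 * paired with mempool_free_slab and a GFP_DMA-capable slab cache,
 * as done for the SGL pools in fnic_probe() below:
 *
 *	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
 *			      fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 */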
411
412static int __devinit fnic_probe(struct pci_dev *pdev,
413 const struct pci_device_id *ent)
414{
415 struct Scsi_Host *host;
416 struct fc_lport *lp;
417 struct fnic *fnic;
418 mempool_t *pool;
419 int err;
420 int i;
421 unsigned long flags;
422
423 /*
424 * Allocate SCSI Host and set up association between host,
425 * local port, and fnic
426 */
427 host = scsi_host_alloc(&fnic_host_template,
428 sizeof(struct fc_lport) + sizeof(struct fnic));
429 if (!host) {
430 printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
431 err = -ENOMEM;
432 goto err_out;
433 }
434 lp = shost_priv(host);
435 lp->host = host;
436 fnic = lport_priv(lp);
437 fnic->lport = lp;
438
439 snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
440 host->host_no);
441
442 host->transportt = fnic_fc_transport;
443
444 err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
445 if (err) {
446 shost_printk(KERN_ERR, fnic->lport->host,
447 "Unable to alloc shared tag map\n");
448 goto err_out_free_hba;
449 }
450
451 /* Setup PCI resources */
452 pci_set_drvdata(pdev, fnic);
453
454 fnic->pdev = pdev;
455
456 err = pci_enable_device(pdev);
457 if (err) {
458 shost_printk(KERN_ERR, fnic->lport->host,
459 "Cannot enable PCI device, aborting.\n");
460 goto err_out_free_hba;
461 }
462
463 err = pci_request_regions(pdev, DRV_NAME);
464 if (err) {
465 shost_printk(KERN_ERR, fnic->lport->host,
466 "Cannot enable PCI resources, aborting\n");
467 goto err_out_disable_device;
468 }
469
470 pci_set_master(pdev);
471
472	/* Query the PCI controller for the DMA addressing
473	 * limitation of the device. Try 40-bit first, and
474	 * fall back to 32-bit.
475	 */
476 err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
477 if (err) {
478 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
479 if (err) {
480 shost_printk(KERN_ERR, fnic->lport->host,
481				     "No usable DMA configuration, "
482				     "aborting.\n");
483 goto err_out_release_regions;
484 }
485 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
486 if (err) {
487 shost_printk(KERN_ERR, fnic->lport->host,
488 "Unable to obtain 32-bit DMA "
489 "for consistent allocations, aborting.\n");
490 goto err_out_release_regions;
491 }
492 } else {
493 err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
494 if (err) {
495 shost_printk(KERN_ERR, fnic->lport->host,
496 "Unable to obtain 40-bit DMA "
497 "for consistent allocations, aborting.\n");
498 goto err_out_release_regions;
499 }
500 }
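	/*
	 * Both the streaming and the consistent masks are set to the
	 * same width above: descriptor rings (consistent allocations)
	 * and I/O buffers (streaming mappings) must both be addressable
	 * by the adapter, so the two masks are kept in lock-step.
	 */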
501
502 /* Map vNIC resources from BAR0 */
503 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
504 shost_printk(KERN_ERR, fnic->lport->host,
505			     "BAR0 is not memory-mappable, aborting.\n");
506 err = -ENODEV;
507 goto err_out_release_regions;
508 }
509
510 fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
511 fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
512 fnic->bar0.len = pci_resource_len(pdev, 0);
513
514 if (!fnic->bar0.vaddr) {
515 shost_printk(KERN_ERR, fnic->lport->host,
516 "Cannot memory-map BAR0 res hdr, "
517 "aborting.\n");
518 err = -ENODEV;
519 goto err_out_release_regions;
520 }
521
522 fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
523 if (!fnic->vdev) {
524 shost_printk(KERN_ERR, fnic->lport->host,
525 "vNIC registration failed, "
526 "aborting.\n");
527 err = -ENODEV;
528 goto err_out_iounmap;
529 }
530
531 err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
532 vnic_dev_open_done, 0);
533 if (err) {
534 shost_printk(KERN_ERR, fnic->lport->host,
535 "vNIC dev open failed, aborting.\n");
536 goto err_out_vnic_unregister;
537 }
538
539 err = vnic_dev_init(fnic->vdev, 0);
540 if (err) {
541 shost_printk(KERN_ERR, fnic->lport->host,
542 "vNIC dev init failed, aborting.\n");
543 goto err_out_dev_close;
544 }
545
546 err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
547 if (err) {
548 shost_printk(KERN_ERR, fnic->lport->host,
549			     "vNIC get MAC addr failed\n");
550 goto err_out_dev_close;
551 }
552
553 /* Get vNIC configuration */
554 err = fnic_get_vnic_config(fnic);
555 if (err) {
556 shost_printk(KERN_ERR, fnic->lport->host,
557 "Get vNIC configuration failed, "
558 "aborting.\n");
559 goto err_out_dev_close;
560 }
561 host->max_lun = fnic->config.luns_per_tgt;
562 host->max_id = FNIC_MAX_FCP_TARGET;
563
564 fnic_get_res_counts(fnic);
565
566 err = fnic_set_intr_mode(fnic);
567 if (err) {
568 shost_printk(KERN_ERR, fnic->lport->host,
569 "Failed to set intr mode, "
570 "aborting.\n");
571 goto err_out_dev_close;
572 }
573
574 err = fnic_request_intr(fnic);
575 if (err) {
576 shost_printk(KERN_ERR, fnic->lport->host,
577 "Unable to request irq.\n");
578 goto err_out_clear_intr;
579 }
580
581 err = fnic_alloc_vnic_resources(fnic);
582 if (err) {
583 shost_printk(KERN_ERR, fnic->lport->host,
584 "Failed to alloc vNIC resources, "
585 "aborting.\n");
586 goto err_out_free_intr;
587 }
588
589
590 /* initialize all fnic locks */
591 spin_lock_init(&fnic->fnic_lock);
592
593 for (i = 0; i < FNIC_WQ_MAX; i++)
594 spin_lock_init(&fnic->wq_lock[i]);
595
596 for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
597 spin_lock_init(&fnic->wq_copy_lock[i]);
598 fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
599 fnic->fw_ack_recd[i] = 0;
600 fnic->fw_ack_index[i] = -1;
601 }
602
603 for (i = 0; i < FNIC_IO_LOCKS; i++)
604 spin_lock_init(&fnic->io_req_lock[i]);
605
606 fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
607 if (!fnic->io_req_pool)
608 goto err_out_free_resources;
609
610 pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
611 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
612 if (!pool)
613 goto err_out_free_ioreq_pool;
614 fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
615
616 pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
617 fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
618 if (!pool)
619 goto err_out_free_dflt_pool;
620 fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
621
622 /* setup vlan config, hw inserts vlan header */
623 fnic->vlan_hw_insert = 1;
624 fnic->vlan_id = 0;
625
626 fnic->flogi_oxid = FC_XID_UNKNOWN;
627 fnic->flogi = NULL;
628 fnic->flogi_resp = NULL;
629 fnic->state = FNIC_IN_FC_MODE;
630
631 /* Enable hardware stripping of vlan header on ingress */
632 fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
633
634 /* Setup notification buffer area */
635 err = fnic_notify_set(fnic);
636 if (err) {
637 shost_printk(KERN_ERR, fnic->lport->host,
638 "Failed to alloc notify buffer, aborting.\n");
639 goto err_out_free_max_pool;
640 }
641
642 /* Setup notify timer when using MSI interrupts */
643 if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
644 setup_timer(&fnic->notify_timer,
645 fnic_notify_timer, (unsigned long)fnic);
646
647	/* Allocate RQ buffers and post them to the RQs */
648 for (i = 0; i < fnic->rq_count; i++) {
649 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
650 if (err) {
651 shost_printk(KERN_ERR, fnic->lport->host,
652 "fnic_alloc_rq_frame can't alloc "
653 "frame\n");
654 goto err_out_free_rq_buf;
655 }
656 }
657
658 /*
659 * Initialization done with PCI system, hardware, firmware.
660 * Add host to SCSI
661 */
662 err = scsi_add_host(lp->host, &pdev->dev);
663 if (err) {
664 shost_printk(KERN_ERR, fnic->lport->host,
665 "fnic: scsi_add_host failed...exiting\n");
666 goto err_out_free_rq_buf;
667 }
668
669	/* Start local port initialization */
670
671 lp->link_up = 0;
672 lp->tt = fnic_transport_template;
673
674 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
675 FCPIO_HOST_EXCH_RANGE_START,
676 FCPIO_HOST_EXCH_RANGE_END);
677 if (!lp->emp) {
678 err = -ENOMEM;
679 goto err_out_remove_scsi_host;
680 }
681
682 lp->max_retry_count = fnic->config.flogi_retries;
683 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
684 FCP_SPPF_CONF_COMPL);
685 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
686 lp->service_params |= FCP_SPPF_RETRY;
687
688 lp->boot_time = jiffies;
689 lp->e_d_tov = fnic->config.ed_tov;
690 lp->r_a_tov = fnic->config.ra_tov;
691 lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
692 fc_set_wwnn(lp, fnic->config.node_wwn);
693 fc_set_wwpn(lp, fnic->config.port_wwn);
694
695 fc_exch_init(lp);
696 fc_lport_init(lp);
697 fc_elsct_init(lp);
698 fc_rport_init(lp);
699 fc_disc_init(lp);
700
701 fc_lport_config(lp);
702
703 if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
704 sizeof(struct fc_frame_header))) {
705 err = -EINVAL;
706 goto err_out_free_exch_mgr;
707 }
708 fc_host_maxframe_size(lp->host) = lp->mfs;
709
710 sprintf(fc_host_symbolic_name(lp->host),
711 DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
712
713 spin_lock_irqsave(&fnic_list_lock, flags);
714 list_add_tail(&fnic->list, &fnic_list);
715 spin_unlock_irqrestore(&fnic_list_lock, flags);
716
717 INIT_WORK(&fnic->link_work, fnic_handle_link);
718 INIT_WORK(&fnic->frame_work, fnic_handle_frame);
719 skb_queue_head_init(&fnic->frame_queue);
720
721 /* Enable all queues */
722 for (i = 0; i < fnic->raw_wq_count; i++)
723 vnic_wq_enable(&fnic->wq[i]);
724 for (i = 0; i < fnic->rq_count; i++)
725 vnic_rq_enable(&fnic->rq[i]);
726 for (i = 0; i < fnic->wq_copy_count; i++)
727 vnic_wq_copy_enable(&fnic->wq_copy[i]);
728
729 fc_fabric_login(lp);
730
731 vnic_dev_enable(fnic->vdev);
732 for (i = 0; i < fnic->intr_count; i++)
733 vnic_intr_unmask(&fnic->intr[i]);
734
735 fnic_notify_timer_start(fnic);
736
737 return 0;
738
739err_out_free_exch_mgr:
740 fc_exch_mgr_free(lp->emp);
741err_out_remove_scsi_host:
742 fc_remove_host(fnic->lport->host);
743 scsi_remove_host(fnic->lport->host);
744err_out_free_rq_buf:
745 for (i = 0; i < fnic->rq_count; i++)
746 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
747 vnic_dev_notify_unset(fnic->vdev);
748err_out_free_max_pool:
749 mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
750err_out_free_dflt_pool:
751 mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
752err_out_free_ioreq_pool:
753 mempool_destroy(fnic->io_req_pool);
754err_out_free_resources:
755 fnic_free_vnic_resources(fnic);
756err_out_free_intr:
757 fnic_free_intr(fnic);
758err_out_clear_intr:
759 fnic_clear_intr_mode(fnic);
760err_out_dev_close:
761 vnic_dev_close(fnic->vdev);
762err_out_vnic_unregister:
763 vnic_dev_unregister(fnic->vdev);
764err_out_iounmap:
765 fnic_iounmap(fnic);
766err_out_release_regions:
767 pci_release_regions(pdev);
768err_out_disable_device:
769 pci_disable_device(pdev);
770err_out_free_hba:
771 scsi_host_put(lp->host);
772err_out:
773 return err;
774}
775
776static void __devexit fnic_remove(struct pci_dev *pdev)
777{
778 struct fnic *fnic = pci_get_drvdata(pdev);
779 unsigned long flags;
780
781 /*
782 * Mark state so that the workqueue thread stops forwarding
783 * received frames and link events to the local port. ISR and
784 * other threads that can queue work items will also stop
785 * creating work items on the fnic workqueue
786 */
787 spin_lock_irqsave(&fnic->fnic_lock, flags);
788 fnic->stop_rx_link_events = 1;
789 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
790
791 if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
792 del_timer_sync(&fnic->notify_timer);
793
794 /*
795 * Flush the fnic event queue. After this call, there should
796 * be no event queued for this fnic device in the workqueue
797 */
798 flush_workqueue(fnic_event_queue);
799 skb_queue_purge(&fnic->frame_queue);
800
801 /*
802	 * Log off the fabric. This stops all remote ports and the dns
803	 * port, and flushes all rport, disc and lport work before
804	 * returning.
805 */
806 fc_fabric_logoff(fnic->lport);
807
808 spin_lock_irqsave(&fnic->fnic_lock, flags);
809 fnic->in_remove = 1;
810 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
811
812 fc_lport_destroy(fnic->lport);
813
814 /*
815 * This stops the fnic device, masks all interrupts. Completed
816 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
817 * cleaned up
818 */
819 fnic_cleanup(fnic);
820
821 BUG_ON(!skb_queue_empty(&fnic->frame_queue));
822
823 spin_lock_irqsave(&fnic_list_lock, flags);
824 list_del(&fnic->list);
825 spin_unlock_irqrestore(&fnic_list_lock, flags);
826
827 fc_remove_host(fnic->lport->host);
828 scsi_remove_host(fnic->lport->host);
829 fc_exch_mgr_free(fnic->lport->emp);
830 vnic_dev_notify_unset(fnic->vdev);
831 fnic_free_vnic_resources(fnic);
832 fnic_free_intr(fnic);
833 fnic_clear_intr_mode(fnic);
834 vnic_dev_close(fnic->vdev);
835 vnic_dev_unregister(fnic->vdev);
836 fnic_iounmap(fnic);
837 pci_release_regions(pdev);
838 pci_disable_device(pdev);
839 pci_set_drvdata(pdev, NULL);
840 scsi_host_put(fnic->lport->host);
841}
842
843static struct pci_driver fnic_driver = {
844 .name = DRV_NAME,
845 .id_table = fnic_id_table,
846 .probe = fnic_probe,
847 .remove = __devexit_p(fnic_remove),
848};
849
850static int __init fnic_init_module(void)
851{
852 size_t len;
853 int err = 0;
854
855 printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
856
857 /* Create a cache for allocation of default size sgls */
858 len = sizeof(struct fnic_dflt_sgl_list);
859 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
860 ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
861 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
862 NULL);
863 if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
864 printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
865 err = -ENOMEM;
866 goto err_create_fnic_sgl_slab_dflt;
867 }
868
869 /* Create a cache for allocation of max size sgls*/
870 len = sizeof(struct fnic_sgl_list);
871 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
872 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
873 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
874 NULL);
875 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
876 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
877 err = -ENOMEM;
878 goto err_create_fnic_sgl_slab_max;
879 }
880
881 /* Create a cache of io_req structs for use via mempool */
882 fnic_io_req_cache = kmem_cache_create("fnic_io_req",
883 sizeof(struct fnic_io_req),
884 0, SLAB_HWCACHE_ALIGN, NULL);
885 if (!fnic_io_req_cache) {
886 printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
887 err = -ENOMEM;
888 goto err_create_fnic_ioreq_slab;
889 }
890
891 fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
892 if (!fnic_event_queue) {
893 printk(KERN_ERR PFX "fnic work queue create failed\n");
894 err = -ENOMEM;
895 goto err_create_fnic_workq;
896 }
897
898 spin_lock_init(&fnic_list_lock);
899 INIT_LIST_HEAD(&fnic_list);
900
901 fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
902 if (!fnic_fc_transport) {
903 printk(KERN_ERR PFX "fc_attach_transport error\n");
904 err = -ENOMEM;
905 goto err_fc_transport;
906 }
907
908 /* register the driver with PCI system */
909 err = pci_register_driver(&fnic_driver);
910 if (err < 0) {
911 printk(KERN_ERR PFX "pci register error\n");
912 goto err_pci_register;
913 }
914 return err;
915
916err_pci_register:
917 fc_release_transport(fnic_fc_transport);
918err_fc_transport:
919 destroy_workqueue(fnic_event_queue);
920err_create_fnic_workq:
921 kmem_cache_destroy(fnic_io_req_cache);
922err_create_fnic_ioreq_slab:
923 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
924err_create_fnic_sgl_slab_max:
925 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
926err_create_fnic_sgl_slab_dflt:
927 return err;
928}
929
930static void __exit fnic_cleanup_module(void)
931{
932 pci_unregister_driver(&fnic_driver);
933 destroy_workqueue(fnic_event_queue);
934 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
935 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
936 kmem_cache_destroy(fnic_io_req_cache);
937 fc_release_transport(fnic_fc_transport);
938}
939
940module_init(fnic_init_module);
941module_exit(fnic_cleanup_module);
942
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
new file mode 100644
index 000000000000..7ba61ec715d2
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -0,0 +1,444 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include "wq_enet_desc.h"
22#include "rq_enet_desc.h"
23#include "cq_enet_desc.h"
24#include "vnic_resource.h"
25#include "vnic_dev.h"
26#include "vnic_wq.h"
27#include "vnic_rq.h"
28#include "vnic_cq.h"
29#include "vnic_intr.h"
30#include "vnic_stats.h"
31#include "vnic_nic.h"
32#include "fnic.h"
33
34int fnic_get_vnic_config(struct fnic *fnic)
35{
36 struct vnic_fc_config *c = &fnic->config;
37 int err;
38
39#define GET_CONFIG(m) \
40 do { \
41 err = vnic_dev_spec(fnic->vdev, \
42 offsetof(struct vnic_fc_config, m), \
43 sizeof(c->m), &c->m); \
44 if (err) { \
45 shost_printk(KERN_ERR, fnic->lport->host, \
46 "Error getting %s, %d\n", #m, \
47 err); \
48 return err; \
49 } \
50	} while (0)
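/*
 * For illustration, GET_CONFIG(node_wwn) expands to roughly:
 *
 *	err = vnic_dev_spec(fnic->vdev,
 *			    offsetof(struct vnic_fc_config, node_wwn),
 *			    sizeof(c->node_wwn), &c->node_wwn);
 *	if (err)
 *		return err;	(after logging via shost_printk)
 */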
51
52 GET_CONFIG(node_wwn);
53 GET_CONFIG(port_wwn);
54 GET_CONFIG(wq_enet_desc_count);
55 GET_CONFIG(wq_copy_desc_count);
56 GET_CONFIG(rq_desc_count);
57 GET_CONFIG(maxdatafieldsize);
58 GET_CONFIG(ed_tov);
59 GET_CONFIG(ra_tov);
60 GET_CONFIG(intr_timer);
61 GET_CONFIG(intr_timer_type);
62 GET_CONFIG(flags);
63 GET_CONFIG(flogi_retries);
64 GET_CONFIG(flogi_timeout);
65 GET_CONFIG(plogi_retries);
66 GET_CONFIG(plogi_timeout);
67 GET_CONFIG(io_throttle_count);
68 GET_CONFIG(link_down_timeout);
69 GET_CONFIG(port_down_timeout);
70 GET_CONFIG(port_down_io_retries);
71 GET_CONFIG(luns_per_tgt);
72
73 c->wq_enet_desc_count =
74 min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
75 max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
76 c->wq_enet_desc_count));
77 c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
78
79 c->wq_copy_desc_count =
80 min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
81 max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
82 c->wq_copy_desc_count));
83 c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
84
85 c->rq_desc_count =
86 min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
87 max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
88 c->rq_desc_count));
89 c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
90
91 c->maxdatafieldsize =
92 min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
93 max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
94 c->maxdatafieldsize));
95 c->ed_tov =
96 min_t(u32, VNIC_FNIC_EDTOV_MAX,
97 max_t(u32, VNIC_FNIC_EDTOV_MIN,
98 c->ed_tov));
99
100 c->ra_tov =
101 min_t(u32, VNIC_FNIC_RATOV_MAX,
102 max_t(u32, VNIC_FNIC_RATOV_MIN,
103 c->ra_tov));
104
105 c->flogi_retries =
106 min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);
107
108 c->flogi_timeout =
109 min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
110 max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
111 c->flogi_timeout));
112
113 c->plogi_retries =
114 min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);
115
116 c->plogi_timeout =
117 min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
118 max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
119 c->plogi_timeout));
120
121 c->io_throttle_count =
122 min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
123 max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
124 c->io_throttle_count));
125
126 c->link_down_timeout =
127 min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
128 c->link_down_timeout);
129
130 c->port_down_timeout =
131 min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
132 c->port_down_timeout);
133
134 c->port_down_io_retries =
135 min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
136 c->port_down_io_retries);
137
138 c->luns_per_tgt =
139 min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
140 max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
141 c->luns_per_tgt));
142
143	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
144	/* intr_timer_type is used as read from firmware, unclamped */
145
146 shost_printk(KERN_INFO, fnic->lport->host,
147 "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
148 "wq/wq_copy/rq %d/%d/%d\n",
149 fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2],
150 fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
151 c->wq_enet_desc_count, c->wq_copy_desc_count,
152 c->rq_desc_count);
153 shost_printk(KERN_INFO, fnic->lport->host,
154 "vNIC node wwn %llx port wwn %llx\n",
155 c->node_wwn, c->port_wwn);
156 shost_printk(KERN_INFO, fnic->lport->host,
157 "vNIC ed_tov %d ra_tov %d\n",
158 c->ed_tov, c->ra_tov);
159 shost_printk(KERN_INFO, fnic->lport->host,
160 "vNIC mtu %d intr timer %d\n",
161 c->maxdatafieldsize, c->intr_timer);
162 shost_printk(KERN_INFO, fnic->lport->host,
163 "vNIC flags 0x%x luns per tgt %d\n",
164 c->flags, c->luns_per_tgt);
165 shost_printk(KERN_INFO, fnic->lport->host,
166 "vNIC flogi_retries %d flogi timeout %d\n",
167 c->flogi_retries, c->flogi_timeout);
168 shost_printk(KERN_INFO, fnic->lport->host,
169 "vNIC plogi retries %d plogi timeout %d\n",
170 c->plogi_retries, c->plogi_timeout);
171 shost_printk(KERN_INFO, fnic->lport->host,
172 "vNIC io throttle count %d link dn timeout %d\n",
173 c->io_throttle_count, c->link_down_timeout);
174 shost_printk(KERN_INFO, fnic->lport->host,
175 "vNIC port dn io retries %d port dn timeout %d\n",
176 c->port_down_io_retries, c->port_down_timeout);
177
178 return 0;
179}
180
181int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
182 u8 rss_hash_type,
183 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable,
184 u8 tso_ipid_split_en, u8 ig_vlan_strip_en)
185{
186 u64 a0, a1;
187 u32 nic_cfg;
188 int wait = 1000;
189
190 vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
191 rss_hash_type, rss_hash_bits, rss_base_cpu,
192 rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
193
194 a0 = nic_cfg;
195 a1 = 0;
196
197 return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
198}
199
200void fnic_get_res_counts(struct fnic *fnic)
201{
202 fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
203 fnic->raw_wq_count = fnic->wq_count - 1;
204 fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
205 fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
206 fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
207 fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
208 RES_TYPE_INTR_CTRL);
209}
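/*
 * Example: with two WQ resources (wq_count == 2) the arithmetic
 * above yields raw_wq_count == 1 (the raw WQ for FCS frames) and
 * wq_copy_count == 1 (the copy WQ used for SCSI I/O).
 */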
210
211void fnic_free_vnic_resources(struct fnic *fnic)
212{
213 unsigned int i;
214
215 for (i = 0; i < fnic->raw_wq_count; i++)
216 vnic_wq_free(&fnic->wq[i]);
217
218 for (i = 0; i < fnic->wq_copy_count; i++)
219 vnic_wq_copy_free(&fnic->wq_copy[i]);
220
221 for (i = 0; i < fnic->rq_count; i++)
222 vnic_rq_free(&fnic->rq[i]);
223
224 for (i = 0; i < fnic->cq_count; i++)
225 vnic_cq_free(&fnic->cq[i]);
226
227 for (i = 0; i < fnic->intr_count; i++)
228 vnic_intr_free(&fnic->intr[i]);
229}
230
231int fnic_alloc_vnic_resources(struct fnic *fnic)
232{
233 enum vnic_dev_intr_mode intr_mode;
234 unsigned int mask_on_assertion;
235 unsigned int interrupt_offset;
236 unsigned int error_interrupt_enable;
237 unsigned int error_interrupt_offset;
238 unsigned int i, cq_index;
239 unsigned int wq_copy_cq_desc_count;
240 int err;
241
242 intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
243
244 shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
245 intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
246 intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
247 intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
248 "MSI-X" : "unknown");
249
250 shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
251 "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
252 fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
253 fnic->rq_count, fnic->cq_count, fnic->intr_count);
254
255 /* Allocate Raw WQ used for FCS frames */
256 for (i = 0; i < fnic->raw_wq_count; i++) {
257 err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
258 fnic->config.wq_enet_desc_count,
259 sizeof(struct wq_enet_desc));
260 if (err)
261 goto err_out_cleanup;
262 }
263
264 /* Allocate Copy WQs used for SCSI IOs */
265 for (i = 0; i < fnic->wq_copy_count; i++) {
266 err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
267 (fnic->raw_wq_count + i),
268 fnic->config.wq_copy_desc_count,
269 sizeof(struct fcpio_host_req));
270 if (err)
271 goto err_out_cleanup;
272 }
273
274 /* RQ for receiving FCS frames */
275 for (i = 0; i < fnic->rq_count; i++) {
276 err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
277 fnic->config.rq_desc_count,
278 sizeof(struct rq_enet_desc));
279 if (err)
280 goto err_out_cleanup;
281 }
282
283 /* CQ for each RQ */
284 for (i = 0; i < fnic->rq_count; i++) {
285 cq_index = i;
286 err = vnic_cq_alloc(fnic->vdev,
287 &fnic->cq[cq_index], cq_index,
288 fnic->config.rq_desc_count,
289 sizeof(struct cq_enet_rq_desc));
290 if (err)
291 goto err_out_cleanup;
292 }
293
294 /* CQ for each WQ */
295 for (i = 0; i < fnic->raw_wq_count; i++) {
296 cq_index = fnic->rq_count + i;
297 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
298 fnic->config.wq_enet_desc_count,
299 sizeof(struct cq_enet_wq_desc));
300 if (err)
301 goto err_out_cleanup;
302 }
303
304 /* CQ for each COPY WQ */
305 wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
306 for (i = 0; i < fnic->wq_copy_count; i++) {
307 cq_index = fnic->raw_wq_count + fnic->rq_count + i;
308 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
309 cq_index,
310 wq_copy_cq_desc_count,
311 sizeof(struct fcpio_fw_req));
312 if (err)
313 goto err_out_cleanup;
314 }
315
316 for (i = 0; i < fnic->intr_count; i++) {
317 err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
318 if (err)
319 goto err_out_cleanup;
320 }
321
322 fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
323 RES_TYPE_INTR_PBA_LEGACY, 0);
324
325 if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
326 shost_printk(KERN_ERR, fnic->lport->host,
327 "Failed to hook legacy pba resource\n");
328 err = -ENODEV;
329 goto err_out_cleanup;
330 }
331
332 /*
333 * Init RQ/WQ resources.
334 *
335 * RQ[0 to n-1] point to CQ[0 to n-1]
336 * WQ[0 to m-1] point to CQ[n to n+m-1]
337 * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
338 *
339 * Note for copy wq we always initialize with cq_index = 0
340 *
341 * Error interrupt is not enabled for MSI.
342 */
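	/*
	 * Concrete example of the layout above: with rq_count == 1,
	 * raw_wq_count == 1 and wq_copy_count == 1, cq[0] serves the
	 * RQ, cq[1] the raw WQ and cq[2] the copy WQ, matching the
	 * allocation order earlier in this function.
	 */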
343
344 switch (intr_mode) {
345 case VNIC_DEV_INTR_MODE_INTX:
346 case VNIC_DEV_INTR_MODE_MSIX:
347 error_interrupt_enable = 1;
348 error_interrupt_offset = fnic->err_intr_offset;
349 break;
350 default:
351 error_interrupt_enable = 0;
352 error_interrupt_offset = 0;
353 break;
354 }
355
356 for (i = 0; i < fnic->rq_count; i++) {
357 cq_index = i;
358 vnic_rq_init(&fnic->rq[i],
359 cq_index,
360 error_interrupt_enable,
361 error_interrupt_offset);
362 }
363
364 for (i = 0; i < fnic->raw_wq_count; i++) {
365 cq_index = i + fnic->rq_count;
366 vnic_wq_init(&fnic->wq[i],
367 cq_index,
368 error_interrupt_enable,
369 error_interrupt_offset);
370 }
371
372 for (i = 0; i < fnic->wq_copy_count; i++) {
373 vnic_wq_copy_init(&fnic->wq_copy[i],
374 0 /* cq_index 0 - always */,
375 error_interrupt_enable,
376 error_interrupt_offset);
377 }
378
379 for (i = 0; i < fnic->cq_count; i++) {
380
381 switch (intr_mode) {
382 case VNIC_DEV_INTR_MODE_MSIX:
383 interrupt_offset = i;
384 break;
385 default:
386 interrupt_offset = 0;
387 break;
388 }
389
390 vnic_cq_init(&fnic->cq[i],
391 0 /* flow_control_enable */,
392 1 /* color_enable */,
393 0 /* cq_head */,
394 0 /* cq_tail */,
395 1 /* cq_tail_color */,
396 1 /* interrupt_enable */,
397 1 /* cq_entry_enable */,
398 0 /* cq_message_enable */,
399 interrupt_offset,
400 0 /* cq_message_addr */);
401 }
402
403 /*
404 * Init INTR resources
405 *
406 * mask_on_assertion is not used for INTx due to the level-
407 * triggered nature of INTx
408 */
409
410 switch (intr_mode) {
411 case VNIC_DEV_INTR_MODE_MSI:
412 case VNIC_DEV_INTR_MODE_MSIX:
413 mask_on_assertion = 1;
414 break;
415 default:
416 mask_on_assertion = 0;
417 break;
418 }
419
420 for (i = 0; i < fnic->intr_count; i++) {
421 vnic_intr_init(&fnic->intr[i],
422 fnic->config.intr_timer,
423 fnic->config.intr_timer_type,
424 mask_on_assertion);
425 }
426
427 /* init the stats memory by making the first call here */
428 err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
429 if (err) {
430 shost_printk(KERN_ERR, fnic->lport->host,
431			     "vnic_dev_stats_dump failed - 0x%x\n", err);
432 goto err_out_cleanup;
433 }
434
435 /* Clear LIF stats */
436 vnic_dev_stats_clear(fnic->vdev);
437
438 return 0;
439
440err_out_cleanup:
441 fnic_free_vnic_resources(fnic);
442
443 return err;
444}
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
new file mode 100644
index 000000000000..b6f310262534
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -0,0 +1,197 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_RES_H_
19#define _FNIC_RES_H_
20
21#include "wq_enet_desc.h"
22#include "rq_enet_desc.h"
23#include "vnic_wq.h"
24#include "vnic_rq.h"
25#include "fnic_io.h"
26#include "fcpio.h"
27#include "vnic_wq_copy.h"
28#include "vnic_cq_copy.h"
29
30static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
31 void *os_buf, dma_addr_t dma_addr,
32 unsigned int len, unsigned int fc_eof,
33 int vlan_tag_insert,
34 unsigned int vlan_tag,
35 int cq_entry, int sop, int eop)
36{
37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
38
39 wq_enet_desc_enc(desc,
40 (u64)dma_addr | VNIC_PADDR_TARGET,
41 (u16)len,
42 0, /* mss_or_csum_offset */
43 (u16)fc_eof,
44 0, /* offload_mode */
45 (u8)eop, (u8)cq_entry,
46 1, /* fcoe_encap */
47 (u8)vlan_tag_insert,
48 (u16)vlan_tag,
49 0 /* loopback */);
50
51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
52}
53
54static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
55 u32 req_id,
56 u32 lunmap_id, u8 spl_flags,
57 u32 sgl_cnt, u32 sense_len,
58 u64 sgl_addr, u64 sns_addr,
59 u8 crn, u8 pri_ta,
60 u8 flags, u8 *scsi_cdb,
61 u32 data_len, u8 *lun,
62 u32 d_id, u16 mss,
63 u32 ratov, u32 edtov)
64{
65 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
66
67 desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */
68 desc->hdr.status = 0; /* header status entry */
69 desc->hdr._resvd = 0; /* reserved */
70 desc->hdr.tag.u.req_id = req_id; /* id for this request */
71
72 desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */
73 desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */
74 desc->u.icmnd_16._resvd0[0] = 0; /* reserved */
75 desc->u.icmnd_16._resvd0[1] = 0; /* reserved */
76 desc->u.icmnd_16._resvd0[2] = 0; /* reserved */
77 desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */
78 desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */
79 desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */
80 desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */
81 desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/
82 desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */
83 desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */
84 desc->u.icmnd_16.flags = flags; /* command flags */
85 memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */
86 desc->u.icmnd_16.data_len = data_len; /* length of data expected */
87 memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */
88 desc->u.icmnd_16._resvd2 = 0; /* reserved */
89 hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */
90 desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */
91 desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */
92 desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */
93
94 vnic_wq_copy_post(wq);
95}
96
97static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
98 u32 req_id, u32 lunmap_id,
99 u32 tm_req, u32 tm_id, u8 *lun,
100 u32 d_id, u32 r_a_tov,
101 u32 e_d_tov)
102{
103 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
104
105 desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */
106 desc->hdr.status = 0; /* header status entry */
107 desc->hdr._resvd = 0; /* reserved */
108 desc->hdr.tag.u.req_id = req_id; /* id for this request */
109
110 desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */
111 desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */
112 desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */
113 desc->u.itmf._resvd = 0;
114 memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */
115 desc->u.itmf._resvd1 = 0;
116 hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */
117 desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */
118 desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */
119
120 vnic_wq_copy_post(wq);
121}
122
123static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
124 u32 req_id, u8 format,
125 u32 s_id, u8 *gw_mac)
126{
127 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
128
129 desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */
130 desc->hdr.status = 0; /* header status entry */
131 desc->hdr._resvd = 0; /* reserved */
132 desc->hdr.tag.u.req_id = req_id; /* id for this request */
133
134 desc->u.flogi_reg.format = format;
135 hton24(desc->u.flogi_reg.s_id, s_id);
136 memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
137
138 vnic_wq_copy_post(wq);
139}
140
141static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
142 u32 req_id)
143{
144 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
145
146 desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */
147 desc->hdr.status = 0; /* header status entry */
148 desc->hdr._resvd = 0; /* reserved */
149 desc->hdr.tag.u.req_id = req_id; /* id for this request */
150
151 vnic_wq_copy_post(wq);
152}
153
154static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq,
155 u32 req_id, u64 lunmap_addr,
156 u32 lunmap_len)
157{
158 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
159
160 desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */
161 desc->hdr.status = 0; /* header status entry */
162 desc->hdr._resvd = 0; /* reserved */
163 desc->hdr.tag.u.req_id = req_id; /* id for this request */
164
165 desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */
166 desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */
167
168 vnic_wq_copy_post(wq);
169}
170
171static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
172 void *os_buf, dma_addr_t dma_addr,
173 u16 len)
174{
175 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
176
177 rq_enet_desc_enc(desc,
178 (u64)dma_addr | VNIC_PADDR_TARGET,
179 RQ_ENET_TYPE_ONLY_SOP,
180 (u16)len);
181
182 vnic_rq_post(rq, os_buf, 0, dma_addr, len);
183}
184
185
186struct fnic;
187
188int fnic_get_vnic_config(struct fnic *);
189int fnic_alloc_vnic_resources(struct fnic *);
190void fnic_free_vnic_resources(struct fnic *);
191void fnic_get_res_counts(struct fnic *);
192int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
193 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu,
194 u8 rss_enable, u8 tso_ipid_split_en,
195 u8 ig_vlan_strip_en);
196
197#endif /* _FNIC_RES_H_ */
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
new file mode 100644
index 000000000000..eabf36502856
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -0,0 +1,1850 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/mempool.h>
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/workqueue.h>
22#include <linux/pci.h>
23#include <linux/scatterlist.h>
24#include <linux/skbuff.h>
25#include <linux/spinlock.h>
26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
28#include <linux/delay.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_tcq.h>
34#include <scsi/fc/fc_els.h>
35#include <scsi/fc/fc_fcoe.h>
36#include <scsi/libfc.h>
37#include <scsi/fc_frame.h>
38#include "fnic_io.h"
39#include "fnic.h"
40
41const char *fnic_state_str[] = {
42 [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
43 [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
44 [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
45 [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
46};
47
48static const char *fnic_ioreq_state_str[] = {
49 [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
50 [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
51 [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
52 [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
53};
54
55static const char *fcpio_status_str[] = {
56 [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
57 [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
58 [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
59	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
60 [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
61 [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
62 [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
63 [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
64 [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
65 [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
66 [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
67 [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
68 [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
69 [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
70 [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
71 [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
72 [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
73 [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
74	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
75};
76
77const char *fnic_state_to_str(unsigned int state)
78{
79 if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
80 return "unknown";
81
82 return fnic_state_str[state];
83}
84
85static const char *fnic_ioreq_state_to_str(unsigned int state)
86{
87 if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
88 !fnic_ioreq_state_str[state])
89 return "unknown";
90
91 return fnic_ioreq_state_str[state];
92}
93
94static const char *fnic_fcpio_status_to_str(unsigned int status)
95{
96 if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
97 return "unknown";
98
99 return fcpio_status_str[status];
100}
101
102static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
103
104static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
105 struct scsi_cmnd *sc)
106{
107 u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
108
109 return &fnic->io_req_lock[hash];
110}
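/*
 * The mask only works as a cheap modulo if FNIC_IO_LOCKS is a power
 * of two; e.g. with 64 locks, tag 130 hashes to lock 130 & 63 == 2,
 * so commands sharing the low tag bits share a lock.
 */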
111
112/*
113 * Unmap the data buffer and sense buffer for an io_req,
114 * also unmap and free the device-private scatter/gather list.
115 */
116static void fnic_release_ioreq_buf(struct fnic *fnic,
117 struct fnic_io_req *io_req,
118 struct scsi_cmnd *sc)
119{
120 if (io_req->sgl_list_pa)
121 pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
122 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
123 PCI_DMA_TODEVICE);
124 scsi_dma_unmap(sc);
125
126 if (io_req->sgl_cnt)
127 mempool_free(io_req->sgl_list_alloc,
128 fnic->io_sgl_pool[io_req->sgl_type]);
129 if (io_req->sense_buf_pa)
130 pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
131 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
132}
133
134/* Free up Copy Wq descriptors. Called with copy_wq lock held */
135static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
136{
137 /* if no Ack received from firmware, then nothing to clean */
138 if (!fnic->fw_ack_recd[0])
139 return 1;
140
141 /*
142 * Update desc_available count based on number of freed descriptors
143 * Account for wraparound
144 */
145 if (wq->to_clean_index <= fnic->fw_ack_index[0])
146 wq->ring.desc_avail += (fnic->fw_ack_index[0]
147 - wq->to_clean_index + 1);
148 else
149 wq->ring.desc_avail += (wq->ring.desc_count
150 - wq->to_clean_index
151 + fnic->fw_ack_index[0] + 1);
152
153 /*
154 * just bump clean index to ack_index+1 accounting for wraparound
155 * this will essentially free up all descriptors between
156 * to_clean_index and fw_ack_index, both inclusive
157 */
158 wq->to_clean_index =
159 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
160
161 /* we have processed the acks received so far */
162 fnic->fw_ack_recd[0] = 0;
163 return 0;
164}
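/*
 * Worked example of the wraparound branch above: on a 64-entry ring
 * with to_clean_index == 60 and fw_ack_index[0] == 3, desc_avail
 * grows by 64 - 60 + 3 + 1 == 8 (descriptors 60..63 and 0..3), and
 * to_clean_index advances to (3 + 1) % 64 == 4.
 */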
165
166
167/*
168 * fnic_fw_reset_handler
169 * Routine to send reset msg to fw
170 */
171int fnic_fw_reset_handler(struct fnic *fnic)
172{
173 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
174 int ret = 0;
175 unsigned long flags;
176
177 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
178
179 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
180 free_wq_copy_descs(fnic, wq);
181
182 if (!vnic_wq_copy_desc_avail(wq))
183 ret = -EAGAIN;
184 else
185 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
186
187 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
188
189 if (!ret)
190 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
191 "Issued fw reset\n");
192 else
193 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
194 "Failed to issue fw reset\n");
195 return ret;
196}
197
198
199/*
200 * fnic_flogi_reg_handler
201 * Routine to send flogi register msg to fw
202 */
203int fnic_flogi_reg_handler(struct fnic *fnic)
204{
205 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
206 u8 gw_mac[ETH_ALEN];
207 int ret = 0;
208 unsigned long flags;
209
210 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
211
212 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
213 free_wq_copy_descs(fnic, wq);
214
215 if (!vnic_wq_copy_desc_avail(wq)) {
216 ret = -EAGAIN;
217 goto flogi_reg_ioreq_end;
218 }
219
220 if (fnic->fcoui_mode)
221 memset(gw_mac, 0xff, ETH_ALEN);
222 else
223 memcpy(gw_mac, fnic->dest_addr, ETH_ALEN);
224
225 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
226 FCPIO_FLOGI_REG_GW_DEST,
227 fnic->s_id,
228 gw_mac);
229
230flogi_reg_ioreq_end:
231 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
232
233 if (!ret)
234 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
235			      "flogi reg issued\n");
236
237 return ret;
238}
239
240/*
241 * fnic_queue_wq_copy_desc
242 * Routine to enqueue a wq copy desc
243 */
244static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
245 struct vnic_wq_copy *wq,
246 struct fnic_io_req *io_req,
247 struct scsi_cmnd *sc,
248 u32 sg_count)
249{
250 struct scatterlist *sg;
251 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
252 struct fc_rport_libfc_priv *rp = rport->dd_data;
253 struct host_sg_desc *desc;
254 u8 pri_tag = 0;
255 unsigned int i;
256 unsigned long intr_flags;
257 int flags;
258 u8 exch_flags;
259 struct scsi_lun fc_lun;
260 char msg[2];
261
262	if (sg_count) {
263		/* sg_count is unsigned here; only the upper bound needs a check */
264		BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
265
266 /* For each SGE, create a device desc entry */
267 desc = io_req->sgl_list;
268 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
269 desc->addr = cpu_to_le64(sg_dma_address(sg));
270 desc->len = cpu_to_le32(sg_dma_len(sg));
271 desc->_resvd = 0;
272 desc++;
273 }
274
275 io_req->sgl_list_pa = pci_map_single
276 (fnic->pdev,
277 io_req->sgl_list,
278 sizeof(io_req->sgl_list[0]) * sg_count,
279 PCI_DMA_TODEVICE);
280 }
281
282 io_req->sense_buf_pa = pci_map_single(fnic->pdev,
283 sc->sense_buffer,
284 SCSI_SENSE_BUFFERSIZE,
285 PCI_DMA_FROMDEVICE);
286
287 int_to_scsilun(sc->device->lun, &fc_lun);
288
289 pri_tag = FCPIO_ICMND_PTA_SIMPLE;
290 msg[0] = MSG_SIMPLE_TAG;
291 scsi_populate_tag_msg(sc, msg);
292 if (msg[0] == MSG_ORDERED_TAG)
293 pri_tag = FCPIO_ICMND_PTA_ORDERED;
294
295 /* Enqueue the descriptor in the Copy WQ */
296 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
297
298 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
299 free_wq_copy_descs(fnic, wq);
300
301 if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
302 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
303 return SCSI_MLQUEUE_HOST_BUSY;
304 }
305
306 flags = 0;
307 if (sc->sc_data_direction == DMA_FROM_DEVICE)
308 flags = FCPIO_ICMND_RDDATA;
309 else if (sc->sc_data_direction == DMA_TO_DEVICE)
310 flags = FCPIO_ICMND_WRDATA;
311
312 exch_flags = 0;
313 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
314 (rp->flags & FC_RP_FLAGS_RETRY))
315 exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
316
317 fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
318 0, exch_flags, io_req->sgl_cnt,
319 SCSI_SENSE_BUFFERSIZE,
320 io_req->sgl_list_pa,
321 io_req->sense_buf_pa,
322 0, /* scsi cmd ref, always 0 */
323 pri_tag, /* scsi pri and tag */
324 flags, /* command flags */
325 sc->cmnd, scsi_bufflen(sc),
326 fc_lun.scsi_lun, io_req->port_id,
327 rport->maxframe_size, rp->r_a_tov,
328 rp->e_d_tov);
329
330 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
331 return 0;
332}
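/*
 * Note that the driver services a single copy WQ: index 0 of
 * wq_copy_lock, wq_copy_desc_low, fw_ack_index and fw_ack_recd is
 * used throughout the I/O path above and in fnic_fcpio_ack_handler()
 * below.
 */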
333
334/*
335 * fnic_queuecommand
336 * Routine to send a scsi cdb
337 * Called with host_lock held and interrupts disabled.
338 */
339int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
340{
341 struct fc_lport *lp;
342 struct fc_rport *rport;
343 struct fnic_io_req *io_req;
344 struct fnic *fnic;
345 struct vnic_wq_copy *wq;
346 int ret;
347	int sg_count;
348 unsigned long flags;
349 unsigned long ptr;
350
351 rport = starget_to_rport(scsi_target(sc->device));
352 ret = fc_remote_port_chkready(rport);
353 if (ret) {
354 sc->result = ret;
355 done(sc);
356 return 0;
357 }
358
359 lp = shost_priv(sc->device->host);
360 if (lp->state != LPORT_ST_READY || !(lp->link_up))
361 return SCSI_MLQUEUE_HOST_BUSY;
362
363 /*
364 * Release host lock, use driver resource specific locks from here.
365 * Don't re-enable interrupts in case they were disabled prior to the
366 * caller disabling them.
367 */
368 spin_unlock(lp->host->host_lock);
369
370 /* Get a new io_req for this SCSI IO */
371 fnic = lport_priv(lp);
372
373 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
374 if (!io_req) {
375 ret = SCSI_MLQUEUE_HOST_BUSY;
376 goto out;
377 }
378 memset(io_req, 0, sizeof(*io_req));
379
380	sg_count = scsi_dma_map(sc);	/* negative return means map failed */
381	if (sg_count < 0) {
382		ret = SCSI_MLQUEUE_HOST_BUSY;
383		mempool_free(io_req, fnic->io_req_pool);
384		goto out;
385	}
386
387 /* Determine the type of scatter/gather list we need */
388 io_req->sgl_cnt = sg_count;
389 io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
390 if (sg_count > FNIC_DFLT_SG_DESC_CNT)
391 io_req->sgl_type = FNIC_SGL_CACHE_MAX;
392
393 if (sg_count) {
394 io_req->sgl_list =
395 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
396 GFP_ATOMIC | GFP_DMA);
397 if (!io_req->sgl_list) {
398 ret = SCSI_MLQUEUE_HOST_BUSY;
399 scsi_dma_unmap(sc);
400 mempool_free(io_req, fnic->io_req_pool);
401 goto out;
402 }
403
404 /* Cache sgl list allocated address before alignment */
405 io_req->sgl_list_alloc = io_req->sgl_list;
406 ptr = (unsigned long) io_req->sgl_list;
407 if (ptr % FNIC_SG_DESC_ALIGN) {
408 io_req->sgl_list = (struct host_sg_desc *)
409 (((unsigned long) ptr
410 + FNIC_SG_DESC_ALIGN - 1)
411 & ~(FNIC_SG_DESC_ALIGN - 1));
412 }
413 }
414
415 /* initialize rest of io_req */
416 io_req->port_id = rport->port_id;
417 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
418 CMD_SP(sc) = (char *)io_req;
419 sc->scsi_done = done;
420
421 /* create copy wq desc and enqueue it */
422 wq = &fnic->wq_copy[0];
423 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
424 if (ret) {
425 /*
426 * In case another thread cancelled the request,
427 * refetch the pointer under the lock.
428 */
429 spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
430
431 spin_lock_irqsave(io_lock, flags);
432 io_req = (struct fnic_io_req *)CMD_SP(sc);
433 CMD_SP(sc) = NULL;
434 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
435 spin_unlock_irqrestore(io_lock, flags);
436 if (io_req) {
437 fnic_release_ioreq_buf(fnic, io_req, sc);
438 mempool_free(io_req, fnic->io_req_pool);
439 }
440 }
441out:
442 /* acquire host lock before returning to SCSI */
443 spin_lock(lp->host->host_lock);
444 return ret;
445}
446
447/*
448 * fnic_fcpio_fw_reset_cmpl_handler
449 * Routine to handle fw reset completion
450 */
451static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
452 struct fcpio_fw_req *desc)
453{
454 u8 type;
455 u8 hdr_status;
456 struct fcpio_tag tag;
457 int ret = 0;
458 struct fc_frame *flogi;
459 unsigned long flags;
460
461 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
462
463 /* Clean up all outstanding io requests */
464 fnic_cleanup_io(fnic, SCSI_NO_TAG);
465
466 spin_lock_irqsave(&fnic->fnic_lock, flags);
467
468 flogi = fnic->flogi;
469 fnic->flogi = NULL;
470
471 /* fnic should be in FC_TRANS_ETH_MODE */
472 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
473 /* Check status of reset completion */
474 if (!hdr_status) {
475 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
476 "reset cmpl success\n");
477 /* Ready to send flogi out */
478 fnic->state = FNIC_IN_ETH_MODE;
479 } else {
480 FNIC_SCSI_DBG(KERN_DEBUG,
481 fnic->lport->host,
482 "fnic fw_reset : failed %s\n",
483 fnic_fcpio_status_to_str(hdr_status));
484
485 /*
486 * Unable to change to eth mode, cannot send out flogi
487 * Change state to fc mode, so that subsequent Flogi
488 * requests from libFC will cause more attempts to
489 * reset the firmware. Free the cached flogi
490 */
491 fnic->state = FNIC_IN_FC_MODE;
492 ret = -1;
493 }
494 } else {
495 FNIC_SCSI_DBG(KERN_DEBUG,
496 fnic->lport->host,
497 "Unexpected state %s while processing"
498 " reset cmpl\n", fnic_state_to_str(fnic->state));
499 ret = -1;
500 }
501
502 /* Thread removing device blocks till firmware reset is complete */
503 if (fnic->remove_wait)
504 complete(fnic->remove_wait);
505
506 /*
507 * If fnic is being removed, or fw reset failed
508 * free the flogi frame. Else, send it out
509 */
510 if (fnic->remove_wait || ret) {
511 fnic->flogi_oxid = FC_XID_UNKNOWN;
512 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
513 if (flogi)
514 dev_kfree_skb_irq(fp_skb(flogi));
515 goto reset_cmpl_handler_end;
516 }
517
518 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
519
520 if (flogi)
521 ret = fnic_send_frame(fnic, flogi);
522
523 reset_cmpl_handler_end:
524 return ret;
525}
526
527/*
528 * fnic_fcpio_flogi_reg_cmpl_handler
529 * Routine to handle flogi register completion
530 */
531static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
532 struct fcpio_fw_req *desc)
533{
534 u8 type;
535 u8 hdr_status;
536 struct fcpio_tag tag;
537 int ret = 0;
538 struct fc_frame *flogi_resp = NULL;
539 unsigned long flags;
540 struct sk_buff *skb;
541
542 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
543
544 /* Update fnic state based on status of flogi reg completion */
545 spin_lock_irqsave(&fnic->fnic_lock, flags);
546
547 flogi_resp = fnic->flogi_resp;
548 fnic->flogi_resp = NULL;
549
550 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
551
552 /* Check flogi registration completion status */
553 if (!hdr_status) {
554 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
555				      "flogi reg succeeded\n");
556 fnic->state = FNIC_IN_FC_MODE;
557 } else {
558 FNIC_SCSI_DBG(KERN_DEBUG,
559 fnic->lport->host,
560 "fnic flogi reg :failed %s\n",
561 fnic_fcpio_status_to_str(hdr_status));
562 fnic->state = FNIC_IN_ETH_MODE;
563 ret = -1;
564 }
565 } else {
566 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
567 "Unexpected fnic state %s while"
568 " processing flogi reg completion\n",
569 fnic_state_to_str(fnic->state));
570 ret = -1;
571 }
572
573 /* Successful flogi reg cmpl, pass frame to LibFC */
574 if (!ret && flogi_resp) {
575 if (fnic->stop_rx_link_events) {
576 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
577 goto reg_cmpl_handler_end;
578 }
579 skb = (struct sk_buff *)flogi_resp;
580 /* Use fr_flags to indicate whether flogi resp or not */
581 fr_flags(flogi_resp) = 1;
582 fr_dev(flogi_resp) = fnic->lport;
583 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
584
585 skb_queue_tail(&fnic->frame_queue, skb);
586 queue_work(fnic_event_queue, &fnic->frame_work);
587
588 } else {
589 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
590 if (flogi_resp)
591 dev_kfree_skb_irq(fp_skb(flogi_resp));
592 }
593
594reg_cmpl_handler_end:
595 return ret;
596}
597
598static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
599 u16 request_out)
600{
601 if (wq->to_clean_index <= wq->to_use_index) {
602 /* out of range, stale request_out index */
603 if (request_out < wq->to_clean_index ||
604 request_out >= wq->to_use_index)
605 return 0;
606 } else {
607 /* out of range, stale request_out index */
608 if (request_out < wq->to_clean_index &&
609 request_out >= wq->to_use_index)
610 return 0;
611 }
612 /* request_out index is in range */
613 return 1;
614}
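
The helper above implements a wrap-aware window check: outstanding copy-WQ entries occupy the half-open index range [to_clean, to_use), which may wrap past the end of the ring, and an ack index outside that window is stale. A minimal user-space sketch of the same predicate (standalone; the test values are illustrative, not from the driver):

#include <assert.h>
#include <stdint.h>

/* Mirror of the range check: a valid request_out lies in the
 * half-open window [to_clean, to_use), which may wrap around. */
static int index_in_range(uint16_t to_clean, uint16_t to_use,
			  uint16_t request_out)
{
	if (to_clean <= to_use)	/* window does not wrap */
		return request_out >= to_clean && request_out < to_use;
	/* window wraps past the end of the ring */
	return request_out >= to_clean || request_out < to_use;
}

int main(void)
{
	assert(index_in_range(2, 5, 3));	/* inside, no wrap */
	assert(!index_in_range(2, 5, 5));	/* to_use itself is stale */
	assert(index_in_range(6, 2, 7));	/* inside wrapped window */
	assert(!index_in_range(6, 2, 4));	/* in the hole */
	return 0;
}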
615
616
617/*
618 * Mark that an ack was received and store the ack index. If multiple
619 * acks are received before the Tx thread cleans up, the latest value
620 * will be used, which is the correct behavior. This state should live
621 * in the copy WQ instead of in the fnic.
622 */
623static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
624 unsigned int cq_index,
625 struct fcpio_fw_req *desc)
626{
627 struct vnic_wq_copy *wq;
628 u16 request_out = desc->u.ack.request_out;
629 unsigned long flags;
630
631 /* mark the ack state */
632 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
633 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
634
635 if (is_ack_index_in_range(wq, request_out)) {
636 fnic->fw_ack_index[0] = request_out;
637 fnic->fw_ack_recd[0] = 1;
638 }
639 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
640}
641
642/*
643 * fnic_fcpio_icmnd_cmpl_handler
644 * Routine to handle icmnd completions
645 */
646static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
647 struct fcpio_fw_req *desc)
648{
649 u8 type;
650 u8 hdr_status;
651 struct fcpio_tag tag;
652 u32 id;
653 u64 xfer_len = 0;
654 struct fcpio_icmnd_cmpl *icmnd_cmpl;
655 struct fnic_io_req *io_req;
656 struct scsi_cmnd *sc;
657 unsigned long flags;
658 spinlock_t *io_lock;
659
660 /* Decode the cmpl description to get the io_req id */
661 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
662 fcpio_tag_id_dec(&tag, &id);
663
664 if (id >= FNIC_MAX_IO_REQ)
665 return;
666
667 sc = scsi_host_find_tag(fnic->lport->host, id);
668 WARN_ON_ONCE(!sc);
669 if (!sc)
670 return;
671
672 io_lock = fnic_io_lock_hash(fnic, sc);
673 spin_lock_irqsave(io_lock, flags);
674 io_req = (struct fnic_io_req *)CMD_SP(sc);
675 WARN_ON_ONCE(!io_req);
676 if (!io_req) {
677 spin_unlock_irqrestore(io_lock, flags);
678 return;
679 }
680
681 /* firmware completed the io */
682 io_req->io_completed = 1;
683
684 /*
685 * if SCSI-ML has already issued abort on this command,
686 * ignore completion of the IO. The abts path will clean it up
687 */
688 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
689 spin_unlock_irqrestore(io_lock, flags);
690 return;
691 }
692
693 /* Mark the IO as complete */
694 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
695
696 icmnd_cmpl = &desc->u.icmnd_cmpl;
697
698 switch (hdr_status) {
699 case FCPIO_SUCCESS:
700 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
701 xfer_len = scsi_bufflen(sc);
702 scsi_set_resid(sc, icmnd_cmpl->residual);
703
704 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
705 xfer_len -= icmnd_cmpl->residual;
706
707 /*
708		 * If queue_full, then try to reduce the queue depth for
709		 * all LUNs on the target. TODO: this should be accompanied
710		 * by a periodic queue_depth ramp-up based on successful
711		 * IO completions.
712 */
713 if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
714 struct scsi_device *t_sdev;
715 int qd = 0;
716
717 shost_for_each_device(t_sdev, sc->device->host) {
718 if (t_sdev->id != sc->device->id)
719 continue;
720
721 if (t_sdev->queue_depth > 1) {
722 qd = scsi_track_queue_full
723 (t_sdev,
724 t_sdev->queue_depth - 1);
725 if (qd == -1)
726 qd = t_sdev->host->cmd_per_lun;
727 shost_printk(KERN_INFO,
728 fnic->lport->host,
729						     "scsi[%d:%d:%d:%d]"
730						     " queue full detected,"
731						     " new depth = %d\n",
732 t_sdev->host->host_no,
733 t_sdev->channel,
734 t_sdev->id, t_sdev->lun,
735 t_sdev->queue_depth);
736 }
737 }
738 }
739 break;
740
741 case FCPIO_TIMEOUT: /* request was timed out */
742 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
743 break;
744
745 case FCPIO_ABORTED: /* request was aborted */
746 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
747 break;
748
749 case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
750 scsi_set_resid(sc, icmnd_cmpl->residual);
751 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
752 break;
753
754 case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
755 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
756 break;
757 case FCPIO_INVALID_HEADER: /* header contains invalid data */
758 case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
759 case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
760 case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
761 case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
762 case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
763	case FCPIO_FW_ERR:          /* request was terminated due to fw error */
764 default:
765 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
766 fnic_fcpio_status_to_str(hdr_status));
767 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
768 break;
769 }
770
771 /* Break link with the SCSI command */
772 CMD_SP(sc) = NULL;
773
774 spin_unlock_irqrestore(io_lock, flags);
775
776 fnic_release_ioreq_buf(fnic, io_req, sc);
777
778 mempool_free(io_req, fnic->io_req_pool);
779
780 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
781 fnic->lport->host_stats.fcp_input_requests++;
782 fnic->fcp_input_bytes += xfer_len;
783 } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
784 fnic->lport->host_stats.fcp_output_requests++;
785 fnic->fcp_output_bytes += xfer_len;
786 } else
787 fnic->lport->host_stats.fcp_control_requests++;
788
789 /* Call SCSI completion function to complete the IO */
790 if (sc->scsi_done)
791 sc->scsi_done(sc);
792
793}
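
For context on the `(DID_* << 16) | scsi_status` expressions above: the SCSI mid-layer packs the host byte into bits 16-23 of sc->result and the SCSI status byte into bits 0-7. A standalone sketch of that packing (the DID_* values are the standard scsi.h ones; the status byte used here is illustrative):

#include <assert.h>
#include <stdint.h>

#define DID_OK       0x00	/* standard SCSI mid-layer host codes */
#define DID_TIME_OUT 0x03
#define DID_ERROR    0x07

/* host byte in bits 16-23, SCSI status byte in bits 0-7 */
static uint32_t pack_result(uint8_t host_byte, uint8_t scsi_status)
{
	return ((uint32_t)host_byte << 16) | scsi_status;
}

int main(void)
{
	uint32_t r = pack_result(DID_ERROR, 0x02); /* CHECK CONDITION */

	assert(((r >> 16) & 0xff) == DID_ERROR);   /* host byte */
	assert((r & 0xff) == 0x02);                /* status byte */
	assert(pack_result(DID_OK, 0) == 0);       /* full success */
	return 0;
}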
794
795/*
796 * fnic_fcpio_itmf_cmpl_handler: Routine to handle itmf completions
797 */
798static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
799 struct fcpio_fw_req *desc)
800{
801 u8 type;
802 u8 hdr_status;
803 struct fcpio_tag tag;
804 u32 id;
805 struct scsi_cmnd *sc;
806 struct fnic_io_req *io_req;
807 unsigned long flags;
808 spinlock_t *io_lock;
809
810 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
811 fcpio_tag_id_dec(&tag, &id);
812
813 if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
814 return;
815
816 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
817 WARN_ON_ONCE(!sc);
818 if (!sc)
819 return;
820
821 io_lock = fnic_io_lock_hash(fnic, sc);
822 spin_lock_irqsave(io_lock, flags);
823 io_req = (struct fnic_io_req *)CMD_SP(sc);
824 WARN_ON_ONCE(!io_req);
825 if (!io_req) {
826 spin_unlock_irqrestore(io_lock, flags);
827 return;
828 }
829
830 if (id & FNIC_TAG_ABORT) {
831 /* Completion of abort cmd */
832 if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
833 /* This is a late completion. Ignore it */
834 spin_unlock_irqrestore(io_lock, flags);
835 return;
836 }
837 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
838 CMD_ABTS_STATUS(sc) = hdr_status;
839
840 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
841 "abts cmpl recd. id %d status %s\n",
842 (int)(id & FNIC_TAG_MASK),
843 fnic_fcpio_status_to_str(hdr_status));
844
845 /*
846		 * If the scsi_eh thread is blocked waiting for the abts to
847		 * complete, signal completion to it; the IO is then cleaned
848		 * up in that thread. Else clean it up in this context.
849 */
850 if (io_req->abts_done) {
851 complete(io_req->abts_done);
852 spin_unlock_irqrestore(io_lock, flags);
853 } else {
854 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
855 "abts cmpl, completing IO\n");
856 CMD_SP(sc) = NULL;
857 sc->result = (DID_ERROR << 16);
858
859 spin_unlock_irqrestore(io_lock, flags);
860
861 fnic_release_ioreq_buf(fnic, io_req, sc);
862 mempool_free(io_req, fnic->io_req_pool);
863 if (sc->scsi_done)
864 sc->scsi_done(sc);
865 }
866
867 } else if (id & FNIC_TAG_DEV_RST) {
868 /* Completion of device reset */
869 CMD_LR_STATUS(sc) = hdr_status;
870 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
871 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
872 "dev reset cmpl recd. id %d status %s\n",
873 (int)(id & FNIC_TAG_MASK),
874 fnic_fcpio_status_to_str(hdr_status));
875 if (io_req->dr_done)
876 complete(io_req->dr_done);
877 spin_unlock_irqrestore(io_lock, flags);
878
879 } else {
880 shost_printk(KERN_ERR, fnic->lport->host,
881 "Unexpected itmf io state %s tag %x\n",
882 fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
883 spin_unlock_irqrestore(io_lock, flags);
884 }
885
886}
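
The itmf path shares one tag space with normal IO: the low bits carry the SCSI request tag, and dedicated high flag bits mark the task-management kind, which is how the handler above distinguishes an abort completion from a device-reset completion. A sketch of that scheme; the exact bit positions below are assumptions for illustration (the real values are defined in fnic.h):

#include <assert.h>
#include <stdint.h>

/* Assumed layout for illustration: low bits = SCSI tag, two high
 * flag bits select the task-management operation. */
#define TAG_MASK     ((1u << 24) - 1)
#define TAG_ABORT    (1u << 30)
#define TAG_DEV_RST  (1u << 29)

int main(void)
{
	uint32_t id = 17u | TAG_ABORT;	/* abort completion for tag 17 */

	assert((id & TAG_MASK) == 17);	/* recover the SCSI tag */
	assert(id & TAG_ABORT);		/* dispatch to the abort branch */
	assert(!(id & TAG_DEV_RST));	/* not a device-reset completion */
	return 0;
}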
887
888/*
889 * fnic_fcpio_cmpl_handler
890 * Routine to service the cq for wq_copy
891 */
892static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
893 unsigned int cq_index,
894 struct fcpio_fw_req *desc)
895{
896 struct fnic *fnic = vnic_dev_priv(vdev);
897 int ret = 0;
898
899 switch (desc->hdr.type) {
900 case FCPIO_ACK: /* fw copied copy wq desc to its queue */
901 fnic_fcpio_ack_handler(fnic, cq_index, desc);
902 break;
903
904 case FCPIO_ICMND_CMPL: /* fw completed a command */
905 fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
906 break;
907
908 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
909 fnic_fcpio_itmf_cmpl_handler(fnic, desc);
910 break;
911
912 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
913 ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
914 break;
915
916 case FCPIO_RESET_CMPL: /* fw completed reset */
917 ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
918 break;
919
920 default:
921 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
922 "firmware completion type %d\n",
923 desc->hdr.type);
924 break;
925 }
926
927 return ret;
928}
929
930/*
931 * fnic_wq_copy_cmpl_handler
932 * Routine to process wq copy
933 */
934int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
935{
936 unsigned int wq_work_done = 0;
937 unsigned int i, cq_index;
938 unsigned int cur_work_done;
939
940 for (i = 0; i < fnic->wq_copy_count; i++) {
941 cq_index = i + fnic->raw_wq_count + fnic->rq_count;
942 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
943 fnic_fcpio_cmpl_handler,
944 copy_work_to_do);
945 wq_work_done += cur_work_done;
946 }
947 return wq_work_done;
948}
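
Note the completion-queue index convention used in both directions: the CQs are laid out as [raw WQ CQs][RQ CQs][copy WQ CQs], so the loop above computes the copy-WQ CQ index as i + raw_wq_count + rq_count, and fnic_fcpio_ack_handler() inverts it with cq_index - fnic->raw_wq_count - fnic->rq_count. A one-line round-trip check (the counts are hypothetical):

#include <assert.h>

int main(void)
{
	unsigned int raw_wq_count = 1, rq_count = 2;	/* assumed layout */
	unsigned int i = 0;				/* first copy WQ */
	unsigned int cq_index = i + raw_wq_count + rq_count;

	/* inverse mapping used by the ack handler */
	assert(cq_index - raw_wq_count - rq_count == i);
	return 0;
}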
949
950static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
951{
952 unsigned int i;
953 struct fnic_io_req *io_req;
954 unsigned long flags = 0;
955 struct scsi_cmnd *sc;
956 spinlock_t *io_lock;
957
958 for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
959 if (i == exclude_id)
960 continue;
961
962 sc = scsi_host_find_tag(fnic->lport->host, i);
963 if (!sc)
964 continue;
965
966 io_lock = fnic_io_lock_hash(fnic, sc);
967 spin_lock_irqsave(io_lock, flags);
968 io_req = (struct fnic_io_req *)CMD_SP(sc);
969 if (!io_req) {
970 spin_unlock_irqrestore(io_lock, flags);
971 goto cleanup_scsi_cmd;
972 }
973
974 CMD_SP(sc) = NULL;
975
976 spin_unlock_irqrestore(io_lock, flags);
977
978 /*
979 * If there is a scsi_cmnd associated with this io_req, then
980 * free the corresponding state
981 */
982 fnic_release_ioreq_buf(fnic, io_req, sc);
983 mempool_free(io_req, fnic->io_req_pool);
984
985cleanup_scsi_cmd:
986 sc->result = DID_TRANSPORT_DISRUPTED << 16;
987 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
988 " DID_TRANSPORT_DISRUPTED\n");
989
990 /* Complete the command to SCSI */
991 if (sc->scsi_done)
992 sc->scsi_done(sc);
993 }
994}
995
996void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
997 struct fcpio_host_req *desc)
998{
999 u32 id;
1000 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1001 struct fnic_io_req *io_req;
1002 struct scsi_cmnd *sc;
1003 unsigned long flags;
1004 spinlock_t *io_lock;
1005
1006 /* get the tag reference */
1007 fcpio_tag_id_dec(&desc->hdr.tag, &id);
1008 id &= FNIC_TAG_MASK;
1009
1010 if (id >= FNIC_MAX_IO_REQ)
1011 return;
1012
1013 sc = scsi_host_find_tag(fnic->lport->host, id);
1014 if (!sc)
1015 return;
1016
1017 io_lock = fnic_io_lock_hash(fnic, sc);
1018 spin_lock_irqsave(io_lock, flags);
1019
1020 /* Get the IO context which this desc refers to */
1021 io_req = (struct fnic_io_req *)CMD_SP(sc);
1022
1023 /* fnic interrupts are turned off by now */
1024
1025 if (!io_req) {
1026 spin_unlock_irqrestore(io_lock, flags);
1027 goto wq_copy_cleanup_scsi_cmd;
1028 }
1029
1030 CMD_SP(sc) = NULL;
1031
1032 spin_unlock_irqrestore(io_lock, flags);
1033
1034 fnic_release_ioreq_buf(fnic, io_req, sc);
1035 mempool_free(io_req, fnic->io_req_pool);
1036
1037wq_copy_cleanup_scsi_cmd:
1038 sc->result = DID_NO_CONNECT << 16;
1039 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1040 " DID_NO_CONNECT\n");
1041
1042 if (sc->scsi_done)
1043 sc->scsi_done(sc);
1044}
1045
1046static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1047 u32 task_req, u8 *fc_lun,
1048 struct fnic_io_req *io_req)
1049{
1050 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1051 unsigned long flags;
1052
1053 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1054
1055 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1056 free_wq_copy_descs(fnic, wq);
1057
1058 if (!vnic_wq_copy_desc_avail(wq)) {
1059 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1060 return 1;
1061 }
1062 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1063 0, task_req, tag, fc_lun, io_req->port_id,
1064 fnic->config.ra_tov, fnic->config.ed_tov);
1065
1066 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1067 return 0;
1068}
1069
1070void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1071{
1072 int tag;
1073 struct fnic_io_req *io_req;
1074 spinlock_t *io_lock;
1075 unsigned long flags;
1076 struct scsi_cmnd *sc;
1077 struct scsi_lun fc_lun;
1078 enum fnic_ioreq_state old_ioreq_state;
1079
1080 FNIC_SCSI_DBG(KERN_DEBUG,
1081 fnic->lport->host,
1082		      "fnic_rport_exch_reset called portid 0x%06x\n",
1083 port_id);
1084
1085 if (fnic->in_remove)
1086 return;
1087
1088 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1089 sc = scsi_host_find_tag(fnic->lport->host, tag);
1090 if (!sc)
1091 continue;
1092
1093 io_lock = fnic_io_lock_hash(fnic, sc);
1094 spin_lock_irqsave(io_lock, flags);
1095
1096 io_req = (struct fnic_io_req *)CMD_SP(sc);
1097
1098 if (!io_req || io_req->port_id != port_id) {
1099 spin_unlock_irqrestore(io_lock, flags);
1100 continue;
1101 }
1102
1103 /*
1104 * Found IO that is still pending with firmware and
1105 * belongs to rport that went away
1106 */
1107 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1108 spin_unlock_irqrestore(io_lock, flags);
1109 continue;
1110 }
1111 old_ioreq_state = CMD_STATE(sc);
1112 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1113 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1114
1115 BUG_ON(io_req->abts_done);
1116
1117 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1118			      "fnic_rport_exch_reset: Issuing abts\n");
1119
1120 spin_unlock_irqrestore(io_lock, flags);
1121
1122 /* Now queue the abort command to firmware */
1123 int_to_scsilun(sc->device->lun, &fc_lun);
1124
1125 if (fnic_queue_abort_io_req(fnic, tag,
1126 FCPIO_ITMF_ABT_TASK_TERM,
1127 fc_lun.scsi_lun, io_req)) {
1128 /*
1129 * Revert the cmd state back to old state, if
1130			 * it hasn't changed in between. This cmd will get
1131 * aborted later by scsi_eh, or cleaned up during
1132 * lun reset
1133 */
1134 io_lock = fnic_io_lock_hash(fnic, sc);
1135
1136 spin_lock_irqsave(io_lock, flags);
1137 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1138 CMD_STATE(sc) = old_ioreq_state;
1139 spin_unlock_irqrestore(io_lock, flags);
1140 }
1141 }
1142
1143}
1144
1145void fnic_terminate_rport_io(struct fc_rport *rport)
1146{
1147 int tag;
1148 struct fnic_io_req *io_req;
1149 spinlock_t *io_lock;
1150 unsigned long flags;
1151 struct scsi_cmnd *sc;
1152 struct scsi_lun fc_lun;
1153 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1154 struct fc_lport *lport = rdata->local_port;
1155 struct fnic *fnic = lport_priv(lport);
1156 struct fc_rport *cmd_rport;
1157 enum fnic_ioreq_state old_ioreq_state;
1158
1159 FNIC_SCSI_DBG(KERN_DEBUG,
1160 fnic->lport->host, "fnic_terminate_rport_io called"
1161		      " wwpn 0x%llx, wwnn 0x%llx, portid 0x%06x\n",
1162 rport->port_name, rport->node_name,
1163 rport->port_id);
1164
1165 if (fnic->in_remove)
1166 return;
1167
1168 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1169 sc = scsi_host_find_tag(fnic->lport->host, tag);
1170 if (!sc)
1171 continue;
1172
1173 cmd_rport = starget_to_rport(scsi_target(sc->device));
1174 if (rport != cmd_rport)
1175 continue;
1176
1177 io_lock = fnic_io_lock_hash(fnic, sc);
1178 spin_lock_irqsave(io_lock, flags);
1179
1180 io_req = (struct fnic_io_req *)CMD_SP(sc);
1181
1182 if (!io_req || rport != cmd_rport) {
1183 spin_unlock_irqrestore(io_lock, flags);
1184 continue;
1185 }
1186
1187 /*
1188 * Found IO that is still pending with firmware and
1189 * belongs to rport that went away
1190 */
1191 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1192 spin_unlock_irqrestore(io_lock, flags);
1193 continue;
1194 }
1195 old_ioreq_state = CMD_STATE(sc);
1196 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1197 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1198
1199 BUG_ON(io_req->abts_done);
1200
1201 FNIC_SCSI_DBG(KERN_DEBUG,
1202 fnic->lport->host,
1203 "fnic_terminate_rport_io: Issuing abts\n");
1204
1205 spin_unlock_irqrestore(io_lock, flags);
1206
1207 /* Now queue the abort command to firmware */
1208 int_to_scsilun(sc->device->lun, &fc_lun);
1209
1210 if (fnic_queue_abort_io_req(fnic, tag,
1211 FCPIO_ITMF_ABT_TASK_TERM,
1212 fc_lun.scsi_lun, io_req)) {
1213 /*
1214 * Revert the cmd state back to old state, if
1215			 * it hasn't changed in between. This cmd will get
1216 * aborted later by scsi_eh, or cleaned up during
1217 * lun reset
1218 */
1219 io_lock = fnic_io_lock_hash(fnic, sc);
1220
1221 spin_lock_irqsave(io_lock, flags);
1222 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1223 CMD_STATE(sc) = old_ioreq_state;
1224 spin_unlock_irqrestore(io_lock, flags);
1225 }
1226 }
1227
1228}
1229
1230static void fnic_block_error_handler(struct scsi_cmnd *sc)
1231{
1232 struct Scsi_Host *shost = sc->device->host;
1233 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
1234 unsigned long flags;
1235
1236 spin_lock_irqsave(shost->host_lock, flags);
1237 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
1238 spin_unlock_irqrestore(shost->host_lock, flags);
1239 msleep(1000);
1240 spin_lock_irqsave(shost->host_lock, flags);
1241 }
1242 spin_unlock_irqrestore(shost->host_lock, flags);
1243
1244}
1245
1246/*
1247 * This function is exported to SCSI for sending abort cmnds.
1248 * A SCSI IO is represented by a io_req in the driver.
1249 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
1250 */
1251int fnic_abort_cmd(struct scsi_cmnd *sc)
1252{
1253 struct fc_lport *lp;
1254 struct fnic *fnic;
1255 struct fnic_io_req *io_req;
1256 struct fc_rport *rport;
1257 spinlock_t *io_lock;
1258 unsigned long flags;
1259 int ret = SUCCESS;
1260 u32 task_req;
1261 struct scsi_lun fc_lun;
1262 DECLARE_COMPLETION_ONSTACK(tm_done);
1263
1264 /* Wait for rport to unblock */
1265 fnic_block_error_handler(sc);
1266
1267 /* Get local-port, check ready and link up */
1268 lp = shost_priv(sc->device->host);
1269
1270 fnic = lport_priv(lp);
1271 FNIC_SCSI_DBG(KERN_DEBUG,
1272 fnic->lport->host,
1273 "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
1274 (starget_to_rport(scsi_target(sc->device)))->port_id,
1275 sc->device->lun, sc->request->tag);
1276
1277 if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1278 ret = FAILED;
1279 goto fnic_abort_cmd_end;
1280 }
1281
1282 /*
1283 * Avoid a race between SCSI issuing the abort and the device
1284 * completing the command.
1285 *
1286 * If the command is already completed by the fw cmpl code,
1287 * we just return SUCCESS from here. This means that the abort
1288	 * succeeded. In the SCSI ML, since the timeout for the command
1289	 * has happened, the completion won't actually complete the
1290	 * command and it will be considered an aborted command.
1291 *
1292 * The CMD_SP will not be cleared except while holding io_req_lock.
1293 */
1294 io_lock = fnic_io_lock_hash(fnic, sc);
1295 spin_lock_irqsave(io_lock, flags);
1296 io_req = (struct fnic_io_req *)CMD_SP(sc);
1297 if (!io_req) {
1298 spin_unlock_irqrestore(io_lock, flags);
1299 goto fnic_abort_cmd_end;
1300 }
1301
1302 io_req->abts_done = &tm_done;
1303
1304 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1305 spin_unlock_irqrestore(io_lock, flags);
1306 goto wait_pending;
1307 }
1308 /*
1309 * Command is still pending, need to abort it
1310 * If the firmware completes the command after this point,
1311	 * the completion won't be sent up to the mid-layer, since the abort
1312 * has already started.
1313 */
1314 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1315 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1316
1317 spin_unlock_irqrestore(io_lock, flags);
1318
1319 /*
1320 * Check readiness of the remote port. If the path to remote
1321 * port is up, then send abts to the remote port to terminate
1322 * the IO. Else, just locally terminate the IO in the firmware
1323 */
1324 rport = starget_to_rport(scsi_target(sc->device));
1325 if (fc_remote_port_chkready(rport) == 0)
1326 task_req = FCPIO_ITMF_ABT_TASK;
1327 else
1328 task_req = FCPIO_ITMF_ABT_TASK_TERM;
1329
1330 /* Now queue the abort command to firmware */
1331 int_to_scsilun(sc->device->lun, &fc_lun);
1332
1333 if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1334 fc_lun.scsi_lun, io_req)) {
1335 spin_lock_irqsave(io_lock, flags);
1336 io_req = (struct fnic_io_req *)CMD_SP(sc);
1337 if (io_req)
1338 io_req->abts_done = NULL;
1339 spin_unlock_irqrestore(io_lock, flags);
1340 ret = FAILED;
1341 goto fnic_abort_cmd_end;
1342 }
1343
1344 /*
1345 * We queued an abort IO, wait for its completion.
1346 * Once the firmware completes the abort command, it will
1347 * wake up this thread.
1348 */
1349 wait_pending:
1350 wait_for_completion_timeout(&tm_done,
1351 msecs_to_jiffies
1352 (2 * fnic->config.ra_tov +
1353 fnic->config.ed_tov));
1354
1355 /* Check the abort status */
1356 spin_lock_irqsave(io_lock, flags);
1357
1358 io_req = (struct fnic_io_req *)CMD_SP(sc);
1359 if (!io_req) {
1360 spin_unlock_irqrestore(io_lock, flags);
1361 ret = FAILED;
1362 goto fnic_abort_cmd_end;
1363 }
1364 io_req->abts_done = NULL;
1365
1366 /* fw did not complete abort, timed out */
1367 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1368 spin_unlock_irqrestore(io_lock, flags);
1369 ret = FAILED;
1370 goto fnic_abort_cmd_end;
1371 }
1372
1373 /*
1374 * firmware completed the abort, check the status,
1375 * free the io_req irrespective of failure or success
1376 */
1377 if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
1378 ret = FAILED;
1379
1380 CMD_SP(sc) = NULL;
1381
1382 spin_unlock_irqrestore(io_lock, flags);
1383
1384 fnic_release_ioreq_buf(fnic, io_req, sc);
1385 mempool_free(io_req, fnic->io_req_pool);
1386
1387fnic_abort_cmd_end:
1388 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1389 "Returning from abort cmd %s\n",
1390 (ret == SUCCESS) ?
1391 "SUCCESS" : "FAILED");
1392 return ret;
1393}
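
The wait_for_completion_timeout() budget above follows the fabric timers: roughly one R_A_TOV each way for the ABTS exchange plus E_D_TOV of slack. With commonly used FC defaults (assumed here; the driver reads the real values from fnic->config, in milliseconds) the budget works out as:

#include <assert.h>

int main(void)
{
	/* assumed typical FC timer defaults, in milliseconds */
	unsigned int ra_tov = 10000;	/* R_A_TOV */
	unsigned int ed_tov = 2000;	/* E_D_TOV */

	/* budget used by fnic_abort_cmd() for the abort completion */
	assert(2 * ra_tov + ed_tov == 22000);	/* 22 seconds */
	return 0;
}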
1394
1395static inline int fnic_queue_dr_io_req(struct fnic *fnic,
1396 struct scsi_cmnd *sc,
1397 struct fnic_io_req *io_req)
1398{
1399 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1400 struct scsi_lun fc_lun;
1401 int ret = 0;
1402 unsigned long intr_flags;
1403
1404 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
1405
1406 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1407 free_wq_copy_descs(fnic, wq);
1408
1409 if (!vnic_wq_copy_desc_avail(wq)) {
1410 ret = -EAGAIN;
1411 goto lr_io_req_end;
1412 }
1413
1414 /* fill in the lun info */
1415 int_to_scsilun(sc->device->lun, &fc_lun);
1416
1417 fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
1418 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
1419 fc_lun.scsi_lun, io_req->port_id,
1420 fnic->config.ra_tov, fnic->config.ed_tov);
1421
1422lr_io_req_end:
1423 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
1424
1425 return ret;
1426}
1427
1428/*
1429 * Clean up any pending aborts on the lun
1430 * For each outstanding IO on this lun, whose abort is not completed by fw,
1431 * issue a local abort. Wait for abort to complete. Return 0 if all commands
1432 * successfully aborted, 1 otherwise
1433 */
1434static int fnic_clean_pending_aborts(struct fnic *fnic,
1435 struct scsi_cmnd *lr_sc)
1436{
1437 int tag;
1438 struct fnic_io_req *io_req;
1439 spinlock_t *io_lock;
1440 unsigned long flags;
1441 int ret = 0;
1442 struct scsi_cmnd *sc;
1443 struct fc_rport *rport;
1444 struct scsi_lun fc_lun;
1445 struct scsi_device *lun_dev = lr_sc->device;
1446 DECLARE_COMPLETION_ONSTACK(tm_done);
1447
1448 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1449 sc = scsi_host_find_tag(fnic->lport->host, tag);
1450 /*
1451		 * ignore the lun reset cmd itself and any cmds that do not
1452		 * belong to this lun
1453 */
1454 if (!sc || sc == lr_sc || sc->device != lun_dev)
1455 continue;
1456
1457 io_lock = fnic_io_lock_hash(fnic, sc);
1458 spin_lock_irqsave(io_lock, flags);
1459
1460 io_req = (struct fnic_io_req *)CMD_SP(sc);
1461
1462 if (!io_req || sc->device != lun_dev) {
1463 spin_unlock_irqrestore(io_lock, flags);
1464 continue;
1465 }
1466
1467 /*
1468 * Found IO that is still pending with firmware and
1469 * belongs to the LUN that we are resetting
1470 */
1471 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1472 "Found IO in %s on lun\n",
1473 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1474
1475 BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
1476
1477 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1478 io_req->abts_done = &tm_done;
1479 spin_unlock_irqrestore(io_lock, flags);
1480
1481 /* Now queue the abort command to firmware */
1482 int_to_scsilun(sc->device->lun, &fc_lun);
1483 rport = starget_to_rport(scsi_target(sc->device));
1484
1485 if (fnic_queue_abort_io_req(fnic, tag,
1486 FCPIO_ITMF_ABT_TASK_TERM,
1487 fc_lun.scsi_lun, io_req)) {
1488 spin_lock_irqsave(io_lock, flags);
1489 io_req = (struct fnic_io_req *)CMD_SP(sc);
1490 if (io_req)
1491 io_req->abts_done = NULL;
1492 spin_unlock_irqrestore(io_lock, flags);
1493 ret = 1;
1494 goto clean_pending_aborts_end;
1495 }
1496
1497 wait_for_completion_timeout(&tm_done,
1498 msecs_to_jiffies
1499 (fnic->config.ed_tov));
1500
1501 /* Recheck cmd state to check if it is now aborted */
1502 spin_lock_irqsave(io_lock, flags);
1503 io_req = (struct fnic_io_req *)CMD_SP(sc);
1504 if (!io_req) {
1505 spin_unlock_irqrestore(io_lock, flags);
1506 ret = 1;
1507 goto clean_pending_aborts_end;
1508 }
1509
1510 io_req->abts_done = NULL;
1511
1512 /* if abort is still pending with fw, fail */
1513 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1514 spin_unlock_irqrestore(io_lock, flags);
1515 ret = 1;
1516 goto clean_pending_aborts_end;
1517 }
1518 CMD_SP(sc) = NULL;
1519 spin_unlock_irqrestore(io_lock, flags);
1520
1521 fnic_release_ioreq_buf(fnic, io_req, sc);
1522 mempool_free(io_req, fnic->io_req_pool);
1523 }
1524
1525clean_pending_aborts_end:
1526 return ret;
1527}
1528
1529/*
1530 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
1531 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
1532 * on the LUN.
1533 */
1534int fnic_device_reset(struct scsi_cmnd *sc)
1535{
1536 struct fc_lport *lp;
1537 struct fnic *fnic;
1538 struct fnic_io_req *io_req;
1539 struct fc_rport *rport;
1540 int status;
1541 int ret = FAILED;
1542 spinlock_t *io_lock;
1543 unsigned long flags;
1544 DECLARE_COMPLETION_ONSTACK(tm_done);
1545
1546 /* Wait for rport to unblock */
1547 fnic_block_error_handler(sc);
1548
1549 /* Get local-port, check ready and link up */
1550 lp = shost_priv(sc->device->host);
1551
1552 fnic = lport_priv(lp);
1553 FNIC_SCSI_DBG(KERN_DEBUG,
1554 fnic->lport->host,
1555 "Device reset called FCID 0x%x, LUN 0x%x\n",
1556 (starget_to_rport(scsi_target(sc->device)))->port_id,
1557 sc->device->lun);
1558
1559
1560 if (lp->state != LPORT_ST_READY || !(lp->link_up))
1561 goto fnic_device_reset_end;
1562
1563 /* Check if remote port up */
1564 rport = starget_to_rport(scsi_target(sc->device));
1565 if (fc_remote_port_chkready(rport))
1566 goto fnic_device_reset_end;
1567
1568 io_lock = fnic_io_lock_hash(fnic, sc);
1569 spin_lock_irqsave(io_lock, flags);
1570 io_req = (struct fnic_io_req *)CMD_SP(sc);
1571
1572 /*
1573 * If there is a io_req attached to this command, then use it,
1574 * else allocate a new one.
1575 */
1576 if (!io_req) {
1577 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
1578 if (!io_req) {
1579 spin_unlock_irqrestore(io_lock, flags);
1580 goto fnic_device_reset_end;
1581 }
1582 memset(io_req, 0, sizeof(*io_req));
1583 io_req->port_id = rport->port_id;
1584 CMD_SP(sc) = (char *)io_req;
1585 }
1586 io_req->dr_done = &tm_done;
1587 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
1588 CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
1589 spin_unlock_irqrestore(io_lock, flags);
1590
1591 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
1592 sc->request->tag);
1593
1594 /*
1595 * issue the device reset, if enqueue failed, clean up the ioreq
1596 * and break assoc with scsi cmd
1597 */
1598 if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
1599 spin_lock_irqsave(io_lock, flags);
1600 io_req = (struct fnic_io_req *)CMD_SP(sc);
1601 if (io_req)
1602 io_req->dr_done = NULL;
1603 goto fnic_device_reset_clean;
1604 }
1605
1606 /*
1607 * Wait on the local completion for LUN reset. The io_req may be
1608 * freed while we wait since we hold no lock.
1609 */
1610 wait_for_completion_timeout(&tm_done,
1611 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
1612
1613 spin_lock_irqsave(io_lock, flags);
1614 io_req = (struct fnic_io_req *)CMD_SP(sc);
1615 if (!io_req) {
1616 spin_unlock_irqrestore(io_lock, flags);
1617 goto fnic_device_reset_end;
1618 }
1619 io_req->dr_done = NULL;
1620
1621 status = CMD_LR_STATUS(sc);
1622 spin_unlock_irqrestore(io_lock, flags);
1623
1624 /*
1625	 * If the lun reset did not complete, bail out with FAILED. io_req
1626	 * gets cleaned up at higher levels of EH.
1627 */
1628 if (status == FCPIO_INVALID_CODE) {
1629 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1630 "Device reset timed out\n");
1631 goto fnic_device_reset_end;
1632 }
1633
1634 /* Completed, but not successful, clean up the io_req, return fail */
1635 if (status != FCPIO_SUCCESS) {
1636 spin_lock_irqsave(io_lock, flags);
1637 FNIC_SCSI_DBG(KERN_DEBUG,
1638 fnic->lport->host,
1639 "Device reset completed - failed\n");
1640 io_req = (struct fnic_io_req *)CMD_SP(sc);
1641 goto fnic_device_reset_clean;
1642 }
1643
1644 /*
1645 * Clean up any aborts on this lun that have still not
1646 * completed. If any of these fail, then LUN reset fails.
1647 * clean_pending_aborts cleans all cmds on this lun except
1648 * the lun reset cmd. If all cmds get cleaned, the lun reset
1649 * succeeds
1650 */
1651 if (fnic_clean_pending_aborts(fnic, sc)) {
1652 spin_lock_irqsave(io_lock, flags);
1653 io_req = (struct fnic_io_req *)CMD_SP(sc);
1654 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1655			      "Device reset failed"
1656			      " since not all IOs could be aborted\n");
1657 goto fnic_device_reset_clean;
1658 }
1659
1660 /* Clean lun reset command */
1661 spin_lock_irqsave(io_lock, flags);
1662 io_req = (struct fnic_io_req *)CMD_SP(sc);
1663 if (io_req)
1664 /* Completed, and successful */
1665 ret = SUCCESS;
1666
1667fnic_device_reset_clean:
1668 if (io_req)
1669 CMD_SP(sc) = NULL;
1670
1671 spin_unlock_irqrestore(io_lock, flags);
1672
1673 if (io_req) {
1674 fnic_release_ioreq_buf(fnic, io_req, sc);
1675 mempool_free(io_req, fnic->io_req_pool);
1676 }
1677
1678fnic_device_reset_end:
1679 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1680 "Returning from device reset %s\n",
1681 (ret == SUCCESS) ?
1682 "SUCCESS" : "FAILED");
1683 return ret;
1684}
1685
1686/* Clean up all IOs, clean up libFC local port */
1687int fnic_reset(struct Scsi_Host *shost)
1688{
1689 struct fc_lport *lp;
1690 struct fnic *fnic;
1691 int ret = SUCCESS;
1692
1693 lp = shost_priv(shost);
1694 fnic = lport_priv(lp);
1695
1696 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1697 "fnic_reset called\n");
1698
1699 /*
1700 * Reset local port, this will clean up libFC exchanges,
1701 * reset remote port sessions, and if link is up, begin flogi
1702 */
1703 if (lp->tt.lport_reset(lp))
1704 ret = FAILED;
1705
1706 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1707 "Returning from fnic reset %s\n",
1708 (ret == SUCCESS) ?
1709 "SUCCESS" : "FAILED");
1710
1711 return ret;
1712}
1713
1714/*
1715 * SCSI Error handling calls driver's eh_host_reset if all prior
1716 * error handling levels return FAILED. If host reset completes
1717 * successfully, and if link is up, then Fabric login begins.
1718 *
1719 * Host Reset is the highest level of error recovery. If this fails, then
1720 * host is offlined by SCSI.
1721 *
1722 */
1723int fnic_host_reset(struct scsi_cmnd *sc)
1724{
1725 int ret;
1726 unsigned long wait_host_tmo;
1727 struct Scsi_Host *shost = sc->device->host;
1728 struct fc_lport *lp = shost_priv(shost);
1729
1730 /*
1731	 * If fnic_reset is successful, wait for fabric login to complete.
1732	 * scsi-ml sends a TUR to every device if the host reset is
1733	 * successful, so the fabric should be up before returning to scsi.
1734 */
1735 ret = fnic_reset(shost);
1736 if (ret == SUCCESS) {
1737 wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
1738 ret = FAILED;
1739 while (time_before(jiffies, wait_host_tmo)) {
1740 if ((lp->state == LPORT_ST_READY) &&
1741 (lp->link_up)) {
1742 ret = SUCCESS;
1743 break;
1744 }
1745 ssleep(1);
1746 }
1747 }
1748
1749 return ret;
1750}
1751
1752/*
1753 * This function is called from libFC when the host is removed
1754 */
1755void fnic_scsi_abort_io(struct fc_lport *lp)
1756{
1757 int err = 0;
1758 unsigned long flags;
1759 enum fnic_state old_state;
1760 struct fnic *fnic = lport_priv(lp);
1761 DECLARE_COMPLETION_ONSTACK(remove_wait);
1762
1763 /* Issue firmware reset for fnic, wait for reset to complete */
1764 spin_lock_irqsave(&fnic->fnic_lock, flags);
1765 fnic->remove_wait = &remove_wait;
1766 old_state = fnic->state;
1767 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1768 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
1769 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1770
1771 err = fnic_fw_reset_handler(fnic);
1772 if (err) {
1773 spin_lock_irqsave(&fnic->fnic_lock, flags);
1774 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1775 fnic->state = old_state;
1776 fnic->remove_wait = NULL;
1777 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1778 return;
1779 }
1780
1781 /* Wait for firmware reset to complete */
1782 wait_for_completion_timeout(&remove_wait,
1783 msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
1784
1785 spin_lock_irqsave(&fnic->fnic_lock, flags);
1786 fnic->remove_wait = NULL;
1787 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1788 "fnic_scsi_abort_io %s\n",
1789 (fnic->state == FNIC_IN_ETH_MODE) ?
1790 "SUCCESS" : "FAILED");
1791 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1792
1793}
1794
1795/*
1796 * This function is called from libFC to clean up driver IO state on link down
1797 */
1798void fnic_scsi_cleanup(struct fc_lport *lp)
1799{
1800 unsigned long flags;
1801 enum fnic_state old_state;
1802 struct fnic *fnic = lport_priv(lp);
1803
1804 /* issue fw reset */
1805 spin_lock_irqsave(&fnic->fnic_lock, flags);
1806 old_state = fnic->state;
1807 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1808 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
1809 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1810
1811 if (fnic_fw_reset_handler(fnic)) {
1812 spin_lock_irqsave(&fnic->fnic_lock, flags);
1813 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1814 fnic->state = old_state;
1815 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1816 }
1817
1818}
1819
1820void fnic_empty_scsi_cleanup(struct fc_lport *lp)
1821{
1822}
1823
1824void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
1825{
1826 struct fnic *fnic = lport_priv(lp);
1827
1828 /* Non-zero sid, nothing to do */
1829 if (sid)
1830 goto call_fc_exch_mgr_reset;
1831
1832 if (did) {
1833 fnic_rport_exch_reset(fnic, did);
1834 goto call_fc_exch_mgr_reset;
1835 }
1836
1837 /*
1838 * sid = 0, did = 0
1839 * link down or device being removed
1840 */
1841 if (!fnic->in_remove)
1842 fnic_scsi_cleanup(lp);
1843 else
1844 fnic_scsi_abort_io(lp);
1845
1846 /* call libFC exch mgr reset to reset its exchanges */
1847call_fc_exch_mgr_reset:
1848 fc_exch_mgr_reset(lp, sid, did);
1849
1850}
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
new file mode 100644
index 000000000000..92e80ae6b725
--- /dev/null
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _RQ_ENET_DESC_H_
19#define _RQ_ENET_DESC_H_
20
21/* Ethernet receive queue descriptor: 16B */
22struct rq_enet_desc {
23 __le64 address;
24 __le16 length_type;
25 u8 reserved[6];
26};
27
28enum rq_enet_type_types {
29 RQ_ENET_TYPE_ONLY_SOP = 0,
30 RQ_ENET_TYPE_NOT_SOP = 1,
31 RQ_ENET_TYPE_RESV2 = 2,
32 RQ_ENET_TYPE_RESV3 = 3,
33};
34
35#define RQ_ENET_ADDR_BITS 64
36#define RQ_ENET_LEN_BITS 14
37#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
38#define RQ_ENET_TYPE_BITS 2
39#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
40
41static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
42 u64 address, u8 type, u16 length)
43{
44 desc->address = cpu_to_le64(address);
45 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
46 ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
47}
48
49static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
50 u64 *address, u8 *type, u16 *length)
51{
52 *address = le64_to_cpu(desc->address);
53 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
54 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
55 RQ_ENET_TYPE_MASK);
56}
57
58#endif /* _RQ_ENET_DESC_H_ */
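
Because the encode/decode pair above is pure bit packing, the round trip is easy to verify in isolation. A user-space sketch with the same LEN/TYPE masks (endian conversion elided by assuming a little-endian host):

#include <assert.h>
#include <stdint.h>

#define LEN_BITS  14
#define LEN_MASK  ((1u << LEN_BITS) - 1)
#define TYPE_MASK ((1u << 2) - 1)

int main(void)
{
	/* pack a 1500-byte SOP buffer, as rq_enet_desc_enc() would */
	uint16_t length_type = (1500 & LEN_MASK) |
			       ((0 /* RQ_ENET_TYPE_ONLY_SOP */ & TYPE_MASK)
				<< LEN_BITS);

	/* decode, as rq_enet_desc_dec() would */
	assert((length_type & LEN_MASK) == 1500);
	assert(((length_type >> LEN_BITS) & TYPE_MASK) == 0);

	/* lengths wider than 14 bits are silently truncated by the mask */
	assert((20000 & LEN_MASK) != 20000);
	return 0;
}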
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
new file mode 100644
index 000000000000..c5db32eda5ef
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -0,0 +1,85 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include "vnic_dev.h"
22#include "vnic_cq.h"
23
24void vnic_cq_free(struct vnic_cq *cq)
25{
26 vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
27
28 cq->ctrl = NULL;
29}
30
31int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
32 unsigned int desc_count, unsigned int desc_size)
33{
34 int err;
35
36 cq->index = index;
37 cq->vdev = vdev;
38
39 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
40 if (!cq->ctrl) {
41 printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
42 return -EINVAL;
43 }
44
45 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
46 if (err)
47 return err;
48
49 return 0;
50}
51
52void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
53 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
54 unsigned int cq_tail_color, unsigned int interrupt_enable,
55 unsigned int cq_entry_enable, unsigned int cq_message_enable,
56 unsigned int interrupt_offset, u64 cq_message_addr)
57{
58 u64 paddr;
59
60 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
61 writeq(paddr, &cq->ctrl->ring_base);
62 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
63 iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
64 iowrite32(color_enable, &cq->ctrl->color_enable);
65 iowrite32(cq_head, &cq->ctrl->cq_head);
66 iowrite32(cq_tail, &cq->ctrl->cq_tail);
67 iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
68 iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
69 iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
70 iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
71 iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
72 writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
73}
74
75void vnic_cq_clean(struct vnic_cq *cq)
76{
77 cq->to_clean = 0;
78 cq->last_color = 0;
79
80 iowrite32(0, &cq->ctrl->cq_head);
81 iowrite32(0, &cq->ctrl->cq_tail);
82 iowrite32(1, &cq->ctrl->cq_tail_color);
83
84 vnic_dev_clear_desc_ring(&cq->ring);
85}
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
new file mode 100644
index 000000000000..4ede6809fb1e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_CQ_H_
19#define _VNIC_CQ_H_
20
21#include "cq_desc.h"
22#include "vnic_dev.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_cq_service fnic_cq_service
29#define vnic_cq_free fnic_cq_free
30#define vnic_cq_alloc fnic_cq_alloc
31#define vnic_cq_init fnic_cq_init
32#define vnic_cq_clean fnic_cq_clean
33
34/* Completion queue control */
35struct vnic_cq_ctrl {
36 u64 ring_base; /* 0x00 */
37 u32 ring_size; /* 0x08 */
38 u32 pad0;
39 u32 flow_control_enable; /* 0x10 */
40 u32 pad1;
41 u32 color_enable; /* 0x18 */
42 u32 pad2;
43 u32 cq_head; /* 0x20 */
44 u32 pad3;
45 u32 cq_tail; /* 0x28 */
46 u32 pad4;
47 u32 cq_tail_color; /* 0x30 */
48 u32 pad5;
49 u32 interrupt_enable; /* 0x38 */
50 u32 pad6;
51 u32 cq_entry_enable; /* 0x40 */
52 u32 pad7;
53 u32 cq_message_enable; /* 0x48 */
54 u32 pad8;
55 u32 interrupt_offset; /* 0x50 */
56 u32 pad9;
57 u64 cq_message_addr; /* 0x58 */
58 u32 pad10;
59};
60
61struct vnic_cq {
62 unsigned int index;
63 struct vnic_dev *vdev;
64 struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
65 struct vnic_dev_ring ring;
66 unsigned int to_clean;
67 unsigned int last_color;
68};
69
70static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
71 unsigned int work_to_do,
72 int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
73 u8 type, u16 q_number, u16 completed_index, void *opaque),
74 void *opaque)
75{
76 struct cq_desc *cq_desc;
77 unsigned int work_done = 0;
78 u16 q_number, completed_index;
79 u8 type, color;
80
81 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
82 cq->ring.desc_size * cq->to_clean);
83 cq_desc_dec(cq_desc, &type, &color,
84 &q_number, &completed_index);
85
86 while (color != cq->last_color) {
87
88 if ((*q_service)(cq->vdev, cq_desc, type,
89 q_number, completed_index, opaque))
90 break;
91
92 cq->to_clean++;
93 if (cq->to_clean == cq->ring.desc_count) {
94 cq->to_clean = 0;
95 cq->last_color = cq->last_color ? 0 : 1;
96 }
97
98 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
99 cq->ring.desc_size * cq->to_clean);
100 cq_desc_dec(cq_desc, &type, &color,
101 &q_number, &completed_index);
102
103 work_done++;
104 if (work_done >= work_to_do)
105 break;
106 }
107
108 return work_done;
109}
110
111void vnic_cq_free(struct vnic_cq *cq);
112int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
113 unsigned int desc_count, unsigned int desc_size);
114void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
115 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
116 unsigned int cq_tail_color, unsigned int interrupt_enable,
117 unsigned int cq_entry_enable, unsigned int message_enable,
118 unsigned int interrupt_offset, u64 message_addr);
119void vnic_cq_clean(struct vnic_cq *cq);
120
121#endif /* _VNIC_CQ_H_ */
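
The vnic_cq_service() loop above depends on the color protocol: the device stamps each completion descriptor with the current color bit, and an entry belongs to the consumer only while its color differs from last_color; every time to_clean wraps, last_color flips so leftover descriptors from the previous lap read as not-yet-ready. A small standalone sketch of the same idea (hypothetical four-entry ring, single producer):

#include <assert.h>
#include <stdint.h>

#define QLEN 4

struct entry { uint8_t color; int data; };

/* consumer state, mirroring struct vnic_cq */
static unsigned int to_clean;
static uint8_t last_color;

/* returns 1 and fills *data if a new completion is ready */
static int service_one(struct entry *ring, int *data)
{
	if (ring[to_clean].color == last_color)
		return 0;		/* entry still from the previous lap */
	*data = ring[to_clean].data;
	if (++to_clean == QLEN) {	/* wrap: expect the opposite color */
		to_clean = 0;
		last_color ^= 1;
	}
	return 1;
}

int main(void)
{
	struct entry ring[QLEN] = { {0, 0} };	/* color 0 = stale initially */
	int v;

	assert(!service_one(ring, &v));		/* nothing posted yet */
	ring[0] = (struct entry){ .color = 1, .data = 42 };
	assert(service_one(ring, &v) && v == 42);
	assert(!service_one(ring, &v));		/* ring[1] still old color */
	return 0;
}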
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
new file mode 100644
index 000000000000..7901ce255a81
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_CQ_COPY_H_
19#define _VNIC_CQ_COPY_H_
20
21#include "fcpio.h"
22
23static inline unsigned int vnic_cq_copy_service(
24 struct vnic_cq *cq,
25 int (*q_service)(struct vnic_dev *vdev,
26 unsigned int index,
27 struct fcpio_fw_req *desc),
28 unsigned int work_to_do)
29
30{
31 struct fcpio_fw_req *desc;
32 unsigned int work_done = 0;
33 u8 color;
34
35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
36 cq->ring.desc_size * cq->to_clean);
37 fcpio_color_dec(desc, &color);
38
39 while (color != cq->last_color) {
40
41 if ((*q_service)(cq->vdev, cq->index, desc))
42 break;
43
44 cq->to_clean++;
45 if (cq->to_clean == cq->ring.desc_count) {
46 cq->to_clean = 0;
47 cq->last_color = cq->last_color ? 0 : 1;
48 }
49
50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
51 cq->ring.desc_size * cq->to_clean);
52 fcpio_color_dec(desc, &color);
53
54 work_done++;
55 if (work_done >= work_to_do)
56 break;
57 }
58
59 return work_done;
60}
61
62#endif /* _VNIC_CQ_COPY_H_ */
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
new file mode 100644
index 000000000000..566770645086
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -0,0 +1,690 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include <linux/if_ether.h>
25#include "vnic_resource.h"
26#include "vnic_devcmd.h"
27#include "vnic_dev.h"
28#include "vnic_stats.h"
29
30struct vnic_res {
31 void __iomem *vaddr;
32 unsigned int count;
33};
34
35struct vnic_dev {
36 void *priv;
37 struct pci_dev *pdev;
38 struct vnic_res res[RES_TYPE_MAX];
39 enum vnic_dev_intr_mode intr_mode;
40 struct vnic_devcmd __iomem *devcmd;
41 struct vnic_devcmd_notify *notify;
42 struct vnic_devcmd_notify notify_copy;
43 dma_addr_t notify_pa;
44 u32 *linkstatus;
45 dma_addr_t linkstatus_pa;
46 struct vnic_stats *stats;
47 dma_addr_t stats_pa;
48 struct vnic_devcmd_fw_info *fw_info;
49 dma_addr_t fw_info_pa;
50};
51
52#define VNIC_MAX_RES_HDR_SIZE \
53 (sizeof(struct vnic_resource_header) + \
54 sizeof(struct vnic_resource) * RES_TYPE_MAX)
55#define VNIC_RES_STRIDE 128
56
57void *vnic_dev_priv(struct vnic_dev *vdev)
58{
59 return vdev->priv;
60}
61
62static int vnic_dev_discover_res(struct vnic_dev *vdev,
63 struct vnic_dev_bar *bar)
64{
65 struct vnic_resource_header __iomem *rh;
66 struct vnic_resource __iomem *r;
67 u8 type;
68
69 if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
70 printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
71 return -EINVAL;
72 }
73
74 rh = bar->vaddr;
75 if (!rh) {
76 printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
77 return -EINVAL;
78 }
79
80 if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
81 ioread32(&rh->version) != VNIC_RES_VERSION) {
82 printk(KERN_ERR "vNIC BAR0 res magic/version error "
83 "exp (%lx/%lx) curr (%x/%x)\n",
84 VNIC_RES_MAGIC, VNIC_RES_VERSION,
85 ioread32(&rh->magic), ioread32(&rh->version));
86 return -EINVAL;
87 }
88
89 r = (struct vnic_resource __iomem *)(rh + 1);
90
91 while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
92
93 u8 bar_num = ioread8(&r->bar);
94 u32 bar_offset = ioread32(&r->bar_offset);
95 u32 count = ioread32(&r->count);
96 u32 len;
97
98 r++;
99
100 if (bar_num != 0) /* only mapping in BAR0 resources */
101 continue;
102
103 switch (type) {
104 case RES_TYPE_WQ:
105 case RES_TYPE_RQ:
106 case RES_TYPE_CQ:
107 case RES_TYPE_INTR_CTRL:
108 /* each count is stride bytes long */
109 len = count * VNIC_RES_STRIDE;
110 if (len + bar_offset > bar->len) {
111 printk(KERN_ERR "vNIC BAR0 resource %d "
112 "out-of-bounds, offset 0x%x + "
113 "size 0x%x > bar len 0x%lx\n",
114 type, bar_offset,
115 len,
116 bar->len);
117 return -EINVAL;
118 }
119 break;
120 case RES_TYPE_INTR_PBA_LEGACY:
121 case RES_TYPE_DEVCMD:
122 len = count;
123 break;
124 default:
125 continue;
126 }
127
128 vdev->res[type].count = count;
129 vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
130 }
131
132 return 0;
133}
134
135unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
136 enum vnic_res_type type)
137{
138 return vdev->res[type].count;
139}
140
141void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
142 unsigned int index)
143{
144 if (!vdev->res[type].vaddr)
145 return NULL;
146
147 switch (type) {
148 case RES_TYPE_WQ:
149 case RES_TYPE_RQ:
150 case RES_TYPE_CQ:
151 case RES_TYPE_INTR_CTRL:
152 return (char __iomem *)vdev->res[type].vaddr +
153 index * VNIC_RES_STRIDE;
154 default:
155 return (char __iomem *)vdev->res[type].vaddr;
156 }
157}
158
159unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
160 unsigned int desc_count,
161 unsigned int desc_size)
162{
163 /* The base address of the desc rings must be 512 byte aligned.
164 * Descriptor count is aligned to groups of 32 descriptors. A
165 * count of 0 means the maximum 4096 descriptors. Descriptor
166 * size is aligned to 16 bytes.
167 */
168
169 unsigned int count_align = 32;
170 unsigned int desc_align = 16;
171
172 ring->base_align = 512;
173
174 if (desc_count == 0)
175 desc_count = 4096;
176
177 ring->desc_count = ALIGN(desc_count, count_align);
178
179 ring->desc_size = ALIGN(desc_size, desc_align);
180
181 ring->size = ring->desc_count * ring->desc_size;
182 ring->size_unaligned = ring->size + ring->base_align;
183
184 return ring->size_unaligned;
185}
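
A worked example of the sizing rules above: asking for 300 descriptors of 24 bytes rounds the count up to a multiple of 32 and the size up to a multiple of 16, then over-allocates by base_align so the ring start can be aligned after allocation. A sketch of the same arithmetic (values chosen for illustration):

#include <assert.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int desc_count = ALIGN_UP(300, 32);	/* -> 320 */
	unsigned int desc_size  = ALIGN_UP(24, 16);	/* -> 32  */
	unsigned int size = desc_count * desc_size;	/* -> 10240 */

	assert(desc_count == 320 && desc_size == 32);
	/* over-allocate by the 512-byte base alignment, as the driver does */
	assert(size + 512 == 10752);
	return 0;
}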
186
187void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
188{
189 memset(ring->descs, 0, ring->size);
190}
191
192int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
193 unsigned int desc_count, unsigned int desc_size)
194{
195 vnic_dev_desc_ring_size(ring, desc_count, desc_size);
196
197 ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
198 ring->size_unaligned,
199 &ring->base_addr_unaligned);
200
201 if (!ring->descs_unaligned) {
202 printk(KERN_ERR
203 "Failed to allocate ring (size=%d), aborting\n",
204 (int)ring->size);
205 return -ENOMEM;
206 }
207
208 ring->base_addr = ALIGN(ring->base_addr_unaligned,
209 ring->base_align);
210 ring->descs = (u8 *)ring->descs_unaligned +
211 (ring->base_addr - ring->base_addr_unaligned);
212
213 vnic_dev_clear_desc_ring(ring);
214
215 ring->desc_avail = ring->desc_count - 1;
216
217 return 0;
218}
219
220void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
221{
222 if (ring->descs) {
223 pci_free_consistent(vdev->pdev,
224 ring->size_unaligned,
225 ring->descs_unaligned,
226 ring->base_addr_unaligned);
227 ring->descs = NULL;
228 }
229}
230
231int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
232 u64 *a0, u64 *a1, int wait)
233{
234 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
235 int delay;
236 u32 status;
237 int dev_cmd_err[] = {
238 /* convert from fw's version of error.h to host's version */
239 0, /* ERR_SUCCESS */
240 EINVAL, /* ERR_EINVAL */
241 EFAULT, /* ERR_EFAULT */
242 EPERM, /* ERR_EPERM */
243 EBUSY, /* ERR_EBUSY */
244 };
245 int err;
246
247 status = ioread32(&devcmd->status);
248 if (status & STAT_BUSY) {
249 printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
250 return -EBUSY;
251 }
252
253 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
254 writeq(*a0, &devcmd->args[0]);
255 writeq(*a1, &devcmd->args[1]);
256 wmb();
257 }
258
259 iowrite32(cmd, &devcmd->cmd);
260
261 if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
262 return 0;
263
264 for (delay = 0; delay < wait; delay++) {
265
266 udelay(100);
267
268 status = ioread32(&devcmd->status);
269 if (!(status & STAT_BUSY)) {
270
271 if (status & STAT_ERROR) {
272 err = dev_cmd_err[(int)readq(&devcmd->args[0])];
273 printk(KERN_ERR "Error %d devcmd %d\n",
274 err, _CMD_N(cmd));
275 return -err;
276 }
277
278 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
279 rmb();
280 *a0 = readq(&devcmd->args[0]);
281 *a1 = readq(&devcmd->args[1]);
282 }
283
284 return 0;
285 }
286 }
287
288	printk(KERN_ERR "Timed out devcmd %d\n", _CMD_N(cmd));
289 return -ETIMEDOUT;
290}
291
292int vnic_dev_fw_info(struct vnic_dev *vdev,
293 struct vnic_devcmd_fw_info **fw_info)
294{
295 u64 a0, a1 = 0;
296 int wait = 1000;
297 int err = 0;
298
299 if (!vdev->fw_info) {
300 vdev->fw_info = pci_alloc_consistent(vdev->pdev,
301 sizeof(struct vnic_devcmd_fw_info),
302 &vdev->fw_info_pa);
303 if (!vdev->fw_info)
304 return -ENOMEM;
305
306 a0 = vdev->fw_info_pa;
307
308 /* only get fw_info once and cache it */
309 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
310 }
311
312 *fw_info = vdev->fw_info;
313
314 return err;
315}
316
317int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
318 void *value)
319{
320 u64 a0, a1;
321 int wait = 1000;
322 int err;
323
324 a0 = offset;
325 a1 = size;
326
327 err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
328
329 switch (size) {
330 case 1:
331 *(u8 *)value = (u8)a0;
332 break;
333 case 2:
334 *(u16 *)value = (u16)a0;
335 break;
336 case 4:
337 *(u32 *)value = (u32)a0;
338 break;
339 case 8:
340 *(u64 *)value = a0;
341 break;
342 default:
343 BUG();
344 break;
345 }
346
347 return err;
348}
349
350int vnic_dev_stats_clear(struct vnic_dev *vdev)
351{
352 u64 a0 = 0, a1 = 0;
353 int wait = 1000;
354 return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
355}
356
357int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
358{
359 u64 a0, a1;
360 int wait = 1000;
361
362 if (!vdev->stats) {
363 vdev->stats = pci_alloc_consistent(vdev->pdev,
364 sizeof(struct vnic_stats), &vdev->stats_pa);
365 if (!vdev->stats)
366 return -ENOMEM;
367 }
368
369 *stats = vdev->stats;
370 a0 = vdev->stats_pa;
371 a1 = sizeof(struct vnic_stats);
372
373 return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
374}
375
376int vnic_dev_close(struct vnic_dev *vdev)
377{
378 u64 a0 = 0, a1 = 0;
379 int wait = 1000;
380 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
381}
382
383int vnic_dev_enable(struct vnic_dev *vdev)
384{
385 u64 a0 = 0, a1 = 0;
386 int wait = 1000;
387 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
388}
389
390int vnic_dev_disable(struct vnic_dev *vdev)
391{
392 u64 a0 = 0, a1 = 0;
393 int wait = 1000;
394 return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
395}
396
397int vnic_dev_open(struct vnic_dev *vdev, int arg)
398{
399 u64 a0 = (u32)arg, a1 = 0;
400 int wait = 1000;
401 return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
402}
403
404int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
405{
406 u64 a0 = 0, a1 = 0;
407 int wait = 1000;
408 int err;
409
410 *done = 0;
411
412 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
413 if (err)
414 return err;
415
416 *done = (a0 == 0);
417
418 return 0;
419}
420
421int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
422{
423 u64 a0 = (u32)arg, a1 = 0;
424 int wait = 1000;
425 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
426}
427
428int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
429{
430 u64 a0 = 0, a1 = 0;
431 int wait = 1000;
432 int err;
433
434 *done = 0;
435
436 err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
437 if (err)
438 return err;
439
440 *done = (a0 == 0);
441
442 return 0;
443}
444
445int vnic_dev_hang_notify(struct vnic_dev *vdev)
446{
447 u64 a0, a1;
448 int wait = 1000;
449 return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
450}
451
452int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
453{
454 u64 a0, a1;
455 int wait = 1000;
456 int err, i;
457
458 for (i = 0; i < ETH_ALEN; i++)
459 mac_addr[i] = 0;
460
461 err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
462 if (err)
463 return err;
464
465 for (i = 0; i < ETH_ALEN; i++)
466 mac_addr[i] = ((u8 *)&a0)[i];
467
468 return 0;
469}
470
471void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
472 int broadcast, int promisc, int allmulti)
473{
474 u64 a0, a1 = 0;
475 int wait = 1000;
476 int err;
477
478 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
479 (multicast ? CMD_PFILTER_MULTICAST : 0) |
480 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
481 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
482 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
483
484 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
485 if (err)
486 printk(KERN_ERR "Can't set packet filter\n");
487}
488
489void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
490{
491 u64 a0 = 0, a1 = 0;
492 int wait = 1000;
493 int err;
494 int i;
495
496 for (i = 0; i < ETH_ALEN; i++)
497 ((u8 *)&a0)[i] = addr[i];
498
499 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
500 if (err)
501 printk(KERN_ERR
502 "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
503 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
504 err);
505}
506
507void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
508{
509 u64 a0 = 0, a1 = 0;
510 int wait = 1000;
511 int err;
512 int i;
513
514 for (i = 0; i < ETH_ALEN; i++)
515 ((u8 *)&a0)[i] = addr[i];
516
517 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
518 if (err)
519 printk(KERN_ERR
520 "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
521 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
522 err);
523}
524
525int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
526{
527 u64 a0, a1;
528 int wait = 1000;
529
530 if (!vdev->notify) {
531 vdev->notify = pci_alloc_consistent(vdev->pdev,
532 sizeof(struct vnic_devcmd_notify),
533 &vdev->notify_pa);
534 if (!vdev->notify)
535 return -ENOMEM;
536 }
537
538 a0 = vdev->notify_pa;
539 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
540 a1 += sizeof(struct vnic_devcmd_notify);
541
542 return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
543}
544
545void vnic_dev_notify_unset(struct vnic_dev *vdev)
546{
547 u64 a0, a1;
548 int wait = 1000;
549
550 a0 = 0; /* paddr = 0 to unset notify buffer */
551 a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
552 a1 += sizeof(struct vnic_devcmd_notify);
553
554 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
555}
556
557static int vnic_dev_notify_ready(struct vnic_dev *vdev)
558{
559 u32 *words;
560 unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
561 unsigned int i;
562 u32 csum;
563
564 if (!vdev->notify)
565 return 0;
566
567 do {
568 csum = 0;
569 memcpy(&vdev->notify_copy, vdev->notify,
570 sizeof(struct vnic_devcmd_notify));
571 words = (u32 *)&vdev->notify_copy;
572 for (i = 1; i < nwords; i++)
573 csum += words[i];
574 } while (csum != words[0]);
575
576 return 1;
577}
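/*
 * Editor's note: the loop above spins until it captures a torn-free
 * snapshot of the shared notify block. The assumed firmware-side
 * invariant (inferred, not stated in the patch) is that csum is the
 * sum of all words after the first, published last:
 *
 *	u32 *w = (u32 *)notify;
 *	u32 csum = 0;
 *	for (i = 1; i < sizeof(*notify) / 4; i++)
 *		csum += w[i];
 *	w[0] = csum;	(a concurrent update makes the host's check
 *			 fail, so the host simply copies again)
 */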
578
579int vnic_dev_init(struct vnic_dev *vdev, int arg)
580{
581 u64 a0 = (u32)arg, a1 = 0;
582 int wait = 1000;
583 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
584}
585
586int vnic_dev_link_status(struct vnic_dev *vdev)
587{
588 if (vdev->linkstatus)
589 return *vdev->linkstatus;
590
591 if (!vnic_dev_notify_ready(vdev))
592 return 0;
593
594 return vdev->notify_copy.link_state;
595}
596
597u32 vnic_dev_port_speed(struct vnic_dev *vdev)
598{
599 if (!vnic_dev_notify_ready(vdev))
600 return 0;
601
602 return vdev->notify_copy.port_speed;
603}
604
605u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
606{
607 if (!vnic_dev_notify_ready(vdev))
608 return 0;
609
610 return vdev->notify_copy.msglvl;
611}
612
613u32 vnic_dev_mtu(struct vnic_dev *vdev)
614{
615 if (!vnic_dev_notify_ready(vdev))
616 return 0;
617
618 return vdev->notify_copy.mtu;
619}
620
621u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
622{
623 if (!vnic_dev_notify_ready(vdev))
624 return 0;
625
626 return vdev->notify_copy.link_down_cnt;
627}
628
629void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
630 enum vnic_dev_intr_mode intr_mode)
631{
632 vdev->intr_mode = intr_mode;
633}
634
635enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
636 struct vnic_dev *vdev)
637{
638 return vdev->intr_mode;
639}
640
641void vnic_dev_unregister(struct vnic_dev *vdev)
642{
643 if (vdev) {
644 if (vdev->notify)
645 pci_free_consistent(vdev->pdev,
646 sizeof(struct vnic_devcmd_notify),
647 vdev->notify,
648 vdev->notify_pa);
649 if (vdev->linkstatus)
650 pci_free_consistent(vdev->pdev,
651 sizeof(u32),
652 vdev->linkstatus,
653 vdev->linkstatus_pa);
654 if (vdev->stats)
655 pci_free_consistent(vdev->pdev,
656 sizeof(struct vnic_stats),
657 vdev->stats, vdev->stats_pa);
658 if (vdev->fw_info)
659 pci_free_consistent(vdev->pdev,
660 sizeof(struct vnic_devcmd_fw_info),
661 vdev->fw_info, vdev->fw_info_pa);
662 kfree(vdev);
663 }
664}
665
666struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
667 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
668{
669 if (!vdev) {
670 vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
671 if (!vdev)
672 return NULL;
673 }
674
675 vdev->priv = priv;
676 vdev->pdev = pdev;
677
678 if (vnic_dev_discover_res(vdev, bar))
679 goto err_out;
680
681 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
682 if (!vdev->devcmd)
683 goto err_out;
684
685 return vdev;
686
687err_out:
688 vnic_dev_unregister(vdev);
689 return NULL;
690}
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
new file mode 100644
index 000000000000..f9935a8a5a09
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_DEV_H_
19#define _VNIC_DEV_H_
20
21#include "vnic_resource.h"
22#include "vnic_devcmd.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_dev_priv fnic_dev_priv
29#define vnic_dev_get_res_count fnic_dev_get_res_count
30#define vnic_dev_get_res fnic_dev_get_res
31#define vnic_dev_desc_ring_size fnic_dev_desc_ring_size
32#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring
33#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring
34#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring
35#define vnic_dev_cmd fnic_dev_cmd
36#define vnic_dev_fw_info fnic_dev_fw_info
37#define vnic_dev_spec fnic_dev_spec
38#define vnic_dev_stats_clear fnic_dev_stats_clear
39#define vnic_dev_stats_dump fnic_dev_stats_dump
40#define vnic_dev_hang_notify fnic_dev_hang_notify
41#define vnic_dev_packet_filter fnic_dev_packet_filter
42#define vnic_dev_add_addr fnic_dev_add_addr
43#define vnic_dev_del_addr fnic_dev_del_addr
44#define vnic_dev_mac_addr fnic_dev_mac_addr
45#define vnic_dev_notify_set fnic_dev_notify_set
46#define vnic_dev_notify_unset fnic_dev_notify_unset
47#define vnic_dev_link_status fnic_dev_link_status
48#define vnic_dev_port_speed fnic_dev_port_speed
49#define vnic_dev_msg_lvl fnic_dev_msg_lvl
50#define vnic_dev_mtu fnic_dev_mtu
51#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt
52#define vnic_dev_close fnic_dev_close
53#define vnic_dev_enable fnic_dev_enable
54#define vnic_dev_disable fnic_dev_disable
55#define vnic_dev_open fnic_dev_open
56#define vnic_dev_open_done fnic_dev_open_done
57#define vnic_dev_init fnic_dev_init
58#define vnic_dev_soft_reset fnic_dev_soft_reset
59#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done
60#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode
61#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode
62#define vnic_dev_unregister fnic_dev_unregister
63#define vnic_dev_register fnic_dev_register
64
65#ifndef VNIC_PADDR_TARGET
66#define VNIC_PADDR_TARGET 0x0000000000000000ULL
67#endif
68
69#ifndef readq
70static inline u64 readq(void __iomem *reg)
71{
72 return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
73}
74
75static inline void writeq(u64 val, void __iomem *reg)
76{
77 writel(val & 0xffffffff, reg);
78 writel(val >> 32, reg + 0x4UL);
79}
80#endif
81
82enum vnic_dev_intr_mode {
83 VNIC_DEV_INTR_MODE_UNKNOWN,
84 VNIC_DEV_INTR_MODE_INTX,
85 VNIC_DEV_INTR_MODE_MSI,
86 VNIC_DEV_INTR_MODE_MSIX,
87};
88
89struct vnic_dev_bar {
90 void __iomem *vaddr;
91 dma_addr_t bus_addr;
92 unsigned long len;
93};
94
95struct vnic_dev_ring {
96 void *descs;
97 size_t size;
98 dma_addr_t base_addr;
99 size_t base_align;
100 void *descs_unaligned;
101 size_t size_unaligned;
102 dma_addr_t base_addr_unaligned;
103 unsigned int desc_size;
104 unsigned int desc_count;
105 unsigned int desc_avail;
106};
107
108struct vnic_dev;
109struct vnic_stats;
110
111void *vnic_dev_priv(struct vnic_dev *vdev);
112unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
113 enum vnic_res_type type);
114void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
115 unsigned int index);
116unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
117 unsigned int desc_count,
118 unsigned int desc_size);
119void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
120int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
121 unsigned int desc_count, unsigned int desc_size);
122void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
123 struct vnic_dev_ring *ring);
124int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
125 u64 *a0, u64 *a1, int wait);
126int vnic_dev_fw_info(struct vnic_dev *vdev,
127 struct vnic_devcmd_fw_info **fw_info);
128int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
129 unsigned int size, void *value);
130int vnic_dev_stats_clear(struct vnic_dev *vdev);
131int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
132int vnic_dev_hang_notify(struct vnic_dev *vdev);
133void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
134 int broadcast, int promisc, int allmulti);
135void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
136void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
137int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
138int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
139void vnic_dev_notify_unset(struct vnic_dev *vdev);
140int vnic_dev_link_status(struct vnic_dev *vdev);
141u32 vnic_dev_port_speed(struct vnic_dev *vdev);
142u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
143u32 vnic_dev_mtu(struct vnic_dev *vdev);
144u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
145int vnic_dev_close(struct vnic_dev *vdev);
146int vnic_dev_enable(struct vnic_dev *vdev);
147int vnic_dev_disable(struct vnic_dev *vdev);
148int vnic_dev_open(struct vnic_dev *vdev, int arg);
149int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
150int vnic_dev_init(struct vnic_dev *vdev, int arg);
151int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
152int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
153void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
154 enum vnic_dev_intr_mode intr_mode);
155enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
156void vnic_dev_unregister(struct vnic_dev *vdev);
157struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
158 void *priv, struct pci_dev *pdev,
159 struct vnic_dev_bar *bar);
160
161#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
new file mode 100644
index 000000000000..d62b9061bf12
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -0,0 +1,281 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_DEVCMD_H_
19#define _VNIC_DEVCMD_H_
20
21#define _CMD_NBITS 14
22#define _CMD_VTYPEBITS 10
23#define _CMD_FLAGSBITS 6
24#define _CMD_DIRBITS 2
25
26#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
27#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
28#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
29#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
30
31#define _CMD_NSHIFT 0
32#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
33#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
34#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
35
36/*
37 * Direction bits (from host perspective).
38 */
39#define _CMD_DIR_NONE 0U
40#define _CMD_DIR_WRITE 1U
41#define _CMD_DIR_READ 2U
42#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
43
44/*
45 * Flag bits.
46 */
47#define _CMD_FLAGS_NONE 0U
48#define _CMD_FLAGS_NOWAIT 1U
49
50/*
51 * vNIC type bits.
52 */
53#define _CMD_VTYPE_NONE 0U
54#define _CMD_VTYPE_ENET 1U
55#define _CMD_VTYPE_FC 2U
56#define _CMD_VTYPE_SCSI 4U
57#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
58
59/*
60 * Used to create cmds.
61 */
62#define _CMDCF(dir, flags, vtype, nr) \
63 (((dir) << _CMD_DIRSHIFT) | \
64 ((flags) << _CMD_FLAGSSHIFT) | \
65 ((vtype) << _CMD_VTYPESHIFT) | \
66 ((nr) << _CMD_NSHIFT))
67#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
68#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
69
70/*
71 * Used to decode cmds.
72 */
73#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
74#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
75#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
76#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
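/*
 * Editor's worked example, not part of the patch: for CMD_DEV_SPEC
 * (defined below as _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2)) the decode
 * macros recover:
 *
 *	_CMD_N(CMD_DEV_SPEC)     == 2			bits  0..13
 *	_CMD_VTYPE(CMD_DEV_SPEC) == _CMD_VTYPE_ALL (7)	bits 14..23
 *	_CMD_FLAGS(CMD_DEV_SPEC) == _CMD_FLAGS_NONE	bits 24..29
 *	_CMD_DIR(CMD_DEV_SPEC)   == _CMD_DIR_RW (3)	bits 30..31
 */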
77
78enum vnic_devcmd_cmd {
79 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
80
81 /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
82 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
83
84 /* dev-specific block member:
85 * in: (u16)a0=offset,(u8)a1=size
86 * out: a0=value */
87 CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
88
89 /* stats clear */
90 CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
91
92 /* stats dump in mem: (u64)a0=paddr to stats area,
93 * (u16)a1=sizeof stats area */
94 CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
95
96 /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
97 CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
98
99 /* hang detection notification */
100 CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
101
102 /* MAC address in (u48)a0 */
103 CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
104 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
105
106 /* disable/enable promisc mode: (u8)a0=0/1 */
107/***** XXX DEPRECATED *****/
108 CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
109
110 /* disable/enable all-multi mode: (u8)a0=0/1 */
111/***** XXX DEPRECATED *****/
112 CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
113
114 /* add addr from (u48)a0 */
115 CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
116 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
117
118 /* del addr from (u48)a0 */
119 CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
120 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
121
122 /* add VLAN id in (u16)a0 */
123 CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
124
125 /* del VLAN id in (u16)a0 */
126 CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
127
128 /* nic_cfg in (u32)a0 */
129 CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
130
131 /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
132 CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
133
134 /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
135 CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
136
137 /* initiate softreset */
138 CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
139
140 /* softreset status:
141 * out: a0=0 reset complete, a0=1 reset in progress */
142 CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
143
144 /* set struct vnic_devcmd_notify buffer in mem:
145 * in:
146 * (u64)a0=paddr to notify (set paddr=0 to unset)
147 * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
148 * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
149 * out:
150 * (u32)a1 = effective size
151 */
152 CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
153
154 /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
155 * (u8)a1=PXENV_UNDI_xxx */
156 CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
157
158 /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
159 CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
160
161 /* open status:
162 * out: a0=0 open complete, a0=1 open in progress */
163 CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
164
165 /* close vnic */
166 CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
167
168 /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
169 CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
170
171 /* variant of CMD_INIT, with provisioning info
172 * (u64)a0=paddr of vnic_devcmd_provinfo
173 * (u32)a1=sizeof provision info */
174 CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
175
176 /* enable virtual link */
177 CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
178
179 /* disable virtual link */
180 CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
181
182 /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
183 CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
184
185 /* init status:
186 * out: a0=0 init complete, a0=1 init in progress
187 * if a0=0, a1=errno */
188 CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
189
190 /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
191 * (u8)a1=INT13_CMD_xxx */
192 CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
193
194 /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
195 CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
196
197 /* undo initialize of virtual link */
198 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
199};
200
201/* flags for CMD_OPEN */
202#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
203
204/* flags for CMD_INIT */
205#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
206
207/* flags for CMD_PACKET_FILTER */
208#define CMD_PFILTER_DIRECTED 0x01
209#define CMD_PFILTER_MULTICAST 0x02
210#define CMD_PFILTER_BROADCAST 0x04
211#define CMD_PFILTER_PROMISCUOUS 0x08
212#define CMD_PFILTER_ALL_MULTICAST 0x10
213
214enum vnic_devcmd_status {
215 STAT_NONE = 0,
216 STAT_BUSY = 1 << 0, /* cmd in progress */
217 STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
218};
219
220enum vnic_devcmd_error {
221 ERR_SUCCESS = 0,
222 ERR_EINVAL = 1,
223 ERR_EFAULT = 2,
224 ERR_EPERM = 3,
225 ERR_EBUSY = 4,
226 ERR_ECMDUNKNOWN = 5,
227 ERR_EBADSTATE = 6,
228 ERR_ENOMEM = 7,
229 ERR_ETIMEDOUT = 8,
230 ERR_ELINKDOWN = 9,
231};
232
233struct vnic_devcmd_fw_info {
234 char fw_version[32];
235 char fw_build[32];
236 char hw_version[32];
237 char hw_serial_number[32];
238};
239
240struct vnic_devcmd_notify {
241 u32 csum; /* checksum over following words */
242
243 u32 link_state; /* link up == 1 */
244 u32 port_speed; /* effective port speed (rate limit) */
245 u32 mtu; /* MTU */
246 u32 msglvl; /* requested driver msg lvl */
247 u32 uif; /* uplink interface */
248 u32 status; /* status bits (see VNIC_STF_*) */
249 u32 error; /* error code (see ERR_*) for first ERR */
250 u32 link_down_cnt; /* running count of link down transitions */
251};
252#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
253
254struct vnic_devcmd_provinfo {
255 u8 oui[3];
256 u8 type;
257 u8 data[0];
258};
259
260/*
261 * Writing cmd register causes STAT_BUSY to get set in status register.
262 * When cmd completes, STAT_BUSY will be cleared.
263 *
264 * If cmd completed successfully STAT_ERROR will be clear
265 * and args registers contain cmd-specific results.
266 *
267 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
268 *
269 * status register is read-only. While STAT_BUSY is set,
270 * all other register contents are read-only.
271 */
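/*
 * Editor's summary of the protocol above as a caller would drive it;
 * vnic_dev_cmd() in vnic_dev.c is the actual implementation:
 *
 *	write args[]; write cmd		(hardware sets STAT_BUSY)
 *	poll status until !STAT_BUSY
 *	if (status & STAT_ERROR)
 *		args[0] holds an ERR_* code from the enum above
 *	else
 *		read args[] for the cmd-specific results
 */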
272
273/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
274#define VNIC_DEVCMD_NARGS 15
275struct vnic_devcmd {
276 u32 status; /* RO */
277 u32 cmd; /* RW */
278 u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
279};
280
281#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
new file mode 100644
index 000000000000..4f4dc8793d23
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include "vnic_dev.h"
25#include "vnic_intr.h"
26
27void vnic_intr_free(struct vnic_intr *intr)
28{
29 intr->ctrl = NULL;
30}
31
32int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
33 unsigned int index)
34{
35 intr->index = index;
36 intr->vdev = vdev;
37
38 intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
39 if (!intr->ctrl) {
40 printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
41 index);
42 return -EINVAL;
43 }
44
45 return 0;
46}
47
48void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
49 unsigned int coalescing_type, unsigned int mask_on_assertion)
50{
51 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
52 iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
53 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
54 iowrite32(0, &intr->ctrl->int_credits);
55}
56
57void vnic_intr_clean(struct vnic_intr *intr)
58{
59 iowrite32(0, &intr->ctrl->int_credits);
60}
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
new file mode 100644
index 000000000000..d5fb40e7c98e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_INTR_H_
19#define _VNIC_INTR_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_intr_unmask fnic_intr_unmask
29#define vnic_intr_mask fnic_intr_mask
30#define vnic_intr_return_credits fnic_intr_return_credits
31#define vnic_intr_credits fnic_intr_credits
32#define vnic_intr_return_all_credits fnic_intr_return_all_credits
33#define vnic_intr_legacy_pba fnic_intr_legacy_pba
34#define vnic_intr_free fnic_intr_free
35#define vnic_intr_alloc fnic_intr_alloc
36#define vnic_intr_init fnic_intr_init
37#define vnic_intr_clean fnic_intr_clean
38
39#define VNIC_INTR_TIMER_MAX 0xffff
40
41#define VNIC_INTR_TIMER_TYPE_ABS 0
42#define VNIC_INTR_TIMER_TYPE_QUIET 1
43
44/* Interrupt control */
45struct vnic_intr_ctrl {
46 u32 coalescing_timer; /* 0x00 */
47 u32 pad0;
48 u32 coalescing_value; /* 0x08 */
49 u32 pad1;
50 u32 coalescing_type; /* 0x10 */
51 u32 pad2;
52 u32 mask_on_assertion; /* 0x18 */
53 u32 pad3;
54 u32 mask; /* 0x20 */
55 u32 pad4;
56 u32 int_credits; /* 0x28 */
57 u32 pad5;
58 u32 int_credit_return; /* 0x30 */
59 u32 pad6;
60};
61
62struct vnic_intr {
63 unsigned int index;
64 struct vnic_dev *vdev;
65 struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
66};
67
68static inline void vnic_intr_unmask(struct vnic_intr *intr)
69{
70 iowrite32(0, &intr->ctrl->mask);
71}
72
73static inline void vnic_intr_mask(struct vnic_intr *intr)
74{
75 iowrite32(1, &intr->ctrl->mask);
76}
77
78static inline void vnic_intr_return_credits(struct vnic_intr *intr,
79 unsigned int credits, int unmask, int reset_timer)
80{
81#define VNIC_INTR_UNMASK_SHIFT 16
82#define VNIC_INTR_RESET_TIMER_SHIFT 17
83
84 u32 int_credit_return = (credits & 0xffff) |
85 (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
86 (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
87
88 iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
89}
90
91static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
92{
93 return ioread32(&intr->ctrl->int_credits);
94}
95
96static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
97{
98 unsigned int credits = vnic_intr_credits(intr);
99 int unmask = 1;
100 int reset_timer = 1;
101
102 vnic_intr_return_credits(intr, credits, unmask, reset_timer);
103}
104
105static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
106{
107 /* read PBA without clearing */
108 return ioread32(legacy_pba);
109}
110
111void vnic_intr_free(struct vnic_intr *intr);
112int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
113 unsigned int index);
114void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
115 unsigned int coalescing_type, unsigned int mask_on_assertion);
116void vnic_intr_clean(struct vnic_intr *intr);
117
118#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
new file mode 100644
index 000000000000..f15b83eeaced
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_NIC_H_
19#define _VNIC_NIC_H_
20
21/*
22 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
23 * Driver) when both are built with CONFIG options =y
24 */
25#define vnic_set_nic_cfg fnic_set_nic_cfg
26
27#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
28#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
29#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
30#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
31#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
32#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
33#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
34#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
35#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
36#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
37#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
38#define NIC_CFG_RSS_ENABLE (1UL << 22)
39#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
40#define NIC_CFG_RSS_ENABLE_SHIFT 22
41#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
42#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
43#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
44#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
45#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
46#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
47
48static inline void vnic_set_nic_cfg(u32 *nic_cfg,
49 u8 rss_default_cpu, u8 rss_hash_type,
50 u8 rss_hash_bits, u8 rss_base_cpu,
51 u8 rss_enable, u8 tso_ipid_split_en,
52 u8 ig_vlan_strip_en)
53{
54 *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
55 ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
56 << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
57 ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
58 << NIC_CFG_RSS_HASH_BITS_SHIFT) |
59 ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
60 << NIC_CFG_RSS_BASE_CPU_SHIFT) |
61 ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
62 << NIC_CFG_RSS_ENABLE_SHIFT) |
63 ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
64 << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
65 ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
66 << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
67}
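/*
 * Editor's usage sketch with hypothetical values, not from the patch.
 * A caller that wants neither RSS nor VLAN stripping packs an all-zero
 * word and hands it to CMD_NIC_CFG (see vnic_devcmd.h):
 *
 *	u32 nic_cfg;
 *
 *	vnic_set_nic_cfg(&nic_cfg,
 *		0, 0, 0, 0,	rss_default_cpu/hash_type/hash_bits/base_cpu
 *		0,		rss_enable
 *		0,		tso_ipid_split_en
 *		0);		ig_vlan_strip_en
 *
 * nic_cfg ends up 0 here; nonzero fields are masked and shifted in.
 */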
68
69#endif /* _VNIC_NIC_H_ */
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
new file mode 100644
index 000000000000..2d842f79d41a
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_RESOURCE_H_
19#define _VNIC_RESOURCE_H_
20
21#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
22#define VNIC_RES_VERSION 0x00000000L
23
24/* vNIC resource types */
25enum vnic_res_type {
26 RES_TYPE_EOL, /* End-of-list */
27 RES_TYPE_WQ, /* Work queues */
28 RES_TYPE_RQ, /* Receive queues */
29 RES_TYPE_CQ, /* Completion queues */
30 RES_TYPE_RSVD1,
31 RES_TYPE_NIC_CFG, /* Enet NIC config registers */
32 RES_TYPE_RSVD2,
33 RES_TYPE_RSVD3,
34 RES_TYPE_RSVD4,
35 RES_TYPE_RSVD5,
36 RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
37 RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
38 RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
39 RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
40 RES_TYPE_RSVD6,
41 RES_TYPE_RSVD7,
42 RES_TYPE_DEVCMD, /* Device command region */
43 RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
44
45 RES_TYPE_MAX, /* Count of resource types */
46};
47
48struct vnic_resource_header {
49 u32 magic;
50 u32 version;
51};
52
53struct vnic_resource {
54 u8 type;
55 u8 bar;
56 u8 pad[2];
57 u32 bar_offset;
58 u32 count;
59};
60
61#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
new file mode 100644
index 000000000000..bedd0d285630
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_dev.h"
24#include "vnic_rq.h"
25
26static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
27{
28 struct vnic_rq_buf *buf;
29 struct vnic_dev *vdev;
30 unsigned int i, j, count = rq->ring.desc_count;
31 unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
32
33 vdev = rq->vdev;
34
35 for (i = 0; i < blks; i++) {
36 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
37 if (!rq->bufs[i]) {
38 printk(KERN_ERR "Failed to alloc rq_bufs\n");
39 return -ENOMEM;
40 }
41 }
42
43 for (i = 0; i < blks; i++) {
44 buf = rq->bufs[i];
45 for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
46 buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
47 buf->desc = (u8 *)rq->ring.descs +
48 rq->ring.desc_size * buf->index;
49 if (buf->index + 1 == count) {
50 buf->next = rq->bufs[0];
51 break;
52 } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
53 buf->next = rq->bufs[i + 1];
54 } else {
55 buf->next = buf + 1;
56 buf++;
57 }
58 }
59 }
60
61 rq->to_use = rq->to_clean = rq->bufs[0];
62 rq->buf_index = 0;
63
64 return 0;
65}
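/*
 * Editor's note on the layout built above: bufs[] holds blocks of 64
 * vnic_rq_buf entries, chained by buf->next into a single ring that
 * shadows the descriptor ring. A flat descriptor index maps back to
 * its buf with:
 *
 *	buf = &rq->bufs[index / VNIC_RQ_BUF_BLK_ENTRIES]
 *		       [index % VNIC_RQ_BUF_BLK_ENTRIES];
 *
 * which is exactly the lookup vnic_rq_init() and vnic_rq_clean() do
 * on the hardware fetch_index.
 */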
66
67void vnic_rq_free(struct vnic_rq *rq)
68{
69 struct vnic_dev *vdev;
70 unsigned int i;
71
72 vdev = rq->vdev;
73
74 vnic_dev_free_desc_ring(vdev, &rq->ring);
75
76 for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
77 kfree(rq->bufs[i]);
78 rq->bufs[i] = NULL;
79 }
80
81 rq->ctrl = NULL;
82}
83
84int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
85 unsigned int desc_count, unsigned int desc_size)
86{
87 int err;
88
89 rq->index = index;
90 rq->vdev = vdev;
91
92 rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
93 if (!rq->ctrl) {
94 printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
95 return -EINVAL;
96 }
97
98 vnic_rq_disable(rq);
99
100 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
101 if (err)
102 return err;
103
104 err = vnic_rq_alloc_bufs(rq);
105 if (err) {
106 vnic_rq_free(rq);
107 return err;
108 }
109
110 return 0;
111}
112
113void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
114 unsigned int error_interrupt_enable,
115 unsigned int error_interrupt_offset)
116{
117 u64 paddr;
118 u32 fetch_index;
119
120 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
121 writeq(paddr, &rq->ctrl->ring_base);
122 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
123 iowrite32(cq_index, &rq->ctrl->cq_index);
124 iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
125 iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
126 iowrite32(0, &rq->ctrl->dropped_packet_count);
127 iowrite32(0, &rq->ctrl->error_status);
128
129 /* Use current fetch_index as the ring starting point */
130 fetch_index = ioread32(&rq->ctrl->fetch_index);
131 rq->to_use = rq->to_clean =
132 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
133 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
134 iowrite32(fetch_index, &rq->ctrl->posted_index);
135
136 rq->buf_index = 0;
137}
138
139unsigned int vnic_rq_error_status(struct vnic_rq *rq)
140{
141 return ioread32(&rq->ctrl->error_status);
142}
143
144void vnic_rq_enable(struct vnic_rq *rq)
145{
146 iowrite32(1, &rq->ctrl->enable);
147}
148
149int vnic_rq_disable(struct vnic_rq *rq)
150{
151 unsigned int wait;
152
153 iowrite32(0, &rq->ctrl->enable);
154
155 /* Wait for HW to ACK disable request */
156 for (wait = 0; wait < 100; wait++) {
157 if (!(ioread32(&rq->ctrl->running)))
158 return 0;
159 udelay(1);
160 }
161
162 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
163
164 return -ETIMEDOUT;
165}
166
167void vnic_rq_clean(struct vnic_rq *rq,
168 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
169{
170 struct vnic_rq_buf *buf;
171 u32 fetch_index;
172
173 BUG_ON(ioread32(&rq->ctrl->enable));
174
175 buf = rq->to_clean;
176
177 while (vnic_rq_desc_used(rq) > 0) {
178
179 (*buf_clean)(rq, buf);
180
181 buf = rq->to_clean = buf->next;
182 rq->ring.desc_avail++;
183 }
184
185 /* Use current fetch_index as the ring starting point */
186 fetch_index = ioread32(&rq->ctrl->fetch_index);
187 rq->to_use = rq->to_clean =
188 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
189 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
190 iowrite32(fetch_index, &rq->ctrl->posted_index);
191
192 rq->buf_index = 0;
193
194 vnic_dev_clear_desc_ring(&rq->ring);
195}
196
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
new file mode 100644
index 000000000000..aebdfbd6ad3c
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -0,0 +1,235 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_RQ_H_
19#define _VNIC_RQ_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23#include "vnic_cq.h"
24
25/*
26 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
27 * Driver) when both are built with CONFIG options =y
28 */
29#define vnic_rq_desc_avail fnic_rq_desc_avail
30#define vnic_rq_desc_used fnic_rq_desc_used
31#define vnic_rq_next_desc fnic_rq_next_desc
32#define vnic_rq_next_index fnic_rq_next_index
33#define vnic_rq_next_buf_index fnic_rq_next_buf_index
34#define vnic_rq_post fnic_rq_post
35#define vnic_rq_posting_soon fnic_rq_posting_soon
36#define vnic_rq_return_descs fnic_rq_return_descs
37#define vnic_rq_service fnic_rq_service
38#define vnic_rq_fill fnic_rq_fill
39#define vnic_rq_free fnic_rq_free
40#define vnic_rq_alloc fnic_rq_alloc
41#define vnic_rq_init fnic_rq_init
42#define vnic_rq_error_status fnic_rq_error_status
43#define vnic_rq_enable fnic_rq_enable
44#define vnic_rq_disable fnic_rq_disable
45#define vnic_rq_clean fnic_rq_clean
46
47/* Receive queue control */
48struct vnic_rq_ctrl {
49 u64 ring_base; /* 0x00 */
50 u32 ring_size; /* 0x08 */
51 u32 pad0;
52 u32 posted_index; /* 0x10 */
53 u32 pad1;
54 u32 cq_index; /* 0x18 */
55 u32 pad2;
56 u32 enable; /* 0x20 */
57 u32 pad3;
58 u32 running; /* 0x28 */
59 u32 pad4;
60 u32 fetch_index; /* 0x30 */
61 u32 pad5;
62 u32 error_interrupt_enable; /* 0x38 */
63 u32 pad6;
64 u32 error_interrupt_offset; /* 0x40 */
65 u32 pad7;
66 u32 error_status; /* 0x48 */
67 u32 pad8;
68 u32 dropped_packet_count; /* 0x50 */
69 u32 pad9;
70 u32 dropped_packet_count_rc; /* 0x58 */
71 u32 pad10;
72};
73
74/* Break the vnic_rq_buf allocations into blocks of 64 entries */
75#define VNIC_RQ_BUF_BLK_ENTRIES 64
76#define VNIC_RQ_BUF_BLK_SZ \
77 (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
78#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
79 DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
80#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
81
82struct vnic_rq_buf {
83 struct vnic_rq_buf *next;
84 dma_addr_t dma_addr;
85 void *os_buf;
86 unsigned int os_buf_index;
87 unsigned int len;
88 unsigned int index;
89 void *desc;
90};
91
92struct vnic_rq {
93 unsigned int index;
94 struct vnic_dev *vdev;
95 struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
96 struct vnic_dev_ring ring;
97 struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
98 struct vnic_rq_buf *to_use;
99 struct vnic_rq_buf *to_clean;
100 void *os_buf_head;
101 unsigned int buf_index;
102 unsigned int pkts_outstanding;
103};
104
105static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
106{
107 /* how many does SW own? */
108 return rq->ring.desc_avail;
109}
110
111static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
112{
113 /* how many does HW own? */
114 return rq->ring.desc_count - rq->ring.desc_avail - 1;
115}
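/*
 * Editor's worked example: the ring keeps one descriptor in hand so
 * posted_index can never wrap onto fetch_index. With desc_count = 64,
 * vnic_dev_alloc_desc_ring() starts desc_avail at 63 (desc_used == 0);
 * after posting 63 buffers, desc_avail == 0 and desc_used == 63, and
 * the 64th slot is never handed to hardware.
 */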
116
117static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
118{
119 return rq->to_use->desc;
120}
121
122static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
123{
124 return rq->to_use->index;
125}
126
127static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
128{
129 return rq->buf_index++;
130}
131
132static inline void vnic_rq_post(struct vnic_rq *rq,
133 void *os_buf, unsigned int os_buf_index,
134 dma_addr_t dma_addr, unsigned int len)
135{
136 struct vnic_rq_buf *buf = rq->to_use;
137
138 buf->os_buf = os_buf;
139 buf->os_buf_index = os_buf_index;
140 buf->dma_addr = dma_addr;
141 buf->len = len;
142
143 buf = buf->next;
144 rq->to_use = buf;
145 rq->ring.desc_avail--;
146
147 /* Move the posted_index every nth descriptor */
148
149
150#ifndef VNIC_RQ_RETURN_RATE
151#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
152#endif
153
154 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
155 /* Adding write memory barrier prevents compiler and/or CPU
156 * reordering, thus avoiding descriptor posting before
157 * descriptor is initialized. Otherwise, hardware can read
158 * stale descriptor fields.
159 */
160 wmb();
161 iowrite32(buf->index, &rq->ctrl->posted_index);
162 }
163}
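/*
 * Editor's note: posted_index is only written on every 16th descriptor
 * (VNIC_RQ_RETURN_RATE == 0xf) to batch the doorbell MMIO. Starting
 * from index 0, the posts that advance to_use to indexes 1..15 are
 * silent; the post that advances it to index 16 (16 & 0xf == 0) issues
 * the wmb() and iowrite32(), publishing descriptors 0..15 to hardware
 * in one shot.
 */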
164
165static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
166{
167 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
168}
169
170static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
171{
172 rq->ring.desc_avail += count;
173}
174
175enum desc_return_options {
176 VNIC_RQ_RETURN_DESC,
177 VNIC_RQ_DEFER_RETURN_DESC,
178};
179
180static inline void vnic_rq_service(struct vnic_rq *rq,
181 struct cq_desc *cq_desc, u16 completed_index,
182 int desc_return, void (*buf_service)(struct vnic_rq *rq,
183 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
184 int skipped, void *opaque), void *opaque)
185{
186 struct vnic_rq_buf *buf;
187 int skipped;
188
189 buf = rq->to_clean;
190 while (1) {
191
192 skipped = (buf->index != completed_index);
193
194 (*buf_service)(rq, cq_desc, buf, skipped, opaque);
195
196 if (desc_return == VNIC_RQ_RETURN_DESC)
197 rq->ring.desc_avail++;
198
199 rq->to_clean = buf->next;
200
201 if (!skipped)
202 break;
203
204 buf = rq->to_clean;
205 }
206}
207
208static inline int vnic_rq_fill(struct vnic_rq *rq,
209 int (*buf_fill)(struct vnic_rq *rq))
210{
211 int err;
212
213 while (vnic_rq_desc_avail(rq) > 1) {
214
215 err = (*buf_fill)(rq);
216 if (err)
217 return err;
218 }
219
220 return 0;
221}
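/*
 * Editor's sketch of a buf_fill callback (hypothetical helper names,
 * and it assumes struct fnic exposes its pci_dev as ->pdev; cf. the
 * real callback in fnic_fcs.c). It allocates a frame, DMA-maps it,
 * and posts it:
 *
 *	static int my_rq_fill(struct vnic_rq *rq)
 *	{
 *		struct fnic *fnic = vnic_dev_priv(rq->vdev);
 *		void *frame = my_alloc_frame(MY_FRAME_SIZE);
 *		dma_addr_t pa;
 *
 *		if (!frame)
 *			return -ENOMEM;
 *		pa = pci_map_single(fnic->pdev, frame, MY_FRAME_SIZE,
 *				    PCI_DMA_FROMDEVICE);
 *		vnic_rq_post(rq, frame, 0, pa, MY_FRAME_SIZE);
 *		return 0;
 *	}
 *
 * vnic_rq_fill(rq, my_rq_fill) then tops the ring up until desc_avail
 * drops to 1, per the while condition above.
 */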
222
223void vnic_rq_free(struct vnic_rq *rq);
224int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
225 unsigned int desc_count, unsigned int desc_size);
226void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
227 unsigned int error_interrupt_enable,
228 unsigned int error_interrupt_offset);
229unsigned int vnic_rq_error_status(struct vnic_rq *rq);
230void vnic_rq_enable(struct vnic_rq *rq);
231int vnic_rq_disable(struct vnic_rq *rq);
232void vnic_rq_clean(struct vnic_rq *rq,
233 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
234
235#endif /* _VNIC_RQ_H_ */
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
new file mode 100644
index 000000000000..46baa5254001
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_SCSI_H_
19#define _VNIC_SCSI_H_
20
21#define VNIC_FNIC_WQ_COPY_COUNT_MIN 1
22#define VNIC_FNIC_WQ_COPY_COUNT_MAX 1
23
24#define VNIC_FNIC_WQ_DESCS_MIN 64
25#define VNIC_FNIC_WQ_DESCS_MAX 128
26
27#define VNIC_FNIC_WQ_COPY_DESCS_MIN 64
28#define VNIC_FNIC_WQ_COPY_DESCS_MAX 512
29
30#define VNIC_FNIC_RQ_DESCS_MIN 64
31#define VNIC_FNIC_RQ_DESCS_MAX 128
32
33#define VNIC_FNIC_EDTOV_MIN 1000
34#define VNIC_FNIC_EDTOV_MAX 255000
35#define VNIC_FNIC_EDTOV_DEF 2000
36
37#define VNIC_FNIC_RATOV_MIN 1000
38#define VNIC_FNIC_RATOV_MAX 255000
39
40#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
41#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
42
43#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
44#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
45#define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff
46
47#define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000
48#define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000
49
50#define VNIC_FNIC_PLOGI_RETRIES_MIN 0
51#define VNIC_FNIC_PLOGI_RETRIES_MAX 255
52#define VNIC_FNIC_PLOGI_RETRIES_DEF 8
53
54#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
55#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
56
57#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256
58#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096
59
60#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
61#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
62
63#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0
64#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000
65
66#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0
67#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
68
69#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
70#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
71
72/* Device-specific region: scsi configuration */
73struct vnic_fc_config {
74 u64 node_wwn;
75 u64 port_wwn;
76 u32 flags;
77 u32 wq_enet_desc_count;
78 u32 wq_copy_desc_count;
79 u32 rq_desc_count;
80 u32 flogi_retries;
81 u32 flogi_timeout;
82 u32 plogi_retries;
83 u32 plogi_timeout;
84 u32 io_throttle_count;
85 u32 link_down_timeout;
86 u32 port_down_timeout;
87 u32 port_down_io_retries;
88 u32 luns_per_tgt;
89 u16 maxdatafieldsize;
90 u16 ed_tov;
91 u16 ra_tov;
92 u16 intr_timer;
93 u8 intr_timer_type;
94};
95
96#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
97#define VFCF_PERBI 0x2 /* persistent binding info available */
98
99#endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
new file mode 100644
index 000000000000..5372e23c1cb3
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_STATS_H_
19#define _VNIC_STATS_H_
20
21/* Tx statistics */
22struct vnic_tx_stats {
23 u64 tx_frames_ok;
24 u64 tx_unicast_frames_ok;
25 u64 tx_multicast_frames_ok;
26 u64 tx_broadcast_frames_ok;
27 u64 tx_bytes_ok;
28 u64 tx_unicast_bytes_ok;
29 u64 tx_multicast_bytes_ok;
30 u64 tx_broadcast_bytes_ok;
31 u64 tx_drops;
32 u64 tx_errors;
33 u64 tx_tso;
34 u64 rsvd[16];
35};
36
37/* Rx statistics */
38struct vnic_rx_stats {
39 u64 rx_frames_ok;
40 u64 rx_frames_total;
41 u64 rx_unicast_frames_ok;
42 u64 rx_multicast_frames_ok;
43 u64 rx_broadcast_frames_ok;
44 u64 rx_bytes_ok;
45 u64 rx_unicast_bytes_ok;
46 u64 rx_multicast_bytes_ok;
47 u64 rx_broadcast_bytes_ok;
48 u64 rx_drop;
49 u64 rx_no_bufs;
50 u64 rx_errors;
51 u64 rx_rss;
52 u64 rx_crc_errors;
53 u64 rx_frames_64;
54 u64 rx_frames_127;
55 u64 rx_frames_255;
56 u64 rx_frames_511;
57 u64 rx_frames_1023;
58 u64 rx_frames_1518;
59 u64 rx_frames_to_max;
60 u64 rsvd[16];
61};
62
63struct vnic_stats {
64 struct vnic_tx_stats tx;
65 struct vnic_rx_stats rx;
66};
67
68#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
new file mode 100644
index 000000000000..1f9ea790d130
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_dev.h"
24#include "vnic_wq.h"
25
26static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
27{
28 struct vnic_wq_buf *buf;
29 struct vnic_dev *vdev;
30 unsigned int i, j, count = wq->ring.desc_count;
31 unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
32
33 vdev = wq->vdev;
34
35 for (i = 0; i < blks; i++) {
36 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
37 if (!wq->bufs[i]) {
38 printk(KERN_ERR "Failed to alloc wq_bufs\n");
39 return -ENOMEM;
40 }
41 }
42
43 for (i = 0; i < blks; i++) {
44 buf = wq->bufs[i];
45 for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
46 buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
47 buf->desc = (u8 *)wq->ring.descs +
48 wq->ring.desc_size * buf->index;
49 if (buf->index + 1 == count) {
50 buf->next = wq->bufs[0];
51 break;
52 } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
53 buf->next = wq->bufs[i + 1];
54 } else {
55 buf->next = buf + 1;
56 buf++;
57 }
58 }
59 }
60
61 wq->to_use = wq->to_clean = wq->bufs[0];
62
63 return 0;
64}
65
66void vnic_wq_free(struct vnic_wq *wq)
67{
68 struct vnic_dev *vdev;
69 unsigned int i;
70
71 vdev = wq->vdev;
72
73 vnic_dev_free_desc_ring(vdev, &wq->ring);
74
75 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
76 kfree(wq->bufs[i]);
77 wq->bufs[i] = NULL;
78 }
79
80 wq->ctrl = NULL;
81
82}
83
84int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
85 unsigned int desc_count, unsigned int desc_size)
86{
87 int err;
88
89 wq->index = index;
90 wq->vdev = vdev;
91
92 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
93 if (!wq->ctrl) {
94 printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
95 return -EINVAL;
96 }
97
98 vnic_wq_disable(wq);
99
100 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
101 if (err)
102 return err;
103
104 err = vnic_wq_alloc_bufs(wq);
105 if (err) {
106 vnic_wq_free(wq);
107 return err;
108 }
109
110 return 0;
111}
112
113void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
114 unsigned int error_interrupt_enable,
115 unsigned int error_interrupt_offset)
116{
117 u64 paddr;
118
119 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
120 writeq(paddr, &wq->ctrl->ring_base);
121 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
122 iowrite32(0, &wq->ctrl->fetch_index);
123 iowrite32(0, &wq->ctrl->posted_index);
124 iowrite32(cq_index, &wq->ctrl->cq_index);
125 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
126 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
127 iowrite32(0, &wq->ctrl->error_status);
128}
129
130unsigned int vnic_wq_error_status(struct vnic_wq *wq)
131{
132 return ioread32(&wq->ctrl->error_status);
133}
134
135void vnic_wq_enable(struct vnic_wq *wq)
136{
137 iowrite32(1, &wq->ctrl->enable);
138}
139
140int vnic_wq_disable(struct vnic_wq *wq)
141{
142 unsigned int wait;
143
144 iowrite32(0, &wq->ctrl->enable);
145
146 /* Wait for HW to ACK disable request */
147 for (wait = 0; wait < 100; wait++) {
148 if (!(ioread32(&wq->ctrl->running)))
149 return 0;
150 udelay(1);
151 }
152
153 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
154
155 return -ETIMEDOUT;
156}
157
158void vnic_wq_clean(struct vnic_wq *wq,
159 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
160{
161 struct vnic_wq_buf *buf;
162
163 BUG_ON(ioread32(&wq->ctrl->enable));
164
165 buf = wq->to_clean;
166
167 while (vnic_wq_desc_used(wq) > 0) {
168
169 (*buf_clean)(wq, buf);
170
171 buf = wq->to_clean = buf->next;
172 wq->ring.desc_avail++;
173 }
174
175 wq->to_use = wq->to_clean = wq->bufs[0];
176
177 iowrite32(0, &wq->ctrl->fetch_index);
178 iowrite32(0, &wq->ctrl->posted_index);
179 iowrite32(0, &wq->ctrl->error_status);
180
181 vnic_dev_clear_desc_ring(&wq->ring);
182}
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
new file mode 100644
index 000000000000..5cd094f79281
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_WQ_H_
19#define _VNIC_WQ_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23#include "vnic_cq.h"
24
25/*
26 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
27 * Driver) when both are built with CONFIG options =y
28 */
29#define vnic_wq_desc_avail fnic_wq_desc_avail
30#define vnic_wq_desc_used fnic_wq_desc_used
31#define vnic_wq_next_desc fnic_wq_next_desc
32#define vnic_wq_post fnic_wq_post
33#define vnic_wq_service fnic_wq_service
34#define vnic_wq_free fnic_wq_free
35#define vnic_wq_alloc fnic_wq_alloc
36#define vnic_wq_init fnic_wq_init
37#define vnic_wq_error_status fnic_wq_error_status
38#define vnic_wq_enable fnic_wq_enable
39#define vnic_wq_disable fnic_wq_disable
40#define vnic_wq_clean fnic_wq_clean
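/*
 * These renames happen purely in the preprocessor: callers keep the
 * generic vnic_* spelling while the object file exports fnic_*-prefixed
 * symbols, so fnic and enic can both be linked into one kernel. For
 * instance, a call written as
 *
 *      vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
 *
 * compiles as fnic_wq_post(wq, os_buf, dma_addr, len, 1, 1).
 */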
41
42/* Work queue control */
43struct vnic_wq_ctrl {
44 u64 ring_base; /* 0x00 */
45 u32 ring_size; /* 0x08 */
46 u32 pad0;
47 u32 posted_index; /* 0x10 */
48 u32 pad1;
49 u32 cq_index; /* 0x18 */
50 u32 pad2;
51 u32 enable; /* 0x20 */
52 u32 pad3;
53 u32 running; /* 0x28 */
54 u32 pad4;
55 u32 fetch_index; /* 0x30 */
56 u32 pad5;
57 u32 dca_value; /* 0x38 */
58 u32 pad6;
59 u32 error_interrupt_enable; /* 0x40 */
60 u32 pad7;
61 u32 error_interrupt_offset; /* 0x48 */
62 u32 pad8;
63 u32 error_status; /* 0x50 */
64 u32 pad9;
65};
66
67struct vnic_wq_buf {
68 struct vnic_wq_buf *next;
69 dma_addr_t dma_addr;
70 void *os_buf;
71 unsigned int len;
72 unsigned int index;
73 int sop;
74 void *desc;
75};
76
77/* Break the vnic_wq_buf allocations into blocks of 64 entries */
78#define VNIC_WQ_BUF_BLK_ENTRIES 64
79#define VNIC_WQ_BUF_BLK_SZ \
80 (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
81#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
82 DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
83#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
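/*
 * A worked example of the block math, assuming the usual DIV_ROUND_UP
 * from <linux/kernel.h>: for a ring of 800 descriptors,
 *
 *      VNIC_WQ_BUF_BLKS_NEEDED(800) = DIV_ROUND_UP(800, 64) = 13
 *      VNIC_WQ_BUF_BLKS_MAX         = DIV_ROUND_UP(4096, 64) = 64
 *
 * so wq->bufs[] is sized for the 4096-descriptor worst case and smaller
 * rings simply leave the trailing block pointers unused.
 */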
84
85struct vnic_wq {
86 unsigned int index;
87 struct vnic_dev *vdev;
88 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
89 struct vnic_dev_ring ring;
90 struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
91 struct vnic_wq_buf *to_use;
92 struct vnic_wq_buf *to_clean;
93 unsigned int pkts_outstanding;
94};
95
96static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
97{
98 /* how many does SW own? */
99 return wq->ring.desc_avail;
100}
101
102static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
103{
104 /* how many does HW own? */
105 return wq->ring.desc_count - wq->ring.desc_avail - 1;
106}
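/*
 * One descriptor is deliberately kept out of circulation (the "- 1"),
 * the usual ring trick so that a completely full queue remains
 * distinguishable from an empty one. E.g. desc_count = 64,
 * desc_avail = 63: the ring is idle and used = 64 - 63 - 1 = 0.
 */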
107
108static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
109{
110 return wq->to_use->desc;
111}
112
113static inline void vnic_wq_post(struct vnic_wq *wq,
114 void *os_buf, dma_addr_t dma_addr,
115 unsigned int len, int sop, int eop)
116{
117 struct vnic_wq_buf *buf = wq->to_use;
118
119 buf->sop = sop;
120 buf->os_buf = eop ? os_buf : NULL;
121 buf->dma_addr = dma_addr;
122 buf->len = len;
123
124 buf = buf->next;
125 if (eop) {
126 /* Adding write memory barrier prevents compiler and/or CPU
127 * reordering, thus avoiding descriptor posting before
128 * descriptor is initialized. Otherwise, hardware can read
129 * stale descriptor fields.
130 */
131 wmb();
132 iowrite32(buf->index, &wq->ctrl->posted_index);
133 }
134 wq->to_use = buf;
135
136 wq->ring.desc_avail--;
137}
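/*
 * A minimal, hypothetical send sequence (real callers add locking and
 * DMA mapping; desc, dma_addr, len and os_buf are placeholders).
 * wq_enet_desc and its encoder are declared in wq_enet_desc.h:
 *
 *      if (vnic_wq_desc_avail(wq) > 0) {
 *              struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
 *              wq_enet_desc_enc(desc, dma_addr, len, 0, 0,
 *                               WQ_ENET_OFFLOAD_MODE_CSUM,
 *                               1, 1, 1, 0, 0, 0);
 *              vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
 *      }
 *
 * Since posted_index is only written when eop is set, a multi-fragment
 * frame becomes visible to hardware all at once.
 */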
138
139static inline void vnic_wq_service(struct vnic_wq *wq,
140 struct cq_desc *cq_desc, u16 completed_index,
141 void (*buf_service)(struct vnic_wq *wq,
142 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
143 void *opaque)
144{
145 struct vnic_wq_buf *buf;
146
147 buf = wq->to_clean;
148 while (1) {
149
150 (*buf_service)(wq, cq_desc, buf, opaque);
151
152 wq->ring.desc_avail++;
153
154 wq->to_clean = buf->next;
155
156 if (buf->index == completed_index)
157 break;
158
159 buf = wq->to_clean;
160 }
161}
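/*
 * vnic_wq_service() is the completion-side counterpart of
 * vnic_wq_post(). A sketch of how a CQ handler might use it, assuming
 * placeholder names my_wq_cmpl and completed_index:
 *
 *      static void my_wq_cmpl(struct vnic_wq *wq, struct cq_desc *cq_desc,
 *                      struct vnic_wq_buf *buf, void *opaque)
 *      {
 *              // unmap buf->dma_addr / buf->len, release buf->os_buf
 *      }
 *
 *      vnic_wq_service(wq, cq_desc, completed_index, my_wq_cmpl, NULL);
 *
 * Hardware reports only the last completed index; the walk reclaims
 * every buffer up to and including it.
 */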
162
163void vnic_wq_free(struct vnic_wq *wq);
164int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
165 unsigned int desc_count, unsigned int desc_size);
166void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
167 unsigned int error_interrupt_enable,
168 unsigned int error_interrupt_offset);
169unsigned int vnic_wq_error_status(struct vnic_wq *wq);
170void vnic_wq_enable(struct vnic_wq *wq);
171int vnic_wq_disable(struct vnic_wq *wq);
172void vnic_wq_clean(struct vnic_wq *wq,
173 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
174
175#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
new file mode 100644
index 000000000000..9eab7e7caf38
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_wq_copy.h"
24
25void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
26{
27 iowrite32(1, &wq->ctrl->enable);
28}
29
30int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
31{
32 unsigned int wait;
33
34 iowrite32(0, &wq->ctrl->enable);
35
36 /* Wait for HW to ACK disable request */
37 for (wait = 0; wait < 100; wait++) {
38 if (!(ioread32(&wq->ctrl->running)))
39 return 0;
40 udelay(1);
41 }
42
43 printk(KERN_ERR "Failed to disable Copy WQ[%d],"
44 " fetch index=%d, posted_index=%d\n",
45 wq->index, ioread32(&wq->ctrl->fetch_index),
46 ioread32(&wq->ctrl->posted_index));
47
48 return -ENODEV;
49}
50
51void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
52 void (*q_clean)(struct vnic_wq_copy *wq,
53 struct fcpio_host_req *wq_desc))
54{
55 BUG_ON(ioread32(&wq->ctrl->enable));
56
57 if (vnic_wq_copy_desc_in_use(wq))
58 vnic_wq_copy_service(wq, -1, q_clean);
59
60 wq->to_use_index = wq->to_clean_index = 0;
61
62 iowrite32(0, &wq->ctrl->fetch_index);
63 iowrite32(0, &wq->ctrl->posted_index);
64 iowrite32(0, &wq->ctrl->error_status);
65
66 vnic_dev_clear_desc_ring(&wq->ring);
67}
68
69void vnic_wq_copy_free(struct vnic_wq_copy *wq)
70{
71 struct vnic_dev *vdev;
72
73 vdev = wq->vdev;
74 vnic_dev_free_desc_ring(vdev, &wq->ring);
75 wq->ctrl = NULL;
76}
77
78int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
79 unsigned int index, unsigned int desc_count,
80 unsigned int desc_size)
81{
82 int err;
83
84 wq->index = index;
85 wq->vdev = vdev;
86 wq->to_use_index = wq->to_clean_index = 0;
87 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
88 if (!wq->ctrl) {
89 printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index);
90 return -EINVAL;
91 }
92
93 vnic_wq_copy_disable(wq);
94
95 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
96 if (err)
97 return err;
98
99 return 0;
100}
101
102void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
103 unsigned int error_interrupt_enable,
104 unsigned int error_interrupt_offset)
105{
106 u64 paddr;
107
108 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
109 writeq(paddr, &wq->ctrl->ring_base);
110 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
111 iowrite32(0, &wq->ctrl->fetch_index);
112 iowrite32(0, &wq->ctrl->posted_index);
113 iowrite32(cq_index, &wq->ctrl->cq_index);
114 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
115 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
116}
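/*
 * Unlike struct vnic_wq, the copy WQ keeps no per-entry vnic_wq_buf
 * bookkeeping: the ring entries are the fcpio_host_req descriptors
 * themselves, and two bare indices (to_use_index/to_clean_index, see
 * vnic_wq_copy.h) track ownership, which is why vnic_wq_copy_alloc()
 * only grabs the control registers and the descriptor ring.
 */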
117
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
new file mode 100644
index 000000000000..6aff9740c3df
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -0,0 +1,128 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_WQ_COPY_H_
19#define _VNIC_WQ_COPY_H_
20
21#include <linux/pci.h>
22#include "vnic_wq.h"
23#include "fcpio.h"
24
25#define VNIC_WQ_COPY_MAX 1
26
27struct vnic_wq_copy {
28 unsigned int index;
29 struct vnic_dev *vdev;
30 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
31 struct vnic_dev_ring ring;
32 unsigned to_use_index;
33 unsigned to_clean_index;
34};
35
36static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
37{
38 return wq->ring.desc_avail;
39}
40
41static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
42{
43 return wq->ring.desc_count - 1 - wq->ring.desc_avail;
44}
45
46static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
47{
48 struct fcpio_host_req *desc = wq->ring.descs;
49 return &desc[wq->to_use_index];
50}
51
52static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
53{
54
55 ((wq->to_use_index + 1) == wq->ring.desc_count) ?
56 (wq->to_use_index = 0) : (wq->to_use_index++);
57 wq->ring.desc_avail--;
58
59 /* Adding write memory barrier prevents compiler and/or CPU
60 * reordering, thus avoiding descriptor posting before
61 * descriptor is initialized. Otherwise, hardware can read
62 * stale descriptor fields.
63 */
64 wmb();
65
66 iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
67}
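/*
 * The conditional expression is a wrap-around increment. E.g. with
 * desc_count = 64 and to_use_index = 63, posting wraps to_use_index to
 * 0 and writes 0 to posted_index; hardware consumes entries up to, but
 * not including, that next-free index.
 */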
68
69static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
70{
71 unsigned int cnt;
72
73 if (wq->to_clean_index <= index)
74 cnt = (index - wq->to_clean_index) + 1;
75 else
76 cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
77
78 wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
79 wq->ring.desc_avail += cnt;
80
81}
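/*
 * A worked example of the wrapped count: with desc_count = 64,
 * to_clean_index = 60 and index = 3, the else branch gives
 * cnt = 64 - 60 + 3 + 1 = 8, i.e. entries 60..63 and 0..3 are
 * reclaimed and to_clean_index advances to 4.
 */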
82
83static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
84 u16 completed_index,
85 void (*q_service)(struct vnic_wq_copy *wq,
86 struct fcpio_host_req *wq_desc))
87{
88 struct fcpio_host_req *wq_desc = wq->ring.descs;
89 unsigned int curr_index;
90
91 while (1) {
92
93 if (q_service)
94 (*q_service)(wq, &wq_desc[wq->to_clean_index]);
95
96 wq->ring.desc_avail++;
97
98 curr_index = wq->to_clean_index;
99
100 /* increment the to-clean index so that we start
101 * with an unprocessed index next time we enter the loop
102 */
103 ((wq->to_clean_index + 1) == wq->ring.desc_count) ?
104 (wq->to_clean_index = 0) : (wq->to_clean_index++);
105
106 if (curr_index == completed_index)
107 break;
108
109 /* we have cleaned all the entries */
110 if ((completed_index == (u16)-1) &&
111 (wq->to_clean_index == wq->to_use_index))
112 break;
113 }
114}
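/*
 * Passing completed_index == (u16)-1 turns the walk into "drain
 * everything": no in-range index equals 0xffff, so the loop instead
 * stops once to_clean_index catches up with to_use_index. This is
 * exactly how vnic_wq_copy_clean() flushes a disabled queue.
 */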
115
116void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
117int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
118void vnic_wq_copy_free(struct vnic_wq_copy *wq);
119int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
120 unsigned int index, unsigned int desc_count, unsigned int desc_size);
121void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
122 unsigned int error_interrupt_enable,
123 unsigned int error_interrupt_offset);
124void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
125 void (*q_clean)(struct vnic_wq_copy *wq,
126 struct fcpio_host_req *wq_desc));
127
128#endif /* _VNIC_WQ_COPY_H_ */
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
new file mode 100644
index 000000000000..b121cbad18b8
--- /dev/null
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _WQ_ENET_DESC_H_
19#define _WQ_ENET_DESC_H_
20
21/* Ethernet work queue descriptor: 16B */
22struct wq_enet_desc {
23 __le64 address;
24 __le16 length;
25 __le16 mss_loopback;
26 __le16 header_length_flags;
27 __le16 vlan_tag;
28};
29
30#define WQ_ENET_ADDR_BITS 64
31#define WQ_ENET_LEN_BITS 14
32#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
33#define WQ_ENET_MSS_BITS 14
34#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
35#define WQ_ENET_MSS_SHIFT 2
36#define WQ_ENET_LOOPBACK_SHIFT 1
37#define WQ_ENET_HDRLEN_BITS 10
38#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
39#define WQ_ENET_FLAGS_OM_BITS 2
40#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
41#define WQ_ENET_FLAGS_EOP_SHIFT 12
42#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
43#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
44#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
45
46#define WQ_ENET_OFFLOAD_MODE_CSUM 0
47#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
48#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
49#define WQ_ENET_OFFLOAD_MODE_TSO 3
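/*
 * A worked packing example for header_length_flags: encoding
 * header_length = 34, offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM_L4,
 * eop = 1, cq_entry = 1, fcoe_encap = 1, vlan_tag_insert = 0 yields
 *
 *      0x022 | (2 << 10) | (1 << 12) | (1 << 13) | (1 << 14) = 0x7822
 *
 * stored little-endian by cpu_to_le16(); wq_enet_desc_dec() below
 * reverses each shift and mask.
 */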
50
51static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
52 u64 address, u16 length, u16 mss, u16 header_length,
53 u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
54 u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
55{
56 desc->address = cpu_to_le64(address);
57 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
58 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
59 WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
60 desc->header_length_flags = cpu_to_le16(
61 (header_length & WQ_ENET_HDRLEN_MASK) |
62 (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
63 (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
64 (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
65 (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
66 (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
67 desc->vlan_tag = cpu_to_le16(vlan_tag);
68}
69
70static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
71 u64 *address, u16 *length, u16 *mss, u16 *header_length,
72 u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
73 u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
74{
75 *address = le64_to_cpu(desc->address);
76 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
77 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
78 WQ_ENET_MSS_MASK;
79 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
80 WQ_ENET_LOOPBACK_SHIFT) & 1);
81 *header_length = le16_to_cpu(desc->header_length_flags) &
82 WQ_ENET_HDRLEN_MASK;
83 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
84 WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
85 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
86 WQ_ENET_FLAGS_EOP_SHIFT) & 1);
87 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
88 WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
89 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
90 WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
91 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
92 WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
93 *vlan_tag = le16_to_cpu(desc->vlan_tag);
94}
95
96#endif /* _WQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index babd4cc0cb25..36b1d1052ba1 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,7 +69,7 @@
 #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
 #define MPT2SAS_DRIVER_VERSION "01.100.02.00"
-#define MPT2SAS_MAJOR_VERSION 00
+#define MPT2SAS_MAJOR_VERSION 01
 #define MPT2SAS_MINOR_VERSION 100
 #define MPT2SAS_BUILD_VERSION 02
 #define MPT2SAS_RELEASE_VERSION 00
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f51ca485f35..e2b50d8f57a8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -425,6 +425,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
         INIT_LIST_HEAD(&starget->devices);
         starget->state = STARGET_CREATED;
         starget->scsi_level = SCSI_2;
+        starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
  retry:
         spin_lock_irqsave(shost->host_lock, flags);
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 094795455293..0a2ce7b6325c 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -357,7 +357,7 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
                 err = 0;
                 break;
         case ISCSI_SESSION_FAILED:
-                err = DID_TRANSPORT_DISRUPTED << 16;
+                err = DID_IMM_RETRY << 16;
                 break;
         case ISCSI_SESSION_FREE:
                 err = DID_TRANSPORT_FAILFAST << 16;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index b4b39811b445..a0127e93ade0 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -137,6 +137,7 @@ struct uart_8250_port {
         unsigned char           mcr;
         unsigned char           mcr_mask;       /* mask of user bits */
         unsigned char           mcr_force;      /* mask of forced bits */
+        unsigned char           cur_iotype;     /* Running I/O type */
 
         /*
          * Some bits in registers are cleared on a read, so they must
@@ -471,6 +472,7 @@ static void io_serial_out(struct uart_port *p, int offset, int value)
 
 static void set_io_from_upio(struct uart_port *p)
 {
+        struct uart_8250_port *up = (struct uart_8250_port *)p;
         switch (p->iotype) {
         case UPIO_HUB6:
                 p->serial_in = hub6_serial_in;
@@ -509,6 +511,8 @@ static void set_io_from_upio(struct uart_port *p)
                 p->serial_out = io_serial_out;
                 break;
         }
+        /* Remember loaded iotype */
+        up->cur_iotype = p->iotype;
 }
 
 static void
@@ -1937,6 +1941,9 @@ static int serial8250_startup(struct uart_port *port)
         up->capabilities = uart_config[up->port.type].flags;
         up->mcr = 0;
 
+        if (up->port.iotype != up->cur_iotype)
+                set_io_from_upio(port);
+
         if (up->port.type == PORT_16C950) {
                 /* Wake up and initialize UART */
                 up->acr = 0;
@@ -2563,6 +2570,9 @@ static void serial8250_config_port(struct uart_port *port, int flags)
         if (ret < 0)
                 probeflags &= ~PROBE_RSA;
 
+        if (up->port.iotype != up->cur_iotype)
+                set_io_from_upio(port);
+
         if (flags & UART_CONFIG_TYPE)
                 autoconfig(up, probeflags);
         if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
@@ -2671,6 +2681,11 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
 {
         int i;
 
+        for (i = 0; i < nr_uarts; i++) {
+                struct uart_8250_port *up = &serial8250_ports[i];
+                up->cur_iotype = 0xFF;
+        }
+
         serial8250_isa_init_ports();
 
         for (i = 0; i < nr_uarts; i++) {
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c
index 418b4fe9a0a1..33149d982e82 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/serial/8250_gsc.c
@@ -39,9 +39,9 @@ static int __init serial_init_chip(struct parisc_device *dev)
          */
         if (parisc_parent(dev)->id.hw_type != HPHW_IOA)
                 printk(KERN_INFO
-                        "Serial: device 0x%lx not configured.\n"
+                        "Serial: device 0x%llx not configured.\n"
                         "Enable support for Wax, Lasi, Asp or Dino.\n",
-                        dev->hpa.start);
+                        (unsigned long long)dev->hpa.start);
         return -ENODEV;
 }
 
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index e3a5ad5ef1d6..cdc049d4350f 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -665,7 +665,7 @@ static struct uart_driver amba_reg = {
         .cons                   = AMBA_CONSOLE,
 };
 
-static int pl010_probe(struct amba_device *dev, void *id)
+static int pl010_probe(struct amba_device *dev, struct amba_id *id)
 {
         struct uart_amba_port *uap;
         void __iomem *base;
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 8b2b9700f3e4..88fdac51b6c5 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -729,7 +729,7 @@ static struct uart_driver amba_reg = {
         .cons                   = AMBA_CONSOLE,
 };
 
-static int pl011_probe(struct amba_device *dev, void *id)
+static int pl011_probe(struct amba_device *dev, struct amba_id *id)
 {
         struct uart_amba_port *uap;
         void __iomem *base;
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 6579e2be1dd1..a461b3b2c72d 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1472,8 +1472,8 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
 
         free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
         iounmap(icom_adapter->base_addr);
-        icom_free_adapter(icom_adapter);
         pci_release_regions(icom_adapter->pci_dev);
+        icom_free_adapter(icom_adapter);
 }
 
 static void icom_kref_release(struct kref *kref)
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 7f72f8ceaa6f..b3feb6198d57 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -988,7 +988,7 @@ mpc52xx_console_setup(struct console *co, char *options)
988 pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n", 988 pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
989 co, co->index, options); 989 co, co->index, options);
990 990
991 if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) { 991 if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
992 pr_debug("PSC%x out of range\n", co->index); 992 pr_debug("PSC%x out of range\n", co->index);
993 return -EINVAL; 993 return -EINVAL;
994 } 994 }
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 0716cdb44cd8..0a3dc5ece634 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_USB_MON) += mon/
 obj-$(CONFIG_PCI)               += host/
 obj-$(CONFIG_USB_EHCI_HCD)      += host/
 obj-$(CONFIG_USB_ISP116X_HCD)   += host/
-obj-$(CONFIG_USB_ISP1760_HCD)   += host/
 obj-$(CONFIG_USB_OHCI_HCD)      += host/
 obj-$(CONFIG_USB_UHCI_HCD)      += host/
 obj-$(CONFIG_USB_FHCI_HCD)      += host/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0a69c0977e3f..7a1164dd1d37 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1375,6 +1375,9 @@ static struct usb_device_id acm_ids[] = {
         { USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
         .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
         },
+        { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
+        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+        },
         { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
         },
         { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 563d57275448..05c913cc3658 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -794,7 +794,8 @@ usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
         if (ep->desc) {
                 list_add_tail(&req->queue, &ep->queue);
 
-                if (ep->is_in || (ep_is_control(ep)
+                if ((!ep_is_control(ep) && ep->is_in) ||
+                        (ep_is_control(ep)
                         && (ep->state == DATA_STAGE_IN
                         || ep->state == STATUS_STAGE_IN)))
                         usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
@@ -1940,7 +1941,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
         usba_writel(udc, CTRL, USBA_DISABLE_MASK);
         clk_disable(pclk);
 
-        usba_ep = kmalloc(sizeof(struct usba_ep) * pdata->num_ep,
+        usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
                           GFP_KERNEL);
         if (!usba_ep)
                 goto err_alloc_ep;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index cd07ea3f0c63..15438469f21a 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1658,6 +1658,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
         u32 reg_base, or_reg, skip_reg;
         unsigned long flags;
         struct ptd ptd;
+        packet_enqueue *pe;
 
         switch (usb_pipetype(urb->pipe)) {
         case PIPE_ISOCHRONOUS:
@@ -1669,6 +1670,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
                 reg_base = INT_REGS_OFFSET;
                 or_reg = HC_INT_IRQ_MASK_OR_REG;
                 skip_reg = HC_INT_PTD_SKIPMAP_REG;
+                pe = enqueue_an_INT_packet;
                 break;
 
         default:
@@ -1676,6 +1678,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
                 reg_base = ATL_REGS_OFFSET;
                 or_reg = HC_ATL_IRQ_MASK_OR_REG;
                 skip_reg = HC_ATL_PTD_SKIPMAP_REG;
+                pe = enqueue_an_ATL_packet;
                 break;
         }
 
@@ -1687,6 +1690,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
                 u32 skip_map;
                 u32 or_map;
                 struct isp1760_qtd *qtd;
+                struct isp1760_qh *qh = ints->qh;
 
                 skip_map = isp1760_readl(hcd->regs + skip_reg);
                 skip_map |= 1 << i;
@@ -1699,8 +1703,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
                         priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
                                         + i * sizeof(ptd), sizeof(ptd));
                         qtd = ints->qtd;
-
-                        clean_up_qtdlist(qtd);
+                        qtd = clean_up_qtdlist(qtd);
 
                         free_mem(priv, ints->payload);
 
@@ -1711,7 +1714,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
                         ints->payload = 0;
 
                         isp1760_urb_done(priv, urb, status);
+                        if (qtd)
+                                pe(hcd, qh, qtd);
                         break;
+
+                } else if (ints->qtd) {
+                        struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
+
+                        for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
+                                if (qtd->urb == urb) {
+                                        prev_qtd->hw_next = clean_up_qtdlist(qtd);
+                                        isp1760_urb_done(priv, urb, status);
+                                        break;
+                                }
+                                prev_qtd = qtd;
+                        }
+                        /* we found the urb before the end of the list */
+                        if (qtd)
+                                break;
                 }
                 ints++;
         }
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ab8474b00cb..d9fcdaedf389 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1487,14 +1487,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
 
         remove_sysfs_attrs(port);
 
-        /* all open ports are closed at this point
-         * (by usbserial.c:__serial_close, which calls ftdi_close)
-         */
-
-        if (priv) {
-                usb_set_serial_port_data(port, NULL);
-                kref_put(&priv->kref, ftdi_sio_priv_release);
-        }
+        kref_put(&priv->kref, ftdi_sio_priv_release);
 
         return 0;
 }
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 0a566eea49c0..f331e2bde88a 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -974,6 +974,7 @@ int usb_serial_probe(struct usb_interface *interface,
                 if (retval > 0) {
                         /* quietly accept this device, but don't bind to a
                            serial port as it's about to disappear */
+                        serial->num_ports = 0;
                         goto exit;
                 }
         }
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 61050ab14128..d1f80bac54f0 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -437,7 +437,7 @@ static int clcdfb_register(struct clcd_fb *fb)
         return ret;
 }
 
-static int clcdfb_probe(struct amba_device *dev, void *id)
+static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
 {
         struct clcd_board *board = dev->dev.platform_data;
         struct clcd_fb *fb;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 9a577a800db5..2fb63f6ea2f1 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -29,14 +29,8 @@
 
 /* configurable parameters */
 #define ATMEL_LCDC_CVAL_DEFAULT         0xc8
-#define ATMEL_LCDC_DMA_BURST_LEN        8
-
-#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9) || \
-        defined(CONFIG_ARCH_AT91SAM9RL)
-#define ATMEL_LCDC_FIFO_SIZE            2048
-#else
-#define ATMEL_LCDC_FIFO_SIZE            512
-#endif
+#define ATMEL_LCDC_DMA_BURST_LEN        8       /* words */
+#define ATMEL_LCDC_FIFO_SIZE            512     /* words */
 
 #if defined(CONFIG_ARCH_AT91)
 #define ATMEL_LCDFB_FBINFO_DEFAULT      (FBINFO_DEFAULT \
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index dfb72f5e4c96..148cbcc39602 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -880,20 +880,22 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
 
 static int get_dss_clocks(void)
 {
-        if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) {
-                dev_err(dispc.fbdev->dev, "can't get dss_ick\n");
+        dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick");
+        if (IS_ERR(dispc.dss_ick)) {
+                dev_err(dispc.fbdev->dev, "can't get ick\n");
                 return PTR_ERR(dispc.dss_ick);
         }
 
-        if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) {
+        dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck");
+        if (IS_ERR(dispc.dss1_fck)) {
                 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
                 clk_put(dispc.dss_ick);
                 return PTR_ERR(dispc.dss1_fck);
         }
 
-        if (IS_ERR((dispc.dss_54m_fck =
-                clk_get(dispc.fbdev->dev, "dss_54m_fck")))) {
-                dev_err(dispc.fbdev->dev, "can't get dss_54m_fck\n");
+        dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck");
+        if (IS_ERR(dispc.dss_54m_fck)) {
+                dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
                 clk_put(dispc.dss_ick);
                 clk_put(dispc.dss1_fck);
                 return PTR_ERR(dispc.dss_54m_fck);
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index a13c8dcad2a8..9332d6ca6456 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -83,12 +83,14 @@ static inline u32 rfbi_read_reg(int idx)
 
 static int rfbi_get_clocks(void)
 {
-        if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) {
-                dev_err(rfbi.fbdev->dev, "can't get dss_ick\n");
+        rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick");
+        if (IS_ERR(rfbi.dss_ick)) {
+                dev_err(rfbi.fbdev->dev, "can't get ick\n");
                 return PTR_ERR(rfbi.dss_ick);
         }
 
-        if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) {
+        rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck");
+        if (IS_ERR(rfbi.dss1_fck)) {
                 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
                 clk_put(rfbi.dss_ick);
                 return PTR_ERR(rfbi.dss1_fck);
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 5e9c6302433b..d3a568e6b169 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -947,7 +947,8 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
         int win;
 
         for (win = 0; win <= S3C_FB_MAX_WIN; win++)
-                s3c_fb_release_win(sfb, sfb->windows[win]);
+                if (sfb->windows[win])
+                        s3c_fb_release_win(sfb, sfb->windows[win]);
 
         iounmap(sfb->regs);
 
@@ -985,11 +986,20 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state)
 static int s3c_fb_resume(struct platform_device *pdev)
 {
         struct s3c_fb *sfb = platform_get_drvdata(pdev);
+        struct s3c_fb_platdata *pd = sfb->pdata;
         struct s3c_fb_win *win;
         int win_no;
 
         clk_enable(sfb->bus_clk);
 
+        /* setup registers */
+        writel(pd->vidcon1, sfb->regs + VIDCON1);
+
+        /* zero all windows before we do anything */
+        for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++)
+                s3c_fb_clear_win(sfb, win_no);
+
+        /* restore framebuffers */
         for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
                 win = sfb->windows[win_no];
                 if (!win)
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 92ea0ab44ce2..f10d2fbeda06 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -47,6 +47,7 @@ struct sh_mobile_lcdc_priv {
 #endif
         unsigned long lddckr;
         struct sh_mobile_lcdc_chan ch[2];
+        int started;
 };
 
 /* shared registers */
@@ -451,6 +452,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
 
         /* start the lcdc */
         sh_mobile_lcdc_start_stop(priv, 1);
+        priv->started = 1;
 
         /* tell the board code to enable the panel */
         for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -493,7 +495,10 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
         }
 
         /* stop the lcdc */
-        sh_mobile_lcdc_start_stop(priv, 0);
+        if (priv->started) {
+                sh_mobile_lcdc_start_stop(priv, 0);
+                priv->started = 0;
+        }
 
         /* stop clocks */
         for (k = 0; k < ARRAY_SIZE(priv->ch); k++)