aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/numa.c4
-rw-r--r--drivers/acpi/sleep/main.c67
-rw-r--r--drivers/acpi/sleep/proc.c2
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/libata-core.c8
-rw-r--r--drivers/base/devres.c32
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/topology.c3
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/nbd.c15
-rw-r--r--drivers/block/rd.c2
-rw-r--r--drivers/char/Kconfig3
-rw-r--r--drivers/char/drm/drm_dma.c2
-rw-r--r--drivers/char/drm/drm_vm.c2
-rw-r--r--drivers/char/drm/r300_reg.h2
-rw-r--r--drivers/char/genrtc.c4
-rw-r--r--drivers/char/hw_random/Kconfig14
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/pasemi-rng.c156
-rw-r--r--drivers/char/pcmcia/Kconfig1
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c46
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c7
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tty_io.c22
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq_stats.c2
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/hwmon/applesmc.c7
-rw-r--r--drivers/hwmon/coretemp.c2
-rw-r--r--drivers/i2c/chips/tps65010.c2
-rw-r--r--drivers/ide/pci/siimage.c2
-rw-r--r--drivers/ieee1394/nodemgr.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c6
-rw-r--r--drivers/isdn/capi/Kconfig2
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c14
-rw-r--r--drivers/isdn/hardware/eicon/divasync.h2
-rw-r--r--drivers/isdn/hisax/hfc_usb.c4
-rw-r--r--drivers/kvm/kvm_main.c3
-rw-r--r--drivers/leds/leds-h1940.c2
-rw-r--r--drivers/macintosh/Kconfig2
-rw-r--r--drivers/mca/mca-bus.c28
-rw-r--r--drivers/mca/mca-driver.c13
-rw-r--r--drivers/md/Kconfig9
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/dm-bio-list.h26
-rw-r--r--drivers/md/dm-crypt.c91
-rw-r--r--drivers/md/dm-delay.c383
-rw-r--r--drivers/md/dm-exception-store.c54
-rw-r--r--drivers/md/dm-hw-handler.h1
-rw-r--r--drivers/md/dm-io.c232
-rw-r--r--drivers/md/dm-io.h83
-rw-r--r--drivers/md/dm-log.c77
-rw-r--r--drivers/md/dm-mpath.c3
-rw-r--r--drivers/md/dm-raid1.c187
-rw-r--r--drivers/md/dm-table.c10
-rw-r--r--drivers/md/dm.c1
-rw-r--r--drivers/md/kcopyd.c28
-rw-r--r--drivers/md/md.c186
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/md/raid5.c6
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-remote.c2
-rw-r--r--drivers/media/dvb/frontends/dib7000m.c2
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb/frontends/tda10021.c2
-rw-r--r--drivers/media/dvb/frontends/ves1x93.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/video/pwc/philips.txt6
-rw-r--r--drivers/media/video/usbvideo/vicam.c2
-rw-r--r--drivers/message/fusion/lsi/mpi_history.txt2
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/misc/tifm_7xx1.c27
-rw-r--r--drivers/mmc/Kconfig10
-rw-r--r--drivers/mmc/card/Kconfig3
-rw-r--r--drivers/mmc/core/Kconfig1
-rw-r--r--drivers/mmc/core/core.c10
-rw-r--r--drivers/mmc/host/Kconfig19
-rw-r--r--drivers/mmc/host/tifm_sd.c13
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/nettel.c2
-rw-r--r--drivers/mtd/onenand/onenand_base.c2
-rw-r--r--drivers/net/3c509.c5
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/atp.c8
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/e1000/e1000_main.c2
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/eepro100.c2
-rw-r--r--drivers/net/epic100.c10
-rw-r--r--drivers/net/hamradio/Kconfig2
-rw-r--r--drivers/net/irda/donauboe.h2
-rw-r--r--drivers/net/ixgb/ixgb_ee.c2
-rw-r--r--drivers/net/meth.h2
-rw-r--r--drivers/net/natsemi.c1
-rw-r--r--drivers/net/ne2k-pci.c3
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/sundance.c3
-rw-r--r--drivers/net/tg3.c11
-rw-r--r--drivers/net/tg3.h2
-rw-r--r--drivers/net/tulip/interrupt.c2
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/wireless/airport.c2
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c2
-rw-r--r--drivers/net/wireless/wavelan_cs.c4
-rw-r--r--drivers/net/wireless/wavelan_cs.p.h2
-rw-r--r--drivers/net/yellowfin.c1
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/net/qeth_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c90
-rw-r--r--drivers/s390/scsi/zfcp_def.h41
-rw-r--r--drivers/s390/scsi/zfcp_erp.c89
-rw-r--r--drivers/s390/scsi/zfcp_ext.h4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c50
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c51
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c9
-rw-r--r--drivers/sbus/char/bpp.c2
-rw-r--r--drivers/scsi/aacraid/comminit.c3
-rw-r--r--drivers/scsi/aacraid/commsup.c6
-rw-r--r--drivers/scsi/aacraid/dpcsup.c6
-rw-r--r--drivers/scsi/aacraid/rx.c4
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c2
-rw-r--r--drivers/scsi/aic94xx/Makefile2
-rw-r--r--drivers/scsi/ch.c9
-rw-r--r--drivers/scsi/dc395x.c6
-rw-r--r--drivers/scsi/dpt_i2o.c17
-rw-r--r--drivers/scsi/ipr.c349
-rw-r--r--drivers/scsi/ipr.h33
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c36
-rw-r--r--drivers/scsi/lpfc/lpfc.h33
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c212
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h32
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h28
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c552
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c884
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c516
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c307
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c109
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c427
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/megaraid.c20
-rw-r--r--drivers/scsi/megaraid.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/mesh.c14
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h9
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c19
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c9
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c158
-rw-r--r--drivers/scsi/tmscsim.c225
-rw-r--r--drivers/scsi/tmscsim.h12
-rw-r--r--drivers/spi/atmel_spi.c5
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/misc/auerswald.c2
-rw-r--r--drivers/usb/net/usbnet.h2
-rw-r--r--drivers/usb/serial/Kconfig2
-rw-r--r--drivers/usb/serial/aircable.c4
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/video/Kconfig28
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/arkfb.c1200
-rw-r--r--drivers/video/aty/atyfb_base.c3
-rw-r--r--drivers/video/aty/mach64_cursor.c1
-rw-r--r--drivers/video/console/softcursor.c2
-rw-r--r--drivers/video/fbmem.c4
-rw-r--r--drivers/video/i810/i810_main.c2
-rw-r--r--drivers/video/matrox/matroxfb_Ti3026.c2
-rw-r--r--drivers/video/matrox/matroxfb_accel.c2
-rw-r--r--drivers/video/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/matrox/matroxfb_misc.c2
-rw-r--r--drivers/video/nvidia/nv_hw.c7
-rw-r--r--drivers/video/nvidia/nvidia.c1
-rw-r--r--drivers/video/s3fb.c19
-rw-r--r--drivers/video/skeletonfb.c2
-rw-r--r--drivers/video/svgalib.c17
-rw-r--r--drivers/video/vt8623fb.c927
193 files changed, 6077 insertions, 2811 deletions
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 4dd0dabe81cb..8fcd6a15517f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -228,7 +228,7 @@ int __init acpi_numa_init(void)
228 return 0; 228 return 0;
229} 229}
230 230
231int __meminit acpi_get_pxm(acpi_handle h) 231int acpi_get_pxm(acpi_handle h)
232{ 232{
233 unsigned long pxm; 233 unsigned long pxm;
234 acpi_status status; 234 acpi_status status;
@@ -246,7 +246,7 @@ int __meminit acpi_get_pxm(acpi_handle h)
246} 246}
247EXPORT_SYMBOL(acpi_get_pxm); 247EXPORT_SYMBOL(acpi_get_pxm);
248 248
249int __meminit acpi_get_node(acpi_handle *handle) 249int acpi_get_node(acpi_handle *handle)
250{ 250{
251 int pxm, node = -1; 251 int pxm, node = -1;
252 252
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index f8c63410bcbf..52b23471dd69 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -29,7 +29,6 @@ static u32 acpi_suspend_states[] = {
29 [PM_SUSPEND_ON] = ACPI_STATE_S0, 29 [PM_SUSPEND_ON] = ACPI_STATE_S0,
30 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1, 30 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
31 [PM_SUSPEND_MEM] = ACPI_STATE_S3, 31 [PM_SUSPEND_MEM] = ACPI_STATE_S3,
32 [PM_SUSPEND_DISK] = ACPI_STATE_S4,
33 [PM_SUSPEND_MAX] = ACPI_STATE_S5 32 [PM_SUSPEND_MAX] = ACPI_STATE_S5
34}; 33};
35 34
@@ -94,14 +93,6 @@ static int acpi_pm_enter(suspend_state_t pm_state)
94 do_suspend_lowlevel(); 93 do_suspend_lowlevel();
95 break; 94 break;
96 95
97 case PM_SUSPEND_DISK:
98 if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
99 status = acpi_enter_sleep_state(acpi_state);
100 break;
101 case PM_SUSPEND_MAX:
102 acpi_power_off();
103 break;
104
105 default: 96 default:
106 return -EINVAL; 97 return -EINVAL;
107 } 98 }
@@ -157,12 +148,13 @@ int acpi_suspend(u32 acpi_state)
157 suspend_state_t states[] = { 148 suspend_state_t states[] = {
158 [1] = PM_SUSPEND_STANDBY, 149 [1] = PM_SUSPEND_STANDBY,
159 [3] = PM_SUSPEND_MEM, 150 [3] = PM_SUSPEND_MEM,
160 [4] = PM_SUSPEND_DISK,
161 [5] = PM_SUSPEND_MAX 151 [5] = PM_SUSPEND_MAX
162 }; 152 };
163 153
164 if (acpi_state < 6 && states[acpi_state]) 154 if (acpi_state < 6 && states[acpi_state])
165 return pm_suspend(states[acpi_state]); 155 return pm_suspend(states[acpi_state]);
156 if (acpi_state == 4)
157 return hibernate();
166 return -EINVAL; 158 return -EINVAL;
167} 159}
168 160
@@ -189,6 +181,49 @@ static struct pm_ops acpi_pm_ops = {
189 .finish = acpi_pm_finish, 181 .finish = acpi_pm_finish,
190}; 182};
191 183
184#ifdef CONFIG_SOFTWARE_SUSPEND
185static int acpi_hibernation_prepare(void)
186{
187 return acpi_sleep_prepare(ACPI_STATE_S4);
188}
189
190static int acpi_hibernation_enter(void)
191{
192 acpi_status status = AE_OK;
193 unsigned long flags = 0;
194
195 ACPI_FLUSH_CPU_CACHE();
196
197 local_irq_save(flags);
198 acpi_enable_wakeup_device(ACPI_STATE_S4);
199 /* This shouldn't return. If it returns, we have a problem */
200 status = acpi_enter_sleep_state(ACPI_STATE_S4);
201 local_irq_restore(flags);
202
203 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
204}
205
206static void acpi_hibernation_finish(void)
207{
208 acpi_leave_sleep_state(ACPI_STATE_S4);
209 acpi_disable_wakeup_device(ACPI_STATE_S4);
210
211 /* reset firmware waking vector */
212 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
213
214 if (init_8259A_after_S1) {
215 printk("Broken toshiba laptop -> kicking interrupts\n");
216 init_8259A(0);
217 }
218}
219
220static struct hibernation_ops acpi_hibernation_ops = {
221 .prepare = acpi_hibernation_prepare,
222 .enter = acpi_hibernation_enter,
223 .finish = acpi_hibernation_finish,
224};
225#endif /* CONFIG_SOFTWARE_SUSPEND */
226
192/* 227/*
193 * Toshiba fails to preserve interrupts over S1, reinitialization 228 * Toshiba fails to preserve interrupts over S1, reinitialization
194 * of 8259 is needed after S1 resume. 229 * of 8259 is needed after S1 resume.
@@ -227,14 +262,18 @@ int __init acpi_sleep_init(void)
227 sleep_states[i] = 1; 262 sleep_states[i] = 1;
228 printk(" S%d", i); 263 printk(" S%d", i);
229 } 264 }
230 if (i == ACPI_STATE_S4) {
231 if (sleep_states[i])
232 acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
233 }
234 } 265 }
235 printk(")\n"); 266 printk(")\n");
236 267
237 pm_set_ops(&acpi_pm_ops); 268 pm_set_ops(&acpi_pm_ops);
269
270#ifdef CONFIG_SOFTWARE_SUSPEND
271 if (sleep_states[ACPI_STATE_S4])
272 hibernation_set_ops(&acpi_hibernation_ops);
273#else
274 sleep_states[ACPI_STATE_S4] = 0;
275#endif
276
238 return 0; 277 return 0;
239} 278}
240 279
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 5a76e5be61d5..76b45f0b8341 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -60,7 +60,7 @@ acpi_system_write_sleep(struct file *file,
60 state = simple_strtoul(str, NULL, 0); 60 state = simple_strtoul(str, NULL, 0);
61#ifdef CONFIG_SOFTWARE_SUSPEND 61#ifdef CONFIG_SOFTWARE_SUSPEND
62 if (state == 4) { 62 if (state == 4) {
63 error = pm_suspend(PM_SUSPEND_DISK); 63 error = hibernate();
64 goto Done; 64 goto Done;
65 } 65 }
66#endif 66#endif
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 45dbdc14915f..c7219663f2b9 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -435,7 +435,7 @@ config PATA_OPTIDMA
435 help 435 help
436 This option enables DMA/PIO support for the later OPTi 436 This option enables DMA/PIO support for the later OPTi
437 controllers found on some old motherboards and in some 437 controllers found on some old motherboards and in some
438 latops 438 laptops.
439 439
440 If unsure, say N. 440 If unsure, say N.
441 441
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a7950885d18e..fef87dd70d17 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1316,7 +1316,7 @@ void ata_port_flush_task(struct ata_port *ap)
1316 spin_unlock_irqrestore(ap->lock, flags); 1316 spin_unlock_irqrestore(ap->lock, flags);
1317 1317
1318 DPRINTK("flush #1\n"); 1318 DPRINTK("flush #1\n");
1319 flush_workqueue(ata_wq); 1319 cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
1320 1320
1321 /* 1321 /*
1322 * At this point, if a task is running, it's guaranteed to see 1322 * At this point, if a task is running, it's guaranteed to see
@@ -1327,7 +1327,7 @@ void ata_port_flush_task(struct ata_port *ap)
1327 if (ata_msg_ctl(ap)) 1327 if (ata_msg_ctl(ap))
1328 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", 1328 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1329 __FUNCTION__); 1329 __FUNCTION__);
1330 flush_workqueue(ata_wq); 1330 cancel_work_sync(&ap->port_task.work);
1331 } 1331 }
1332 1332
1333 spin_lock_irqsave(ap->lock, flags); 1333 spin_lock_irqsave(ap->lock, flags);
@@ -6475,9 +6475,9 @@ void ata_port_detach(struct ata_port *ap)
6475 /* Flush hotplug task. The sequence is similar to 6475 /* Flush hotplug task. The sequence is similar to
6476 * ata_port_flush_task(). 6476 * ata_port_flush_task().
6477 */ 6477 */
6478 flush_workqueue(ata_aux_wq); 6478 cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
6479 cancel_delayed_work(&ap->hotplug_task); 6479 cancel_delayed_work(&ap->hotplug_task);
6480 flush_workqueue(ata_aux_wq); 6480 cancel_work_sync(&ap->hotplug_task.work);
6481 6481
6482 skip_eh: 6482 skip_eh:
6483 /* remove the associated SCSI host */ 6483 /* remove the associated SCSI host */
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index e177c9533b6c..e1c0730a3b99 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -101,19 +101,6 @@ static void add_dr(struct device *dev, struct devres_node *node)
101 list_add_tail(&node->entry, &dev->devres_head); 101 list_add_tail(&node->entry, &dev->devres_head);
102} 102}
103 103
104/**
105 * devres_alloc - Allocate device resource data
106 * @release: Release function devres will be associated with
107 * @size: Allocation size
108 * @gfp: Allocation flags
109 *
110 * allocate devres of @size bytes. The allocated area is zeroed, then
111 * associated with @release. The returned pointer can be passed to
112 * other devres_*() functions.
113 *
114 * RETURNS:
115 * Pointer to allocated devres on success, NULL on failure.
116 */
117#ifdef CONFIG_DEBUG_DEVRES 104#ifdef CONFIG_DEBUG_DEVRES
118void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp, 105void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
119 const char *name) 106 const char *name)
@@ -128,6 +115,19 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
128} 115}
129EXPORT_SYMBOL_GPL(__devres_alloc); 116EXPORT_SYMBOL_GPL(__devres_alloc);
130#else 117#else
118/**
119 * devres_alloc - Allocate device resource data
120 * @release: Release function devres will be associated with
121 * @size: Allocation size
122 * @gfp: Allocation flags
123 *
124 * Allocate devres of @size bytes. The allocated area is zeroed, then
125 * associated with @release. The returned pointer can be passed to
126 * other devres_*() functions.
127 *
128 * RETURNS:
129 * Pointer to allocated devres on success, NULL on failure.
130 */
131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp) 131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
132{ 132{
133 struct devres *dr; 133 struct devres *dr;
@@ -416,7 +416,7 @@ static int release_nodes(struct device *dev, struct list_head *first,
416} 416}
417 417
418/** 418/**
419 * devres_release_all - Release all resources 419 * devres_release_all - Release all managed resources
420 * @dev: Device to release resources for 420 * @dev: Device to release resources for
421 * 421 *
422 * Release all resources associated with @dev. This function is 422 * Release all resources associated with @dev. This function is
@@ -600,7 +600,7 @@ static int devm_kzalloc_match(struct device *dev, void *res, void *data)
600} 600}
601 601
602/** 602/**
603 * devm_kzalloc - Managed kzalloc 603 * devm_kzalloc - Resource-managed kzalloc
604 * @dev: Device to allocate memory for 604 * @dev: Device to allocate memory for
605 * @size: Allocation size 605 * @size: Allocation size
606 * @gfp: Allocation gfp flags 606 * @gfp: Allocation gfp flags
@@ -628,7 +628,7 @@ void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
628EXPORT_SYMBOL_GPL(devm_kzalloc); 628EXPORT_SYMBOL_GPL(devm_kzalloc);
629 629
630/** 630/**
631 * devm_kfree - Managed kfree 631 * devm_kfree - Resource-managed kfree
632 * @dev: Device this memory belongs to 632 * @dev: Device this memory belongs to
633 * @p: Memory to free 633 * @p: Memory to free
634 * 634 *
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index eb84d9d44645..869ff8c00146 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -360,7 +360,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
360 * This function creates a simple platform device that requires minimal 360 * This function creates a simple platform device that requires minimal
361 * resource and memory management. Canned release function freeing 361 * resource and memory management. Canned release function freeing
362 * memory allocated for the device allows drivers using such devices 362 * memory allocated for the device allows drivers using such devices
363 * to be unloaded iwithout waiting for the last reference to the device 363 * to be unloaded without waiting for the last reference to the device
364 * to be dropped. 364 * to be dropped.
365 * 365 *
366 * This interface is primarily intended for use with legacy drivers 366 * This interface is primarily intended for use with legacy drivers
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 067a9e8bc377..8d8cdfec6529 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -126,10 +126,13 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
126 126
127 switch (action) { 127 switch (action) {
128 case CPU_UP_PREPARE: 128 case CPU_UP_PREPARE:
129 case CPU_UP_PREPARE_FROZEN:
129 rc = topology_add_dev(cpu); 130 rc = topology_add_dev(cpu);
130 break; 131 break;
131 case CPU_UP_CANCELED: 132 case CPU_UP_CANCELED:
133 case CPU_UP_CANCELED_FROZEN:
132 case CPU_DEAD: 134 case CPU_DEAD:
135 case CPU_DEAD_FROZEN:
133 topology_remove_dev(cpu); 136 topology_remove_dev(cpu);
134 break; 137 break;
135 } 138 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index af6d7274a7cc..18cdd8c77626 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
243 transfer_result = lo_do_transfer(lo, WRITE, page, offset, 243 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
244 bvec->bv_page, bv_offs, size, IV); 244 bvec->bv_page, bv_offs, size, IV);
245 if (unlikely(transfer_result)) { 245 if (unlikely(transfer_result)) {
246 char *kaddr;
247
248 /* 246 /*
249 * The transfer failed, but we still write the data to 247 * The transfer failed, but we still write the data to
250 * keep prepare/commit calls balanced. 248 * keep prepare/commit calls balanced.
251 */ 249 */
252 printk(KERN_ERR "loop: transfer error block %llu\n", 250 printk(KERN_ERR "loop: transfer error block %llu\n",
253 (unsigned long long)index); 251 (unsigned long long)index);
254 kaddr = kmap_atomic(page, KM_USER0); 252 zero_user_page(page, offset, size, KM_USER0);
255 memset(kaddr + offset, 0, size);
256 kunmap_atomic(kaddr, KM_USER0);
257 } 253 }
258 flush_dcache_page(page); 254 flush_dcache_page(page);
259 ret = aops->commit_write(file, page, offset, 255 ret = aops->commit_write(file, page, offset,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 090796bef78f..069ae39a9cd9 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -366,20 +366,25 @@ static struct disk_attribute pid_attr = {
366 .show = pid_show, 366 .show = pid_show,
367}; 367};
368 368
369static void nbd_do_it(struct nbd_device *lo) 369static int nbd_do_it(struct nbd_device *lo)
370{ 370{
371 struct request *req; 371 struct request *req;
372 int ret;
372 373
373 BUG_ON(lo->magic != LO_MAGIC); 374 BUG_ON(lo->magic != LO_MAGIC);
374 375
375 lo->pid = current->pid; 376 lo->pid = current->pid;
376 sysfs_create_file(&lo->disk->kobj, &pid_attr.attr); 377 ret = sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
378 if (ret) {
379 printk(KERN_ERR "nbd: sysfs_create_file failed!");
380 return ret;
381 }
377 382
378 while ((req = nbd_read_stat(lo)) != NULL) 383 while ((req = nbd_read_stat(lo)) != NULL)
379 nbd_end_request(req); 384 nbd_end_request(req);
380 385
381 sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr); 386 sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
382 return; 387 return 0;
383} 388}
384 389
385static void nbd_clear_que(struct nbd_device *lo) 390static void nbd_clear_que(struct nbd_device *lo)
@@ -569,7 +574,9 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
569 case NBD_DO_IT: 574 case NBD_DO_IT:
570 if (!lo->file) 575 if (!lo->file)
571 return -EINVAL; 576 return -EINVAL;
572 nbd_do_it(lo); 577 error = nbd_do_it(lo);
578 if (error)
579 return error;
573 /* on return tidy up in case we have a signal */ 580 /* on return tidy up in case we have a signal */
574 /* Forcibly shutdown the socket causing all listeners 581 /* Forcibly shutdown the socket causing all listeners
575 * to error 582 * to error
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 43d4ebcb3b44..a1512da32410 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -151,7 +151,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page,
151} 151}
152 152
153/* 153/*
154 * ->writepage to the the blockdev's mapping has to redirty the page so that the 154 * ->writepage to the blockdev's mapping has to redirty the page so that the
155 * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM 155 * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM
156 * won't try to (pointlessly) write the page again for a while. 156 * won't try to (pointlessly) write the page again for a while.
157 * 157 *
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1e32fb834eb8..2df42fdcdc91 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -631,7 +631,8 @@ config HVC_CONSOLE
631 631
632config HVC_ISERIES 632config HVC_ISERIES
633 bool "iSeries Hypervisor Virtual Console support" 633 bool "iSeries Hypervisor Virtual Console support"
634 depends on PPC_ISERIES && !VIOCONS 634 depends on PPC_ISERIES
635 default y
635 select HVC_DRIVER 636 select HVC_DRIVER
636 help 637 help
637 iSeries machines support a hypervisor virtual console. 638 iSeries machines support a hypervisor virtual console.
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 892db7096986..32ed19c9ec1c 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -65,7 +65,7 @@ int drm_dma_setup(drm_device_t * dev)
65 * \param dev DRM device. 65 * \param dev DRM device.
66 * 66 *
67 * Free all pages associated with DMA buffers, the buffers and pages lists, and 67 * Free all pages associated with DMA buffers, the buffers and pages lists, and
68 * finally the the drm_device::dma structure itself. 68 * finally the drm_device::dma structure itself.
69 */ 69 */
70void drm_dma_takedown(drm_device_t * dev) 70void drm_dma_takedown(drm_device_t * dev)
71{ 71{
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 35540cfb43dd..b5c5b9fa84c3 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -157,7 +157,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
157 * \param address access address. 157 * \param address access address.
158 * \return pointer to the page structure. 158 * \return pointer to the page structure.
159 * 159 *
160 * Get the the mapping, find the real physical page to map, get the page, and 160 * Get the mapping, find the real physical page to map, get the page, and
161 * return it. 161 * return it.
162 */ 162 */
163static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, 163static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index a881f96c983e..ecda760ae8c0 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -293,7 +293,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
293# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 293# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
294# define R300_PVS_CNTL_1_POS_END_SHIFT 10 294# define R300_PVS_CNTL_1_POS_END_SHIFT 10
295# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20 295# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
296/* Addresses are relative the the vertex program parameters area. */ 296/* Addresses are relative to the vertex program parameters area. */
297#define R300_VAP_PVS_CNTL_2 0x22D4 297#define R300_VAP_PVS_CNTL_2 0x22D4
298# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0 298# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
299# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16 299# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 49f914e79216..9e1fc02967ff 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -12,7 +12,7 @@
12 * 12 *
13 * This driver allows use of the real time clock (built into 13 * This driver allows use of the real time clock (built into
14 * nearly all computers) from user space. It exports the /dev/rtc 14 * nearly all computers) from user space. It exports the /dev/rtc
15 * interface supporting various ioctl() and also the /proc/dev/rtc 15 * interface supporting various ioctl() and also the /proc/driver/rtc
16 * pseudo-file for status information. 16 * pseudo-file for status information.
17 * 17 *
18 * The ioctls can be used to set the interrupt behaviour where 18 * The ioctls can be used to set the interrupt behaviour where
@@ -377,7 +377,7 @@ static int gen_rtc_release(struct inode *inode, struct file *file)
377#ifdef CONFIG_PROC_FS 377#ifdef CONFIG_PROC_FS
378 378
379/* 379/*
380 * Info exported via "/proc/rtc". 380 * Info exported via "/proc/driver/rtc".
381 */ 381 */
382 382
383static int gen_rtc_proc_output(char *buf) 383static int gen_rtc_proc_output(char *buf)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5f3acd8e64b8..7cda04b33534 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -91,3 +91,17 @@ config HW_RANDOM_OMAP
91 module will be called omap-rng. 91 module will be called omap-rng.
92 92
93 If unsure, say Y. 93 If unsure, say Y.
94
95config HW_RANDOM_PASEMI
96 tristate "PA Semi HW Random Number Generator support"
97 depends on HW_RANDOM && PPC_PASEMI
98 default HW_RANDOM
99 ---help---
100 This driver provides kernel-side support for the Random Number
101 Generator hardware found on PA6T-1682M processor.
102
103 To compile this driver as a module, choose M here: the
104 module will be called pasemi-rng.
105
106 If unsure, say Y.
107
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index c41fa19454e3..c8b7300e2fb1 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
10obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o 10obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
11obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o 11obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
12obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 12obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
13obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
new file mode 100644
index 000000000000..fa6040b6c8f2
--- /dev/null
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Maintained by: Olof Johansson <olof@lixom.net>
5 *
6 * Driver for the PWRficient onchip rng
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/platform_device.h>
25#include <linux/hw_random.h>
26#include <asm/of_platform.h>
27#include <asm/io.h>
28
29#define SDCRNG_CTL_REG 0x00
30#define SDCRNG_CTL_FVLD_M 0x0000f000
31#define SDCRNG_CTL_FVLD_S 12
32#define SDCRNG_CTL_KSZ 0x00000800
33#define SDCRNG_CTL_RSRC_CRG 0x00000010
34#define SDCRNG_CTL_RSRC_RRG 0x00000000
35#define SDCRNG_CTL_CE 0x00000004
36#define SDCRNG_CTL_RE 0x00000002
37#define SDCRNG_CTL_DR 0x00000001
38#define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG)
39#define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG)
40#define SDCRNG_VAL_REG 0x20
41
42#define MODULE_NAME "pasemi_rng"
43
44static int pasemi_rng_data_present(struct hwrng *rng)
45{
46 void __iomem *rng_regs = (void __iomem *)rng->priv;
47
48 return (in_le32(rng_regs + SDCRNG_CTL_REG)
49 & SDCRNG_CTL_FVLD_M) ? 1 : 0;
50}
51
52static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
53{
54 void __iomem *rng_regs = (void __iomem *)rng->priv;
55 *data = in_le32(rng_regs + SDCRNG_VAL_REG);
56 return 4;
57}
58
59static int pasemi_rng_init(struct hwrng *rng)
60{
61 void __iomem *rng_regs = (void __iomem *)rng->priv;
62 u32 ctl;
63
64 ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ;
65 out_le32(rng_regs + SDCRNG_CTL_REG, ctl);
66 out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR);
67
68 return 0;
69}
70
71static void pasemi_rng_cleanup(struct hwrng *rng)
72{
73 void __iomem *rng_regs = (void __iomem *)rng->priv;
74 u32 ctl;
75
76 ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE;
77 out_le32(rng_regs + SDCRNG_CTL_REG,
78 in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl);
79}
80
81static struct hwrng pasemi_rng = {
82 .name = MODULE_NAME,
83 .init = pasemi_rng_init,
84 .cleanup = pasemi_rng_cleanup,
85 .data_present = pasemi_rng_data_present,
86 .data_read = pasemi_rng_data_read,
87};
88
89static int __devinit rng_probe(struct of_device *ofdev,
90 const struct of_device_id *match)
91{
92 void __iomem *rng_regs;
93 struct device_node *rng_np = ofdev->node;
94 struct resource res;
95 int err = 0;
96
97 err = of_address_to_resource(rng_np, 0, &res);
98 if (err)
99 return -ENODEV;
100
101 rng_regs = ioremap(res.start, 0x100);
102
103 if (!rng_regs)
104 return -ENOMEM;
105
106 pasemi_rng.priv = (unsigned long)rng_regs;
107
108 printk(KERN_INFO "Registering PA Semi RNG\n");
109
110 err = hwrng_register(&pasemi_rng);
111
112 if (err)
113 iounmap(rng_regs);
114
115 return err;
116}
117
118static int __devexit rng_remove(struct of_device *dev)
119{
120 void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
121
122 hwrng_unregister(&pasemi_rng);
123 iounmap(rng_regs);
124
125 return 0;
126}
127
128static struct of_device_id rng_match[] = {
129 {
130 .compatible = "1682m-rng",
131 },
132 {},
133};
134
135static struct of_platform_driver rng_driver = {
136 .name = "pasemi-rng",
137 .match_table = rng_match,
138 .probe = rng_probe,
139 .remove = rng_remove,
140};
141
142static int __init rng_init(void)
143{
144 return of_register_platform_driver(&rng_driver);
145}
146module_init(rng_init);
147
148static void __exit rng_exit(void)
149{
150 of_unregister_platform_driver(&rng_driver);
151}
152module_exit(rng_exit);
153
154MODULE_LICENSE("GPL");
155MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
156MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 27c1179ee527..f25facd97bb4 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -21,6 +21,7 @@ config SYNCLINK_CS
21config CARDMAN_4000 21config CARDMAN_4000
22 tristate "Omnikey Cardman 4000 support" 22 tristate "Omnikey Cardman 4000 support"
23 depends on PCMCIA 23 depends on PCMCIA
24 select BITREVERSE
24 help 25 help
25 Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard 26 Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard
26 reader. 27 reader.
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index e91b43a014b0..fee58e03dbe2 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/bitrev.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/io.h> 36#include <asm/io.h>
36 37
@@ -194,41 +195,17 @@ static inline unsigned char xinb(unsigned short port)
194} 195}
195#endif 196#endif
196 197
197#define b_0000 15 198static inline unsigned char invert_revert(unsigned char ch)
198#define b_0001 14 199{
199#define b_0010 13 200 return bitrev8(~ch);
200#define b_0011 12 201}
201#define b_0100 11
202#define b_0101 10
203#define b_0110 9
204#define b_0111 8
205#define b_1000 7
206#define b_1001 6
207#define b_1010 5
208#define b_1011 4
209#define b_1100 3
210#define b_1101 2
211#define b_1110 1
212#define b_1111 0
213
214static unsigned char irtab[16] = {
215 b_0000, b_1000, b_0100, b_1100,
216 b_0010, b_1010, b_0110, b_1110,
217 b_0001, b_1001, b_0101, b_1101,
218 b_0011, b_1011, b_0111, b_1111
219};
220 202
221static void str_invert_revert(unsigned char *b, int len) 203static void str_invert_revert(unsigned char *b, int len)
222{ 204{
223 int i; 205 int i;
224 206
225 for (i = 0; i < len; i++) 207 for (i = 0; i < len; i++)
226 b[i] = (irtab[b[i] & 0x0f] << 4) | irtab[b[i] >> 4]; 208 b[i] = invert_revert(b[i]);
227}
228
229static unsigned char invert_revert(unsigned char ch)
230{
231 return (irtab[ch & 0x0f] << 4) | irtab[ch >> 4];
232} 209}
233 210
234#define ATRLENCK(dev,pos) \ 211#define ATRLENCK(dev,pos) \
@@ -1114,7 +1091,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
1114 /* 1091 /*
1115 * wait for atr to become valid. 1092 * wait for atr to become valid.
1116 * note: it is important to lock this code. if we dont, the monitor 1093 * note: it is important to lock this code. if we dont, the monitor
1117 * could be run between test_bit and the the call the sleep on the 1094 * could be run between test_bit and the call to sleep on the
1118 * atr-queue. if *then* the monitor detects atr valid, it will wake up 1095 * atr-queue. if *then* the monitor detects atr valid, it will wake up
1119 * any process on the atr-queue, *but* since we have been interrupted, 1096 * any process on the atr-queue, *but* since we have been interrupted,
1120 * we do not yet sleep on this queue. this would result in a missed 1097 * we do not yet sleep on this queue. this would result in a missed
@@ -1881,8 +1858,11 @@ static int cm4000_probe(struct pcmcia_device *link)
1881 init_waitqueue_head(&dev->readq); 1858 init_waitqueue_head(&dev->readq);
1882 1859
1883 ret = cm4000_config(link, i); 1860 ret = cm4000_config(link, i);
1884 if (ret) 1861 if (ret) {
1862 dev_table[i] = NULL;
1863 kfree(dev);
1885 return ret; 1864 return ret;
1865 }
1886 1866
1887 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL, 1867 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL,
1888 "cmm%d", i); 1868 "cmm%d", i);
@@ -1907,7 +1887,7 @@ static void cm4000_detach(struct pcmcia_device *link)
1907 cm4000_release(link); 1887 cm4000_release(link);
1908 1888
1909 dev_table[devno] = NULL; 1889 dev_table[devno] = NULL;
1910 kfree(dev); 1890 kfree(dev);
1911 1891
1912 class_device_destroy(cmm_class, MKDEV(major, devno)); 1892 class_device_destroy(cmm_class, MKDEV(major, devno));
1913 1893
@@ -1956,12 +1936,14 @@ static int __init cmm_init(void)
1956 if (major < 0) { 1936 if (major < 0) {
1957 printk(KERN_WARNING MODULE_NAME 1937 printk(KERN_WARNING MODULE_NAME
1958 ": could not get major number\n"); 1938 ": could not get major number\n");
1939 class_destroy(cmm_class);
1959 return major; 1940 return major;
1960 } 1941 }
1961 1942
1962 rc = pcmcia_register_driver(&cm4000_driver); 1943 rc = pcmcia_register_driver(&cm4000_driver);
1963 if (rc < 0) { 1944 if (rc < 0) {
1964 unregister_chrdev(major, DEVICE_NAME); 1945 unregister_chrdev(major, DEVICE_NAME);
1946 class_destroy(cmm_class);
1965 return rc; 1947 return rc;
1966 } 1948 }
1967 1949
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index f2e4ec4fd407..af88181a17f4 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -636,8 +636,11 @@ static int reader_probe(struct pcmcia_device *link)
636 setup_timer(&dev->poll_timer, cm4040_do_poll, 0); 636 setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
637 637
638 ret = reader_config(link, i); 638 ret = reader_config(link, i);
639 if (ret) 639 if (ret) {
640 dev_table[i] = NULL;
641 kfree(dev);
640 return ret; 642 return ret;
643 }
641 644
642 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL, 645 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL,
643 "cmx%d", i); 646 "cmx%d", i);
@@ -708,12 +711,14 @@ static int __init cm4040_init(void)
708 if (major < 0) { 711 if (major < 0) {
709 printk(KERN_WARNING MODULE_NAME 712 printk(KERN_WARNING MODULE_NAME
710 ": could not get major number\n"); 713 ": could not get major number\n");
714 class_destroy(cmx_class);
711 return major; 715 return major;
712 } 716 }
713 717
714 rc = pcmcia_register_driver(&reader_driver); 718 rc = pcmcia_register_driver(&reader_driver);
715 if (rc < 0) { 719 if (rc < 0) {
716 unregister_chrdev(major, DEVICE_NAME); 720 unregister_chrdev(major, DEVICE_NAME);
721 class_destroy(cmx_class);
717 return rc; 722 return rc;
718 } 723 }
719 724
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fe00c7dfb649..11089be0691b 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -33,7 +33,7 @@ config TCG_NSC
33 tristate "National Semiconductor TPM Interface" 33 tristate "National Semiconductor TPM Interface"
34 depends on TCG_TPM && PNPACPI 34 depends on TCG_TPM && PNPACPI
35 ---help--- 35 ---help---
36 If you have a TPM security chip from National Semicondutor 36 If you have a TPM security chip from National Semiconductor
37 say Yes and it will be accessible from within Linux. To 37 say Yes and it will be accessible from within Linux. To
38 compile this driver as a module, choose M here; the module 38 compile this driver as a module, choose M here; the module
39 will be called tpm_nsc. 39 will be called tpm_nsc.
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 7710a6a77d97..fc662e4ce58a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -934,13 +934,6 @@ restart:
934 return -EINVAL; 934 return -EINVAL;
935 935
936 /* 936 /*
937 * No more input please, we are switching. The new ldisc
938 * will update this value in the ldisc open function
939 */
940
941 tty->receive_room = 0;
942
943 /*
944 * Problem: What do we do if this blocks ? 937 * Problem: What do we do if this blocks ?
945 */ 938 */
946 939
@@ -951,6 +944,13 @@ restart:
951 return 0; 944 return 0;
952 } 945 }
953 946
947 /*
948 * No more input please, we are switching. The new ldisc
949 * will update this value in the ldisc open function
950 */
951
952 tty->receive_room = 0;
953
954 o_ldisc = tty->ldisc; 954 o_ldisc = tty->ldisc;
955 o_tty = tty->link; 955 o_tty = tty->link;
956 956
@@ -1573,11 +1573,11 @@ void no_tty(void)
1573 1573
1574 1574
1575/** 1575/**
1576 * stop_tty - propogate flow control 1576 * stop_tty - propagate flow control
1577 * @tty: tty to stop 1577 * @tty: tty to stop
1578 * 1578 *
1579 * Perform flow control to the driver. For PTY/TTY pairs we 1579 * Perform flow control to the driver. For PTY/TTY pairs we
1580 * must also propogate the TIOCKPKT status. May be called 1580 * must also propagate the TIOCKPKT status. May be called
1581 * on an already stopped device and will not re-call the driver 1581 * on an already stopped device and will not re-call the driver
1582 * method. 1582 * method.
1583 * 1583 *
@@ -1607,11 +1607,11 @@ void stop_tty(struct tty_struct *tty)
1607EXPORT_SYMBOL(stop_tty); 1607EXPORT_SYMBOL(stop_tty);
1608 1608
1609/** 1609/**
1610 * start_tty - propogate flow control 1610 * start_tty - propagate flow control
1611 * @tty: tty to start 1611 * @tty: tty to start
1612 * 1612 *
1613 * Start a tty that has been stopped if at all possible. Perform 1613 * Start a tty that has been stopped if at all possible. Perform
1614 * any neccessary wakeups and propogate the TIOCPKT status. If this 1614 * any neccessary wakeups and propagate the TIOCPKT status. If this
1615 * is the tty was previous stopped and is being started then the 1615 * is the tty was previous stopped and is being started then the
1616 * driver start method is invoked and the line discipline woken. 1616 * driver start method is invoked and the line discipline woken.
1617 * 1617 *
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 893dbaf386fb..eb37fba9b7ef 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1685,9 +1685,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1685 if (sys_dev) { 1685 if (sys_dev) {
1686 switch (action) { 1686 switch (action) {
1687 case CPU_ONLINE: 1687 case CPU_ONLINE:
1688 case CPU_ONLINE_FROZEN:
1688 cpufreq_add_dev(sys_dev); 1689 cpufreq_add_dev(sys_dev);
1689 break; 1690 break;
1690 case CPU_DOWN_PREPARE: 1691 case CPU_DOWN_PREPARE:
1692 case CPU_DOWN_PREPARE_FROZEN:
1691 if (unlikely(lock_policy_rwsem_write(cpu))) 1693 if (unlikely(lock_policy_rwsem_write(cpu)))
1692 BUG(); 1694 BUG();
1693 1695
@@ -1699,6 +1701,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1699 __cpufreq_remove_dev(sys_dev); 1701 __cpufreq_remove_dev(sys_dev);
1700 break; 1702 break;
1701 case CPU_DOWN_FAILED: 1703 case CPU_DOWN_FAILED:
1704 case CPU_DOWN_FAILED_FROZEN:
1702 cpufreq_add_dev(sys_dev); 1705 cpufreq_add_dev(sys_dev);
1703 break; 1706 break;
1704 } 1707 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index d1c7cac9316c..d2f0cbd8b8f3 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -313,9 +313,11 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
313 313
314 switch (action) { 314 switch (action) {
315 case CPU_ONLINE: 315 case CPU_ONLINE:
316 case CPU_ONLINE_FROZEN:
316 cpufreq_update_policy(cpu); 317 cpufreq_update_policy(cpu);
317 break; 318 break;
318 case CPU_DEAD: 319 case CPU_DEAD:
320 case CPU_DEAD_FROZEN:
319 cpufreq_stats_free_table(cpu); 321 cpufreq_stats_free_table(cpu);
320 break; 322 break;
321 } 323 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f21fe66c9eef..f4c634504d1a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,7 +51,7 @@ config CRYPTO_DEV_GEODE
51 default m 51 default m
52 help 52 help
53 Say 'Y' here to use the AMD Geode LX processor on-board AES 53 Say 'Y' here to use the AMD Geode LX processor on-board AES
54 engine for the CryptoAPI AES alogrithm. 54 engine for the CryptoAPI AES algorithm.
55 55
56 To compile this driver as a module, choose M here: the module 56 To compile this driver as a module, choose M here: the module
57 will be called geode-aes. 57 will be called geode-aes.
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 3215f9c87f32..b51c104a28a2 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -981,7 +981,7 @@ static SENSOR_DEVICE_ATTR_2(fan##offset##_output, S_IRUGO | S_IWUSR, \
981static SENSOR_DEVICE_ATTR(fan##offset##_manual, S_IRUGO | S_IWUSR, \ 981static SENSOR_DEVICE_ATTR(fan##offset##_manual, S_IRUGO | S_IWUSR, \
982 applesmc_show_fan_manual, applesmc_store_fan_manual, offset-1); \ 982 applesmc_show_fan_manual, applesmc_store_fan_manual, offset-1); \
983\ 983\
984static SENSOR_DEVICE_ATTR(fan##offset##_position, S_IRUGO, \ 984static SENSOR_DEVICE_ATTR(fan##offset##_label, S_IRUGO, \
985 applesmc_show_fan_position, NULL, offset-1); \ 985 applesmc_show_fan_position, NULL, offset-1); \
986\ 986\
987static struct attribute *fan##offset##_attributes[] = { \ 987static struct attribute *fan##offset##_attributes[] = { \
@@ -991,7 +991,7 @@ static struct attribute *fan##offset##_attributes[] = { \
991 &sensor_dev_attr_fan##offset##_safe.dev_attr.attr, \ 991 &sensor_dev_attr_fan##offset##_safe.dev_attr.attr, \
992 &sensor_dev_attr_fan##offset##_output.dev_attr.attr, \ 992 &sensor_dev_attr_fan##offset##_output.dev_attr.attr, \
993 &sensor_dev_attr_fan##offset##_manual.dev_attr.attr, \ 993 &sensor_dev_attr_fan##offset##_manual.dev_attr.attr, \
994 &sensor_dev_attr_fan##offset##_position.dev_attr.attr, \ 994 &sensor_dev_attr_fan##offset##_label.dev_attr.attr, \
995 NULL \ 995 NULL \
996}; 996};
997 997
@@ -1190,7 +1190,8 @@ static int __init applesmc_init(void)
1190 if (ret) 1190 if (ret)
1191 goto out_region; 1191 goto out_region;
1192 1192
1193 pdev = platform_device_register_simple("applesmc", -1, NULL, 0); 1193 pdev = platform_device_register_simple("applesmc", APPLESMC_DATA_PORT,
1194 NULL, 0);
1194 if (IS_ERR(pdev)) { 1195 if (IS_ERR(pdev)) {
1195 ret = PTR_ERR(pdev); 1196 ret = PTR_ERR(pdev);
1196 goto out_driver; 1197 goto out_driver;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 03b1f650d1c4..75e3911810a3 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -309,9 +309,11 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
309 309
310 switch (action) { 310 switch (action) {
311 case CPU_ONLINE: 311 case CPU_ONLINE:
312 case CPU_ONLINE_FROZEN:
312 coretemp_device_add(cpu); 313 coretemp_device_add(cpu);
313 break; 314 break;
314 case CPU_DEAD: 315 case CPU_DEAD:
316 case CPU_DEAD_FROZEN:
315 coretemp_device_remove(cpu); 317 coretemp_device_remove(cpu);
316 break; 318 break;
317 } 319 }
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 7ed92dc3d833..3c3f2ebf3fc9 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -354,7 +354,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
354 * also needs to get error handling and probably 354 * also needs to get error handling and probably
355 * an #ifdef CONFIG_SOFTWARE_SUSPEND 355 * an #ifdef CONFIG_SOFTWARE_SUSPEND
356 */ 356 */
357 pm_suspend(PM_SUSPEND_DISK); 357 hibernate();
358#endif 358#endif
359 poll = 1; 359 poll = 1;
360 } 360 }
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index c0188de3cc66..79cec50a242f 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -831,7 +831,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
831 831
832 /* 832 /*
833 * Now set up the hw. We have to do this ourselves as 833 * Now set up the hw. We have to do this ourselves as
834 * the MMIO layout isnt the same as the the standard port 834 * the MMIO layout isnt the same as the standard port
835 * based I/O 835 * based I/O
836 */ 836 */
837 837
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 6a1a0572275e..835937e38529 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1702,7 +1702,7 @@ static int nodemgr_host_thread(void *__hi)
1702 generation = get_hpsb_generation(host); 1702 generation = get_hpsb_generation(host);
1703 1703
1704 /* If we get a reset before we are done waiting, then 1704 /* If we get a reset before we are done waiting, then
1705 * start the the waiting over again */ 1705 * start the waiting over again */
1706 if (generation != g) 1706 if (generation != g)
1707 g = generation, i = 0; 1707 g = generation, i = 0;
1708 } 1708 }
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index f284be1c9166..82dda2faf4d0 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -745,6 +745,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
745 745
746 switch (action) { 746 switch (action) {
747 case CPU_UP_PREPARE: 747 case CPU_UP_PREPARE:
748 case CPU_UP_PREPARE_FROZEN:
748 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); 749 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
749 if(!create_comp_task(pool, cpu)) { 750 if(!create_comp_task(pool, cpu)) {
750 ehca_gen_err("Can't create comp_task for cpu: %x", cpu); 751 ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
@@ -752,24 +753,29 @@ static int comp_pool_callback(struct notifier_block *nfb,
752 } 753 }
753 break; 754 break;
754 case CPU_UP_CANCELED: 755 case CPU_UP_CANCELED:
756 case CPU_UP_CANCELED_FROZEN:
755 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); 757 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
756 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 758 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
757 kthread_bind(cct->task, any_online_cpu(cpu_online_map)); 759 kthread_bind(cct->task, any_online_cpu(cpu_online_map));
758 destroy_comp_task(pool, cpu); 760 destroy_comp_task(pool, cpu);
759 break; 761 break;
760 case CPU_ONLINE: 762 case CPU_ONLINE:
763 case CPU_ONLINE_FROZEN:
761 ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu); 764 ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
762 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 765 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
763 kthread_bind(cct->task, cpu); 766 kthread_bind(cct->task, cpu);
764 wake_up_process(cct->task); 767 wake_up_process(cct->task);
765 break; 768 break;
766 case CPU_DOWN_PREPARE: 769 case CPU_DOWN_PREPARE:
770 case CPU_DOWN_PREPARE_FROZEN:
767 ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu); 771 ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
768 break; 772 break;
769 case CPU_DOWN_FAILED: 773 case CPU_DOWN_FAILED:
774 case CPU_DOWN_FAILED_FROZEN:
770 ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu); 775 ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
771 break; 776 break;
772 case CPU_DEAD: 777 case CPU_DEAD:
778 case CPU_DEAD_FROZEN:
773 ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu); 779 ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
774 destroy_comp_task(pool, cpu); 780 destroy_comp_task(pool, cpu);
775 take_over_work(pool, cpu); 781 take_over_work(pool, cpu);
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c921d6c522f5..c92f9d764fce 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -17,7 +17,7 @@ config CAPI_TRACE
17 help 17 help
18 If you say Y here, the kernelcapi driver can make verbose traces 18 If you say Y here, the kernelcapi driver can make verbose traces
19 of CAPI messages. This feature can be enabled/disabled via IOCTL for 19 of CAPI messages. This feature can be enabled/disabled via IOCTL for
20 every controler (default disabled). 20 every controller (default disabled).
21 This will increase the size of the kernelcapi module by 20 KB. 21 This will increase the size of the kernelcapi module by 20 KB.
22 If unsure, say Y. 22 If unsure, say Y.
23 23
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index c8e1c357cec8..a1263019df5e 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -138,8 +138,6 @@ struct usb_cardstate {
138 char bchars[6]; /* for request 0x19 */ 138 char bchars[6]; /* for request 0x19 */
139}; 139};
140 140
141struct usb_bc_state {};
142
143static inline unsigned tiocm_to_gigaset(unsigned state) 141static inline unsigned tiocm_to_gigaset(unsigned state)
144{ 142{
145 return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0); 143 return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0);
@@ -579,25 +577,21 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
579 577
580static int gigaset_freebcshw(struct bc_state *bcs) 578static int gigaset_freebcshw(struct bc_state *bcs)
581{ 579{
582 if (!bcs->hw.usb) 580 /* unused */
583 return 0;
584 //FIXME
585 kfree(bcs->hw.usb);
586 return 1; 581 return 1;
587} 582}
588 583
589/* Initialize the b-channel structure */ 584/* Initialize the b-channel structure */
590static int gigaset_initbcshw(struct bc_state *bcs) 585static int gigaset_initbcshw(struct bc_state *bcs)
591{ 586{
592 bcs->hw.usb = kmalloc(sizeof(struct usb_bc_state), GFP_KERNEL); 587 /* unused */
593 if (!bcs->hw.usb) 588 bcs->hw.usb = NULL;
594 return 0;
595
596 return 1; 589 return 1;
597} 590}
598 591
599static void gigaset_reinitbcshw(struct bc_state *bcs) 592static void gigaset_reinitbcshw(struct bc_state *bcs)
600{ 593{
594 /* nothing to do for M10x */
601} 595}
602 596
603static void gigaset_freecshw(struct cardstate *cs) 597static void gigaset_freecshw(struct cardstate *cs)
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
index af3eb9e795b5..85784a7ffb25 100644
--- a/drivers/isdn/hardware/eicon/divasync.h
+++ b/drivers/isdn/hardware/eicon/divasync.h
@@ -216,7 +216,7 @@ typedef struct
216#define SERIAL_HOOK_RING 0x85 216#define SERIAL_HOOK_RING 0x85
217#define SERIAL_HOOK_DETACH 0x8f 217#define SERIAL_HOOK_DETACH 0x8f
218 unsigned char Flags; /* function refinements */ 218 unsigned char Flags; /* function refinements */
219 /* parameters passed by the the ATTACH request */ 219 /* parameters passed by the ATTACH request */
220 SERIAL_INT_CB InterruptHandler; /* called on each interrupt */ 220 SERIAL_INT_CB InterruptHandler; /* called on each interrupt */
221 SERIAL_DPC_CB DeferredHandler; /* called on hook state changes */ 221 SERIAL_DPC_CB DeferredHandler; /* called on hook state changes */
222 void *HandlerContext; /* context for both handlers */ 222 void *HandlerContext; /* context for both handlers */
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 99e70d4103b6..1f18f1993387 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -1217,11 +1217,11 @@ usb_init(hfcusb_data * hfc)
1217 /* aux = output, reset off */ 1217 /* aux = output, reset off */
1218 write_usb(hfc, HFCUSB_CIRM, 0x10); 1218 write_usb(hfc, HFCUSB_CIRM, 0x10);
1219 1219
1220 /* set USB_SIZE to match the the wMaxPacketSize for INT or BULK transfers */ 1220 /* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
1221 write_usb(hfc, HFCUSB_USB_SIZE, 1221 write_usb(hfc, HFCUSB_USB_SIZE,
1222 (hfc->packet_size / 8) | ((hfc->packet_size / 8) << 4)); 1222 (hfc->packet_size / 8) | ((hfc->packet_size / 8) << 4));
1223 1223
1224 /* set USB_SIZE_I to match the the wMaxPacketSize for ISO transfers */ 1224 /* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
1225 write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size); 1225 write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size);
1226 1226
1227 /* enable PCM/GCI master mode */ 1227 /* enable PCM/GCI master mode */
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index c8b8cfa332bb..0d892600ff00 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -2889,7 +2889,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2889 2889
2890 switch (val) { 2890 switch (val) {
2891 case CPU_DOWN_PREPARE: 2891 case CPU_DOWN_PREPARE:
2892 case CPU_DOWN_PREPARE_FROZEN:
2892 case CPU_UP_CANCELED: 2893 case CPU_UP_CANCELED:
2894 case CPU_UP_CANCELED_FROZEN:
2893 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 2895 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2894 cpu); 2896 cpu);
2895 decache_vcpus_on_cpu(cpu); 2897 decache_vcpus_on_cpu(cpu);
@@ -2897,6 +2899,7 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2897 NULL, 0, 1); 2899 NULL, 0, 1);
2898 break; 2900 break;
2899 case CPU_ONLINE: 2901 case CPU_ONLINE:
2902 case CPU_ONLINE_FROZEN:
2900 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2903 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2901 cpu); 2904 cpu);
2902 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, 2905 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index 1d49d2ade557..677c99325be5 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/leds/h1940-leds.c 2 * drivers/leds/leds-h1940.c
3 * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org> 3 * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org>
4 * 4 *
5 * This file is subject to the terms and conditions of the GNU General Public 5 * This file is subject to the terms and conditions of the GNU General Public
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a32c91e27b3c..58926da0ae18 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -237,7 +237,7 @@ config PMAC_RACKMETER
237 tristate "Support for Apple XServe front panel LEDs" 237 tristate "Support for Apple XServe front panel LEDs"
238 depends on PPC_PMAC 238 depends on PPC_PMAC
239 help 239 help
240 This driver procides some support to control the front panel 240 This driver provides some support to control the front panel
241 blue LEDs "vu-meter" of the XServer macs. 241 blue LEDs "vu-meter" of the XServer macs.
242 242
243endif # MACINTOSH_DRIVERS 243endif # MACINTOSH_DRIVERS
diff --git a/drivers/mca/mca-bus.c b/drivers/mca/mca-bus.c
index da862e4632dd..67b8e9453b19 100644
--- a/drivers/mca/mca-bus.c
+++ b/drivers/mca/mca-bus.c
@@ -47,19 +47,25 @@ static int mca_bus_match (struct device *dev, struct device_driver *drv)
47{ 47{
48 struct mca_device *mca_dev = to_mca_device (dev); 48 struct mca_device *mca_dev = to_mca_device (dev);
49 struct mca_driver *mca_drv = to_mca_driver (drv); 49 struct mca_driver *mca_drv = to_mca_driver (drv);
50 const short *mca_ids = mca_drv->id_table; 50 const unsigned short *mca_ids = mca_drv->id_table;
51 int i; 51 int i = 0;
52 52
53 if (!mca_ids) 53 if (mca_ids) {
54 return 0; 54 for(i = 0; mca_ids[i]; i++) {
55 55 if (mca_ids[i] == mca_dev->pos_id) {
56 for(i = 0; mca_ids[i]; i++) { 56 mca_dev->index = i;
57 if (mca_ids[i] == mca_dev->pos_id) { 57 return 1;
58 mca_dev->index = i; 58 }
59 return 1;
60 } 59 }
61 } 60 }
62 61 /* If the integrated id is present, treat it as though it were an
62 * additional id in the id_table (it can't be because by definition,
63 * integrated id's overflow a short */
64 if (mca_drv->integrated_id && mca_dev->pos_id ==
65 mca_drv->integrated_id) {
66 mca_dev->index = i;
67 return 1;
68 }
63 return 0; 69 return 0;
64} 70}
65 71
diff --git a/drivers/mca/mca-driver.c b/drivers/mca/mca-driver.c
index 2223466b3d8a..32cd39bcc715 100644
--- a/drivers/mca/mca-driver.c
+++ b/drivers/mca/mca-driver.c
@@ -36,12 +36,25 @@ int mca_register_driver(struct mca_driver *mca_drv)
36 mca_drv->driver.bus = &mca_bus_type; 36 mca_drv->driver.bus = &mca_bus_type;
37 if ((r = driver_register(&mca_drv->driver)) < 0) 37 if ((r = driver_register(&mca_drv->driver)) < 0)
38 return r; 38 return r;
39 mca_drv->integrated_id = 0;
39 } 40 }
40 41
41 return 0; 42 return 0;
42} 43}
43EXPORT_SYMBOL(mca_register_driver); 44EXPORT_SYMBOL(mca_register_driver);
44 45
46int mca_register_driver_integrated(struct mca_driver *mca_driver,
47 int integrated_id)
48{
49 int r = mca_register_driver(mca_driver);
50
51 if (!r)
52 mca_driver->integrated_id = integrated_id;
53
54 return r;
55}
56EXPORT_SYMBOL(mca_register_driver_integrated);
57
45void mca_unregister_driver(struct mca_driver *mca_drv) 58void mca_unregister_driver(struct mca_driver *mca_drv)
46{ 59{
47 if (MCA_bus) 60 if (MCA_bus)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4540ade6b6b5..7df934d69134 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -262,6 +262,15 @@ config DM_MULTIPATH_EMC
262 ---help--- 262 ---help---
263 Multipath support for EMC CX/AX series hardware. 263 Multipath support for EMC CX/AX series hardware.
264 264
265config DM_DELAY
266 tristate "I/O delaying target (EXPERIMENTAL)"
267 depends on BLK_DEV_DM && EXPERIMENTAL
268 ---help---
269 A target that delays reads and/or writes and can send
270 them to different devices. Useful for testing.
271
272 If unsure, say N.
273
265endmenu 274endmenu
266 275
267endif 276endif
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 34957a68d921..38754084eac7 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
31obj-$(CONFIG_BLK_DEV_MD) += md-mod.o 31obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
32obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o 32obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
33obj-$(CONFIG_DM_CRYPT) += dm-crypt.o 33obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
34obj-$(CONFIG_DM_DELAY) += dm-delay.o
34obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o 35obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
35obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o 36obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
36obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o 37obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index da4349649f7f..c6be88826fae 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -8,17 +8,43 @@
8#define DM_BIO_LIST_H 8#define DM_BIO_LIST_H
9 9
10#include <linux/bio.h> 10#include <linux/bio.h>
11#include <linux/prefetch.h>
11 12
12struct bio_list { 13struct bio_list {
13 struct bio *head; 14 struct bio *head;
14 struct bio *tail; 15 struct bio *tail;
15}; 16};
16 17
18static inline int bio_list_empty(const struct bio_list *bl)
19{
20 return bl->head == NULL;
21}
22
23#define BIO_LIST_INIT { .head = NULL, .tail = NULL }
24
25#define BIO_LIST(bl) \
26 struct bio_list bl = BIO_LIST_INIT
27
17static inline void bio_list_init(struct bio_list *bl) 28static inline void bio_list_init(struct bio_list *bl)
18{ 29{
19 bl->head = bl->tail = NULL; 30 bl->head = bl->tail = NULL;
20} 31}
21 32
33#define bio_list_for_each(bio, bl) \
34 for (bio = (bl)->head; bio && ({ prefetch(bio->bi_next); 1; }); \
35 bio = bio->bi_next)
36
37static inline unsigned bio_list_size(const struct bio_list *bl)
38{
39 unsigned sz = 0;
40 struct bio *bio;
41
42 bio_list_for_each(bio, bl)
43 sz++;
44
45 return sz;
46}
47
22static inline void bio_list_add(struct bio_list *bl, struct bio *bio) 48static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
23{ 49{
24 bio->bi_next = NULL; 50 bio->bi_next = NULL;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d8121234c347..7b0fcfc9eaa5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
33struct crypt_io { 33struct crypt_io {
34 struct dm_target *target; 34 struct dm_target *target;
35 struct bio *base_bio; 35 struct bio *base_bio;
36 struct bio *first_clone;
37 struct work_struct work; 36 struct work_struct work;
38 atomic_t pending; 37 atomic_t pending;
39 int error; 38 int error;
@@ -107,6 +106,8 @@ struct crypt_config {
107 106
108static struct kmem_cache *_crypt_io_pool; 107static struct kmem_cache *_crypt_io_pool;
109 108
109static void clone_init(struct crypt_io *, struct bio *);
110
110/* 111/*
111 * Different IV generation algorithms: 112 * Different IV generation algorithms:
112 * 113 *
@@ -120,6 +121,9 @@ static struct kmem_cache *_crypt_io_pool;
120 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 121 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
121 * (needed for LRW-32-AES and possible other narrow block modes) 122 * (needed for LRW-32-AES and possible other narrow block modes)
122 * 123 *
124 * null: the initial vector is always zero. Provides compatibility with
125 * obsolete loop_fish2 devices. Do not use for new devices.
126 *
123 * plumb: unimplemented, see: 127 * plumb: unimplemented, see:
124 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 128 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
125 */ 129 */
@@ -256,6 +260,13 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
256 return 0; 260 return 0;
257} 261}
258 262
263static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
264{
265 memset(iv, 0, cc->iv_size);
266
267 return 0;
268}
269
259static struct crypt_iv_operations crypt_iv_plain_ops = { 270static struct crypt_iv_operations crypt_iv_plain_ops = {
260 .generator = crypt_iv_plain_gen 271 .generator = crypt_iv_plain_gen
261}; 272};
@@ -272,6 +283,10 @@ static struct crypt_iv_operations crypt_iv_benbi_ops = {
272 .generator = crypt_iv_benbi_gen 283 .generator = crypt_iv_benbi_gen
273}; 284};
274 285
286static struct crypt_iv_operations crypt_iv_null_ops = {
287 .generator = crypt_iv_null_gen
288};
289
275static int 290static int
276crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, 291crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
277 struct scatterlist *in, unsigned int length, 292 struct scatterlist *in, unsigned int length,
@@ -378,36 +393,21 @@ static int crypt_convert(struct crypt_config *cc,
378 * This should never violate the device limitations 393 * This should never violate the device limitations
379 * May return a smaller bio when running out of pages 394 * May return a smaller bio when running out of pages
380 */ 395 */
381static struct bio * 396static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
382crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
383 struct bio *base_bio, unsigned int *bio_vec_idx)
384{ 397{
398 struct crypt_config *cc = io->target->private;
385 struct bio *clone; 399 struct bio *clone;
386 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 400 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
387 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; 401 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
388 unsigned int i; 402 unsigned int i;
389 403
390 if (base_bio) { 404 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
391 clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
392 __bio_clone(clone, base_bio);
393 } else
394 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
395
396 if (!clone) 405 if (!clone)
397 return NULL; 406 return NULL;
398 407
399 clone->bi_destructor = dm_crypt_bio_destructor; 408 clone_init(io, clone);
400
401 /* if the last bio was not complete, continue where that one ended */
402 clone->bi_idx = *bio_vec_idx;
403 clone->bi_vcnt = *bio_vec_idx;
404 clone->bi_size = 0;
405 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
406
407 /* clone->bi_idx pages have already been allocated */
408 size -= clone->bi_idx * PAGE_SIZE;
409 409
410 for (i = clone->bi_idx; i < nr_iovecs; i++) { 410 for (i = 0; i < nr_iovecs; i++) {
411 struct bio_vec *bv = bio_iovec_idx(clone, i); 411 struct bio_vec *bv = bio_iovec_idx(clone, i);
412 412
413 bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask); 413 bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -419,7 +419,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
419 * return a partially allocated bio, the caller will then try 419 * return a partially allocated bio, the caller will then try
420 * to allocate additional bios while submitting this partial bio 420 * to allocate additional bios while submitting this partial bio
421 */ 421 */
422 if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1)) 422 if (i == (MIN_BIO_PAGES - 1))
423 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; 423 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
424 424
425 bv->bv_offset = 0; 425 bv->bv_offset = 0;
@@ -438,12 +438,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
438 return NULL; 438 return NULL;
439 } 439 }
440 440
441 /*
442 * Remember the last bio_vec allocated to be able
443 * to correctly continue after the splitting.
444 */
445 *bio_vec_idx = clone->bi_vcnt;
446
447 return clone; 441 return clone;
448} 442}
449 443
@@ -495,9 +489,6 @@ static void dec_pending(struct crypt_io *io, int error)
495 if (!atomic_dec_and_test(&io->pending)) 489 if (!atomic_dec_and_test(&io->pending))
496 return; 490 return;
497 491
498 if (io->first_clone)
499 bio_put(io->first_clone);
500
501 bio_endio(io->base_bio, io->base_bio->bi_size, io->error); 492 bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
502 493
503 mempool_free(io, cc->io_pool); 494 mempool_free(io, cc->io_pool);
@@ -562,6 +553,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
562 clone->bi_end_io = crypt_endio; 553 clone->bi_end_io = crypt_endio;
563 clone->bi_bdev = cc->dev->bdev; 554 clone->bi_bdev = cc->dev->bdev;
564 clone->bi_rw = io->base_bio->bi_rw; 555 clone->bi_rw = io->base_bio->bi_rw;
556 clone->bi_destructor = dm_crypt_bio_destructor;
565} 557}
566 558
567static void process_read(struct crypt_io *io) 559static void process_read(struct crypt_io *io)
@@ -585,7 +577,6 @@ static void process_read(struct crypt_io *io)
585 } 577 }
586 578
587 clone_init(io, clone); 579 clone_init(io, clone);
588 clone->bi_destructor = dm_crypt_bio_destructor;
589 clone->bi_idx = 0; 580 clone->bi_idx = 0;
590 clone->bi_vcnt = bio_segments(base_bio); 581 clone->bi_vcnt = bio_segments(base_bio);
591 clone->bi_size = base_bio->bi_size; 582 clone->bi_size = base_bio->bi_size;
@@ -604,7 +595,6 @@ static void process_write(struct crypt_io *io)
604 struct convert_context ctx; 595 struct convert_context ctx;
605 unsigned remaining = base_bio->bi_size; 596 unsigned remaining = base_bio->bi_size;
606 sector_t sector = base_bio->bi_sector - io->target->begin; 597 sector_t sector = base_bio->bi_sector - io->target->begin;
607 unsigned bvec_idx = 0;
608 598
609 atomic_inc(&io->pending); 599 atomic_inc(&io->pending);
610 600
@@ -615,14 +605,14 @@ static void process_write(struct crypt_io *io)
615 * so repeat the whole process until all the data can be handled. 605 * so repeat the whole process until all the data can be handled.
616 */ 606 */
617 while (remaining) { 607 while (remaining) {
618 clone = crypt_alloc_buffer(cc, base_bio->bi_size, 608 clone = crypt_alloc_buffer(io, remaining);
619 io->first_clone, &bvec_idx);
620 if (unlikely(!clone)) { 609 if (unlikely(!clone)) {
621 dec_pending(io, -ENOMEM); 610 dec_pending(io, -ENOMEM);
622 return; 611 return;
623 } 612 }
624 613
625 ctx.bio_out = clone; 614 ctx.bio_out = clone;
615 ctx.idx_out = 0;
626 616
627 if (unlikely(crypt_convert(cc, &ctx) < 0)) { 617 if (unlikely(crypt_convert(cc, &ctx) < 0)) {
628 crypt_free_buffer_pages(cc, clone, clone->bi_size); 618 crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -631,31 +621,26 @@ static void process_write(struct crypt_io *io)
631 return; 621 return;
632 } 622 }
633 623
634 clone_init(io, clone); 624 /* crypt_convert should have filled the clone bio */
635 clone->bi_sector = cc->start + sector; 625 BUG_ON(ctx.idx_out < clone->bi_vcnt);
636
637 if (!io->first_clone) {
638 /*
639 * hold a reference to the first clone, because it
640 * holds the bio_vec array and that can't be freed
641 * before all other clones are released
642 */
643 bio_get(clone);
644 io->first_clone = clone;
645 }
646 626
627 clone->bi_sector = cc->start + sector;
647 remaining -= clone->bi_size; 628 remaining -= clone->bi_size;
648 sector += bio_sectors(clone); 629 sector += bio_sectors(clone);
649 630
650 /* prevent bio_put of first_clone */ 631 /* Grab another reference to the io struct
632 * before we kick off the request */
651 if (remaining) 633 if (remaining)
652 atomic_inc(&io->pending); 634 atomic_inc(&io->pending);
653 635
654 generic_make_request(clone); 636 generic_make_request(clone);
655 637
638 /* Do not reference clone after this - it
639 * may be gone already. */
640
656 /* out of memory -> run queues */ 641 /* out of memory -> run queues */
657 if (remaining) 642 if (remaining)
658 congestion_wait(bio_data_dir(clone), HZ/100); 643 congestion_wait(WRITE, HZ/100);
659 } 644 }
660} 645}
661 646
@@ -832,6 +817,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
832 cc->iv_gen_ops = &crypt_iv_essiv_ops; 817 cc->iv_gen_ops = &crypt_iv_essiv_ops;
833 else if (strcmp(ivmode, "benbi") == 0) 818 else if (strcmp(ivmode, "benbi") == 0)
834 cc->iv_gen_ops = &crypt_iv_benbi_ops; 819 cc->iv_gen_ops = &crypt_iv_benbi_ops;
820 else if (strcmp(ivmode, "null") == 0)
821 cc->iv_gen_ops = &crypt_iv_null_ops;
835 else { 822 else {
836 ti->error = "Invalid IV mode"; 823 ti->error = "Invalid IV mode";
837 goto bad2; 824 goto bad2;
@@ -954,10 +941,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
954 struct crypt_config *cc = ti->private; 941 struct crypt_config *cc = ti->private;
955 struct crypt_io *io; 942 struct crypt_io *io;
956 943
944 if (bio_barrier(bio))
945 return -EOPNOTSUPP;
946
957 io = mempool_alloc(cc->io_pool, GFP_NOIO); 947 io = mempool_alloc(cc->io_pool, GFP_NOIO);
958 io->target = ti; 948 io->target = ti;
959 io->base_bio = bio; 949 io->base_bio = bio;
960 io->first_clone = NULL;
961 io->error = io->post_process = 0; 950 io->error = io->post_process = 0;
962 atomic_set(&io->pending, 0); 951 atomic_set(&io->pending, 0);
963 kcryptd_queue_io(io); 952 kcryptd_queue_io(io);
@@ -1057,7 +1046,7 @@ error:
1057 1046
1058static struct target_type crypt_target = { 1047static struct target_type crypt_target = {
1059 .name = "crypt", 1048 .name = "crypt",
1060 .version= {1, 3, 0}, 1049 .version= {1, 5, 0},
1061 .module = THIS_MODULE, 1050 .module = THIS_MODULE,
1062 .ctr = crypt_ctr, 1051 .ctr = crypt_ctr,
1063 .dtr = crypt_dtr, 1052 .dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
new file mode 100644
index 000000000000..52c7cf9e5803
--- /dev/null
+++ b/drivers/md/dm-delay.c
@@ -0,0 +1,383 @@
1/*
2 * Copyright (C) 2005-2007 Red Hat GmbH
3 *
4 * A target that delays reads and/or writes and can send
5 * them to different devices.
6 *
7 * This file is released under the GPL.
8 */
9
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/blkdev.h>
13#include <linux/bio.h>
14#include <linux/slab.h>
15
16#include "dm.h"
17#include "dm-bio-list.h"
18
19#define DM_MSG_PREFIX "delay"
20
21struct delay_c {
22 struct timer_list delay_timer;
23 struct semaphore timer_lock;
24 struct work_struct flush_expired_bios;
25 struct list_head delayed_bios;
26 atomic_t may_delay;
27 mempool_t *delayed_pool;
28
29 struct dm_dev *dev_read;
30 sector_t start_read;
31 unsigned read_delay;
32 unsigned reads;
33
34 struct dm_dev *dev_write;
35 sector_t start_write;
36 unsigned write_delay;
37 unsigned writes;
38};
39
40struct delay_info {
41 struct delay_c *context;
42 struct list_head list;
43 struct bio *bio;
44 unsigned long expires;
45};
46
47static DEFINE_MUTEX(delayed_bios_lock);
48
49static struct workqueue_struct *kdelayd_wq;
50static struct kmem_cache *delayed_cache;
51
52static void handle_delayed_timer(unsigned long data)
53{
54 struct delay_c *dc = (struct delay_c *)data;
55
56 queue_work(kdelayd_wq, &dc->flush_expired_bios);
57}
58
59static void queue_timeout(struct delay_c *dc, unsigned long expires)
60{
61 down(&dc->timer_lock);
62
63 if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
64 mod_timer(&dc->delay_timer, expires);
65
66 up(&dc->timer_lock);
67}
68
69static void flush_bios(struct bio *bio)
70{
71 struct bio *n;
72
73 while (bio) {
74 n = bio->bi_next;
75 bio->bi_next = NULL;
76 generic_make_request(bio);
77 bio = n;
78 }
79}
80
81static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
82{
83 struct delay_info *delayed, *next;
84 unsigned long next_expires = 0;
85 int start_timer = 0;
86 BIO_LIST(flush_bios);
87
88 mutex_lock(&delayed_bios_lock);
89 list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
90 if (flush_all || time_after_eq(jiffies, delayed->expires)) {
91 list_del(&delayed->list);
92 bio_list_add(&flush_bios, delayed->bio);
93 if ((bio_data_dir(delayed->bio) == WRITE))
94 delayed->context->writes--;
95 else
96 delayed->context->reads--;
97 mempool_free(delayed, dc->delayed_pool);
98 continue;
99 }
100
101 if (!start_timer) {
102 start_timer = 1;
103 next_expires = delayed->expires;
104 } else
105 next_expires = min(next_expires, delayed->expires);
106 }
107
108 mutex_unlock(&delayed_bios_lock);
109
110 if (start_timer)
111 queue_timeout(dc, next_expires);
112
113 return bio_list_get(&flush_bios);
114}
115
116static void flush_expired_bios(struct work_struct *work)
117{
118 struct delay_c *dc;
119
120 dc = container_of(work, struct delay_c, flush_expired_bios);
121 flush_bios(flush_delayed_bios(dc, 0));
122}
123
124/*
125 * Mapping parameters:
126 * <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
127 *
128 * With separate write parameters, the first set is only used for reads.
129 * Delays are specified in milliseconds.
130 */
131static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
132{
133 struct delay_c *dc;
134 unsigned long long tmpll;
135
136 if (argc != 3 && argc != 6) {
137 ti->error = "requires exactly 3 or 6 arguments";
138 return -EINVAL;
139 }
140
141 dc = kmalloc(sizeof(*dc), GFP_KERNEL);
142 if (!dc) {
143 ti->error = "Cannot allocate context";
144 return -ENOMEM;
145 }
146
147 dc->reads = dc->writes = 0;
148
149 if (sscanf(argv[1], "%llu", &tmpll) != 1) {
150 ti->error = "Invalid device sector";
151 goto bad;
152 }
153 dc->start_read = tmpll;
154
155 if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
156 ti->error = "Invalid delay";
157 goto bad;
158 }
159
160 if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
161 dm_table_get_mode(ti->table), &dc->dev_read)) {
162 ti->error = "Device lookup failed";
163 goto bad;
164 }
165
166 if (argc == 3) {
167 dc->dev_write = NULL;
168 goto out;
169 }
170
171 if (sscanf(argv[4], "%llu", &tmpll) != 1) {
172 ti->error = "Invalid write device sector";
173 goto bad;
174 }
175 dc->start_write = tmpll;
176
177 if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
178 ti->error = "Invalid write delay";
179 goto bad;
180 }
181
182 if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
183 dm_table_get_mode(ti->table), &dc->dev_write)) {
184 ti->error = "Write device lookup failed";
185 dm_put_device(ti, dc->dev_read);
186 goto bad;
187 }
188
189out:
190 dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
191 if (!dc->delayed_pool) {
192 DMERR("Couldn't create delayed bio pool.");
193 goto bad;
194 }
195
196 init_timer(&dc->delay_timer);
197 dc->delay_timer.function = handle_delayed_timer;
198 dc->delay_timer.data = (unsigned long)dc;
199
200 INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
201 INIT_LIST_HEAD(&dc->delayed_bios);
202 init_MUTEX(&dc->timer_lock);
203 atomic_set(&dc->may_delay, 1);
204
205 ti->private = dc;
206 return 0;
207
208bad:
209 kfree(dc);
210 return -EINVAL;
211}
212
213static void delay_dtr(struct dm_target *ti)
214{
215 struct delay_c *dc = ti->private;
216
217 flush_workqueue(kdelayd_wq);
218
219 dm_put_device(ti, dc->dev_read);
220
221 if (dc->dev_write)
222 dm_put_device(ti, dc->dev_write);
223
224 mempool_destroy(dc->delayed_pool);
225 kfree(dc);
226}
227
228static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
229{
230 struct delay_info *delayed;
231 unsigned long expires = 0;
232
233 if (!delay || !atomic_read(&dc->may_delay))
234 return 1;
235
236 delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);
237
238 delayed->context = dc;
239 delayed->bio = bio;
240 delayed->expires = expires = jiffies + (delay * HZ / 1000);
241
242 mutex_lock(&delayed_bios_lock);
243
244 if (bio_data_dir(bio) == WRITE)
245 dc->writes++;
246 else
247 dc->reads++;
248
249 list_add_tail(&delayed->list, &dc->delayed_bios);
250
251 mutex_unlock(&delayed_bios_lock);
252
253 queue_timeout(dc, expires);
254
255 return 0;
256}
257
258static void delay_presuspend(struct dm_target *ti)
259{
260 struct delay_c *dc = ti->private;
261
262 atomic_set(&dc->may_delay, 0);
263 del_timer_sync(&dc->delay_timer);
264 flush_bios(flush_delayed_bios(dc, 1));
265}
266
267static void delay_resume(struct dm_target *ti)
268{
269 struct delay_c *dc = ti->private;
270
271 atomic_set(&dc->may_delay, 1);
272}
273
274static int delay_map(struct dm_target *ti, struct bio *bio,
275 union map_info *map_context)
276{
277 struct delay_c *dc = ti->private;
278
279 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
280 bio->bi_bdev = dc->dev_write->bdev;
281 bio->bi_sector = dc->start_write +
282 (bio->bi_sector - ti->begin);
283
284 return delay_bio(dc, dc->write_delay, bio);
285 }
286
287 bio->bi_bdev = dc->dev_read->bdev;
288 bio->bi_sector = dc->start_read +
289 (bio->bi_sector - ti->begin);
290
291 return delay_bio(dc, dc->read_delay, bio);
292}
293
294static int delay_status(struct dm_target *ti, status_type_t type,
295 char *result, unsigned maxlen)
296{
297 struct delay_c *dc = ti->private;
298 int sz = 0;
299
300 switch (type) {
301 case STATUSTYPE_INFO:
302 DMEMIT("%u %u", dc->reads, dc->writes);
303 break;
304
305 case STATUSTYPE_TABLE:
306 DMEMIT("%s %llu %u", dc->dev_read->name,
307 (unsigned long long) dc->start_read,
308 dc->read_delay);
309 if (dc->dev_write)
310 DMEMIT("%s %llu %u", dc->dev_write->name,
311 (unsigned long long) dc->start_write,
312 dc->write_delay);
313 break;
314 }
315
316 return 0;
317}
318
319static struct target_type delay_target = {
320 .name = "delay",
321 .version = {1, 0, 2},
322 .module = THIS_MODULE,
323 .ctr = delay_ctr,
324 .dtr = delay_dtr,
325 .map = delay_map,
326 .presuspend = delay_presuspend,
327 .resume = delay_resume,
328 .status = delay_status,
329};
330
331static int __init dm_delay_init(void)
332{
333 int r = -ENOMEM;
334
335 kdelayd_wq = create_workqueue("kdelayd");
336 if (!kdelayd_wq) {
337 DMERR("Couldn't start kdelayd");
338 goto bad_queue;
339 }
340
341 delayed_cache = kmem_cache_create("dm-delay",
342 sizeof(struct delay_info),
343 __alignof__(struct delay_info),
344 0, NULL, NULL);
345 if (!delayed_cache) {
346 DMERR("Couldn't create delayed bio cache.");
347 goto bad_memcache;
348 }
349
350 r = dm_register_target(&delay_target);
351 if (r < 0) {
352 DMERR("register failed %d", r);
353 goto bad_register;
354 }
355
356 return 0;
357
358bad_register:
359 kmem_cache_destroy(delayed_cache);
360bad_memcache:
361 destroy_workqueue(kdelayd_wq);
362bad_queue:
363 return r;
364}
365
366static void __exit dm_delay_exit(void)
367{
368 int r = dm_unregister_target(&delay_target);
369
370 if (r < 0)
371 DMERR("unregister failed %d", r);
372
373 kmem_cache_destroy(delayed_cache);
374 destroy_workqueue(kdelayd_wq);
375}
376
377/* Module hooks */
378module_init(dm_delay_init);
379module_exit(dm_delay_exit);
380
381MODULE_DESCRIPTION(DM_NAME " delay target");
382MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
383MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 99cdffa7fbfe..07e0a0c84f6e 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * dm-snapshot.c 2 * dm-exception-store.c
3 * 3 *
4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited. 4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5 * Copyright (C) 2006 Red Hat GmbH
5 * 6 *
6 * This file is released under the GPL. 7 * This file is released under the GPL.
7 */ 8 */
@@ -123,6 +124,7 @@ struct pstore {
123 atomic_t pending_count; 124 atomic_t pending_count;
124 uint32_t callback_count; 125 uint32_t callback_count;
125 struct commit_callback *callbacks; 126 struct commit_callback *callbacks;
127 struct dm_io_client *io_client;
126}; 128};
127 129
128static inline unsigned int sectors_to_pages(unsigned int sectors) 130static inline unsigned int sectors_to_pages(unsigned int sectors)
@@ -159,14 +161,20 @@ static void free_area(struct pstore *ps)
159 */ 161 */
160static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) 162static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
161{ 163{
162 struct io_region where; 164 struct io_region where = {
163 unsigned long bits; 165 .bdev = ps->snap->cow->bdev,
164 166 .sector = ps->snap->chunk_size * chunk,
165 where.bdev = ps->snap->cow->bdev; 167 .count = ps->snap->chunk_size,
166 where.sector = ps->snap->chunk_size * chunk; 168 };
167 where.count = ps->snap->chunk_size; 169 struct dm_io_request io_req = {
168 170 .bi_rw = rw,
169 return dm_io_sync_vm(1, &where, rw, ps->area, &bits); 171 .mem.type = DM_IO_VMA,
172 .mem.ptr.vma = ps->area,
173 .client = ps->io_client,
174 .notify.fn = NULL,
175 };
176
177 return dm_io(&io_req, 1, &where, NULL);
170} 178}
171 179
172/* 180/*
@@ -213,17 +221,18 @@ static int read_header(struct pstore *ps, int *new_snapshot)
213 chunk_size_supplied = 0; 221 chunk_size_supplied = 0;
214 } 222 }
215 223
216 r = dm_io_get(sectors_to_pages(ps->snap->chunk_size)); 224 ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
217 if (r) 225 chunk_size));
218 return r; 226 if (IS_ERR(ps->io_client))
227 return PTR_ERR(ps->io_client);
219 228
220 r = alloc_area(ps); 229 r = alloc_area(ps);
221 if (r) 230 if (r)
222 goto bad1; 231 return r;
223 232
224 r = chunk_io(ps, 0, READ); 233 r = chunk_io(ps, 0, READ);
225 if (r) 234 if (r)
226 goto bad2; 235 goto bad;
227 236
228 dh = (struct disk_header *) ps->area; 237 dh = (struct disk_header *) ps->area;
229 238
@@ -235,7 +244,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
235 if (le32_to_cpu(dh->magic) != SNAP_MAGIC) { 244 if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
236 DMWARN("Invalid or corrupt snapshot"); 245 DMWARN("Invalid or corrupt snapshot");
237 r = -ENXIO; 246 r = -ENXIO;
238 goto bad2; 247 goto bad;
239 } 248 }
240 249
241 *new_snapshot = 0; 250 *new_snapshot = 0;
@@ -252,27 +261,22 @@ static int read_header(struct pstore *ps, int *new_snapshot)
252 (unsigned long long)ps->snap->chunk_size); 261 (unsigned long long)ps->snap->chunk_size);
253 262
254 /* We had a bogus chunk_size. Fix stuff up. */ 263 /* We had a bogus chunk_size. Fix stuff up. */
255 dm_io_put(sectors_to_pages(ps->snap->chunk_size));
256 free_area(ps); 264 free_area(ps);
257 265
258 ps->snap->chunk_size = chunk_size; 266 ps->snap->chunk_size = chunk_size;
259 ps->snap->chunk_mask = chunk_size - 1; 267 ps->snap->chunk_mask = chunk_size - 1;
260 ps->snap->chunk_shift = ffs(chunk_size) - 1; 268 ps->snap->chunk_shift = ffs(chunk_size) - 1;
261 269
262 r = dm_io_get(sectors_to_pages(chunk_size)); 270 r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
271 ps->io_client);
263 if (r) 272 if (r)
264 return r; 273 return r;
265 274
266 r = alloc_area(ps); 275 r = alloc_area(ps);
267 if (r) 276 return r;
268 goto bad1;
269
270 return 0;
271 277
272bad2: 278bad:
273 free_area(ps); 279 free_area(ps);
274bad1:
275 dm_io_put(sectors_to_pages(ps->snap->chunk_size));
276 return r; 280 return r;
277} 281}
278 282
@@ -405,7 +409,7 @@ static void persistent_destroy(struct exception_store *store)
405{ 409{
406 struct pstore *ps = get_info(store); 410 struct pstore *ps = get_info(store);
407 411
408 dm_io_put(sectors_to_pages(ps->snap->chunk_size)); 412 dm_io_client_destroy(ps->io_client);
409 vfree(ps->callbacks); 413 vfree(ps->callbacks);
410 free_area(ps); 414 free_area(ps);
411 kfree(ps); 415 kfree(ps);
diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
index 32eff28e4adc..e0832e6fcf36 100644
--- a/drivers/md/dm-hw-handler.h
+++ b/drivers/md/dm-hw-handler.h
@@ -16,6 +16,7 @@
16struct hw_handler_type; 16struct hw_handler_type;
17struct hw_handler { 17struct hw_handler {
18 struct hw_handler_type *type; 18 struct hw_handler_type *type;
19 struct mapped_device *md;
19 void *context; 20 void *context;
20}; 21};
21 22
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 8bdc8a87b249..352c6fbeac53 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2003 Sistina Software 2 * Copyright (C) 2003 Sistina Software
3 * Copyright (C) 2006 Red Hat GmbH
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 */ 6 */
@@ -12,13 +13,17 @@
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14 15
15static struct bio_set *_bios; 16struct dm_io_client {
17 mempool_t *pool;
18 struct bio_set *bios;
19};
16 20
17/* FIXME: can we shrink this ? */ 21/* FIXME: can we shrink this ? */
18struct io { 22struct io {
19 unsigned long error; 23 unsigned long error;
20 atomic_t count; 24 atomic_t count;
21 struct task_struct *sleeper; 25 struct task_struct *sleeper;
26 struct dm_io_client *client;
22 io_notify_fn callback; 27 io_notify_fn callback;
23 void *context; 28 void *context;
24}; 29};
@@ -26,63 +31,58 @@ struct io {
26/* 31/*
27 * io contexts are only dynamically allocated for asynchronous 32 * io contexts are only dynamically allocated for asynchronous
28 * io. Since async io is likely to be the majority of io we'll 33 * io. Since async io is likely to be the majority of io we'll
29 * have the same number of io contexts as buffer heads ! (FIXME: 34 * have the same number of io contexts as bios! (FIXME: must reduce this).
30 * must reduce this).
31 */ 35 */
32static unsigned _num_ios;
33static mempool_t *_io_pool;
34 36
35static unsigned int pages_to_ios(unsigned int pages) 37static unsigned int pages_to_ios(unsigned int pages)
36{ 38{
37 return 4 * pages; /* too many ? */ 39 return 4 * pages; /* too many ? */
38} 40}
39 41
40static int resize_pool(unsigned int new_ios) 42/*
43 * Create a client with mempool and bioset.
44 */
45struct dm_io_client *dm_io_client_create(unsigned num_pages)
41{ 46{
42 int r = 0; 47 unsigned ios = pages_to_ios(num_pages);
43 48 struct dm_io_client *client;
44 if (_io_pool) {
45 if (new_ios == 0) {
46 /* free off the pool */
47 mempool_destroy(_io_pool);
48 _io_pool = NULL;
49 bioset_free(_bios);
50
51 } else {
52 /* resize the pool */
53 r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
54 }
55 49
56 } else { 50 client = kmalloc(sizeof(*client), GFP_KERNEL);
57 /* create new pool */ 51 if (!client)
58 _io_pool = mempool_create_kmalloc_pool(new_ios, 52 return ERR_PTR(-ENOMEM);
59 sizeof(struct io)); 53
60 if (!_io_pool) 54 client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
61 return -ENOMEM; 55 if (!client->pool)
62 56 goto bad;
63 _bios = bioset_create(16, 16);
64 if (!_bios) {
65 mempool_destroy(_io_pool);
66 _io_pool = NULL;
67 return -ENOMEM;
68 }
69 }
70 57
71 if (!r) 58 client->bios = bioset_create(16, 16);
72 _num_ios = new_ios; 59 if (!client->bios)
60 goto bad;
73 61
74 return r; 62 return client;
63
64 bad:
65 if (client->pool)
66 mempool_destroy(client->pool);
67 kfree(client);
68 return ERR_PTR(-ENOMEM);
75} 69}
70EXPORT_SYMBOL(dm_io_client_create);
76 71
77int dm_io_get(unsigned int num_pages) 72int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
78{ 73{
79 return resize_pool(_num_ios + pages_to_ios(num_pages)); 74 return mempool_resize(client->pool, pages_to_ios(num_pages),
75 GFP_KERNEL);
80} 76}
77EXPORT_SYMBOL(dm_io_client_resize);
81 78
82void dm_io_put(unsigned int num_pages) 79void dm_io_client_destroy(struct dm_io_client *client)
83{ 80{
84 resize_pool(_num_ios - pages_to_ios(num_pages)); 81 mempool_destroy(client->pool);
82 bioset_free(client->bios);
83 kfree(client);
85} 84}
85EXPORT_SYMBOL(dm_io_client_destroy);
86 86
87/*----------------------------------------------------------------- 87/*-----------------------------------------------------------------
88 * We need to keep track of which region a bio is doing io for. 88 * We need to keep track of which region a bio is doing io for.
@@ -118,7 +118,7 @@ static void dec_count(struct io *io, unsigned int region, int error)
118 io_notify_fn fn = io->callback; 118 io_notify_fn fn = io->callback;
119 void *context = io->context; 119 void *context = io->context;
120 120
121 mempool_free(io, _io_pool); 121 mempool_free(io, io->client->pool);
122 fn(r, context); 122 fn(r, context);
123 } 123 }
124 } 124 }
@@ -126,7 +126,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
126 126
127static int endio(struct bio *bio, unsigned int done, int error) 127static int endio(struct bio *bio, unsigned int done, int error)
128{ 128{
129 struct io *io = (struct io *) bio->bi_private; 129 struct io *io;
130 unsigned region;
130 131
131 /* keep going until we've finished */ 132 /* keep going until we've finished */
132 if (bio->bi_size) 133 if (bio->bi_size)
@@ -135,10 +136,17 @@ static int endio(struct bio *bio, unsigned int done, int error)
135 if (error && bio_data_dir(bio) == READ) 136 if (error && bio_data_dir(bio) == READ)
136 zero_fill_bio(bio); 137 zero_fill_bio(bio);
137 138
138 dec_count(io, bio_get_region(bio), error); 139 /*
140 * The bio destructor in bio_put() may use the io object.
141 */
142 io = bio->bi_private;
143 region = bio_get_region(bio);
144
139 bio->bi_max_vecs++; 145 bio->bi_max_vecs++;
140 bio_put(bio); 146 bio_put(bio);
141 147
148 dec_count(io, region, error);
149
142 return 0; 150 return 0;
143} 151}
144 152
@@ -209,6 +217,9 @@ static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
209 dp->context_ptr = bvec; 217 dp->context_ptr = bvec;
210} 218}
211 219
220/*
221 * Functions for getting the pages from a VMA.
222 */
212static void vm_get_page(struct dpages *dp, 223static void vm_get_page(struct dpages *dp,
213 struct page **p, unsigned long *len, unsigned *offset) 224 struct page **p, unsigned long *len, unsigned *offset)
214{ 225{
@@ -233,7 +244,34 @@ static void vm_dp_init(struct dpages *dp, void *data)
233 244
234static void dm_bio_destructor(struct bio *bio) 245static void dm_bio_destructor(struct bio *bio)
235{ 246{
236 bio_free(bio, _bios); 247 struct io *io = bio->bi_private;
248
249 bio_free(bio, io->client->bios);
250}
251
252/*
253 * Functions for getting the pages from kernel memory.
254 */
255static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
256 unsigned *offset)
257{
258 *p = virt_to_page(dp->context_ptr);
259 *offset = dp->context_u;
260 *len = PAGE_SIZE - dp->context_u;
261}
262
263static void km_next_page(struct dpages *dp)
264{
265 dp->context_ptr += PAGE_SIZE - dp->context_u;
266 dp->context_u = 0;
267}
268
269static void km_dp_init(struct dpages *dp, void *data)
270{
271 dp->get_page = km_get_page;
272 dp->next_page = km_next_page;
273 dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
274 dp->context_ptr = data;
237} 275}
238 276
239/*----------------------------------------------------------------- 277/*-----------------------------------------------------------------
@@ -256,7 +294,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
256 * to hide it from bio_add_page(). 294 * to hide it from bio_add_page().
257 */ 295 */
258 num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2; 296 num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
259 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios); 297 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
260 bio->bi_sector = where->sector + (where->count - remaining); 298 bio->bi_sector = where->sector + (where->count - remaining);
261 bio->bi_bdev = where->bdev; 299 bio->bi_bdev = where->bdev;
262 bio->bi_end_io = endio; 300 bio->bi_end_io = endio;
@@ -311,8 +349,9 @@ static void dispatch_io(int rw, unsigned int num_regions,
311 dec_count(io, 0, 0); 349 dec_count(io, 0, 0);
312} 350}
313 351
314static int sync_io(unsigned int num_regions, struct io_region *where, 352static int sync_io(struct dm_io_client *client, unsigned int num_regions,
315 int rw, struct dpages *dp, unsigned long *error_bits) 353 struct io_region *where, int rw, struct dpages *dp,
354 unsigned long *error_bits)
316{ 355{
317 struct io io; 356 struct io io;
318 357
@@ -324,6 +363,7 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
324 io.error = 0; 363 io.error = 0;
325 atomic_set(&io.count, 1); /* see dispatch_io() */ 364 atomic_set(&io.count, 1); /* see dispatch_io() */
326 io.sleeper = current; 365 io.sleeper = current;
366 io.client = client;
327 367
328 dispatch_io(rw, num_regions, where, dp, &io, 1); 368 dispatch_io(rw, num_regions, where, dp, &io, 1);
329 369
@@ -340,12 +380,15 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
340 if (atomic_read(&io.count)) 380 if (atomic_read(&io.count))
341 return -EINTR; 381 return -EINTR;
342 382
343 *error_bits = io.error; 383 if (error_bits)
384 *error_bits = io.error;
385
344 return io.error ? -EIO : 0; 386 return io.error ? -EIO : 0;
345} 387}
346 388
347static int async_io(unsigned int num_regions, struct io_region *where, int rw, 389static int async_io(struct dm_io_client *client, unsigned int num_regions,
348 struct dpages *dp, io_notify_fn fn, void *context) 390 struct io_region *where, int rw, struct dpages *dp,
391 io_notify_fn fn, void *context)
349{ 392{
350 struct io *io; 393 struct io *io;
351 394
@@ -355,10 +398,11 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
355 return -EIO; 398 return -EIO;
356 } 399 }
357 400
358 io = mempool_alloc(_io_pool, GFP_NOIO); 401 io = mempool_alloc(client->pool, GFP_NOIO);
359 io->error = 0; 402 io->error = 0;
360 atomic_set(&io->count, 1); /* see dispatch_io() */ 403 atomic_set(&io->count, 1); /* see dispatch_io() */
361 io->sleeper = NULL; 404 io->sleeper = NULL;
405 io->client = client;
362 io->callback = fn; 406 io->callback = fn;
363 io->context = context; 407 io->context = context;
364 408
@@ -366,61 +410,51 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
366 return 0; 410 return 0;
367} 411}
368 412
369int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, 413static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
370 struct page_list *pl, unsigned int offset,
371 unsigned long *error_bits)
372{ 414{
373 struct dpages dp; 415 /* Set up dpages based on memory type */
374 list_dp_init(&dp, pl, offset); 416 switch (io_req->mem.type) {
375 return sync_io(num_regions, where, rw, &dp, error_bits); 417 case DM_IO_PAGE_LIST:
376} 418 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
419 break;
420
421 case DM_IO_BVEC:
422 bvec_dp_init(dp, io_req->mem.ptr.bvec);
423 break;
424
425 case DM_IO_VMA:
426 vm_dp_init(dp, io_req->mem.ptr.vma);
427 break;
428
429 case DM_IO_KMEM:
430 km_dp_init(dp, io_req->mem.ptr.addr);
431 break;
432
433 default:
434 return -EINVAL;
435 }
377 436
378int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw, 437 return 0;
379 struct bio_vec *bvec, unsigned long *error_bits)
380{
381 struct dpages dp;
382 bvec_dp_init(&dp, bvec);
383 return sync_io(num_regions, where, rw, &dp, error_bits);
384} 438}
385 439
386int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, 440/*
387 void *data, unsigned long *error_bits) 441 * New collapsed (a)synchronous interface
442 */
443int dm_io(struct dm_io_request *io_req, unsigned num_regions,
444 struct io_region *where, unsigned long *sync_error_bits)
388{ 445{
446 int r;
389 struct dpages dp; 447 struct dpages dp;
390 vm_dp_init(&dp, data);
391 return sync_io(num_regions, where, rw, &dp, error_bits);
392}
393 448
394int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, 449 r = dp_init(io_req, &dp);
395 struct page_list *pl, unsigned int offset, 450 if (r)
396 io_notify_fn fn, void *context) 451 return r;
397{
398 struct dpages dp;
399 list_dp_init(&dp, pl, offset);
400 return async_io(num_regions, where, rw, &dp, fn, context);
401}
402 452
403int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw, 453 if (!io_req->notify.fn)
404 struct bio_vec *bvec, io_notify_fn fn, void *context) 454 return sync_io(io_req->client, num_regions, where,
405{ 455 io_req->bi_rw, &dp, sync_error_bits);
406 struct dpages dp;
407 bvec_dp_init(&dp, bvec);
408 return async_io(num_regions, where, rw, &dp, fn, context);
409}
410 456
411int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, 457 return async_io(io_req->client, num_regions, where, io_req->bi_rw,
412 void *data, io_notify_fn fn, void *context) 458 &dp, io_req->notify.fn, io_req->notify.context);
413{
414 struct dpages dp;
415 vm_dp_init(&dp, data);
416 return async_io(num_regions, where, rw, &dp, fn, context);
417} 459}
418 460EXPORT_SYMBOL(dm_io);
419EXPORT_SYMBOL(dm_io_get);
420EXPORT_SYMBOL(dm_io_put);
421EXPORT_SYMBOL(dm_io_sync);
422EXPORT_SYMBOL(dm_io_async);
423EXPORT_SYMBOL(dm_io_sync_bvec);
424EXPORT_SYMBOL(dm_io_async_bvec);
425EXPORT_SYMBOL(dm_io_sync_vm);
426EXPORT_SYMBOL(dm_io_async_vm);
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index f9035bfd1a9f..f647e2cceaa6 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -12,7 +12,7 @@
12struct io_region { 12struct io_region {
13 struct block_device *bdev; 13 struct block_device *bdev;
14 sector_t sector; 14 sector_t sector;
15 sector_t count; 15 sector_t count; /* If this is zero the region is ignored. */
16}; 16};
17 17
18struct page_list { 18struct page_list {
@@ -20,55 +20,60 @@ struct page_list {
20 struct page *page; 20 struct page *page;
21}; 21};
22 22
23
24/*
25 * 'error' is a bitset, with each bit indicating whether an error
26 * occurred doing io to the corresponding region.
27 */
28typedef void (*io_notify_fn)(unsigned long error, void *context); 23typedef void (*io_notify_fn)(unsigned long error, void *context);
29 24
25enum dm_io_mem_type {
26 DM_IO_PAGE_LIST,/* Page list */
27 DM_IO_BVEC, /* Bio vector */
28 DM_IO_VMA, /* Virtual memory area */
29 DM_IO_KMEM, /* Kernel memory */
30};
31
32struct dm_io_memory {
33 enum dm_io_mem_type type;
34
35 union {
36 struct page_list *pl;
37 struct bio_vec *bvec;
38 void *vma;
39 void *addr;
40 } ptr;
41
42 unsigned offset;
43};
44
45struct dm_io_notify {
46 io_notify_fn fn; /* Callback for asynchronous requests */
47 void *context; /* Passed to callback */
48};
30 49
31/* 50/*
32 * Before anyone uses the IO interface they should call 51 * IO request structure
33 * dm_io_get(), specifying roughly how many pages they are
34 * expecting to perform io on concurrently.
35 *
36 * This function may block.
37 */ 52 */
38int dm_io_get(unsigned int num_pages); 53struct dm_io_client;
39void dm_io_put(unsigned int num_pages); 54struct dm_io_request {
55 int bi_rw; /* READ|WRITE - not READA */
56 struct dm_io_memory mem; /* Memory to use for io */
57 struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
58 struct dm_io_client *client; /* Client memory handler */
59};
40 60
41/* 61/*
42 * Synchronous IO. 62 * For async io calls, users can alternatively use the dm_io() function below
63 * and dm_io_client_create() to create private mempools for the client.
43 * 64 *
44 * Please ensure that the rw flag in the next two functions is 65 * Create/destroy may block.
45 * either READ or WRITE, ie. we don't take READA. Any
46 * regions with a zero count field will be ignored.
47 */ 66 */
48int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, 67struct dm_io_client *dm_io_client_create(unsigned num_pages);
49 struct page_list *pl, unsigned int offset, 68int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
50 unsigned long *error_bits); 69void dm_io_client_destroy(struct dm_io_client *client);
51
52int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
53 struct bio_vec *bvec, unsigned long *error_bits);
54
55int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
56 void *data, unsigned long *error_bits);
57 70
58/* 71/*
59 * Aynchronous IO. 72 * IO interface using private per-client pools.
60 * 73 * Each bit in the optional 'sync_error_bits' bitset indicates whether an
61 * The 'where' array may be safely allocated on the stack since 74 * error occurred doing io to the corresponding region.
62 * the function takes a copy.
63 */ 75 */
64int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, 76int dm_io(struct dm_io_request *io_req, unsigned num_regions,
65 struct page_list *pl, unsigned int offset, 77 struct io_region *region, unsigned long *sync_error_bits);
66 io_notify_fn fn, void *context);
67
68int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
69 struct bio_vec *bvec, io_notify_fn fn, void *context);
70
71int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
72 void *data, io_notify_fn fn, void *context);
73 78
74#endif 79#endif
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6a9261351848..a66428d860fe 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -149,9 +149,12 @@ struct log_c {
149 FORCESYNC, /* Force a sync to happen */ 149 FORCESYNC, /* Force a sync to happen */
150 } sync; 150 } sync;
151 151
152 struct dm_io_request io_req;
153
152 /* 154 /*
153 * Disk log fields 155 * Disk log fields
154 */ 156 */
157 int log_dev_failed;
155 struct dm_dev *log_dev; 158 struct dm_dev *log_dev;
156 struct log_header header; 159 struct log_header header;
157 160
@@ -199,13 +202,20 @@ static void header_from_disk(struct log_header *core, struct log_header *disk)
199 core->nr_regions = le64_to_cpu(disk->nr_regions); 202 core->nr_regions = le64_to_cpu(disk->nr_regions);
200} 203}
201 204
205static int rw_header(struct log_c *lc, int rw)
206{
207 lc->io_req.bi_rw = rw;
208 lc->io_req.mem.ptr.vma = lc->disk_header;
209 lc->io_req.notify.fn = NULL;
210
211 return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
212}
213
202static int read_header(struct log_c *log) 214static int read_header(struct log_c *log)
203{ 215{
204 int r; 216 int r;
205 unsigned long ebits;
206 217
207 r = dm_io_sync_vm(1, &log->header_location, READ, 218 r = rw_header(log, READ);
208 log->disk_header, &ebits);
209 if (r) 219 if (r)
210 return r; 220 return r;
211 221
@@ -233,11 +243,8 @@ static int read_header(struct log_c *log)
233 243
234static inline int write_header(struct log_c *log) 244static inline int write_header(struct log_c *log)
235{ 245{
236 unsigned long ebits;
237
238 header_to_disk(&log->header, log->disk_header); 246 header_to_disk(&log->header, log->disk_header);
239 return dm_io_sync_vm(1, &log->header_location, WRITE, 247 return rw_header(log, WRITE);
240 log->disk_header, &ebits);
241} 248}
242 249
243/*---------------------------------------------------------------- 250/*----------------------------------------------------------------
@@ -256,6 +263,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
256 uint32_t region_size; 263 uint32_t region_size;
257 unsigned int region_count; 264 unsigned int region_count;
258 size_t bitset_size, buf_size; 265 size_t bitset_size, buf_size;
266 int r;
259 267
260 if (argc < 1 || argc > 2) { 268 if (argc < 1 || argc > 2) {
261 DMWARN("wrong number of arguments to mirror log"); 269 DMWARN("wrong number of arguments to mirror log");
@@ -315,6 +323,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
315 lc->disk_header = NULL; 323 lc->disk_header = NULL;
316 } else { 324 } else {
317 lc->log_dev = dev; 325 lc->log_dev = dev;
326 lc->log_dev_failed = 0;
318 lc->header_location.bdev = lc->log_dev->bdev; 327 lc->header_location.bdev = lc->log_dev->bdev;
319 lc->header_location.sector = 0; 328 lc->header_location.sector = 0;
320 329
@@ -324,6 +333,15 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
324 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + 333 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
325 bitset_size, ti->limits.hardsect_size); 334 bitset_size, ti->limits.hardsect_size);
326 lc->header_location.count = buf_size >> SECTOR_SHIFT; 335 lc->header_location.count = buf_size >> SECTOR_SHIFT;
336 lc->io_req.mem.type = DM_IO_VMA;
337 lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
338 PAGE_SIZE));
339 if (IS_ERR(lc->io_req.client)) {
340 r = PTR_ERR(lc->io_req.client);
341 DMWARN("couldn't allocate disk io client");
342 kfree(lc);
343 return -ENOMEM;
344 }
327 345
328 lc->disk_header = vmalloc(buf_size); 346 lc->disk_header = vmalloc(buf_size);
329 if (!lc->disk_header) { 347 if (!lc->disk_header) {
@@ -424,6 +442,7 @@ static void disk_dtr(struct dirty_log *log)
424 442
425 dm_put_device(lc->ti, lc->log_dev); 443 dm_put_device(lc->ti, lc->log_dev);
426 vfree(lc->disk_header); 444 vfree(lc->disk_header);
445 dm_io_client_destroy(lc->io_req.client);
427 destroy_log_context(lc); 446 destroy_log_context(lc);
428} 447}
429 448
@@ -437,6 +456,15 @@ static int count_bits32(uint32_t *addr, unsigned size)
437 return count; 456 return count;
438} 457}
439 458
459static void fail_log_device(struct log_c *lc)
460{
461 if (lc->log_dev_failed)
462 return;
463
464 lc->log_dev_failed = 1;
465 dm_table_event(lc->ti->table);
466}
467
440static int disk_resume(struct dirty_log *log) 468static int disk_resume(struct dirty_log *log)
441{ 469{
442 int r; 470 int r;
@@ -446,8 +474,19 @@ static int disk_resume(struct dirty_log *log)
446 474
447 /* read the disk header */ 475 /* read the disk header */
448 r = read_header(lc); 476 r = read_header(lc);
449 if (r) 477 if (r) {
450 return r; 478 DMWARN("%s: Failed to read header on mirror log device",
479 lc->log_dev->name);
480 fail_log_device(lc);
481 /*
482 * If the log device cannot be read, we must assume
483 * all regions are out-of-sync. If we simply return
484 * here, the state will be uninitialized and could
485 * lead us to return 'in-sync' status for regions
486 * that are actually 'out-of-sync'.
487 */
488 lc->header.nr_regions = 0;
489 }
451 490
452 /* set or clear any new bits -- device has grown */ 491 /* set or clear any new bits -- device has grown */
453 if (lc->sync == NOSYNC) 492 if (lc->sync == NOSYNC)
@@ -472,7 +511,14 @@ static int disk_resume(struct dirty_log *log)
472 lc->header.nr_regions = lc->region_count; 511 lc->header.nr_regions = lc->region_count;
473 512
474 /* write the new header */ 513 /* write the new header */
475 return write_header(lc); 514 r = write_header(lc);
515 if (r) {
516 DMWARN("%s: Failed to write header on mirror log device",
517 lc->log_dev->name);
518 fail_log_device(lc);
519 }
520
521 return r;
476} 522}
477 523
478static uint32_t core_get_region_size(struct dirty_log *log) 524static uint32_t core_get_region_size(struct dirty_log *log)
@@ -516,7 +562,9 @@ static int disk_flush(struct dirty_log *log)
516 return 0; 562 return 0;
517 563
518 r = write_header(lc); 564 r = write_header(lc);
519 if (!r) 565 if (r)
566 fail_log_device(lc);
567 else
520 lc->touched = 0; 568 lc->touched = 0;
521 569
522 return r; 570 return r;
@@ -591,6 +639,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
591 639
592 switch(status) { 640 switch(status) {
593 case STATUSTYPE_INFO: 641 case STATUSTYPE_INFO:
642 DMEMIT("1 %s", log->type->name);
594 break; 643 break;
595 644
596 case STATUSTYPE_TABLE: 645 case STATUSTYPE_TABLE:
@@ -606,17 +655,17 @@ static int disk_status(struct dirty_log *log, status_type_t status,
606 char *result, unsigned int maxlen) 655 char *result, unsigned int maxlen)
607{ 656{
608 int sz = 0; 657 int sz = 0;
609 char buffer[16];
610 struct log_c *lc = log->context; 658 struct log_c *lc = log->context;
611 659
612 switch(status) { 660 switch(status) {
613 case STATUSTYPE_INFO: 661 case STATUSTYPE_INFO:
662 DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
663 lc->log_dev_failed ? 'D' : 'A');
614 break; 664 break;
615 665
616 case STATUSTYPE_TABLE: 666 case STATUSTYPE_TABLE:
617 format_dev_t(buffer, lc->log_dev->bdev->bd_dev);
618 DMEMIT("%s %u %s %u ", log->type->name, 667 DMEMIT("%s %u %s %u ", log->type->name,
619 lc->sync == DEFAULTSYNC ? 2 : 3, buffer, 668 lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
620 lc->region_size); 669 lc->region_size);
621 DMEMIT_SYNC; 670 DMEMIT_SYNC;
622 } 671 }
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3aa013506967..de54b39e6ffe 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -668,6 +668,9 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
668 return -EINVAL; 668 return -EINVAL;
669 } 669 }
670 670
671 m->hw_handler.md = dm_table_get_md(ti->table);
672 dm_put(m->hw_handler.md);
673
671 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv); 674 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
672 if (r) { 675 if (r) {
673 dm_put_hw_handler(hwht); 676 dm_put_hw_handler(hwht);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 23a642619bed..ef124b71ccc8 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -21,15 +21,11 @@
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22 22
23#define DM_MSG_PREFIX "raid1" 23#define DM_MSG_PREFIX "raid1"
24#define DM_IO_PAGES 64
24 25
25static struct workqueue_struct *_kmirrord_wq; 26#define DM_RAID1_HANDLE_ERRORS 0x01
26static struct work_struct _kmirrord_work;
27static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
28 27
29static inline void wake(void) 28static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
30{
31 queue_work(_kmirrord_wq, &_kmirrord_work);
32}
33 29
34/*----------------------------------------------------------------- 30/*-----------------------------------------------------------------
35 * Region hash 31 * Region hash
@@ -125,17 +121,23 @@ struct mirror_set {
125 struct list_head list; 121 struct list_head list;
126 struct region_hash rh; 122 struct region_hash rh;
127 struct kcopyd_client *kcopyd_client; 123 struct kcopyd_client *kcopyd_client;
124 uint64_t features;
128 125
129 spinlock_t lock; /* protects the next two lists */ 126 spinlock_t lock; /* protects the next two lists */
130 struct bio_list reads; 127 struct bio_list reads;
131 struct bio_list writes; 128 struct bio_list writes;
132 129
130 struct dm_io_client *io_client;
131
133 /* recovery */ 132 /* recovery */
134 region_t nr_regions; 133 region_t nr_regions;
135 int in_sync; 134 int in_sync;
136 135
137 struct mirror *default_mirror; /* Default mirror */ 136 struct mirror *default_mirror; /* Default mirror */
138 137
138 struct workqueue_struct *kmirrord_wq;
139 struct work_struct kmirrord_work;
140
139 unsigned int nr_mirrors; 141 unsigned int nr_mirrors;
140 struct mirror mirror[0]; 142 struct mirror mirror[0];
141}; 143};
@@ -153,6 +155,11 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
153 return region << rh->region_shift; 155 return region << rh->region_shift;
154} 156}
155 157
158static void wake(struct mirror_set *ms)
159{
160 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
161}
162
156/* FIXME move this */ 163/* FIXME move this */
157static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 164static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
158 165
@@ -398,8 +405,7 @@ static void rh_update_states(struct region_hash *rh)
398 mempool_free(reg, rh->region_pool); 405 mempool_free(reg, rh->region_pool);
399 } 406 }
400 407
401 if (!list_empty(&recovered)) 408 rh->log->type->flush(rh->log);
402 rh->log->type->flush(rh->log);
403 409
404 list_for_each_entry_safe (reg, next, &clean, list) 410 list_for_each_entry_safe (reg, next, &clean, list)
405 mempool_free(reg, rh->region_pool); 411 mempool_free(reg, rh->region_pool);
@@ -471,7 +477,7 @@ static void rh_dec(struct region_hash *rh, region_t region)
471 spin_unlock_irqrestore(&rh->region_lock, flags); 477 spin_unlock_irqrestore(&rh->region_lock, flags);
472 478
473 if (should_wake) 479 if (should_wake)
474 wake(); 480 wake(rh->ms);
475} 481}
476 482
477/* 483/*
@@ -558,7 +564,7 @@ static void rh_recovery_end(struct region *reg, int success)
558 list_add(&reg->list, &reg->rh->recovered_regions); 564 list_add(&reg->list, &reg->rh->recovered_regions);
559 spin_unlock_irq(&rh->region_lock); 565 spin_unlock_irq(&rh->region_lock);
560 566
561 wake(); 567 wake(rh->ms);
562} 568}
563 569
564static void rh_flush(struct region_hash *rh) 570static void rh_flush(struct region_hash *rh)
@@ -592,7 +598,7 @@ static void rh_start_recovery(struct region_hash *rh)
592 for (i = 0; i < MAX_RECOVERY; i++) 598 for (i = 0; i < MAX_RECOVERY; i++)
593 up(&rh->recovery_count); 599 up(&rh->recovery_count);
594 600
595 wake(); 601 wake(rh->ms);
596} 602}
597 603
598/* 604/*
@@ -735,7 +741,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
735 /* 741 /*
736 * We can only read balance if the region is in sync. 742 * We can only read balance if the region is in sync.
737 */ 743 */
738 if (rh_in_sync(&ms->rh, region, 0)) 744 if (rh_in_sync(&ms->rh, region, 1))
739 m = choose_mirror(ms, bio->bi_sector); 745 m = choose_mirror(ms, bio->bi_sector);
740 else 746 else
741 m = ms->default_mirror; 747 m = ms->default_mirror;
@@ -792,6 +798,14 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
792 unsigned int i; 798 unsigned int i;
793 struct io_region io[KCOPYD_MAX_REGIONS+1]; 799 struct io_region io[KCOPYD_MAX_REGIONS+1];
794 struct mirror *m; 800 struct mirror *m;
801 struct dm_io_request io_req = {
802 .bi_rw = WRITE,
803 .mem.type = DM_IO_BVEC,
804 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
805 .notify.fn = write_callback,
806 .notify.context = bio,
807 .client = ms->io_client,
808 };
795 809
796 for (i = 0; i < ms->nr_mirrors; i++) { 810 for (i = 0; i < ms->nr_mirrors; i++) {
797 m = ms->mirror + i; 811 m = ms->mirror + i;
@@ -802,9 +816,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
802 } 816 }
803 817
804 bio_set_ms(bio, ms); 818 bio_set_ms(bio, ms);
805 dm_io_async_bvec(ms->nr_mirrors, io, WRITE, 819
806 bio->bi_io_vec + bio->bi_idx, 820 (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
807 write_callback, bio);
808} 821}
809 822
810static void do_writes(struct mirror_set *ms, struct bio_list *writes) 823static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -870,11 +883,10 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
870/*----------------------------------------------------------------- 883/*-----------------------------------------------------------------
871 * kmirrord 884 * kmirrord
872 *---------------------------------------------------------------*/ 885 *---------------------------------------------------------------*/
873static LIST_HEAD(_mirror_sets); 886static void do_mirror(struct work_struct *work)
874static DECLARE_RWSEM(_mirror_sets_lock);
875
876static void do_mirror(struct mirror_set *ms)
877{ 887{
888 struct mirror_set *ms =container_of(work, struct mirror_set,
889 kmirrord_work);
878 struct bio_list reads, writes; 890 struct bio_list reads, writes;
879 891
880 spin_lock(&ms->lock); 892 spin_lock(&ms->lock);
@@ -890,16 +902,6 @@ static void do_mirror(struct mirror_set *ms)
890 do_writes(ms, &writes); 902 do_writes(ms, &writes);
891} 903}
892 904
893static void do_work(struct work_struct *ignored)
894{
895 struct mirror_set *ms;
896
897 down_read(&_mirror_sets_lock);
898 list_for_each_entry (ms, &_mirror_sets, list)
899 do_mirror(ms);
900 up_read(&_mirror_sets_lock);
901}
902
903/*----------------------------------------------------------------- 905/*-----------------------------------------------------------------
904 * Target functions 906 * Target functions
905 *---------------------------------------------------------------*/ 907 *---------------------------------------------------------------*/
@@ -931,6 +933,13 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
931 ms->in_sync = 0; 933 ms->in_sync = 0;
932 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR]; 934 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
933 935
936 ms->io_client = dm_io_client_create(DM_IO_PAGES);
937 if (IS_ERR(ms->io_client)) {
938 ti->error = "Error creating dm_io client";
939 kfree(ms);
940 return NULL;
941 }
942
934 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { 943 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
935 ti->error = "Error creating dirty region hash"; 944 ti->error = "Error creating dirty region hash";
936 kfree(ms); 945 kfree(ms);
@@ -946,6 +955,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
946 while (m--) 955 while (m--)
947 dm_put_device(ti, ms->mirror[m].dev); 956 dm_put_device(ti, ms->mirror[m].dev);
948 957
958 dm_io_client_destroy(ms->io_client);
949 rh_exit(&ms->rh); 959 rh_exit(&ms->rh);
950 kfree(ms); 960 kfree(ms);
951} 961}
@@ -978,23 +988,6 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
978 return 0; 988 return 0;
979} 989}
980 990
981static int add_mirror_set(struct mirror_set *ms)
982{
983 down_write(&_mirror_sets_lock);
984 list_add_tail(&ms->list, &_mirror_sets);
985 up_write(&_mirror_sets_lock);
986 wake();
987
988 return 0;
989}
990
991static void del_mirror_set(struct mirror_set *ms)
992{
993 down_write(&_mirror_sets_lock);
994 list_del(&ms->list);
995 up_write(&_mirror_sets_lock);
996}
997
998/* 991/*
999 * Create dirty log: log_type #log_params <log_params> 992 * Create dirty log: log_type #log_params <log_params>
1000 */ 993 */
@@ -1037,16 +1030,55 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
1037 return dl; 1030 return dl;
1038} 1031}
1039 1032
1033static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
1034 unsigned *args_used)
1035{
1036 unsigned num_features;
1037 struct dm_target *ti = ms->ti;
1038
1039 *args_used = 0;
1040
1041 if (!argc)
1042 return 0;
1043
1044 if (sscanf(argv[0], "%u", &num_features) != 1) {
1045 ti->error = "Invalid number of features";
1046 return -EINVAL;
1047 }
1048
1049 argc--;
1050 argv++;
1051 (*args_used)++;
1052
1053 if (num_features > argc) {
1054 ti->error = "Not enough arguments to support feature count";
1055 return -EINVAL;
1056 }
1057
1058 if (!strcmp("handle_errors", argv[0]))
1059 ms->features |= DM_RAID1_HANDLE_ERRORS;
1060 else {
1061 ti->error = "Unrecognised feature requested";
1062 return -EINVAL;
1063 }
1064
1065 (*args_used)++;
1066
1067 return 0;
1068}
1069
1040/* 1070/*
1041 * Construct a mirror mapping: 1071 * Construct a mirror mapping:
1042 * 1072 *
1043 * log_type #log_params <log_params> 1073 * log_type #log_params <log_params>
1044 * #mirrors [mirror_path offset]{2,} 1074 * #mirrors [mirror_path offset]{2,}
1075 * [#features <features>]
1045 * 1076 *
1046 * log_type is "core" or "disk" 1077 * log_type is "core" or "disk"
1047 * #log_params is between 1 and 3 1078 * #log_params is between 1 and 3
1079 *
1080 * If present, features must be "handle_errors".
1048 */ 1081 */
1049#define DM_IO_PAGES 64
1050static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1082static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1051{ 1083{
1052 int r; 1084 int r;
@@ -1070,8 +1102,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1070 1102
1071 argv++, argc--; 1103 argv++, argc--;
1072 1104
1073 if (argc != nr_mirrors * 2) { 1105 if (argc < nr_mirrors * 2) {
1074 ti->error = "Wrong number of mirror arguments"; 1106 ti->error = "Too few mirror arguments";
1075 dm_destroy_dirty_log(dl); 1107 dm_destroy_dirty_log(dl);
1076 return -EINVAL; 1108 return -EINVAL;
1077 } 1109 }
@@ -1096,13 +1128,37 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1096 ti->private = ms; 1128 ti->private = ms;
1097 ti->split_io = ms->rh.region_size; 1129 ti->split_io = ms->rh.region_size;
1098 1130
1131 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
1132 if (!ms->kmirrord_wq) {
1133 DMERR("couldn't start kmirrord");
1134 free_context(ms, ti, m);
1135 return -ENOMEM;
1136 }
1137 INIT_WORK(&ms->kmirrord_work, do_mirror);
1138
1139 r = parse_features(ms, argc, argv, &args_used);
1140 if (r) {
1141 free_context(ms, ti, ms->nr_mirrors);
1142 return r;
1143 }
1144
1145 argv += args_used;
1146 argc -= args_used;
1147
1148 if (argc) {
1149 ti->error = "Too many mirror arguments";
1150 free_context(ms, ti, ms->nr_mirrors);
1151 return -EINVAL;
1152 }
1153
1099 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); 1154 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
1100 if (r) { 1155 if (r) {
1156 destroy_workqueue(ms->kmirrord_wq);
1101 free_context(ms, ti, ms->nr_mirrors); 1157 free_context(ms, ti, ms->nr_mirrors);
1102 return r; 1158 return r;
1103 } 1159 }
1104 1160
1105 add_mirror_set(ms); 1161 wake(ms);
1106 return 0; 1162 return 0;
1107} 1163}
1108 1164
@@ -1110,8 +1166,9 @@ static void mirror_dtr(struct dm_target *ti)
1110{ 1166{
1111 struct mirror_set *ms = (struct mirror_set *) ti->private; 1167 struct mirror_set *ms = (struct mirror_set *) ti->private;
1112 1168
1113 del_mirror_set(ms); 1169 flush_workqueue(ms->kmirrord_wq);
1114 kcopyd_client_destroy(ms->kcopyd_client); 1170 kcopyd_client_destroy(ms->kcopyd_client);
1171 destroy_workqueue(ms->kmirrord_wq);
1115 free_context(ms, ti, ms->nr_mirrors); 1172 free_context(ms, ti, ms->nr_mirrors);
1116} 1173}
1117 1174
@@ -1127,7 +1184,7 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
1127 spin_unlock(&ms->lock); 1184 spin_unlock(&ms->lock);
1128 1185
1129 if (should_wake) 1186 if (should_wake)
1130 wake(); 1187 wake(ms);
1131} 1188}
1132 1189
1133/* 1190/*
@@ -1222,11 +1279,9 @@ static void mirror_resume(struct dm_target *ti)
1222static int mirror_status(struct dm_target *ti, status_type_t type, 1279static int mirror_status(struct dm_target *ti, status_type_t type,
1223 char *result, unsigned int maxlen) 1280 char *result, unsigned int maxlen)
1224{ 1281{
1225 unsigned int m, sz; 1282 unsigned int m, sz = 0;
1226 struct mirror_set *ms = (struct mirror_set *) ti->private; 1283 struct mirror_set *ms = (struct mirror_set *) ti->private;
1227 1284
1228 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1229
1230 switch (type) { 1285 switch (type) {
1231 case STATUSTYPE_INFO: 1286 case STATUSTYPE_INFO:
1232 DMEMIT("%d ", ms->nr_mirrors); 1287 DMEMIT("%d ", ms->nr_mirrors);
@@ -1237,13 +1292,21 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1237 (unsigned long long)ms->rh.log->type-> 1292 (unsigned long long)ms->rh.log->type->
1238 get_sync_count(ms->rh.log), 1293 get_sync_count(ms->rh.log),
1239 (unsigned long long)ms->nr_regions); 1294 (unsigned long long)ms->nr_regions);
1295
1296 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1297
1240 break; 1298 break;
1241 1299
1242 case STATUSTYPE_TABLE: 1300 case STATUSTYPE_TABLE:
1301 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1302
1243 DMEMIT("%d", ms->nr_mirrors); 1303 DMEMIT("%d", ms->nr_mirrors);
1244 for (m = 0; m < ms->nr_mirrors; m++) 1304 for (m = 0; m < ms->nr_mirrors; m++)
1245 DMEMIT(" %s %llu", ms->mirror[m].dev->name, 1305 DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1246 (unsigned long long)ms->mirror[m].offset); 1306 (unsigned long long)ms->mirror[m].offset);
1307
1308 if (ms->features & DM_RAID1_HANDLE_ERRORS)
1309 DMEMIT(" 1 handle_errors");
1247 } 1310 }
1248 1311
1249 return 0; 1312 return 0;
@@ -1251,7 +1314,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1251 1314
1252static struct target_type mirror_target = { 1315static struct target_type mirror_target = {
1253 .name = "mirror", 1316 .name = "mirror",
1254 .version = {1, 0, 2}, 1317 .version = {1, 0, 3},
1255 .module = THIS_MODULE, 1318 .module = THIS_MODULE,
1256 .ctr = mirror_ctr, 1319 .ctr = mirror_ctr,
1257 .dtr = mirror_dtr, 1320 .dtr = mirror_dtr,
@@ -1270,20 +1333,11 @@ static int __init dm_mirror_init(void)
1270 if (r) 1333 if (r)
1271 return r; 1334 return r;
1272 1335
1273 _kmirrord_wq = create_singlethread_workqueue("kmirrord");
1274 if (!_kmirrord_wq) {
1275 DMERR("couldn't start kmirrord");
1276 dm_dirty_log_exit();
1277 return r;
1278 }
1279 INIT_WORK(&_kmirrord_work, do_work);
1280
1281 r = dm_register_target(&mirror_target); 1336 r = dm_register_target(&mirror_target);
1282 if (r < 0) { 1337 if (r < 0) {
1283 DMERR("%s: Failed to register mirror target", 1338 DMERR("%s: Failed to register mirror target",
1284 mirror_target.name); 1339 mirror_target.name);
1285 dm_dirty_log_exit(); 1340 dm_dirty_log_exit();
1286 destroy_workqueue(_kmirrord_wq);
1287 } 1341 }
1288 1342
1289 return r; 1343 return r;
@@ -1297,7 +1351,6 @@ static void __exit dm_mirror_exit(void)
1297 if (r < 0) 1351 if (r < 0)
1298 DMERR("%s: unregister failed %d", mirror_target.name, r); 1352 DMERR("%s: unregister failed %d", mirror_target.name, r);
1299 1353
1300 destroy_workqueue(_kmirrord_wq);
1301 dm_dirty_log_exit(); 1354 dm_dirty_log_exit();
1302} 1355}
1303 1356
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 05befa91807a..2fc199b0016b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -425,13 +425,15 @@ static void close_dev(struct dm_dev *d, struct mapped_device *md)
425} 425}
426 426
427/* 427/*
428 * If possible (ie. blk_size[major] is set), this checks an area 428 * If possible, this checks an area of a destination device is valid.
429 * of a destination device is valid.
430 */ 429 */
431static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len) 430static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
432{ 431{
433 sector_t dev_size; 432 sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
434 dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT; 433
434 if (!dev_size)
435 return 1;
436
435 return ((start < dev_size) && (len <= (dev_size - start))); 437 return ((start < dev_size) && (len <= (dev_size - start)));
436} 438}
437 439
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 11a98df298ec..2717a355dc5b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1236,6 +1236,7 @@ void dm_put(struct mapped_device *md)
1236 free_dev(md); 1236 free_dev(md);
1237 } 1237 }
1238} 1238}
1239EXPORT_SYMBOL_GPL(dm_put);
1239 1240
1240/* 1241/*
1241 * Process the deferred bios 1242 * Process the deferred bios
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index b46f6c575f7e..dbc234e3c69f 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2002 Sistina Software (UK) Limited. 2 * Copyright (C) 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2006 Red Hat GmbH
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 * 6 *
@@ -45,6 +46,8 @@ struct kcopyd_client {
45 unsigned int nr_pages; 46 unsigned int nr_pages;
46 unsigned int nr_free_pages; 47 unsigned int nr_free_pages;
47 48
49 struct dm_io_client *io_client;
50
48 wait_queue_head_t destroyq; 51 wait_queue_head_t destroyq;
49 atomic_t nr_jobs; 52 atomic_t nr_jobs;
50}; 53};
@@ -342,16 +345,20 @@ static void complete_io(unsigned long error, void *context)
342static int run_io_job(struct kcopyd_job *job) 345static int run_io_job(struct kcopyd_job *job)
343{ 346{
344 int r; 347 int r;
348 struct dm_io_request io_req = {
349 .bi_rw = job->rw,
350 .mem.type = DM_IO_PAGE_LIST,
351 .mem.ptr.pl = job->pages,
352 .mem.offset = job->offset,
353 .notify.fn = complete_io,
354 .notify.context = job,
355 .client = job->kc->io_client,
356 };
345 357
346 if (job->rw == READ) 358 if (job->rw == READ)
347 r = dm_io_async(1, &job->source, job->rw, 359 r = dm_io(&io_req, 1, &job->source, NULL);
348 job->pages,
349 job->offset, complete_io, job);
350
351 else 360 else
352 r = dm_io_async(job->num_dests, job->dests, job->rw, 361 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
353 job->pages,
354 job->offset, complete_io, job);
355 362
356 return r; 363 return r;
357} 364}
@@ -670,8 +677,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
670 return r; 677 return r;
671 } 678 }
672 679
673 r = dm_io_get(nr_pages); 680 kc->io_client = dm_io_client_create(nr_pages);
674 if (r) { 681 if (IS_ERR(kc->io_client)) {
682 r = PTR_ERR(kc->io_client);
675 client_free_pages(kc); 683 client_free_pages(kc);
676 kfree(kc); 684 kfree(kc);
677 kcopyd_exit(); 685 kcopyd_exit();
@@ -691,7 +699,7 @@ void kcopyd_client_destroy(struct kcopyd_client *kc)
691 /* Wait for completion of all jobs submitted by this client. */ 699 /* Wait for completion of all jobs submitted by this client. */
692 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); 700 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
693 701
694 dm_io_put(kc->nr_pages); 702 dm_io_client_destroy(kc->io_client);
695 client_free_pages(kc); 703 client_free_pages(kc);
696 client_del(kc); 704 client_del(kc);
697 kfree(kc); 705 kfree(kc);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2b4315d7e5d6..2901d0c0ee9e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -33,6 +33,7 @@
33*/ 33*/
34 34
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/kernel.h>
36#include <linux/kthread.h> 37#include <linux/kthread.h>
37#include <linux/linkage.h> 38#include <linux/linkage.h>
38#include <linux/raid/md.h> 39#include <linux/raid/md.h>
@@ -273,6 +274,7 @@ static mddev_t * mddev_find(dev_t unit)
273 atomic_set(&new->active, 1); 274 atomic_set(&new->active, 1);
274 spin_lock_init(&new->write_lock); 275 spin_lock_init(&new->write_lock);
275 init_waitqueue_head(&new->sb_wait); 276 init_waitqueue_head(&new->sb_wait);
277 new->reshape_position = MaxSector;
276 278
277 new->queue = blk_alloc_queue(GFP_KERNEL); 279 new->queue = blk_alloc_queue(GFP_KERNEL);
278 if (!new->queue) { 280 if (!new->queue) {
@@ -589,14 +591,41 @@ abort:
589 return ret; 591 return ret;
590} 592}
591 593
594
595static u32 md_csum_fold(u32 csum)
596{
597 csum = (csum & 0xffff) + (csum >> 16);
598 return (csum & 0xffff) + (csum >> 16);
599}
600
592static unsigned int calc_sb_csum(mdp_super_t * sb) 601static unsigned int calc_sb_csum(mdp_super_t * sb)
593{ 602{
603 u64 newcsum = 0;
604 u32 *sb32 = (u32*)sb;
605 int i;
594 unsigned int disk_csum, csum; 606 unsigned int disk_csum, csum;
595 607
596 disk_csum = sb->sb_csum; 608 disk_csum = sb->sb_csum;
597 sb->sb_csum = 0; 609 sb->sb_csum = 0;
598 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 610
611 for (i = 0; i < MD_SB_BYTES/4 ; i++)
612 newcsum += sb32[i];
613 csum = (newcsum & 0xffffffff) + (newcsum>>32);
614
615
616#ifdef CONFIG_ALPHA
617 /* This used to use csum_partial, which was wrong for several
618 * reasons including that different results are returned on
619 * different architectures. It isn't critical that we get exactly
620 * the same return value as before (we always csum_fold before
621 * testing, and that removes any differences). However as we
622 * know that csum_partial always returned a 16bit value on
623 * alphas, do a fold to maximise conformity to previous behaviour.
624 */
625 sb->sb_csum = md_csum_fold(disk_csum);
626#else
599 sb->sb_csum = disk_csum; 627 sb->sb_csum = disk_csum;
628#endif
600 return csum; 629 return csum;
601} 630}
602 631
@@ -684,7 +713,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
684 if (sb->raid_disks <= 0) 713 if (sb->raid_disks <= 0)
685 goto abort; 714 goto abort;
686 715
687 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 716 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
688 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 717 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
689 b); 718 b);
690 goto abort; 719 goto abort;
@@ -694,6 +723,17 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
694 rdev->data_offset = 0; 723 rdev->data_offset = 0;
695 rdev->sb_size = MD_SB_BYTES; 724 rdev->sb_size = MD_SB_BYTES;
696 725
726 if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
727 if (sb->level != 1 && sb->level != 4
728 && sb->level != 5 && sb->level != 6
729 && sb->level != 10) {
730 /* FIXME use a better test */
731 printk(KERN_WARNING
732 "md: bitmaps not supported for this level.\n");
733 goto abort;
734 }
735 }
736
697 if (sb->level == LEVEL_MULTIPATH) 737 if (sb->level == LEVEL_MULTIPATH)
698 rdev->desc_nr = -1; 738 rdev->desc_nr = -1;
699 else 739 else
@@ -792,16 +832,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
792 mddev->max_disks = MD_SB_DISKS; 832 mddev->max_disks = MD_SB_DISKS;
793 833
794 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 834 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
795 mddev->bitmap_file == NULL) { 835 mddev->bitmap_file == NULL)
796 if (mddev->level != 1 && mddev->level != 4
797 && mddev->level != 5 && mddev->level != 6
798 && mddev->level != 10) {
799 /* FIXME use a better test */
800 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
801 return -EINVAL;
802 }
803 mddev->bitmap_offset = mddev->default_bitmap_offset; 836 mddev->bitmap_offset = mddev->default_bitmap_offset;
804 }
805 837
806 } else if (mddev->pers == NULL) { 838 } else if (mddev->pers == NULL) {
807 /* Insist on good event counter while assembling */ 839 /* Insist on good event counter while assembling */
@@ -1058,6 +1090,18 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1058 bdevname(rdev->bdev,b)); 1090 bdevname(rdev->bdev,b));
1059 return -EINVAL; 1091 return -EINVAL;
1060 } 1092 }
1093 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1094 if (sb->level != cpu_to_le32(1) &&
1095 sb->level != cpu_to_le32(4) &&
1096 sb->level != cpu_to_le32(5) &&
1097 sb->level != cpu_to_le32(6) &&
1098 sb->level != cpu_to_le32(10)) {
1099 printk(KERN_WARNING
1100 "md: bitmaps not supported for this level.\n");
1101 return -EINVAL;
1102 }
1103 }
1104
1061 rdev->preferred_minor = 0xffff; 1105 rdev->preferred_minor = 0xffff;
1062 rdev->data_offset = le64_to_cpu(sb->data_offset); 1106 rdev->data_offset = le64_to_cpu(sb->data_offset);
1063 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1107 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
@@ -1141,14 +1185,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1141 mddev->max_disks = (4096-256)/2; 1185 mddev->max_disks = (4096-256)/2;
1142 1186
1143 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1187 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1144 mddev->bitmap_file == NULL ) { 1188 mddev->bitmap_file == NULL )
1145 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1146 && mddev->level != 10) {
1147 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1148 return -EINVAL;
1149 }
1150 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1189 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1151 } 1190
1152 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1191 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1153 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1192 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1154 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1193 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
@@ -2204,6 +2243,10 @@ static ssize_t
2204layout_show(mddev_t *mddev, char *page) 2243layout_show(mddev_t *mddev, char *page)
2205{ 2244{
2206 /* just a number, not meaningful for all levels */ 2245 /* just a number, not meaningful for all levels */
2246 if (mddev->reshape_position != MaxSector &&
2247 mddev->layout != mddev->new_layout)
2248 return sprintf(page, "%d (%d)\n",
2249 mddev->new_layout, mddev->layout);
2207 return sprintf(page, "%d\n", mddev->layout); 2250 return sprintf(page, "%d\n", mddev->layout);
2208} 2251}
2209 2252
@@ -2212,13 +2255,16 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
2212{ 2255{
2213 char *e; 2256 char *e;
2214 unsigned long n = simple_strtoul(buf, &e, 10); 2257 unsigned long n = simple_strtoul(buf, &e, 10);
2215 if (mddev->pers)
2216 return -EBUSY;
2217 2258
2218 if (!*buf || (*e && *e != '\n')) 2259 if (!*buf || (*e && *e != '\n'))
2219 return -EINVAL; 2260 return -EINVAL;
2220 2261
2221 mddev->layout = n; 2262 if (mddev->pers)
2263 return -EBUSY;
2264 if (mddev->reshape_position != MaxSector)
2265 mddev->new_layout = n;
2266 else
2267 mddev->layout = n;
2222 return len; 2268 return len;
2223} 2269}
2224static struct md_sysfs_entry md_layout = 2270static struct md_sysfs_entry md_layout =
@@ -2230,6 +2276,10 @@ raid_disks_show(mddev_t *mddev, char *page)
2230{ 2276{
2231 if (mddev->raid_disks == 0) 2277 if (mddev->raid_disks == 0)
2232 return 0; 2278 return 0;
2279 if (mddev->reshape_position != MaxSector &&
2280 mddev->delta_disks != 0)
2281 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2282 mddev->raid_disks - mddev->delta_disks);
2233 return sprintf(page, "%d\n", mddev->raid_disks); 2283 return sprintf(page, "%d\n", mddev->raid_disks);
2234} 2284}
2235 2285
@@ -2247,7 +2297,11 @@ raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2247 2297
2248 if (mddev->pers) 2298 if (mddev->pers)
2249 rv = update_raid_disks(mddev, n); 2299 rv = update_raid_disks(mddev, n);
2250 else 2300 else if (mddev->reshape_position != MaxSector) {
2301 int olddisks = mddev->raid_disks - mddev->delta_disks;
2302 mddev->delta_disks = n - olddisks;
2303 mddev->raid_disks = n;
2304 } else
2251 mddev->raid_disks = n; 2305 mddev->raid_disks = n;
2252 return rv ? rv : len; 2306 return rv ? rv : len;
2253} 2307}
@@ -2257,6 +2311,10 @@ __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2257static ssize_t 2311static ssize_t
2258chunk_size_show(mddev_t *mddev, char *page) 2312chunk_size_show(mddev_t *mddev, char *page)
2259{ 2313{
2314 if (mddev->reshape_position != MaxSector &&
2315 mddev->chunk_size != mddev->new_chunk)
2316 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2317 mddev->chunk_size);
2260 return sprintf(page, "%d\n", mddev->chunk_size); 2318 return sprintf(page, "%d\n", mddev->chunk_size);
2261} 2319}
2262 2320
@@ -2267,12 +2325,15 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2267 char *e; 2325 char *e;
2268 unsigned long n = simple_strtoul(buf, &e, 10); 2326 unsigned long n = simple_strtoul(buf, &e, 10);
2269 2327
2270 if (mddev->pers)
2271 return -EBUSY;
2272 if (!*buf || (*e && *e != '\n')) 2328 if (!*buf || (*e && *e != '\n'))
2273 return -EINVAL; 2329 return -EINVAL;
2274 2330
2275 mddev->chunk_size = n; 2331 if (mddev->pers)
2332 return -EBUSY;
2333 else if (mddev->reshape_position != MaxSector)
2334 mddev->new_chunk = n;
2335 else
2336 mddev->chunk_size = n;
2276 return len; 2337 return len;
2277} 2338}
2278static struct md_sysfs_entry md_chunk_size = 2339static struct md_sysfs_entry md_chunk_size =
@@ -2637,8 +2698,7 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
2637 minor = simple_strtoul(buf, &e, 10); 2698 minor = simple_strtoul(buf, &e, 10);
2638 if (e==buf || (*e && *e != '\n') ) 2699 if (e==buf || (*e && *e != '\n') )
2639 return -EINVAL; 2700 return -EINVAL;
2640 if (major >= sizeof(super_types)/sizeof(super_types[0]) || 2701 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2641 super_types[major].name == NULL)
2642 return -ENOENT; 2702 return -ENOENT;
2643 mddev->major_version = major; 2703 mddev->major_version = major;
2644 mddev->minor_version = minor; 2704 mddev->minor_version = minor;
@@ -2859,6 +2919,37 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2859static struct md_sysfs_entry md_suspend_hi = 2919static struct md_sysfs_entry md_suspend_hi =
2860__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 2920__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2861 2921
2922static ssize_t
2923reshape_position_show(mddev_t *mddev, char *page)
2924{
2925 if (mddev->reshape_position != MaxSector)
2926 return sprintf(page, "%llu\n",
2927 (unsigned long long)mddev->reshape_position);
2928 strcpy(page, "none\n");
2929 return 5;
2930}
2931
2932static ssize_t
2933reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
2934{
2935 char *e;
2936 unsigned long long new = simple_strtoull(buf, &e, 10);
2937 if (mddev->pers)
2938 return -EBUSY;
2939 if (buf == e || (*e && *e != '\n'))
2940 return -EINVAL;
2941 mddev->reshape_position = new;
2942 mddev->delta_disks = 0;
2943 mddev->new_level = mddev->level;
2944 mddev->new_layout = mddev->layout;
2945 mddev->new_chunk = mddev->chunk_size;
2946 return len;
2947}
2948
2949static struct md_sysfs_entry md_reshape_position =
2950__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
2951 reshape_position_store);
2952
2862 2953
2863static struct attribute *md_default_attrs[] = { 2954static struct attribute *md_default_attrs[] = {
2864 &md_level.attr, 2955 &md_level.attr,
@@ -2871,6 +2962,7 @@ static struct attribute *md_default_attrs[] = {
2871 &md_new_device.attr, 2962 &md_new_device.attr,
2872 &md_safe_delay.attr, 2963 &md_safe_delay.attr,
2873 &md_array_state.attr, 2964 &md_array_state.attr,
2965 &md_reshape_position.attr,
2874 NULL, 2966 NULL,
2875}; 2967};
2876 2968
@@ -3012,6 +3104,7 @@ static int do_md_run(mddev_t * mddev)
3012 struct gendisk *disk; 3104 struct gendisk *disk;
3013 struct mdk_personality *pers; 3105 struct mdk_personality *pers;
3014 char b[BDEVNAME_SIZE]; 3106 char b[BDEVNAME_SIZE];
3107 struct block_device *bdev;
3015 3108
3016 if (list_empty(&mddev->disks)) 3109 if (list_empty(&mddev->disks))
3017 /* cannot run an array with no devices.. */ 3110 /* cannot run an array with no devices.. */
@@ -3239,7 +3332,13 @@ static int do_md_run(mddev_t * mddev)
3239 md_wakeup_thread(mddev->thread); 3332 md_wakeup_thread(mddev->thread);
3240 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 3333 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3241 3334
3242 mddev->changed = 1; 3335 bdev = bdget_disk(mddev->gendisk, 0);
3336 if (bdev) {
3337 bd_set_size(bdev, mddev->array_size << 1);
3338 blkdev_ioctl(bdev->bd_inode, NULL, BLKRRPART, 0);
3339 bdput(bdev);
3340 }
3341
3243 md_new_event(mddev); 3342 md_new_event(mddev);
3244 kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE); 3343 kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
3245 return 0; 3344 return 0;
@@ -3361,7 +3460,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
3361 mddev->pers = NULL; 3460 mddev->pers = NULL;
3362 3461
3363 set_capacity(disk, 0); 3462 set_capacity(disk, 0);
3364 mddev->changed = 1;
3365 3463
3366 if (mddev->ro) 3464 if (mddev->ro)
3367 mddev->ro = 0; 3465 mddev->ro = 0;
@@ -3409,6 +3507,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
3409 mddev->size = 0; 3507 mddev->size = 0;
3410 mddev->raid_disks = 0; 3508 mddev->raid_disks = 0;
3411 mddev->recovery_cp = 0; 3509 mddev->recovery_cp = 0;
3510 mddev->reshape_position = MaxSector;
3412 3511
3413 } else if (mddev->pers) 3512 } else if (mddev->pers)
3414 printk(KERN_INFO "md: %s switched to read-only mode.\n", 3513 printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -4019,7 +4118,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4019 if (info->raid_disks == 0) { 4118 if (info->raid_disks == 0) {
4020 /* just setting version number for superblock loading */ 4119 /* just setting version number for superblock loading */
4021 if (info->major_version < 0 || 4120 if (info->major_version < 0 ||
4022 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || 4121 info->major_version >= ARRAY_SIZE(super_types) ||
4023 super_types[info->major_version].name == NULL) { 4122 super_types[info->major_version].name == NULL) {
4024 /* maybe try to auto-load a module? */ 4123 /* maybe try to auto-load a module? */
4025 printk(KERN_INFO 4124 printk(KERN_INFO
@@ -4500,20 +4599,6 @@ static int md_release(struct inode *inode, struct file * file)
4500 return 0; 4599 return 0;
4501} 4600}
4502 4601
4503static int md_media_changed(struct gendisk *disk)
4504{
4505 mddev_t *mddev = disk->private_data;
4506
4507 return mddev->changed;
4508}
4509
4510static int md_revalidate(struct gendisk *disk)
4511{
4512 mddev_t *mddev = disk->private_data;
4513
4514 mddev->changed = 0;
4515 return 0;
4516}
4517static struct block_device_operations md_fops = 4602static struct block_device_operations md_fops =
4518{ 4603{
4519 .owner = THIS_MODULE, 4604 .owner = THIS_MODULE,
@@ -4521,8 +4606,6 @@ static struct block_device_operations md_fops =
4521 .release = md_release, 4606 .release = md_release,
4522 .ioctl = md_ioctl, 4607 .ioctl = md_ioctl,
4523 .getgeo = md_getgeo, 4608 .getgeo = md_getgeo,
4524 .media_changed = md_media_changed,
4525 .revalidate_disk= md_revalidate,
4526}; 4609};
4527 4610
4528static int md_thread(void * arg) 4611static int md_thread(void * arg)
@@ -4941,15 +5024,6 @@ static int md_seq_open(struct inode *inode, struct file *file)
4941 return error; 5024 return error;
4942} 5025}
4943 5026
4944static int md_seq_release(struct inode *inode, struct file *file)
4945{
4946 struct seq_file *m = file->private_data;
4947 struct mdstat_info *mi = m->private;
4948 m->private = NULL;
4949 kfree(mi);
4950 return seq_release(inode, file);
4951}
4952
4953static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 5027static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4954{ 5028{
4955 struct seq_file *m = filp->private_data; 5029 struct seq_file *m = filp->private_data;
@@ -4971,7 +5045,7 @@ static const struct file_operations md_seq_fops = {
4971 .open = md_seq_open, 5045 .open = md_seq_open,
4972 .read = seq_read, 5046 .read = seq_read,
4973 .llseek = seq_lseek, 5047 .llseek = seq_lseek,
4974 .release = md_seq_release, 5048 .release = seq_release_private,
4975 .poll = mdstat_poll, 5049 .poll = mdstat_poll,
4976}; 5050};
4977 5051
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 97ee870b265d..1b7130cad21f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2063,7 +2063,6 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
2063 */ 2063 */
2064 mddev->array_size = sectors>>1; 2064 mddev->array_size = sectors>>1;
2065 set_capacity(mddev->gendisk, mddev->array_size << 1); 2065 set_capacity(mddev->gendisk, mddev->array_size << 1);
2066 mddev->changed = 1;
2067 if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) { 2066 if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
2068 mddev->recovery_cp = mddev->size << 1; 2067 mddev->recovery_cp = mddev->size << 1;
2069 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2068 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8d59914f2057..a72e70ad0975 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -353,8 +353,8 @@ static int grow_stripes(raid5_conf_t *conf, int num)
353 struct kmem_cache *sc; 353 struct kmem_cache *sc;
354 int devs = conf->raid_disks; 354 int devs = conf->raid_disks;
355 355
356 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev)); 356 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
357 sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev)); 357 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
358 conf->active_name = 0; 358 conf->active_name = 0;
359 sc = kmem_cache_create(conf->cache_name[conf->active_name], 359 sc = kmem_cache_create(conf->cache_name[conf->active_name],
360 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 360 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
@@ -3864,7 +3864,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
3864 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 3864 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
3865 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 3865 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
3866 set_capacity(mddev->gendisk, mddev->array_size << 1); 3866 set_capacity(mddev->gendisk, mddev->array_size << 1);
3867 mddev->changed = 1;
3868 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 3867 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
3869 mddev->recovery_cp = mddev->size << 1; 3868 mddev->recovery_cp = mddev->size << 1;
3870 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3869 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -3999,7 +3998,6 @@ static void end_reshape(raid5_conf_t *conf)
3999 conf->mddev->array_size = conf->mddev->size * 3998 conf->mddev->array_size = conf->mddev->size *
4000 (conf->raid_disks - conf->max_degraded); 3999 (conf->raid_disks - conf->max_degraded);
4001 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 4000 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
4002 conf->mddev->changed = 1;
4003 4001
4004 bdev = bdget_disk(conf->mddev->gendisk, 0); 4002 bdev = bdget_disk(conf->mddev->gendisk, 0);
4005 if (bdev) { 4003 if (bdev) {
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 68ed3a788083..9200a30dd1b9 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -3,7 +3,7 @@
3 * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) 3 * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
4 * see dvb-usb-init.c for copyright information. 4 * see dvb-usb-init.c for copyright information.
5 * 5 *
6 * This file contains functions for initializing the the input-device and for handling remote-control-queries. 6 * This file contains functions for initializing the input-device and for handling remote-control-queries.
7 */ 7 */
8#include "dvb-usb-common.h" 8#include "dvb-usb-common.h"
9#include <linux/usb/input.h> 9#include <linux/usb/input.h>
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index f5d40aa3d27f..f64546c6aeb5 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -266,7 +266,7 @@ static int dib7000m_sad_calib(struct dib7000m_state *state)
266{ 266{
267 267
268/* internal */ 268/* internal */
269// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth 269// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
270 dib7000m_write_word(state, 929, (0 << 1) | (0 << 0)); 270 dib7000m_write_word(state, 929, (0 << 1) | (0 << 0));
271 dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096 271 dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096
272 272
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 0349a4b5da3f..aece458cfe12 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -223,7 +223,7 @@ static int dib7000p_set_bandwidth(struct dvb_frontend *demod, u8 BW_Idx)
223static int dib7000p_sad_calib(struct dib7000p_state *state) 223static int dib7000p_sad_calib(struct dib7000p_state *state)
224{ 224{
225/* internal */ 225/* internal */
226// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth 226// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
227 dib7000p_write_word(state, 73, (0 << 1) | (0 << 0)); 227 dib7000p_write_word(state, 73, (0 << 1) | (0 << 0));
228 dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096 228 dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096
229 229
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 110536843e8e..e725f612a6b7 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -1,6 +1,6 @@
1/* 1/*
2 TDA10021 - Single Chip Cable Channel Receiver driver module 2 TDA10021 - Single Chip Cable Channel Receiver driver module
3 used on the the Siemens DVB-C cards 3 used on the Siemens DVB-C cards
4 4
5 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de> 5 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de>
6 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de> 6 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de>
diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c
index 54d7b07571b8..23fd0303c91b 100644
--- a/drivers/media/dvb/frontends/ves1x93.c
+++ b/drivers/media/dvb/frontends/ves1x93.c
@@ -306,7 +306,7 @@ static int ves1x93_read_status(struct dvb_frontend* fe, fe_status_t* status)
306 * The ves1893 sometimes returns sync values that make no sense, 306 * The ves1893 sometimes returns sync values that make no sense,
307 * because, e.g., the SIGNAL bit is 0, while some of the higher 307 * because, e.g., the SIGNAL bit is 0, while some of the higher
308 * bits are 1 (and how can there be a CARRIER w/o a SIGNAL?). 308 * bits are 1 (and how can there be a CARRIER w/o a SIGNAL?).
309 * Tests showed that the the VITERBI and SYNC bits are returned 309 * Tests showed that the VITERBI and SYNC bits are returned
310 * reliably, while the SIGNAL and CARRIER bits ar sometimes wrong. 310 * reliably, while the SIGNAL and CARRIER bits ar sometimes wrong.
311 * If such a case occurs, we read the value again, until we get a 311 * If such a case occurs, we read the value again, until we get a
312 * valid value. 312 * valid value.
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 563a8319e608..54ccc6e1f92e 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -70,7 +70,7 @@ static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr,
70 70
71 ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len); 71 ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
72 if (ret != 2 + len) { 72 if (ret != 2 + len) {
73 em28xx_warn("writting to i2c device failed (error=%i)\n", ret); 73 em28xx_warn("writing to i2c device failed (error=%i)\n", ret);
74 return -EIO; 74 return -EIO;
75 } 75 }
76 for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0; 76 for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index bec67609500f..2c7b158ce7e1 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1729,7 +1729,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
1729 1729
1730 endpoint = &interface->cur_altsetting->endpoint[1].desc; 1730 endpoint = &interface->cur_altsetting->endpoint[1].desc;
1731 1731
1732 /* check if the the device has the iso in endpoint at the correct place */ 1732 /* check if the device has the iso in endpoint at the correct place */
1733 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 1733 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
1734 USB_ENDPOINT_XFER_ISOC) { 1734 USB_ENDPOINT_XFER_ISOC) {
1735 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n"); 1735 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n");
diff --git a/drivers/media/video/pwc/philips.txt b/drivers/media/video/pwc/philips.txt
index f5e848410311..f9f3584281d8 100644
--- a/drivers/media/video/pwc/philips.txt
+++ b/drivers/media/video/pwc/philips.txt
@@ -54,9 +54,9 @@ fps
54 Specifies the desired framerate. Is an integer in the range of 4-30. 54 Specifies the desired framerate. Is an integer in the range of 4-30.
55 55
56fbufs 56fbufs
57 This paramter specifies the number of internal buffers to use for storing 57 This parameter specifies the number of internal buffers to use for storing
58 frames from the cam. This will help if the process that reads images from 58 frames from the cam. This will help if the process that reads images from
59 the cam is a bit slow or momentarely busy. However, on slow machines it 59 the cam is a bit slow or momentarily busy. However, on slow machines it
60 only introduces lag, so choose carefully. The default is 3, which is 60 only introduces lag, so choose carefully. The default is 3, which is
61 reasonable. You can set it between 2 and 5. 61 reasonable. You can set it between 2 and 5.
62 62
@@ -209,7 +209,7 @@ trace
209 209
210 128 0x80 PWCX debugging Off 210 128 0x80 PWCX debugging Off
211 211
212 For example, to trace the open() & read() fuctions, sum 8 + 4 = 12, 212 For example, to trace the open() & read() functions, sum 8 + 4 = 12,
213 so you would supply trace=12 during insmod or modprobe. If 213 so you would supply trace=12 during insmod or modprobe. If
214 you want to turn the initialization and probing tracing off, set trace=0. 214 you want to turn the initialization and probing tracing off, set trace=0.
215 The default value for trace is 35 (0x23). 215 The default value for trace is 35 (0x23).
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 876fd2768242..982b115193f8 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -28,7 +28,7 @@
28 * 28 *
29 * Portions of this code were also copied from usbvideo.c 29 * Portions of this code were also copied from usbvideo.c
30 * 30 *
31 * Special thanks to the the whole team at Sourceforge for help making 31 * Special thanks to the whole team at Sourceforge for help making
32 * this driver become a reality. Notably: 32 * this driver become a reality. Notably:
33 * Andy Armstrong who reverse engineered the color encoding and 33 * Andy Armstrong who reverse engineered the color encoding and
34 * Pavel Machek and Chris Cheney who worked on reverse engineering the 34 * Pavel Machek and Chris Cheney who worked on reverse engineering the
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index d6b4c607453b..ddc7ae029dd3 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -571,7 +571,7 @@ mpi_fc.h
571 * 11-02-00 01.01.01 Original release for post 1.0 work 571 * 11-02-00 01.01.01 Original release for post 1.0 work
572 * 12-04-00 01.01.02 Added messages for Common Transport Send and 572 * 12-04-00 01.01.02 Added messages for Common Transport Send and
573 * Primitive Send. 573 * Primitive Send.
574 * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix 574 * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
575 * and modified the FcPrimitiveSend flags. 575 * and modified the FcPrimitiveSend flags.
576 * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger 576 * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
577 * field. 577 * field.
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 97471af4309c..5021d1a2a1d4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -3585,7 +3585,7 @@ initChainBuffers(MPT_ADAPTER *ioc)
3585 * index = chain_idx 3585 * index = chain_idx
3586 * 3586 *
3587 * Calculate the number of chain buffers needed(plus 1) per I/O 3587 * Calculate the number of chain buffers needed(plus 1) per I/O
3588 * then multiply the the maximum number of simultaneous cmds 3588 * then multiply the maximum number of simultaneous cmds
3589 * 3589 *
3590 * num_sge = num sge in request frame + last chain buffer 3590 * num_sge = num sge in request frame + last chain buffer
3591 * scale = num sge per chain buffer if no chain element 3591 * scale = num sge per chain buffer if no chain element
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 1ba6c085419a..c08ad8f823d2 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -105,7 +105,8 @@ static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr)
105 == TIFM_TYPE_XD) 105 == TIFM_TYPE_XD)
106 msleep(40); 106 msleep(40);
107 107
108 writel((s_state & 7) | 0x0c00, sock_addr + SOCK_CONTROL); 108 writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00,
109 sock_addr + SOCK_CONTROL);
109 /* wait for power to stabilize */ 110 /* wait for power to stabilize */
110 msleep(20); 111 msleep(20);
111 for (cnt = 16; cnt <= 256; cnt <<= 1) { 112 for (cnt = 16; cnt <= 256; cnt <<= 1) {
@@ -122,6 +123,12 @@ static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr)
122 return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7; 123 return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7;
123} 124}
124 125
126inline static void tifm_7xx1_sock_power_off(char __iomem *sock_addr)
127{
128 writel((~TIFM_CTRL_POWER_MASK) & readl(sock_addr + SOCK_CONTROL),
129 sock_addr + SOCK_CONTROL);
130}
131
125inline static char __iomem * 132inline static char __iomem *
126tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num) 133tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
127{ 134{
@@ -133,6 +140,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
133 struct tifm_adapter *fm = container_of(work, struct tifm_adapter, 140 struct tifm_adapter *fm = container_of(work, struct tifm_adapter,
134 media_switcher); 141 media_switcher);
135 struct tifm_dev *sock; 142 struct tifm_dev *sock;
143 char __iomem *sock_addr;
136 unsigned long flags; 144 unsigned long flags;
137 unsigned char media_id; 145 unsigned char media_id;
138 unsigned int socket_change_set, cnt; 146 unsigned int socket_change_set, cnt;
@@ -158,11 +166,12 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
158 "%s : demand removing card from socket %u:%u\n", 166 "%s : demand removing card from socket %u:%u\n",
159 fm->cdev.class_id, fm->id, cnt); 167 fm->cdev.class_id, fm->id, cnt);
160 fm->sockets[cnt] = NULL; 168 fm->sockets[cnt] = NULL;
169 sock_addr = sock->addr;
161 spin_unlock_irqrestore(&fm->lock, flags); 170 spin_unlock_irqrestore(&fm->lock, flags);
162 device_unregister(&sock->dev); 171 device_unregister(&sock->dev);
163 spin_lock_irqsave(&fm->lock, flags); 172 spin_lock_irqsave(&fm->lock, flags);
164 writel(0x0e00, tifm_7xx1_sock_addr(fm->addr, cnt) 173 tifm_7xx1_sock_power_off(sock_addr);
165 + SOCK_CONTROL); 174 writel(0x0e00, sock_addr + SOCK_CONTROL);
166 } 175 }
167 176
168 spin_unlock_irqrestore(&fm->lock, flags); 177 spin_unlock_irqrestore(&fm->lock, flags);
@@ -205,8 +214,16 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
205 214
206static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) 215static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
207{ 216{
217 struct tifm_adapter *fm = pci_get_drvdata(dev);
218 int cnt;
219
208 dev_dbg(&dev->dev, "suspending host\n"); 220 dev_dbg(&dev->dev, "suspending host\n");
209 221
222 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
223 if (fm->sockets[cnt])
224 tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
225 }
226
210 pci_save_state(dev); 227 pci_save_state(dev);
211 pci_enable_wake(dev, pci_choose_state(dev, state), 0); 228 pci_enable_wake(dev, pci_choose_state(dev, state), 0);
212 pci_disable_device(dev); 229 pci_disable_device(dev);
@@ -357,6 +374,7 @@ err_out:
357static void tifm_7xx1_remove(struct pci_dev *dev) 374static void tifm_7xx1_remove(struct pci_dev *dev)
358{ 375{
359 struct tifm_adapter *fm = pci_get_drvdata(dev); 376 struct tifm_adapter *fm = pci_get_drvdata(dev);
377 int cnt;
360 378
361 fm->eject = tifm_7xx1_dummy_eject; 379 fm->eject = tifm_7xx1_dummy_eject;
362 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 380 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
@@ -365,6 +383,9 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
365 383
366 tifm_remove_adapter(fm); 384 tifm_remove_adapter(fm);
367 385
386 for (cnt = 0; cnt < fm->num_sockets; cnt++)
387 tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt));
388
368 pci_set_drvdata(dev, NULL); 389 pci_set_drvdata(dev, NULL);
369 390
370 iounmap(fm->addr); 391 iounmap(fm->addr);
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 6c97491543db..45b7d53b949c 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -2,10 +2,8 @@
2# MMC subsystem configuration 2# MMC subsystem configuration
3# 3#
4 4
5menu "MMC/SD Card support" 5menuconfig MMC
6 6 tristate "MMC/SD card support"
7config MMC
8 tristate "MMC support"
9 help 7 help
10 MMC is the "multi-media card" bus protocol. 8 MMC is the "multi-media card" bus protocol.
11 9
@@ -19,10 +17,12 @@ config MMC_DEBUG
19 This is an option for use by developers; most people should 17 This is an option for use by developers; most people should
20 say N here. This enables MMC core and driver debugging. 18 say N here. This enables MMC core and driver debugging.
21 19
20if MMC
21
22source "drivers/mmc/core/Kconfig" 22source "drivers/mmc/core/Kconfig"
23 23
24source "drivers/mmc/card/Kconfig" 24source "drivers/mmc/card/Kconfig"
25 25
26source "drivers/mmc/host/Kconfig" 26source "drivers/mmc/host/Kconfig"
27 27
28endmenu 28endif # MMC
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 01a9fd376a1f..9320a8c73239 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -3,11 +3,10 @@
3# 3#
4 4
5comment "MMC/SD Card Drivers" 5comment "MMC/SD Card Drivers"
6 depends MMC
7 6
8config MMC_BLOCK 7config MMC_BLOCK
9 tristate "MMC block device driver" 8 tristate "MMC block device driver"
10 depends on MMC && BLOCK 9 depends on BLOCK
11 default y 10 default y
12 help 11 help
13 Say Y here to enable the MMC block device driver support. 12 Say Y here to enable the MMC block device driver support.
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 94222b9a15ea..ab37a6d9d32a 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -4,7 +4,6 @@
4 4
5config MMC_UNSAFE_RESUME 5config MMC_UNSAFE_RESUME
6 bool "Allow unsafe resume (DANGEROUS)" 6 bool "Allow unsafe resume (DANGEROUS)"
7 depends on MMC != n
8 help 7 help
9 If you say Y here, the MMC layer will assume that all cards 8 If you say Y here, the MMC layer will assume that all cards
10 stayed in their respective slots during the suspend. The 9 stayed in their respective slots during the suspend. The
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 72c7cf4a9f9d..7385acfa1dd9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -500,9 +500,10 @@ void __mmc_release_bus(struct mmc_host *host)
500void mmc_detect_change(struct mmc_host *host, unsigned long delay) 500void mmc_detect_change(struct mmc_host *host, unsigned long delay)
501{ 501{
502#ifdef CONFIG_MMC_DEBUG 502#ifdef CONFIG_MMC_DEBUG
503 mmc_claim_host(host); 503 unsigned long flags;
504 spin_lock_irqsave(&host->lock, flags);
504 BUG_ON(host->removed); 505 BUG_ON(host->removed);
505 mmc_release_host(host); 506 spin_unlock_irqrestore(&host->lock, flags);
506#endif 507#endif
507 508
508 mmc_schedule_delayed_work(&host->detect, delay); 509 mmc_schedule_delayed_work(&host->detect, delay);
@@ -625,9 +626,10 @@ EXPORT_SYMBOL(mmc_add_host);
625void mmc_remove_host(struct mmc_host *host) 626void mmc_remove_host(struct mmc_host *host)
626{ 627{
627#ifdef CONFIG_MMC_DEBUG 628#ifdef CONFIG_MMC_DEBUG
628 mmc_claim_host(host); 629 unsigned long flags;
630 spin_lock_irqsave(&host->lock, flags);
629 host->removed = 1; 631 host->removed = 1;
630 mmc_release_host(host); 632 spin_unlock_irqrestore(&host->lock, flags);
631#endif 633#endif
632 634
633 mmc_flush_scheduled_work(); 635 mmc_flush_scheduled_work();
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index ed4deab2203d..e23082fe88d0 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -3,11 +3,10 @@
3# 3#
4 4
5comment "MMC/SD Host Controller Drivers" 5comment "MMC/SD Host Controller Drivers"
6 depends on MMC
7 6
8config MMC_ARMMMCI 7config MMC_ARMMMCI
9 tristate "ARM AMBA Multimedia Card Interface support" 8 tristate "ARM AMBA Multimedia Card Interface support"
10 depends on ARM_AMBA && MMC 9 depends on ARM_AMBA
11 help 10 help
12 This selects the ARM(R) AMBA(R) PrimeCell Multimedia Card 11 This selects the ARM(R) AMBA(R) PrimeCell Multimedia Card
13 Interface (PL180 and PL181) support. If you have an ARM(R) 12 Interface (PL180 and PL181) support. If you have an ARM(R)
@@ -17,7 +16,7 @@ config MMC_ARMMMCI
17 16
18config MMC_PXA 17config MMC_PXA
19 tristate "Intel PXA25x/26x/27x Multimedia Card Interface support" 18 tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
20 depends on ARCH_PXA && MMC 19 depends on ARCH_PXA
21 help 20 help
22 This selects the Intel(R) PXA(R) Multimedia card Interface. 21 This selects the Intel(R) PXA(R) Multimedia card Interface.
23 If you have a PXA(R) platform with a Multimedia Card slot, 22 If you have a PXA(R) platform with a Multimedia Card slot,
@@ -27,7 +26,7 @@ config MMC_PXA
27 26
28config MMC_SDHCI 27config MMC_SDHCI
29 tristate "Secure Digital Host Controller Interface support (EXPERIMENTAL)" 28 tristate "Secure Digital Host Controller Interface support (EXPERIMENTAL)"
30 depends on PCI && MMC && EXPERIMENTAL 29 depends on PCI && EXPERIMENTAL
31 help 30 help
32 This select the generic Secure Digital Host Controller Interface. 31 This select the generic Secure Digital Host Controller Interface.
33 It is used by manufacturers such as Texas Instruments(R), Ricoh(R) 32 It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
@@ -38,7 +37,7 @@ config MMC_SDHCI
38 37
39config MMC_OMAP 38config MMC_OMAP
40 tristate "TI OMAP Multimedia Card Interface support" 39 tristate "TI OMAP Multimedia Card Interface support"
41 depends on ARCH_OMAP && MMC 40 depends on ARCH_OMAP
42 select TPS65010 if MACH_OMAP_H2 41 select TPS65010 if MACH_OMAP_H2
43 help 42 help
44 This selects the TI OMAP Multimedia card Interface. 43 This selects the TI OMAP Multimedia card Interface.
@@ -49,7 +48,7 @@ config MMC_OMAP
49 48
50config MMC_WBSD 49config MMC_WBSD
51 tristate "Winbond W83L51xD SD/MMC Card Interface support" 50 tristate "Winbond W83L51xD SD/MMC Card Interface support"
52 depends on MMC && ISA_DMA_API 51 depends on ISA_DMA_API
53 help 52 help
54 This selects the Winbond(R) W83L51xD Secure digital and 53 This selects the Winbond(R) W83L51xD Secure digital and
55 Multimedia card Interface. 54 Multimedia card Interface.
@@ -60,7 +59,7 @@ config MMC_WBSD
60 59
61config MMC_AU1X 60config MMC_AU1X
62 tristate "Alchemy AU1XX0 MMC Card Interface support" 61 tristate "Alchemy AU1XX0 MMC Card Interface support"
63 depends on MMC && SOC_AU1200 62 depends on SOC_AU1200
64 help 63 help
65 This selects the AMD Alchemy(R) Multimedia card interface. 64 This selects the AMD Alchemy(R) Multimedia card interface.
66 If you have a Alchemy platform with a MMC slot, say Y or M here. 65 If you have a Alchemy platform with a MMC slot, say Y or M here.
@@ -69,7 +68,7 @@ config MMC_AU1X
69 68
70config MMC_AT91 69config MMC_AT91
71 tristate "AT91 SD/MMC Card Interface support" 70 tristate "AT91 SD/MMC Card Interface support"
72 depends on ARCH_AT91 && MMC 71 depends on ARCH_AT91
73 help 72 help
74 This selects the AT91 MCI controller. 73 This selects the AT91 MCI controller.
75 74
@@ -77,7 +76,7 @@ config MMC_AT91
77 76
78config MMC_IMX 77config MMC_IMX
79 tristate "Motorola i.MX Multimedia Card Interface support" 78 tristate "Motorola i.MX Multimedia Card Interface support"
80 depends on ARCH_IMX && MMC 79 depends on ARCH_IMX
81 help 80 help
82 This selects the Motorola i.MX Multimedia card Interface. 81 This selects the Motorola i.MX Multimedia card Interface.
83 If you have a i.MX platform with a Multimedia Card slot, 82 If you have a i.MX platform with a Multimedia Card slot,
@@ -87,7 +86,7 @@ config MMC_IMX
87 86
88config MMC_TIFM_SD 87config MMC_TIFM_SD
89 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)" 88 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
90 depends on MMC && EXPERIMENTAL && PCI 89 depends on EXPERIMENTAL && PCI
91 select TIFM_CORE 90 select TIFM_CORE
92 help 91 help
93 Say Y here if you want to be able to access MMC/SD cards with 92 Say Y here if you want to be able to access MMC/SD cards with
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 7511f961c67b..8b736e968447 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1021,10 +1021,6 @@ static void tifm_sd_remove(struct tifm_dev *sock)
1021 mmc_remove_host(mmc); 1021 mmc_remove_host(mmc);
1022 dev_dbg(&sock->dev, "after remove\n"); 1022 dev_dbg(&sock->dev, "after remove\n");
1023 1023
1024 /* The meaning of the bit majority in this constant is unknown. */
1025 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
1026 sock->addr + SOCK_CONTROL);
1027
1028 mmc_free_host(mmc); 1024 mmc_free_host(mmc);
1029} 1025}
1030 1026
@@ -1032,14 +1028,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
1032 1028
1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) 1029static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
1034{ 1030{
1035 struct mmc_host *mmc = tifm_get_drvdata(sock); 1031 return mmc_suspend_host(tifm_get_drvdata(sock), state);
1036 int rc;
1037
1038 rc = mmc_suspend_host(mmc, state);
1039 /* The meaning of the bit majority in this constant is unknown. */
1040 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
1041 sock->addr + SOCK_CONTROL);
1042 return rc;
1043} 1032}
1044 1033
1045static int tifm_sd_resume(struct tifm_dev *sock) 1034static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index d990d8141ef5..5fcd8b3631ba 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -60,7 +60,7 @@ config MTD_PHYSMAP_BANKWIDTH
60 (i.e., run-time calling physmap_configure()). 60 (i.e., run-time calling physmap_configure()).
61 61
62config MTD_PHYSMAP_OF 62config MTD_PHYSMAP_OF
63 tristate "Flash device in physical memory map based on OF descirption" 63 tristate "Flash device in physical memory map based on OF description"
64 depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM) 64 depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
65 help 65 help
66 This provides a 'mapping' driver which allows the NOR Flash and 66 This provides a 'mapping' driver which allows the NOR Flash and
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 9f53c655af3a..7b96cd02f82b 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -358,7 +358,7 @@ int __init nettel_init(void)
358 /* Turn other PAR off so the first probe doesn't find it */ 358 /* Turn other PAR off so the first probe doesn't find it */
359 *intel1par = 0; 359 *intel1par = 0;
360 360
361 /* Probe for the the size of the first Intel flash */ 361 /* Probe for the size of the first Intel flash */
362 nettel_intel_map.size = maxsize; 362 nettel_intel_map.size = maxsize;
363 nettel_intel_map.phys = intel0addr; 363 nettel_intel_map.phys = intel0addr;
364 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); 364 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 000794c6caf5..0537fac8de74 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2192,7 +2192,7 @@ static int onenand_check_maf(int manuf)
2192 * @param mtd MTD device structure 2192 * @param mtd MTD device structure
2193 * 2193 *
2194 * OneNAND detection method: 2194 * OneNAND detection method:
2195 * Compare the the values from command with ones from register 2195 * Compare the values from command with ones from register
2196 */ 2196 */
2197static int onenand_probe(struct mtd_info *mtd) 2197static int onenand_probe(struct mtd_info *mtd)
2198{ 2198{
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9588da3a30e7..127f60841b10 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -95,8 +95,7 @@ static int max_interrupt_work = 10;
95#include <asm/io.h> 95#include <asm/io.h>
96#include <asm/irq.h> 96#include <asm/irq.h>
97 97
98static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; 98static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
99static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n";
100 99
101#if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA)) 100#if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA))
102#define EL3_SUSPEND 101#define EL3_SUSPEND
@@ -360,7 +359,7 @@ static int __init el3_common_init(struct net_device *dev)
360 printk(", IRQ %d.\n", dev->irq); 359 printk(", IRQ %d.\n", dev->irq);
361 360
362 if (el3_debug > 0) 361 if (el3_debug > 0)
363 printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB); 362 printk(KERN_INFO "%s", version);
364 return 0; 363 return 0;
365 364
366} 365}
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80924f76dee8..f26ca331615e 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -103,7 +103,7 @@ static int vortex_debug = 1;
103 103
104 104
105static char version[] __devinitdata = 105static char version[] __devinitdata =
106DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n"; 106DRV_NAME ": Donald Becker and others.\n";
107 107
108MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 108MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "); 109MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 18aba838c1ff..82d78ff8399b 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -31,10 +31,8 @@
31 31
32*/ 32*/
33 33
34static const char versionA[] = 34static const char version[] =
35"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n"; 35"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
36static const char versionB[] =
37" http://www.scyld.com/network/atp.html\n";
38 36
39/* The user-configurable values. 37/* The user-configurable values.
40 These may be modified when a driver module is loaded.*/ 38 These may be modified when a driver module is loaded.*/
@@ -324,7 +322,7 @@ static int __init atp_probe1(long ioaddr)
324 322
325#ifndef MODULE 323#ifndef MODULE
326 if (net_debug) 324 if (net_debug)
327 printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB); 325 printk(KERN_INFO "%s", version);
328#endif 326#endif
329 327
330 printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM " 328 printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
@@ -926,7 +924,7 @@ static void set_rx_mode_8012(struct net_device *dev)
926 924
927static int __init atp_init_module(void) { 925static int __init atp_init_module(void) {
928 if (debug) /* Emit version even if no cards detected. */ 926 if (debug) /* Emit version even if no cards detected. */
929 printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB); 927 printk(KERN_INFO "%s", version);
930 return atp_init(); 928 return atp_init();
931} 929}
932 930
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 724bce51f936..223517dcbcfd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3461,7 +3461,7 @@ void bond_unregister_arp(struct bonding *bond)
3461/*---------------------------- Hashing Policies -----------------------------*/ 3461/*---------------------------- Hashing Policies -----------------------------*/
3462 3462
3463/* 3463/*
3464 * Hash for the the output device based upon layer 3 and layer 4 data. If 3464 * Hash for the output device based upon layer 3 and layer 4 data. If
3465 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is 3465 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
3466 * altogether not IP, mimic bond_xmit_hash_policy_l2() 3466 * altogether not IP, mimic bond_xmit_hash_policy_l2()
3467 */ 3467 */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 3a03a74c0609..637ae8f68791 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
1214 int i; 1214 int i;
1215#endif 1215#endif
1216 1216
1217 flush_scheduled_work(); 1217 cancel_work_sync(&adapter->reset_task);
1218 1218
1219 e1000_release_manageability(adapter); 1219 e1000_release_manageability(adapter);
1220 1220
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 39654e1e2bed..47680237f783 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1126,7 +1126,7 @@ static void eepro_tx_timeout (struct net_device *dev)
1126 printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name, 1126 printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
1127 "network cable problem"); 1127 "network cable problem");
1128 /* This is not a duplicate. One message for the console, 1128 /* This is not a duplicate. One message for the console,
1129 one for the the log file */ 1129 one for the log file */
1130 printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name, 1130 printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
1131 "network cable problem"); 1131 "network cable problem");
1132 eepro_complete_selreset(ioaddr); 1132 eepro_complete_selreset(ioaddr);
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 6c267c38df97..9800341956a2 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -28,7 +28,7 @@
28*/ 28*/
29 29
30static const char * const version = 30static const char * const version =
31"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n" 31"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"; 32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33 33
34/* A few user-configurable values that apply to all boards. 34/* A few user-configurable values that apply to all boards.
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 4e3f14c9c717..5e517946f46a 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -93,8 +93,6 @@ static int rx_copybreak;
93static char version[] __devinitdata = 93static char version[] __devinitdata =
94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n"; 94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
95static char version2[] __devinitdata = 95static char version2[] __devinitdata =
96" http://www.scyld.com/network/epic100.html\n";
97static char version3[] __devinitdata =
98" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; 96" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
99 97
100MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 98MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -323,8 +321,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
323#ifndef MODULE 321#ifndef MODULE
324 static int printed_version; 322 static int printed_version;
325 if (!printed_version++) 323 if (!printed_version++)
326 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s", 324 printk (KERN_INFO "%s" KERN_INFO "%s",
327 version, version2, version3); 325 version, version2);
328#endif 326#endif
329 327
330 card_idx++; 328 card_idx++;
@@ -1596,8 +1594,8 @@ static int __init epic_init (void)
1596{ 1594{
1597/* when a module, this is printed whether or not devices are found in probe */ 1595/* when a module, this is printed whether or not devices are found in probe */
1598#ifdef MODULE 1596#ifdef MODULE
1599 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s", 1597 printk (KERN_INFO "%s" KERN_INFO "%s",
1600 version, version2, version3); 1598 version, version2);
1601#endif 1599#endif
1602 1600
1603 return pci_register_driver(&epic_driver); 1601 return pci_register_driver(&epic_driver);
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 6e90619b3b41..36d2c7d4f4d0 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -140,7 +140,7 @@ config BAYCOM_SER_HDX
140 modems that connect to a serial interface. The driver supports the 140 modems that connect to a serial interface. The driver supports the
141 ser12 design in half-duplex mode. This is the old driver. It is 141 ser12 design in half-duplex mode. This is the old driver. It is
142 still provided in case your serial interface chip does not work with 142 still provided in case your serial interface chip does not work with
143 the full-duplex driver. This driver is depreciated. To configure 143 the full-duplex driver. This driver is deprecated. To configure
144 the driver, use the sethdlc utility available in the standard ax25 144 the driver, use the sethdlc utility available in the standard ax25
145 utilities package. For information on the modems, see 145 utilities package. For information on the modems, see
146 <http://www.baycom.de/> and 146 <http://www.baycom.de/> and
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 2ab173d9a0e4..1e67720f1066 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -113,7 +113,7 @@
113/* RxOver overflow in Recv FIFO */ 113/* RxOver overflow in Recv FIFO */
114/* SipRcv received serial gap (or other condition you set) */ 114/* SipRcv received serial gap (or other condition you set) */
115/* Interrupts are enabled by writing a one to the IER register */ 115/* Interrupts are enabled by writing a one to the IER register */
116/* Interrupts are cleared by writting a one to the ISR register */ 116/* Interrupts are cleared by writing a one to the ISR register */
117/* */ 117/* */
118/* 6. The remaining registers: 0x6 and 0x3 appear to be */ 118/* 6. The remaining registers: 0x6 and 0x3 appear to be */
119/* reserved parts of 16 or 32 bit registers; the remainder */ 119/* reserved parts of 16 or 32 bit registers; the remainder */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index f15aebde7b90..52c99d01d568 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -315,7 +315,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
315 * hw - Struct containing variables accessed by shared code 315 * hw - Struct containing variables accessed by shared code
316 * 316 *
317 * Reads the first 64 16 bit words of the EEPROM and sums the values read. 317 * Reads the first 64 16 bit words of the EEPROM and sums the values read.
318 * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is 318 * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
319 * valid. 319 * valid.
320 * 320 *
321 * Returns: 321 * Returns:
diff --git a/drivers/net/meth.h b/drivers/net/meth.h
index 84960dae2a22..ea3b8fc86d1e 100644
--- a/drivers/net/meth.h
+++ b/drivers/net/meth.h
@@ -126,7 +126,7 @@ typedef struct rx_packet {
126 /* Note: when loopback is set this bit becomes collision control. Setting this bit will */ 126 /* Note: when loopback is set this bit becomes collision control. Setting this bit will */
127 /* cause a collision to be reported. */ 127 /* cause a collision to be reported. */
128 128
129 /* Bits 5 and 6 are used to determine the the Destination address filter mode */ 129 /* Bits 5 and 6 are used to determine the Destination address filter mode */
130#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */ 130#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */
131#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */ 131#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */
132#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */ 132#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 223e0e6264ba..4cf0d3fcb519 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -131,7 +131,6 @@ static const char version[] __devinitdata =
131 KERN_INFO DRV_NAME " dp8381x driver, version " 131 KERN_INFO DRV_NAME " dp8381x driver, version "
132 DRV_VERSION ", " DRV_RELDATE "\n" 132 DRV_VERSION ", " DRV_RELDATE "\n"
133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n" 133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
134 KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
135 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; 134 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
136 135
137MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 136MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 589785d1e762..995c0a5d4066 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -63,8 +63,7 @@ static int options[MAX_UNITS];
63 63
64/* These identify the driver base version and may not be removed. */ 64/* These identify the driver base version and may not be removed. */
65static char version[] __devinitdata = 65static char version[] __devinitdata =
66KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n" 66KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n";
67KERN_INFO " http://www.scyld.com/network/ne2k-pci.html\n";
68 67
69#if defined(__powerpc__) 68#if defined(__powerpc__)
70#define inl_le(addr) le32_to_cpu(inl(addr)) 69#define inl_le(addr) le32_to_cpu(inl(addr))
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index eed433d6056a..f71dab347667 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -662,10 +662,10 @@ int phy_stop_interrupts(struct phy_device *phydev)
662 phy_error(phydev); 662 phy_error(phydev);
663 663
664 /* 664 /*
665 * Finish any pending work; we might have been scheduled 665 * Finish any pending work; we might have been scheduled to be called
666 * to be called from keventd ourselves, though. 666 * from keventd ourselves, but cancel_work_sync() handles that.
667 */ 667 */
668 run_scheduled_work(&phydev->phy_queue); 668 cancel_work_sync(&phydev->phy_queue);
669 669
670 free_irq(phydev->irq, phydev); 670 free_irq(phydev->irq, phydev);
671 671
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index f51ba31970aa..e1f912d04043 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -110,8 +110,7 @@ static char *media[MAX_UNITS];
110 110
111/* These identify the driver base version and may not be removed. */ 111/* These identify the driver base version and may not be removed. */
112static char version[] = 112static char version[] =
113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
114KERN_INFO " http://www.scyld.com/network/sundance.html\n";
115 114
116MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 115MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); 116MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e5e901ecd808..923b9c725cc3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3716,10 +3716,8 @@ static void tg3_reset_task(struct work_struct *work)
3716 unsigned int restart_timer; 3716 unsigned int restart_timer;
3717 3717
3718 tg3_full_lock(tp, 0); 3718 tg3_full_lock(tp, 0);
3719 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3720 3719
3721 if (!netif_running(tp->dev)) { 3720 if (!netif_running(tp->dev)) {
3722 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3723 tg3_full_unlock(tp); 3721 tg3_full_unlock(tp);
3724 return; 3722 return;
3725 } 3723 }
@@ -3750,8 +3748,6 @@ static void tg3_reset_task(struct work_struct *work)
3750 mod_timer(&tp->timer, jiffies + 1); 3748 mod_timer(&tp->timer, jiffies + 1);
3751 3749
3752out: 3750out:
3753 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3754
3755 tg3_full_unlock(tp); 3751 tg3_full_unlock(tp);
3756} 3752}
3757 3753
@@ -7390,12 +7386,7 @@ static int tg3_close(struct net_device *dev)
7390{ 7386{
7391 struct tg3 *tp = netdev_priv(dev); 7387 struct tg3 *tp = netdev_priv(dev);
7392 7388
7393 /* Calling flush_scheduled_work() may deadlock because 7389 cancel_work_sync(&tp->reset_task);
7394 * linkwatch_event() may be on the workqueue and it will try to get
7395 * the rtnl_lock which we are holding.
7396 */
7397 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7398 msleep(1);
7399 7390
7400 netif_stop_queue(dev); 7391 netif_stop_queue(dev);
7401 7392
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4d334cf5a243..bd9f4f428e5b 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2228,7 +2228,7 @@ struct tg3 {
2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
2229#define TG3_FLAG_10_100_ONLY 0x01000000 2229#define TG3_FLAG_10_100_ONLY 0x01000000
2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2231#define TG3_FLAG_IN_RESET_TASK 0x04000000 2231
2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000 2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
2234#define TG3_FLAG_SUPPORT_MSI 0x20000000 2234#define TG3_FLAG_SUPPORT_MSI 0x20000000
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 9b08afbd1f65..ea896777bcaf 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -269,7 +269,7 @@ done:
269 This would turn on IM for devices that is not contributing 269 This would turn on IM for devices that is not contributing
270 to backlog congestion with unnecessary latency. 270 to backlog congestion with unnecessary latency.
271 271
272 We monitor the the device RX-ring and have: 272 We monitor the device RX-ring and have:
273 273
274 HW Interrupt Mitigation either ON or OFF. 274 HW Interrupt Mitigation either ON or OFF.
275 275
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index fa440706fb4a..38f3b99716b8 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1021,7 +1021,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1021 np->tx_ring[entry].length |= DescEndRing; 1021 np->tx_ring[entry].length |= DescEndRing;
1022 1022
1023 /* Now acquire the irq spinlock. 1023 /* Now acquire the irq spinlock.
1024 * The difficult race is the the ordering between 1024 * The difficult race is the ordering between
1025 * increasing np->cur_tx and setting DescOwned: 1025 * increasing np->cur_tx and setting DescOwned:
1026 * - if np->cur_tx is increased first the interrupt 1026 * - if np->cur_tx is increased first the interrupt
1027 * handler could consider the packet as transmitted 1027 * handler could consider the packet as transmitted
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 985a1810ca59..2470b1ee33c0 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1043,7 +1043,7 @@ static int enable_promisc(struct xircom_private *card)
1043 1043
1044 1044
1045/* 1045/*
1046link_status() checks the the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. 1046link_status() checks the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what.
1047 1047
1048Must be called in locked state with interrupts disabled 1048Must be called in locked state with interrupts disabled
1049*/ 1049*/
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index f2dd7763cd0b..f72573594121 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -639,7 +639,7 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
639 639
640 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd); 640 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
641 641
642 /* "I feel a presence... another warrior is on the the mesa." 642 /* "I feel a presence... another warrior is on the mesa."
643 */ 643 */
644 wmb(); 644 wmb();
645 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); 645 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 38fac3bbcd82..7d5b8c2cc614 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -149,7 +149,7 @@ static int airport_hard_reset(struct orinoco_private *priv)
149 /* Vitally important. If we don't do this it seems we get an 149 /* Vitally important. If we don't do this it seems we get an
150 * interrupt somewhere during the power cycle, since 150 * interrupt somewhere during the power cycle, since
151 * hw_unavailable is already set it doesn't get ACKed, we get 151 * hw_unavailable is already set it doesn't get ACKed, we get
152 * into an interrupt loop and the the PMU decides to turn us 152 * into an interrupt loop and the PMU decides to turn us
153 * off. */ 153 * off. */
154 disable_irq(dev->irq); 154 disable_irq(dev->irq);
155 155
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 841b3c136ad9..283be4a70524 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -3054,7 +3054,7 @@ static const iw_handler prism54_handler[] = {
3054 (iw_handler) prism54_set_wap, /* SIOCSIWAP */ 3054 (iw_handler) prism54_set_wap, /* SIOCSIWAP */
3055 (iw_handler) prism54_get_wap, /* SIOCGIWAP */ 3055 (iw_handler) prism54_get_wap, /* SIOCGIWAP */
3056 (iw_handler) NULL, /* -- hole -- */ 3056 (iw_handler) NULL, /* -- hole -- */
3057 (iw_handler) NULL, /* SIOCGIWAPLIST depreciated */ 3057 (iw_handler) NULL, /* SIOCGIWAPLIST deprecated */
3058 (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */ 3058 (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */
3059 (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */ 3059 (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */
3060 (iw_handler) prism54_set_essid, /* SIOCSIWESSID */ 3060 (iw_handler) prism54_set_essid, /* SIOCSIWESSID */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index a037b11dac9d..084795355b74 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -115,7 +115,7 @@ isl_upload_firmware(islpci_private *priv)
115 ISL38XX_MEMORY_WINDOW_SIZE : fw_len; 115 ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
116 u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN; 116 u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;
117 117
118 /* set the cards base address for writting the data */ 118 /* set the card's base address for writing the data */
119 isl38xx_w32_flush(device_base, reg, 119 isl38xx_w32_flush(device_base, reg,
120 ISL38XX_DIR_MEM_BASE_REG); 120 ISL38XX_DIR_MEM_BASE_REG);
121 wmb(); /* be paranoid */ 121 wmb(); /* be paranoid */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 67b867f837ca..5740d4d4267c 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -176,7 +176,7 @@ psa_write(struct net_device * dev,
176 volatile u_char __iomem *verify = lp->mem + PSA_ADDR + 176 volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
177 (psaoff(0, psa_comp_number) << 1); 177 (psaoff(0, psa_comp_number) << 1);
178 178
179 /* Authorize writting to PSA */ 179 /* Authorize writing to PSA */
180 hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN); 180 hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN);
181 181
182 while(n-- > 0) 182 while(n-- > 0)
@@ -1676,7 +1676,7 @@ wv_set_frequency(u_long base, /* i/o port of the card */
1676 fee_write(base, 0x60, 1676 fee_write(base, 0x60,
1677 dac, 2); 1677 dac, 2);
1678 1678
1679 /* We now should verify here that the EEprom writting was ok */ 1679 /* We now should verify here that the EEprom writing was ok */
1680 1680
1681 /* ReRead the first area */ 1681 /* ReRead the first area */
1682 fee_read(base, 0x00, 1682 fee_read(base, 0x00,
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 4d1c4905c749..4b9de0093a7b 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -120,7 +120,7 @@
120 * the Wavelan itself (NCR -> AT&T -> Lucent). 120 * the Wavelan itself (NCR -> AT&T -> Lucent).
121 * 121 *
122 * All started with Anders Klemets <klemets@paul.rutgers.edu>, 122 * All started with Anders Klemets <klemets@paul.rutgers.edu>,
123 * writting a Wavelan ISA driver for the MACH microkernel. Girish 123 * writing a Wavelan ISA driver for the MACH microkernel. Girish
124 * Welling <welling@paul.rutgers.edu> had also worked on it. 124 * Welling <welling@paul.rutgers.edu> had also worked on it.
125 * Keith Moore modify this for the Pcmcia hardware. 125 * Keith Moore modify this for the Pcmcia hardware.
126 * 126 *
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 3f4a7cf9efea..f2a90a7fa2d6 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -109,7 +109,6 @@ static int gx_fix;
109/* These identify the driver base version and may not be removed. */ 109/* These identify the driver base version and may not be removed. */
110static char version[] __devinitdata = 110static char version[] __devinitdata =
111KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n" 111KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
112KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
113KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; 112KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
114 113
115MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 114MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3bb7739d26a5..8e58ea3d95c0 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -119,7 +119,7 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
119 * system is in its list of supported devices. Returns the matching 119 * system is in its list of supported devices. Returns the matching
120 * pci_device_id structure or %NULL if there is no match. 120 * pci_device_id structure or %NULL if there is no match.
121 * 121 *
122 * Depreciated, don't use this as it will not catch any dynamic ids 122 * Deprecated, don't use this as it will not catch any dynamic ids
123 * that a driver might want to check for. 123 * that a driver might want to check for.
124 */ 124 */
125const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 125const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 5e439836db2d..76422eded36e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -98,7 +98,7 @@ config RTC_INTF_DEV_UIE_EMUL
98 bool "RTC UIE emulation on dev interface" 98 bool "RTC UIE emulation on dev interface"
99 depends on RTC_INTF_DEV 99 depends on RTC_INTF_DEV
100 help 100 help
101 Provides an emulation for RTC_UIE if the underlaying rtc chip 101 Provides an emulation for RTC_UIE if the underlying rtc chip
102 driver does not expose RTC_UIE ioctls. Those requests generate 102 driver does not expose RTC_UIE ioctls. Those requests generate
103 once-per-second update interrupts, used for synchronization. 103 once-per-second update interrupts, used for synchronization.
104 104
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index bbd5b8b66f42..d6b06ab81188 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -23,7 +23,7 @@
23 23
24/* 24/*
25 * The room for the SCCB (only for writing) is not equal to a pages size 25 * The room for the SCCB (only for writing) is not equal to a pages size
26 * (as it is specified as the maximum size in the the SCLP documentation) 26 * (as it is specified as the maximum size in the SCLP documentation)
27 * because of the additional data structure described above. 27 * because of the additional data structure described above.
28 */ 28 */
29#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) 29#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 29d176036e5c..0b96d49dd636 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -2860,7 +2860,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2860 if (!atomic_read(&queue->set_pci_flags_count)){ 2860 if (!atomic_read(&queue->set_pci_flags_count)){
2861 /* 2861 /*
2862 * there's no outstanding PCI any more, so we 2862 * there's no outstanding PCI any more, so we
2863 * have to request a PCI to be sure the the PCI 2863 * have to request a PCI to be sure that the PCI
2864 * will wake at some time in the future then we 2864 * will wake at some time in the future then we
2865 * can flush packed buffers that might still be 2865 * can flush packed buffers that might still be
2866 * hanging around, which can happen if no 2866 * hanging around, which can happen if no
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1f9554e08013..324899c96efe 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -118,97 +118,32 @@ _zfcp_hex_dump(char *addr, int count)
118 118
119#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF 119#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
120 120
121static int zfcp_reqlist_init(struct zfcp_adapter *adapter) 121static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
122{ 122{
123 int i; 123 int idx;
124 124
125 adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head), 125 adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
126 GFP_KERNEL); 126 GFP_KERNEL);
127
128 if (!adapter->req_list) 127 if (!adapter->req_list)
129 return -ENOMEM; 128 return -ENOMEM;
130 129
131 for (i=0; i<REQUEST_LIST_SIZE; i++) 130 for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
132 INIT_LIST_HEAD(&adapter->req_list[i]); 131 INIT_LIST_HEAD(&adapter->req_list[idx]);
133
134 return 0; 132 return 0;
135} 133}
136 134
137static void zfcp_reqlist_free(struct zfcp_adapter *adapter) 135static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
138{ 136{
139 struct zfcp_fsf_req *request, *tmp;
140 unsigned int i;
141
142 for (i=0; i<REQUEST_LIST_SIZE; i++) {
143 if (list_empty(&adapter->req_list[i]))
144 continue;
145
146 list_for_each_entry_safe(request, tmp,
147 &adapter->req_list[i], list)
148 list_del(&request->list);
149 }
150
151 kfree(adapter->req_list); 137 kfree(adapter->req_list);
152} 138}
153 139
154void zfcp_reqlist_add(struct zfcp_adapter *adapter,
155 struct zfcp_fsf_req *fsf_req)
156{
157 unsigned int i;
158
159 i = fsf_req->req_id % REQUEST_LIST_SIZE;
160 list_add_tail(&fsf_req->list, &adapter->req_list[i]);
161}
162
163void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
164{
165 struct zfcp_fsf_req *request, *tmp;
166 unsigned int i, counter;
167 u64 dbg_tmp[2];
168
169 i = req_id % REQUEST_LIST_SIZE;
170 BUG_ON(list_empty(&adapter->req_list[i]));
171
172 counter = 0;
173 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
174 if (request->req_id == req_id) {
175 dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
176 dbg_tmp[1] = (u64) counter;
177 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
178 list_del(&request->list);
179 break;
180 }
181 counter++;
182 }
183}
184
185struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
186 unsigned long req_id)
187{
188 struct zfcp_fsf_req *request, *tmp;
189 unsigned int i;
190
191 /* 0 is reserved as an invalid req_id */
192 if (req_id == 0)
193 return NULL;
194
195 i = req_id % REQUEST_LIST_SIZE;
196
197 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
198 if (request->req_id == req_id)
199 return request;
200
201 return NULL;
202}
203
204int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) 140int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
205{ 141{
206 unsigned int i; 142 unsigned int idx;
207 143
208 for (i=0; i<REQUEST_LIST_SIZE; i++) 144 for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
209 if (!list_empty(&adapter->req_list[i])) 145 if (!list_empty(&adapter->req_list[idx]))
210 return 0; 146 return 0;
211
212 return 1; 147 return 1;
213} 148}
214 149
@@ -913,6 +848,8 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
913 unit->sysfs_device.release = zfcp_sysfs_unit_release; 848 unit->sysfs_device.release = zfcp_sysfs_unit_release;
914 dev_set_drvdata(&unit->sysfs_device, unit); 849 dev_set_drvdata(&unit->sysfs_device, unit);
915 850
851 init_waitqueue_head(&unit->scsi_scan_wq);
852
916 /* mark unit unusable as long as sysfs registration is not complete */ 853 /* mark unit unusable as long as sysfs registration is not complete */
917 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); 854 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
918 855
@@ -1104,7 +1041,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1104 1041
1105 /* initialize list of fsf requests */ 1042 /* initialize list of fsf requests */
1106 spin_lock_init(&adapter->req_list_lock); 1043 spin_lock_init(&adapter->req_list_lock);
1107 retval = zfcp_reqlist_init(adapter); 1044 retval = zfcp_reqlist_alloc(adapter);
1108 if (retval) { 1045 if (retval) {
1109 ZFCP_LOG_INFO("request list initialization failed\n"); 1046 ZFCP_LOG_INFO("request list initialization failed\n");
1110 goto failed_low_mem_buffers; 1047 goto failed_low_mem_buffers;
@@ -1165,6 +1102,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1165 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); 1102 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
1166 sysfs_failed: 1103 sysfs_failed:
1167 dev_set_drvdata(&ccw_device->dev, NULL); 1104 dev_set_drvdata(&ccw_device->dev, NULL);
1105 zfcp_reqlist_free(adapter);
1168 failed_low_mem_buffers: 1106 failed_low_mem_buffers:
1169 zfcp_free_low_mem_buffers(adapter); 1107 zfcp_free_low_mem_buffers(adapter);
1170 if (qdio_free(ccw_device) != 0) 1108 if (qdio_free(ccw_device) != 0)
@@ -1497,7 +1435,7 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
1497 1435
1498 if (!port || (port->wwpn != (*(wwn_t *) &els_plogi->serv_param.wwpn))) { 1436 if (!port || (port->wwpn != (*(wwn_t *) &els_plogi->serv_param.wwpn))) {
1499 ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexisting port " 1437 ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexisting port "
1500 "with d_id 0x%08x on adapter %s\n", 1438 "with d_id 0x%06x on adapter %s\n",
1501 status_buffer->d_id, 1439 status_buffer->d_id,
1502 zfcp_get_busid_by_adapter(adapter)); 1440 zfcp_get_busid_by_adapter(adapter));
1503 } else { 1441 } else {
@@ -1522,7 +1460,7 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
1522 1460
1523 if (!port || (port->wwpn != els_logo->nport_wwpn)) { 1461 if (!port || (port->wwpn != els_logo->nport_wwpn)) {
1524 ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexisting port " 1462 ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexisting port "
1525 "with d_id 0x%08x on adapter %s\n", 1463 "with d_id 0x%06x on adapter %s\n",
1526 status_buffer->d_id, 1464 status_buffer->d_id,
1527 zfcp_get_busid_by_adapter(adapter)); 1465 zfcp_get_busid_by_adapter(adapter));
1528 } else { 1466 } else {
@@ -1704,7 +1642,7 @@ static void zfcp_ns_gid_pn_handler(unsigned long data)
1704 /* looks like a valid d_id */ 1642 /* looks like a valid d_id */
1705 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 1643 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
1706 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status); 1644 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
1707 ZFCP_LOG_DEBUG("adapter %s: wwpn=0x%016Lx ---> d_id=0x%08x\n", 1645 ZFCP_LOG_DEBUG("adapter %s: wwpn=0x%016Lx ---> d_id=0x%06x\n",
1708 zfcp_get_busid_by_port(port), port->wwpn, port->d_id); 1646 zfcp_get_busid_by_port(port), port->wwpn, port->d_id);
1709 goto out; 1647 goto out;
1710 1648
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 32933ed54b8a..22649639230b 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -637,6 +637,7 @@ do { \
637#define ZFCP_STATUS_UNIT_SHARED 0x00000004 637#define ZFCP_STATUS_UNIT_SHARED 0x00000004
638#define ZFCP_STATUS_UNIT_READONLY 0x00000008 638#define ZFCP_STATUS_UNIT_READONLY 0x00000008
639#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010 639#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
640#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
640 641
641/* FSF request status (this does not have a common part) */ 642/* FSF request status (this does not have a common part) */
642#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000 643#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
@@ -980,6 +981,10 @@ struct zfcp_unit {
980 struct scsi_device *device; /* scsi device struct pointer */ 981 struct scsi_device *device; /* scsi device struct pointer */
981 struct zfcp_erp_action erp_action; /* pending error recovery */ 982 struct zfcp_erp_action erp_action; /* pending error recovery */
982 atomic_t erp_counter; 983 atomic_t erp_counter;
984 wait_queue_head_t scsi_scan_wq; /* can be used to wait until
985 all scsi_scan_target
986 requests have been
987 completed. */
983}; 988};
984 989
985/* FSF request */ 990/* FSF request */
@@ -1085,6 +1090,42 @@ extern void _zfcp_hex_dump(char *, int);
1085#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port)) 1090#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
1086 1091
1087/* 1092/*
1093 * Helper functions for request ID management.
1094 */
1095static inline int zfcp_reqlist_hash(unsigned long req_id)
1096{
1097 return req_id % REQUEST_LIST_SIZE;
1098}
1099
1100static inline void zfcp_reqlist_add(struct zfcp_adapter *adapter,
1101 struct zfcp_fsf_req *fsf_req)
1102{
1103 unsigned int idx;
1104
1105 idx = zfcp_reqlist_hash(fsf_req->req_id);
1106 list_add_tail(&fsf_req->list, &adapter->req_list[idx]);
1107}
1108
1109static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
1110 struct zfcp_fsf_req *fsf_req)
1111{
1112 list_del(&fsf_req->list);
1113}
1114
1115static inline struct zfcp_fsf_req *
1116zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
1117{
1118 struct zfcp_fsf_req *request;
1119 unsigned int idx;
1120
1121 idx = zfcp_reqlist_hash(req_id);
1122 list_for_each_entry(request, &adapter->req_list[idx], list)
1123 if (request->req_id == req_id)
1124 return request;
1125 return NULL;
1126}
1127
1128/*
1088 * functions needed for reference/usage counting 1129 * functions needed for reference/usage counting
1089 */ 1130 */
1090 1131
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c1f2d4b14c2b..aef66bc2b6ca 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -179,7 +179,7 @@ static void zfcp_close_fsf(struct zfcp_adapter *adapter)
179static void zfcp_fsf_request_timeout_handler(unsigned long data) 179static void zfcp_fsf_request_timeout_handler(unsigned long data)
180{ 180{
181 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 181 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
182 zfcp_erp_adapter_reopen(adapter, 0); 182 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
183} 183}
184 184
185void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) 185void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
@@ -342,9 +342,9 @@ zfcp_erp_adisc(struct zfcp_port *port)
342 adisc->wwpn = fc_host_port_name(adapter->scsi_host); 342 adisc->wwpn = fc_host_port_name(adapter->scsi_host);
343 adisc->wwnn = fc_host_node_name(adapter->scsi_host); 343 adisc->wwnn = fc_host_node_name(adapter->scsi_host);
344 adisc->nport_id = fc_host_port_id(adapter->scsi_host); 344 adisc->nport_id = fc_host_port_id(adapter->scsi_host);
345 ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x " 345 ZFCP_LOG_INFO("ADISC request from s_id 0x%06x to d_id 0x%06x "
346 "(wwpn=0x%016Lx, wwnn=0x%016Lx, " 346 "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
347 "hard_nport_id=0x%08x, nport_id=0x%08x)\n", 347 "hard_nport_id=0x%06x, nport_id=0x%06x)\n",
348 adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn, 348 adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
349 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 349 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
350 adisc->nport_id); 350 adisc->nport_id);
@@ -352,7 +352,7 @@ zfcp_erp_adisc(struct zfcp_port *port)
352 retval = zfcp_fsf_send_els(send_els); 352 retval = zfcp_fsf_send_els(send_els);
353 if (retval != 0) { 353 if (retval != 0) {
354 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port " 354 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
355 "0x%08x on adapter %s\n", send_els->d_id, 355 "0x%06x on adapter %s\n", send_els->d_id,
356 zfcp_get_busid_by_adapter(adapter)); 356 zfcp_get_busid_by_adapter(adapter));
357 goto freemem; 357 goto freemem;
358 } 358 }
@@ -398,7 +398,7 @@ zfcp_erp_adisc_handler(unsigned long data)
398 if (send_els->status != 0) { 398 if (send_els->status != 0) {
399 ZFCP_LOG_NORMAL("ELS request rejected/timed out, " 399 ZFCP_LOG_NORMAL("ELS request rejected/timed out, "
400 "force physical port reopen " 400 "force physical port reopen "
401 "(adapter %s, port d_id=0x%08x)\n", 401 "(adapter %s, port d_id=0x%06x)\n",
402 zfcp_get_busid_by_adapter(adapter), d_id); 402 zfcp_get_busid_by_adapter(adapter), d_id);
403 debug_text_event(adapter->erp_dbf, 3, "forcreop"); 403 debug_text_event(adapter->erp_dbf, 3, "forcreop");
404 if (zfcp_erp_port_forced_reopen(port, 0)) 404 if (zfcp_erp_port_forced_reopen(port, 0))
@@ -411,9 +411,9 @@ zfcp_erp_adisc_handler(unsigned long data)
411 411
412 adisc = zfcp_sg_to_address(send_els->resp); 412 adisc = zfcp_sg_to_address(send_els->resp);
413 413
414 ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id " 414 ZFCP_LOG_INFO("ADISC response from d_id 0x%06x to s_id "
415 "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, " 415 "0x%06x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
416 "hard_nport_id=0x%08x, nport_id=0x%08x)\n", 416 "hard_nport_id=0x%06x, nport_id=0x%06x)\n",
417 d_id, fc_host_port_id(adapter->scsi_host), 417 d_id, fc_host_port_id(adapter->scsi_host),
418 (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn, 418 (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
419 adisc->hard_nport_id, adisc->nport_id); 419 adisc->hard_nport_id, adisc->nport_id);
@@ -847,8 +847,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
847 if (erp_action->fsf_req) { 847 if (erp_action->fsf_req) {
848 /* take lock to ensure that request is not deleted meanwhile */ 848 /* take lock to ensure that request is not deleted meanwhile */
849 spin_lock(&adapter->req_list_lock); 849 spin_lock(&adapter->req_list_lock);
850 if (zfcp_reqlist_ismember(adapter, 850 if (zfcp_reqlist_find(adapter, erp_action->fsf_req->req_id)) {
851 erp_action->fsf_req->req_id)) {
852 /* fsf_req still exists */ 851 /* fsf_req still exists */
853 debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); 852 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
854 debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req, 853 debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req,
@@ -1377,7 +1376,7 @@ zfcp_erp_port_failed(struct zfcp_port *port)
1377 1376
1378 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) 1377 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
1379 ZFCP_LOG_NORMAL("port erp failed (adapter %s, " 1378 ZFCP_LOG_NORMAL("port erp failed (adapter %s, "
1380 "port d_id=0x%08x)\n", 1379 "port d_id=0x%06x)\n",
1381 zfcp_get_busid_by_port(port), port->d_id); 1380 zfcp_get_busid_by_port(port), port->d_id);
1382 else 1381 else
1383 ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n", 1382 ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n",
@@ -1591,6 +1590,62 @@ zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result)
1591 return result; 1590 return result;
1592} 1591}
1593 1592
1593struct zfcp_erp_add_work {
1594 struct zfcp_unit *unit;
1595 struct work_struct work;
1596};
1597
1598/**
1599 * zfcp_erp_scsi_scan
 1600 * @work: pointer to the work_struct embedded in a struct zfcp_erp_add_work
1601 *
1602 * Registers a logical unit with the SCSI stack.
1603 */
1604static void zfcp_erp_scsi_scan(struct work_struct *work)
1605{
1606 struct zfcp_erp_add_work *p =
1607 container_of(work, struct zfcp_erp_add_work, work);
1608 struct zfcp_unit *unit = p->unit;
1609 struct fc_rport *rport = unit->port->rport;
1610 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1611 unit->scsi_lun, 0);
1612 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1613 wake_up(&unit->scsi_scan_wq);
1614 zfcp_unit_put(unit);
1615 kfree(p);
1616}
1617
1618/**
1619 * zfcp_erp_schedule_work
1620 * @unit: pointer to unit which should be registered with SCSI stack
1621 *
1622 * Schedules work which registers a unit with the SCSI stack
1623 */
1624static void
1625zfcp_erp_schedule_work(struct zfcp_unit *unit)
1626{
1627 struct zfcp_erp_add_work *p;
1628
1629 p = kmalloc(sizeof(*p), GFP_KERNEL);
1630 if (!p) {
1631 ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
1632 "the FCP-LUN 0x%Lx connected to "
1633 "the port with WWPN 0x%Lx connected to "
1634 "the adapter %s with the SCSI stack.\n",
1635 unit->fcp_lun,
1636 unit->port->wwpn,
1637 zfcp_get_busid_by_unit(unit));
1638 return;
1639 }
1640
1641 zfcp_unit_get(unit);
1642 memset(p, 0, sizeof(*p));
1643 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1644 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1645 p->unit = unit;
1646 schedule_work(&p->work);
1647}
1648
1594/* 1649/*
1595 * function: 1650 * function:
1596 * 1651 *
@@ -2401,7 +2456,7 @@ zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action)
2401 retval = ZFCP_ERP_FAILED; 2456 retval = ZFCP_ERP_FAILED;
2402 } 2457 }
2403 } else { 2458 } else {
2404 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%08x -> " 2459 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> "
2405 "trying open\n", port->wwpn, port->d_id); 2460 "trying open\n", port->wwpn, port->d_id);
2406 retval = zfcp_erp_port_strategy_open_port(erp_action); 2461 retval = zfcp_erp_port_strategy_open_port(erp_action);
2407 } 2462 }
@@ -2441,7 +2496,7 @@ zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *erp_action)
2441 case ZFCP_ERP_STEP_UNINITIALIZED: 2496 case ZFCP_ERP_STEP_UNINITIALIZED:
2442 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: 2497 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2443 case ZFCP_ERP_STEP_PORT_CLOSING: 2498 case ZFCP_ERP_STEP_PORT_CLOSING:
2444 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%08x -> trying open\n", 2499 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> trying open\n",
2445 port->wwpn, port->d_id); 2500 port->wwpn, port->d_id);
2446 retval = zfcp_erp_port_strategy_open_port(erp_action); 2501 retval = zfcp_erp_port_strategy_open_port(erp_action);
2447 break; 2502 break;
@@ -3092,9 +3147,9 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3092 && port->rport) { 3147 && port->rport) {
3093 atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED, 3148 atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
3094 &unit->status); 3149 &unit->status);
3095 scsi_scan_target(&port->rport->dev, 0, 3150 if (atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING,
3096 port->rport->scsi_target_id, 3151 &unit->status) == 0)
3097 unit->scsi_lun, 0); 3152 zfcp_erp_schedule_work(unit);
3098 } 3153 }
3099 zfcp_unit_put(unit); 3154 zfcp_unit_put(unit);
3100 break; 3155 break;
@@ -3121,7 +3176,7 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3121 zfcp_get_busid_by_port(port), 3176 zfcp_get_busid_by_port(port),
3122 port->wwpn); 3177 port->wwpn);
3123 else { 3178 else {
3124 scsi_flush_work(adapter->scsi_host); 3179 scsi_target_unblock(&port->rport->dev);
3125 port->rport->maxframe_size = port->maxframe_size; 3180 port->rport->maxframe_size = port->maxframe_size;
3126 port->rport->supported_classes = 3181 port->rport->supported_classes =
3127 port->supported_classes; 3182 port->supported_classes;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 01386ac688a2..991d45667a44 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -184,10 +184,6 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
184 unsigned long); 184 unsigned long);
185extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 185extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
186 struct scsi_cmnd *); 186 struct scsi_cmnd *);
187extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
188extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
189extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
190 unsigned long);
191extern int zfcp_reqlist_isempty(struct zfcp_adapter *); 187extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
192 188
193#endif /* ZFCP_EXT_H */ 189#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4c0a59afd5c8..a8b02542ac2d 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -828,7 +828,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
828 828
829 if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) { 829 if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) {
830 ZFCP_LOG_NORMAL("bug: Reopen port indication received for" 830 ZFCP_LOG_NORMAL("bug: Reopen port indication received for"
831 "nonexisting port with d_id 0x%08x on " 831 "nonexisting port with d_id 0x%06x on "
832 "adapter %s. Ignored.\n", 832 "adapter %s. Ignored.\n",
833 status_buffer->d_id & ZFCP_DID_MASK, 833 status_buffer->d_id & ZFCP_DID_MASK,
834 zfcp_get_busid_by_adapter(adapter)); 834 zfcp_get_busid_by_adapter(adapter));
@@ -853,7 +853,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
853 &status_buffer->status_subtype, sizeof (u32)); 853 &status_buffer->status_subtype, sizeof (u32));
854 ZFCP_LOG_NORMAL("bug: Undefined status subtype received " 854 ZFCP_LOG_NORMAL("bug: Undefined status subtype received "
855 "for a reopen indication on port with " 855 "for a reopen indication on port with "
856 "d_id 0x%08x on the adapter %s. " 856 "d_id 0x%06x on the adapter %s. "
857 "Ignored. (debug info 0x%x)\n", 857 "Ignored. (debug info 0x%x)\n",
858 status_buffer->d_id, 858 status_buffer->d_id,
859 zfcp_get_busid_by_adapter(adapter), 859 zfcp_get_busid_by_adapter(adapter),
@@ -1156,7 +1156,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1156 } 1156 }
1157 1157
1158 ZFCP_LOG_DEBUG("Abort FCP Command request initiated " 1158 ZFCP_LOG_DEBUG("Abort FCP Command request initiated "
1159 "(adapter%s, port d_id=0x%08x, " 1159 "(adapter%s, port d_id=0x%06x, "
1160 "unit x%016Lx, old_req_id=0x%lx)\n", 1160 "unit x%016Lx, old_req_id=0x%lx)\n",
1161 zfcp_get_busid_by_adapter(adapter), 1161 zfcp_get_busid_by_adapter(adapter),
1162 unit->port->d_id, 1162 unit->port->d_id,
@@ -1554,7 +1554,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1554 1554
1555 case FSF_ACCESS_DENIED: 1555 case FSF_ACCESS_DENIED:
1556 ZFCP_LOG_NORMAL("access denied, cannot send generic service " 1556 ZFCP_LOG_NORMAL("access denied, cannot send generic service "
1557 "command (adapter %s, port d_id=0x%08x)\n", 1557 "command (adapter %s, port d_id=0x%06x)\n",
1558 zfcp_get_busid_by_port(port), port->d_id); 1558 zfcp_get_busid_by_port(port), port->d_id);
1559 for (counter = 0; counter < 2; counter++) { 1559 for (counter = 0; counter < 2; counter++) {
1560 subtable = header->fsf_status_qual.halfword[counter * 2]; 1560 subtable = header->fsf_status_qual.halfword[counter * 2];
@@ -1576,7 +1576,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1576 1576
1577 case FSF_GENERIC_COMMAND_REJECTED: 1577 case FSF_GENERIC_COMMAND_REJECTED:
1578 ZFCP_LOG_INFO("generic service command rejected " 1578 ZFCP_LOG_INFO("generic service command rejected "
1579 "(adapter %s, port d_id=0x%08x)\n", 1579 "(adapter %s, port d_id=0x%06x)\n",
1580 zfcp_get_busid_by_port(port), port->d_id); 1580 zfcp_get_busid_by_port(port), port->d_id);
1581 ZFCP_LOG_INFO("status qualifier:\n"); 1581 ZFCP_LOG_INFO("status qualifier:\n");
1582 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, 1582 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
@@ -1602,7 +1602,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1602 1602
1603 case FSF_PORT_BOXED: 1603 case FSF_PORT_BOXED:
1604 ZFCP_LOG_INFO("port needs to be reopened " 1604 ZFCP_LOG_INFO("port needs to be reopened "
1605 "(adapter %s, port d_id=0x%08x)\n", 1605 "(adapter %s, port d_id=0x%06x)\n",
1606 zfcp_get_busid_by_port(port), port->d_id); 1606 zfcp_get_busid_by_port(port), port->d_id);
1607 debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed"); 1607 debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed");
1608 zfcp_erp_port_boxed(port); 1608 zfcp_erp_port_boxed(port);
@@ -1683,7 +1683,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1683 NULL, &lock_flags, &fsf_req); 1683 NULL, &lock_flags, &fsf_req);
1684 if (ret < 0) { 1684 if (ret < 0) {
1685 ZFCP_LOG_INFO("error: creation of ELS request failed " 1685 ZFCP_LOG_INFO("error: creation of ELS request failed "
1686 "(adapter %s, port d_id: 0x%08x)\n", 1686 "(adapter %s, port d_id: 0x%06x)\n",
1687 zfcp_get_busid_by_adapter(adapter), d_id); 1687 zfcp_get_busid_by_adapter(adapter), d_id);
1688 goto failed_req; 1688 goto failed_req;
1689 } 1689 }
@@ -1708,7 +1708,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1708 ZFCP_MAX_SBALS_PER_ELS_REQ); 1708 ZFCP_MAX_SBALS_PER_ELS_REQ);
1709 if (bytes <= 0) { 1709 if (bytes <= 0) {
1710 ZFCP_LOG_INFO("error: creation of ELS request failed " 1710 ZFCP_LOG_INFO("error: creation of ELS request failed "
1711 "(adapter %s, port d_id: 0x%08x)\n", 1711 "(adapter %s, port d_id: 0x%06x)\n",
1712 zfcp_get_busid_by_adapter(adapter), d_id); 1712 zfcp_get_busid_by_adapter(adapter), d_id);
1713 if (bytes == 0) { 1713 if (bytes == 0) {
1714 ret = -ENOMEM; 1714 ret = -ENOMEM;
@@ -1725,7 +1725,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1725 ZFCP_MAX_SBALS_PER_ELS_REQ); 1725 ZFCP_MAX_SBALS_PER_ELS_REQ);
1726 if (bytes <= 0) { 1726 if (bytes <= 0) {
1727 ZFCP_LOG_INFO("error: creation of ELS request failed " 1727 ZFCP_LOG_INFO("error: creation of ELS request failed "
1728 "(adapter %s, port d_id: 0x%08x)\n", 1728 "(adapter %s, port d_id: 0x%06x)\n",
1729 zfcp_get_busid_by_adapter(adapter), d_id); 1729 zfcp_get_busid_by_adapter(adapter), d_id);
1730 if (bytes == 0) { 1730 if (bytes == 0) {
1731 ret = -ENOMEM; 1731 ret = -ENOMEM;
@@ -1739,7 +1739,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1739 /* reject request */ 1739 /* reject request */
1740 ZFCP_LOG_INFO("error: microcode does not support chained SBALs" 1740 ZFCP_LOG_INFO("error: microcode does not support chained SBALs"
1741 ", ELS request too big (adapter %s, " 1741 ", ELS request too big (adapter %s, "
1742 "port d_id: 0x%08x)\n", 1742 "port d_id: 0x%06x)\n",
1743 zfcp_get_busid_by_adapter(adapter), d_id); 1743 zfcp_get_busid_by_adapter(adapter), d_id);
1744 ret = -EOPNOTSUPP; 1744 ret = -EOPNOTSUPP;
1745 goto failed_send; 1745 goto failed_send;
@@ -1760,13 +1760,13 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1760 ret = zfcp_fsf_req_send(fsf_req); 1760 ret = zfcp_fsf_req_send(fsf_req);
1761 if (ret) { 1761 if (ret) {
1762 ZFCP_LOG_DEBUG("error: initiation of ELS request failed " 1762 ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
1763 "(adapter %s, port d_id: 0x%08x)\n", 1763 "(adapter %s, port d_id: 0x%06x)\n",
1764 zfcp_get_busid_by_adapter(adapter), d_id); 1764 zfcp_get_busid_by_adapter(adapter), d_id);
1765 goto failed_send; 1765 goto failed_send;
1766 } 1766 }
1767 1767
1768 ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: " 1768 ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: "
1769 "0x%08x)\n", zfcp_get_busid_by_adapter(adapter), d_id); 1769 "0x%06x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
1770 goto out; 1770 goto out;
1771 1771
1772 failed_send: 1772 failed_send:
@@ -1859,7 +1859,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1859 case FSF_ELS_COMMAND_REJECTED: 1859 case FSF_ELS_COMMAND_REJECTED:
1860 ZFCP_LOG_INFO("ELS has been rejected because command filter " 1860 ZFCP_LOG_INFO("ELS has been rejected because command filter "
1861 "prohibited sending " 1861 "prohibited sending "
1862 "(adapter: %s, port d_id: 0x%08x)\n", 1862 "(adapter: %s, port d_id: 0x%06x)\n",
1863 zfcp_get_busid_by_adapter(adapter), d_id); 1863 zfcp_get_busid_by_adapter(adapter), d_id);
1864 1864
1865 break; 1865 break;
@@ -1907,7 +1907,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1907 1907
1908 case FSF_ACCESS_DENIED: 1908 case FSF_ACCESS_DENIED:
1909 ZFCP_LOG_NORMAL("access denied, cannot send ELS command " 1909 ZFCP_LOG_NORMAL("access denied, cannot send ELS command "
1910 "(adapter %s, port d_id=0x%08x)\n", 1910 "(adapter %s, port d_id=0x%06x)\n",
1911 zfcp_get_busid_by_adapter(adapter), d_id); 1911 zfcp_get_busid_by_adapter(adapter), d_id);
1912 for (counter = 0; counter < 2; counter++) { 1912 for (counter = 0; counter < 2; counter++) {
1913 subtable = header->fsf_status_qual.halfword[counter * 2]; 1913 subtable = header->fsf_status_qual.halfword[counter * 2];
@@ -2070,7 +2070,7 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2070 ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n" 2070 ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n"
2071 "WWNN 0x%016Lx, " 2071 "WWNN 0x%016Lx, "
2072 "WWPN 0x%016Lx, " 2072 "WWPN 0x%016Lx, "
2073 "S_ID 0x%08x,\n" 2073 "S_ID 0x%06x,\n"
2074 "adapter version 0x%x, " 2074 "adapter version 0x%x, "
2075 "LIC version 0x%x, " 2075 "LIC version 0x%x, "
2076 "FC link speed %d Gb/s\n", 2076 "FC link speed %d Gb/s\n",
@@ -3043,6 +3043,7 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
3043 queue_designator = &header->fsf_status_qual.fsf_queue_designator; 3043 queue_designator = &header->fsf_status_qual.fsf_queue_designator;
3044 3044
3045 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 3045 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
3046 ZFCP_STATUS_COMMON_ACCESS_BOXED |
3046 ZFCP_STATUS_UNIT_SHARED | 3047 ZFCP_STATUS_UNIT_SHARED |
3047 ZFCP_STATUS_UNIT_READONLY, 3048 ZFCP_STATUS_UNIT_READONLY,
3048 &unit->status); 3049 &unit->status);
@@ -4645,23 +4646,22 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4645 fsf_req->adapter = adapter; 4646 fsf_req->adapter = adapter;
4646 fsf_req->fsf_command = fsf_cmd; 4647 fsf_req->fsf_command = fsf_cmd;
4647 INIT_LIST_HEAD(&fsf_req->list); 4648 INIT_LIST_HEAD(&fsf_req->list);
4648
4649 /* this is serialized (we are holding req_queue-lock of adapter */
4650 if (adapter->req_no == 0)
4651 adapter->req_no++;
4652 fsf_req->req_id = adapter->req_no++;
4653
4654 init_timer(&fsf_req->timer); 4649 init_timer(&fsf_req->timer);
4655 zfcp_fsf_req_qtcb_init(fsf_req);
4656 4650
4657 /* initialize waitqueue which may be used to wait on 4651 /* initialize waitqueue which may be used to wait on
4658 this request completion */ 4652 this request completion */
4659 init_waitqueue_head(&fsf_req->completion_wq); 4653 init_waitqueue_head(&fsf_req->completion_wq);
4660 4654
4661 ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags); 4655 ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags);
4662 if(ret < 0) { 4656 if (ret < 0)
4663 goto failed_sbals; 4657 goto failed_sbals;
4664 } 4658
4659 /* this is serialized (we are holding req_queue-lock of adapter) */
4660 if (adapter->req_no == 0)
4661 adapter->req_no++;
4662 fsf_req->req_id = adapter->req_no++;
4663
4664 zfcp_fsf_req_qtcb_init(fsf_req);
4665 4665
4666 /* 4666 /*
4667 * We hold queue_lock here. Check if QDIOUP is set and let request fail 4667 * We hold queue_lock here. Check if QDIOUP is set and let request fail
@@ -4788,7 +4788,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
4788 retval = -EIO; 4788 retval = -EIO;
4789 del_timer(&fsf_req->timer); 4789 del_timer(&fsf_req->timer);
4790 spin_lock(&adapter->req_list_lock); 4790 spin_lock(&adapter->req_list_lock);
4791 zfcp_reqlist_remove(adapter, fsf_req->req_id); 4791 zfcp_reqlist_remove(adapter, fsf_req);
4792 spin_unlock(&adapter->req_list_lock); 4792 spin_unlock(&adapter->req_list_lock);
4793 /* undo changes in request queue made for this request */ 4793 /* undo changes in request queue made for this request */
4794 zfcp_qdio_zero_sbals(req_queue->buffer, 4794 zfcp_qdio_zero_sbals(req_queue->buffer,
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 1e12a78e8edd..bdf5782b8a7a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -222,7 +222,7 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
 222 * Since we have been using this adapter, it is safe to assume 222 * Since we have been using this adapter, it is safe to assume
223 * that it is not failed but recoverable. The card seems to 223 * that it is not failed but recoverable. The card seems to
224 * report link-up events by self-initiated queue shutdown. 224 * report link-up events by self-initiated queue shutdown.
225 * That is why we need to clear the the link-down flag 225 * That is why we need to clear the link-down flag
226 * which is set again in case we have missed by a mile. 226 * which is set again in case we have missed by a mile.
227 */ 227 */
228 zfcp_erp_adapter_reopen( 228 zfcp_erp_adapter_reopen(
@@ -283,10 +283,10 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
283} 283}
284 284
285/** 285/**
286 * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status 286 * zfcp_qdio_reqid_check - checks for valid reqids.
287 */ 287 */
288static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, 288static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
289 unsigned long req_id) 289 unsigned long req_id)
290{ 290{
291 struct zfcp_fsf_req *fsf_req; 291 struct zfcp_fsf_req *fsf_req;
292 unsigned long flags; 292 unsigned long flags;
@@ -294,23 +294,22 @@ static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
294 debug_long_event(adapter->erp_dbf, 4, req_id); 294 debug_long_event(adapter->erp_dbf, 4, req_id);
295 295
296 spin_lock_irqsave(&adapter->req_list_lock, flags); 296 spin_lock_irqsave(&adapter->req_list_lock, flags);
297 fsf_req = zfcp_reqlist_ismember(adapter, req_id); 297 fsf_req = zfcp_reqlist_find(adapter, req_id);
298 298
299 if (!fsf_req) { 299 if (!fsf_req)
300 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 300 /*
301 ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id); 301 * Unknown request means that we have potentially memory
302 zfcp_erp_adapter_reopen(adapter, 0); 302 * corruption and must stop the machine immediatly.
303 return -EINVAL; 303 */
304 } 304 panic("error: unknown request id (%ld) on adapter %s.\n",
305 req_id, zfcp_get_busid_by_adapter(adapter));
305 306
306 zfcp_reqlist_remove(adapter, req_id); 307 zfcp_reqlist_remove(adapter, fsf_req);
307 atomic_dec(&adapter->reqs_active); 308 atomic_dec(&adapter->reqs_active);
308 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 309 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
309 310
310 /* finish the FSF request */ 311 /* finish the FSF request */
311 zfcp_fsf_req_complete(fsf_req); 312 zfcp_fsf_req_complete(fsf_req);
312
313 return 0;
314} 313}
315 314
316/* 315/*
@@ -374,27 +373,9 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
374 373
375 /* look for QDIO request identifiers in SB */ 374 /* look for QDIO request identifiers in SB */
376 buffere = &buffer->element[buffere_index]; 375 buffere = &buffer->element[buffere_index];
377 retval = zfcp_qdio_reqid_check(adapter, 376 zfcp_qdio_reqid_check(adapter,
378 (unsigned long) buffere->addr); 377 (unsigned long) buffere->addr);
379 378
380 if (retval) {
381 ZFCP_LOG_NORMAL("bug: unexpected inbound "
382 "packet on adapter %s "
383 "(reqid=0x%lx, "
384 "first_element=%d, "
385 "elements_processed=%d)\n",
386 zfcp_get_busid_by_adapter(adapter),
387 (unsigned long) buffere->addr,
388 first_element,
389 elements_processed);
390 ZFCP_LOG_NORMAL("hex dump of inbound buffer "
391 "at address %p "
392 "(buffer_index=%d, "
393 "buffere_index=%d)\n", buffer,
394 buffer_index, buffere_index);
395 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
396 (char *) buffer, SBAL_SIZE);
397 }
398 /* 379 /*
399 * A single used SBALE per inbound SBALE has been 380 * A single used SBALE per inbound SBALE has been
400 * implemented by QDIO so far. Hope they will 381 * implemented by QDIO so far. Hope they will
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 99db02062c3b..16e2d64658af 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -22,6 +22,7 @@
22#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI 22#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
23 23
24#include "zfcp_ext.h" 24#include "zfcp_ext.h"
25#include <asm/atomic.h>
25 26
26static void zfcp_scsi_slave_destroy(struct scsi_device *sdp); 27static void zfcp_scsi_slave_destroy(struct scsi_device *sdp);
27static int zfcp_scsi_slave_alloc(struct scsi_device *sdp); 28static int zfcp_scsi_slave_alloc(struct scsi_device *sdp);
@@ -179,6 +180,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
179 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 180 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
180 181
181 if (unit) { 182 if (unit) {
183 zfcp_erp_wait(unit->port->adapter);
184 wait_event(unit->scsi_scan_wq,
185 atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING,
186 &unit->status) == 0);
182 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); 187 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
183 sdpnt->hostdata = NULL; 188 sdpnt->hostdata = NULL;
184 unit->device = NULL; 189 unit->device = NULL;
@@ -402,8 +407,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
402 407
403 /* Check whether corresponding fsf_req is still pending */ 408 /* Check whether corresponding fsf_req is still pending */
404 spin_lock(&adapter->req_list_lock); 409 spin_lock(&adapter->req_list_lock);
405 fsf_req = zfcp_reqlist_ismember(adapter, (unsigned long) 410 fsf_req = zfcp_reqlist_find(adapter,
406 scpnt->host_scribble); 411 (unsigned long) scpnt->host_scribble);
407 spin_unlock(&adapter->req_list_lock); 412 spin_unlock(&adapter->req_list_lock);
408 if (!fsf_req) { 413 if (!fsf_req) {
409 write_unlock_irqrestore(&adapter->abort_lock, flags); 414 write_unlock_irqrestore(&adapter->abort_lock, flags);
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 74b999d77bbf..4fab0c23814c 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -156,7 +156,7 @@ static unsigned short get_pins(unsigned minor)
156#define BPP_ICR 0x18 156#define BPP_ICR 0x18
157#define BPP_SIZE 0x1A 157#define BPP_SIZE 0x1A
158 158
159/* BPP_CSR. Bits of type RW1 are cleared with writting '1'. */ 159/* BPP_CSR. Bits of type RW1 are cleared with writing '1'. */
160#define P_DEV_ID_MASK 0xf0000000 /* R */ 160#define P_DEV_ID_MASK 0xf0000000 /* R */
161#define P_DEV_ID_ZEBRA 0x40000000 161#define P_DEV_ID_ZEBRA 0x40000000
162#define P_DEV_ID_L64854 0xa0000000 /* == NCR 89C100+89C105. Pity. */ 162#define P_DEV_ID_L64854 0xa0000000 /* == NCR 89C100+89C105. Pity. */
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 33682ce96a5d..3009ad8c4073 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -387,12 +387,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
387 * Ok now init the communication subsystem 387 * Ok now init the communication subsystem
388 */ 388 */
389 389
390 dev->queues = kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL); 390 dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
391 if (dev->queues == NULL) { 391 if (dev->queues == NULL) {
392 printk(KERN_ERR "Error could not allocate comm region.\n"); 392 printk(KERN_ERR "Error could not allocate comm region.\n");
393 return NULL; 393 return NULL;
394 } 394 }
395 memset(dev->queues, 0, sizeof(struct aac_queue_block));
396 395
397 if (aac_comm_init(dev)<0){ 396 if (aac_comm_init(dev)<0){
398 kfree(dev->queues); 397 kfree(dev->queues);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 5824a757a753..9aca57eda943 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1223,13 +1223,11 @@ int aac_check_health(struct aac_dev * aac)
1223 * Warning: no sleep allowed while 1223 * Warning: no sleep allowed while
1224 * holding spinlock 1224 * holding spinlock
1225 */ 1225 */
1226 hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); 1226 hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1227 fib = kmalloc(sizeof(struct fib), GFP_ATOMIC); 1227 fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1228 if (fib && hw_fib) { 1228 if (fib && hw_fib) {
1229 struct aac_aifcmd * aif; 1229 struct aac_aifcmd * aif;
1230 1230
1231 memset(hw_fib, 0, sizeof(struct hw_fib));
1232 memset(fib, 0, sizeof(struct fib));
1233 fib->hw_fib_va = hw_fib; 1231 fib->hw_fib_va = hw_fib;
1234 fib->dev = aac; 1232 fib->dev = aac;
1235 aac_fib_init(fib); 1233 aac_fib_init(fib);
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 42c7dcda6d9b..fcd25f7d0bc6 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -248,16 +248,14 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
248 * manage the linked lists. 248 * manage the linked lists.
249 */ 249 */
250 if ((!dev->aif_thread) 250 if ((!dev->aif_thread)
251 || (!(fib = kmalloc(sizeof(struct fib),GFP_ATOMIC)))) 251 || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
252 return 1; 252 return 1;
253 if (!(hw_fib = kmalloc(sizeof(struct hw_fib),GFP_ATOMIC))) { 253 if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
254 kfree (fib); 254 kfree (fib);
255 return 1; 255 return 1;
256 } 256 }
257 memset(hw_fib, 0, sizeof(struct hw_fib));
258 memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) + 257 memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) +
259 (index & ~0x00000002L)), sizeof(struct hw_fib)); 258 (index & ~0x00000002L)), sizeof(struct hw_fib));
260 memset(fib, 0, sizeof(struct fib));
261 INIT_LIST_HEAD(&fib->fiblink); 259 INIT_LIST_HEAD(&fib->fiblink);
262 fib->type = FSAFS_NTC_FIB_CONTEXT; 260 fib->type = FSAFS_NTC_FIB_CONTEXT;
263 fib->size = sizeof(struct fib); 261 fib->size = sizeof(struct fib);
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 0c71315cbf1a..291cd14f4e98 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -539,8 +539,10 @@ int _aac_rx_init(struct aac_dev *dev)
539 } 539 }
540 540
541 /* Failure to reset here is an option ... */ 541 /* Failure to reset here is an option ... */
542 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
543 dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
542 dev->OIMR = status = rx_readb (dev, MUnit.OIMR); 544 dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
543 if ((((status & 0xff) != 0xff) || reset_devices) && 545 if ((((status & 0x0c) != 0x0c) || reset_devices) &&
544 !aac_rx_restart_adapter(dev, 0)) 546 !aac_rx_restart_adapter(dev, 0))
545 ++restart; 547 ++restart;
546 /* 548 /*
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 8d72bbae96ad..0bada0028aa0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -966,7 +966,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
966 | AHD_BUSFREEREV_BUG; 966 | AHD_BUSFREEREV_BUG;
967 ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG; 967 ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG;
968 968
969 /* If the user requested the the SLOWCRC bit to be set. */ 969 /* If the user requested that the SLOWCRC bit to be set. */
970 if (aic79xx_slowcrc) 970 if (aic79xx_slowcrc)
971 ahd->features |= AHD_AIC79XXB_SLOWCRC; 971 ahd->features |= AHD_AIC79XXB_SLOWCRC;
972 972
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
index e6b70123940c..e78ce0fa44d2 100644
--- a/drivers/scsi/aic94xx/Makefile
+++ b/drivers/scsi/aic94xx/Makefile
@@ -6,7 +6,7 @@
6# 6#
7# This file is licensed under GPLv2. 7# This file is licensed under GPLv2.
8# 8#
9# This file is part of the the aic94xx driver. 9# This file is part of the aic94xx driver.
10# 10#
11# The aic94xx driver is free software; you can redistribute it and/or 11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as 12# modify it under the terms of the GNU General Public License as
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 2a2cc6cf1182..2311019304c0 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -319,10 +319,9 @@ ch_readconfig(scsi_changer *ch)
319 int result,id,lun,i; 319 int result,id,lun,i;
320 u_int elem; 320 u_int elem;
321 321
322 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 322 buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
323 if (!buffer) 323 if (!buffer)
324 return -ENOMEM; 324 return -ENOMEM;
325 memset(buffer,0,512);
326 325
327 memset(cmd,0,sizeof(cmd)); 326 memset(cmd,0,sizeof(cmd));
328 cmd[0] = MODE_SENSE; 327 cmd[0] = MODE_SENSE;
@@ -530,10 +529,9 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
530 u_char *buffer; 529 u_char *buffer;
531 int result; 530 int result;
532 531
533 buffer = kmalloc(512, GFP_KERNEL); 532 buffer = kzalloc(512, GFP_KERNEL);
534 if (!buffer) 533 if (!buffer)
535 return -ENOMEM; 534 return -ENOMEM;
536 memset(buffer,0,512);
537 535
538 dprintk("%s %s voltag: 0x%x => \"%s\"\n", 536 dprintk("%s %s voltag: 0x%x => \"%s\"\n",
539 clear ? "clear" : "set", 537 clear ? "clear" : "set",
@@ -922,11 +920,10 @@ static int ch_probe(struct device *dev)
922 if (sd->type != TYPE_MEDIUM_CHANGER) 920 if (sd->type != TYPE_MEDIUM_CHANGER)
923 return -ENODEV; 921 return -ENODEV;
924 922
925 ch = kmalloc(sizeof(*ch), GFP_KERNEL); 923 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
926 if (NULL == ch) 924 if (NULL == ch)
927 return -ENOMEM; 925 return -ENOMEM;
928 926
929 memset(ch,0,sizeof(*ch));
930 ch->minor = ch_devcount; 927 ch->minor = ch_devcount;
931 sprintf(ch->name,"ch%d",ch->minor); 928 sprintf(ch->name,"ch%d",ch->minor);
932 mutex_init(&ch->lock); 929 mutex_init(&ch->lock);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index a965ed3548d5..564ea90ed3a0 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -541,7 +541,7 @@ static struct ParameterData __devinitdata cfg_data[] = {
541 541
542 542
543/* 543/*
544 * Safe settings. If set to zero the the BIOS/default values with 544 * Safe settings. If set to zero the BIOS/default values with
545 * command line overrides will be used. If set to 1 then safe and 545 * command line overrides will be used. If set to 1 then safe and
546 * slow settings will be used. 546 * slow settings will be used.
547 */ 547 */
@@ -617,7 +617,7 @@ static void __devinit fix_settings(void)
617 617
618/* 618/*
619 * Mapping from the eeprom delay index value (index into this array) 619 * Mapping from the eeprom delay index value (index into this array)
620 * to the the number of actual seconds that the delay should be for. 620 * to the number of actual seconds that the delay should be for.
621 */ 621 */
622static char __devinitdata eeprom_index_to_delay_map[] = 622static char __devinitdata eeprom_index_to_delay_map[] =
623 { 1, 3, 5, 10, 16, 30, 60, 120 }; 623 { 1, 3, 5, 10, 16, 30, 60, 120 };
@@ -4136,7 +4136,7 @@ static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long
4136 * @io_port: base I/O address 4136 * @io_port: base I/O address
4137 * @addr: offset into SEEPROM 4137 * @addr: offset into SEEPROM
4138 * 4138 *
4139 * Returns the the byte read. 4139 * Returns the byte read.
4140 **/ 4140 **/
4141static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr) 4141static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
4142{ 4142{
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index fb6433a56989..8c7d2bbf9b1a 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1308,13 +1308,12 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1308 schedule_timeout_uninterruptible(1); 1308 schedule_timeout_uninterruptible(1);
1309 } while (m == EMPTY_QUEUE); 1309 } while (m == EMPTY_QUEUE);
1310 1310
1311 status = kmalloc(4, GFP_KERNEL|ADDR32); 1311 status = kzalloc(4, GFP_KERNEL|ADDR32);
1312 if(status == NULL) { 1312 if(status == NULL) {
1313 adpt_send_nop(pHba, m); 1313 adpt_send_nop(pHba, m);
1314 printk(KERN_ERR"IOP reset failed - no free memory.\n"); 1314 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1315 return -ENOMEM; 1315 return -ENOMEM;
1316 } 1316 }
1317 memset(status,0,4);
1318 1317
1319 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0; 1318 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1320 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID; 1319 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
@@ -1504,21 +1503,19 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba)
1504 continue; 1503 continue;
1505 } 1504 }
1506 if( pHba->channel[bus_no].device[scsi_id] == NULL){ 1505 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1507 pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL); 1506 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1508 if(pDev == NULL) { 1507 if(pDev == NULL) {
1509 return -ENOMEM; 1508 return -ENOMEM;
1510 } 1509 }
1511 pHba->channel[bus_no].device[scsi_id] = pDev; 1510 pHba->channel[bus_no].device[scsi_id] = pDev;
1512 memset(pDev,0,sizeof(struct adpt_device));
1513 } else { 1511 } else {
1514 for( pDev = pHba->channel[bus_no].device[scsi_id]; 1512 for( pDev = pHba->channel[bus_no].device[scsi_id];
1515 pDev->next_lun; pDev = pDev->next_lun){ 1513 pDev->next_lun; pDev = pDev->next_lun){
1516 } 1514 }
1517 pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL); 1515 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1518 if(pDev->next_lun == NULL) { 1516 if(pDev->next_lun == NULL) {
1519 return -ENOMEM; 1517 return -ENOMEM;
1520 } 1518 }
1521 memset(pDev->next_lun,0,sizeof(struct adpt_device));
1522 pDev = pDev->next_lun; 1519 pDev = pDev->next_lun;
1523 } 1520 }
1524 pDev->tid = tid; 1521 pDev->tid = tid;
@@ -1667,12 +1664,11 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1667 reply_size = REPLY_FRAME_SIZE; 1664 reply_size = REPLY_FRAME_SIZE;
1668 } 1665 }
1669 reply_size *= 4; 1666 reply_size *= 4;
1670 reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL); 1667 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1671 if(reply == NULL) { 1668 if(reply == NULL) {
1672 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name); 1669 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1673 return -ENOMEM; 1670 return -ENOMEM;
1674 } 1671 }
1675 memset(reply,0,REPLY_FRAME_SIZE*4);
1676 sg_offset = (msg[0]>>4)&0xf; 1672 sg_offset = (msg[0]>>4)&0xf;
1677 msg[2] = 0x40000000; // IOCTL context 1673 msg[2] = 0x40000000; // IOCTL context
1678 msg[3] = (u32)reply; 1674 msg[3] = (u32)reply;
@@ -2444,7 +2440,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2444 } 2440 }
2445 pDev = pHba->channel[bus_no].device[scsi_id]; 2441 pDev = pHba->channel[bus_no].device[scsi_id];
2446 if( pDev == NULL){ 2442 if( pDev == NULL){
2447 pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL); 2443 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2448 if(pDev == NULL) { 2444 if(pDev == NULL) {
2449 return -ENOMEM; 2445 return -ENOMEM;
2450 } 2446 }
@@ -2453,12 +2449,11 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2453 while (pDev->next_lun) { 2449 while (pDev->next_lun) {
2454 pDev = pDev->next_lun; 2450 pDev = pDev->next_lun;
2455 } 2451 }
2456 pDev = pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL); 2452 pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2457 if(pDev == NULL) { 2453 if(pDev == NULL) {
2458 return -ENOMEM; 2454 return -ENOMEM;
2459 } 2455 }
2460 } 2456 }
2461 memset(pDev,0,sizeof(struct adpt_device));
2462 pDev->tid = d->lct_data.tid; 2457 pDev->tid = d->lct_data.tid;
2463 pDev->scsi_channel = bus_no; 2458 pDev->scsi_channel = bus_no;
2464 pDev->scsi_id = scsi_id; 2459 pDev->scsi_id = scsi_id;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2c7b77e833f9..4baa79e68679 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -92,6 +92,7 @@ static unsigned int ipr_fastfail = 0;
92static unsigned int ipr_transop_timeout = 0; 92static unsigned int ipr_transop_timeout = 0;
93static unsigned int ipr_enable_cache = 1; 93static unsigned int ipr_enable_cache = 1;
94static unsigned int ipr_debug = 0; 94static unsigned int ipr_debug = 0;
95static unsigned int ipr_dual_ioa_raid = 1;
95static DEFINE_SPINLOCK(ipr_driver_lock); 96static DEFINE_SPINLOCK(ipr_driver_lock);
96 97
97/* This table describes the differences between DMA controller chips */ 98/* This table describes the differences between DMA controller chips */
@@ -158,6 +159,8 @@ module_param_named(enable_cache, ipr_enable_cache, int, 0);
158MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)"); 159MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
159module_param_named(debug, ipr_debug, int, 0); 160module_param_named(debug, ipr_debug, int, 0);
160MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); 161MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
162module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
163MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
161MODULE_LICENSE("GPL"); 164MODULE_LICENSE("GPL");
162MODULE_VERSION(IPR_DRIVER_VERSION); 165MODULE_VERSION(IPR_DRIVER_VERSION);
163 166
@@ -206,6 +209,8 @@ struct ipr_error_table_t ipr_error_table[] = {
206 "8009: Impending cache battery pack failure"}, 209 "8009: Impending cache battery pack failure"},
207 {0x02040400, 0, 0, 210 {0x02040400, 0, 0,
208 "34FF: Disk device format in progress"}, 211 "34FF: Disk device format in progress"},
212 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
213 "9070: IOA requested reset"},
209 {0x023F0000, 0, 0, 214 {0x023F0000, 0, 0,
210 "Synchronization required"}, 215 "Synchronization required"},
211 {0x024E0000, 0, 0, 216 {0x024E0000, 0, 0,
@@ -951,6 +956,53 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
951} 956}
952 957
953/** 958/**
959 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
960 * @i: index into buffer
961 * @buf: string to modify
962 *
963 * This function will strip all trailing whitespace, pad the end
964 * of the string with a single space, and NULL terminate the string.
965 *
966 * Return value:
967 * new length of string
968 **/
969static int strip_and_pad_whitespace(int i, char *buf)
970{
971 while (i && buf[i] == ' ')
972 i--;
973 buf[i+1] = ' ';
974 buf[i+2] = '\0';
975 return i + 2;
976}
977
978/**
979 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
980 * @prefix: string to print at start of printk
981 * @hostrcb: hostrcb pointer
982 * @vpd: vendor/product id/sn struct
983 *
984 * Return value:
985 * none
986 **/
987static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
988 struct ipr_vpd *vpd)
989{
990 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
991 int i = 0;
992
993 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
994 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
995
996 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
997 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
998
999 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1000 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1001
1002 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1003}
1004
1005/**
954 * ipr_log_vpd - Log the passed VPD to the error log. 1006 * ipr_log_vpd - Log the passed VPD to the error log.
955 * @vpd: vendor/product id/sn struct 1007 * @vpd: vendor/product id/sn struct
956 * 1008 *
@@ -974,6 +1026,23 @@ static void ipr_log_vpd(struct ipr_vpd *vpd)
974} 1026}
975 1027
976/** 1028/**
1029 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1030 * @prefix: string to print at start of printk
1031 * @hostrcb: hostrcb pointer
1032 * @vpd: vendor/product id/sn/wwn struct
1033 *
1034 * Return value:
1035 * none
1036 **/
1037static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1038 struct ipr_ext_vpd *vpd)
1039{
1040 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1041 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1042 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1043}
1044
1045/**
977 * ipr_log_ext_vpd - Log the passed extended VPD to the error log. 1046 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
978 * @vpd: vendor/product id/sn/wwn struct 1047 * @vpd: vendor/product id/sn/wwn struct
979 * 1048 *
@@ -1287,10 +1356,11 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1287 1356
1288 error = &hostrcb->hcam.u.error.u.type_17_error; 1357 error = &hostrcb->hcam.u.error.u.type_17_error;
1289 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1358 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1359 strstrip(error->failure_reason);
1290 1360
1291 ipr_err("%s\n", error->failure_reason); 1361 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1292 ipr_err("Remote Adapter VPD:\n"); 1362 be32_to_cpu(hostrcb->hcam.u.error.prc));
1293 ipr_log_ext_vpd(&error->vpd); 1363 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1294 ipr_log_hex_data(ioa_cfg, error->data, 1364 ipr_log_hex_data(ioa_cfg, error->data,
1295 be32_to_cpu(hostrcb->hcam.length) - 1365 be32_to_cpu(hostrcb->hcam.length) -
1296 (offsetof(struct ipr_hostrcb_error, u) + 1366 (offsetof(struct ipr_hostrcb_error, u) +
@@ -1312,10 +1382,11 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1312 1382
1313 error = &hostrcb->hcam.u.error.u.type_07_error; 1383 error = &hostrcb->hcam.u.error.u.type_07_error;
1314 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1384 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1385 strstrip(error->failure_reason);
1315 1386
1316 ipr_err("%s\n", error->failure_reason); 1387 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1317 ipr_err("Remote Adapter VPD:\n"); 1388 be32_to_cpu(hostrcb->hcam.u.error.prc));
1318 ipr_log_vpd(&error->vpd); 1389 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1319 ipr_log_hex_data(ioa_cfg, error->data, 1390 ipr_log_hex_data(ioa_cfg, error->data,
1320 be32_to_cpu(hostrcb->hcam.length) - 1391 be32_to_cpu(hostrcb->hcam.length) -
1321 (offsetof(struct ipr_hostrcb_error, u) + 1392 (offsetof(struct ipr_hostrcb_error, u) +
@@ -1672,12 +1743,15 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 1743 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1673 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 1744 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1674 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 1745 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1746 u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1675 1747
1676 list_del(&hostrcb->queue); 1748 list_del(&hostrcb->queue);
1677 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 1749 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1678 1750
1679 if (!ioasc) { 1751 if (!ioasc) {
1680 ipr_handle_log_data(ioa_cfg, hostrcb); 1752 ipr_handle_log_data(ioa_cfg, hostrcb);
1753 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
1754 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1681 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) { 1755 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1682 dev_err(&ioa_cfg->pdev->dev, 1756 dev_err(&ioa_cfg->pdev->dev,
1683 "Host RCB failed with IOASC: 0x%08X\n", ioasc); 1757 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
@@ -2635,8 +2709,13 @@ static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2635 if (!capable(CAP_SYS_ADMIN)) 2709 if (!capable(CAP_SYS_ADMIN))
2636 return -EACCES; 2710 return -EACCES;
2637 2711
2638 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2639 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2712 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2713 while(ioa_cfg->in_reset_reload) {
2714 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2715 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2716 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2717 }
2718
2640 ioa_cfg->errors_logged = 0; 2719 ioa_cfg->errors_logged = 0;
2641 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 2720 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2642 2721
@@ -2958,6 +3037,11 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2958 unsigned long lock_flags; 3037 unsigned long lock_flags;
2959 3038
2960 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3040 while(ioa_cfg->in_reset_reload) {
3041 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3042 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3043 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3044 }
2961 3045
2962 if (ioa_cfg->ucode_sglist) { 3046 if (ioa_cfg->ucode_sglist) {
2963 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4656,18 +4740,19 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4656 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 4740 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4657 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 4741 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4658 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4742 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4743 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4659 4744
4660 if (!res) { 4745 if (!res) {
4661 ipr_scsi_eh_done(ipr_cmd); 4746 ipr_scsi_eh_done(ipr_cmd);
4662 return; 4747 return;
4663 } 4748 }
4664 4749
4665 if (!ipr_is_gscsi(res)) 4750 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4666 ipr_gen_sense(ipr_cmd); 4751 ipr_gen_sense(ipr_cmd);
4667 4752
4668 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 4753 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4669 4754
4670 switch (ioasc & IPR_IOASC_IOASC_MASK) { 4755 switch (masked_ioasc) {
4671 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: 4756 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4672 if (ipr_is_naca_model(res)) 4757 if (ipr_is_naca_model(res))
4673 scsi_cmd->result |= (DID_ABORT << 16); 4758 scsi_cmd->result |= (DID_ABORT << 16);
@@ -5363,6 +5448,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5363 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); 5448 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5364 } 5449 }
5365 5450
5451 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5366 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); 5452 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5367 5453
5368 ioa_cfg->reset_retries = 0; 5454 ioa_cfg->reset_retries = 0;
@@ -5799,6 +5885,94 @@ static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5799} 5885}
5800 5886
5801/** 5887/**
5888 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
5889 * @ipr_cmd: ipr command struct
5890 *
5891 * This function enables dual IOA RAID support if possible.
5892 *
5893 * Return value:
5894 * IPR_RC_JOB_RETURN
5895 **/
5896static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
5897{
5898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5899 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5900 struct ipr_mode_page24 *mode_page;
5901 int length;
5902
5903 ENTER;
5904 mode_page = ipr_get_mode_page(mode_pages, 0x24,
5905 sizeof(struct ipr_mode_page24));
5906
5907 if (mode_page)
5908 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
5909
5910 length = mode_pages->hdr.length + 1;
5911 mode_pages->hdr.length = 0;
5912
5913 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5914 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5915 length);
5916
5917 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5918 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5919
5920 LEAVE;
5921 return IPR_RC_JOB_RETURN;
5922}
5923
5924/**
5925 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
5926 * @ipr_cmd: ipr command struct
5927 *
5928 * This function handles the failure of a Mode Sense to the IOAFP.
5929 * Some adapters do not handle all mode pages.
5930 *
5931 * Return value:
5932 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5933 **/
5934static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
5935{
5936 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5937
5938 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5939 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5940 return IPR_RC_JOB_CONTINUE;
5941 }
5942
5943 return ipr_reset_cmd_failed(ipr_cmd);
5944}
5945
5946/**
5947 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
5948 * @ipr_cmd: ipr command struct
5949 *
5950 * This function send a mode sense to the IOA to retrieve
5951 * the IOA Advanced Function Control mode page.
5952 *
5953 * Return value:
5954 * IPR_RC_JOB_RETURN
5955 **/
5956static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
5957{
5958 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5959
5960 ENTER;
5961 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5962 0x24, ioa_cfg->vpd_cbs_dma +
5963 offsetof(struct ipr_misc_cbs, mode_pages),
5964 sizeof(struct ipr_mode_pages));
5965
5966 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
5967 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
5968
5969 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5970
5971 LEAVE;
5972 return IPR_RC_JOB_RETURN;
5973}
5974
5975/**
5802 * ipr_init_res_table - Initialize the resource table 5976 * ipr_init_res_table - Initialize the resource table
5803 * @ipr_cmd: ipr command struct 5977 * @ipr_cmd: ipr command struct
5804 * 5978 *
@@ -5866,7 +6040,10 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5866 } 6040 }
5867 } 6041 }
5868 6042
5869 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 6043 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6044 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6045 else
6046 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5870 6047
5871 LEAVE; 6048 LEAVE;
5872 return IPR_RC_JOB_CONTINUE; 6049 return IPR_RC_JOB_CONTINUE;
@@ -5888,8 +6065,11 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5888 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6065 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5889 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 6066 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5890 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 6067 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6068 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
5891 6069
5892 ENTER; 6070 ENTER;
6071 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6072 ioa_cfg->dual_raid = 1;
5893 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", 6073 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5894 ucode_vpd->major_release, ucode_vpd->card_type, 6074 ucode_vpd->major_release, ucode_vpd->card_type,
5895 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); 6075 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
@@ -5973,6 +6153,37 @@ static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5973} 6153}
5974 6154
5975/** 6155/**
6156 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6157 * @ipr_cmd: ipr command struct
6158 *
6159 * This function sends a Page 0xD0 inquiry to the adapter
6160 * to retrieve adapter capabilities.
6161 *
6162 * Return value:
6163 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6164 **/
6165static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6166{
6167 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6168 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6169 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6170
6171 ENTER;
6172 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6173 memset(cap, 0, sizeof(*cap));
6174
6175 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6176 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6177 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6178 sizeof(struct ipr_inquiry_cap));
6179 return IPR_RC_JOB_RETURN;
6180 }
6181
6182 LEAVE;
6183 return IPR_RC_JOB_CONTINUE;
6184}
6185
6186/**
5976 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. 6187 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5977 * @ipr_cmd: ipr command struct 6188 * @ipr_cmd: ipr command struct
5978 * 6189 *
@@ -5992,7 +6203,7 @@ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5992 if (!ipr_inquiry_page_supported(page0, 1)) 6203 if (!ipr_inquiry_page_supported(page0, 1))
5993 ioa_cfg->cache_state = CACHE_NONE; 6204 ioa_cfg->cache_state = CACHE_NONE;
5994 6205
5995 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; 6206 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
5996 6207
5997 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 6208 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5998 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), 6209 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
@@ -6278,6 +6489,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6278 struct ipr_hostrcb *hostrcb; 6489 struct ipr_hostrcb *hostrcb;
6279 struct ipr_uc_sdt sdt; 6490 struct ipr_uc_sdt sdt;
6280 int rc, length; 6491 int rc, length;
6492 u32 ioasc;
6281 6493
6282 mailbox = readl(ioa_cfg->ioa_mailbox); 6494 mailbox = readl(ioa_cfg->ioa_mailbox);
6283 6495
@@ -6310,9 +6522,13 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6310 (__be32 *)&hostrcb->hcam, 6522 (__be32 *)&hostrcb->hcam,
6311 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 6523 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6312 6524
6313 if (!rc) 6525 if (!rc) {
6314 ipr_handle_log_data(ioa_cfg, hostrcb); 6526 ipr_handle_log_data(ioa_cfg, hostrcb);
6315 else 6527 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6528 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6529 ioa_cfg->sdt_state == GET_DUMP)
6530 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6531 } else
6316 ipr_unit_check_no_data(ioa_cfg); 6532 ipr_unit_check_no_data(ioa_cfg);
6317 6533
6318 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 6534 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@@ -6425,6 +6641,48 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6425} 6641}
6426 6642
6427/** 6643/**
6644 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
6645 * @ipr_cmd: ipr command struct
6646 *
6647 * Description: This clears PCI reset to the adapter and delays two seconds.
6648 *
6649 * Return value:
6650 * IPR_RC_JOB_RETURN
6651 **/
6652static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
6653{
6654 ENTER;
6655 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
6656 ipr_cmd->job_step = ipr_reset_bist_done;
6657 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6658 LEAVE;
6659 return IPR_RC_JOB_RETURN;
6660}
6661
6662/**
6663 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
6664 * @ipr_cmd: ipr command struct
6665 *
6666 * Description: This asserts PCI reset to the adapter.
6667 *
6668 * Return value:
6669 * IPR_RC_JOB_RETURN
6670 **/
6671static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
6672{
6673 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6674 struct pci_dev *pdev = ioa_cfg->pdev;
6675
6676 ENTER;
6677 pci_block_user_cfg_access(pdev);
6678 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
6679 ipr_cmd->job_step = ipr_reset_slot_reset_done;
6680 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
6681 LEAVE;
6682 return IPR_RC_JOB_RETURN;
6683}
6684
6685/**
6428 * ipr_reset_allowed - Query whether or not IOA can be reset 6686 * ipr_reset_allowed - Query whether or not IOA can be reset
6429 * @ioa_cfg: ioa config struct 6687 * @ioa_cfg: ioa config struct
6430 * 6688 *
@@ -6463,7 +6721,7 @@ static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6463 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 6721 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6464 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 6722 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6465 } else { 6723 } else {
6466 ipr_cmd->job_step = ipr_reset_start_bist; 6724 ipr_cmd->job_step = ioa_cfg->reset;
6467 rc = IPR_RC_JOB_CONTINUE; 6725 rc = IPR_RC_JOB_CONTINUE;
6468 } 6726 }
6469 6727
@@ -6496,7 +6754,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6496 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg); 6754 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6497 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 6755 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6498 } else { 6756 } else {
6499 ipr_cmd->job_step = ipr_reset_start_bist; 6757 ipr_cmd->job_step = ioa_cfg->reset;
6500 } 6758 }
6501 6759
6502 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 6760 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
@@ -6591,12 +6849,14 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6591 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 6849 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6592 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; 6850 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6593 6851
6594 if (shutdown_type == IPR_SHUTDOWN_ABBREV) 6852 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
6595 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; 6853 timeout = IPR_SHUTDOWN_TIMEOUT;
6596 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) 6854 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6597 timeout = IPR_INTERNAL_TIMEOUT; 6855 timeout = IPR_INTERNAL_TIMEOUT;
6856 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6857 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
6598 else 6858 else
6599 timeout = IPR_SHUTDOWN_TIMEOUT; 6859 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6600 6860
6601 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); 6861 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6602 6862
@@ -6776,8 +7036,11 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6776 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 7036 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6777 7037
6778 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 7038 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6779 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, 7039 if (ioa_cfg->needs_warm_reset)
6780 IPR_SHUTDOWN_NONE); 7040 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7041 else
7042 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7043 IPR_SHUTDOWN_NONE);
6781 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 7044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6782 return PCI_ERS_RESULT_RECOVERED; 7045 return PCI_ERS_RESULT_RECOVERED;
6783} 7046}
@@ -7226,7 +7489,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7226 unsigned long ipr_regs_pci; 7489 unsigned long ipr_regs_pci;
7227 void __iomem *ipr_regs; 7490 void __iomem *ipr_regs;
7228 int rc = PCIBIOS_SUCCESSFUL; 7491 int rc = PCIBIOS_SUCCESSFUL;
7229 volatile u32 mask, uproc; 7492 volatile u32 mask, uproc, interrupts;
7230 7493
7231 ENTER; 7494 ENTER;
7232 7495
@@ -7265,6 +7528,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7265 else 7528 else
7266 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; 7529 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7267 7530
7531 rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &ioa_cfg->revid);
7532
7533 if (rc != PCIBIOS_SUCCESSFUL) {
7534 dev_err(&pdev->dev, "Failed to read PCI revision ID\n");
7535 rc = -EIO;
7536 goto out_scsi_host_put;
7537 }
7538
7268 ipr_regs_pci = pci_resource_start(pdev, 0); 7539 ipr_regs_pci = pci_resource_start(pdev, 0);
7269 7540
7270 rc = pci_request_regions(pdev, IPR_NAME); 7541 rc = pci_request_regions(pdev, IPR_NAME);
@@ -7333,9 +7604,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7333 * the card is in an unknown state and needs a hard reset 7604 * the card is in an unknown state and needs a hard reset
7334 */ 7605 */
7335 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7606 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7607 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
7336 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg); 7608 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7337 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) 7609 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7338 ioa_cfg->needs_hard_reset = 1; 7610 ioa_cfg->needs_hard_reset = 1;
7611 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
7612 ioa_cfg->needs_hard_reset = 1;
7613 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
7614 ioa_cfg->ioa_unit_checked = 1;
7339 7615
7340 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 7616 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7341 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg); 7617 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
@@ -7346,6 +7622,13 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7346 goto cleanup_nolog; 7622 goto cleanup_nolog;
7347 } 7623 }
7348 7624
7625 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
7626 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
7627 ioa_cfg->needs_warm_reset = 1;
7628 ioa_cfg->reset = ipr_reset_slot_reset;
7629 } else
7630 ioa_cfg->reset = ipr_reset_start_bist;
7631
7349 spin_lock(&ipr_driver_lock); 7632 spin_lock(&ipr_driver_lock);
7350 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); 7633 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7351 spin_unlock(&ipr_driver_lock); 7634 spin_unlock(&ipr_driver_lock);
@@ -7428,6 +7711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
7428 ENTER; 7711 ENTER;
7429 7712
7430 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 7713 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7714 while(ioa_cfg->in_reset_reload) {
7715 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7716 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7717 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7718 }
7719
7431 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); 7720 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7432 7721
7433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 7722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
@@ -7551,6 +7840,12 @@ static void ipr_shutdown(struct pci_dev *pdev)
7551 unsigned long lock_flags = 0; 7840 unsigned long lock_flags = 0;
7552 7841
7553 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 7842 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7843 while(ioa_cfg->in_reset_reload) {
7844 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7845 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7846 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7847 }
7848
7554 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); 7849 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7555 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 7850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7556 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 7851 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -7577,19 +7872,22 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7577 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7872 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7578 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 7873 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7579 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7874 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7580 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 }, 7875 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7876 IPR_USE_LONG_TRANSOP_TIMEOUT },
7581 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7877 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7582 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 7878 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7583 IPR_USE_LONG_TRANSOP_TIMEOUT }, 7879 IPR_USE_LONG_TRANSOP_TIMEOUT },
7584 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7880 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7585 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 7881 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7586 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7882 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7587 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 }, 7883 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7884 IPR_USE_LONG_TRANSOP_TIMEOUT},
7588 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7885 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7589 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 7886 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7590 IPR_USE_LONG_TRANSOP_TIMEOUT }, 7887 IPR_USE_LONG_TRANSOP_TIMEOUT },
7591 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 7888 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7592 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 }, 7889 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7890 IPR_USE_LONG_TRANSOP_TIMEOUT },
7593 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 7891 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7594 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 7892 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7595 IPR_USE_LONG_TRANSOP_TIMEOUT }, 7893 IPR_USE_LONG_TRANSOP_TIMEOUT },
@@ -7597,7 +7895,7 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7597 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 7895 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7598 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 7896 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7599 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 7897 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
7600 IPR_USE_LONG_TRANSOP_TIMEOUT }, 7898 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
7601 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 7899 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7602 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, 7900 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7603 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7901 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
@@ -7627,6 +7925,7 @@ static struct pci_driver ipr_driver = {
7627 .remove = ipr_remove, 7925 .remove = ipr_remove,
7628 .shutdown = ipr_shutdown, 7926 .shutdown = ipr_shutdown,
7629 .err_handler = &ipr_err_handler, 7927 .err_handler = &ipr_err_handler,
7928 .dynids.use_driver_data = 1
7630}; 7929};
7631 7930
7632/** 7931/**
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index bc53d7cebe0a..d93156671e93 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.3.2" 40#define IPR_DRIVER_VERSION "2.4.1"
41#define IPR_DRIVER_DATE "(March 23, 2007)" 41#define IPR_DRIVER_DATE "(April 24, 2007)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -91,6 +91,7 @@
91 * IOASCs 91 * IOASCs
92 */ 92 */
93#define IPR_IOASC_NR_INIT_CMD_REQUIRED 0x02040200 93#define IPR_IOASC_NR_INIT_CMD_REQUIRED 0x02040200
94#define IPR_IOASC_NR_IOA_RESET_REQUIRED 0x02048000
94#define IPR_IOASC_SYNC_REQUIRED 0x023f0000 95#define IPR_IOASC_SYNC_REQUIRED 0x023f0000
95#define IPR_IOASC_MED_DO_NOT_REALLOC 0x03110C00 96#define IPR_IOASC_MED_DO_NOT_REALLOC 0x03110C00
96#define IPR_IOASC_HW_SEL_TIMEOUT 0x04050000 97#define IPR_IOASC_HW_SEL_TIMEOUT 0x04050000
@@ -111,6 +112,7 @@
111 112
112/* Driver data flags */ 113/* Driver data flags */
113#define IPR_USE_LONG_TRANSOP_TIMEOUT 0x00000001 114#define IPR_USE_LONG_TRANSOP_TIMEOUT 0x00000001
115#define IPR_USE_PCI_WARM_RESET 0x00000002
114 116
115#define IPR_DEFAULT_MAX_ERROR_DUMP 984 117#define IPR_DEFAULT_MAX_ERROR_DUMP 984
116#define IPR_NUM_LOG_HCAMS 2 118#define IPR_NUM_LOG_HCAMS 2
@@ -179,6 +181,7 @@
179#define IPR_SHUTDOWN_TIMEOUT (ipr_fastfail ? 60 * HZ : 10 * 60 * HZ) 181#define IPR_SHUTDOWN_TIMEOUT (ipr_fastfail ? 60 * HZ : 10 * 60 * HZ)
180#define IPR_VSET_RW_TIMEOUT (ipr_fastfail ? 30 * HZ : 2 * 60 * HZ) 182#define IPR_VSET_RW_TIMEOUT (ipr_fastfail ? 30 * HZ : 2 * 60 * HZ)
181#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ) 183#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ)
184#define IPR_DUAL_IOA_ABBR_SHUTDOWN_TO (2 * 60 * HZ)
182#define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) 185#define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
183#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) 186#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
184#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) 187#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
@@ -191,6 +194,7 @@
191#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) 194#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
192#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) 195#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
193#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) 196#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
197#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
194#define IPR_DUMP_TIMEOUT (15 * HZ) 198#define IPR_DUMP_TIMEOUT (15 * HZ)
195 199
196/* 200/*
@@ -602,6 +606,12 @@ struct ipr_mode_page28 {
602 struct ipr_dev_bus_entry bus[0]; 606 struct ipr_dev_bus_entry bus[0];
603}__attribute__((packed)); 607}__attribute__((packed));
604 608
609struct ipr_mode_page24 {
610 struct ipr_mode_page_hdr hdr;
611 u8 flags;
612#define IPR_ENABLE_DUAL_IOA_AF 0x80
613}__attribute__((packed));
614
605struct ipr_ioa_vpd { 615struct ipr_ioa_vpd {
606 struct ipr_std_inq_data std_inq_data; 616 struct ipr_std_inq_data std_inq_data;
607 u8 ascii_part_num[12]; 617 u8 ascii_part_num[12];
@@ -624,6 +634,19 @@ struct ipr_inquiry_page3 {
624 u8 patch_number[4]; 634 u8 patch_number[4];
625}__attribute__((packed)); 635}__attribute__((packed));
626 636
637struct ipr_inquiry_cap {
638 u8 peri_qual_dev_type;
639 u8 page_code;
640 u8 reserved1;
641 u8 page_length;
642 u8 ascii_len;
643 u8 reserved2;
644 u8 sis_version[2];
645 u8 cap;
646#define IPR_CAP_DUAL_IOA_RAID 0x80
647 u8 reserved3[15];
648}__attribute__((packed));
649
627#define IPR_INQUIRY_PAGE0_ENTRIES 20 650#define IPR_INQUIRY_PAGE0_ENTRIES 20
628struct ipr_inquiry_page0 { 651struct ipr_inquiry_page0 {
629 u8 peri_qual_dev_type; 652 u8 peri_qual_dev_type;
@@ -962,6 +985,7 @@ struct ipr_misc_cbs {
962 struct ipr_ioa_vpd ioa_vpd; 985 struct ipr_ioa_vpd ioa_vpd;
963 struct ipr_inquiry_page0 page0_data; 986 struct ipr_inquiry_page0 page0_data;
964 struct ipr_inquiry_page3 page3_data; 987 struct ipr_inquiry_page3 page3_data;
988 struct ipr_inquiry_cap cap;
965 struct ipr_mode_pages mode_pages; 989 struct ipr_mode_pages mode_pages;
966 struct ipr_supported_device supp_dev; 990 struct ipr_supported_device supp_dev;
967}; 991};
@@ -1068,6 +1092,10 @@ struct ipr_ioa_cfg {
1068 u8 allow_cmds:1; 1092 u8 allow_cmds:1;
1069 u8 allow_ml_add_del:1; 1093 u8 allow_ml_add_del:1;
1070 u8 needs_hard_reset:1; 1094 u8 needs_hard_reset:1;
1095 u8 dual_raid:1;
1096 u8 needs_warm_reset:1;
1097
1098 u8 revid;
1071 1099
1072 enum ipr_cache_state cache_state; 1100 enum ipr_cache_state cache_state;
1073 u16 type; /* CCIN of the card */ 1101 u16 type; /* CCIN of the card */
@@ -1161,6 +1189,7 @@ struct ipr_ioa_cfg {
1161 struct pci_pool *ipr_cmd_pool; 1189 struct pci_pool *ipr_cmd_pool;
1162 1190
1163 struct ipr_cmnd *reset_cmd; 1191 struct ipr_cmnd *reset_cmd;
1192 int (*reset) (struct ipr_cmnd *);
1164 1193
1165 struct ata_host ata_host; 1194 struct ata_host ata_host;
1166 char ipr_cmd_label[8]; 1195 char ipr_cmd_label[8];
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 897a5e2c55e4..b4b52694497c 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -23,6 +23,8 @@
23 * 23 *
24 */ 24 */
25 25
26#include <linux/kthread.h>
27
26#include "sas_internal.h" 28#include "sas_internal.h"
27 29
28#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
@@ -184,7 +186,7 @@ static int sas_queue_up(struct sas_task *task)
184 list_add_tail(&task->list, &core->task_queue); 186 list_add_tail(&task->list, &core->task_queue);
185 core->task_queue_size += 1; 187 core->task_queue_size += 1;
186 spin_unlock_irqrestore(&core->task_queue_lock, flags); 188 spin_unlock_irqrestore(&core->task_queue_lock, flags);
187 up(&core->queue_thread_sema); 189 wake_up_process(core->queue_thread);
188 190
189 return 0; 191 return 0;
190} 192}
@@ -819,7 +821,7 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
819 struct sas_internal *i = to_sas_internal(core->shost->transportt); 821 struct sas_internal *i = to_sas_internal(core->shost->transportt);
820 822
821 spin_lock_irqsave(&core->task_queue_lock, flags); 823 spin_lock_irqsave(&core->task_queue_lock, flags);
822 while (!core->queue_thread_kill && 824 while (!kthread_should_stop() &&
823 !list_empty(&core->task_queue)) { 825 !list_empty(&core->task_queue)) {
824 826
825 can_queue = sas_ha->lldd_queue_size - core->task_queue_size; 827 can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
@@ -858,8 +860,6 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
858 spin_unlock_irqrestore(&core->task_queue_lock, flags); 860 spin_unlock_irqrestore(&core->task_queue_lock, flags);
859} 861}
860 862
861static DECLARE_COMPLETION(queue_th_comp);
862
863/** 863/**
864 * sas_queue_thread -- The Task Collector thread 864 * sas_queue_thread -- The Task Collector thread
865 * @_sas_ha: pointer to struct sas_ha 865 * @_sas_ha: pointer to struct sas_ha
@@ -867,40 +867,33 @@ static DECLARE_COMPLETION(queue_th_comp);
867static int sas_queue_thread(void *_sas_ha) 867static int sas_queue_thread(void *_sas_ha)
868{ 868{
869 struct sas_ha_struct *sas_ha = _sas_ha; 869 struct sas_ha_struct *sas_ha = _sas_ha;
870 struct scsi_core *core = &sas_ha->core;
871 870
872 daemonize("sas_queue_%d", core->shost->host_no);
873 current->flags |= PF_NOFREEZE; 871 current->flags |= PF_NOFREEZE;
874 872
875 complete(&queue_th_comp);
876
877 while (1) { 873 while (1) {
878 down_interruptible(&core->queue_thread_sema); 874 set_current_state(TASK_INTERRUPTIBLE);
875 schedule();
879 sas_queue(sas_ha); 876 sas_queue(sas_ha);
880 if (core->queue_thread_kill) 877 if (kthread_should_stop())
881 break; 878 break;
882 } 879 }
883 880
884 complete(&queue_th_comp);
885
886 return 0; 881 return 0;
887} 882}
888 883
889int sas_init_queue(struct sas_ha_struct *sas_ha) 884int sas_init_queue(struct sas_ha_struct *sas_ha)
890{ 885{
891 int res;
892 struct scsi_core *core = &sas_ha->core; 886 struct scsi_core *core = &sas_ha->core;
893 887
894 spin_lock_init(&core->task_queue_lock); 888 spin_lock_init(&core->task_queue_lock);
895 core->task_queue_size = 0; 889 core->task_queue_size = 0;
896 INIT_LIST_HEAD(&core->task_queue); 890 INIT_LIST_HEAD(&core->task_queue);
897 init_MUTEX_LOCKED(&core->queue_thread_sema);
898 891
899 res = kernel_thread(sas_queue_thread, sas_ha, 0); 892 core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
900 if (res >= 0) 893 "sas_queue_%d", core->shost->host_no);
901 wait_for_completion(&queue_th_comp); 894 if (IS_ERR(core->queue_thread))
902 895 return PTR_ERR(core->queue_thread);
903 return res < 0 ? res : 0; 896 return 0;
904} 897}
905 898
906void sas_shutdown_queue(struct sas_ha_struct *sas_ha) 899void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
@@ -909,10 +902,7 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
909 struct scsi_core *core = &sas_ha->core; 902 struct scsi_core *core = &sas_ha->core;
910 struct sas_task *task, *n; 903 struct sas_task *task, *n;
911 904
912 init_completion(&queue_th_comp); 905 kthread_stop(core->queue_thread);
913 core->queue_thread_kill = 1;
914 up(&core->queue_thread_sema);
915 wait_for_completion(&queue_th_comp);
916 906
917 if (!list_empty(&core->task_queue)) 907 if (!list_empty(&core->task_queue))
918 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n", 908 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a7de0bca5bdd..82e8f90c4617 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -27,10 +27,6 @@ struct lpfc_sli2_slim;
27 requests */ 27 requests */
28#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact 28#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
29 the NameServer before giving up. */ 29 the NameServer before giving up. */
30#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
31#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
32#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
33
34#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ 30#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
35#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ 31#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
36#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 32#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -244,28 +240,23 @@ struct lpfc_hba {
244#define FC_FABRIC 0x100 /* We are fabric attached */ 240#define FC_FABRIC 0x100 /* We are fabric attached */
245#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */ 241#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
246#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/ 242#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
243#define FC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
247#define FC_LOADING 0x1000 /* HBA in process of loading drvr */ 244#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
248#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */ 245#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
249#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ 246#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
250#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ 247#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
251#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ 248#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
252#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ 249#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
250#define FC_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */
251 /* This flag is set while issuing */
252 /* INIT_LINK mailbox command */
253#define FC_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */
253 254
254 uint32_t fc_topology; /* link topology, from LINK INIT */ 255 uint32_t fc_topology; /* link topology, from LINK INIT */
255 256
256 struct lpfc_stats fc_stat; 257 struct lpfc_stats fc_stat;
257 258
258 /* These are the head/tail pointers for the bind, plogi, adisc, unmap, 259 struct list_head fc_nodes;
259 * and map lists. Their counters are immediately following.
260 */
261 struct list_head fc_plogi_list;
262 struct list_head fc_adisc_list;
263 struct list_head fc_reglogin_list;
264 struct list_head fc_prli_list;
265 struct list_head fc_nlpunmap_list;
266 struct list_head fc_nlpmap_list;
267 struct list_head fc_npr_list;
268 struct list_head fc_unused_list;
269 260
270 /* Keep counters for the number of entries in each list. */ 261 /* Keep counters for the number of entries in each list. */
271 uint16_t fc_plogi_cnt; 262 uint16_t fc_plogi_cnt;
@@ -387,13 +378,17 @@ struct lpfc_hba {
387 378
388 mempool_t *mbox_mem_pool; 379 mempool_t *mbox_mem_pool;
389 mempool_t *nlp_mem_pool; 380 mempool_t *nlp_mem_pool;
390 struct list_head freebufList;
391 struct list_head ctrspbuflist;
392 struct list_head rnidrspbuflist;
393 381
394 struct fc_host_statistics link_stats; 382 struct fc_host_statistics link_stats;
395}; 383};
396 384
385static inline void
386lpfc_set_loopback_flag(struct lpfc_hba *phba) {
387 if (phba->cfg_topology == FLAGS_LOCAL_LB)
388 phba->fc_flag |= FC_LOOPBACK_MODE;
389 else
390 phba->fc_flag &= ~FC_LOOPBACK_MODE;
391}
397 392
398struct rnidrsp { 393struct rnidrsp {
399 void *buf; 394 void *buf;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f247e786af99..95fe77e816f8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -20,6 +20,7 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <linux/ctype.h> 22#include <linux/ctype.h>
23#include <linux/delay.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
@@ -213,6 +214,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
213 int mbxstatus = MBXERR_ERROR; 214 int mbxstatus = MBXERR_ERROR;
214 215
215 if ((phba->fc_flag & FC_OFFLINE_MODE) || 216 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
217 (phba->fc_flag & FC_BLOCK_MGMT_IO) ||
216 (phba->hba_state != LPFC_HBA_READY)) 218 (phba->hba_state != LPFC_HBA_READY))
217 return -EPERM; 219 return -EPERM;
218 220
@@ -235,6 +237,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
235 phba->fc_ratov * 2); 237 phba->fc_ratov * 2);
236 } 238 }
237 239
240 lpfc_set_loopback_flag(phba);
238 if (mbxstatus == MBX_TIMEOUT) 241 if (mbxstatus == MBX_TIMEOUT)
239 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 242 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
240 else 243 else
@@ -247,19 +250,62 @@ lpfc_issue_lip(struct Scsi_Host *host)
247} 250}
248 251
249static int 252static int
250lpfc_selective_reset(struct lpfc_hba *phba) 253lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
251{ 254{
252 struct completion online_compl; 255 struct completion online_compl;
256 struct lpfc_sli_ring *pring;
257 struct lpfc_sli *psli;
253 int status = 0; 258 int status = 0;
259 int cnt = 0;
260 int i;
254 261
255 init_completion(&online_compl); 262 init_completion(&online_compl);
256 lpfc_workq_post_event(phba, &status, &online_compl, 263 lpfc_workq_post_event(phba, &status, &online_compl,
257 LPFC_EVT_OFFLINE); 264 LPFC_EVT_OFFLINE_PREP);
265 wait_for_completion(&online_compl);
266
267 if (status != 0)
268 return -EIO;
269
270 psli = &phba->sli;
271
272 for (i = 0; i < psli->num_rings; i++) {
273 pring = &psli->ring[i];
274 /* The linkdown event takes 30 seconds to timeout. */
275 while (pring->txcmplq_cnt) {
276 msleep(10);
277 if (cnt++ > 3000) {
278 lpfc_printf_log(phba,
279 KERN_WARNING, LOG_INIT,
280 "%d:0466 Outstanding IO when "
281 "bringing Adapter offline\n",
282 phba->brd_no);
283 break;
284 }
285 }
286 }
287
288 init_completion(&online_compl);
289 lpfc_workq_post_event(phba, &status, &online_compl, type);
258 wait_for_completion(&online_compl); 290 wait_for_completion(&online_compl);
259 291
260 if (status != 0) 292 if (status != 0)
261 return -EIO; 293 return -EIO;
262 294
295 return 0;
296}
297
298static int
299lpfc_selective_reset(struct lpfc_hba *phba)
300{
301 struct completion online_compl;
302 int status = 0;
303
304 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
305
306 if (status != 0)
307 return status;
308
263 init_completion(&online_compl); 309 init_completion(&online_compl);
264 lpfc_workq_post_event(phba, &status, &online_compl, 310 lpfc_workq_post_event(phba, &status, &online_compl,
265 LPFC_EVT_ONLINE); 311 LPFC_EVT_ONLINE);
@@ -324,23 +370,19 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
324 370
325 init_completion(&online_compl); 371 init_completion(&online_compl);
326 372
327 if(strncmp(buf, "online", sizeof("online") - 1) == 0) 373 if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
328 lpfc_workq_post_event(phba, &status, &online_compl, 374 lpfc_workq_post_event(phba, &status, &online_compl,
329 LPFC_EVT_ONLINE); 375 LPFC_EVT_ONLINE);
330 else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) 376 wait_for_completion(&online_compl);
331 lpfc_workq_post_event(phba, &status, &online_compl, 377 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
332 LPFC_EVT_OFFLINE); 378 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
333 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) 379 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
334 lpfc_workq_post_event(phba, &status, &online_compl, 380 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
335 LPFC_EVT_WARM_START); 381 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
336 else if (strncmp(buf, "error", sizeof("error") - 1) == 0) 382 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
337 lpfc_workq_post_event(phba, &status, &online_compl,
338 LPFC_EVT_KILL);
339 else 383 else
340 return -EINVAL; 384 return -EINVAL;
341 385
342 wait_for_completion(&online_compl);
343
344 if (!status) 386 if (!status)
345 return strlen(buf); 387 return strlen(buf);
346 else 388 else
@@ -645,9 +687,7 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
645 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 687 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
646 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); 688 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
647 689
648 init_completion(&online_compl); 690 stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
649 lpfc_workq_post_event(phba, &stat1, &online_compl, LPFC_EVT_OFFLINE);
650 wait_for_completion(&online_compl);
651 if (stat1) 691 if (stat1)
652 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 692 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
653 "%d:0463 lpfc_soft_wwpn attribute set failed to reinit " 693 "%d:0463 lpfc_soft_wwpn attribute set failed to reinit "
@@ -789,6 +829,18 @@ lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
789 return -EINVAL; 829 return -EINVAL;
790} 830}
791 831
832static void
833lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba)
834{
835 struct lpfc_nodelist *ndlp;
836
837 spin_lock_irq(phba->host->host_lock);
838 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp)
839 if (ndlp->rport)
840 ndlp->rport->dev_loss_tmo = phba->cfg_devloss_tmo;
841 spin_unlock_irq(phba->host->host_lock);
842}
843
792static int 844static int
793lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val) 845lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
794{ 846{
@@ -804,6 +856,7 @@ lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
804 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { 856 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
805 phba->cfg_nodev_tmo = val; 857 phba->cfg_nodev_tmo = val;
806 phba->cfg_devloss_tmo = val; 858 phba->cfg_devloss_tmo = val;
859 lpfc_update_rport_devloss_tmo(phba);
807 return 0; 860 return 0;
808 } 861 }
809 862
@@ -839,6 +892,7 @@ lpfc_devloss_tmo_set(struct lpfc_hba *phba, int val)
839 phba->cfg_nodev_tmo = val; 892 phba->cfg_nodev_tmo = val;
840 phba->cfg_devloss_tmo = val; 893 phba->cfg_devloss_tmo = val;
841 phba->dev_loss_tmo_changed = 1; 894 phba->dev_loss_tmo_changed = 1;
895 lpfc_update_rport_devloss_tmo(phba);
842 return 0; 896 return 0;
843 } 897 }
844 898
@@ -931,9 +985,10 @@ LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology");
931# 1 = 1 Gigabaud 985# 1 = 1 Gigabaud
932# 2 = 2 Gigabaud 986# 2 = 2 Gigabaud
933# 4 = 4 Gigabaud 987# 4 = 4 Gigabaud
934# Value range is [0,4]. Default value is 0. 988# 8 = 8 Gigabaud
989# Value range is [0,8]. Default value is 0.
935*/ 990*/
936LPFC_ATTR_R(link_speed, 0, 0, 4, "Select link speed"); 991LPFC_ATTR_R(link_speed, 0, 0, 8, "Select link speed");
937 992
938/* 993/*
939# lpfc_fcp_class: Determines FC class to use for the FCP protocol. 994# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
@@ -958,7 +1013,7 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
958/* 1013/*
959# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing 1014# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
960# cr_delay (msec) or cr_count outstanding commands. cr_delay can take 1015# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
961# value [0,63]. cr_count can take value [0,255]. Default value of cr_delay 1016# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
962# is 0. Default value of cr_count is 1. The cr_count feature is disabled if 1017# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
963# cr_delay is set to 0. 1018# cr_delay is set to 0.
964*/ 1019*/
@@ -1227,11 +1282,11 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1227 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1282 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
1228 int rc; 1283 int rc;
1229 1284
1230 if (off > sizeof(MAILBOX_t)) 1285 if (off > MAILBOX_CMD_SIZE)
1231 return -ERANGE; 1286 return -ERANGE;
1232 1287
1233 if ((count + off) > sizeof(MAILBOX_t)) 1288 if ((count + off) > MAILBOX_CMD_SIZE)
1234 count = sizeof(MAILBOX_t) - off; 1289 count = MAILBOX_CMD_SIZE - off;
1235 1290
1236 if (off % 4 || count % 4 || (unsigned long)buf % 4) 1291 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1237 return -EINVAL; 1292 return -EINVAL;
@@ -1307,6 +1362,12 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1307 return -EPERM; 1362 return -EPERM;
1308 } 1363 }
1309 1364
1365 if (phba->fc_flag & FC_BLOCK_MGMT_IO) {
1366 sysfs_mbox_idle(phba);
1367 spin_unlock_irq(host->host_lock);
1368 return -EAGAIN;
1369 }
1370
1310 if ((phba->fc_flag & FC_OFFLINE_MODE) || 1371 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
1311 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ 1372 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
1312 1373
@@ -1326,6 +1387,11 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1326 } 1387 }
1327 1388
1328 if (rc != MBX_SUCCESS) { 1389 if (rc != MBX_SUCCESS) {
1390 if (rc == MBX_TIMEOUT) {
1391 phba->sysfs_mbox.mbox->mbox_cmpl =
1392 lpfc_sli_def_mbox_cmpl;
1393 phba->sysfs_mbox.mbox = NULL;
1394 }
1329 sysfs_mbox_idle(phba); 1395 sysfs_mbox_idle(phba);
1330 spin_unlock_irq(host->host_lock); 1396 spin_unlock_irq(host->host_lock);
1331 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; 1397 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
@@ -1344,7 +1410,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1344 1410
1345 phba->sysfs_mbox.offset = off + count; 1411 phba->sysfs_mbox.offset = off + count;
1346 1412
1347 if (phba->sysfs_mbox.offset == sizeof(MAILBOX_t)) 1413 if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
1348 sysfs_mbox_idle(phba); 1414 sysfs_mbox_idle(phba);
1349 1415
1350 spin_unlock_irq(phba->host->host_lock); 1416 spin_unlock_irq(phba->host->host_lock);
@@ -1358,7 +1424,7 @@ static struct bin_attribute sysfs_mbox_attr = {
1358 .mode = S_IRUSR | S_IWUSR, 1424 .mode = S_IRUSR | S_IWUSR,
1359 .owner = THIS_MODULE, 1425 .owner = THIS_MODULE,
1360 }, 1426 },
1361 .size = sizeof(MAILBOX_t), 1427 .size = MAILBOX_CMD_SIZE,
1362 .read = sysfs_mbox_read, 1428 .read = sysfs_mbox_read,
1363 .write = sysfs_mbox_write, 1429 .write = sysfs_mbox_write,
1364}; 1430};
@@ -1494,6 +1560,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
1494 case LA_4GHZ_LINK: 1560 case LA_4GHZ_LINK:
1495 fc_host_speed(shost) = FC_PORTSPEED_4GBIT; 1561 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1496 break; 1562 break;
1563 case LA_8GHZ_LINK:
1564 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1565 break;
1497 default: 1566 default:
1498 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 1567 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1499 break; 1568 break;
@@ -1546,6 +1615,9 @@ lpfc_get_stats(struct Scsi_Host *shost)
1546 unsigned long seconds; 1615 unsigned long seconds;
1547 int rc = 0; 1616 int rc = 0;
1548 1617
1618 if (phba->fc_flag & FC_BLOCK_MGMT_IO)
1619 return NULL;
1620
1549 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1621 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1550 if (!pmboxq) 1622 if (!pmboxq)
1551 return NULL; 1623 return NULL;
@@ -1631,6 +1703,8 @@ lpfc_get_stats(struct Scsi_Host *shost)
1631 else 1703 else
1632 hs->seconds_since_last_reset = seconds - psli->stats_start; 1704 hs->seconds_since_last_reset = seconds - psli->stats_start;
1633 1705
1706 mempool_free(pmboxq, phba->mbox_mem_pool);
1707
1634 return hs; 1708 return hs;
1635} 1709}
1636 1710
@@ -1644,6 +1718,9 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1644 MAILBOX_t *pmb; 1718 MAILBOX_t *pmb;
1645 int rc = 0; 1719 int rc = 0;
1646 1720
1721 if (phba->fc_flag & FC_BLOCK_MGMT_IO)
1722 return;
1723
1647 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1724 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1648 if (!pmboxq) 1725 if (!pmboxq)
1649 return; 1726 return;
@@ -1699,6 +1776,8 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1699 1776
1700 psli->stats_start = get_seconds(); 1777 psli->stats_start = get_seconds();
1701 1778
1779 mempool_free(pmboxq, phba->mbox_mem_pool);
1780
1702 return; 1781 return;
1703} 1782}
1704 1783
@@ -1706,67 +1785,51 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1706 * The LPFC driver treats linkdown handling as target loss events so there 1785 * The LPFC driver treats linkdown handling as target loss events so there
1707 * are no sysfs handlers for link_down_tmo. 1786 * are no sysfs handlers for link_down_tmo.
1708 */ 1787 */
1709static void 1788
1710lpfc_get_starget_port_id(struct scsi_target *starget) 1789static struct lpfc_nodelist *
1790lpfc_get_node_by_target(struct scsi_target *starget)
1711{ 1791{
1712 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1792 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1713 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata; 1793 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata;
1714 uint32_t did = -1; 1794 struct lpfc_nodelist *ndlp;
1715 struct lpfc_nodelist *ndlp = NULL;
1716 1795
1717 spin_lock_irq(shost->host_lock); 1796 spin_lock_irq(shost->host_lock);
1718 /* Search the mapped list for this target ID */ 1797 /* Search for this, mapped, target ID */
1719 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1798 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
1720 if (starget->id == ndlp->nlp_sid) { 1799 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1721 did = ndlp->nlp_DID; 1800 starget->id == ndlp->nlp_sid) {
1722 break; 1801 spin_unlock_irq(shost->host_lock);
1802 return ndlp;
1723 } 1803 }
1724 } 1804 }
1725 spin_unlock_irq(shost->host_lock); 1805 spin_unlock_irq(shost->host_lock);
1806 return NULL;
1807}
1808
1809static void
1810lpfc_get_starget_port_id(struct scsi_target *starget)
1811{
1812 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
1726 1813
1727 fc_starget_port_id(starget) = did; 1814 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
1728} 1815}
1729 1816
1730static void 1817static void
1731lpfc_get_starget_node_name(struct scsi_target *starget) 1818lpfc_get_starget_node_name(struct scsi_target *starget)
1732{ 1819{
1733 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1820 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
1734 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata;
1735 u64 node_name = 0;
1736 struct lpfc_nodelist *ndlp = NULL;
1737
1738 spin_lock_irq(shost->host_lock);
1739 /* Search the mapped list for this target ID */
1740 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1741 if (starget->id == ndlp->nlp_sid) {
1742 node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1743 break;
1744 }
1745 }
1746 spin_unlock_irq(shost->host_lock);
1747 1821
1748 fc_starget_node_name(starget) = node_name; 1822 fc_starget_node_name(starget) =
1823 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
1749} 1824}
1750 1825
1751static void 1826static void
1752lpfc_get_starget_port_name(struct scsi_target *starget) 1827lpfc_get_starget_port_name(struct scsi_target *starget)
1753{ 1828{
1754 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1829 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
1755 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata;
1756 u64 port_name = 0;
1757 struct lpfc_nodelist *ndlp = NULL;
1758
1759 spin_lock_irq(shost->host_lock);
1760 /* Search the mapped list for this target ID */
1761 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1762 if (starget->id == ndlp->nlp_sid) {
1763 port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1764 break;
1765 }
1766 }
1767 spin_unlock_irq(shost->host_lock);
1768 1830
1769 fc_starget_port_name(starget) = port_name; 1831 fc_starget_port_name(starget) =
1832 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
1770} 1833}
1771 1834
1772static void 1835static void
@@ -1895,25 +1958,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1895 sizeof(struct fcp_rsp) + 1958 sizeof(struct fcp_rsp) +
1896 (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64)); 1959 (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64));
1897 1960
1898 switch (phba->pcidev->device) {
1899 case PCI_DEVICE_ID_LP101:
1900 case PCI_DEVICE_ID_BSMB:
1901 case PCI_DEVICE_ID_ZSMB:
1902 phba->cfg_hba_queue_depth = LPFC_LP101_HBA_Q_DEPTH;
1903 break;
1904 case PCI_DEVICE_ID_RFLY:
1905 case PCI_DEVICE_ID_PFLY:
1906 case PCI_DEVICE_ID_BMID:
1907 case PCI_DEVICE_ID_ZMID:
1908 case PCI_DEVICE_ID_TFLY:
1909 phba->cfg_hba_queue_depth = LPFC_LC_HBA_Q_DEPTH;
1910 break;
1911 default:
1912 phba->cfg_hba_queue_depth = LPFC_DFT_HBA_Q_DEPTH;
1913 }
1914 1961
1915 if (phba->cfg_hba_queue_depth > lpfc_hba_queue_depth) 1962 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
1916 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
1917 1963
1918 return; 1964 return;
1919} 1965}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 1251788ce2a3..b8c2a8862d8c 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,6 +18,8 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
22
21struct fc_rport; 23struct fc_rport;
22void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
23void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -43,20 +45,24 @@ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
43void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 45void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
44void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 46void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
45void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 47void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
46int lpfc_nlp_list(struct lpfc_hba *, struct lpfc_nodelist *, int); 48void lpfc_dequeue_node(struct lpfc_hba *, struct lpfc_nodelist *);
49void lpfc_nlp_set_state(struct lpfc_hba *, struct lpfc_nodelist *, int);
50void lpfc_drop_node(struct lpfc_hba *, struct lpfc_nodelist *);
47void lpfc_set_disctmo(struct lpfc_hba *); 51void lpfc_set_disctmo(struct lpfc_hba *);
48int lpfc_can_disctmo(struct lpfc_hba *); 52int lpfc_can_disctmo(struct lpfc_hba *);
49int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *); 53int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
50int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, 54int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
51 struct lpfc_iocbq *, struct lpfc_nodelist *); 55 struct lpfc_iocbq *, struct lpfc_nodelist *);
52int lpfc_nlp_remove(struct lpfc_hba *, struct lpfc_nodelist *);
53void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t); 56void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
57struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
58int lpfc_nlp_put(struct lpfc_nodelist *);
54struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t); 59struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
55void lpfc_disc_list_loopmap(struct lpfc_hba *); 60void lpfc_disc_list_loopmap(struct lpfc_hba *);
56void lpfc_disc_start(struct lpfc_hba *); 61void lpfc_disc_start(struct lpfc_hba *);
57void lpfc_disc_flush_list(struct lpfc_hba *); 62void lpfc_disc_flush_list(struct lpfc_hba *);
58void lpfc_disc_timeout(unsigned long); 63void lpfc_disc_timeout(unsigned long);
59 64
65struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
60struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi); 66struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
61 67
62int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); 68int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
@@ -66,8 +72,7 @@ int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
66 72
67int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *, 73int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
68 struct serv_parm *, uint32_t); 74 struct serv_parm *, uint32_t);
69int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp, 75int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp);
70 int);
71int lpfc_els_abort_flogi(struct lpfc_hba *); 76int lpfc_els_abort_flogi(struct lpfc_hba *);
72int lpfc_initial_flogi(struct lpfc_hba *); 77int lpfc_initial_flogi(struct lpfc_hba *);
73int lpfc_issue_els_plogi(struct lpfc_hba *, uint32_t, uint8_t); 78int lpfc_issue_els_plogi(struct lpfc_hba *, uint32_t, uint8_t);
@@ -113,7 +118,10 @@ void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
113int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int); 118int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
114void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); 119void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
115int lpfc_online(struct lpfc_hba *); 120int lpfc_online(struct lpfc_hba *);
116int lpfc_offline(struct lpfc_hba *); 121void lpfc_block_mgmt_io(struct lpfc_hba *);
122void lpfc_unblock_mgmt_io(struct lpfc_hba *);
123void lpfc_offline_prep(struct lpfc_hba *);
124void lpfc_offline(struct lpfc_hba *);
117 125
118int lpfc_sli_setup(struct lpfc_hba *); 126int lpfc_sli_setup(struct lpfc_hba *);
119int lpfc_sli_queue_setup(struct lpfc_hba *); 127int lpfc_sli_queue_setup(struct lpfc_hba *);
@@ -162,8 +170,8 @@ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
162struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, 170struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
163 struct lpfc_sli_ring *, 171 struct lpfc_sli_ring *,
164 dma_addr_t); 172 dma_addr_t);
165int lpfc_sli_issue_abort_iotag32(struct lpfc_hba *, struct lpfc_sli_ring *, 173int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
166 struct lpfc_iocbq *); 174 struct lpfc_iocbq *);
167int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t, 175int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
168 uint64_t, lpfc_ctx_cmd); 176 uint64_t, lpfc_ctx_cmd);
169int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t, 177int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
@@ -172,9 +180,8 @@ int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
172void lpfc_mbox_timeout(unsigned long); 180void lpfc_mbox_timeout(unsigned long);
173void lpfc_mbox_timeout_handler(struct lpfc_hba *); 181void lpfc_mbox_timeout_handler(struct lpfc_hba *);
174 182
175struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t, uint32_t); 183struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t);
176struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, uint32_t, 184struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, struct lpfc_name *);
177 struct lpfc_name *);
178 185
179int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 186int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
180 uint32_t timeout); 187 uint32_t timeout);
@@ -193,6 +200,9 @@ void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
193 200
194/* Function prototypes. */ 201/* Function prototypes. */
195const char* lpfc_info(struct Scsi_Host *); 202const char* lpfc_info(struct Scsi_Host *);
203void lpfc_scan_start(struct Scsi_Host *);
204int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
205
196void lpfc_get_cfgparam(struct lpfc_hba *); 206void lpfc_get_cfgparam(struct lpfc_hba *);
197int lpfc_alloc_sysfs_attr(struct lpfc_hba *); 207int lpfc_alloc_sysfs_attr(struct lpfc_hba *);
198void lpfc_free_sysfs_attr(struct lpfc_hba *); 208void lpfc_free_sysfs_attr(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index a51a41b7f15d..34a9e3bb2614 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -334,21 +334,22 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
334 334
335 lpfc_set_disctmo(phba); 335 lpfc_set_disctmo(phba);
336 336
337 Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
338 337
339 list_add_tail(&head, &mp->list); 338 list_add_tail(&head, &mp->list);
340 list_for_each_entry_safe(mp, next_mp, &head, list) { 339 list_for_each_entry_safe(mp, next_mp, &head, list) {
341 mlast = mp; 340 mlast = mp;
342 341
342 Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
343
343 Size -= Cnt; 344 Size -= Cnt;
344 345
345 if (!ctptr) 346 if (!ctptr) {
346 ctptr = (uint32_t *) mlast->virt; 347 ctptr = (uint32_t *) mlast->virt;
347 else 348 } else
348 Cnt -= 16; /* subtract length of CT header */ 349 Cnt -= 16; /* subtract length of CT header */
349 350
350 /* Loop through entire NameServer list of DIDs */ 351 /* Loop through entire NameServer list of DIDs */
351 while (Cnt) { 352 while (Cnt >= sizeof (uint32_t)) {
352 353
353 /* Get next DID from NameServer List */ 354 /* Get next DID from NameServer List */
354 CTentry = *ctptr++; 355 CTentry = *ctptr++;
@@ -442,10 +443,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
442 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { 443 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
443 phba->fc_ns_retry++; 444 phba->fc_ns_retry++;
444 /* CT command is being retried */ 445 /* CT command is being retried */
445 ndlp = 446 ndlp = lpfc_findnode_did(phba, NameServer_DID);
446 lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED, 447 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
447 NameServer_DID);
448 if (ndlp) {
449 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 448 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
450 0) { 449 0) {
451 goto out; 450 goto out;
@@ -729,7 +728,7 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
729 uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; 728 uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
730 uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; 729 uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
731 730
732 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID); 731 ndlp = lpfc_findnode_did(phba, FDMI_DID);
733 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 732 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
734 /* FDMI rsp failed */ 733 /* FDMI rsp failed */
735 lpfc_printf_log(phba, 734 lpfc_printf_log(phba,
@@ -1039,6 +1038,9 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
1039 case LA_4GHZ_LINK: 1038 case LA_4GHZ_LINK:
1040 ae->un.PortSpeed = HBA_PORTSPEED_4GBIT; 1039 ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
1041 break; 1040 break;
1041 case LA_8GHZ_LINK:
1042 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
1043 break;
1042 default: 1044 default:
1043 ae->un.PortSpeed = 1045 ae->un.PortSpeed =
1044 HBA_PORTSPEED_UNKNOWN; 1046 HBA_PORTSPEED_UNKNOWN;
@@ -1161,7 +1163,7 @@ lpfc_fdmi_tmo_handler(struct lpfc_hba *phba)
1161{ 1163{
1162 struct lpfc_nodelist *ndlp; 1164 struct lpfc_nodelist *ndlp;
1163 1165
1164 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID); 1166 ndlp = lpfc_findnode_did(phba, FDMI_DID);
1165 if (ndlp) { 1167 if (ndlp) {
1166 if (init_utsname()->nodename[0] != '\0') { 1168 if (init_utsname()->nodename[0] != '\0') {
1167 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA); 1169 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 9766f909c9c6..498059f3f7f4 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -31,6 +31,7 @@
31/* worker thread events */ 31/* worker thread events */
32enum lpfc_work_type { 32enum lpfc_work_type {
33 LPFC_EVT_ONLINE, 33 LPFC_EVT_ONLINE,
34 LPFC_EVT_OFFLINE_PREP,
34 LPFC_EVT_OFFLINE, 35 LPFC_EVT_OFFLINE,
35 LPFC_EVT_WARM_START, 36 LPFC_EVT_WARM_START,
36 LPFC_EVT_KILL, 37 LPFC_EVT_KILL,
@@ -68,7 +69,6 @@ struct lpfc_nodelist {
68 uint16_t nlp_maxframe; /* Max RCV frame size */ 69 uint16_t nlp_maxframe; /* Max RCV frame size */
69 uint8_t nlp_class_sup; /* Supported Classes */ 70 uint8_t nlp_class_sup; /* Supported Classes */
70 uint8_t nlp_retry; /* used for ELS retries */ 71 uint8_t nlp_retry; /* used for ELS retries */
71 uint8_t nlp_disc_refcnt; /* used for DSM */
72 uint8_t nlp_fcp_info; /* class info, bits 0-3 */ 72 uint8_t nlp_fcp_info; /* class info, bits 0-3 */
73#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 73#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
74 74
@@ -79,20 +79,10 @@ struct lpfc_nodelist {
79 struct lpfc_work_evt els_retry_evt; 79 struct lpfc_work_evt els_retry_evt;
80 unsigned long last_ramp_up_time; /* jiffy of last ramp up */ 80 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
81 unsigned long last_q_full_time; /* jiffy of last queue full */ 81 unsigned long last_q_full_time; /* jiffy of last queue full */
82 struct kref kref;
82}; 83};
83 84
84/* Defines for nlp_flag (uint32) */ 85/* Defines for nlp_flag (uint32) */
85#define NLP_NO_LIST 0x0 /* Indicates immediately free node */
86#define NLP_UNUSED_LIST 0x1 /* Flg to indicate node will be freed */
87#define NLP_PLOGI_LIST 0x2 /* Flg to indicate sent PLOGI */
88#define NLP_ADISC_LIST 0x3 /* Flg to indicate sent ADISC */
89#define NLP_REGLOGIN_LIST 0x4 /* Flg to indicate sent REG_LOGIN */
90#define NLP_PRLI_LIST 0x5 /* Flg to indicate sent PRLI */
91#define NLP_UNMAPPED_LIST 0x6 /* Node is now unmapped */
92#define NLP_MAPPED_LIST 0x7 /* Node is now mapped */
93#define NLP_NPR_LIST 0x8 /* Node is in NPort Recovery state */
94#define NLP_JUST_DQ 0x9 /* just deque ndlp in lpfc_nlp_list */
95#define NLP_LIST_MASK 0xf /* mask to see what list node is on */
96#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */ 86#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */
97#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */ 87#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */
98#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */ 88#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */
@@ -108,20 +98,8 @@ struct lpfc_nodelist {
108 ACC */ 98 ACC */
109#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from 99#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
110 NPR list */ 100 NPR list */
111#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
112#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ 101#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
113 102
114/* Defines for list searchs */
115#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
116#define NLP_SEARCH_UNMAPPED 0x2 /* search unmapped */
117#define NLP_SEARCH_PLOGI 0x4 /* search plogi */
118#define NLP_SEARCH_ADISC 0x8 /* search adisc */
119#define NLP_SEARCH_REGLOGIN 0x10 /* search reglogin */
120#define NLP_SEARCH_PRLI 0x20 /* search prli */
121#define NLP_SEARCH_NPR 0x40 /* search npr */
122#define NLP_SEARCH_UNUSED 0x80 /* search mapped */
123#define NLP_SEARCH_ALL 0xff /* search all lists */
124
125/* There are 4 different double linked lists nodelist entries can reside on. 103/* There are 4 different double linked lists nodelist entries can reside on.
126 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used 104 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
127 * when Link Up discovery or Registered State Change Notification (RSCN) 105 * when Link Up discovery or Registered State Change Notification (RSCN)
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index a5f33a0dd4e7..638b3cd677bd 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -182,6 +182,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
182 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); 182 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
183 icmd->un.elsreq64.remoteID = did; /* DID */ 183 icmd->un.elsreq64.remoteID = did; /* DID */
184 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 184 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
185 icmd->ulpTimeout = phba->fc_ratov * 2;
185 } else { 186 } else {
186 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64); 187 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
187 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 188 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
@@ -208,9 +209,9 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
208 } 209 }
209 210
210 /* Save for completion so we can release these resources */ 211 /* Save for completion so we can release these resources */
211 elsiocb->context1 = (uint8_t *) ndlp; 212 elsiocb->context1 = lpfc_nlp_get(ndlp);
212 elsiocb->context2 = (uint8_t *) pcmd; 213 elsiocb->context2 = pcmd;
213 elsiocb->context3 = (uint8_t *) pbuflist; 214 elsiocb->context3 = pbuflist;
214 elsiocb->retry = retry; 215 elsiocb->retry = retry;
215 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 216 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
216 217
@@ -222,16 +223,16 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
222 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 223 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
223 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 224 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
224 "%d:0116 Xmit ELS command x%x to remote " 225 "%d:0116 Xmit ELS command x%x to remote "
225 "NPORT x%x Data: x%x x%x\n", 226 "NPORT x%x I/O tag: x%x, HBA state: x%x\n",
226 phba->brd_no, elscmd, 227 phba->brd_no, elscmd,
227 did, icmd->ulpIoTag, phba->hba_state); 228 did, elsiocb->iotag, phba->hba_state);
228 } else { 229 } else {
229 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 230 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
230 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 231 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
231 "%d:0117 Xmit ELS response x%x to remote " 232 "%d:0117 Xmit ELS response x%x to remote "
232 "NPORT x%x Data: x%x x%x\n", 233 "NPORT x%x I/O tag: x%x, size: x%x\n",
233 phba->brd_no, elscmd, 234 phba->brd_no, elscmd,
234 ndlp->nlp_DID, icmd->ulpIoTag, cmdSize); 235 ndlp->nlp_DID, elsiocb->iotag, cmdSize);
235 } 236 }
236 237
237 return elsiocb; 238 return elsiocb;
@@ -304,7 +305,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
304 goto fail_free_mbox; 305 goto fail_free_mbox;
305 306
306 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 307 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
307 mbox->context2 = ndlp; 308 mbox->context2 = lpfc_nlp_get(ndlp);
308 309
309 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 310 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
310 if (rc == MBX_NOT_FINISHED) 311 if (rc == MBX_NOT_FINISHED)
@@ -313,6 +314,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
313 return 0; 314 return 0;
314 315
315 fail_issue_reg_login: 316 fail_issue_reg_login:
317 lpfc_nlp_put(ndlp);
316 mp = (struct lpfc_dmabuf *) mbox->context1; 318 mp = (struct lpfc_dmabuf *) mbox->context1;
317 lpfc_mbuf_free(phba, mp->virt, mp->phys); 319 lpfc_mbuf_free(phba, mp->virt, mp->phys);
318 kfree(mp); 320 kfree(mp);
@@ -368,9 +370,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
368 mempool_free(mbox, phba->mbox_mem_pool); 370 mempool_free(mbox, phba->mbox_mem_pool);
369 goto fail; 371 goto fail;
370 } 372 }
371 mempool_free(ndlp, phba->nlp_mem_pool); 373 lpfc_nlp_put(ndlp);
372 374
373 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, PT2PT_RemoteID); 375 ndlp = lpfc_findnode_did(phba, PT2PT_RemoteID);
374 if (!ndlp) { 376 if (!ndlp) {
375 /* 377 /*
376 * Cannot find existing Fabric ndlp, so allocate a 378 * Cannot find existing Fabric ndlp, so allocate a
@@ -387,12 +389,11 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
387 sizeof(struct lpfc_name)); 389 sizeof(struct lpfc_name));
388 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 390 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
389 sizeof(struct lpfc_name)); 391 sizeof(struct lpfc_name));
390 ndlp->nlp_state = NLP_STE_NPR_NODE; 392 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
391 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
392 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 393 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
393 } else { 394 } else {
394 /* This side will wait for the PLOGI */ 395 /* This side will wait for the PLOGI */
395 mempool_free( ndlp, phba->nlp_mem_pool); 396 lpfc_nlp_put(ndlp);
396 } 397 }
397 398
398 spin_lock_irq(phba->host->host_lock); 399 spin_lock_irq(phba->host->host_lock);
@@ -407,8 +408,8 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
407} 408}
408 409
409static void 410static void
410lpfc_cmpl_els_flogi(struct lpfc_hba * phba, 411lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
411 struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb) 412 struct lpfc_iocbq *rspiocb)
412{ 413{
413 IOCB_t *irsp = &rspiocb->iocb; 414 IOCB_t *irsp = &rspiocb->iocb;
414 struct lpfc_nodelist *ndlp = cmdiocb->context1; 415 struct lpfc_nodelist *ndlp = cmdiocb->context1;
@@ -418,7 +419,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
418 419
419 /* Check to see if link went down during discovery */ 420 /* Check to see if link went down during discovery */
420 if (lpfc_els_chk_latt(phba)) { 421 if (lpfc_els_chk_latt(phba)) {
421 lpfc_nlp_remove(phba, ndlp); 422 lpfc_nlp_put(ndlp);
422 goto out; 423 goto out;
423 } 424 }
424 425
@@ -433,13 +434,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
433 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 434 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
434 spin_unlock_irq(phba->host->host_lock); 435 spin_unlock_irq(phba->host->host_lock);
435 436
436 /* If private loop, then allow max outstandting els to be 437 /* If private loop, then allow max outstanding els to be
437 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 438 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
438 * alpa map would take too long otherwise. 439 * alpa map would take too long otherwise.
439 */ 440 */
440 if (phba->alpa_map[0] == 0) { 441 if (phba->alpa_map[0] == 0) {
441 phba->cfg_discovery_threads = 442 phba->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
442 LPFC_MAX_DISC_THREADS;
443 } 443 }
444 444
445 /* FLOGI failure */ 445 /* FLOGI failure */
@@ -484,7 +484,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
484 } 484 }
485 485
486flogifail: 486flogifail:
487 lpfc_nlp_remove(phba, ndlp); 487 lpfc_nlp_put(ndlp);
488 488
489 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || 489 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
490 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED && 490 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
@@ -582,24 +582,8 @@ lpfc_els_abort_flogi(struct lpfc_hba * phba)
582 icmd = &iocb->iocb; 582 icmd = &iocb->iocb;
583 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 583 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
584 ndlp = (struct lpfc_nodelist *)(iocb->context1); 584 ndlp = (struct lpfc_nodelist *)(iocb->context1);
585 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) { 585 if (ndlp && (ndlp->nlp_DID == Fabric_DID))
586 list_del(&iocb->list); 586 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
587 pring->txcmplq_cnt--;
588
589 if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
590 lpfc_sli_issue_abort_iotag32
591 (phba, pring, iocb);
592 }
593 if (iocb->iocb_cmpl) {
594 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
595 icmd->un.ulpWord[4] =
596 IOERR_SLI_ABORTED;
597 spin_unlock_irq(phba->host->host_lock);
598 (iocb->iocb_cmpl) (phba, iocb, iocb);
599 spin_lock_irq(phba->host->host_lock);
600 } else
601 lpfc_sli_release_iocbq(phba, iocb);
602 }
603 } 587 }
604 } 588 }
605 spin_unlock_irq(phba->host->host_lock); 589 spin_unlock_irq(phba->host->host_lock);
@@ -608,12 +592,12 @@ lpfc_els_abort_flogi(struct lpfc_hba * phba)
608} 592}
609 593
610int 594int
611lpfc_initial_flogi(struct lpfc_hba * phba) 595lpfc_initial_flogi(struct lpfc_hba *phba)
612{ 596{
613 struct lpfc_nodelist *ndlp; 597 struct lpfc_nodelist *ndlp;
614 598
615 /* First look for the Fabric ndlp */ 599 /* First look for the Fabric ndlp */
616 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, Fabric_DID); 600 ndlp = lpfc_findnode_did(phba, Fabric_DID);
617 if (!ndlp) { 601 if (!ndlp) {
618 /* Cannot find existing Fabric ndlp, so allocate a new one */ 602 /* Cannot find existing Fabric ndlp, so allocate a new one */
619 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 603 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
@@ -621,10 +605,10 @@ lpfc_initial_flogi(struct lpfc_hba * phba)
621 return 0; 605 return 0;
622 lpfc_nlp_init(phba, ndlp, Fabric_DID); 606 lpfc_nlp_init(phba, ndlp, Fabric_DID);
623 } else { 607 } else {
624 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ); 608 lpfc_dequeue_node(phba, ndlp);
625 } 609 }
626 if (lpfc_issue_els_flogi(phba, ndlp, 0)) { 610 if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
627 mempool_free( ndlp, phba->nlp_mem_pool); 611 lpfc_nlp_put(ndlp);
628 } 612 }
629 return 1; 613 return 1;
630} 614}
@@ -653,7 +637,7 @@ lpfc_more_plogi(struct lpfc_hba * phba)
653} 637}
654 638
655static struct lpfc_nodelist * 639static struct lpfc_nodelist *
656lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp, 640lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
657 struct lpfc_nodelist *ndlp) 641 struct lpfc_nodelist *ndlp)
658{ 642{
659 struct lpfc_nodelist *new_ndlp; 643 struct lpfc_nodelist *new_ndlp;
@@ -670,12 +654,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
670 654
671 lp = (uint32_t *) prsp->virt; 655 lp = (uint32_t *) prsp->virt;
672 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 656 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
673 memset(name, 0, sizeof (struct lpfc_name)); 657 memset(name, 0, sizeof(struct lpfc_name));
674 658
675 /* Now we to find out if the NPort we are logging into, matches the WWPN 659 /* Now we find out if the NPort we are logging into, matches the WWPN
676 * we have for that ndlp. If not, we have some work to do. 660 * we have for that ndlp. If not, we have some work to do.
677 */ 661 */
678 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); 662 new_ndlp = lpfc_findnode_wwpn(phba, &sp->portName);
679 663
680 if (new_ndlp == ndlp) 664 if (new_ndlp == ndlp)
681 return ndlp; 665 return ndlp;
@@ -695,18 +679,15 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
695 lpfc_unreg_rpi(phba, new_ndlp); 679 lpfc_unreg_rpi(phba, new_ndlp);
696 new_ndlp->nlp_DID = ndlp->nlp_DID; 680 new_ndlp->nlp_DID = ndlp->nlp_DID;
697 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 681 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
698 new_ndlp->nlp_state = ndlp->nlp_state; 682 lpfc_nlp_set_state(phba, new_ndlp, ndlp->nlp_state);
699 lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK);
700 683
701 /* Move this back to NPR list */ 684 /* Move this back to NPR list */
702 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 685 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0)
703 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 686 lpfc_drop_node(phba, ndlp);
704 }
705 else { 687 else {
706 lpfc_unreg_rpi(phba, ndlp); 688 lpfc_unreg_rpi(phba, ndlp);
707 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 689 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
708 ndlp->nlp_state = NLP_STE_NPR_NODE; 690 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
709 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
710 } 691 }
711 return new_ndlp; 692 return new_ndlp;
712} 693}
@@ -720,13 +701,11 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
720 struct lpfc_dmabuf *prsp; 701 struct lpfc_dmabuf *prsp;
721 int disc, rc, did, type; 702 int disc, rc, did, type;
722 703
723
724 /* we pass cmdiocb to state machine which needs rspiocb as well */ 704 /* we pass cmdiocb to state machine which needs rspiocb as well */
725 cmdiocb->context_un.rsp_iocb = rspiocb; 705 cmdiocb->context_un.rsp_iocb = rspiocb;
726 706
727 irsp = &rspiocb->iocb; 707 irsp = &rspiocb->iocb;
728 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, 708 ndlp = lpfc_findnode_did(phba, irsp->un.elsreq64.remoteID);
729 irsp->un.elsreq64.remoteID);
730 if (!ndlp) 709 if (!ndlp)
731 goto out; 710 goto out;
732 711
@@ -1354,7 +1333,7 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1354 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 1333 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1355 ndlp->nlp_DID, ELS_CMD_SCR); 1334 ndlp->nlp_DID, ELS_CMD_SCR);
1356 if (!elsiocb) { 1335 if (!elsiocb) {
1357 mempool_free( ndlp, phba->nlp_mem_pool); 1336 lpfc_nlp_put(ndlp);
1358 return 1; 1337 return 1;
1359 } 1338 }
1360 1339
@@ -1373,12 +1352,12 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1373 spin_lock_irq(phba->host->host_lock); 1352 spin_lock_irq(phba->host->host_lock);
1374 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1353 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1375 spin_unlock_irq(phba->host->host_lock); 1354 spin_unlock_irq(phba->host->host_lock);
1376 mempool_free( ndlp, phba->nlp_mem_pool); 1355 lpfc_nlp_put(ndlp);
1377 lpfc_els_free_iocb(phba, elsiocb); 1356 lpfc_els_free_iocb(phba, elsiocb);
1378 return 1; 1357 return 1;
1379 } 1358 }
1380 spin_unlock_irq(phba->host->host_lock); 1359 spin_unlock_irq(phba->host->host_lock);
1381 mempool_free( ndlp, phba->nlp_mem_pool); 1360 lpfc_nlp_put(ndlp);
1382 return 0; 1361 return 0;
1383} 1362}
1384 1363
@@ -1407,7 +1386,7 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1407 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 1386 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1408 ndlp->nlp_DID, ELS_CMD_RNID); 1387 ndlp->nlp_DID, ELS_CMD_RNID);
1409 if (!elsiocb) { 1388 if (!elsiocb) {
1410 mempool_free( ndlp, phba->nlp_mem_pool); 1389 lpfc_nlp_put(ndlp);
1411 return 1; 1390 return 1;
1412 } 1391 }
1413 1392
@@ -1428,7 +1407,7 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1428 1407
1429 memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name)); 1408 memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
1430 memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); 1409 memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1431 if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) { 1410 if ((ondlp = lpfc_findnode_did(phba, nportid))) {
1432 memcpy(&fp->OportName, &ondlp->nlp_portname, 1411 memcpy(&fp->OportName, &ondlp->nlp_portname,
1433 sizeof (struct lpfc_name)); 1412 sizeof (struct lpfc_name));
1434 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 1413 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
@@ -1440,12 +1419,12 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1440 spin_lock_irq(phba->host->host_lock); 1419 spin_lock_irq(phba->host->host_lock);
1441 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1420 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1442 spin_unlock_irq(phba->host->host_lock); 1421 spin_unlock_irq(phba->host->host_lock);
1443 mempool_free( ndlp, phba->nlp_mem_pool); 1422 lpfc_nlp_put(ndlp);
1444 lpfc_els_free_iocb(phba, elsiocb); 1423 lpfc_els_free_iocb(phba, elsiocb);
1445 return 1; 1424 return 1;
1446 } 1425 }
1447 spin_unlock_irq(phba->host->host_lock); 1426 spin_unlock_irq(phba->host->host_lock);
1448 mempool_free( ndlp, phba->nlp_mem_pool); 1427 lpfc_nlp_put(ndlp);
1449 return 0; 1428 return 0;
1450} 1429}
1451 1430
@@ -1554,29 +1533,25 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1554 case ELS_CMD_PLOGI: 1533 case ELS_CMD_PLOGI:
1555 if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) { 1534 if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) {
1556 ndlp->nlp_prev_state = ndlp->nlp_state; 1535 ndlp->nlp_prev_state = ndlp->nlp_state;
1557 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 1536 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1558 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1559 } 1537 }
1560 break; 1538 break;
1561 case ELS_CMD_ADISC: 1539 case ELS_CMD_ADISC:
1562 if (!lpfc_issue_els_adisc(phba, ndlp, retry)) { 1540 if (!lpfc_issue_els_adisc(phba, ndlp, retry)) {
1563 ndlp->nlp_prev_state = ndlp->nlp_state; 1541 ndlp->nlp_prev_state = ndlp->nlp_state;
1564 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 1542 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
1565 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1566 } 1543 }
1567 break; 1544 break;
1568 case ELS_CMD_PRLI: 1545 case ELS_CMD_PRLI:
1569 if (!lpfc_issue_els_prli(phba, ndlp, retry)) { 1546 if (!lpfc_issue_els_prli(phba, ndlp, retry)) {
1570 ndlp->nlp_prev_state = ndlp->nlp_state; 1547 ndlp->nlp_prev_state = ndlp->nlp_state;
1571 ndlp->nlp_state = NLP_STE_PRLI_ISSUE; 1548 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
1572 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1573 } 1549 }
1574 break; 1550 break;
1575 case ELS_CMD_LOGO: 1551 case ELS_CMD_LOGO:
1576 if (!lpfc_issue_els_logo(phba, ndlp, retry)) { 1552 if (!lpfc_issue_els_logo(phba, ndlp, retry)) {
1577 ndlp->nlp_prev_state = ndlp->nlp_state; 1553 ndlp->nlp_prev_state = ndlp->nlp_state;
1578 ndlp->nlp_state = NLP_STE_NPR_NODE; 1554 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1579 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1580 } 1555 }
1581 break; 1556 break;
1582 } 1557 }
@@ -1614,12 +1589,12 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1614 cmd = *elscmd++; 1589 cmd = *elscmd++;
1615 } 1590 }
1616 1591
1617 if(ndlp) 1592 if (ndlp)
1618 did = ndlp->nlp_DID; 1593 did = ndlp->nlp_DID;
1619 else { 1594 else {
1620 /* We should only hit this case for retrying PLOGI */ 1595 /* We should only hit this case for retrying PLOGI */
1621 did = irsp->un.elsreq64.remoteID; 1596 did = irsp->un.elsreq64.remoteID;
1622 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did); 1597 ndlp = lpfc_findnode_did(phba, did);
1623 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 1598 if (!ndlp && (cmd != ELS_CMD_PLOGI))
1624 return 1; 1599 return 1;
1625 } 1600 }
@@ -1746,8 +1721,7 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1746 ndlp->nlp_flag |= NLP_DELAY_TMO; 1721 ndlp->nlp_flag |= NLP_DELAY_TMO;
1747 1722
1748 ndlp->nlp_prev_state = ndlp->nlp_state; 1723 ndlp->nlp_prev_state = ndlp->nlp_state;
1749 ndlp->nlp_state = NLP_STE_NPR_NODE; 1724 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1750 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1751 ndlp->nlp_last_elscmd = cmd; 1725 ndlp->nlp_last_elscmd = cmd;
1752 1726
1753 return 1; 1727 return 1;
@@ -1759,27 +1733,24 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1759 case ELS_CMD_PLOGI: 1733 case ELS_CMD_PLOGI:
1760 if (ndlp) { 1734 if (ndlp) {
1761 ndlp->nlp_prev_state = ndlp->nlp_state; 1735 ndlp->nlp_prev_state = ndlp->nlp_state;
1762 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 1736 lpfc_nlp_set_state(phba, ndlp,
1763 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST); 1737 NLP_STE_PLOGI_ISSUE);
1764 } 1738 }
1765 lpfc_issue_els_plogi(phba, did, cmdiocb->retry); 1739 lpfc_issue_els_plogi(phba, did, cmdiocb->retry);
1766 return 1; 1740 return 1;
1767 case ELS_CMD_ADISC: 1741 case ELS_CMD_ADISC:
1768 ndlp->nlp_prev_state = ndlp->nlp_state; 1742 ndlp->nlp_prev_state = ndlp->nlp_state;
1769 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 1743 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
1770 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1771 lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry); 1744 lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
1772 return 1; 1745 return 1;
1773 case ELS_CMD_PRLI: 1746 case ELS_CMD_PRLI:
1774 ndlp->nlp_prev_state = ndlp->nlp_state; 1747 ndlp->nlp_prev_state = ndlp->nlp_state;
1775 ndlp->nlp_state = NLP_STE_PRLI_ISSUE; 1748 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
1776 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1777 lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry); 1749 lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
1778 return 1; 1750 return 1;
1779 case ELS_CMD_LOGO: 1751 case ELS_CMD_LOGO:
1780 ndlp->nlp_prev_state = ndlp->nlp_state; 1752 ndlp->nlp_prev_state = ndlp->nlp_state;
1781 ndlp->nlp_state = NLP_STE_NPR_NODE; 1753 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1782 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1783 lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry); 1754 lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
1784 return 1; 1755 return 1;
1785 } 1756 }
@@ -1796,10 +1767,14 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1796} 1767}
1797 1768
1798int 1769int
1799lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb) 1770lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
1800{ 1771{
1801 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 1772 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
1802 1773
1774 if (elsiocb->context1) {
1775 lpfc_nlp_put(elsiocb->context1);
1776 elsiocb->context1 = NULL;
1777 }
1803 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 1778 /* context2 = cmd, context2->next = rsp, context3 = bpl */
1804 if (elsiocb->context2) { 1779 if (elsiocb->context2) {
1805 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 1780 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
@@ -1843,7 +1818,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1843 1818
1844 switch (ndlp->nlp_state) { 1819 switch (ndlp->nlp_state) {
1845 case NLP_STE_UNUSED_NODE: /* node is just allocated */ 1820 case NLP_STE_UNUSED_NODE: /* node is just allocated */
1846 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1821 lpfc_drop_node(phba, ndlp);
1847 break; 1822 break;
1848 case NLP_STE_NPR_NODE: /* NPort Recovery mode */ 1823 case NLP_STE_NPR_NODE: /* NPort Recovery mode */
1849 lpfc_unreg_rpi(phba, ndlp); 1824 lpfc_unreg_rpi(phba, ndlp);
@@ -1856,8 +1831,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1856} 1831}
1857 1832
1858static void 1833static void
1859lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 1834lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1860 struct lpfc_iocbq * rspiocb) 1835 struct lpfc_iocbq *rspiocb)
1861{ 1836{
1862 IOCB_t *irsp; 1837 IOCB_t *irsp;
1863 struct lpfc_nodelist *ndlp; 1838 struct lpfc_nodelist *ndlp;
@@ -1872,14 +1847,14 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1872 1847
1873 1848
1874 /* Check to see if link went down during discovery */ 1849 /* Check to see if link went down during discovery */
1875 if ((lpfc_els_chk_latt(phba)) || !ndlp) { 1850 if (lpfc_els_chk_latt(phba) || !ndlp) {
1876 if (mbox) { 1851 if (mbox) {
1877 mp = (struct lpfc_dmabuf *) mbox->context1; 1852 mp = (struct lpfc_dmabuf *) mbox->context1;
1878 if (mp) { 1853 if (mp) {
1879 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1854 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1880 kfree(mp); 1855 kfree(mp);
1881 } 1856 }
1882 mempool_free( mbox, phba->mbox_mem_pool); 1857 mempool_free(mbox, phba->mbox_mem_pool);
1883 } 1858 }
1884 goto out; 1859 goto out;
1885 } 1860 }
@@ -1899,15 +1874,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1899 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 1874 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1900 lpfc_unreg_rpi(phba, ndlp); 1875 lpfc_unreg_rpi(phba, ndlp);
1901 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 1876 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1902 mbox->context2 = ndlp; 1877 mbox->context2 = lpfc_nlp_get(ndlp);
1903 ndlp->nlp_prev_state = ndlp->nlp_state; 1878 ndlp->nlp_prev_state = ndlp->nlp_state;
1904 ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE; 1879 lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE);
1905 lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
1906 if (lpfc_sli_issue_mbox(phba, mbox, 1880 if (lpfc_sli_issue_mbox(phba, mbox,
1907 (MBX_NOWAIT | MBX_STOP_IOCB)) 1881 (MBX_NOWAIT | MBX_STOP_IOCB))
1908 != MBX_NOT_FINISHED) { 1882 != MBX_NOT_FINISHED) {
1909 goto out; 1883 goto out;
1910 } 1884 }
1885 lpfc_nlp_put(ndlp);
1911 /* NOTE: we should have messages for unsuccessful 1886 /* NOTE: we should have messages for unsuccessful
1912 reglogin */ 1887 reglogin */
1913 } else { 1888 } else {
@@ -1917,7 +1892,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1917 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || 1892 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
1918 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { 1893 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
1919 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 1894 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1920 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1895 lpfc_drop_node(phba, ndlp);
1921 ndlp = NULL; 1896 ndlp = NULL;
1922 } 1897 }
1923 } 1898 }
@@ -2012,15 +1987,16 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
2012 return 1; 1987 return 1;
2013 } 1988 }
2014 1989
2015 if (newnode) 1990 if (newnode) {
1991 lpfc_nlp_put(ndlp);
2016 elsiocb->context1 = NULL; 1992 elsiocb->context1 = NULL;
1993 }
2017 1994
2018 /* Xmit ELS ACC response tag <ulpIoTag> */ 1995 /* Xmit ELS ACC response tag <ulpIoTag> */
2019 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1996 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2020 "%d:0128 Xmit ELS ACC response tag x%x " 1997 "%d:0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
2021 "Data: x%x x%x x%x x%x x%x\n", 1998 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
2022 phba->brd_no, 1999 phba->brd_no, elsiocb->iotag,
2023 elsiocb->iocb.ulpIoTag,
2024 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2000 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2025 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2001 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2026 2002
@@ -2077,10 +2053,9 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
2077 2053
2078 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 2054 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
2079 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2055 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2080 "%d:0129 Xmit ELS RJT x%x response tag x%x " 2056 "%d:0129 Xmit ELS RJT x%x response tag x%x xri x%x, "
2081 "Data: x%x x%x x%x x%x x%x\n", 2057 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2082 phba->brd_no, 2058 phba->brd_no, rejectError, elsiocb->iotag,
2083 rejectError, elsiocb->iocb.ulpIoTag,
2084 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2059 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2085 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2060 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2086 2061
@@ -2119,18 +2094,18 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
2119 if (!elsiocb) 2094 if (!elsiocb)
2120 return 1; 2095 return 1;
2121 2096
2097 icmd = &elsiocb->iocb;
2098 oldcmd = &oldiocb->iocb;
2099 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2100
2122 /* Xmit ADISC ACC response tag <ulpIoTag> */ 2101 /* Xmit ADISC ACC response tag <ulpIoTag> */
2123 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2102 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2124 "%d:0130 Xmit ADISC ACC response tag x%x " 2103 "%d:0130 Xmit ADISC ACC response iotag x%x xri: "
2125 "Data: x%x x%x x%x x%x x%x\n", 2104 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
2126 phba->brd_no, 2105 phba->brd_no, elsiocb->iotag,
2127 elsiocb->iocb.ulpIoTag,
2128 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2106 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2129 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2107 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2130 2108
2131 icmd = &elsiocb->iocb;
2132 oldcmd = &oldiocb->iocb;
2133 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2134 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2109 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2135 2110
2136 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2111 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -2155,8 +2130,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
2155} 2130}
2156 2131
2157int 2132int
2158lpfc_els_rsp_prli_acc(struct lpfc_hba * phba, 2133lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2159 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) 2134 struct lpfc_nodelist *ndlp)
2160{ 2135{
2161 PRLI *npr; 2136 PRLI *npr;
2162 lpfc_vpd_t *vpd; 2137 lpfc_vpd_t *vpd;
@@ -2178,18 +2153,18 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
2178 if (!elsiocb) 2153 if (!elsiocb)
2179 return 1; 2154 return 1;
2180 2155
2156 icmd = &elsiocb->iocb;
2157 oldcmd = &oldiocb->iocb;
2158 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2159
2181 /* Xmit PRLI ACC response tag <ulpIoTag> */ 2160 /* Xmit PRLI ACC response tag <ulpIoTag> */
2182 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2161 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2183 "%d:0131 Xmit PRLI ACC response tag x%x " 2162 "%d:0131 Xmit PRLI ACC response tag x%x xri x%x, "
2184 "Data: x%x x%x x%x x%x x%x\n", 2163 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2185 phba->brd_no, 2164 phba->brd_no, elsiocb->iotag,
2186 elsiocb->iocb.ulpIoTag,
2187 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2165 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2188 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2166 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2189 2167
2190 icmd = &elsiocb->iocb;
2191 oldcmd = &oldiocb->iocb;
2192 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2193 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2168 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2194 2169
2195 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 2170 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
@@ -2232,9 +2207,8 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
2232} 2207}
2233 2208
2234static int 2209static int
2235lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba, 2210lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2236 uint8_t format, 2211 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
2237 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2238{ 2212{
2239 RNID *rn; 2213 RNID *rn;
2240 IOCB_t *icmd; 2214 IOCB_t *icmd;
@@ -2259,17 +2233,17 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
2259 if (!elsiocb) 2233 if (!elsiocb)
2260 return 1; 2234 return 1;
2261 2235
2236 icmd = &elsiocb->iocb;
2237 oldcmd = &oldiocb->iocb;
2238 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2239
2262 /* Xmit RNID ACC response tag <ulpIoTag> */ 2240 /* Xmit RNID ACC response tag <ulpIoTag> */
2263 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2241 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2264 "%d:0132 Xmit RNID ACC response tag x%x " 2242 "%d:0132 Xmit RNID ACC response tag x%x "
2265 "Data: x%x\n", 2243 "xri x%x\n",
2266 phba->brd_no, 2244 phba->brd_no, elsiocb->iotag,
2267 elsiocb->iocb.ulpIoTag,
2268 elsiocb->iocb.ulpContext); 2245 elsiocb->iocb.ulpContext);
2269 2246
2270 icmd = &elsiocb->iocb;
2271 oldcmd = &oldiocb->iocb;
2272 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2273 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2247 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2274 2248
2275 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2249 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -2301,6 +2275,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
2301 2275
2302 phba->fc_stat.elsXmitACC++; 2276 phba->fc_stat.elsXmitACC++;
2303 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2277 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2278 lpfc_nlp_put(ndlp);
2304 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 2279 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2305 * it could be freed */ 2280 * it could be freed */
2306 2281
@@ -2315,32 +2290,31 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
2315} 2290}
2316 2291
2317int 2292int
2318lpfc_els_disc_adisc(struct lpfc_hba * phba) 2293lpfc_els_disc_adisc(struct lpfc_hba *phba)
2319{ 2294{
2320 int sentadisc; 2295 int sentadisc;
2321 struct lpfc_nodelist *ndlp, *next_ndlp; 2296 struct lpfc_nodelist *ndlp, *next_ndlp;
2322 2297
2323 sentadisc = 0; 2298 sentadisc = 0;
2324 /* go thru NPR list and issue any remaining ELS ADISCs */ 2299 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2325 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list, 2300 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
2326 nlp_listp) { 2301 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2327 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 2302 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2328 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 2303 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2329 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2304 spin_lock_irq(phba->host->host_lock);
2330 ndlp->nlp_prev_state = ndlp->nlp_state; 2305 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2331 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 2306 spin_unlock_irq(phba->host->host_lock);
2332 lpfc_nlp_list(phba, ndlp, 2307 ndlp->nlp_prev_state = ndlp->nlp_state;
2333 NLP_ADISC_LIST); 2308 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
2334 lpfc_issue_els_adisc(phba, ndlp, 0); 2309 lpfc_issue_els_adisc(phba, ndlp, 0);
2335 sentadisc++; 2310 sentadisc++;
2336 phba->num_disc_nodes++; 2311 phba->num_disc_nodes++;
2337 if (phba->num_disc_nodes >= 2312 if (phba->num_disc_nodes >=
2338 phba->cfg_discovery_threads) { 2313 phba->cfg_discovery_threads) {
2339 spin_lock_irq(phba->host->host_lock); 2314 spin_lock_irq(phba->host->host_lock);
2340 phba->fc_flag |= FC_NLP_MORE; 2315 phba->fc_flag |= FC_NLP_MORE;
2341 spin_unlock_irq(phba->host->host_lock); 2316 spin_unlock_irq(phba->host->host_lock);
2342 break; 2317 break;
2343 }
2344 } 2318 }
2345 } 2319 }
2346 } 2320 }
@@ -2360,24 +2334,22 @@ lpfc_els_disc_plogi(struct lpfc_hba * phba)
2360 2334
2361 sentplogi = 0; 2335 sentplogi = 0;
2362 /* go thru NPR list and issue any remaining ELS PLOGIs */ 2336 /* go thru NPR list and issue any remaining ELS PLOGIs */
2363 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list, 2337 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
2364 nlp_listp) { 2338 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2365 if ((ndlp->nlp_flag & NLP_NPR_2B_DISC) && 2339 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2366 (!(ndlp->nlp_flag & NLP_DELAY_TMO))) { 2340 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
2367 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 2341 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
2368 ndlp->nlp_prev_state = ndlp->nlp_state; 2342 ndlp->nlp_prev_state = ndlp->nlp_state;
2369 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 2343 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
2370 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST); 2344 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
2371 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 2345 sentplogi++;
2372 sentplogi++; 2346 phba->num_disc_nodes++;
2373 phba->num_disc_nodes++; 2347 if (phba->num_disc_nodes >=
2374 if (phba->num_disc_nodes >= 2348 phba->cfg_discovery_threads) {
2375 phba->cfg_discovery_threads) { 2349 spin_lock_irq(phba->host->host_lock);
2376 spin_lock_irq(phba->host->host_lock); 2350 phba->fc_flag |= FC_NLP_MORE;
2377 phba->fc_flag |= FC_NLP_MORE; 2351 spin_unlock_irq(phba->host->host_lock);
2378 spin_unlock_irq(phba->host->host_lock); 2352 break;
2379 break;
2380 }
2381 } 2353 }
2382 } 2354 }
2383 } 2355 }
@@ -2479,42 +2451,30 @@ lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
2479} 2451}
2480 2452
2481static int 2453static int
2482lpfc_rscn_recovery_check(struct lpfc_hba * phba) 2454lpfc_rscn_recovery_check(struct lpfc_hba *phba)
2483{ 2455{
2484 struct lpfc_nodelist *ndlp = NULL, *next_ndlp; 2456 struct lpfc_nodelist *ndlp = NULL;
2485 struct list_head *listp;
2486 struct list_head *node_list[7];
2487 int i;
2488 2457
2489 /* Look at all nodes effected by pending RSCNs and move 2458 /* Look at all nodes effected by pending RSCNs and move
2490 * them to NPR list. 2459 * them to NPR state.
2491 */ 2460 */
2492 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
2493 node_list[1] = &phba->fc_nlpmap_list;
2494 node_list[2] = &phba->fc_nlpunmap_list;
2495 node_list[3] = &phba->fc_prli_list;
2496 node_list[4] = &phba->fc_reglogin_list;
2497 node_list[5] = &phba->fc_adisc_list;
2498 node_list[6] = &phba->fc_plogi_list;
2499 for (i = 0; i < 7; i++) {
2500 listp = node_list[i];
2501 if (list_empty(listp))
2502 continue;
2503 2461
2504 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) { 2462 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
2505 if (!(lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) 2463 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
2506 continue; 2464 lpfc_rscn_payload_check(phba, ndlp->nlp_DID) == 0)
2465 continue;
2507 2466
2508 lpfc_disc_state_machine(phba, ndlp, NULL, 2467 lpfc_disc_state_machine(phba, ndlp, NULL,
2509 NLP_EVT_DEVICE_RECOVERY); 2468 NLP_EVT_DEVICE_RECOVERY);
2510 2469
2511 /* Make sure NLP_DELAY_TMO is NOT running 2470 /*
2512 * after a device recovery event. 2471 * Make sure NLP_DELAY_TMO is NOT running after a device
2513 */ 2472 * recovery event.
2514 if (ndlp->nlp_flag & NLP_DELAY_TMO) 2473 */
2515 lpfc_cancel_retry_delay_tmo(phba, ndlp); 2474 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2516 } 2475 lpfc_cancel_retry_delay_tmo(phba, ndlp);
2517 } 2476 }
2477
2518 return 0; 2478 return 0;
2519} 2479}
2520 2480
@@ -2639,8 +2599,8 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba)
2639 2599
2640 /* To process RSCN, first compare RSCN data with NameServer */ 2600 /* To process RSCN, first compare RSCN data with NameServer */
2641 phba->fc_ns_retry = 0; 2601 phba->fc_ns_retry = 0;
2642 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED, NameServer_DID); 2602 ndlp = lpfc_findnode_did(phba, NameServer_DID);
2643 if (ndlp) { 2603 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
2644 /* Good ndlp, issue CT Request to NameServer */ 2604 /* Good ndlp, issue CT Request to NameServer */
2645 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) { 2605 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
2646 /* Wait for NameServer query cmpl before we can 2606 /* Wait for NameServer query cmpl before we can
@@ -2650,7 +2610,7 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba)
2650 } else { 2610 } else {
2651 /* If login to NameServer does not exist, issue one */ 2611 /* If login to NameServer does not exist, issue one */
2652 /* Good status, issue PLOGI to NameServer */ 2612 /* Good status, issue PLOGI to NameServer */
2653 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID); 2613 ndlp = lpfc_findnode_did(phba, NameServer_DID);
2654 if (ndlp) { 2614 if (ndlp) {
2655 /* Wait for NameServer login cmpl before we can 2615 /* Wait for NameServer login cmpl before we can
2656 continue */ 2616 continue */
@@ -2664,8 +2624,7 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba)
2664 lpfc_nlp_init(phba, ndlp, NameServer_DID); 2624 lpfc_nlp_init(phba, ndlp, NameServer_DID);
2665 ndlp->nlp_type |= NLP_FABRIC; 2625 ndlp->nlp_type |= NLP_FABRIC;
2666 ndlp->nlp_prev_state = ndlp->nlp_state; 2626 ndlp->nlp_prev_state = ndlp->nlp_state;
2667 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 2627 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
2668 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
2669 lpfc_issue_els_plogi(phba, NameServer_DID, 0); 2628 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
2670 /* Wait for NameServer login cmpl before we can 2629 /* Wait for NameServer login cmpl before we can
2671 continue */ 2630 continue */
@@ -2734,8 +2693,9 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
2734 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2693 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2735 rc = lpfc_sli_issue_mbox 2694 rc = lpfc_sli_issue_mbox
2736 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 2695 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
2696 lpfc_set_loopback_flag(phba);
2737 if (rc == MBX_NOT_FINISHED) { 2697 if (rc == MBX_NOT_FINISHED) {
2738 mempool_free( mbox, phba->mbox_mem_pool); 2698 mempool_free(mbox, phba->mbox_mem_pool);
2739 } 2699 }
2740 return 1; 2700 return 1;
2741 } else if (rc > 0) { /* greater than */ 2701 } else if (rc > 0) { /* greater than */
@@ -2800,8 +2760,8 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
2800} 2760}
2801 2761
2802static int 2762static int
2803lpfc_els_rcv_lirr(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2763lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2804 struct lpfc_nodelist * ndlp) 2764 struct lpfc_nodelist *ndlp)
2805{ 2765{
2806 struct ls_rjt stat; 2766 struct ls_rjt stat;
2807 2767
@@ -2815,7 +2775,7 @@ lpfc_els_rcv_lirr(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2815} 2775}
2816 2776
2817static void 2777static void
2818lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 2778lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2819{ 2779{
2820 struct lpfc_sli *psli; 2780 struct lpfc_sli *psli;
2821 struct lpfc_sli_ring *pring; 2781 struct lpfc_sli_ring *pring;
@@ -2838,14 +2798,15 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2838 pmb->context2 = NULL; 2798 pmb->context2 = NULL;
2839 2799
2840 if (mb->mbxStatus) { 2800 if (mb->mbxStatus) {
2841 mempool_free( pmb, phba->mbox_mem_pool); 2801 mempool_free(pmb, phba->mbox_mem_pool);
2842 return; 2802 return;
2843 } 2803 }
2844 2804
2845 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); 2805 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
2846 mempool_free( pmb, phba->mbox_mem_pool); 2806 mempool_free(pmb, phba->mbox_mem_pool);
2847 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp, 2807 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp,
2848 ndlp->nlp_DID, ELS_CMD_ACC); 2808 ndlp->nlp_DID, ELS_CMD_ACC);
2809 lpfc_nlp_put(ndlp);
2849 if (!elsiocb) 2810 if (!elsiocb)
2850 return; 2811 return;
2851 2812
@@ -2875,15 +2836,15 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2875 2836
2876 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 2837 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
2877 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2838 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2878 "%d:0118 Xmit ELS RPS ACC response tag x%x " 2839 "%d:0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
2879 "Data: x%x x%x x%x x%x x%x\n", 2840 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2880 phba->brd_no, 2841 phba->brd_no, elsiocb->iotag,
2881 elsiocb->iocb.ulpIoTag,
2882 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2842 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2883 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2843 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2884 2844
2885 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2845 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2886 phba->fc_stat.elsXmitACC++; 2846 phba->fc_stat.elsXmitACC++;
2847
2887 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2848 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
2888 lpfc_els_free_iocb(phba, elsiocb); 2849 lpfc_els_free_iocb(phba, elsiocb);
2889 } 2850 }
@@ -2923,13 +2884,14 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2923 lpfc_read_lnk_stat(phba, mbox); 2884 lpfc_read_lnk_stat(phba, mbox);
2924 mbox->context1 = 2885 mbox->context1 =
2925 (void *)((unsigned long)cmdiocb->iocb.ulpContext); 2886 (void *)((unsigned long)cmdiocb->iocb.ulpContext);
2926 mbox->context2 = ndlp; 2887 mbox->context2 = lpfc_nlp_get(ndlp);
2927 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 2888 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
2928 if (lpfc_sli_issue_mbox (phba, mbox, 2889 if (lpfc_sli_issue_mbox (phba, mbox,
2929 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) { 2890 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) {
2930 /* Mbox completion will send ELS Response */ 2891 /* Mbox completion will send ELS Response */
2931 return 0; 2892 return 0;
2932 } 2893 }
2894 lpfc_nlp_put(ndlp);
2933 mempool_free(mbox, phba->mbox_mem_pool); 2895 mempool_free(mbox, phba->mbox_mem_pool);
2934 } 2896 }
2935 } 2897 }
@@ -2984,10 +2946,9 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
2984 2946
2985 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 2947 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
2986 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2948 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2987 "%d:0120 Xmit ELS RPL ACC response tag x%x " 2949 "%d:0120 Xmit ELS RPL ACC response tag x%x xri x%x, "
2988 "Data: x%x x%x x%x x%x x%x\n", 2950 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2989 phba->brd_no, 2951 phba->brd_no, elsiocb->iotag,
2990 elsiocb->iocb.ulpIoTag,
2991 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2952 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2992 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2953 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2993 2954
@@ -3091,8 +3052,8 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
3091 /* Log back into the node before sending the FARP. */ 3052 /* Log back into the node before sending the FARP. */
3092 if (fp->Rflags & FARP_REQUEST_PLOGI) { 3053 if (fp->Rflags & FARP_REQUEST_PLOGI) {
3093 ndlp->nlp_prev_state = ndlp->nlp_state; 3054 ndlp->nlp_prev_state = ndlp->nlp_state;
3094 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 3055 lpfc_nlp_set_state(phba, ndlp,
3095 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST); 3056 NLP_STE_PLOGI_ISSUE);
3096 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 3057 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
3097 } 3058 }
3098 3059
@@ -3169,14 +3130,15 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3169 */ 3130 */
3170 3131
3171 list_for_each_entry_safe(ndlp, next_ndlp, 3132 list_for_each_entry_safe(ndlp, next_ndlp,
3172 &phba->fc_npr_list, nlp_listp) { 3133 &phba->fc_nodes, nlp_listp) {
3173 3134 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3135 continue;
3174 if (ndlp->nlp_type & NLP_FABRIC) { 3136 if (ndlp->nlp_type & NLP_FABRIC) {
3175 /* 3137 /*
3176 * Clean up old Fabric, Nameserver and 3138 * Clean up old Fabric, Nameserver and
3177 * other NLP_FABRIC logins 3139 * other NLP_FABRIC logins
3178 */ 3140 */
3179 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3141 lpfc_drop_node(phba, ndlp);
3180 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 3142 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3181 /* Fail outstanding I/O now since this 3143 /* Fail outstanding I/O now since this
3182 * device is marked for PLOGI 3144 * device is marked for PLOGI
@@ -3193,20 +3155,22 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3193 /* Discovery not needed, 3155 /* Discovery not needed,
3194 * move the nodes to their original state. 3156 * move the nodes to their original state.
3195 */ 3157 */
3196 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list, 3158 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
3197 nlp_listp) { 3159 nlp_listp) {
3160 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3161 continue;
3198 3162
3199 switch (ndlp->nlp_prev_state) { 3163 switch (ndlp->nlp_prev_state) {
3200 case NLP_STE_UNMAPPED_NODE: 3164 case NLP_STE_UNMAPPED_NODE:
3201 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 3165 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3202 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 3166 lpfc_nlp_set_state(phba, ndlp,
3203 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST); 3167 NLP_STE_UNMAPPED_NODE);
3204 break; 3168 break;
3205 3169
3206 case NLP_STE_MAPPED_NODE: 3170 case NLP_STE_MAPPED_NODE:
3207 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 3171 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3208 ndlp->nlp_state = NLP_STE_MAPPED_NODE; 3172 lpfc_nlp_set_state(phba, ndlp,
3209 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST); 3173 NLP_STE_MAPPED_NODE);
3210 break; 3174 break;
3211 3175
3212 default: 3176 default:
@@ -3246,9 +3210,8 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3246 struct lpfc_iocbq *tmp_iocb, *piocb; 3210 struct lpfc_iocbq *tmp_iocb, *piocb;
3247 IOCB_t *cmd = NULL; 3211 IOCB_t *cmd = NULL;
3248 struct lpfc_dmabuf *pcmd; 3212 struct lpfc_dmabuf *pcmd;
3249 struct list_head *dlp;
3250 uint32_t *elscmd; 3213 uint32_t *elscmd;
3251 uint32_t els_command; 3214 uint32_t els_command=0;
3252 uint32_t timeout; 3215 uint32_t timeout;
3253 uint32_t remote_ID; 3216 uint32_t remote_ID;
3254 3217
@@ -3263,17 +3226,20 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3263 timeout = (uint32_t)(phba->fc_ratov << 1); 3226 timeout = (uint32_t)(phba->fc_ratov << 1);
3264 3227
3265 pring = &phba->sli.ring[LPFC_ELS_RING]; 3228 pring = &phba->sli.ring[LPFC_ELS_RING];
3266 dlp = &pring->txcmplq;
3267 3229
3268 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 3230 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3269 cmd = &piocb->iocb; 3231 cmd = &piocb->iocb;
3270 3232
3271 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 3233 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) ||
3234 (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN) ||
3235 (piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)) {
3272 continue; 3236 continue;
3273 } 3237 }
3274 pcmd = (struct lpfc_dmabuf *) piocb->context2; 3238 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3275 elscmd = (uint32_t *) (pcmd->virt); 3239 if (pcmd) {
3276 els_command = *elscmd; 3240 elscmd = (uint32_t *) (pcmd->virt);
3241 els_command = *elscmd;
3242 }
3277 3243
3278 if ((els_command == ELS_CMD_FARP) 3244 if ((els_command == ELS_CMD_FARP)
3279 || (els_command == ELS_CMD_FARPR)) { 3245 || (els_command == ELS_CMD_FARPR)) {
@@ -3289,19 +3255,10 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3289 continue; 3255 continue;
3290 } 3256 }
3291 3257
3292 list_del(&piocb->list);
3293 pring->txcmplq_cnt--;
3294
3295 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) { 3258 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
3296 struct lpfc_nodelist *ndlp; 3259 struct lpfc_nodelist *ndlp;
3297 spin_unlock_irq(phba->host->host_lock); 3260 ndlp = __lpfc_findnode_rpi(phba, cmd->ulpContext);
3298 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
3299 spin_lock_irq(phba->host->host_lock);
3300 remote_ID = ndlp->nlp_DID; 3261 remote_ID = ndlp->nlp_DID;
3301 if (cmd->un.elsreq64.bdl.ulpIoTag32) {
3302 lpfc_sli_issue_abort_iotag32(phba,
3303 pring, piocb);
3304 }
3305 } else { 3262 } else {
3306 remote_ID = cmd->un.elsreq64.remoteID; 3263 remote_ID = cmd->un.elsreq64.remoteID;
3307 } 3264 }
@@ -3313,17 +3270,7 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3313 phba->brd_no, els_command, 3270 phba->brd_no, els_command,
3314 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 3271 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
3315 3272
3316 /* 3273 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3317 * The iocb has timed out; abort it.
3318 */
3319 if (piocb->iocb_cmpl) {
3320 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3321 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3322 spin_unlock_irq(phba->host->host_lock);
3323 (piocb->iocb_cmpl) (phba, piocb, piocb);
3324 spin_lock_irq(phba->host->host_lock);
3325 } else
3326 lpfc_sli_release_iocbq(phba, piocb);
3327 } 3274 }
3328 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 3275 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3329 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); 3276 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
@@ -3332,16 +3279,13 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3332} 3279}
3333 3280
3334void 3281void
3335lpfc_els_flush_cmd(struct lpfc_hba * phba) 3282lpfc_els_flush_cmd(struct lpfc_hba *phba)
3336{ 3283{
3337 struct lpfc_sli_ring *pring; 3284 LIST_HEAD(completions);
3285 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3338 struct lpfc_iocbq *tmp_iocb, *piocb; 3286 struct lpfc_iocbq *tmp_iocb, *piocb;
3339 IOCB_t *cmd = NULL; 3287 IOCB_t *cmd = NULL;
3340 struct lpfc_dmabuf *pcmd;
3341 uint32_t *elscmd;
3342 uint32_t els_command;
3343 3288
3344 pring = &phba->sli.ring[LPFC_ELS_RING];
3345 spin_lock_irq(phba->host->host_lock); 3289 spin_lock_irq(phba->host->host_lock);
3346 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 3290 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
3347 cmd = &piocb->iocb; 3291 cmd = &piocb->iocb;
@@ -3351,29 +3295,15 @@ lpfc_els_flush_cmd(struct lpfc_hba * phba)
3351 } 3295 }
3352 3296
3353 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 3297 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
3354 if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) || 3298 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
3355 (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) || 3299 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
3356 (cmd->ulpCommand == CMD_CLOSE_XRI_CN) || 3300 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3357 (cmd->ulpCommand == CMD_ABORT_XRI_CN)) { 3301 cmd->ulpCommand == CMD_ABORT_XRI_CN)
3358 continue; 3302 continue;
3359 }
3360 3303
3361 pcmd = (struct lpfc_dmabuf *) piocb->context2; 3304 list_move_tail(&piocb->list, &completions);
3362 elscmd = (uint32_t *) (pcmd->virt); 3305 pring->txq_cnt--;
3363 els_command = *elscmd;
3364 3306
3365 list_del(&piocb->list);
3366 pring->txcmplq_cnt--;
3367
3368 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3369 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3370
3371 if (piocb->iocb_cmpl) {
3372 spin_unlock_irq(phba->host->host_lock);
3373 (piocb->iocb_cmpl) (phba, piocb, piocb);
3374 spin_lock_irq(phba->host->host_lock);
3375 } else
3376 lpfc_sli_release_iocbq(phba, piocb);
3377 } 3307 }
3378 3308
3379 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 3309 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
@@ -3382,24 +3312,24 @@ lpfc_els_flush_cmd(struct lpfc_hba * phba)
3382 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 3312 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3383 continue; 3313 continue;
3384 } 3314 }
3385 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3386 elscmd = (uint32_t *) (pcmd->virt);
3387 els_command = *elscmd;
3388 3315
3389 list_del(&piocb->list); 3316 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
3390 pring->txcmplq_cnt--; 3317 }
3318 spin_unlock_irq(phba->host->host_lock);
3391 3319
3392 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 3320 while(!list_empty(&completions)) {
3393 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 3321 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
3322 cmd = &piocb->iocb;
3323 list_del(&piocb->list);
3394 3324
3395 if (piocb->iocb_cmpl) { 3325 if (piocb->iocb_cmpl) {
3396 spin_unlock_irq(phba->host->host_lock); 3326 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3327 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3397 (piocb->iocb_cmpl) (phba, piocb, piocb); 3328 (piocb->iocb_cmpl) (phba, piocb, piocb);
3398 spin_lock_irq(phba->host->host_lock);
3399 } else 3329 } else
3400 lpfc_sli_release_iocbq(phba, piocb); 3330 lpfc_sli_release_iocbq(phba, piocb);
3401 } 3331 }
3402 spin_unlock_irq(phba->host->host_lock); 3332
3403 return; 3333 return;
3404} 3334}
3405 3335
@@ -3468,7 +3398,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3468 } 3398 }
3469 3399
3470 did = icmd->un.rcvels.remoteID; 3400 did = icmd->un.rcvels.remoteID;
3471 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did); 3401 ndlp = lpfc_findnode_did(phba, did);
3472 if (!ndlp) { 3402 if (!ndlp) {
3473 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3403 /* Cannot find existing Fabric ndlp, so allocate a new one */
3474 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3404 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
@@ -3484,12 +3414,13 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3484 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { 3414 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3485 ndlp->nlp_type |= NLP_FABRIC; 3415 ndlp->nlp_type |= NLP_FABRIC;
3486 } 3416 }
3487 ndlp->nlp_state = NLP_STE_UNUSED_NODE; 3417 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
3488 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
3489 } 3418 }
3490 3419
3491 phba->fc_stat.elsRcvFrame++; 3420 phba->fc_stat.elsRcvFrame++;
3492 elsiocb->context1 = ndlp; 3421 if (elsiocb->context1)
3422 lpfc_nlp_put(elsiocb->context1);
3423 elsiocb->context1 = lpfc_nlp_get(ndlp);
3493 elsiocb->context2 = mp; 3424 elsiocb->context2 = mp;
3494 3425
3495 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 3426 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
@@ -3513,9 +3444,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3513 case ELS_CMD_FLOGI: 3444 case ELS_CMD_FLOGI:
3514 phba->fc_stat.elsRcvFLOGI++; 3445 phba->fc_stat.elsRcvFLOGI++;
3515 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); 3446 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
3516 if (newnode) { 3447 if (newnode)
3517 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3448 lpfc_drop_node(phba, ndlp);
3518 }
3519 break; 3449 break;
3520 case ELS_CMD_LOGO: 3450 case ELS_CMD_LOGO:
3521 phba->fc_stat.elsRcvLOGO++; 3451 phba->fc_stat.elsRcvLOGO++;
@@ -3536,9 +3466,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3536 case ELS_CMD_RSCN: 3466 case ELS_CMD_RSCN:
3537 phba->fc_stat.elsRcvRSCN++; 3467 phba->fc_stat.elsRcvRSCN++;
3538 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); 3468 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
3539 if (newnode) { 3469 if (newnode)
3540 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3470 lpfc_drop_node(phba, ndlp);
3541 }
3542 break; 3471 break;
3543 case ELS_CMD_ADISC: 3472 case ELS_CMD_ADISC:
3544 phba->fc_stat.elsRcvADISC++; 3473 phba->fc_stat.elsRcvADISC++;
@@ -3579,30 +3508,26 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3579 case ELS_CMD_LIRR: 3508 case ELS_CMD_LIRR:
3580 phba->fc_stat.elsRcvLIRR++; 3509 phba->fc_stat.elsRcvLIRR++;
3581 lpfc_els_rcv_lirr(phba, elsiocb, ndlp); 3510 lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
3582 if (newnode) { 3511 if (newnode)
3583 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3512 lpfc_drop_node(phba, ndlp);
3584 }
3585 break; 3513 break;
3586 case ELS_CMD_RPS: 3514 case ELS_CMD_RPS:
3587 phba->fc_stat.elsRcvRPS++; 3515 phba->fc_stat.elsRcvRPS++;
3588 lpfc_els_rcv_rps(phba, elsiocb, ndlp); 3516 lpfc_els_rcv_rps(phba, elsiocb, ndlp);
3589 if (newnode) { 3517 if (newnode)
3590 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3518 lpfc_drop_node(phba, ndlp);
3591 }
3592 break; 3519 break;
3593 case ELS_CMD_RPL: 3520 case ELS_CMD_RPL:
3594 phba->fc_stat.elsRcvRPL++; 3521 phba->fc_stat.elsRcvRPL++;
3595 lpfc_els_rcv_rpl(phba, elsiocb, ndlp); 3522 lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
3596 if (newnode) { 3523 if (newnode)
3597 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3524 lpfc_drop_node(phba, ndlp);
3598 }
3599 break; 3525 break;
3600 case ELS_CMD_RNID: 3526 case ELS_CMD_RNID:
3601 phba->fc_stat.elsRcvRNID++; 3527 phba->fc_stat.elsRcvRNID++;
3602 lpfc_els_rcv_rnid(phba, elsiocb, ndlp); 3528 lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
3603 if (newnode) { 3529 if (newnode)
3604 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3530 lpfc_drop_node(phba, ndlp);
3605 }
3606 break; 3531 break;
3607 default: 3532 default:
3608 /* Unsupported ELS command, reject */ 3533 /* Unsupported ELS command, reject */
@@ -3612,9 +3537,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3612 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 3537 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3613 "%d:0115 Unknown ELS command x%x received from " 3538 "%d:0115 Unknown ELS command x%x received from "
3614 "NPORT x%x\n", phba->brd_no, cmd, did); 3539 "NPORT x%x\n", phba->brd_no, cmd, did);
3615 if (newnode) { 3540 if (newnode)
3616 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3541 lpfc_drop_node(phba, ndlp);
3617 }
3618 break; 3542 break;
3619 } 3543 }
3620 3544
@@ -3627,6 +3551,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3627 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp); 3551 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
3628 } 3552 }
3629 3553
3554 lpfc_nlp_put(elsiocb->context1);
3555 elsiocb->context1 = NULL;
3630 if (elsiocb->context2) { 3556 if (elsiocb->context2) {
3631 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3557 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3632 kfree(mp); 3558 kfree(mp);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c39564e85e94..61caa8d379e2 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -109,6 +109,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
109 return; 109 return;
110 } 110 }
111 111
112 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
113 return;
114
112 name = (uint8_t *)&ndlp->nlp_portname; 115 name = (uint8_t *)&ndlp->nlp_portname;
113 phba = ndlp->nlp_phba; 116 phba = ndlp->nlp_phba;
114 117
@@ -147,11 +150,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
147 ndlp->nlp_state, ndlp->nlp_rpi); 150 ndlp->nlp_state, ndlp->nlp_rpi);
148 } 151 }
149 152
150 ndlp->rport = NULL; 153 if (!(phba->fc_flag & FC_UNLOADING) &&
151 rdata->pnode = NULL; 154 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
152 155 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
153 if (!(phba->fc_flag & FC_UNLOADING)) 156 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
154 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); 157 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
158 else {
159 rdata->pnode = NULL;
160 ndlp->rport = NULL;
161 lpfc_nlp_put(ndlp);
162 put_device(&rport->dev);
163 }
155 164
156 return; 165 return;
157} 166}
@@ -182,29 +191,35 @@ lpfc_work_list_done(struct lpfc_hba * phba)
182 *(int *)(evtp->evt_arg1) = 0; 191 *(int *)(evtp->evt_arg1) = 0;
183 complete((struct completion *)(evtp->evt_arg2)); 192 complete((struct completion *)(evtp->evt_arg2));
184 break; 193 break;
185 case LPFC_EVT_OFFLINE: 194 case LPFC_EVT_OFFLINE_PREP:
186 if (phba->hba_state >= LPFC_LINK_DOWN) 195 if (phba->hba_state >= LPFC_LINK_DOWN)
187 lpfc_offline(phba); 196 lpfc_offline_prep(phba);
197 *(int *)(evtp->evt_arg1) = 0;
198 complete((struct completion *)(evtp->evt_arg2));
199 break;
200 case LPFC_EVT_OFFLINE:
201 lpfc_offline(phba);
188 lpfc_sli_brdrestart(phba); 202 lpfc_sli_brdrestart(phba);
189 *(int *)(evtp->evt_arg1) = 203 *(int *)(evtp->evt_arg1) =
190 lpfc_sli_brdready(phba,HS_FFRDY | HS_MBRDY); 204 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
205 lpfc_unblock_mgmt_io(phba);
191 complete((struct completion *)(evtp->evt_arg2)); 206 complete((struct completion *)(evtp->evt_arg2));
192 break; 207 break;
193 case LPFC_EVT_WARM_START: 208 case LPFC_EVT_WARM_START:
194 if (phba->hba_state >= LPFC_LINK_DOWN) 209 lpfc_offline(phba);
195 lpfc_offline(phba);
196 lpfc_reset_barrier(phba); 210 lpfc_reset_barrier(phba);
197 lpfc_sli_brdreset(phba); 211 lpfc_sli_brdreset(phba);
198 lpfc_hba_down_post(phba); 212 lpfc_hba_down_post(phba);
199 *(int *)(evtp->evt_arg1) = 213 *(int *)(evtp->evt_arg1) =
200 lpfc_sli_brdready(phba, HS_MBRDY); 214 lpfc_sli_brdready(phba, HS_MBRDY);
215 lpfc_unblock_mgmt_io(phba);
201 complete((struct completion *)(evtp->evt_arg2)); 216 complete((struct completion *)(evtp->evt_arg2));
202 break; 217 break;
203 case LPFC_EVT_KILL: 218 case LPFC_EVT_KILL:
204 if (phba->hba_state >= LPFC_LINK_DOWN) 219 lpfc_offline(phba);
205 lpfc_offline(phba);
206 *(int *)(evtp->evt_arg1) 220 *(int *)(evtp->evt_arg1)
207 = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba); 221 = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
222 lpfc_unblock_mgmt_io(phba);
208 complete((struct completion *)(evtp->evt_arg2)); 223 complete((struct completion *)(evtp->evt_arg2));
209 break; 224 break;
210 } 225 }
@@ -359,13 +374,12 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
359} 374}
360 375
361int 376int
362lpfc_linkdown(struct lpfc_hba * phba) 377lpfc_linkdown(struct lpfc_hba *phba)
363{ 378{
364 struct lpfc_sli *psli; 379 struct lpfc_sli *psli;
365 struct lpfc_nodelist *ndlp, *next_ndlp; 380 struct lpfc_nodelist *ndlp, *next_ndlp;
366 struct list_head *listp, *node_list[7]; 381 LPFC_MBOXQ_t *mb;
367 LPFC_MBOXQ_t *mb; 382 int rc;
368 int rc, i;
369 383
370 psli = &phba->sli; 384 psli = &phba->sli;
371 /* sysfs or selective reset may call this routine to clean up */ 385 /* sysfs or selective reset may call this routine to clean up */
@@ -397,31 +411,16 @@ lpfc_linkdown(struct lpfc_hba * phba)
397 /* Cleanup any outstanding ELS commands */ 411 /* Cleanup any outstanding ELS commands */
398 lpfc_els_flush_cmd(phba); 412 lpfc_els_flush_cmd(phba);
399 413
400 /* Issue a LINK DOWN event to all nodes */ 414 /*
401 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */ 415 * Issue a LINK DOWN event to all nodes.
402 node_list[1] = &phba->fc_nlpmap_list; 416 */
403 node_list[2] = &phba->fc_nlpunmap_list; 417 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
404 node_list[3] = &phba->fc_prli_list; 418 /* free any ndlp's on unused list */
405 node_list[4] = &phba->fc_reglogin_list; 419 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
406 node_list[5] = &phba->fc_adisc_list; 420 lpfc_drop_node(phba, ndlp);
407 node_list[6] = &phba->fc_plogi_list; 421 else /* otherwise, force node recovery. */
408 for (i = 0; i < 7; i++) {
409 listp = node_list[i];
410 if (list_empty(listp))
411 continue;
412
413 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
414
415 rc = lpfc_disc_state_machine(phba, ndlp, NULL, 422 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
416 NLP_EVT_DEVICE_RECOVERY); 423 NLP_EVT_DEVICE_RECOVERY);
417
418 }
419 }
420
421 /* free any ndlp's on unused list */
422 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
423 nlp_listp) {
424 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
425 } 424 }
426 425
427 /* Setup myDID for link up if we are in pt2pt mode */ 426 /* Setup myDID for link up if we are in pt2pt mode */
@@ -452,11 +451,9 @@ lpfc_linkdown(struct lpfc_hba * phba)
452} 451}
453 452
454static int 453static int
455lpfc_linkup(struct lpfc_hba * phba) 454lpfc_linkup(struct lpfc_hba *phba)
456{ 455{
457 struct lpfc_nodelist *ndlp, *next_ndlp; 456 struct lpfc_nodelist *ndlp, *next_ndlp;
458 struct list_head *listp, *node_list[7];
459 int i;
460 457
461 fc_host_post_event(phba->host, fc_get_event_number(), 458 fc_host_post_event(phba->host, fc_get_event_number(),
462 FCH_EVT_LINKUP, 0); 459 FCH_EVT_LINKUP, 0);
@@ -470,29 +467,20 @@ lpfc_linkup(struct lpfc_hba * phba)
470 spin_unlock_irq(phba->host->host_lock); 467 spin_unlock_irq(phba->host->host_lock);
471 468
472 469
473 node_list[0] = &phba->fc_plogi_list; 470 if (phba->fc_flag & FC_LBIT) {
474 node_list[1] = &phba->fc_adisc_list; 471 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
475 node_list[2] = &phba->fc_reglogin_list; 472 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
476 node_list[3] = &phba->fc_prli_list;
477 node_list[4] = &phba->fc_nlpunmap_list;
478 node_list[5] = &phba->fc_nlpmap_list;
479 node_list[6] = &phba->fc_npr_list;
480 for (i = 0; i < 7; i++) {
481 listp = node_list[i];
482 if (list_empty(listp))
483 continue;
484
485 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
486 if (phba->fc_flag & FC_LBIT) {
487 if (ndlp->nlp_type & NLP_FABRIC) { 473 if (ndlp->nlp_type & NLP_FABRIC) {
488 /* On Linkup its safe to clean up the 474 /*
475 * On Linkup its safe to clean up the
489 * ndlp from Fabric connections. 476 * ndlp from Fabric connections.
490 */ 477 */
491 lpfc_nlp_list(phba, ndlp, 478 lpfc_nlp_set_state(phba, ndlp,
492 NLP_UNUSED_LIST); 479 NLP_STE_UNUSED_NODE);
493 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 480 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
494 /* Fail outstanding IO now since device 481 /*
495 * is marked for PLOGI. 482 * Fail outstanding IO now since
483 * device is marked for PLOGI.
496 */ 484 */
497 lpfc_unreg_rpi(phba, ndlp); 485 lpfc_unreg_rpi(phba, ndlp);
498 } 486 }
@@ -501,9 +489,10 @@ lpfc_linkup(struct lpfc_hba * phba)
501 } 489 }
502 490
503 /* free any ndlp's on unused list */ 491 /* free any ndlp's on unused list */
504 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list, 492 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
505 nlp_listp) { 493 nlp_listp) {
506 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 494 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
495 lpfc_drop_node(phba, ndlp);
507 } 496 }
508 497
509 return 0; 498 return 0;
@@ -734,6 +723,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
734 case LA_4GHZ_LINK: 723 case LA_4GHZ_LINK:
735 phba->fc_linkspeed = LA_4GHZ_LINK; 724 phba->fc_linkspeed = LA_4GHZ_LINK;
736 break; 725 break;
726 case LA_8GHZ_LINK:
727 phba->fc_linkspeed = LA_8GHZ_LINK;
728 break;
737 default: 729 default:
738 phba->fc_linkspeed = LA_UNKNW_LINK; 730 phba->fc_linkspeed = LA_UNKNW_LINK;
739 break; 731 break;
@@ -889,12 +881,21 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
889 881
890 if (la->attType == AT_LINK_UP) { 882 if (la->attType == AT_LINK_UP) {
891 phba->fc_stat.LinkUp++; 883 phba->fc_stat.LinkUp++;
892 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 884 if (phba->fc_flag & FC_LOOPBACK_MODE) {
885 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
886 "%d:1306 Link Up Event in loop back mode "
887 "x%x received Data: x%x x%x x%x x%x\n",
888 phba->brd_no, la->eventTag, phba->fc_eventTag,
889 la->granted_AL_PA, la->UlnkSpeed,
890 phba->alpa_map[0]);
891 } else {
892 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
893 "%d:1303 Link Up Event x%x received " 893 "%d:1303 Link Up Event x%x received "
894 "Data: x%x x%x x%x x%x\n", 894 "Data: x%x x%x x%x x%x\n",
895 phba->brd_no, la->eventTag, phba->fc_eventTag, 895 phba->brd_no, la->eventTag, phba->fc_eventTag,
896 la->granted_AL_PA, la->UlnkSpeed, 896 la->granted_AL_PA, la->UlnkSpeed,
897 phba->alpa_map[0]); 897 phba->alpa_map[0]);
898 }
898 lpfc_mbx_process_link_up(phba, la); 899 lpfc_mbx_process_link_up(phba, la);
899 } else { 900 } else {
900 phba->fc_stat.LinkDown++; 901 phba->fc_stat.LinkDown++;
@@ -940,6 +941,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
940 lpfc_mbuf_free(phba, mp->virt, mp->phys); 941 lpfc_mbuf_free(phba, mp->virt, mp->phys);
941 kfree(mp); 942 kfree(mp);
942 mempool_free( pmb, phba->mbox_mem_pool); 943 mempool_free( pmb, phba->mbox_mem_pool);
944 lpfc_nlp_put(ndlp);
943 945
944 return; 946 return;
945} 947}
@@ -966,11 +968,14 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
966 ndlp = (struct lpfc_nodelist *) pmb->context2; 968 ndlp = (struct lpfc_nodelist *) pmb->context2;
967 mp = (struct lpfc_dmabuf *) (pmb->context1); 969 mp = (struct lpfc_dmabuf *) (pmb->context1);
968 970
971 pmb->context1 = NULL;
972 pmb->context2 = NULL;
973
969 if (mb->mbxStatus) { 974 if (mb->mbxStatus) {
970 lpfc_mbuf_free(phba, mp->virt, mp->phys); 975 lpfc_mbuf_free(phba, mp->virt, mp->phys);
971 kfree(mp); 976 kfree(mp);
972 mempool_free( pmb, phba->mbox_mem_pool); 977 mempool_free(pmb, phba->mbox_mem_pool);
973 mempool_free( ndlp, phba->nlp_mem_pool); 978 lpfc_nlp_put(ndlp);
974 979
975 /* FLOGI failed, so just use loop map to make discovery list */ 980 /* FLOGI failed, so just use loop map to make discovery list */
976 lpfc_disc_list_loopmap(phba); 981 lpfc_disc_list_loopmap(phba);
@@ -980,12 +985,11 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
980 return; 985 return;
981 } 986 }
982 987
983 pmb->context1 = NULL;
984
985 ndlp->nlp_rpi = mb->un.varWords[0]; 988 ndlp->nlp_rpi = mb->un.varWords[0];
986 ndlp->nlp_type |= NLP_FABRIC; 989 ndlp->nlp_type |= NLP_FABRIC;
987 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 990 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
988 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST); 991
992 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
989 993
990 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) { 994 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
991 /* This NPort has been assigned an NPort_ID by the fabric as a 995 /* This NPort has been assigned an NPort_ID by the fabric as a
@@ -996,7 +1000,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
996 */ 1000 */
997 lpfc_issue_els_scr(phba, SCR_DID, 0); 1001 lpfc_issue_els_scr(phba, SCR_DID, 0);
998 1002
999 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID); 1003 ndlp = lpfc_findnode_did(phba, NameServer_DID);
1000 if (!ndlp) { 1004 if (!ndlp) {
1001 /* Allocate a new node instance. If the pool is empty, 1005 /* Allocate a new node instance. If the pool is empty,
1002 * start the discovery process and skip the Nameserver 1006 * start the discovery process and skip the Nameserver
@@ -1008,15 +1012,14 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1008 lpfc_disc_start(phba); 1012 lpfc_disc_start(phba);
1009 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1013 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1010 kfree(mp); 1014 kfree(mp);
1011 mempool_free( pmb, phba->mbox_mem_pool); 1015 mempool_free(pmb, phba->mbox_mem_pool);
1012 return; 1016 return;
1013 } else { 1017 } else {
1014 lpfc_nlp_init(phba, ndlp, NameServer_DID); 1018 lpfc_nlp_init(phba, ndlp, NameServer_DID);
1015 ndlp->nlp_type |= NLP_FABRIC; 1019 ndlp->nlp_type |= NLP_FABRIC;
1016 } 1020 }
1017 } 1021 }
1018 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 1022 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1019 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1020 lpfc_issue_els_plogi(phba, NameServer_DID, 0); 1023 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
1021 if (phba->cfg_fdmi_on) { 1024 if (phba->cfg_fdmi_on) {
1022 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 1025 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
@@ -1032,7 +1035,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1032 1035
1033 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1036 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1034 kfree(mp); 1037 kfree(mp);
1035 mempool_free( pmb, phba->mbox_mem_pool); 1038 mempool_free(pmb, phba->mbox_mem_pool);
1036 return; 1039 return;
1037} 1040}
1038 1041
@@ -1057,10 +1060,11 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1057 mp = (struct lpfc_dmabuf *) (pmb->context1); 1060 mp = (struct lpfc_dmabuf *) (pmb->context1);
1058 1061
1059 if (mb->mbxStatus) { 1062 if (mb->mbxStatus) {
1063 lpfc_nlp_put(ndlp);
1060 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1064 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1061 kfree(mp); 1065 kfree(mp);
1062 mempool_free( pmb, phba->mbox_mem_pool); 1066 mempool_free(pmb, phba->mbox_mem_pool);
1063 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1067 lpfc_drop_node(phba, ndlp);
1064 1068
1065 /* RegLogin failed, so just use loop map to make discovery 1069 /* RegLogin failed, so just use loop map to make discovery
1066 list */ 1070 list */
@@ -1075,8 +1079,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1075 1079
1076 ndlp->nlp_rpi = mb->un.varWords[0]; 1080 ndlp->nlp_rpi = mb->un.varWords[0];
1077 ndlp->nlp_type |= NLP_FABRIC; 1081 ndlp->nlp_type |= NLP_FABRIC;
1078 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 1082 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
1079 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1080 1083
1081 if (phba->hba_state < LPFC_HBA_READY) { 1084 if (phba->hba_state < LPFC_HBA_READY) {
1082 /* Link up discovery requires Fabrib registration. */ 1085 /* Link up discovery requires Fabrib registration. */
@@ -1093,6 +1096,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1093 lpfc_disc_start(phba); 1096 lpfc_disc_start(phba);
1094 } 1097 }
1095 1098
1099 lpfc_nlp_put(ndlp);
1096 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1100 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1097 kfree(mp); 1101 kfree(mp);
1098 mempool_free( pmb, phba->mbox_mem_pool); 1102 mempool_free( pmb, phba->mbox_mem_pool);
@@ -1101,8 +1105,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1101} 1105}
1102 1106
1103static void 1107static void
1104lpfc_register_remote_port(struct lpfc_hba * phba, 1108lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1105 struct lpfc_nodelist * ndlp)
1106{ 1109{
1107 struct fc_rport *rport; 1110 struct fc_rport *rport;
1108 struct lpfc_rport_data *rdata; 1111 struct lpfc_rport_data *rdata;
@@ -1114,8 +1117,19 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1114 rport_ids.port_id = ndlp->nlp_DID; 1117 rport_ids.port_id = ndlp->nlp_DID;
1115 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 1118 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1116 1119
1120 /*
1121 * We leave our node pointer in rport->dd_data when we unregister a
1122 * FCP target port. But fc_remote_port_add zeros the space to which
1123 * rport->dd_data points. So, if we're reusing a previously
1124 * registered port, drop the reference that we took the last time we
1125 * registered the port.
1126 */
1127 if (ndlp->rport && ndlp->rport->dd_data &&
1128 *(struct lpfc_rport_data **) ndlp->rport->dd_data) {
1129 lpfc_nlp_put(ndlp);
1130 }
1117 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids); 1131 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
1118 if (!rport) { 1132 if (!rport || !get_device(&rport->dev)) {
1119 dev_printk(KERN_WARNING, &phba->pcidev->dev, 1133 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1120 "Warning: fc_remote_port_add failed\n"); 1134 "Warning: fc_remote_port_add failed\n");
1121 return; 1135 return;
@@ -1125,7 +1139,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1125 rport->maxframe_size = ndlp->nlp_maxframe; 1139 rport->maxframe_size = ndlp->nlp_maxframe;
1126 rport->supported_classes = ndlp->nlp_class_sup; 1140 rport->supported_classes = ndlp->nlp_class_sup;
1127 rdata = rport->dd_data; 1141 rdata = rport->dd_data;
1128 rdata->pnode = ndlp; 1142 rdata->pnode = lpfc_nlp_get(ndlp);
1129 1143
1130 if (ndlp->nlp_type & NLP_FCP_TARGET) 1144 if (ndlp->nlp_type & NLP_FCP_TARGET)
1131 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 1145 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
@@ -1145,8 +1159,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1145} 1159}
1146 1160
1147static void 1161static void
1148lpfc_unregister_remote_port(struct lpfc_hba * phba, 1162lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1149 struct lpfc_nodelist * ndlp)
1150{ 1163{
1151 struct fc_rport *rport = ndlp->rport; 1164 struct fc_rport *rport = ndlp->rport;
1152 struct lpfc_rport_data *rdata = rport->dd_data; 1165 struct lpfc_rport_data *rdata = rport->dd_data;
@@ -1154,6 +1167,8 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba,
1154 if (rport->scsi_target_id == -1) { 1167 if (rport->scsi_target_id == -1) {
1155 ndlp->rport = NULL; 1168 ndlp->rport = NULL;
1156 rdata->pnode = NULL; 1169 rdata->pnode = NULL;
1170 lpfc_nlp_put(ndlp);
1171 put_device(&rport->dev);
1157 } 1172 }
1158 1173
1159 fc_remote_port_delete(rport); 1174 fc_remote_port_delete(rport);
@@ -1161,178 +1176,70 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba,
1161 return; 1176 return;
1162} 1177}
1163 1178
1164int 1179static void
1165lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) 1180lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count)
1166{ 1181{
1167 enum { none, unmapped, mapped } rport_add = none, rport_del = none;
1168 struct lpfc_sli *psli;
1169
1170 psli = &phba->sli;
1171 /* Sanity check to ensure we are not moving to / from the same list */
1172 if ((nlp->nlp_flag & NLP_LIST_MASK) == list)
1173 if (list != NLP_NO_LIST)
1174 return 0;
1175
1176 spin_lock_irq(phba->host->host_lock); 1182 spin_lock_irq(phba->host->host_lock);
1177 switch (nlp->nlp_flag & NLP_LIST_MASK) { 1183 switch (state) {
1178 case NLP_NO_LIST: /* Not on any list */ 1184 case NLP_STE_UNUSED_NODE:
1185 phba->fc_unused_cnt += count;
1179 break; 1186 break;
1180 case NLP_UNUSED_LIST: 1187 case NLP_STE_PLOGI_ISSUE:
1181 phba->fc_unused_cnt--; 1188 phba->fc_plogi_cnt += count;
1182 list_del(&nlp->nlp_listp);
1183 break; 1189 break;
1184 case NLP_PLOGI_LIST: 1190 case NLP_STE_ADISC_ISSUE:
1185 phba->fc_plogi_cnt--; 1191 phba->fc_adisc_cnt += count;
1186 list_del(&nlp->nlp_listp);
1187 break; 1192 break;
1188 case NLP_ADISC_LIST: 1193 case NLP_STE_REG_LOGIN_ISSUE:
1189 phba->fc_adisc_cnt--; 1194 phba->fc_reglogin_cnt += count;
1190 list_del(&nlp->nlp_listp);
1191 break; 1195 break;
1192 case NLP_REGLOGIN_LIST: 1196 case NLP_STE_PRLI_ISSUE:
1193 phba->fc_reglogin_cnt--; 1197 phba->fc_prli_cnt += count;
1194 list_del(&nlp->nlp_listp);
1195 break; 1198 break;
1196 case NLP_PRLI_LIST: 1199 case NLP_STE_UNMAPPED_NODE:
1197 phba->fc_prli_cnt--; 1200 phba->fc_unmap_cnt += count;
1198 list_del(&nlp->nlp_listp);
1199 break; 1201 break;
1200 case NLP_UNMAPPED_LIST: 1202 case NLP_STE_MAPPED_NODE:
1201 phba->fc_unmap_cnt--; 1203 phba->fc_map_cnt += count;
1202 list_del(&nlp->nlp_listp);
1203 nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1204 nlp->nlp_type &= ~NLP_FC_NODE;
1205 phba->nport_event_cnt++;
1206 if (nlp->rport)
1207 rport_del = unmapped;
1208 break; 1204 break;
1209 case NLP_MAPPED_LIST: 1205 case NLP_STE_NPR_NODE:
1210 phba->fc_map_cnt--; 1206 phba->fc_npr_cnt += count;
1211 list_del(&nlp->nlp_listp);
1212 phba->nport_event_cnt++;
1213 if (nlp->rport)
1214 rport_del = mapped;
1215 break;
1216 case NLP_NPR_LIST:
1217 phba->fc_npr_cnt--;
1218 list_del(&nlp->nlp_listp);
1219 /* Stop delay tmo if taking node off NPR list */
1220 if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
1221 (list != NLP_NPR_LIST)) {
1222 spin_unlock_irq(phba->host->host_lock);
1223 lpfc_cancel_retry_delay_tmo(phba, nlp);
1224 spin_lock_irq(phba->host->host_lock);
1225 }
1226 break; 1207 break;
1227 } 1208 }
1209 spin_unlock_irq(phba->host->host_lock);
1210}
1228 1211
1229 nlp->nlp_flag &= ~NLP_LIST_MASK; 1212static void
1230 1213lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1231 /* Add NPort <did> to <num> list */ 1214 int old_state, int new_state)
1232 lpfc_printf_log(phba, 1215{
1233 KERN_INFO, 1216 if (new_state == NLP_STE_UNMAPPED_NODE) {
1234 LOG_NODE, 1217 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1235 "%d:0904 Add NPort x%x to %d list Data: x%x\n", 1218 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1236 phba->brd_no, 1219 ndlp->nlp_type |= NLP_FC_NODE;
1237 nlp->nlp_DID, list, nlp->nlp_flag); 1220 }
1238 1221 if (new_state == NLP_STE_MAPPED_NODE)
1239 switch (list) { 1222 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1240 case NLP_NO_LIST: /* No list, just remove it */ 1223 if (new_state == NLP_STE_NPR_NODE)
1241 spin_unlock_irq(phba->host->host_lock); 1224 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
1242 lpfc_nlp_remove(phba, nlp); 1225
1243 spin_lock_irq(phba->host->host_lock); 1226 /* Transport interface */
1244 /* as node removed - stop further transport calls */ 1227 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1245 rport_del = none; 1228 old_state == NLP_STE_UNMAPPED_NODE)) {
1246 break;
1247 case NLP_UNUSED_LIST:
1248 nlp->nlp_flag |= list;
1249 /* Put it at the end of the unused list */
1250 list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
1251 phba->fc_unused_cnt++;
1252 break;
1253 case NLP_PLOGI_LIST:
1254 nlp->nlp_flag |= list;
1255 /* Put it at the end of the plogi list */
1256 list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
1257 phba->fc_plogi_cnt++;
1258 break;
1259 case NLP_ADISC_LIST:
1260 nlp->nlp_flag |= list;
1261 /* Put it at the end of the adisc list */
1262 list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
1263 phba->fc_adisc_cnt++;
1264 break;
1265 case NLP_REGLOGIN_LIST:
1266 nlp->nlp_flag |= list;
1267 /* Put it at the end of the reglogin list */
1268 list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
1269 phba->fc_reglogin_cnt++;
1270 break;
1271 case NLP_PRLI_LIST:
1272 nlp->nlp_flag |= list;
1273 /* Put it at the end of the prli list */
1274 list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
1275 phba->fc_prli_cnt++;
1276 break;
1277 case NLP_UNMAPPED_LIST:
1278 rport_add = unmapped;
1279 /* ensure all vestiges of "mapped" significance are gone */
1280 nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1281 nlp->nlp_flag |= list;
1282 /* Put it at the end of the unmap list */
1283 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1284 phba->fc_unmap_cnt++;
1285 phba->nport_event_cnt++;
1286 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1287 nlp->nlp_type |= NLP_FC_NODE;
1288 break;
1289 case NLP_MAPPED_LIST:
1290 rport_add = mapped;
1291 nlp->nlp_flag |= list;
1292 /* Put it at the end of the map list */
1293 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1294 phba->fc_map_cnt++;
1295 phba->nport_event_cnt++; 1229 phba->nport_event_cnt++;
1296 nlp->nlp_flag &= ~NLP_NODEV_REMOVE; 1230 lpfc_unregister_remote_port(phba, ndlp);
1297 break;
1298 case NLP_NPR_LIST:
1299 nlp->nlp_flag |= list;
1300 /* Put it at the end of the npr list */
1301 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1302 phba->fc_npr_cnt++;
1303
1304 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
1305 break;
1306 case NLP_JUST_DQ:
1307 break;
1308 } 1231 }
1309 1232
1310 spin_unlock_irq(phba->host->host_lock); 1233 if (new_state == NLP_STE_MAPPED_NODE ||
1311 1234 new_state == NLP_STE_UNMAPPED_NODE) {
1312 /* 1235 phba->nport_event_cnt++;
1313 * We make all the calls into the transport after we have
1314 * moved the node between lists. This so that we don't
1315 * release the lock while in-between lists.
1316 */
1317
1318 /* Don't upcall midlayer if we're unloading */
1319 if (!(phba->fc_flag & FC_UNLOADING)) {
1320 /*
1321 * We revalidate the rport pointer as the "add" function
1322 * may have removed the remote port.
1323 */
1324 if ((rport_del != none) && nlp->rport)
1325 lpfc_unregister_remote_port(phba, nlp);
1326
1327 if (rport_add != none) {
1328 /* 1236 /*
1329 * Tell the fc transport about the port, if we haven't 1237 * Tell the fc transport about the port, if we haven't
1330 * already. If we have, and it's a scsi entity, be 1238 * already. If we have, and it's a scsi entity, be
1331 * sure to unblock any attached scsi devices 1239 * sure to unblock any attached scsi devices
1332 */ 1240 */
1333 if ((!nlp->rport) || (nlp->rport->port_state == 1241 lpfc_register_remote_port(phba, ndlp);
1334 FC_PORTSTATE_BLOCKED)) 1242 }
1335 lpfc_register_remote_port(phba, nlp);
1336 1243
1337 /* 1244 /*
1338 * if we added to Mapped list, but the remote port 1245 * if we added to Mapped list, but the remote port
@@ -1340,19 +1247,95 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1340 * our presentable range - move the node to the 1247 * our presentable range - move the node to the
1341 * Unmapped List 1248 * Unmapped List
1342 */ 1249 */
1343 if ((rport_add == mapped) && 1250 if (new_state == NLP_STE_MAPPED_NODE &&
1344 ((!nlp->rport) || 1251 (!ndlp->rport ||
1345 (nlp->rport->scsi_target_id == -1) || 1252 ndlp->rport->scsi_target_id == -1 ||
1346 (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) { 1253 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
1347 nlp->nlp_state = NLP_STE_UNMAPPED_NODE; 1254 spin_lock_irq(phba->host->host_lock);
1348 spin_lock_irq(phba->host->host_lock); 1255 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1349 nlp->nlp_flag |= NLP_TGT_NO_SCSIID; 1256 spin_unlock_irq(phba->host->host_lock);
1350 spin_unlock_irq(phba->host->host_lock); 1257 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
1351 lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
1352 }
1353 }
1354 } 1258 }
1355 return 0; 1259}
1260
1261static char *
1262lpfc_nlp_state_name(char *buffer, size_t size, int state)
1263{
1264 static char *states[] = {
1265 [NLP_STE_UNUSED_NODE] = "UNUSED",
1266 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
1267 [NLP_STE_ADISC_ISSUE] = "ADISC",
1268 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1269 [NLP_STE_PRLI_ISSUE] = "PRLI",
1270 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1271 [NLP_STE_MAPPED_NODE] = "MAPPED",
1272 [NLP_STE_NPR_NODE] = "NPR",
1273 };
1274
1275 if (state < ARRAY_SIZE(states) && states[state])
1276 strlcpy(buffer, states[state], size);
1277 else
1278 snprintf(buffer, size, "unknown (%d)", state);
1279 return buffer;
1280}
1281
1282void
1283lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state)
1284{
1285 int old_state = ndlp->nlp_state;
1286 char name1[16], name2[16];
1287
1288 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1289 "%d:0904 NPort state transition x%06x, %s -> %s\n",
1290 phba->brd_no,
1291 ndlp->nlp_DID,
1292 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1293 lpfc_nlp_state_name(name2, sizeof(name2), state));
1294 if (old_state == NLP_STE_NPR_NODE &&
1295 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1296 state != NLP_STE_NPR_NODE)
1297 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1298 if (old_state == NLP_STE_UNMAPPED_NODE) {
1299 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1300 ndlp->nlp_type &= ~NLP_FC_NODE;
1301 }
1302
1303 if (list_empty(&ndlp->nlp_listp)) {
1304 spin_lock_irq(phba->host->host_lock);
1305 list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes);
1306 spin_unlock_irq(phba->host->host_lock);
1307 } else if (old_state)
1308 lpfc_nlp_counters(phba, old_state, -1);
1309
1310 ndlp->nlp_state = state;
1311 lpfc_nlp_counters(phba, state, 1);
1312 lpfc_nlp_state_cleanup(phba, ndlp, old_state, state);
1313}
1314
1315void
1316lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1317{
1318 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1319 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1320 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1321 lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
1322 spin_lock_irq(phba->host->host_lock);
1323 list_del_init(&ndlp->nlp_listp);
1324 spin_unlock_irq(phba->host->host_lock);
1325 lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0);
1326}
1327
1328void
1329lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1330{
1331 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1332 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1333 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1334 lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
1335 spin_lock_irq(phba->host->host_lock);
1336 list_del_init(&ndlp->nlp_listp);
1337 spin_unlock_irq(phba->host->host_lock);
1338 lpfc_nlp_put(ndlp);
1356} 1339}
1357 1340
1358/* 1341/*
@@ -1464,6 +1447,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1464static int 1447static int
1465lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1448lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1466{ 1449{
1450 LIST_HEAD(completions);
1467 struct lpfc_sli *psli; 1451 struct lpfc_sli *psli;
1468 struct lpfc_sli_ring *pring; 1452 struct lpfc_sli_ring *pring;
1469 struct lpfc_iocbq *iocb, *next_iocb; 1453 struct lpfc_iocbq *iocb, *next_iocb;
@@ -1492,29 +1476,29 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1492 (phba, pring, iocb, ndlp))) { 1476 (phba, pring, iocb, ndlp))) {
1493 /* It matches, so deque and call compl 1477 /* It matches, so deque and call compl
1494 with an error */ 1478 with an error */
1495 list_del(&iocb->list); 1479 list_move_tail(&iocb->list,
1480 &completions);
1496 pring->txq_cnt--; 1481 pring->txq_cnt--;
1497 if (iocb->iocb_cmpl) {
1498 icmd = &iocb->iocb;
1499 icmd->ulpStatus =
1500 IOSTAT_LOCAL_REJECT;
1501 icmd->un.ulpWord[4] =
1502 IOERR_SLI_ABORTED;
1503 spin_unlock_irq(phba->host->
1504 host_lock);
1505 (iocb->iocb_cmpl) (phba,
1506 iocb, iocb);
1507 spin_lock_irq(phba->host->
1508 host_lock);
1509 } else
1510 lpfc_sli_release_iocbq(phba,
1511 iocb);
1512 } 1482 }
1513 } 1483 }
1514 spin_unlock_irq(phba->host->host_lock); 1484 spin_unlock_irq(phba->host->host_lock);
1515 1485
1516 } 1486 }
1517 } 1487 }
1488
1489 while (!list_empty(&completions)) {
1490 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1491 list_del(&iocb->list);
1492
1493 if (iocb->iocb_cmpl) {
1494 icmd = &iocb->iocb;
1495 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1496 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1497 (iocb->iocb_cmpl) (phba, iocb, iocb);
1498 } else
1499 lpfc_sli_release_iocbq(phba, iocb);
1500 }
1501
1518 return 0; 1502 return 0;
1519} 1503}
1520 1504
@@ -1554,7 +1538,7 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1554 * so it can be freed. 1538 * so it can be freed.
1555 */ 1539 */
1556static int 1540static int
1557lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1541lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1558{ 1542{
1559 LPFC_MBOXQ_t *mb; 1543 LPFC_MBOXQ_t *mb;
1560 LPFC_MBOXQ_t *nextmb; 1544 LPFC_MBOXQ_t *nextmb;
@@ -1567,17 +1551,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1567 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 1551 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1568 ndlp->nlp_state, ndlp->nlp_rpi); 1552 ndlp->nlp_state, ndlp->nlp_rpi);
1569 1553
1570 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ); 1554 lpfc_dequeue_node(phba, ndlp);
1571
1572 /*
1573 * if unloading the driver - just leave the remote port in place.
1574 * The driver unload will force the attached devices to detach
1575 * and flush cache's w/o generating flush errors.
1576 */
1577 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
1578 lpfc_unregister_remote_port(phba, ndlp);
1579 ndlp->nlp_sid = NLP_NO_SID;
1580 }
1581 1555
1582 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1556 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1583 if ((mb = phba->sli.mbox_active)) { 1557 if ((mb = phba->sli.mbox_active)) {
@@ -1599,11 +1573,12 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1599 } 1573 }
1600 list_del(&mb->list); 1574 list_del(&mb->list);
1601 mempool_free(mb, phba->mbox_mem_pool); 1575 mempool_free(mb, phba->mbox_mem_pool);
1576 lpfc_nlp_put(ndlp);
1602 } 1577 }
1603 } 1578 }
1604 spin_unlock_irq(phba->host->host_lock); 1579 spin_unlock_irq(phba->host->host_lock);
1605 1580
1606 lpfc_els_abort(phba,ndlp,0); 1581 lpfc_els_abort(phba,ndlp);
1607 spin_lock_irq(phba->host->host_lock); 1582 spin_lock_irq(phba->host->host_lock);
1608 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 1583 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1609 spin_unlock_irq(phba->host->host_lock); 1584 spin_unlock_irq(phba->host->host_lock);
@@ -1624,27 +1599,27 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1624 * If we are in the middle of using the nlp in the discovery state 1599 * If we are in the middle of using the nlp in the discovery state
1625 * machine, defer the free till we reach the end of the state machine. 1600 * machine, defer the free till we reach the end of the state machine.
1626 */ 1601 */
1627int 1602static void
1628lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1603lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1629{ 1604{
1605 struct lpfc_rport_data *rdata;
1630 1606
1631 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1607 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1632 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1608 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1633 } 1609 }
1634 1610
1635 if (ndlp->nlp_disc_refcnt) { 1611 lpfc_cleanup_node(phba, ndlp);
1636 spin_lock_irq(phba->host->host_lock); 1612
1637 ndlp->nlp_flag |= NLP_DELAY_REMOVE; 1613 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
1638 spin_unlock_irq(phba->host->host_lock); 1614 put_device(&ndlp->rport->dev);
1639 } else { 1615 rdata = ndlp->rport->dd_data;
1640 lpfc_freenode(phba, ndlp); 1616 rdata->pnode = NULL;
1641 mempool_free( ndlp, phba->nlp_mem_pool); 1617 ndlp->rport = NULL;
1642 } 1618 }
1643 return 0;
1644} 1619}
1645 1620
1646static int 1621static int
1647lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did) 1622lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
1648{ 1623{
1649 D_ID mydid; 1624 D_ID mydid;
1650 D_ID ndlpdid; 1625 D_ID ndlpdid;
@@ -1693,57 +1668,36 @@ lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1693 return 0; 1668 return 0;
1694} 1669}
1695 1670
1696/* Search for a nodelist entry on a specific list */ 1671/* Search for a nodelist entry */
1697struct lpfc_nodelist * 1672struct lpfc_nodelist *
1698lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) 1673lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did)
1699{ 1674{
1700 struct lpfc_nodelist *ndlp; 1675 struct lpfc_nodelist *ndlp;
1701 struct list_head *lists[]={&phba->fc_nlpunmap_list,
1702 &phba->fc_nlpmap_list,
1703 &phba->fc_plogi_list,
1704 &phba->fc_adisc_list,
1705 &phba->fc_reglogin_list,
1706 &phba->fc_prli_list,
1707 &phba->fc_npr_list,
1708 &phba->fc_unused_list};
1709 uint32_t search[]={NLP_SEARCH_UNMAPPED,
1710 NLP_SEARCH_MAPPED,
1711 NLP_SEARCH_PLOGI,
1712 NLP_SEARCH_ADISC,
1713 NLP_SEARCH_REGLOGIN,
1714 NLP_SEARCH_PRLI,
1715 NLP_SEARCH_NPR,
1716 NLP_SEARCH_UNUSED};
1717 int i;
1718 uint32_t data1; 1676 uint32_t data1;
1719 1677
1720 spin_lock_irq(phba->host->host_lock); 1678 spin_lock_irq(phba->host->host_lock);
1721 for (i = 0; i < ARRAY_SIZE(lists); i++ ) { 1679 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
1722 if (!(order & search[i])) 1680 if (lpfc_matchdid(phba, ndlp, did)) {
1723 continue; 1681 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1724 list_for_each_entry(ndlp, lists[i], nlp_listp) { 1682 ((uint32_t) ndlp->nlp_xri << 16) |
1725 if (lpfc_matchdid(phba, ndlp, did)) { 1683 ((uint32_t) ndlp->nlp_type << 8) |
1726 data1 = (((uint32_t) ndlp->nlp_state << 24) | 1684 ((uint32_t) ndlp->nlp_rpi & 0xff));
1727 ((uint32_t) ndlp->nlp_xri << 16) | 1685 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1728 ((uint32_t) ndlp->nlp_type << 8) | 1686 "%d:0929 FIND node DID "
1729 ((uint32_t) ndlp->nlp_rpi & 0xff)); 1687 " Data: x%p x%x x%x x%x\n",
1730 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1688 phba->brd_no,
1731 "%d:0929 FIND node DID " 1689 ndlp, ndlp->nlp_DID,
1732 " Data: x%p x%x x%x x%x\n", 1690 ndlp->nlp_flag, data1);
1733 phba->brd_no, 1691 spin_unlock_irq(phba->host->host_lock);
1734 ndlp, ndlp->nlp_DID, 1692 return ndlp;
1735 ndlp->nlp_flag, data1);
1736 spin_unlock_irq(phba->host->host_lock);
1737 return ndlp;
1738 }
1739 } 1693 }
1740 } 1694 }
1741 spin_unlock_irq(phba->host->host_lock); 1695 spin_unlock_irq(phba->host->host_lock);
1742 1696
1743 /* FIND node did <did> NOT FOUND */ 1697 /* FIND node did <did> NOT FOUND */
1744 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1698 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1745 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n", 1699 "%d:0932 FIND node did x%x NOT FOUND.\n",
1746 phba->brd_no, did, order); 1700 phba->brd_no, did);
1747 return NULL; 1701 return NULL;
1748} 1702}
1749 1703
@@ -1751,9 +1705,8 @@ struct lpfc_nodelist *
1751lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did) 1705lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1752{ 1706{
1753 struct lpfc_nodelist *ndlp; 1707 struct lpfc_nodelist *ndlp;
1754 uint32_t flg;
1755 1708
1756 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did); 1709 ndlp = lpfc_findnode_did(phba, did);
1757 if (!ndlp) { 1710 if (!ndlp) {
1758 if ((phba->fc_flag & FC_RSCN_MODE) && 1711 if ((phba->fc_flag & FC_RSCN_MODE) &&
1759 ((lpfc_rscn_payload_check(phba, did) == 0))) 1712 ((lpfc_rscn_payload_check(phba, did) == 0)))
@@ -1763,8 +1716,7 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1763 if (!ndlp) 1716 if (!ndlp)
1764 return NULL; 1717 return NULL;
1765 lpfc_nlp_init(phba, ndlp, did); 1718 lpfc_nlp_init(phba, ndlp, did);
1766 ndlp->nlp_state = NLP_STE_NPR_NODE; 1719 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1767 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1768 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1720 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1769 return ndlp; 1721 return ndlp;
1770 } 1722 }
@@ -1780,11 +1732,10 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1780 } else 1732 } else
1781 ndlp = NULL; 1733 ndlp = NULL;
1782 } else { 1734 } else {
1783 flg = ndlp->nlp_flag & NLP_LIST_MASK; 1735 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
1784 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST)) 1736 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
1785 return NULL; 1737 return NULL;
1786 ndlp->nlp_state = NLP_STE_NPR_NODE; 1738 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1787 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1788 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1739 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1789 } 1740 }
1790 return ndlp; 1741 return ndlp;
@@ -1842,8 +1793,9 @@ lpfc_disc_start(struct lpfc_hba * phba)
1842 struct lpfc_sli *psli; 1793 struct lpfc_sli *psli;
1843 LPFC_MBOXQ_t *mbox; 1794 LPFC_MBOXQ_t *mbox;
1844 struct lpfc_nodelist *ndlp, *next_ndlp; 1795 struct lpfc_nodelist *ndlp, *next_ndlp;
1845 uint32_t did_changed, num_sent; 1796 uint32_t num_sent;
1846 uint32_t clear_la_pending; 1797 uint32_t clear_la_pending;
1798 int did_changed;
1847 int rc; 1799 int rc;
1848 1800
1849 psli = &phba->sli; 1801 psli = &phba->sli;
@@ -1877,14 +1829,13 @@ lpfc_disc_start(struct lpfc_hba * phba)
1877 phba->fc_plogi_cnt, phba->fc_adisc_cnt); 1829 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1878 1830
1879 /* If our did changed, we MUST do PLOGI */ 1831 /* If our did changed, we MUST do PLOGI */
1880 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list, 1832 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
1881 nlp_listp) { 1833 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
1882 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1834 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
1883 if (did_changed) { 1835 did_changed) {
1884 spin_lock_irq(phba->host->host_lock); 1836 spin_lock_irq(phba->host->host_lock);
1885 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1837 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1886 spin_unlock_irq(phba->host->host_lock); 1838 spin_unlock_irq(phba->host->host_lock);
1887 }
1888 } 1839 }
1889 } 1840 }
1890 1841
@@ -1944,11 +1895,11 @@ lpfc_disc_start(struct lpfc_hba * phba)
1944static void 1895static void
1945lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1896lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1946{ 1897{
1898 LIST_HEAD(completions);
1947 struct lpfc_sli *psli; 1899 struct lpfc_sli *psli;
1948 IOCB_t *icmd; 1900 IOCB_t *icmd;
1949 struct lpfc_iocbq *iocb, *next_iocb; 1901 struct lpfc_iocbq *iocb, *next_iocb;
1950 struct lpfc_sli_ring *pring; 1902 struct lpfc_sli_ring *pring;
1951 struct lpfc_dmabuf *mp;
1952 1903
1953 psli = &phba->sli; 1904 psli = &phba->sli;
1954 pring = &psli->ring[LPFC_ELS_RING]; 1905 pring = &psli->ring[LPFC_ELS_RING];
@@ -1956,6 +1907,7 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1956 /* Error matching iocb on txq or txcmplq 1907 /* Error matching iocb on txq or txcmplq
1957 * First check the txq. 1908 * First check the txq.
1958 */ 1909 */
1910 spin_lock_irq(phba->host->host_lock);
1959 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 1911 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
1960 if (iocb->context1 != ndlp) { 1912 if (iocb->context1 != ndlp) {
1961 continue; 1913 continue;
@@ -1964,9 +1916,8 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1964 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || 1916 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
1965 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { 1917 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
1966 1918
1967 list_del(&iocb->list); 1919 list_move_tail(&iocb->list, &completions);
1968 pring->txq_cnt--; 1920 pring->txq_cnt--;
1969 lpfc_els_free_iocb(phba, iocb);
1970 } 1921 }
1971 } 1922 }
1972 1923
@@ -1978,43 +1929,22 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1978 icmd = &iocb->iocb; 1929 icmd = &iocb->iocb;
1979 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || 1930 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
1980 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { 1931 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
1932 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1933 }
1934 }
1935 spin_unlock_irq(phba->host->host_lock);
1981 1936
1982 iocb->iocb_cmpl = NULL; 1937 while (!list_empty(&completions)) {
1983 /* context2 = cmd, context2->next = rsp, context3 = 1938 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1984 bpl */ 1939 list_del(&iocb->list);
1985 if (iocb->context2) {
1986 /* Free the response IOCB before handling the
1987 command. */
1988
1989 mp = (struct lpfc_dmabuf *) (iocb->context2);
1990 mp = list_get_first(&mp->list,
1991 struct lpfc_dmabuf,
1992 list);
1993 if (mp) {
1994 /* Delay before releasing rsp buffer to
1995 * give UNREG mbox a chance to take
1996 * effect.
1997 */
1998 list_add(&mp->list,
1999 &phba->freebufList);
2000 }
2001 lpfc_mbuf_free(phba,
2002 ((struct lpfc_dmabuf *)
2003 iocb->context2)->virt,
2004 ((struct lpfc_dmabuf *)
2005 iocb->context2)->phys);
2006 kfree(iocb->context2);
2007 }
2008 1940
2009 if (iocb->context3) { 1941 if (iocb->iocb_cmpl) {
2010 lpfc_mbuf_free(phba, 1942 icmd = &iocb->iocb;
2011 ((struct lpfc_dmabuf *) 1943 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2012 iocb->context3)->virt, 1944 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2013 ((struct lpfc_dmabuf *) 1945 (iocb->iocb_cmpl) (phba, iocb, iocb);
2014 iocb->context3)->phys); 1946 } else
2015 kfree(iocb->context3); 1947 lpfc_sli_release_iocbq(phba, iocb);
2016 }
2017 }
2018 } 1948 }
2019 1949
2020 return; 1950 return;
@@ -2025,21 +1955,16 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
2025{ 1955{
2026 struct lpfc_nodelist *ndlp, *next_ndlp; 1956 struct lpfc_nodelist *ndlp, *next_ndlp;
2027 1957
2028 if (phba->fc_plogi_cnt) { 1958 if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) {
2029 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list, 1959 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
2030 nlp_listp) { 1960 nlp_listp) {
2031 lpfc_free_tx(phba, ndlp); 1961 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2032 lpfc_nlp_remove(phba, ndlp); 1962 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2033 } 1963 lpfc_free_tx(phba, ndlp);
2034 } 1964 lpfc_nlp_put(ndlp);
2035 if (phba->fc_adisc_cnt) { 1965 }
2036 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2037 nlp_listp) {
2038 lpfc_free_tx(phba, ndlp);
2039 lpfc_nlp_remove(phba, ndlp);
2040 } 1966 }
2041 } 1967 }
2042 return;
2043} 1968}
2044 1969
2045/*****************************************************************************/ 1970/*****************************************************************************/
@@ -2108,11 +2033,13 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2108 phba->brd_no); 2033 phba->brd_no);
2109 2034
2110 /* Start discovery by sending FLOGI, clean up old rpis */ 2035 /* Start discovery by sending FLOGI, clean up old rpis */
2111 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list, 2036 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
2112 nlp_listp) { 2037 nlp_listp) {
2038 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2039 continue;
2113 if (ndlp->nlp_type & NLP_FABRIC) { 2040 if (ndlp->nlp_type & NLP_FABRIC) {
2114 /* Clean up the ndlp on Fabric connections */ 2041 /* Clean up the ndlp on Fabric connections */
2115 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 2042 lpfc_drop_node(phba, ndlp);
2116 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 2043 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2117 /* Fail outstanding IO now since device 2044 /* Fail outstanding IO now since device
2118 * is marked for PLOGI. 2045 * is marked for PLOGI.
@@ -2153,9 +2080,9 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2153 "login\n", phba->brd_no); 2080 "login\n", phba->brd_no);
2154 2081
2155 /* Next look for NameServer ndlp */ 2082 /* Next look for NameServer ndlp */
2156 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID); 2083 ndlp = lpfc_findnode_did(phba, NameServer_DID);
2157 if (ndlp) 2084 if (ndlp)
2158 lpfc_nlp_remove(phba, ndlp); 2085 lpfc_nlp_put(ndlp);
2159 /* Start discovery */ 2086 /* Start discovery */
2160 lpfc_disc_start(phba); 2087 lpfc_disc_start(phba);
2161 break; 2088 break;
@@ -2168,9 +2095,8 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2168 phba->brd_no, 2095 phba->brd_no,
2169 phba->fc_ns_retry, LPFC_MAX_NS_RETRY); 2096 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2170 2097
2171 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED, 2098 ndlp = lpfc_findnode_did(phba, NameServer_DID);
2172 NameServer_DID); 2099 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
2173 if (ndlp) {
2174 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { 2100 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2175 /* Try it one more time */ 2101 /* Try it one more time */
2176 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT); 2102 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
@@ -2220,6 +2146,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2220 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 2146 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2221 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, 2147 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2222 (MBX_NOWAIT | MBX_STOP_IOCB)); 2148 (MBX_NOWAIT | MBX_STOP_IOCB));
2149 lpfc_set_loopback_flag(phba);
2223 if (rc == MBX_NOT_FINISHED) 2150 if (rc == MBX_NOT_FINISHED)
2224 mempool_free(initlinkmbox, phba->mbox_mem_pool); 2151 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2225 2152
@@ -2317,8 +2244,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2317 2244
2318 ndlp->nlp_rpi = mb->un.varWords[0]; 2245 ndlp->nlp_rpi = mb->un.varWords[0];
2319 ndlp->nlp_type |= NLP_FABRIC; 2246 ndlp->nlp_type |= NLP_FABRIC;
2320 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 2247 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
2321 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2322 2248
2323 /* Start issuing Fabric-Device Management Interface (FDMI) 2249 /* Start issuing Fabric-Device Management Interface (FDMI)
2324 * command to 0xfffffa (FDMI well known port) 2250 * command to 0xfffffa (FDMI well known port)
@@ -2333,87 +2259,100 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2333 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60); 2259 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2334 } 2260 }
2335 2261
2262 /* Mailbox took a reference to the node */
2263 lpfc_nlp_put(ndlp);
2336 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2264 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2337 kfree(mp); 2265 kfree(mp);
2338 mempool_free( pmb, phba->mbox_mem_pool); 2266 mempool_free(pmb, phba->mbox_mem_pool);
2339 2267
2340 return; 2268 return;
2341} 2269}
2342 2270
2271static int
2272lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2273{
2274 uint16_t *rpi = param;
2275
2276 return ndlp->nlp_rpi == *rpi;
2277}
2278
2279static int
2280lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2281{
2282 return memcmp(&ndlp->nlp_portname, param,
2283 sizeof(ndlp->nlp_portname)) == 0;
2284}
2285
2286/*
2287 * Search node lists for a remote port matching filter criteria
2288 * Caller needs to hold host_lock before calling this routine.
2289 */
2290struct lpfc_nodelist *
2291__lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
2292{
2293 struct lpfc_nodelist *ndlp;
2294
2295 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
2296 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
2297 filter(ndlp, param))
2298 return ndlp;
2299 }
2300 return NULL;
2301}
2302
2343/* 2303/*
2344 * This routine looks up the ndlp lists 2304 * Search node lists for a remote port matching filter criteria
2345 * for the given RPI. If rpi found 2305 * This routine is used when the caller does NOT have host_lock.
2346 * it return the node list pointer
2347 * else return NULL.
2348 */ 2306 */
2349struct lpfc_nodelist * 2307struct lpfc_nodelist *
2308lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
2309{
2310 struct lpfc_nodelist *ndlp;
2311
2312 spin_lock_irq(phba->host->host_lock);
2313 ndlp = __lpfc_find_node(phba, filter, param);
2314 spin_unlock_irq(phba->host->host_lock);
2315 return ndlp;
2316}
2317
2318/*
2319 * This routine looks up the ndlp lists for the given RPI. If rpi found it
2320 * returns the node list pointer else return NULL.
2321 */
2322struct lpfc_nodelist *
2323__lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi)
2324{
2325 return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi);
2326}
2327
2328struct lpfc_nodelist *
2350lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi) 2329lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
2351{ 2330{
2352 struct lpfc_nodelist *ndlp; 2331 struct lpfc_nodelist *ndlp;
2353 struct list_head * lists[]={&phba->fc_nlpunmap_list,
2354 &phba->fc_nlpmap_list,
2355 &phba->fc_plogi_list,
2356 &phba->fc_adisc_list,
2357 &phba->fc_reglogin_list};
2358 int i;
2359 2332
2360 spin_lock_irq(phba->host->host_lock); 2333 spin_lock_irq(phba->host->host_lock);
2361 for (i = 0; i < ARRAY_SIZE(lists); i++ ) 2334 ndlp = __lpfc_findnode_rpi(phba, rpi);
2362 list_for_each_entry(ndlp, lists[i], nlp_listp)
2363 if (ndlp->nlp_rpi == rpi) {
2364 spin_unlock_irq(phba->host->host_lock);
2365 return ndlp;
2366 }
2367 spin_unlock_irq(phba->host->host_lock); 2335 spin_unlock_irq(phba->host->host_lock);
2368 return NULL; 2336 return ndlp;
2369} 2337}
2370 2338
2371/* 2339/*
2372 * This routine looks up the ndlp lists 2340 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
2373 * for the given WWPN. If WWPN found 2341 * returns the node list pointer else return NULL.
2374 * it return the node list pointer
2375 * else return NULL.
2376 */ 2342 */
2377struct lpfc_nodelist * 2343struct lpfc_nodelist *
2378lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order, 2344lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn)
2379 struct lpfc_name * wwpn)
2380{ 2345{
2381 struct lpfc_nodelist *ndlp; 2346 struct lpfc_nodelist *ndlp;
2382 struct list_head * lists[]={&phba->fc_nlpunmap_list,
2383 &phba->fc_nlpmap_list,
2384 &phba->fc_npr_list,
2385 &phba->fc_plogi_list,
2386 &phba->fc_adisc_list,
2387 &phba->fc_reglogin_list,
2388 &phba->fc_prli_list};
2389 uint32_t search[]={NLP_SEARCH_UNMAPPED,
2390 NLP_SEARCH_MAPPED,
2391 NLP_SEARCH_NPR,
2392 NLP_SEARCH_PLOGI,
2393 NLP_SEARCH_ADISC,
2394 NLP_SEARCH_REGLOGIN,
2395 NLP_SEARCH_PRLI};
2396 int i;
2397 2347
2398 spin_lock_irq(phba->host->host_lock); 2348 spin_lock_irq(phba->host->host_lock);
2399 for (i = 0; i < ARRAY_SIZE(lists); i++ ) { 2349 ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn);
2400 if (!(order & search[i]))
2401 continue;
2402 list_for_each_entry(ndlp, lists[i], nlp_listp) {
2403 if (memcmp(&ndlp->nlp_portname, wwpn,
2404 sizeof(struct lpfc_name)) == 0) {
2405 spin_unlock_irq(phba->host->host_lock);
2406 return ndlp;
2407 }
2408 }
2409 }
2410 spin_unlock_irq(phba->host->host_lock); 2350 spin_unlock_irq(phba->host->host_lock);
2411 return NULL; 2351 return NULL;
2412} 2352}
2413 2353
2414void 2354void
2415lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 2355lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
2416 uint32_t did)
2417{ 2356{
2418 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 2357 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2419 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 2358 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
@@ -2423,5 +2362,30 @@ lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2423 ndlp->nlp_DID = did; 2362 ndlp->nlp_DID = did;
2424 ndlp->nlp_phba = phba; 2363 ndlp->nlp_phba = phba;
2425 ndlp->nlp_sid = NLP_NO_SID; 2364 ndlp->nlp_sid = NLP_NO_SID;
2365 INIT_LIST_HEAD(&ndlp->nlp_listp);
2366 kref_init(&ndlp->kref);
2426 return; 2367 return;
2427} 2368}
2369
2370void
2371lpfc_nlp_release(struct kref *kref)
2372{
2373 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2374 kref);
2375 lpfc_nlp_remove(ndlp->nlp_phba, ndlp);
2376 mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool);
2377}
2378
2379struct lpfc_nodelist *
2380lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2381{
2382 if (ndlp)
2383 kref_get(&ndlp->kref);
2384 return ndlp;
2385}
2386
2387int
2388lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2389{
2390 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
2391}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index f79cb6136906..2623a9bc7775 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -1078,6 +1078,8 @@ typedef struct {
1078/* Start FireFly Register definitions */ 1078/* Start FireFly Register definitions */
1079#define PCI_VENDOR_ID_EMULEX 0x10df 1079#define PCI_VENDOR_ID_EMULEX 0x10df
1080#define PCI_DEVICE_ID_FIREFLY 0x1ae5 1080#define PCI_DEVICE_ID_FIREFLY 0x1ae5
1081#define PCI_DEVICE_ID_SAT_SMB 0xf011
1082#define PCI_DEVICE_ID_SAT_MID 0xf015
1081#define PCI_DEVICE_ID_RFLY 0xf095 1083#define PCI_DEVICE_ID_RFLY 0xf095
1082#define PCI_DEVICE_ID_PFLY 0xf098 1084#define PCI_DEVICE_ID_PFLY 0xf098
1083#define PCI_DEVICE_ID_LP101 0xf0a1 1085#define PCI_DEVICE_ID_LP101 0xf0a1
@@ -1089,6 +1091,9 @@ typedef struct {
1089#define PCI_DEVICE_ID_NEPTUNE 0xf0f5 1091#define PCI_DEVICE_ID_NEPTUNE 0xf0f5
1090#define PCI_DEVICE_ID_NEPTUNE_SCSP 0xf0f6 1092#define PCI_DEVICE_ID_NEPTUNE_SCSP 0xf0f6
1091#define PCI_DEVICE_ID_NEPTUNE_DCSP 0xf0f7 1093#define PCI_DEVICE_ID_NEPTUNE_DCSP 0xf0f7
1094#define PCI_DEVICE_ID_SAT 0xf100
1095#define PCI_DEVICE_ID_SAT_SCSP 0xf111
1096#define PCI_DEVICE_ID_SAT_DCSP 0xf112
1092#define PCI_DEVICE_ID_SUPERFLY 0xf700 1097#define PCI_DEVICE_ID_SUPERFLY 0xf700
1093#define PCI_DEVICE_ID_DRAGONFLY 0xf800 1098#define PCI_DEVICE_ID_DRAGONFLY 0xf800
1094#define PCI_DEVICE_ID_CENTAUR 0xf900 1099#define PCI_DEVICE_ID_CENTAUR 0xf900
@@ -1098,6 +1103,7 @@ typedef struct {
1098#define PCI_DEVICE_ID_LP10000S 0xfc00 1103#define PCI_DEVICE_ID_LP10000S 0xfc00
1099#define PCI_DEVICE_ID_LP11000S 0xfc10 1104#define PCI_DEVICE_ID_LP11000S 0xfc10
1100#define PCI_DEVICE_ID_LPE11000S 0xfc20 1105#define PCI_DEVICE_ID_LPE11000S 0xfc20
1106#define PCI_DEVICE_ID_SAT_S 0xfc40
1101#define PCI_DEVICE_ID_HELIOS 0xfd00 1107#define PCI_DEVICE_ID_HELIOS 0xfd00
1102#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11 1108#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
1103#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12 1109#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
@@ -1118,6 +1124,7 @@ typedef struct {
1118#define HELIOS_JEDEC_ID 0x0364 1124#define HELIOS_JEDEC_ID 0x0364
1119#define ZEPHYR_JEDEC_ID 0x0577 1125#define ZEPHYR_JEDEC_ID 0x0577
1120#define VIPER_JEDEC_ID 0x4838 1126#define VIPER_JEDEC_ID 0x4838
1127#define SATURN_JEDEC_ID 0x1004
1121 1128
1122#define JEDEC_ID_MASK 0x0FFFF000 1129#define JEDEC_ID_MASK 0x0FFFF000
1123#define JEDEC_ID_SHIFT 12 1130#define JEDEC_ID_SHIFT 12
@@ -1565,7 +1572,7 @@ typedef struct {
1565#define LINK_SPEED_1G 1 /* 1 Gigabaud */ 1572#define LINK_SPEED_1G 1 /* 1 Gigabaud */
1566#define LINK_SPEED_2G 2 /* 2 Gigabaud */ 1573#define LINK_SPEED_2G 2 /* 2 Gigabaud */
1567#define LINK_SPEED_4G 4 /* 4 Gigabaud */ 1574#define LINK_SPEED_4G 4 /* 4 Gigabaud */
1568#define LINK_SPEED_8G 8 /* 4 Gigabaud */ 1575#define LINK_SPEED_8G 8 /* 8 Gigabaud */
1569#define LINK_SPEED_10G 16 /* 10 Gigabaud */ 1576#define LINK_SPEED_10G 16 /* 10 Gigabaud */
1570 1577
1571} INIT_LINK_VAR; 1578} INIT_LINK_VAR;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index dcf6106f557a..dcb4ba0ecee1 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -386,12 +386,12 @@ lpfc_config_port_post(struct lpfc_hba * phba)
386 * Setup the ring 0 (els) timeout handler 386 * Setup the ring 0 (els) timeout handler
387 */ 387 */
388 timeout = phba->fc_ratov << 1; 388 timeout = phba->fc_ratov << 1;
389 phba->els_tmofunc.expires = jiffies + HZ * timeout; 389 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
390 add_timer(&phba->els_tmofunc);
391 390
392 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 391 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
393 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 392 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
394 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 393 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
394 lpfc_set_loopback_flag(phba);
395 if (rc != MBX_SUCCESS) { 395 if (rc != MBX_SUCCESS) {
396 lpfc_printf_log(phba, 396 lpfc_printf_log(phba,
397 KERN_ERR, 397 KERN_ERR,
@@ -418,33 +418,6 @@ lpfc_config_port_post(struct lpfc_hba * phba)
418 return (0); 418 return (0);
419} 419}
420 420
421static int
422lpfc_discovery_wait(struct lpfc_hba *phba)
423{
424 int i = 0;
425
426 while ((phba->hba_state != LPFC_HBA_READY) ||
427 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
428 ((phba->fc_map_cnt == 0) && (i<2)) ||
429 (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
430 /* Check every second for 30 retries. */
431 i++;
432 if (i > 30) {
433 return -ETIMEDOUT;
434 }
435 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
436 /* The link is down. Set linkdown timeout */
437 return -ETIMEDOUT;
438 }
439
440 /* Delay for 1 second to give discovery time to complete. */
441 msleep(1000);
442
443 }
444
445 return 0;
446}
447
448/************************************************************************/ 421/************************************************************************/
449/* */ 422/* */
450/* lpfc_hba_down_prep */ 423/* lpfc_hba_down_prep */
@@ -550,12 +523,15 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
550 * There was a firmware error. Take the hba offline and then 523 * There was a firmware error. Take the hba offline and then
551 * attempt to restart it. 524 * attempt to restart it.
552 */ 525 */
526 lpfc_offline_prep(phba);
553 lpfc_offline(phba); 527 lpfc_offline(phba);
554 lpfc_sli_brdrestart(phba); 528 lpfc_sli_brdrestart(phba);
555 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 529 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
556 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); 530 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
531 lpfc_unblock_mgmt_io(phba);
557 return; 532 return;
558 } 533 }
534 lpfc_unblock_mgmt_io(phba);
559 } else { 535 } else {
560 /* The if clause above forces this code path when the status 536 /* The if clause above forces this code path when the status
561 * failure is a value other than FFER6. Do not call the offline 537 * failure is a value other than FFER6. Do not call the offline
@@ -573,7 +549,9 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
573 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 549 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
574 550
575 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 551 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
552 lpfc_offline_prep(phba);
576 lpfc_offline(phba); 553 lpfc_offline(phba);
554 lpfc_unblock_mgmt_io(phba);
577 phba->hba_state = LPFC_HBA_ERROR; 555 phba->hba_state = LPFC_HBA_ERROR;
578 lpfc_hba_down_post(phba); 556 lpfc_hba_down_post(phba);
579 } 557 }
@@ -633,7 +611,7 @@ lpfc_handle_latt_free_mbuf:
633lpfc_handle_latt_free_mp: 611lpfc_handle_latt_free_mp:
634 kfree(mp); 612 kfree(mp);
635lpfc_handle_latt_free_pmb: 613lpfc_handle_latt_free_pmb:
636 kfree(pmb); 614 mempool_free(pmb, phba->mbox_mem_pool);
637lpfc_handle_latt_err_exit: 615lpfc_handle_latt_err_exit:
638 /* Enable Link attention interrupts */ 616 /* Enable Link attention interrupts */
639 spin_lock_irq(phba->host->host_lock); 617 spin_lock_irq(phba->host->host_lock);
@@ -925,6 +903,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
925 m = (typeof(m)){"LPe11000-S", max_speed, 903 m = (typeof(m)){"LPe11000-S", max_speed,
926 "PCIe"}; 904 "PCIe"};
927 break; 905 break;
906 case PCI_DEVICE_ID_SAT:
907 m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
908 break;
909 case PCI_DEVICE_ID_SAT_MID:
910 m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
911 break;
912 case PCI_DEVICE_ID_SAT_SMB:
913 m = (typeof(m)){"LPe121", max_speed, "PCIe"};
914 break;
915 case PCI_DEVICE_ID_SAT_DCSP:
916 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
917 break;
918 case PCI_DEVICE_ID_SAT_SCSP:
919 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
920 break;
921 case PCI_DEVICE_ID_SAT_S:
922 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
923 break;
928 default: 924 default:
929 m = (typeof(m)){ NULL }; 925 m = (typeof(m)){ NULL };
930 break; 926 break;
@@ -1174,69 +1170,17 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1174} 1170}
1175 1171
1176static void 1172static void
1177lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind) 1173lpfc_cleanup(struct lpfc_hba * phba)
1178{ 1174{
1179 struct lpfc_nodelist *ndlp, *next_ndlp; 1175 struct lpfc_nodelist *ndlp, *next_ndlp;
1180 1176
1181 /* clean up phba - lpfc specific */ 1177 /* clean up phba - lpfc specific */
1182 lpfc_can_disctmo(phba); 1178 lpfc_can_disctmo(phba);
1183 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list, 1179 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
1184 nlp_listp) { 1180 lpfc_nlp_put(ndlp);
1185 lpfc_nlp_remove(phba, ndlp);
1186 }
1187 1181
1188 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list, 1182 INIT_LIST_HEAD(&phba->fc_nodes);
1189 nlp_listp) {
1190 lpfc_nlp_remove(phba, ndlp);
1191 }
1192
1193 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1194 nlp_listp) {
1195 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1196 }
1197
1198 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1199 nlp_listp) {
1200 lpfc_nlp_remove(phba, ndlp);
1201 }
1202
1203 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1204 nlp_listp) {
1205 lpfc_nlp_remove(phba, ndlp);
1206 }
1207
1208 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
1209 nlp_listp) {
1210 lpfc_nlp_remove(phba, ndlp);
1211 }
1212 1183
1213 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1214 nlp_listp) {
1215 lpfc_nlp_remove(phba, ndlp);
1216 }
1217
1218 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1219 nlp_listp) {
1220 lpfc_nlp_remove(phba, ndlp);
1221 }
1222
1223 INIT_LIST_HEAD(&phba->fc_nlpmap_list);
1224 INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
1225 INIT_LIST_HEAD(&phba->fc_unused_list);
1226 INIT_LIST_HEAD(&phba->fc_plogi_list);
1227 INIT_LIST_HEAD(&phba->fc_adisc_list);
1228 INIT_LIST_HEAD(&phba->fc_reglogin_list);
1229 INIT_LIST_HEAD(&phba->fc_prli_list);
1230 INIT_LIST_HEAD(&phba->fc_npr_list);
1231
1232 phba->fc_map_cnt = 0;
1233 phba->fc_unmap_cnt = 0;
1234 phba->fc_plogi_cnt = 0;
1235 phba->fc_adisc_cnt = 0;
1236 phba->fc_reglogin_cnt = 0;
1237 phba->fc_prli_cnt = 0;
1238 phba->fc_npr_cnt = 0;
1239 phba->fc_unused_cnt= 0;
1240 return; 1184 return;
1241} 1185}
1242 1186
@@ -1262,21 +1206,6 @@ lpfc_stop_timer(struct lpfc_hba * phba)
1262{ 1206{
1263 struct lpfc_sli *psli = &phba->sli; 1207 struct lpfc_sli *psli = &phba->sli;
1264 1208
1265 /* Instead of a timer, this has been converted to a
1266 * deferred procedding list.
1267 */
1268 while (!list_empty(&phba->freebufList)) {
1269
1270 struct lpfc_dmabuf *mp = NULL;
1271
1272 list_remove_head((&phba->freebufList), mp,
1273 struct lpfc_dmabuf, list);
1274 if (mp) {
1275 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1276 kfree(mp);
1277 }
1278 }
1279
1280 del_timer_sync(&phba->fcp_poll_timer); 1209 del_timer_sync(&phba->fcp_poll_timer);
1281 del_timer_sync(&phba->fc_estabtmo); 1210 del_timer_sync(&phba->fc_estabtmo);
1282 del_timer_sync(&phba->fc_disctmo); 1211 del_timer_sync(&phba->fc_disctmo);
@@ -1302,60 +1231,76 @@ lpfc_online(struct lpfc_hba * phba)
1302 "%d:0458 Bring Adapter online\n", 1231 "%d:0458 Bring Adapter online\n",
1303 phba->brd_no); 1232 phba->brd_no);
1304 1233
1305 if (!lpfc_sli_queue_setup(phba)) 1234 lpfc_block_mgmt_io(phba);
1235
1236 if (!lpfc_sli_queue_setup(phba)) {
1237 lpfc_unblock_mgmt_io(phba);
1306 return 1; 1238 return 1;
1239 }
1307 1240
1308 if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */ 1241 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */
1242 lpfc_unblock_mgmt_io(phba);
1309 return 1; 1243 return 1;
1244 }
1310 1245
1311 spin_lock_irq(phba->host->host_lock); 1246 spin_lock_irq(phba->host->host_lock);
1312 phba->fc_flag &= ~FC_OFFLINE_MODE; 1247 phba->fc_flag &= ~FC_OFFLINE_MODE;
1313 spin_unlock_irq(phba->host->host_lock); 1248 spin_unlock_irq(phba->host->host_lock);
1314 1249
1250 lpfc_unblock_mgmt_io(phba);
1315 return 0; 1251 return 0;
1316} 1252}
1317 1253
1318int 1254void
1319lpfc_offline(struct lpfc_hba * phba) 1255lpfc_block_mgmt_io(struct lpfc_hba * phba)
1320{ 1256{
1321 struct lpfc_sli_ring *pring;
1322 struct lpfc_sli *psli;
1323 unsigned long iflag; 1257 unsigned long iflag;
1324 int i;
1325 int cnt = 0;
1326 1258
1327 if (!phba) 1259 spin_lock_irqsave(phba->host->host_lock, iflag);
1328 return 0; 1260 phba->fc_flag |= FC_BLOCK_MGMT_IO;
1261 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1262}
1263
1264void
1265lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1266{
1267 unsigned long iflag;
1268
1269 spin_lock_irqsave(phba->host->host_lock, iflag);
1270 phba->fc_flag &= ~FC_BLOCK_MGMT_IO;
1271 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1272}
1273
1274void
1275lpfc_offline_prep(struct lpfc_hba * phba)
1276{
1277 struct lpfc_nodelist *ndlp, *next_ndlp;
1329 1278
1330 if (phba->fc_flag & FC_OFFLINE_MODE) 1279 if (phba->fc_flag & FC_OFFLINE_MODE)
1331 return 0; 1280 return;
1332 1281
1333 psli = &phba->sli; 1282 lpfc_block_mgmt_io(phba);
1334 1283
1335 lpfc_linkdown(phba); 1284 lpfc_linkdown(phba);
1285
1286 /* Issue an unreg_login to all nodes */
1287 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
1288 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
1289 lpfc_unreg_rpi(phba, ndlp);
1290
1336 lpfc_sli_flush_mbox_queue(phba); 1291 lpfc_sli_flush_mbox_queue(phba);
1292}
1337 1293
1338 for (i = 0; i < psli->num_rings; i++) { 1294void
1339 pring = &psli->ring[i]; 1295lpfc_offline(struct lpfc_hba * phba)
1340 /* The linkdown event takes 30 seconds to timeout. */ 1296{
1341 while (pring->txcmplq_cnt) { 1297 unsigned long iflag;
1342 mdelay(10);
1343 if (cnt++ > 3000) {
1344 lpfc_printf_log(phba,
1345 KERN_WARNING, LOG_INIT,
1346 "%d:0466 Outstanding IO when "
1347 "bringing Adapter offline\n",
1348 phba->brd_no);
1349 break;
1350 }
1351 }
1352 }
1353 1298
1299 if (phba->fc_flag & FC_OFFLINE_MODE)
1300 return;
1354 1301
1355 /* stop all timers associated with this hba */ 1302 /* stop all timers associated with this hba */
1356 lpfc_stop_timer(phba); 1303 lpfc_stop_timer(phba);
1357 phba->work_hba_events = 0;
1358 phba->work_ha = 0;
1359 1304
1360 lpfc_printf_log(phba, 1305 lpfc_printf_log(phba,
1361 KERN_WARNING, 1306 KERN_WARNING,
@@ -1366,11 +1311,12 @@ lpfc_offline(struct lpfc_hba * phba)
1366 /* Bring down the SLI Layer and cleanup. The HBA is offline 1311 /* Bring down the SLI Layer and cleanup. The HBA is offline
1367 now. */ 1312 now. */
1368 lpfc_sli_hba_down(phba); 1313 lpfc_sli_hba_down(phba);
1369 lpfc_cleanup(phba, 1); 1314 lpfc_cleanup(phba);
1370 spin_lock_irqsave(phba->host->host_lock, iflag); 1315 spin_lock_irqsave(phba->host->host_lock, iflag);
1316 phba->work_hba_events = 0;
1317 phba->work_ha = 0;
1371 phba->fc_flag |= FC_OFFLINE_MODE; 1318 phba->fc_flag |= FC_OFFLINE_MODE;
1372 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1319 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1373 return 0;
1374} 1320}
1375 1321
1376/****************************************************************************** 1322/******************************************************************************
@@ -1407,6 +1353,156 @@ lpfc_scsi_free(struct lpfc_hba * phba)
1407 return 0; 1353 return 0;
1408} 1354}
1409 1355
1356void lpfc_remove_device(struct lpfc_hba *phba)
1357{
1358 unsigned long iflag;
1359
1360 lpfc_free_sysfs_attr(phba);
1361
1362 spin_lock_irqsave(phba->host->host_lock, iflag);
1363 phba->fc_flag |= FC_UNLOADING;
1364
1365 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1366
1367 fc_remove_host(phba->host);
1368 scsi_remove_host(phba->host);
1369
1370 kthread_stop(phba->worker_thread);
1371
1372 /*
1373 * Bring down the SLI Layer. This step disable all interrupts,
1374 * clears the rings, discards all mailbox commands, and resets
1375 * the HBA.
1376 */
1377 lpfc_sli_hba_down(phba);
1378 lpfc_sli_brdrestart(phba);
1379
1380 /* Release the irq reservation */
1381 free_irq(phba->pcidev->irq, phba);
1382 pci_disable_msi(phba->pcidev);
1383
1384 lpfc_cleanup(phba);
1385 lpfc_stop_timer(phba);
1386 phba->work_hba_events = 0;
1387
1388 /*
1389 * Call scsi_free before mem_free since scsi bufs are released to their
1390 * corresponding pools here.
1391 */
1392 lpfc_scsi_free(phba);
1393 lpfc_mem_free(phba);
1394
1395 /* Free resources associated with SLI2 interface */
1396 dma_free_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
1397 phba->slim2p, phba->slim2p_mapping);
1398
1399 /* unmap adapter SLIM and Control Registers */
1400 iounmap(phba->ctrl_regs_memmap_p);
1401 iounmap(phba->slim_memmap_p);
1402
1403 pci_release_regions(phba->pcidev);
1404 pci_disable_device(phba->pcidev);
1405
1406 idr_remove(&lpfc_hba_index, phba->brd_no);
1407 scsi_host_put(phba->host);
1408}
1409
1410void lpfc_scan_start(struct Scsi_Host *host)
1411{
1412 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
1413
1414 if (lpfc_alloc_sysfs_attr(phba))
1415 goto error;
1416
1417 phba->MBslimaddr = phba->slim_memmap_p;
1418 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
1419 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
1420 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1421 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1422
1423 if (lpfc_sli_hba_setup(phba))
1424 goto error;
1425
1426 /*
1427 * hba setup may have changed the hba_queue_depth so we need to adjust
1428 * the value of can_queue.
1429 */
1430 host->can_queue = phba->cfg_hba_queue_depth - 10;
1431 return;
1432
1433error:
1434 lpfc_remove_device(phba);
1435}
1436
1437int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
1438{
1439 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
1440
1441 if (!phba->host)
1442 return 1;
1443 if (time >= 30 * HZ)
1444 goto finished;
1445
1446 if (phba->hba_state != LPFC_HBA_READY)
1447 return 0;
1448 if (phba->num_disc_nodes || phba->fc_prli_sent)
1449 return 0;
1450 if ((phba->fc_map_cnt == 0) && (time < 2 * HZ))
1451 return 0;
1452 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)
1453 return 0;
1454 if ((phba->hba_state > LPFC_LINK_DOWN) || (time < 15 * HZ))
1455 return 0;
1456
1457finished:
1458 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1459 spin_lock_irq(shost->host_lock);
1460 lpfc_poll_start_timer(phba);
1461 spin_unlock_irq(shost->host_lock);
1462 }
1463
1464 /*
1465 * set fixed host attributes
1466 * Must done after lpfc_sli_hba_setup()
1467 */
1468
1469 fc_host_node_name(shost) = wwn_to_u64(phba->fc_nodename.u.wwn);
1470 fc_host_port_name(shost) = wwn_to_u64(phba->fc_portname.u.wwn);
1471 fc_host_supported_classes(shost) = FC_COS_CLASS3;
1472
1473 memset(fc_host_supported_fc4s(shost), 0,
1474 sizeof(fc_host_supported_fc4s(shost)));
1475 fc_host_supported_fc4s(shost)[2] = 1;
1476 fc_host_supported_fc4s(shost)[7] = 1;
1477
1478 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
1479
1480 fc_host_supported_speeds(shost) = 0;
1481 if (phba->lmt & LMT_10Gb)
1482 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
1483 if (phba->lmt & LMT_4Gb)
1484 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
1485 if (phba->lmt & LMT_2Gb)
1486 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
1487 if (phba->lmt & LMT_1Gb)
1488 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
1489
1490 fc_host_maxframe_size(shost) =
1491 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
1492 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
1493
1494 /* This value is also unchanging */
1495 memset(fc_host_active_fc4s(shost), 0,
1496 sizeof(fc_host_active_fc4s(shost)));
1497 fc_host_active_fc4s(shost)[2] = 1;
1498 fc_host_active_fc4s(shost)[7] = 1;
1499
1500 spin_lock_irq(shost->host_lock);
1501 phba->fc_flag &= ~FC_LOADING;
1502 spin_unlock_irq(shost->host_lock);
1503
1504 return 1;
1505}
1410 1506
1411static int __devinit 1507static int __devinit
1412lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1508lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
@@ -1445,9 +1541,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1445 goto out_put_host; 1541 goto out_put_host;
1446 1542
1447 host->unique_id = phba->brd_no; 1543 host->unique_id = phba->brd_no;
1448 INIT_LIST_HEAD(&phba->ctrspbuflist);
1449 INIT_LIST_HEAD(&phba->rnidrspbuflist);
1450 INIT_LIST_HEAD(&phba->freebufList);
1451 1544
1452 /* Initialize timers used by driver */ 1545 /* Initialize timers used by driver */
1453 init_timer(&phba->fc_estabtmo); 1546 init_timer(&phba->fc_estabtmo);
@@ -1482,16 +1575,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1482 host->max_lun = phba->cfg_max_luns; 1575 host->max_lun = phba->cfg_max_luns;
1483 host->this_id = -1; 1576 host->this_id = -1;
1484 1577
1485 /* Initialize all internally managed lists. */ 1578 INIT_LIST_HEAD(&phba->fc_nodes);
1486 INIT_LIST_HEAD(&phba->fc_nlpmap_list);
1487 INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
1488 INIT_LIST_HEAD(&phba->fc_unused_list);
1489 INIT_LIST_HEAD(&phba->fc_plogi_list);
1490 INIT_LIST_HEAD(&phba->fc_adisc_list);
1491 INIT_LIST_HEAD(&phba->fc_reglogin_list);
1492 INIT_LIST_HEAD(&phba->fc_prli_list);
1493 INIT_LIST_HEAD(&phba->fc_npr_list);
1494
1495 1579
1496 pci_set_master(pdev); 1580 pci_set_master(pdev);
1497 retval = pci_set_mwi(pdev); 1581 retval = pci_set_mwi(pdev);
@@ -1609,13 +1693,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1609 1693
1610 host->transportt = lpfc_transport_template; 1694 host->transportt = lpfc_transport_template;
1611 pci_set_drvdata(pdev, host); 1695 pci_set_drvdata(pdev, host);
1612 error = scsi_add_host(host, &pdev->dev);
1613 if (error)
1614 goto out_kthread_stop;
1615
1616 error = lpfc_alloc_sysfs_attr(phba);
1617 if (error)
1618 goto out_remove_host;
1619 1696
1620 if (phba->cfg_use_msi) { 1697 if (phba->cfg_use_msi) {
1621 error = pci_enable_msi(phba->pcidev); 1698 error = pci_enable_msi(phba->pcidev);
@@ -1631,73 +1708,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1632 "%d:0451 Enable interrupt handler failed\n", 1709 "%d:0451 Enable interrupt handler failed\n",
1633 phba->brd_no); 1710 phba->brd_no);
1634 goto out_free_sysfs_attr; 1711 goto out_kthread_stop;
1635 } 1712 }
1636 phba->MBslimaddr = phba->slim_memmap_p;
1637 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
1638 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
1639 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1640 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1641 1713
1642 error = lpfc_sli_hba_setup(phba); 1714 error = scsi_add_host(host, &pdev->dev);
1643 if (error) { 1715 if (error)
1644 error = -ENODEV;
1645 goto out_free_irq; 1716 goto out_free_irq;
1646 }
1647
1648 /*
1649 * hba setup may have changed the hba_queue_depth so we need to adjust
1650 * the value of can_queue.
1651 */
1652 host->can_queue = phba->cfg_hba_queue_depth - 10;
1653
1654 lpfc_discovery_wait(phba);
1655 1717
1656 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1718 scsi_scan_host(host);
1657 spin_lock_irq(phba->host->host_lock);
1658 lpfc_poll_start_timer(phba);
1659 spin_unlock_irq(phba->host->host_lock);
1660 }
1661 1719
1662 /*
1663 * set fixed host attributes
1664 * Must done after lpfc_sli_hba_setup()
1665 */
1666
1667 fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
1668 fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
1669 fc_host_supported_classes(host) = FC_COS_CLASS3;
1670
1671 memset(fc_host_supported_fc4s(host), 0,
1672 sizeof(fc_host_supported_fc4s(host)));
1673 fc_host_supported_fc4s(host)[2] = 1;
1674 fc_host_supported_fc4s(host)[7] = 1;
1675
1676 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));
1677
1678 fc_host_supported_speeds(host) = 0;
1679 if (phba->lmt & LMT_10Gb)
1680 fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
1681 if (phba->lmt & LMT_4Gb)
1682 fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
1683 if (phba->lmt & LMT_2Gb)
1684 fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
1685 if (phba->lmt & LMT_1Gb)
1686 fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;
1687
1688 fc_host_maxframe_size(host) =
1689 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
1690 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
1691
1692 /* This value is also unchanging */
1693 memset(fc_host_active_fc4s(host), 0,
1694 sizeof(fc_host_active_fc4s(host)));
1695 fc_host_active_fc4s(host)[2] = 1;
1696 fc_host_active_fc4s(host)[7] = 1;
1697
1698 spin_lock_irq(phba->host->host_lock);
1699 phba->fc_flag &= ~FC_LOADING;
1700 spin_unlock_irq(phba->host->host_lock);
1701 return 0; 1720 return 0;
1702 1721
1703out_free_irq: 1722out_free_irq:
@@ -1705,11 +1724,6 @@ out_free_irq:
1705 phba->work_hba_events = 0; 1724 phba->work_hba_events = 0;
1706 free_irq(phba->pcidev->irq, phba); 1725 free_irq(phba->pcidev->irq, phba);
1707 pci_disable_msi(phba->pcidev); 1726 pci_disable_msi(phba->pcidev);
1708out_free_sysfs_attr:
1709 lpfc_free_sysfs_attr(phba);
1710out_remove_host:
1711 fc_remove_host(phba->host);
1712 scsi_remove_host(phba->host);
1713out_kthread_stop: 1727out_kthread_stop:
1714 kthread_stop(phba->worker_thread); 1728 kthread_stop(phba->worker_thread);
1715out_free_iocbq: 1729out_free_iocbq:
@@ -1747,56 +1761,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
1747{ 1761{
1748 struct Scsi_Host *host = pci_get_drvdata(pdev); 1762 struct Scsi_Host *host = pci_get_drvdata(pdev);
1749 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata; 1763 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
1750 unsigned long iflag;
1751
1752 lpfc_free_sysfs_attr(phba);
1753
1754 spin_lock_irqsave(phba->host->host_lock, iflag);
1755 phba->fc_flag |= FC_UNLOADING;
1756
1757 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1758 1764
1759 fc_remove_host(phba->host); 1765 lpfc_remove_device(phba);
1760 scsi_remove_host(phba->host);
1761
1762 kthread_stop(phba->worker_thread);
1763
1764 /*
1765 * Bring down the SLI Layer. This step disable all interrupts,
1766 * clears the rings, discards all mailbox commands, and resets
1767 * the HBA.
1768 */
1769 lpfc_sli_hba_down(phba);
1770 lpfc_sli_brdrestart(phba);
1771
1772 /* Release the irq reservation */
1773 free_irq(phba->pcidev->irq, phba);
1774 pci_disable_msi(phba->pcidev);
1775
1776 lpfc_cleanup(phba, 0);
1777 lpfc_stop_timer(phba);
1778 phba->work_hba_events = 0;
1779
1780 /*
1781 * Call scsi_free before mem_free since scsi bufs are released to their
1782 * corresponding pools here.
1783 */
1784 lpfc_scsi_free(phba);
1785 lpfc_mem_free(phba);
1786
1787 /* Free resources associated with SLI2 interface */
1788 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
1789 phba->slim2p, phba->slim2p_mapping);
1790
1791 /* unmap adapter SLIM and Control Registers */
1792 iounmap(phba->ctrl_regs_memmap_p);
1793 iounmap(phba->slim_memmap_p);
1794
1795 pci_release_regions(phba->pcidev);
1796 pci_disable_device(phba->pcidev);
1797
1798 idr_remove(&lpfc_hba_index, phba->brd_no);
1799 scsi_host_put(phba->host);
1800 1766
1801 pci_set_drvdata(pdev, NULL); 1767 pci_set_drvdata(pdev, NULL);
1802} 1768}
@@ -1941,6 +1907,18 @@ static struct pci_device_id lpfc_id_table[] = {
1941 PCI_ANY_ID, PCI_ANY_ID, }, 1907 PCI_ANY_ID, PCI_ANY_ID, },
1942 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 1908 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
1943 PCI_ANY_ID, PCI_ANY_ID, }, 1909 PCI_ANY_ID, PCI_ANY_ID, },
1910 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
1911 PCI_ANY_ID, PCI_ANY_ID, },
1912 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
1913 PCI_ANY_ID, PCI_ANY_ID, },
1914 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
1915 PCI_ANY_ID, PCI_ANY_ID, },
1916 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
1917 PCI_ANY_ID, PCI_ANY_ID, },
1918 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
1919 PCI_ANY_ID, PCI_ANY_ID, },
1920 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
1921 PCI_ANY_ID, PCI_ANY_ID, },
1944 { 0 } 1922 { 0 }
1945}; 1923};
1946 1924
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 4d016c2a1b26..8041c3f06f7b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -212,6 +212,7 @@ lpfc_init_link(struct lpfc_hba * phba,
212 case LINK_SPEED_1G: 212 case LINK_SPEED_1G:
213 case LINK_SPEED_2G: 213 case LINK_SPEED_2G:
214 case LINK_SPEED_4G: 214 case LINK_SPEED_4G:
215 case LINK_SPEED_8G:
215 mb->un.varInitLnk.link_flags |= 216 mb->un.varInitLnk.link_flags |=
216 FLAGS_LINK_SPEED; 217 FLAGS_LINK_SPEED;
217 mb->un.varInitLnk.link_speed = linkspeed; 218 mb->un.varInitLnk.link_speed = linkspeed;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0c7e731dc45a..b309841e3846 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -168,14 +168,13 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
168 * routine effectively results in a "software abort". 168 * routine effectively results in a "software abort".
169 */ 169 */
170int 170int
171lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 171lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
172 int send_abts)
173{ 172{
173 LIST_HEAD(completions);
174 struct lpfc_sli *psli; 174 struct lpfc_sli *psli;
175 struct lpfc_sli_ring *pring; 175 struct lpfc_sli_ring *pring;
176 struct lpfc_iocbq *iocb, *next_iocb; 176 struct lpfc_iocbq *iocb, *next_iocb;
177 IOCB_t *icmd; 177 IOCB_t *cmd;
178 int found = 0;
179 178
180 /* Abort outstanding I/O on NPort <nlp_DID> */ 179 /* Abort outstanding I/O on NPort <nlp_DID> */
181 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 180 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
@@ -188,75 +187,39 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
188 pring = &psli->ring[LPFC_ELS_RING]; 187 pring = &psli->ring[LPFC_ELS_RING];
189 188
190 /* First check the txq */ 189 /* First check the txq */
191 do { 190 spin_lock_irq(phba->host->host_lock);
192 found = 0; 191 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
193 spin_lock_irq(phba->host->host_lock); 192 /* Check to see if iocb matches the nport we are looking
194 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 193 for */
195 /* Check to see if iocb matches the nport we are looking 194 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
196 for */ 195 /* It matches, so deque and call compl with an
197 if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) { 196 error */
198 found = 1; 197 list_move_tail(&iocb->list, &completions);
199 /* It matches, so deque and call compl with an 198 pring->txq_cnt--;
200 error */
201 list_del(&iocb->list);
202 pring->txq_cnt--;
203 if (iocb->iocb_cmpl) {
204 icmd = &iocb->iocb;
205 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
206 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
207 spin_unlock_irq(phba->host->host_lock);
208 (iocb->iocb_cmpl) (phba, iocb, iocb);
209 spin_lock_irq(phba->host->host_lock);
210 } else
211 lpfc_sli_release_iocbq(phba, iocb);
212 break;
213 }
214 } 199 }
215 spin_unlock_irq(phba->host->host_lock); 200 }
216 } while (found);
217 201
218 /* Everything on txcmplq will be returned by firmware
219 * with a no rpi / linkdown / abort error. For ring 0,
220 * ELS discovery, we want to get rid of it right here.
221 */
222 /* Next check the txcmplq */ 202 /* Next check the txcmplq */
223 do { 203 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
224 found = 0; 204 /* Check to see if iocb matches the nport we are looking
225 spin_lock_irq(phba->host->host_lock); 205 for */
226 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 206 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
227 list) { 207 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
228 /* Check to see if iocb matches the nport we are looking 208 }
229 for */ 209 spin_unlock_irq(phba->host->host_lock);
230 if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) { 210
231 found = 1; 211 while (!list_empty(&completions)) {
232 /* It matches, so deque and call compl with an 212 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
233 error */ 213 cmd = &iocb->iocb;
234 list_del(&iocb->list); 214 list_del(&iocb->list);
235 pring->txcmplq_cnt--; 215
236 216 if (iocb->iocb_cmpl) {
237 icmd = &iocb->iocb; 217 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
238 /* If the driver is completing an ELS 218 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
239 * command early, flush it out of the firmware. 219 (iocb->iocb_cmpl) (phba, iocb, iocb);
240 */ 220 } else
241 if (send_abts && 221 lpfc_sli_release_iocbq(phba, iocb);
242 (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) && 222 }
243 (icmd->un.elsreq64.bdl.ulpIoTag32)) {
244 lpfc_sli_issue_abort_iotag32(phba,
245 pring, iocb);
246 }
247 if (iocb->iocb_cmpl) {
248 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
249 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
250 spin_unlock_irq(phba->host->host_lock);
251 (iocb->iocb_cmpl) (phba, iocb, iocb);
252 spin_lock_irq(phba->host->host_lock);
253 } else
254 lpfc_sli_release_iocbq(phba, iocb);
255 break;
256 }
257 }
258 spin_unlock_irq(phba->host->host_lock);
259 } while(found);
260 223
261 /* If we are delaying issuing an ELS command, cancel it */ 224 /* If we are delaying issuing an ELS command, cancel it */
262 if (ndlp->nlp_flag & NLP_DELAY_TMO) 225 if (ndlp->nlp_flag & NLP_DELAY_TMO)
@@ -390,7 +353,10 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
390 * queue this mbox command to be processed later. 353 * queue this mbox command to be processed later.
391 */ 354 */
392 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 355 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
393 mbox->context2 = ndlp; 356 /*
357 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
358 * command issued in lpfc_cmpl_els_acc().
359 */
394 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); 360 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
395 361
396 /* 362 /*
@@ -404,7 +370,7 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
404 */ 370 */
405 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { 371 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
406 /* software abort outstanding PLOGI */ 372 /* software abort outstanding PLOGI */
407 lpfc_els_abort(phba, ndlp, 1); 373 lpfc_els_abort(phba, ndlp);
408 } 374 }
409 375
410 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); 376 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
@@ -471,8 +437,7 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
471 spin_unlock_irq(phba->host->host_lock); 437 spin_unlock_irq(phba->host->host_lock);
472 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 438 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
473 ndlp->nlp_prev_state = ndlp->nlp_state; 439 ndlp->nlp_prev_state = ndlp->nlp_state;
474 ndlp->nlp_state = NLP_STE_NPR_NODE; 440 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
475 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
476 return 0; 441 return 0;
477} 442}
478 443
@@ -502,12 +467,10 @@ lpfc_rcv_logo(struct lpfc_hba * phba,
502 467
503 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 468 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
504 ndlp->nlp_prev_state = ndlp->nlp_state; 469 ndlp->nlp_prev_state = ndlp->nlp_state;
505 ndlp->nlp_state = NLP_STE_NPR_NODE; 470 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
506 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
507 } else { 471 } else {
508 ndlp->nlp_prev_state = ndlp->nlp_state; 472 ndlp->nlp_prev_state = ndlp->nlp_state;
509 ndlp->nlp_state = NLP_STE_UNUSED_NODE; 473 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
510 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
511 } 474 }
512 475
513 spin_lock_irq(phba->host->host_lock); 476 spin_lock_irq(phba->host->host_lock);
@@ -601,11 +564,10 @@ lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
601 564
602 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { 565 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
603 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 566 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
604 ndlp->nlp_state = NLP_STE_UNUSED_NODE; 567 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
605 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
606 return ndlp->nlp_state; 568 return ndlp->nlp_state;
607 } 569 }
608 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 570 lpfc_drop_node(phba, ndlp);
609 return NLP_STE_FREED_NODE; 571 return NLP_STE_FREED_NODE;
610} 572}
611 573
@@ -614,7 +576,7 @@ lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
614 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 576 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
615{ 577{
616 lpfc_issue_els_logo(phba, ndlp, 0); 578 lpfc_issue_els_logo(phba, ndlp, 0);
617 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); 579 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
618 return ndlp->nlp_state; 580 return ndlp->nlp_state;
619} 581}
620 582
@@ -630,7 +592,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
630 ndlp->nlp_flag |= NLP_LOGO_ACC; 592 ndlp->nlp_flag |= NLP_LOGO_ACC;
631 spin_unlock_irq(phba->host->host_lock); 593 spin_unlock_irq(phba->host->host_lock);
632 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 594 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
633 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); 595 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
634 596
635 return ndlp->nlp_state; 597 return ndlp->nlp_state;
636} 598}
@@ -639,7 +601,7 @@ static uint32_t
639lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba, 601lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
640 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 602 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
641{ 603{
642 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 604 lpfc_drop_node(phba, ndlp);
643 return NLP_STE_FREED_NODE; 605 return NLP_STE_FREED_NODE;
644} 606}
645 607
@@ -647,7 +609,7 @@ static uint32_t
647lpfc_device_rm_unused_node(struct lpfc_hba * phba, 609lpfc_device_rm_unused_node(struct lpfc_hba * phba,
648 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 610 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
649{ 611{
650 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 612 lpfc_drop_node(phba, ndlp);
651 return NLP_STE_FREED_NODE; 613 return NLP_STE_FREED_NODE;
652} 614}
653 615
@@ -697,7 +659,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
697 cmdiocb = (struct lpfc_iocbq *) arg; 659 cmdiocb = (struct lpfc_iocbq *) arg;
698 660
699 /* software abort outstanding PLOGI */ 661 /* software abort outstanding PLOGI */
700 lpfc_els_abort(phba, ndlp, 1); 662 lpfc_els_abort(phba, ndlp);
701 663
702 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 664 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
703 return ndlp->nlp_state; 665 return ndlp->nlp_state;
@@ -712,7 +674,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
712 cmdiocb = (struct lpfc_iocbq *) arg; 674 cmdiocb = (struct lpfc_iocbq *) arg;
713 675
714 /* software abort outstanding PLOGI */ 676 /* software abort outstanding PLOGI */
715 lpfc_els_abort(phba, ndlp, 1); 677 lpfc_els_abort(phba, ndlp);
716 678
717 if (evt == NLP_EVT_RCV_LOGO) { 679 if (evt == NLP_EVT_RCV_LOGO) {
718 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 680 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
@@ -727,8 +689,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
727 spin_unlock_irq(phba->host->host_lock); 689 spin_unlock_irq(phba->host->host_lock);
728 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 690 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
729 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 691 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
730 ndlp->nlp_state = NLP_STE_NPR_NODE; 692 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
731 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
732 693
733 return ndlp->nlp_state; 694 return ndlp->nlp_state;
734} 695}
@@ -803,32 +764,26 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
803 goto out; 764 goto out;
804 765
805 lpfc_unreg_rpi(phba, ndlp); 766 lpfc_unreg_rpi(phba, ndlp);
806 if (lpfc_reg_login 767 if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp,
807 (phba, irsp->un.elsreq64.remoteID, 768 mbox, 0) == 0) {
808 (uint8_t *) sp, mbox, 0) == 0) {
809 switch (ndlp->nlp_DID) { 769 switch (ndlp->nlp_DID) {
810 case NameServer_DID: 770 case NameServer_DID:
811 mbox->mbox_cmpl = 771 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
812 lpfc_mbx_cmpl_ns_reg_login;
813 break; 772 break;
814 case FDMI_DID: 773 case FDMI_DID:
815 mbox->mbox_cmpl = 774 mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
816 lpfc_mbx_cmpl_fdmi_reg_login;
817 break; 775 break;
818 default: 776 default:
819 mbox->mbox_cmpl = 777 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
820 lpfc_mbx_cmpl_reg_login;
821 } 778 }
822 mbox->context2 = ndlp; 779 mbox->context2 = lpfc_nlp_get(ndlp);
823 if (lpfc_sli_issue_mbox(phba, mbox, 780 if (lpfc_sli_issue_mbox(phba, mbox,
824 (MBX_NOWAIT | MBX_STOP_IOCB)) 781 (MBX_NOWAIT | MBX_STOP_IOCB))
825 != MBX_NOT_FINISHED) { 782 != MBX_NOT_FINISHED) {
826 ndlp->nlp_state = 783 lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE);
827 NLP_STE_REG_LOGIN_ISSUE;
828 lpfc_nlp_list(phba, ndlp,
829 NLP_REGLOGIN_LIST);
830 return ndlp->nlp_state; 784 return ndlp->nlp_state;
831 } 785 }
786 lpfc_nlp_put(ndlp);
832 mp = (struct lpfc_dmabuf *)mbox->context1; 787 mp = (struct lpfc_dmabuf *)mbox->context1;
833 lpfc_mbuf_free(phba, mp->virt, mp->phys); 788 lpfc_mbuf_free(phba, mp->virt, mp->phys);
834 kfree(mp); 789 kfree(mp);
@@ -841,7 +796,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
841 out: 796 out:
842 /* Free this node since the driver cannot login or has the wrong 797 /* Free this node since the driver cannot login or has the wrong
843 sparm */ 798 sparm */
844 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 799 lpfc_drop_node(phba, ndlp);
845 return NLP_STE_FREED_NODE; 800 return NLP_STE_FREED_NODE;
846} 801}
847 802
@@ -855,9 +810,9 @@ lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
855 } 810 }
856 else { 811 else {
857 /* software abort outstanding PLOGI */ 812 /* software abort outstanding PLOGI */
858 lpfc_els_abort(phba, ndlp, 1); 813 lpfc_els_abort(phba, ndlp);
859 814
860 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 815 lpfc_drop_node(phba, ndlp);
861 return NLP_STE_FREED_NODE; 816 return NLP_STE_FREED_NODE;
862 } 817 }
863} 818}
@@ -868,11 +823,10 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
868 uint32_t evt) 823 uint32_t evt)
869{ 824{
870 /* software abort outstanding PLOGI */ 825 /* software abort outstanding PLOGI */
871 lpfc_els_abort(phba, ndlp, 1); 826 lpfc_els_abort(phba, ndlp);
872 827
873 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 828 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
874 ndlp->nlp_state = NLP_STE_NPR_NODE; 829 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
875 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
876 spin_lock_irq(phba->host->host_lock); 830 spin_lock_irq(phba->host->host_lock);
877 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 831 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
878 spin_unlock_irq(phba->host->host_lock); 832 spin_unlock_irq(phba->host->host_lock);
@@ -888,7 +842,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
888 struct lpfc_iocbq *cmdiocb; 842 struct lpfc_iocbq *cmdiocb;
889 843
890 /* software abort outstanding ADISC */ 844 /* software abort outstanding ADISC */
891 lpfc_els_abort(phba, ndlp, 1); 845 lpfc_els_abort(phba, ndlp);
892 846
893 cmdiocb = (struct lpfc_iocbq *) arg; 847 cmdiocb = (struct lpfc_iocbq *) arg;
894 848
@@ -896,8 +850,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
896 return ndlp->nlp_state; 850 return ndlp->nlp_state;
897 } 851 }
898 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 852 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
899 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 853 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
900 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
901 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 854 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
902 855
903 return ndlp->nlp_state; 856 return ndlp->nlp_state;
@@ -926,7 +879,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
926 cmdiocb = (struct lpfc_iocbq *) arg; 879 cmdiocb = (struct lpfc_iocbq *) arg;
927 880
928 /* software abort outstanding ADISC */ 881 /* software abort outstanding ADISC */
929 lpfc_els_abort(phba, ndlp, 0); 882 lpfc_els_abort(phba, ndlp);
930 883
931 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 884 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
932 return ndlp->nlp_state; 885 return ndlp->nlp_state;
@@ -987,20 +940,17 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
987 memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name)); 940 memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
988 941
989 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 942 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
990 ndlp->nlp_state = NLP_STE_NPR_NODE; 943 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
991 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
992 lpfc_unreg_rpi(phba, ndlp); 944 lpfc_unreg_rpi(phba, ndlp);
993 return ndlp->nlp_state; 945 return ndlp->nlp_state;
994 } 946 }
995 947
996 if (ndlp->nlp_type & NLP_FCP_TARGET) { 948 if (ndlp->nlp_type & NLP_FCP_TARGET) {
997 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 949 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
998 ndlp->nlp_state = NLP_STE_MAPPED_NODE; 950 lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
999 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
1000 } else { 951 } else {
1001 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 952 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1002 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 953 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
1003 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1004 } 954 }
1005 return ndlp->nlp_state; 955 return ndlp->nlp_state;
1006} 956}
@@ -1016,9 +966,9 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
1016 } 966 }
1017 else { 967 else {
1018 /* software abort outstanding ADISC */ 968 /* software abort outstanding ADISC */
1019 lpfc_els_abort(phba, ndlp, 1); 969 lpfc_els_abort(phba, ndlp);
1020 970
1021 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 971 lpfc_drop_node(phba, ndlp);
1022 return NLP_STE_FREED_NODE; 972 return NLP_STE_FREED_NODE;
1023 } 973 }
1024} 974}
@@ -1029,11 +979,10 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
1029 uint32_t evt) 979 uint32_t evt)
1030{ 980{
1031 /* software abort outstanding ADISC */ 981 /* software abort outstanding ADISC */
1032 lpfc_els_abort(phba, ndlp, 1); 982 lpfc_els_abort(phba, ndlp);
1033 983
1034 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 984 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1035 ndlp->nlp_state = NLP_STE_NPR_NODE; 985 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1036 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1037 spin_lock_irq(phba->host->host_lock); 986 spin_lock_irq(phba->host->host_lock);
1038 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 987 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1039 ndlp->nlp_flag |= NLP_NPR_ADISC; 988 ndlp->nlp_flag |= NLP_NPR_ADISC;
@@ -1074,9 +1023,36 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1074 uint32_t evt) 1023 uint32_t evt)
1075{ 1024{
1076 struct lpfc_iocbq *cmdiocb; 1025 struct lpfc_iocbq *cmdiocb;
1026 LPFC_MBOXQ_t *mb;
1027 LPFC_MBOXQ_t *nextmb;
1028 struct lpfc_dmabuf *mp;
1077 1029
1078 cmdiocb = (struct lpfc_iocbq *) arg; 1030 cmdiocb = (struct lpfc_iocbq *) arg;
1079 1031
1032 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1033 if ((mb = phba->sli.mbox_active)) {
1034 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1035 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1036 mb->context2 = NULL;
1037 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1038 }
1039 }
1040
1041 spin_lock_irq(phba->host->host_lock);
1042 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1043 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1044 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1045 mp = (struct lpfc_dmabuf *) (mb->context1);
1046 if (mp) {
1047 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1048 kfree(mp);
1049 }
1050 list_del(&mb->list);
1051 mempool_free(mb, phba->mbox_mem_pool);
1052 }
1053 }
1054 spin_unlock_irq(phba->host->host_lock);
1055
1080 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 1056 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1081 return ndlp->nlp_state; 1057 return ndlp->nlp_state;
1082} 1058}
@@ -1133,8 +1109,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1133 */ 1109 */
1134 if (mb->mbxStatus == MBXERR_RPI_FULL) { 1110 if (mb->mbxStatus == MBXERR_RPI_FULL) {
1135 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 1111 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
1136 ndlp->nlp_state = NLP_STE_UNUSED_NODE; 1112 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
1137 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
1138 return ndlp->nlp_state; 1113 return ndlp->nlp_state;
1139 } 1114 }
1140 1115
@@ -1147,8 +1122,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1147 1122
1148 lpfc_issue_els_logo(phba, ndlp, 0); 1123 lpfc_issue_els_logo(phba, ndlp, 0);
1149 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1124 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1150 ndlp->nlp_state = NLP_STE_NPR_NODE; 1125 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1151 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1152 return ndlp->nlp_state; 1126 return ndlp->nlp_state;
1153 } 1127 }
1154 1128
@@ -1157,13 +1131,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1157 /* Only if we are not a fabric nport do we issue PRLI */ 1131 /* Only if we are not a fabric nport do we issue PRLI */
1158 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1132 if (!(ndlp->nlp_type & NLP_FABRIC)) {
1159 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1133 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1160 ndlp->nlp_state = NLP_STE_PRLI_ISSUE; 1134 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
1161 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1162 lpfc_issue_els_prli(phba, ndlp, 0); 1135 lpfc_issue_els_prli(phba, ndlp, 0);
1163 } else { 1136 } else {
1164 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1137 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1165 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 1138 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
1166 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1167 } 1139 }
1168 return ndlp->nlp_state; 1140 return ndlp->nlp_state;
1169} 1141}
@@ -1178,7 +1150,7 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
1178 return ndlp->nlp_state; 1150 return ndlp->nlp_state;
1179 } 1151 }
1180 else { 1152 else {
1181 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1153 lpfc_drop_node(phba, ndlp);
1182 return NLP_STE_FREED_NODE; 1154 return NLP_STE_FREED_NODE;
1183 } 1155 }
1184} 1156}
@@ -1189,8 +1161,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
1189 uint32_t evt) 1161 uint32_t evt)
1190{ 1162{
1191 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1163 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1192 ndlp->nlp_state = NLP_STE_NPR_NODE; 1164 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1193 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1194 spin_lock_irq(phba->host->host_lock); 1165 spin_lock_irq(phba->host->host_lock);
1195 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1166 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1196 spin_unlock_irq(phba->host->host_lock); 1167 spin_unlock_irq(phba->host->host_lock);
@@ -1230,7 +1201,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
1230 cmdiocb = (struct lpfc_iocbq *) arg; 1201 cmdiocb = (struct lpfc_iocbq *) arg;
1231 1202
1232 /* Software abort outstanding PRLI before sending acc */ 1203 /* Software abort outstanding PRLI before sending acc */
1233 lpfc_els_abort(phba, ndlp, 1); 1204 lpfc_els_abort(phba, ndlp);
1234 1205
1235 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 1206 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1236 return ndlp->nlp_state; 1207 return ndlp->nlp_state;
@@ -1279,8 +1250,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1279 irsp = &rspiocb->iocb; 1250 irsp = &rspiocb->iocb;
1280 if (irsp->ulpStatus) { 1251 if (irsp->ulpStatus) {
1281 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1252 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1282 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 1253 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
1283 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1284 return ndlp->nlp_state; 1254 return ndlp->nlp_state;
1285 } 1255 }
1286 1256
@@ -1298,8 +1268,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1298 } 1268 }
1299 1269
1300 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1270 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1301 ndlp->nlp_state = NLP_STE_MAPPED_NODE; 1271 lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
1302 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
1303 return ndlp->nlp_state; 1272 return ndlp->nlp_state;
1304} 1273}
1305 1274
@@ -1330,9 +1299,9 @@ lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
1330 } 1299 }
1331 else { 1300 else {
1332 /* software abort outstanding PLOGI */ 1301 /* software abort outstanding PLOGI */
1333 lpfc_els_abort(phba, ndlp, 1); 1302 lpfc_els_abort(phba, ndlp);
1334 1303
1335 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1304 lpfc_drop_node(phba, ndlp);
1336 return NLP_STE_FREED_NODE; 1305 return NLP_STE_FREED_NODE;
1337 } 1306 }
1338} 1307}
@@ -1359,11 +1328,10 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
1359 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1328 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1360{ 1329{
1361 /* software abort outstanding PRLI */ 1330 /* software abort outstanding PRLI */
1362 lpfc_els_abort(phba, ndlp, 1); 1331 lpfc_els_abort(phba, ndlp);
1363 1332
1364 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1333 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1365 ndlp->nlp_state = NLP_STE_NPR_NODE; 1334 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1366 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1367 spin_lock_irq(phba->host->host_lock); 1335 spin_lock_irq(phba->host->host_lock);
1368 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1336 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1369 spin_unlock_irq(phba->host->host_lock); 1337 spin_unlock_irq(phba->host->host_lock);
@@ -1436,8 +1404,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
1436 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1404 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1437{ 1405{
1438 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; 1406 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1439 ndlp->nlp_state = NLP_STE_NPR_NODE; 1407 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1440 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1441 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1408 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1442 lpfc_disc_set_adisc(phba, ndlp); 1409 lpfc_disc_set_adisc(phba, ndlp);
1443 1410
@@ -1518,8 +1485,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
1518 uint32_t evt) 1485 uint32_t evt)
1519{ 1486{
1520 ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; 1487 ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
1521 ndlp->nlp_state = NLP_STE_NPR_NODE; 1488 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1522 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1523 spin_lock_irq(phba->host->host_lock); 1489 spin_lock_irq(phba->host->host_lock);
1524 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1490 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1525 spin_unlock_irq(phba->host->host_lock); 1491 spin_unlock_irq(phba->host->host_lock);
@@ -1551,8 +1517,7 @@ lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
1551 /* send PLOGI immediately, move to PLOGI issue state */ 1517 /* send PLOGI immediately, move to PLOGI issue state */
1552 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1518 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1553 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1519 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1554 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 1520 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1555 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1556 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1521 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
1557 } 1522 }
1558 1523
@@ -1580,16 +1545,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
1580 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1545 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1581 spin_unlock_irq(phba->host->host_lock); 1546 spin_unlock_irq(phba->host->host_lock);
1582 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1547 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1583 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 1548 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
1584 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1585 lpfc_issue_els_adisc(phba, ndlp, 0); 1549 lpfc_issue_els_adisc(phba, ndlp, 0);
1586 } else { 1550 } else {
1587 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1551 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1588 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 1552 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1589 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1590 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1553 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
1591 } 1554 }
1592
1593 } 1555 }
1594 return ndlp->nlp_state; 1556 return ndlp->nlp_state;
1595} 1557}
@@ -1627,13 +1589,11 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1627 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){ 1589 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
1628 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1590 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1629 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1591 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1630 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 1592 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
1631 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1632 lpfc_issue_els_adisc(phba, ndlp, 0); 1593 lpfc_issue_els_adisc(phba, ndlp, 0);
1633 } else { 1594 } else {
1634 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1595 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1635 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 1596 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1636 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1637 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1597 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
1638 } 1598 }
1639 } 1599 }
@@ -1682,7 +1642,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
1682 1642
1683 irsp = &rspiocb->iocb; 1643 irsp = &rspiocb->iocb;
1684 if (irsp->ulpStatus) { 1644 if (irsp->ulpStatus) {
1685 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1645 lpfc_drop_node(phba, ndlp);
1686 return NLP_STE_FREED_NODE; 1646 return NLP_STE_FREED_NODE;
1687 } 1647 }
1688 return ndlp->nlp_state; 1648 return ndlp->nlp_state;
@@ -1700,7 +1660,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
1700 1660
1701 irsp = &rspiocb->iocb; 1661 irsp = &rspiocb->iocb;
1702 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { 1662 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1703 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1663 lpfc_drop_node(phba, ndlp);
1704 return NLP_STE_FREED_NODE; 1664 return NLP_STE_FREED_NODE;
1705 } 1665 }
1706 return ndlp->nlp_state; 1666 return ndlp->nlp_state;
@@ -1728,7 +1688,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
1728 1688
1729 irsp = &rspiocb->iocb; 1689 irsp = &rspiocb->iocb;
1730 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { 1690 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1731 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1691 lpfc_drop_node(phba, ndlp);
1732 return NLP_STE_FREED_NODE; 1692 return NLP_STE_FREED_NODE;
1733 } 1693 }
1734 return ndlp->nlp_state; 1694 return ndlp->nlp_state;
@@ -1749,7 +1709,7 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1749 ndlp->nlp_rpi = mb->un.varWords[0]; 1709 ndlp->nlp_rpi = mb->un.varWords[0];
1750 else { 1710 else {
1751 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1711 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1752 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1712 lpfc_drop_node(phba, ndlp);
1753 return NLP_STE_FREED_NODE; 1713 return NLP_STE_FREED_NODE;
1754 } 1714 }
1755 } 1715 }
@@ -1765,7 +1725,7 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba,
1765 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1725 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1766 return ndlp->nlp_state; 1726 return ndlp->nlp_state;
1767 } 1727 }
1768 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1728 lpfc_drop_node(phba, ndlp);
1769 return NLP_STE_FREED_NODE; 1729 return NLP_STE_FREED_NODE;
1770} 1730}
1771 1731
@@ -1964,7 +1924,7 @@ lpfc_disc_state_machine(struct lpfc_hba * phba,
1964 uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *, 1924 uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
1965 uint32_t); 1925 uint32_t);
1966 1926
1967 ndlp->nlp_disc_refcnt++; 1927 lpfc_nlp_get(ndlp);
1968 cur_state = ndlp->nlp_state; 1928 cur_state = ndlp->nlp_state;
1969 1929
1970 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ 1930 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
@@ -1987,18 +1947,7 @@ lpfc_disc_state_machine(struct lpfc_hba * phba,
1987 phba->brd_no, 1947 phba->brd_no,
1988 rc, ndlp->nlp_DID, ndlp->nlp_flag); 1948 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1989 1949
1990 ndlp->nlp_disc_refcnt--; 1950 lpfc_nlp_put(ndlp);
1991 1951
1992 /* Check to see if ndlp removal is deferred */
1993 if ((ndlp->nlp_disc_refcnt == 0)
1994 && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
1995 spin_lock_irq(phba->host->host_lock);
1996 ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
1997 spin_unlock_irq(phba->host->host_lock);
1998 lpfc_nlp_remove(phba, ndlp);
1999 return NLP_STE_FREED_NODE;
2000 }
2001 if (rc == NLP_STE_FREED_NODE)
2002 return NLP_STE_FREED_NODE;
2003 return rc; 1952 return rc;
2004} 1953}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c3e68e0d8f74..9a12d05e99e4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -146,6 +146,10 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
146 146
147 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 147 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
148 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 148 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
149 if (lpfc_cmd) {
150 lpfc_cmd->seg_cnt = 0;
151 lpfc_cmd->nonsg_phys = 0;
152 }
149 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 153 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
150 return lpfc_cmd; 154 return lpfc_cmd;
151} 155}
@@ -288,13 +292,13 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
288} 292}
289 293
290static void 294static void
291lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) 295lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
292{ 296{
293 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 297 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
294 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 298 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
295 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 299 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
296 struct lpfc_hba *phba = lpfc_cmd->scsi_hba; 300 struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
297 uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm; 301 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
298 uint32_t resp_info = fcprsp->rspStatus2; 302 uint32_t resp_info = fcprsp->rspStatus2;
299 uint32_t scsi_status = fcprsp->rspStatus3; 303 uint32_t scsi_status = fcprsp->rspStatus3;
300 uint32_t *lp; 304 uint32_t *lp;
@@ -356,6 +360,24 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
356 fcpi_parm, cmnd->cmnd[0], cmnd->underflow); 360 fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
357 361
358 /* 362 /*
363 * If there is an under run check if under run reported by
364 * storage array is same as the under run reported by HBA.
365 * If this is not same, there is a dropped frame.
366 */
367 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
368 fcpi_parm &&
369 (cmnd->resid != fcpi_parm)) {
370 lpfc_printf_log(phba, KERN_WARNING,
371 LOG_FCP | LOG_FCP_ERROR,
372 "%d:0735 FCP Read Check Error and Underrun "
373 "Data: x%x x%x x%x x%x\n", phba->brd_no,
374 be32_to_cpu(fcpcmd->fcpDl),
375 cmnd->resid,
376 fcpi_parm, cmnd->cmnd[0]);
377 cmnd->resid = cmnd->request_bufflen;
378 host_status = DID_ERROR;
379 }
380 /*
359 * The cmnd->underflow is the minimum number of bytes that must 381 * The cmnd->underflow is the minimum number of bytes that must
360 * be transfered for this command. Provided a sense condition 382 * be transfered for this command. Provided a sense condition
361 * is not present, make sure the actual amount transferred is at 383 * is not present, make sure the actual amount transferred is at
@@ -435,7 +457,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
435 switch (lpfc_cmd->status) { 457 switch (lpfc_cmd->status) {
436 case IOSTAT_FCP_RSP_ERROR: 458 case IOSTAT_FCP_RSP_ERROR:
437 /* Call FCP RSP handler to determine result */ 459 /* Call FCP RSP handler to determine result */
438 lpfc_handle_fcp_err(lpfc_cmd); 460 lpfc_handle_fcp_err(lpfc_cmd,pIocbOut);
439 break; 461 break;
440 case IOSTAT_NPORT_BSY: 462 case IOSTAT_NPORT_BSY:
441 case IOSTAT_FABRIC_BSY: 463 case IOSTAT_FABRIC_BSY:
@@ -466,10 +488,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
466 488
467 result = cmd->result; 489 result = cmd->result;
468 sdev = cmd->device; 490 sdev = cmd->device;
491 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
469 cmd->scsi_done(cmd); 492 cmd->scsi_done(cmd);
470 493
471 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 494 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
472 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
473 lpfc_release_scsi_buf(phba, lpfc_cmd); 495 lpfc_release_scsi_buf(phba, lpfc_cmd);
474 return; 496 return;
475 } 497 }
@@ -527,7 +549,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
527 } 549 }
528 } 550 }
529 551
530 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
531 lpfc_release_scsi_buf(phba, lpfc_cmd); 552 lpfc_release_scsi_buf(phba, lpfc_cmd);
532} 553}
533 554
@@ -670,6 +691,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
670 return (1); 691 return (1);
671} 692}
672 693
694static void
695lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
696 struct lpfc_iocbq *cmdiocbq,
697 struct lpfc_iocbq *rspiocbq)
698{
699 struct lpfc_scsi_buf *lpfc_cmd =
700 (struct lpfc_scsi_buf *) cmdiocbq->context1;
701 if (lpfc_cmd)
702 lpfc_release_scsi_buf(phba, lpfc_cmd);
703 return;
704}
705
673static int 706static int
674lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, 707lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
675 unsigned tgt_id, unsigned int lun, 708 unsigned tgt_id, unsigned int lun,
@@ -706,8 +739,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
706 &phba->sli.ring[phba->sli.fcp_ring], 739 &phba->sli.ring[phba->sli.fcp_ring],
707 iocbq, iocbqrsp, lpfc_cmd->timeout); 740 iocbq, iocbqrsp, lpfc_cmd->timeout);
708 if (ret != IOCB_SUCCESS) { 741 if (ret != IOCB_SUCCESS) {
742 if (ret == IOCB_TIMEDOUT)
743 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
709 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 744 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
710 ret = FAILED;
711 } else { 745 } else {
712 ret = SUCCESS; 746 ret = SUCCESS;
713 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4]; 747 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
@@ -974,7 +1008,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
974} 1008}
975 1009
976static int 1010static int
977lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) 1011lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
978{ 1012{
979 struct Scsi_Host *shost = cmnd->device->host; 1013 struct Scsi_Host *shost = cmnd->device->host;
980 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1014 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
@@ -984,6 +1018,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
984 struct lpfc_nodelist *pnode = rdata->pnode; 1018 struct lpfc_nodelist *pnode = rdata->pnode;
985 uint32_t cmd_result = 0, cmd_status = 0; 1019 uint32_t cmd_result = 0, cmd_status = 0;
986 int ret = FAILED; 1020 int ret = FAILED;
1021 int iocb_status = IOCB_SUCCESS;
987 int cnt, loopcnt; 1022 int cnt, loopcnt;
988 1023
989 lpfc_block_error_handler(cmnd); 1024 lpfc_block_error_handler(cmnd);
@@ -995,7 +1030,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
995 */ 1030 */
996 while ( 1 ) { 1031 while ( 1 ) {
997 if (!pnode) 1032 if (!pnode)
998 return FAILED; 1033 goto out;
999 1034
1000 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { 1035 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1001 spin_unlock_irq(phba->host->host_lock); 1036 spin_unlock_irq(phba->host->host_lock);
@@ -1013,7 +1048,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1013 } 1048 }
1014 pnode = rdata->pnode; 1049 pnode = rdata->pnode;
1015 if (!pnode) 1050 if (!pnode)
1016 return FAILED; 1051 goto out;
1017 } 1052 }
1018 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 1053 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
1019 break; 1054 break;
@@ -1028,7 +1063,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1028 lpfc_cmd->rdata = rdata; 1063 lpfc_cmd->rdata = rdata;
1029 1064
1030 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun, 1065 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
1031 FCP_LUN_RESET); 1066 FCP_TARGET_RESET);
1032 if (!ret) 1067 if (!ret)
1033 goto out_free_scsi_buf; 1068 goto out_free_scsi_buf;
1034 1069
@@ -1040,16 +1075,21 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1040 goto out_free_scsi_buf; 1075 goto out_free_scsi_buf;
1041 1076
1042 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 1077 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1043 "%d:0703 Issue LUN Reset to TGT %d LUN %d " 1078 "%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
1044 "Data: x%x x%x\n", phba->brd_no, cmnd->device->id, 1079 "nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
1045 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 1080 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1046 1081
1047 ret = lpfc_sli_issue_iocb_wait(phba, 1082 iocb_status = lpfc_sli_issue_iocb_wait(phba,
1048 &phba->sli.ring[phba->sli.fcp_ring], 1083 &phba->sli.ring[phba->sli.fcp_ring],
1049 iocbq, iocbqrsp, lpfc_cmd->timeout); 1084 iocbq, iocbqrsp, lpfc_cmd->timeout);
1050 if (ret == IOCB_SUCCESS)
1051 ret = SUCCESS;
1052 1085
1086 if (iocb_status == IOCB_TIMEDOUT)
1087 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
1088
1089 if (iocb_status == IOCB_SUCCESS)
1090 ret = SUCCESS;
1091 else
1092 ret = iocb_status;
1053 1093
1054 cmd_result = iocbqrsp->iocb.un.ulpWord[4]; 1094 cmd_result = iocbqrsp->iocb.un.ulpWord[4];
1055 cmd_status = iocbqrsp->iocb.ulpStatus; 1095 cmd_status = iocbqrsp->iocb.ulpStatus;
@@ -1087,18 +1127,19 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1087 1127
1088 if (cnt) { 1128 if (cnt) {
1089 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1129 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1090 "%d:0719 LUN Reset I/O flush failure: cnt x%x\n", 1130 "%d:0719 device reset I/O flush failure: cnt x%x\n",
1091 phba->brd_no, cnt); 1131 phba->brd_no, cnt);
1092 ret = FAILED; 1132 ret = FAILED;
1093 } 1133 }
1094 1134
1095out_free_scsi_buf: 1135out_free_scsi_buf:
1096 lpfc_release_scsi_buf(phba, lpfc_cmd); 1136 if (iocb_status != IOCB_TIMEDOUT) {
1097 1137 lpfc_release_scsi_buf(phba, lpfc_cmd);
1138 }
1098 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1139 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1099 "%d:0713 SCSI layer issued LUN reset (%d, %d) " 1140 "%d:0713 SCSI layer issued device reset (%d, %d) "
1100 "Data: x%x x%x x%x\n", 1141 "return x%x status x%x result x%x\n",
1101 phba->brd_no, cmnd->device->id,cmnd->device->lun, 1142 phba->brd_no, cmnd->device->id, cmnd->device->lun,
1102 ret, cmd_status, cmd_result); 1143 ret, cmd_status, cmd_result);
1103 1144
1104out: 1145out:
@@ -1107,7 +1148,7 @@ out:
1107} 1148}
1108 1149
1109static int 1150static int
1110lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) 1151lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1111{ 1152{
1112 struct Scsi_Host *shost = cmnd->device->host; 1153 struct Scsi_Host *shost = cmnd->device->host;
1113 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1154 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
@@ -1134,10 +1175,12 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1134 * fail, this routine returns failure to the midlayer. 1175 * fail, this routine returns failure to the midlayer.
1135 */ 1176 */
1136 for (i = 0; i < LPFC_MAX_TARGET; i++) { 1177 for (i = 0; i < LPFC_MAX_TARGET; i++) {
1137 /* Search the mapped list for this target ID */ 1178 /* Search for mapped node by target ID */
1138 match = 0; 1179 match = 0;
1139 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1180 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
1140 if ((i == ndlp->nlp_sid) && ndlp->rport) { 1181 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1182 i == ndlp->nlp_sid &&
1183 ndlp->rport) {
1141 match = 1; 1184 match = 1;
1142 break; 1185 break;
1143 } 1186 }
@@ -1152,13 +1195,17 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1152 "%d:0700 Bus Reset on target %d failed\n", 1195 "%d:0700 Bus Reset on target %d failed\n",
1153 phba->brd_no, i); 1196 phba->brd_no, i);
1154 err_count++; 1197 err_count++;
1198 break;
1155 } 1199 }
1156 } 1200 }
1157 1201
1202 if (ret != IOCB_TIMEDOUT)
1203 lpfc_release_scsi_buf(phba, lpfc_cmd);
1204
1158 if (err_count == 0) 1205 if (err_count == 0)
1159 ret = SUCCESS; 1206 ret = SUCCESS;
1160 1207 else
1161 lpfc_release_scsi_buf(phba, lpfc_cmd); 1208 ret = FAILED;
1162 1209
1163 /* 1210 /*
1164 * All outstanding txcmplq I/Os should have been aborted by 1211 * All outstanding txcmplq I/Os should have been aborted by
@@ -1299,11 +1346,13 @@ struct scsi_host_template lpfc_template = {
1299 .info = lpfc_info, 1346 .info = lpfc_info,
1300 .queuecommand = lpfc_queuecommand, 1347 .queuecommand = lpfc_queuecommand,
1301 .eh_abort_handler = lpfc_abort_handler, 1348 .eh_abort_handler = lpfc_abort_handler,
1302 .eh_device_reset_handler= lpfc_reset_lun_handler, 1349 .eh_device_reset_handler= lpfc_device_reset_handler,
1303 .eh_bus_reset_handler = lpfc_reset_bus_handler, 1350 .eh_bus_reset_handler = lpfc_bus_reset_handler,
1304 .slave_alloc = lpfc_slave_alloc, 1351 .slave_alloc = lpfc_slave_alloc,
1305 .slave_configure = lpfc_slave_configure, 1352 .slave_configure = lpfc_slave_configure,
1306 .slave_destroy = lpfc_slave_destroy, 1353 .slave_destroy = lpfc_slave_destroy,
1354 .scan_finished = lpfc_scan_finished,
1355 .scan_start = lpfc_scan_start,
1307 .this_id = -1, 1356 .this_id = -1,
1308 .sg_tablesize = LPFC_SG_SEG_CNT, 1357 .sg_tablesize = LPFC_SG_SEG_CNT,
1309 .cmd_per_lun = LPFC_CMD_PER_LUN, 1358 .cmd_per_lun = LPFC_CMD_PER_LUN,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9fb6960a8ada..a1e721459e2b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -528,6 +528,7 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
528 * If pdone_q is empty, the driver thread gave up waiting and 528 * If pdone_q is empty, the driver thread gave up waiting and
529 * continued running. 529 * continued running.
530 */ 530 */
531 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
531 pdone_q = (wait_queue_head_t *) pmboxq->context1; 532 pdone_q = (wait_queue_head_t *) pmboxq->context1;
532 if (pdone_q) 533 if (pdone_q)
533 wake_up_interruptible(pdone_q); 534 wake_up_interruptible(pdone_q);
@@ -538,11 +539,32 @@ void
538lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 539lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
539{ 540{
540 struct lpfc_dmabuf *mp; 541 struct lpfc_dmabuf *mp;
542 uint16_t rpi;
543 int rc;
544
541 mp = (struct lpfc_dmabuf *) (pmb->context1); 545 mp = (struct lpfc_dmabuf *) (pmb->context1);
546
542 if (mp) { 547 if (mp) {
543 lpfc_mbuf_free(phba, mp->virt, mp->phys); 548 lpfc_mbuf_free(phba, mp->virt, mp->phys);
544 kfree(mp); 549 kfree(mp);
545 } 550 }
551
552 /*
553 * If a REG_LOGIN succeeded after node is destroyed or node
554 * is in re-discovery driver need to cleanup the RPI.
555 */
556 if (!(phba->fc_flag & FC_UNLOADING) &&
557 (pmb->mb.mbxCommand == MBX_REG_LOGIN64) &&
558 (!pmb->mb.mbxStatus)) {
559
560 rpi = pmb->mb.un.varWords[0];
561 lpfc_unreg_login(phba, rpi, pmb);
562 pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
563 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
564 if (rc != MBX_NOT_FINISHED)
565 return;
566 }
567
546 mempool_free( pmb, phba->mbox_mem_pool); 568 mempool_free( pmb, phba->mbox_mem_pool);
547 return; 569 return;
548} 570}
@@ -693,25 +715,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
693 } else { 715 } else {
694 spin_unlock_irq(phba->host->host_lock); 716 spin_unlock_irq(phba->host->host_lock);
695 /* Turn on IOCB processing */ 717 /* Turn on IOCB processing */
696 for (i = 0; i < phba->sli.num_rings; i++) { 718 for (i = 0; i < phba->sli.num_rings; i++)
697 lpfc_sli_turn_on_ring(phba, i); 719 lpfc_sli_turn_on_ring(phba, i);
698 }
699
700 /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
701 while (!list_empty(&phba->freebufList)) {
702 struct lpfc_dmabuf *mp;
703
704 mp = NULL;
705 list_remove_head((&phba->freebufList),
706 mp,
707 struct lpfc_dmabuf,
708 list);
709 if (mp) {
710 lpfc_mbuf_free(phba, mp->virt,
711 mp->phys);
712 kfree(mp);
713 }
714 }
715 } 720 }
716 721
717 } while (process_next); 722 } while (process_next);
@@ -833,6 +838,14 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
833 * All other are passed to the completion callback. 838 * All other are passed to the completion callback.
834 */ 839 */
835 if (pring->ringno == LPFC_ELS_RING) { 840 if (pring->ringno == LPFC_ELS_RING) {
841 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
842 cmdiocbp->iocb_flag &=
843 ~LPFC_DRIVER_ABORTED;
844 saveq->iocb.ulpStatus =
845 IOSTAT_LOCAL_REJECT;
846 saveq->iocb.un.ulpWord[4] =
847 IOERR_SLI_ABORTED;
848 }
836 spin_unlock_irqrestore(phba->host->host_lock, 849 spin_unlock_irqrestore(phba->host->host_lock,
837 iflag); 850 iflag);
838 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 851 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -1464,8 +1477,9 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1464int 1477int
1465lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1478lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1466{ 1479{
1480 LIST_HEAD(completions);
1467 struct lpfc_iocbq *iocb, *next_iocb; 1481 struct lpfc_iocbq *iocb, *next_iocb;
1468 IOCB_t *icmd = NULL, *cmd = NULL; 1482 IOCB_t *cmd = NULL;
1469 int errcnt; 1483 int errcnt;
1470 1484
1471 errcnt = 0; 1485 errcnt = 0;
@@ -1474,46 +1488,28 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1474 * First do the txq. 1488 * First do the txq.
1475 */ 1489 */
1476 spin_lock_irq(phba->host->host_lock); 1490 spin_lock_irq(phba->host->host_lock);
1477 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 1491 list_splice_init(&pring->txq, &completions);
1478 list_del_init(&iocb->list);
1479 if (iocb->iocb_cmpl) {
1480 icmd = &iocb->iocb;
1481 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1482 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1483 spin_unlock_irq(phba->host->host_lock);
1484 (iocb->iocb_cmpl) (phba, iocb, iocb);
1485 spin_lock_irq(phba->host->host_lock);
1486 } else
1487 lpfc_sli_release_iocbq(phba, iocb);
1488 }
1489 pring->txq_cnt = 0; 1492 pring->txq_cnt = 0;
1490 INIT_LIST_HEAD(&(pring->txq));
1491 1493
1492 /* Next issue ABTS for everything on the txcmplq */ 1494 /* Next issue ABTS for everything on the txcmplq */
1493 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1495 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1494 cmd = &iocb->iocb; 1496 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1495 1497
1496 /* 1498 spin_unlock_irq(phba->host->host_lock);
1497 * Imediate abort of IOCB, deque and call compl
1498 */
1499 1499
1500 list_del_init(&iocb->list); 1500 while (!list_empty(&completions)) {
1501 pring->txcmplq_cnt--; 1501 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1502 cmd = &iocb->iocb;
1503 list_del(&iocb->list);
1502 1504
1503 if (iocb->iocb_cmpl) { 1505 if (iocb->iocb_cmpl) {
1504 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1506 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1505 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1507 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1506 spin_unlock_irq(phba->host->host_lock);
1507 (iocb->iocb_cmpl) (phba, iocb, iocb); 1508 (iocb->iocb_cmpl) (phba, iocb, iocb);
1508 spin_lock_irq(phba->host->host_lock);
1509 } else 1509 } else
1510 lpfc_sli_release_iocbq(phba, iocb); 1510 lpfc_sli_release_iocbq(phba, iocb);
1511 } 1511 }
1512 1512
1513 INIT_LIST_HEAD(&pring->txcmplq);
1514 pring->txcmplq_cnt = 0;
1515 spin_unlock_irq(phba->host->host_lock);
1516
1517 return errcnt; 1513 return errcnt;
1518} 1514}
1519 1515
@@ -1588,6 +1584,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1588 hc_copy = readl(phba->HCregaddr); 1584 hc_copy = readl(phba->HCregaddr);
1589 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 1585 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1590 readl(phba->HCregaddr); /* flush */ 1586 readl(phba->HCregaddr); /* flush */
1587 phba->fc_flag |= FC_IGNORE_ERATT;
1591 1588
1592 if (readl(phba->HAregaddr) & HA_ERATT) { 1589 if (readl(phba->HAregaddr) & HA_ERATT) {
1593 /* Clear Chip error bit */ 1590 /* Clear Chip error bit */
@@ -1630,6 +1627,7 @@ clear_errat:
1630 } 1627 }
1631 1628
1632restore_hc: 1629restore_hc:
1630 phba->fc_flag &= ~FC_IGNORE_ERATT;
1633 writel(hc_copy, phba->HCregaddr); 1631 writel(hc_copy, phba->HCregaddr);
1634 readl(phba->HCregaddr); /* flush */ 1632 readl(phba->HCregaddr); /* flush */
1635} 1633}
@@ -1665,6 +1663,7 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1665 status &= ~HC_ERINT_ENA; 1663 status &= ~HC_ERINT_ENA;
1666 writel(status, phba->HCregaddr); 1664 writel(status, phba->HCregaddr);
1667 readl(phba->HCregaddr); /* flush */ 1665 readl(phba->HCregaddr); /* flush */
1666 phba->fc_flag |= FC_IGNORE_ERATT;
1668 spin_unlock_irq(phba->host->host_lock); 1667 spin_unlock_irq(phba->host->host_lock);
1669 1668
1670 lpfc_kill_board(phba, pmb); 1669 lpfc_kill_board(phba, pmb);
@@ -1674,6 +1673,9 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1674 if (retval != MBX_SUCCESS) { 1673 if (retval != MBX_SUCCESS) {
1675 if (retval != MBX_BUSY) 1674 if (retval != MBX_BUSY)
1676 mempool_free(pmb, phba->mbox_mem_pool); 1675 mempool_free(pmb, phba->mbox_mem_pool);
1676 spin_lock_irq(phba->host->host_lock);
1677 phba->fc_flag &= ~FC_IGNORE_ERATT;
1678 spin_unlock_irq(phba->host->host_lock);
1677 return 1; 1679 return 1;
1678 } 1680 }
1679 1681
@@ -1700,6 +1702,7 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1700 } 1702 }
1701 spin_lock_irq(phba->host->host_lock); 1703 spin_lock_irq(phba->host->host_lock);
1702 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1704 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1705 phba->fc_flag &= ~FC_IGNORE_ERATT;
1703 spin_unlock_irq(phba->host->host_lock); 1706 spin_unlock_irq(phba->host->host_lock);
1704 1707
1705 psli->mbox_active = NULL; 1708 psli->mbox_active = NULL;
@@ -1985,42 +1988,6 @@ lpfc_sli_hba_setup_exit:
1985 return rc; 1988 return rc;
1986} 1989}
1987 1990
1988static void
1989lpfc_mbox_abort(struct lpfc_hba * phba)
1990{
1991 LPFC_MBOXQ_t *pmbox;
1992 MAILBOX_t *mb;
1993
1994 if (phba->sli.mbox_active) {
1995 del_timer_sync(&phba->sli.mbox_tmo);
1996 phba->work_hba_events &= ~WORKER_MBOX_TMO;
1997 pmbox = phba->sli.mbox_active;
1998 mb = &pmbox->mb;
1999 phba->sli.mbox_active = NULL;
2000 if (pmbox->mbox_cmpl) {
2001 mb->mbxStatus = MBX_NOT_FINISHED;
2002 (pmbox->mbox_cmpl) (phba, pmbox);
2003 }
2004 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2005 }
2006
2007 /* Abort all the non active mailbox commands. */
2008 spin_lock_irq(phba->host->host_lock);
2009 pmbox = lpfc_mbox_get(phba);
2010 while (pmbox) {
2011 mb = &pmbox->mb;
2012 if (pmbox->mbox_cmpl) {
2013 mb->mbxStatus = MBX_NOT_FINISHED;
2014 spin_unlock_irq(phba->host->host_lock);
2015 (pmbox->mbox_cmpl) (phba, pmbox);
2016 spin_lock_irq(phba->host->host_lock);
2017 }
2018 pmbox = lpfc_mbox_get(phba);
2019 }
2020 spin_unlock_irq(phba->host->host_lock);
2021 return;
2022}
2023
2024/*! lpfc_mbox_timeout 1991/*! lpfc_mbox_timeout
2025 * 1992 *
2026 * \pre 1993 * \pre
@@ -2055,6 +2022,8 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2055{ 2022{
2056 LPFC_MBOXQ_t *pmbox; 2023 LPFC_MBOXQ_t *pmbox;
2057 MAILBOX_t *mb; 2024 MAILBOX_t *mb;
2025 struct lpfc_sli *psli = &phba->sli;
2026 struct lpfc_sli_ring *pring;
2058 2027
2059 spin_lock_irq(phba->host->host_lock); 2028 spin_lock_irq(phba->host->host_lock);
2060 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { 2029 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
@@ -2062,8 +2031,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2062 return; 2031 return;
2063 } 2032 }
2064 2033
2065 phba->work_hba_events &= ~WORKER_MBOX_TMO;
2066
2067 pmbox = phba->sli.mbox_active; 2034 pmbox = phba->sli.mbox_active;
2068 mb = &pmbox->mb; 2035 mb = &pmbox->mb;
2069 2036
@@ -2078,17 +2045,32 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2078 phba->sli.sli_flag, 2045 phba->sli.sli_flag,
2079 phba->sli.mbox_active); 2046 phba->sli.mbox_active);
2080 2047
2081 phba->sli.mbox_active = NULL; 2048 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2082 if (pmbox->mbox_cmpl) { 2049 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2083 mb->mbxStatus = MBX_NOT_FINISHED; 2050 * it to fail all oustanding SCSI IO.
2084 spin_unlock_irq(phba->host->host_lock); 2051 */
2085 (pmbox->mbox_cmpl) (phba, pmbox); 2052 phba->hba_state = LPFC_STATE_UNKNOWN;
2086 spin_lock_irq(phba->host->host_lock); 2053 phba->work_hba_events &= ~WORKER_MBOX_TMO;
2087 } 2054 phba->fc_flag |= FC_ESTABLISH_LINK;
2088 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2055 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2089
2090 spin_unlock_irq(phba->host->host_lock); 2056 spin_unlock_irq(phba->host->host_lock);
2091 lpfc_mbox_abort(phba); 2057
2058 pring = &psli->ring[psli->fcp_ring];
2059 lpfc_sli_abort_iocb_ring(phba, pring);
2060
2061 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2062 "%d:0316 Resetting board due to mailbox timeout\n",
2063 phba->brd_no);
2064 /*
2065 * lpfc_offline calls lpfc_sli_hba_down which will clean up
2066 * on oustanding mailbox commands.
2067 */
2068 lpfc_offline_prep(phba);
2069 lpfc_offline(phba);
2070 lpfc_sli_brdrestart(phba);
2071 if (lpfc_online(phba) == 0) /* Initialize the HBA */
2072 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2073 lpfc_unblock_mgmt_io(phba);
2092 return; 2074 return;
2093} 2075}
2094 2076
@@ -2320,9 +2302,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2320 spin_unlock_irqrestore(phba->host->host_lock, 2302 spin_unlock_irqrestore(phba->host->host_lock,
2321 drvr_flag); 2303 drvr_flag);
2322 2304
2323 /* Can be in interrupt context, do not sleep */ 2305 msleep(1);
2324 /* (or might be called with interrupts disabled) */
2325 mdelay(1);
2326 2306
2327 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2307 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2328 2308
@@ -2430,7 +2410,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2430 2410
2431 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) { 2411 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
2432 /* 2412 /*
2433 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF 2413 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2434 * can be issued if the link is not up. 2414 * can be issued if the link is not up.
2435 */ 2415 */
2436 switch (piocb->iocb.ulpCommand) { 2416 switch (piocb->iocb.ulpCommand) {
@@ -2444,6 +2424,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2444 piocb->iocb_cmpl = NULL; 2424 piocb->iocb_cmpl = NULL;
2445 /*FALLTHROUGH*/ 2425 /*FALLTHROUGH*/
2446 case CMD_CREATE_XRI_CR: 2426 case CMD_CREATE_XRI_CR:
2427 case CMD_CLOSE_XRI_CN:
2428 case CMD_CLOSE_XRI_CX:
2447 break; 2429 break;
2448 default: 2430 default:
2449 goto iocb_busy; 2431 goto iocb_busy;
@@ -2637,11 +2619,12 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
2637int 2619int
2638lpfc_sli_hba_down(struct lpfc_hba * phba) 2620lpfc_sli_hba_down(struct lpfc_hba * phba)
2639{ 2621{
2622 LIST_HEAD(completions);
2640 struct lpfc_sli *psli; 2623 struct lpfc_sli *psli;
2641 struct lpfc_sli_ring *pring; 2624 struct lpfc_sli_ring *pring;
2642 LPFC_MBOXQ_t *pmb; 2625 LPFC_MBOXQ_t *pmb;
2643 struct lpfc_iocbq *iocb, *next_iocb; 2626 struct lpfc_iocbq *iocb;
2644 IOCB_t *icmd = NULL; 2627 IOCB_t *cmd = NULL;
2645 int i; 2628 int i;
2646 unsigned long flags = 0; 2629 unsigned long flags = 0;
2647 2630
@@ -2649,7 +2632,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2649 lpfc_hba_down_prep(phba); 2632 lpfc_hba_down_prep(phba);
2650 2633
2651 spin_lock_irqsave(phba->host->host_lock, flags); 2634 spin_lock_irqsave(phba->host->host_lock, flags);
2652
2653 for (i = 0; i < psli->num_rings; i++) { 2635 for (i = 0; i < psli->num_rings; i++) {
2654 pring = &psli->ring[i]; 2636 pring = &psli->ring[i];
2655 pring->flag |= LPFC_DEFERRED_RING_EVENT; 2637 pring->flag |= LPFC_DEFERRED_RING_EVENT;
@@ -2658,28 +2640,25 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2658 * Error everything on the txq since these iocbs have not been 2640 * Error everything on the txq since these iocbs have not been
2659 * given to the FW yet. 2641 * given to the FW yet.
2660 */ 2642 */
2643 list_splice_init(&pring->txq, &completions);
2661 pring->txq_cnt = 0; 2644 pring->txq_cnt = 0;
2662 2645
2663 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 2646 }
2664 list_del_init(&iocb->list); 2647 spin_unlock_irqrestore(phba->host->host_lock, flags);
2665 if (iocb->iocb_cmpl) {
2666 icmd = &iocb->iocb;
2667 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2668 icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2669 spin_unlock_irqrestore(phba->host->host_lock,
2670 flags);
2671 (iocb->iocb_cmpl) (phba, iocb, iocb);
2672 spin_lock_irqsave(phba->host->host_lock, flags);
2673 } else
2674 lpfc_sli_release_iocbq(phba, iocb);
2675 }
2676 2648
2677 INIT_LIST_HEAD(&(pring->txq)); 2649 while (!list_empty(&completions)) {
2650 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2651 cmd = &iocb->iocb;
2652 list_del(&iocb->list);
2678 2653
2654 if (iocb->iocb_cmpl) {
2655 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2656 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2657 (iocb->iocb_cmpl) (phba, iocb, iocb);
2658 } else
2659 lpfc_sli_release_iocbq(phba, iocb);
2679 } 2660 }
2680 2661
2681 spin_unlock_irqrestore(phba->host->host_lock, flags);
2682
2683 /* Return any active mbox cmds */ 2662 /* Return any active mbox cmds */
2684 del_timer_sync(&psli->mbox_tmo); 2663 del_timer_sync(&psli->mbox_tmo);
2685 spin_lock_irqsave(phba->host->host_lock, flags); 2664 spin_lock_irqsave(phba->host->host_lock, flags);
@@ -2768,85 +2747,138 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2768} 2747}
2769 2748
2770static void 2749static void
2771lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2750lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2772 struct lpfc_iocbq * rspiocb) 2751 struct lpfc_iocbq * rspiocb)
2773{ 2752{
2774 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 2753 IOCB_t *irsp;
2775 /* Free the resources associated with the ELS_REQUEST64 IOCB the driver 2754 uint16_t abort_iotag, abort_context;
2776 * just aborted. 2755 struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb;
2777 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl 2756 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
2778 */ 2757
2779 if (cmdiocb->context2) { 2758 abort_iocb = NULL;
2780 buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2; 2759 irsp = &rspiocb->iocb;
2781 2760
2782 /* Free the response IOCB before completing the abort 2761 spin_lock_irq(phba->host->host_lock);
2783 command. */
2784 buf_ptr = NULL;
2785 list_remove_head((&buf_ptr1->list), buf_ptr,
2786 struct lpfc_dmabuf, list);
2787 if (buf_ptr) {
2788 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2789 kfree(buf_ptr);
2790 }
2791 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2792 kfree(buf_ptr1);
2793 }
2794 2762
2795 if (cmdiocb->context3) { 2763 if (irsp->ulpStatus) {
2796 buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3; 2764 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
2797 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2765 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
2798 kfree(buf_ptr); 2766
2767 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
2768 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
2769
2770 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2771 "%d:0327 Cannot abort els iocb %p"
2772 " with tag %x context %x\n",
2773 phba->brd_no, abort_iocb,
2774 abort_iotag, abort_context);
2775
2776 /*
2777 * make sure we have the right iocbq before taking it
2778 * off the txcmplq and try to call completion routine.
2779 */
2780 if (abort_iocb &&
2781 abort_iocb->iocb.ulpContext == abort_context &&
2782 abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
2783 list_del(&abort_iocb->list);
2784 pring->txcmplq_cnt--;
2785
2786 rsp_ab_iocb = lpfc_sli_get_iocbq(phba);
2787 if (rsp_ab_iocb == NULL)
2788 lpfc_sli_release_iocbq(phba, abort_iocb);
2789 else {
2790 abort_iocb->iocb_flag &=
2791 ~LPFC_DRIVER_ABORTED;
2792 rsp_ab_iocb->iocb.ulpStatus =
2793 IOSTAT_LOCAL_REJECT;
2794 rsp_ab_iocb->iocb.un.ulpWord[4] =
2795 IOERR_SLI_ABORTED;
2796 spin_unlock_irq(phba->host->host_lock);
2797 (abort_iocb->iocb_cmpl)
2798 (phba, abort_iocb, rsp_ab_iocb);
2799 spin_lock_irq(phba->host->host_lock);
2800 lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
2801 }
2802 }
2799 } 2803 }
2800 2804
2801 lpfc_sli_release_iocbq(phba, cmdiocb); 2805 lpfc_sli_release_iocbq(phba, cmdiocb);
2806 spin_unlock_irq(phba->host->host_lock);
2802 return; 2807 return;
2803} 2808}
2804 2809
2805int 2810int
2806lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba, 2811lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
2807 struct lpfc_sli_ring * pring, 2812 struct lpfc_sli_ring * pring,
2808 struct lpfc_iocbq * cmdiocb) 2813 struct lpfc_iocbq * cmdiocb)
2809{ 2814{
2810 struct lpfc_iocbq *abtsiocbp; 2815 struct lpfc_iocbq *abtsiocbp;
2811 IOCB_t *icmd = NULL; 2816 IOCB_t *icmd = NULL;
2812 IOCB_t *iabt = NULL; 2817 IOCB_t *iabt = NULL;
2818 int retval = IOCB_ERROR;
2819
2820 /* There are certain command types we don't want
2821 * to abort.
2822 */
2823 icmd = &cmdiocb->iocb;
2824 if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) ||
2825 (icmd->ulpCommand == CMD_CLOSE_XRI_CN))
2826 return 0;
2827
2828 /* If we're unloading, interrupts are disabled so we
2829 * need to cleanup the iocb here.
2830 */
2831 if (phba->fc_flag & FC_UNLOADING)
2832 goto abort_iotag_exit;
2813 2833
2814 /* issue ABTS for this IOCB based on iotag */ 2834 /* issue ABTS for this IOCB based on iotag */
2815 abtsiocbp = lpfc_sli_get_iocbq(phba); 2835 abtsiocbp = lpfc_sli_get_iocbq(phba);
2816 if (abtsiocbp == NULL) 2836 if (abtsiocbp == NULL)
2817 return 0; 2837 return 0;
2818 2838
2839 /* This signals the response to set the correct status
2840 * before calling the completion handler.
2841 */
2842 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
2843
2819 iabt = &abtsiocbp->iocb; 2844 iabt = &abtsiocbp->iocb;
2820 icmd = &cmdiocb->iocb; 2845 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
2821 switch (icmd->ulpCommand) { 2846 iabt->un.acxri.abortContextTag = icmd->ulpContext;
2822 case CMD_ELS_REQUEST64_CR: 2847 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
2823 /* Even though we abort the ELS command, the firmware may access 2848 iabt->ulpLe = 1;
2824 * the BPL or other resources before it processes our 2849 iabt->ulpClass = icmd->ulpClass;
2825 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
2826 * resources till the actual abort request completes.
2827 */
2828 abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
2829 abtsiocbp->context2 = cmdiocb->context2;
2830 abtsiocbp->context3 = cmdiocb->context3;
2831 cmdiocb->context2 = NULL;
2832 cmdiocb->context3 = NULL;
2833 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
2834 break;
2835 default:
2836 lpfc_sli_release_iocbq(phba, abtsiocbp);
2837 return 0;
2838 }
2839 2850
2840 iabt->un.amxri.abortType = ABORT_TYPE_ABTS; 2851 if (phba->hba_state >= LPFC_LINK_UP)
2841 iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32; 2852 iabt->ulpCommand = CMD_ABORT_XRI_CN;
2853 else
2854 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
2842 2855
2843 iabt->ulpLe = 1; 2856 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
2844 iabt->ulpClass = CLASS3;
2845 iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
2846 2857
2847 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) { 2858 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2848 lpfc_sli_release_iocbq(phba, abtsiocbp); 2859 "%d:0339 Abort xri x%x, original iotag x%x, abort "
2849 return 0; 2860 "cmd iotag x%x\n",
2861 phba->brd_no, iabt->un.acxri.abortContextTag,
2862 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
2863 retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
2864
2865abort_iotag_exit:
2866
2867 /* If we could not issue an abort dequeue the iocb and handle
2868 * the completion here.
2869 */
2870 if (retval == IOCB_ERROR) {
2871 list_del(&cmdiocb->list);
2872 pring->txcmplq_cnt--;
2873
2874 if (cmdiocb->iocb_cmpl) {
2875 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2876 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2877 spin_unlock_irq(phba->host->host_lock);
2878 (cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb);
2879 spin_lock_irq(phba->host->host_lock);
2880 } else
2881 lpfc_sli_release_iocbq(phba, cmdiocb);
2850 } 2882 }
2851 2883
2852 return 1; 2884 return 1;
@@ -2918,9 +2950,11 @@ void
2918lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2950lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2919 struct lpfc_iocbq * rspiocb) 2951 struct lpfc_iocbq * rspiocb)
2920{ 2952{
2921 spin_lock_irq(phba->host->host_lock); 2953 unsigned long iflags;
2954
2955 spin_lock_irqsave(phba->host->host_lock, iflags);
2922 lpfc_sli_release_iocbq(phba, cmdiocb); 2956 lpfc_sli_release_iocbq(phba, cmdiocb);
2923 spin_unlock_irq(phba->host->host_lock); 2957 spin_unlock_irqrestore(phba->host->host_lock, iflags);
2924 return; 2958 return;
2925} 2959}
2926 2960
@@ -3043,22 +3077,22 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3043 timeout_req); 3077 timeout_req);
3044 spin_lock_irq(phba->host->host_lock); 3078 spin_lock_irq(phba->host->host_lock);
3045 3079
3046 if (timeleft == 0) { 3080 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3081 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3082 "%d:0331 IOCB wake signaled\n",
3083 phba->brd_no);
3084 } else if (timeleft == 0) {
3047 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3085 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3048 "%d:0338 IOCB wait timeout error - no " 3086 "%d:0338 IOCB wait timeout error - no "
3049 "wake response Data x%x\n", 3087 "wake response Data x%x\n",
3050 phba->brd_no, timeout); 3088 phba->brd_no, timeout);
3051 retval = IOCB_TIMEDOUT; 3089 retval = IOCB_TIMEDOUT;
3052 } else if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 3090 } else {
3053 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3091 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3054 "%d:0330 IOCB wake NOT set, " 3092 "%d:0330 IOCB wake NOT set, "
3055 "Data x%x x%lx\n", phba->brd_no, 3093 "Data x%x x%lx\n", phba->brd_no,
3056 timeout, (timeleft / jiffies)); 3094 timeout, (timeleft / jiffies));
3057 retval = IOCB_TIMEDOUT; 3095 retval = IOCB_TIMEDOUT;
3058 } else {
3059 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3060 "%d:0331 IOCB wake signaled\n",
3061 phba->brd_no);
3062 } 3096 }
3063 } else { 3097 } else {
3064 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3098 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3087,8 +3121,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3087 uint32_t timeout) 3121 uint32_t timeout)
3088{ 3122{
3089 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 3123 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3090 DECLARE_WAITQUEUE(wq_entry, current);
3091 uint32_t timeleft = 0;
3092 int retval; 3124 int retval;
3093 3125
3094 /* The caller must leave context1 empty. */ 3126 /* The caller must leave context1 empty. */
@@ -3101,27 +3133,25 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3101 /* setup context field to pass wait_queue pointer to wake function */ 3133 /* setup context field to pass wait_queue pointer to wake function */
3102 pmboxq->context1 = &done_q; 3134 pmboxq->context1 = &done_q;
3103 3135
3104 /* start to sleep before we wait, to avoid races */
3105 set_current_state(TASK_INTERRUPTIBLE);
3106 add_wait_queue(&done_q, &wq_entry);
3107
3108 /* now issue the command */ 3136 /* now issue the command */
3109 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3137 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3110 3138
3111 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 3139 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3112 timeleft = schedule_timeout(timeout * HZ); 3140 wait_event_interruptible_timeout(done_q,
3141 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3142 timeout * HZ);
3143
3113 pmboxq->context1 = NULL; 3144 pmboxq->context1 = NULL;
3114 /* if schedule_timeout returns 0, we timed out and were not 3145 /*
3115 woken up */ 3146 * if LPFC_MBX_WAKE flag is set the mailbox is completed
3116 if ((timeleft == 0) || signal_pending(current)) 3147 * else do not free the resources.
3117 retval = MBX_TIMEOUT; 3148 */
3118 else 3149 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3119 retval = MBX_SUCCESS; 3150 retval = MBX_SUCCESS;
3151 else
3152 retval = MBX_TIMEOUT;
3120 } 3153 }
3121 3154
3122
3123 set_current_state(TASK_RUNNING);
3124 remove_wait_queue(&done_q, &wq_entry);
3125 return retval; 3155 return retval;
3126} 3156}
3127 3157
@@ -3184,6 +3214,11 @@ lpfc_intr_handler(int irq, void *dev_id)
3184 */ 3214 */
3185 spin_lock(phba->host->host_lock); 3215 spin_lock(phba->host->host_lock);
3186 ha_copy = readl(phba->HAregaddr); 3216 ha_copy = readl(phba->HAregaddr);
3217 /* If somebody is waiting to handle an eratt don't process it
3218 * here. The brdkill function will do this.
3219 */
3220 if (phba->fc_flag & FC_IGNORE_ERATT)
3221 ha_copy &= ~HA_ERATT;
3187 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 3222 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3188 readl(phba->HAregaddr); /* flush */ 3223 readl(phba->HAregaddr); /* flush */
3189 spin_unlock(phba->host->host_lock); 3224 spin_unlock(phba->host->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index a43549959dc7..41c38d324ab0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -39,9 +39,10 @@ struct lpfc_iocbq {
39 IOCB_t iocb; /* IOCB cmd */ 39 IOCB_t iocb; /* IOCB cmd */
40 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 40 uint8_t retry; /* retry counter for IOCB cmd - if needed */
41 uint8_t iocb_flag; 41 uint8_t iocb_flag;
42#define LPFC_IO_LIBDFC 1 /* libdfc iocb */ 42#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
43#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ 43#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
44#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 44#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
45#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
45 46
46 uint8_t abort_count; 47 uint8_t abort_count;
47 uint8_t rsvd2; 48 uint8_t rsvd2;
@@ -67,6 +68,8 @@ struct lpfc_iocbq {
67#define IOCB_ERROR 2 68#define IOCB_ERROR 2
68#define IOCB_TIMEDOUT 3 69#define IOCB_TIMEDOUT 3
69 70
71#define LPFC_MBX_WAKE 1
72
70typedef struct lpfcMboxq { 73typedef struct lpfcMboxq {
71 /* MBOXQs are used in single linked lists */ 74 /* MBOXQs are used in single linked lists */
72 struct list_head list; /* ptr to next mailbox command */ 75 struct list_head list; /* ptr to next mailbox command */
@@ -75,6 +78,7 @@ typedef struct lpfcMboxq {
75 void *context2; /* caller context information */ 78 void *context2; /* caller context information */
76 79
77 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 80 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
81 uint8_t mbox_flag;
78 82
79} LPFC_MBOXQ_t; 83} LPFC_MBOXQ_t;
80 84
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index a61ef3d1e7f1..92a9107019d2 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,12 +18,12 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.11" 21#define LPFC_DRIVER_VERSION "8.1.12"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
25#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 25#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
26 LPFC_DRIVER_VERSION 26 LPFC_DRIVER_VERSION
27#define LPFC_COPYRIGHT "Copyright(c) 2004-2006 Emulex. All rights reserved." 27#define LPFC_COPYRIGHT "Copyright(c) 2004-2007 Emulex. All rights reserved."
28 28
29#define DFC_API_VERSION "0.0.0" 29#define DFC_API_VERSION "0.0.0"
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 7fc6e06ea7e1..3cce75d70263 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1754,7 +1754,8 @@ __mega_busywait_mbox (adapter_t *adapter)
1754 for (counter = 0; counter < 10000; counter++) { 1754 for (counter = 0; counter < 10000; counter++) {
1755 if (!mbox->m_in.busy) 1755 if (!mbox->m_in.busy)
1756 return 0; 1756 return 0;
1757 udelay(100); yield(); 1757 udelay(100);
1758 cond_resched();
1758 } 1759 }
1759 return -1; /* give up after 1 second */ 1760 return -1; /* give up after 1 second */
1760} 1761}
@@ -3177,7 +3178,10 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end )
3177 3178
3178 return len; 3179 return len;
3179} 3180}
3180 3181#else
3182static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
3183{
3184}
3181#endif 3185#endif
3182 3186
3183 3187
@@ -4342,7 +4346,7 @@ mega_support_cluster(adapter_t *adapter)
4342 return 0; 4346 return 0;
4343} 4347}
4344 4348
4345 4349#ifdef CONFIG_PROC_FS
4346/** 4350/**
4347 * mega_adapinq() 4351 * mega_adapinq()
4348 * @adapter - pointer to our soft state 4352 * @adapter - pointer to our soft state
@@ -4447,7 +4451,7 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
4447 4451
4448 return rval; 4452 return rval;
4449} 4453}
4450 4454#endif
4451 4455
4452/** 4456/**
4453 * mega_internal_command() 4457 * mega_internal_command()
@@ -4965,7 +4969,6 @@ megaraid_remove_one(struct pci_dev *pdev)
4965{ 4969{
4966 struct Scsi_Host *host = pci_get_drvdata(pdev); 4970 struct Scsi_Host *host = pci_get_drvdata(pdev);
4967 adapter_t *adapter = (adapter_t *)host->hostdata; 4971 adapter_t *adapter = (adapter_t *)host->hostdata;
4968 char buf[12] = { 0 };
4969 4972
4970 scsi_remove_host(host); 4973 scsi_remove_host(host);
4971 4974
@@ -5011,8 +5014,11 @@ megaraid_remove_one(struct pci_dev *pdev)
5011 remove_proc_entry("raiddrives-30-39", 5014 remove_proc_entry("raiddrives-30-39",
5012 adapter->controller_proc_dir_entry); 5015 adapter->controller_proc_dir_entry);
5013#endif 5016#endif
5014 sprintf(buf, "hba%d", adapter->host->host_no); 5017 {
5015 remove_proc_entry(buf, mega_proc_dir_entry); 5018 char buf[12] = { 0 };
5019 sprintf(buf, "hba%d", adapter->host->host_no);
5020 remove_proc_entry(buf, mega_proc_dir_entry);
5021 }
5016 } 5022 }
5017#endif 5023#endif
5018 5024
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index c6e74643abe2..ee70bd4ae4ba 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -1002,7 +1002,6 @@ static int megaraid_reset(Scsi_Cmnd *);
1002static int megaraid_abort_and_reset(adapter_t *, Scsi_Cmnd *, int); 1002static int megaraid_abort_and_reset(adapter_t *, Scsi_Cmnd *, int);
1003static int megaraid_biosparam(struct scsi_device *, struct block_device *, 1003static int megaraid_biosparam(struct scsi_device *, struct block_device *,
1004 sector_t, int []); 1004 sector_t, int []);
1005static int mega_print_inquiry(char *, char *);
1006 1005
1007static int mega_build_sglist (adapter_t *adapter, scb_t *scb, 1006static int mega_build_sglist (adapter_t *adapter, scb_t *scb,
1008 u32 *buffer, u32 *length); 1007 u32 *buffer, u32 *length);
@@ -1024,6 +1023,7 @@ static int mega_init_scb (adapter_t *);
1024static int mega_is_bios_enabled (adapter_t *); 1023static int mega_is_bios_enabled (adapter_t *);
1025 1024
1026#ifdef CONFIG_PROC_FS 1025#ifdef CONFIG_PROC_FS
1026static int mega_print_inquiry(char *, char *);
1027static void mega_create_proc_entry(int, struct proc_dir_entry *); 1027static void mega_create_proc_entry(int, struct proc_dir_entry *);
1028static int proc_read_config(char *, char **, off_t, int, int *, void *); 1028static int proc_read_config(char *, char **, off_t, int, int *, void *);
1029static int proc_read_stat(char *, char **, off_t, int, int *, void *); 1029static int proc_read_stat(char *, char **, off_t, int, int *, void *);
@@ -1040,10 +1040,10 @@ static int proc_rdrv_20(char *, char **, off_t, int, int *, void *);
1040static int proc_rdrv_30(char *, char **, off_t, int, int *, void *); 1040static int proc_rdrv_30(char *, char **, off_t, int, int *, void *);
1041static int proc_rdrv_40(char *, char **, off_t, int, int *, void *); 1041static int proc_rdrv_40(char *, char **, off_t, int, int *, void *);
1042static int proc_rdrv(adapter_t *, char *, int, int); 1042static int proc_rdrv(adapter_t *, char *, int, int);
1043#endif
1044 1043
1045static int mega_adapinq(adapter_t *, dma_addr_t); 1044static int mega_adapinq(adapter_t *, dma_addr_t);
1046static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t); 1045static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t);
1046#endif
1047 1047
1048static int mega_support_ext_cdb(adapter_t *); 1048static int mega_support_ext_cdb(adapter_t *);
1049static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *, 1049static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *,
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index f33a678f0897..e075a52ac104 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(mraid_mm_unregister_adp);
60EXPORT_SYMBOL(mraid_mm_adapter_app_handle); 60EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
61 61
62static int majorno; 62static int majorno;
63static uint32_t drvr_ver = 0x02200206; 63static uint32_t drvr_ver = 0x02200207;
64 64
65static int adapters_count_g; 65static int adapters_count_g;
66static struct list_head adapters_list_g; 66static struct list_head adapters_list_g;
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index cf3666d7d97a..e64d1a19d8d7 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -185,7 +185,7 @@ struct mesh_state {
185 * Driver is too messy, we need a few prototypes... 185 * Driver is too messy, we need a few prototypes...
186 */ 186 */
187static void mesh_done(struct mesh_state *ms, int start_next); 187static void mesh_done(struct mesh_state *ms, int start_next);
188static void mesh_interrupt(int irq, void *dev_id); 188static void mesh_interrupt(struct mesh_state *ms);
189static void cmd_complete(struct mesh_state *ms); 189static void cmd_complete(struct mesh_state *ms);
190static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd); 190static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
191static void halt_dma(struct mesh_state *ms); 191static void halt_dma(struct mesh_state *ms);
@@ -466,7 +466,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
466 dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x", 466 dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
467 MKWORD(mr->interrupt, mr->exception, 467 MKWORD(mr->interrupt, mr->exception,
468 mr->error, mr->fifo_count)); 468 mr->error, mr->fifo_count));
469 mesh_interrupt(0, (void *)ms); 469 mesh_interrupt(ms);
470 if (ms->phase != arbitrating) 470 if (ms->phase != arbitrating)
471 return; 471 return;
472 } 472 }
@@ -504,7 +504,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
504 dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x", 504 dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
505 MKWORD(mr->interrupt, mr->exception, 505 MKWORD(mr->interrupt, mr->exception,
506 mr->error, mr->fifo_count)); 506 mr->error, mr->fifo_count));
507 mesh_interrupt(0, (void *)ms); 507 mesh_interrupt(ms);
508 if (ms->phase != arbitrating) 508 if (ms->phase != arbitrating)
509 return; 509 return;
510 dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x", 510 dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
@@ -1018,10 +1018,11 @@ static void handle_reset(struct mesh_state *ms)
1018static irqreturn_t do_mesh_interrupt(int irq, void *dev_id) 1018static irqreturn_t do_mesh_interrupt(int irq, void *dev_id)
1019{ 1019{
1020 unsigned long flags; 1020 unsigned long flags;
1021 struct Scsi_Host *dev = ((struct mesh_state *)dev_id)->host; 1021 struct mesh_state *ms = dev_id;
1022 struct Scsi_Host *dev = ms->host;
1022 1023
1023 spin_lock_irqsave(dev->host_lock, flags); 1024 spin_lock_irqsave(dev->host_lock, flags);
1024 mesh_interrupt(irq, dev_id); 1025 mesh_interrupt(ms);
1025 spin_unlock_irqrestore(dev->host_lock, flags); 1026 spin_unlock_irqrestore(dev->host_lock, flags);
1026 return IRQ_HANDLED; 1027 return IRQ_HANDLED;
1027} 1028}
@@ -1661,9 +1662,8 @@ static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
1661 * handler (do_mesh_interrupt) or by other functions in 1662 * handler (do_mesh_interrupt) or by other functions in
1662 * exceptional circumstances 1663 * exceptional circumstances
1663 */ 1664 */
1664static void mesh_interrupt(int irq, void *dev_id) 1665static void mesh_interrupt(struct mesh_state *ms)
1665{ 1666{
1666 struct mesh_state *ms = (struct mesh_state *) dev_id;
1667 volatile struct mesh_regs __iomem *mr = ms->mesh; 1667 volatile struct mesh_regs __iomem *mr = ms->mesh;
1668 int intr; 1668 int intr;
1669 1669
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 6777e8a69153..54d8bdf86852 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4293,7 +4293,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4293 ha->devnum = devnum; /* specifies microcode load address */ 4293 ha->devnum = devnum; /* specifies microcode load address */
4294 4294
4295#ifdef QLA_64BIT_PTR 4295#ifdef QLA_64BIT_PTR
4296 if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) { 4296 if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
4297 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) { 4297 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
4298 printk(KERN_WARNING "scsi(%li): Unable to set a " 4298 printk(KERN_WARNING "scsi(%li): Unable to set a "
4299 "suitable DMA mask - aborting\n", ha->host_no); 4299 "suitable DMA mask - aborting\n", ha->host_no);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3e296ab845b6..2a45aec4ff29 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -13,7 +13,6 @@
13 13
14#ifdef CONFIG_SPARC 14#ifdef CONFIG_SPARC
15#include <asm/prom.h> 15#include <asm/prom.h>
16#include <asm/pbm.h>
17#endif 16#endif
18 17
19/* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */ 18/* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */
@@ -130,18 +129,17 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
130int 129int
131qla2100_pci_config(scsi_qla_host_t *ha) 130qla2100_pci_config(scsi_qla_host_t *ha)
132{ 131{
133 uint16_t w, mwi; 132 int ret;
133 uint16_t w;
134 uint32_t d; 134 uint32_t d;
135 unsigned long flags; 135 unsigned long flags;
136 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 136 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
137 137
138 pci_set_master(ha->pdev); 138 pci_set_master(ha->pdev);
139 mwi = 0; 139 ret = pci_set_mwi(ha->pdev);
140 if (pci_set_mwi(ha->pdev))
141 mwi = PCI_COMMAND_INVALIDATE;
142 140
143 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 141 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
144 w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 142 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
145 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 143 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
146 144
147 /* Reset expansion ROM address decode enable */ 145 /* Reset expansion ROM address decode enable */
@@ -166,22 +164,22 @@ qla2100_pci_config(scsi_qla_host_t *ha)
166int 164int
167qla2300_pci_config(scsi_qla_host_t *ha) 165qla2300_pci_config(scsi_qla_host_t *ha)
168{ 166{
169 uint16_t w, mwi; 167 int ret;
168 uint16_t w;
170 uint32_t d; 169 uint32_t d;
171 unsigned long flags = 0; 170 unsigned long flags = 0;
172 uint32_t cnt; 171 uint32_t cnt;
173 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 172 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
174 173
175 pci_set_master(ha->pdev); 174 pci_set_master(ha->pdev);
176 mwi = 0; 175 ret = pci_set_mwi(ha->pdev);
177 if (pci_set_mwi(ha->pdev))
178 mwi = PCI_COMMAND_INVALIDATE;
179 176
180 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 177 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
181 w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 178 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
182 179
183 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 180 if (IS_QLA2322(ha) || IS_QLA6322(ha))
184 w &= ~PCI_COMMAND_INTX_DISABLE; 181 w &= ~PCI_COMMAND_INTX_DISABLE;
182 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
185 183
186 /* 184 /*
187 * If this is a 2300 card and not 2312, reset the 185 * If this is a 2300 card and not 2312, reset the
@@ -210,7 +208,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
210 ha->fb_rev = RD_FB_CMD_REG(ha, reg); 208 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
211 209
212 if (ha->fb_rev == FPM_2300) 210 if (ha->fb_rev == FPM_2300)
213 w &= ~PCI_COMMAND_INVALIDATE; 211 pci_clear_mwi(ha->pdev);
214 212
215 /* Deselect FPM registers. */ 213 /* Deselect FPM registers. */
216 WRT_REG_WORD(&reg->ctrl_status, 0x0); 214 WRT_REG_WORD(&reg->ctrl_status, 0x0);
@@ -227,7 +225,6 @@ qla2300_pci_config(scsi_qla_host_t *ha)
227 225
228 spin_unlock_irqrestore(&ha->hardware_lock, flags); 226 spin_unlock_irqrestore(&ha->hardware_lock, flags);
229 } 227 }
230 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
231 228
232 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 229 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
233 230
@@ -253,19 +250,18 @@ qla2300_pci_config(scsi_qla_host_t *ha)
253int 250int
254qla24xx_pci_config(scsi_qla_host_t *ha) 251qla24xx_pci_config(scsi_qla_host_t *ha)
255{ 252{
256 uint16_t w, mwi; 253 int ret;
254 uint16_t w;
257 uint32_t d; 255 uint32_t d;
258 unsigned long flags = 0; 256 unsigned long flags = 0;
259 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 257 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
260 int pcix_cmd_reg, pcie_dctl_reg; 258 int pcix_cmd_reg, pcie_dctl_reg;
261 259
262 pci_set_master(ha->pdev); 260 pci_set_master(ha->pdev);
263 mwi = 0; 261 ret = pci_set_mwi(ha->pdev);
264 if (pci_set_mwi(ha->pdev))
265 mwi = PCI_COMMAND_INVALIDATE;
266 262
267 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 263 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
268 w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 264 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
269 w &= ~PCI_COMMAND_INTX_DISABLE; 265 w &= ~PCI_COMMAND_INTX_DISABLE;
270 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 266 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
271 267
@@ -1400,9 +1396,8 @@ static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
1400{ 1396{
1401#ifdef CONFIG_SPARC 1397#ifdef CONFIG_SPARC
1402 struct pci_dev *pdev = ha->pdev; 1398 struct pci_dev *pdev = ha->pdev;
1403 struct pcidev_cookie *pcp = pdev->sysdata; 1399 struct device_node *dp = pci_device_to_OF_node(pdev);
1404 struct device_node *dp = pcp->prom_node; 1400 const u8 *val;
1405 u8 *val;
1406 int len; 1401 int len;
1407 1402
1408 val = of_get_property(dp, "port-wwn", &len); 1403 val = of_get_property(dp, "port-wwn", &len);
@@ -3373,9 +3368,8 @@ static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *n
3373{ 3368{
3374#ifdef CONFIG_SPARC 3369#ifdef CONFIG_SPARC
3375 struct pci_dev *pdev = ha->pdev; 3370 struct pci_dev *pdev = ha->pdev;
3376 struct pcidev_cookie *pcp = pdev->sysdata; 3371 struct device_node *dp = pci_device_to_OF_node(pdev);
3377 struct device_node *dp = pcp->prom_node; 3372 const u8 *val;
3378 u8 *val;
3379 int len; 3373 int len;
3380 3374
3381 val = of_get_property(dp, "port-wwn", &len); 3375 val = of_get_property(dp, "port-wwn", &len);
@@ -3931,6 +3925,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
3931 3925
3932 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)) 3926 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
3933 return; 3927 return;
3928 if (!ha->fw_major_version)
3929 return;
3934 3930
3935 ret = qla2x00_stop_firmware(ha); 3931 ret = qla2x00_stop_firmware(ha);
3936 for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) { 3932 for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d4885616cd39..ca463469063d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1726,6 +1726,17 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1726 qla_printk(KERN_WARNING, ha, 1726 qla_printk(KERN_WARNING, ha,
1727 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 1727 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1728skip_msix: 1728skip_msix:
1729
1730 if (!IS_QLA24XX(ha))
1731 goto skip_msi;
1732
1733 ret = pci_enable_msi(ha->pdev);
1734 if (!ret) {
1735 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1736 ha->flags.msi_enabled = 1;
1737 }
1738skip_msi:
1739
1729 ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler, 1740 ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler,
1730 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1741 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1731 if (!ret) { 1742 if (!ret) {
@@ -1746,6 +1757,8 @@ qla2x00_free_irqs(scsi_qla_host_t *ha)
1746 1757
1747 if (ha->flags.msix_enabled) 1758 if (ha->flags.msix_enabled)
1748 qla24xx_disable_msix(ha); 1759 qla24xx_disable_msix(ha);
1749 else if (ha->flags.inta_enabled) 1760 else if (ha->flags.inta_enabled) {
1750 free_irq(ha->host->irq, ha); 1761 free_irq(ha->host->irq, ha);
1762 pci_disable_msi(ha->pdev);
1763 }
1751} 1764}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b78919a318e2..dd076da86a46 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -36,7 +36,7 @@ module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
36MODULE_PARM_DESC(ql2xlogintimeout, 36MODULE_PARM_DESC(ql2xlogintimeout,
37 "Login timeout value in seconds."); 37 "Login timeout value in seconds.");
38 38
39int qlport_down_retry = 30; 39int qlport_down_retry;
40module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR); 40module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
41MODULE_PARM_DESC(qlport_down_retry, 41MODULE_PARM_DESC(qlport_down_retry,
42 "Maximum number of command retries to a port that returns " 42 "Maximum number of command retries to a port that returns "
@@ -1577,9 +1577,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1577 goto probe_failed; 1577 goto probe_failed;
1578 } 1578 }
1579 1579
1580 if (qla2x00_initialize_adapter(ha) && 1580 if (qla2x00_initialize_adapter(ha)) {
1581 !(ha->device_flags & DFLG_NO_CABLE)) {
1582
1583 qla_printk(KERN_WARNING, ha, 1581 qla_printk(KERN_WARNING, ha,
1584 "Failed to initialize adapter\n"); 1582 "Failed to initialize adapter\n");
1585 1583
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index dc85495c337f..c375a4efbc71 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.07-k6" 10#define QLA2XXX_VERSION "8.01.07-k7"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 7b4e077a39c1..6437d024b0dd 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -8,6 +8,8 @@
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include <scsi/scsi_dbg.h> 9#include <scsi/scsi_dbg.h>
10 10
11#if 0
12
11static void qla4xxx_print_srb_info(struct srb * srb) 13static void qla4xxx_print_srb_info(struct srb * srb)
12{ 14{
13 printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags); 15 printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
@@ -195,3 +197,5 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
195 if (cnt % 16) 197 if (cnt % 16)
196 printk(KERN_DEBUG "\n"); 198 printk(KERN_DEBUG "\n");
197} 199}
200
201#endif /* 0 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index e021eb5db2b2..5b00cb04e7c0 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -43,8 +43,6 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
43 uint16_t *tcp_source_port_num, 43 uint16_t *tcp_source_port_num,
44 uint16_t *connection_id); 44 uint16_t *connection_id);
45 45
46struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host * ha,
47 uint32_t fw_ddb_index);
48int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index, 46int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
49 dma_addr_t fw_ddb_entry_dma); 47 dma_addr_t fw_ddb_entry_dma);
50 48
@@ -55,18 +53,11 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha);
55struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha); 53struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
56int qla4xxx_add_sess(struct ddb_entry *); 54int qla4xxx_add_sess(struct ddb_entry *);
57void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry); 55void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
58int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
59 uint16_t fw_ddb_index,
60 uint16_t connection_id,
61 uint16_t option);
62int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
63 uint16_t fw_ddb_index);
64int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha); 56int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha);
65int qla4xxx_get_fw_version(struct scsi_qla_host * ha); 57int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
66void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha, 58void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
67 uint32_t intr_status); 59 uint32_t intr_status);
68int qla4xxx_init_rings(struct scsi_qla_host * ha); 60int qla4xxx_init_rings(struct scsi_qla_host * ha);
69void qla4xxx_dump_buffer(void *b, uint32_t size);
70struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index); 61struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
71void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb); 62void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
72int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha); 63int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index b907b06d72ab..6365df268612 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -7,9 +7,8 @@
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9 9
10/* 10static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
11 * QLogic ISP4xxx Hardware Support Function Prototypes. 11 uint32_t fw_ddb_index);
12 */
13 12
14static void ql4xxx_set_mac_number(struct scsi_qla_host *ha) 13static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
15{ 14{
@@ -48,7 +47,8 @@ static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
48 * This routine deallocates and unlinks the specified ddb_entry from the 47 * This routine deallocates and unlinks the specified ddb_entry from the
49 * adapter's 48 * adapter's
50 **/ 49 **/
51void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry) 50static void qla4xxx_free_ddb(struct scsi_qla_host *ha,
51 struct ddb_entry *ddb_entry)
52{ 52{
53 /* Remove device entry from list */ 53 /* Remove device entry from list */
54 list_del_init(&ddb_entry->list); 54 list_del_init(&ddb_entry->list);
@@ -370,9 +370,9 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
370 * must be initialized prior to calling this routine 370 * must be initialized prior to calling this routine
371 * 371 *
372 **/ 372 **/
373int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha, 373static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
374 struct ddb_entry *ddb_entry, 374 struct ddb_entry *ddb_entry,
375 uint32_t fw_ddb_index) 375 uint32_t fw_ddb_index)
376{ 376{
377 struct dev_db_entry *fw_ddb_entry = NULL; 377 struct dev_db_entry *fw_ddb_entry = NULL;
378 dma_addr_t fw_ddb_entry_dma; 378 dma_addr_t fw_ddb_entry_dma;
@@ -450,8 +450,8 @@ int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
450 * This routine allocates a ddb_entry, ititializes some values, and 450 * This routine allocates a ddb_entry, ititializes some values, and
451 * inserts it into the ddb list. 451 * inserts it into the ddb list.
452 **/ 452 **/
453struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha, 453static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
454 uint32_t fw_ddb_index) 454 uint32_t fw_ddb_index)
455{ 455{
456 struct ddb_entry *ddb_entry; 456 struct ddb_entry *ddb_entry;
457 457
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index d41ce380eedc..a216a1781afb 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -19,8 +19,8 @@
19 * - advances the request_in pointer 19 * - advances the request_in pointer
20 * - checks for queue full 20 * - checks for queue full
21 **/ 21 **/
22int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, 22static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
23 struct queue_entry **queue_entry) 23 struct queue_entry **queue_entry)
24{ 24{
25 uint16_t request_in; 25 uint16_t request_in;
26 uint8_t status = QLA_SUCCESS; 26 uint8_t status = QLA_SUCCESS;
@@ -62,8 +62,8 @@ int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
62 * 62 *
63 * This routine issues a marker IOCB. 63 * This routine issues a marker IOCB.
64 **/ 64 **/
65int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, 65static int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
66 struct ddb_entry *ddb_entry, int lun) 66 struct ddb_entry *ddb_entry, int lun)
67{ 67{
68 struct marker_entry *marker_entry; 68 struct marker_entry *marker_entry;
69 unsigned long flags = 0; 69 unsigned long flags = 0;
@@ -96,7 +96,7 @@ exit_send_marker:
96 return status; 96 return status;
97} 97}
98 98
99struct continuation_t1_entry* qla4xxx_alloc_cont_entry( 99static struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
100 struct scsi_qla_host *ha) 100 struct scsi_qla_host *ha)
101{ 101{
102 struct continuation_t1_entry *cont_entry; 102 struct continuation_t1_entry *cont_entry;
@@ -120,7 +120,7 @@ struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
120 return cont_entry; 120 return cont_entry;
121} 121}
122 122
123uint16_t qla4xxx_calc_request_entries(uint16_t dsds) 123static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
124{ 124{
125 uint16_t iocbs; 125 uint16_t iocbs;
126 126
@@ -133,9 +133,9 @@ uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
133 return iocbs; 133 return iocbs;
134} 134}
135 135
136void qla4xxx_build_scsi_iocbs(struct srb *srb, 136static void qla4xxx_build_scsi_iocbs(struct srb *srb,
137 struct command_t3_entry *cmd_entry, 137 struct command_t3_entry *cmd_entry,
138 uint16_t tot_dsds) 138 uint16_t tot_dsds)
139{ 139{
140 struct scsi_qla_host *ha; 140 struct scsi_qla_host *ha;
141 uint16_t avail_dsds; 141 uint16_t avail_dsds;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 7f28657eef3f..f116ff917237 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -20,9 +20,9 @@
20 * If outCount is 0, this routine completes successfully WITHOUT waiting 20 * If outCount is 0, this routine completes successfully WITHOUT waiting
21 * for the mailbox command to complete. 21 * for the mailbox command to complete.
22 **/ 22 **/
23int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, 23static int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
24 uint8_t outCount, uint32_t *mbx_cmd, 24 uint8_t outCount, uint32_t *mbx_cmd,
25 uint32_t *mbx_sts) 25 uint32_t *mbx_sts)
26{ 26{
27 int status = QLA_ERROR; 27 int status = QLA_ERROR;
28 uint8_t i; 28 uint8_t i;
@@ -170,6 +170,8 @@ mbox_exit:
170} 170}
171 171
172 172
173#if 0
174
173/** 175/**
174 * qla4xxx_issue_iocb - issue mailbox iocb command 176 * qla4xxx_issue_iocb - issue mailbox iocb command
175 * @ha: adapter state pointer. 177 * @ha: adapter state pointer.
@@ -243,6 +245,8 @@ int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
243 return QLA_SUCCESS; 245 return QLA_SUCCESS;
244} 246}
245 247
248#endif /* 0 */
249
246/** 250/**
247 * qla4xxx_initialize_fw_cb - initializes firmware control block. 251 * qla4xxx_initialize_fw_cb - initializes firmware control block.
248 * @ha: Pointer to host adapter structure. 252 * @ha: Pointer to host adapter structure.
@@ -570,6 +574,7 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
570 return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]); 574 return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
571} 575}
572 576
577#if 0
573int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha, 578int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
574 uint16_t fw_ddb_index) 579 uint16_t fw_ddb_index)
575{ 580{
@@ -594,6 +599,7 @@ int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
594 599
595 return status; 600 return status;
596} 601}
602#endif /* 0 */
597 603
598/** 604/**
599 * qla4xxx_get_crash_record - retrieves crash record. 605 * qla4xxx_get_crash_record - retrieves crash record.
@@ -649,6 +655,7 @@ exit_get_crash_record:
649 crash_record, crash_record_dma); 655 crash_record, crash_record_dma);
650} 656}
651 657
658#if 0
652/** 659/**
653 * qla4xxx_get_conn_event_log - retrieves connection event log 660 * qla4xxx_get_conn_event_log - retrieves connection event log
654 * @ha: Pointer to host adapter structure. 661 * @ha: Pointer to host adapter structure.
@@ -738,6 +745,7 @@ exit_get_event_log:
738 dma_free_coherent(&ha->pdev->dev, event_log_size, event_log, 745 dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
739 event_log_dma); 746 event_log_dma);
740} 747}
748#endif /* 0 */
741 749
742/** 750/**
743 * qla4xxx_reset_lun - issues LUN Reset 751 * qla4xxx_reset_lun - issues LUN Reset
@@ -834,7 +842,8 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
834 return QLA_SUCCESS; 842 return QLA_SUCCESS;
835} 843}
836 844
837int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr) 845static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
846 dma_addr_t dma_addr)
838{ 847{
839 uint32_t mbox_cmd[MBOX_REG_COUNT]; 848 uint32_t mbox_cmd[MBOX_REG_COUNT];
840 uint32_t mbox_sts[MBOX_REG_COUNT]; 849 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -855,7 +864,7 @@ int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr)
855 return QLA_SUCCESS; 864 return QLA_SUCCESS;
856} 865}
857 866
858int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index) 867static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
859{ 868{
860 uint32_t mbox_cmd[MBOX_REG_COUNT]; 869 uint32_t mbox_cmd[MBOX_REG_COUNT];
861 uint32_t mbox_sts[MBOX_REG_COUNT]; 870 uint32_t mbox_sts[MBOX_REG_COUNT];
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0bfddf893ed0..da21f5fbbf87 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -14,7 +14,7 @@
14/* 14/*
15 * Driver version 15 * Driver version
16 */ 16 */
17char qla4xxx_version_str[40]; 17static char qla4xxx_version_str[40];
18 18
19/* 19/*
20 * SRB allocation cache 20 * SRB allocation cache
@@ -45,8 +45,7 @@ int ql4_mod_unload = 0;
45/* 45/*
46 * SCSI host template entry points 46 * SCSI host template entry points
47 */ 47 */
48 48static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
49void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
50 49
51/* 50/*
52 * iSCSI template entry points 51 * iSCSI template entry points
@@ -1352,7 +1351,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1352 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 1351 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
1353 * supported addressing method. 1352 * supported addressing method.
1354 */ 1353 */
1355void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 1354static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
1356{ 1355{
1357 int retval; 1356 int retval;
1358 1357
@@ -1627,7 +1626,7 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
1627}; 1626};
1628MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 1627MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
1629 1628
1630struct pci_driver qla4xxx_pci_driver = { 1629static struct pci_driver qla4xxx_pci_driver = {
1631 .name = DRIVER_NAME, 1630 .name = DRIVER_NAME,
1632 .id_table = qla4xxx_pci_tbl, 1631 .id_table = qla4xxx_pci_tbl,
1633 .probe = qla4xxx_probe_adapter, 1632 .probe = qla4xxx_probe_adapter,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 3963e7013bd9..e8350c562d24 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -38,7 +38,6 @@
38#include "scsi_logging.h" 38#include "scsi_logging.h"
39 39
40#define SENSE_TIMEOUT (10*HZ) 40#define SENSE_TIMEOUT (10*HZ)
41#define START_UNIT_TIMEOUT (30*HZ)
42 41
43/* 42/*
44 * These should *probably* be handled by the host itself. 43 * These should *probably* be handled by the host itself.
@@ -936,7 +935,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
936 935
937 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++) 936 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
938 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, 937 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
939 START_UNIT_TIMEOUT, 0); 938 scmd->device->timeout, 0);
940 939
941 if (rtn == SUCCESS) 940 if (rtn == SUCCESS)
942 return 0; 941 return 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 61fbcdcbb009..1f5a07bf2a75 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -173,7 +173,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
173 * @retries: number of times to retry request 173 * @retries: number of times to retry request
174 * @flags: or into request flags; 174 * @flags: or into request flags;
175 * 175 *
176 * returns the req->errors value which is the the scsi_cmnd result 176 * returns the req->errors value which is the scsi_cmnd result
177 * field. 177 * field.
178 **/ 178 **/
179int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 179int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 14c4f065b2b8..b4d1ece46f78 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1718,31 +1718,12 @@ fc_starget_delete(struct work_struct *work)
1718 struct fc_rport *rport = 1718 struct fc_rport *rport =
1719 container_of(work, struct fc_rport, stgt_delete_work); 1719 container_of(work, struct fc_rport, stgt_delete_work);
1720 struct Scsi_Host *shost = rport_to_shost(rport); 1720 struct Scsi_Host *shost = rport_to_shost(rport);
1721 unsigned long flags;
1722 struct fc_internal *i = to_fc_internal(shost->transportt); 1721 struct fc_internal *i = to_fc_internal(shost->transportt);
1723 1722
1724 /* 1723 /* Involve the LLDD if possible to terminate all io on the rport. */
1725 * Involve the LLDD if possible. All io on the rport is to 1724 if (i->f->terminate_rport_io)
1726 * be terminated, either as part of the dev_loss_tmo callback
1727 * processing, or via the terminate_rport_io function.
1728 */
1729 if (i->f->dev_loss_tmo_callbk)
1730 i->f->dev_loss_tmo_callbk(rport);
1731 else if (i->f->terminate_rport_io)
1732 i->f->terminate_rport_io(rport); 1725 i->f->terminate_rport_io(rport);
1733 1726
1734 spin_lock_irqsave(shost->host_lock, flags);
1735 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
1736 spin_unlock_irqrestore(shost->host_lock, flags);
1737 if (!cancel_delayed_work(&rport->fail_io_work))
1738 fc_flush_devloss(shost);
1739 if (!cancel_delayed_work(&rport->dev_loss_work))
1740 fc_flush_devloss(shost);
1741 spin_lock_irqsave(shost->host_lock, flags);
1742 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
1743 }
1744 spin_unlock_irqrestore(shost->host_lock, flags);
1745
1746 scsi_remove_target(&rport->dev); 1727 scsi_remove_target(&rport->dev);
1747} 1728}
1748 1729
@@ -1760,6 +1741,7 @@ fc_rport_final_delete(struct work_struct *work)
1760 struct device *dev = &rport->dev; 1741 struct device *dev = &rport->dev;
1761 struct Scsi_Host *shost = rport_to_shost(rport); 1742 struct Scsi_Host *shost = rport_to_shost(rport);
1762 struct fc_internal *i = to_fc_internal(shost->transportt); 1743 struct fc_internal *i = to_fc_internal(shost->transportt);
1744 unsigned long flags;
1763 1745
1764 /* 1746 /*
1765 * if a scan is pending, flush the SCSI Host work_q so that 1747 * if a scan is pending, flush the SCSI Host work_q so that
@@ -1768,13 +1750,37 @@ fc_rport_final_delete(struct work_struct *work)
1768 if (rport->flags & FC_RPORT_SCAN_PENDING) 1750 if (rport->flags & FC_RPORT_SCAN_PENDING)
1769 scsi_flush_work(shost); 1751 scsi_flush_work(shost);
1770 1752
1753 /* involve the LLDD to terminate all pending i/o */
1754 if (i->f->terminate_rport_io)
1755 i->f->terminate_rport_io(rport);
1756
1757 /*
1758 * Cancel any outstanding timers. These should really exist
1759 * only when rmmod'ing the LLDD and we're asking for
1760 * immediate termination of the rports
1761 */
1762 spin_lock_irqsave(shost->host_lock, flags);
1763 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
1764 spin_unlock_irqrestore(shost->host_lock, flags);
1765 if (!cancel_delayed_work(&rport->fail_io_work))
1766 fc_flush_devloss(shost);
1767 if (!cancel_delayed_work(&rport->dev_loss_work))
1768 fc_flush_devloss(shost);
1769 spin_lock_irqsave(shost->host_lock, flags);
1770 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
1771 }
1772 spin_unlock_irqrestore(shost->host_lock, flags);
1773
1771 /* Delete SCSI target and sdevs */ 1774 /* Delete SCSI target and sdevs */
1772 if (rport->scsi_target_id != -1) 1775 if (rport->scsi_target_id != -1)
1773 fc_starget_delete(&rport->stgt_delete_work); 1776 fc_starget_delete(&rport->stgt_delete_work);
1774 else if (i->f->dev_loss_tmo_callbk) 1777
1778 /*
1779 * Notify the driver that the rport is now dead. The LLDD will
1780 * also guarantee that any communication to the rport is terminated
1781 */
1782 if (i->f->dev_loss_tmo_callbk)
1775 i->f->dev_loss_tmo_callbk(rport); 1783 i->f->dev_loss_tmo_callbk(rport);
1776 else if (i->f->terminate_rport_io)
1777 i->f->terminate_rport_io(rport);
1778 1784
1779 transport_remove_device(dev); 1785 transport_remove_device(dev);
1780 device_del(dev); 1786 device_del(dev);
@@ -1963,8 +1969,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1963 } 1969 }
1964 1970
1965 if (match) { 1971 if (match) {
1966 struct delayed_work *work =
1967 &rport->dev_loss_work;
1968 1972
1969 memcpy(&rport->node_name, &ids->node_name, 1973 memcpy(&rport->node_name, &ids->node_name,
1970 sizeof(rport->node_name)); 1974 sizeof(rport->node_name));
@@ -1982,46 +1986,61 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1982 fci->f->dd_fcrport_size); 1986 fci->f->dd_fcrport_size);
1983 1987
1984 /* 1988 /*
1985 * If we were blocked, we were a target. 1989 * If we were not a target, cancel the
1986 * If no longer a target, we leave the timer 1990 * io terminate and rport timers, and
1987 * running in case the port changes roles 1991 * we're done.
1988 * prior to the timer expiring. If the timer 1992 *
1989 * fires, the target will be torn down. 1993 * If we were a target, but our new role
1994 * doesn't indicate a target, leave the
1995 * timers running expecting the role to
1996 * change as the target fully logs in. If
1997 * it doesn't, the target will be torn down.
1998 *
1999 * If we were a target, and our role shows
2000 * we're still a target, cancel the timers
2001 * and kick off a scan.
1990 */ 2002 */
1991 if (!(ids->roles & FC_RPORT_ROLE_FCP_TARGET))
1992 return rport;
1993 2003
1994 /* restart the target */ 2004 /* was a target, not in roles */
2005 if ((rport->scsi_target_id != -1) &&
2006 (!(ids->roles & FC_RPORT_ROLE_FCP_TARGET)))
2007 return rport;
1995 2008
1996 /* 2009 /*
1997 * Stop the target timers first. Take no action 2010 * Stop the fail io and dev_loss timers.
1998 * on the del_timer failure as the state 2011 * If they flush, the port_state will
1999 * machine state change will validate the 2012 * be checked and will NOOP the function.
2000 * transaction.
2001 */ 2013 */
2002 if (!cancel_delayed_work(&rport->fail_io_work)) 2014 if (!cancel_delayed_work(&rport->fail_io_work))
2003 fc_flush_devloss(shost); 2015 fc_flush_devloss(shost);
2004 if (!cancel_delayed_work(work)) 2016 if (!cancel_delayed_work(&rport->dev_loss_work))
2005 fc_flush_devloss(shost); 2017 fc_flush_devloss(shost);
2006 2018
2007 spin_lock_irqsave(shost->host_lock, flags); 2019 spin_lock_irqsave(shost->host_lock, flags);
2008 2020
2009 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; 2021 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2010 2022
2011 /* initiate a scan of the target */ 2023 /* if target, initiate a scan */
2012 rport->flags |= FC_RPORT_SCAN_PENDING; 2024 if (rport->scsi_target_id != -1) {
2013 scsi_queue_work(shost, &rport->scan_work); 2025 rport->flags |= FC_RPORT_SCAN_PENDING;
2014 2026 scsi_queue_work(shost,
2015 spin_unlock_irqrestore(shost->host_lock, flags); 2027 &rport->scan_work);
2016 2028 spin_unlock_irqrestore(shost->host_lock,
2017 scsi_target_unblock(&rport->dev); 2029 flags);
2030 scsi_target_unblock(&rport->dev);
2031 } else
2032 spin_unlock_irqrestore(shost->host_lock,
2033 flags);
2018 2034
2019 return rport; 2035 return rport;
2020 } 2036 }
2021 } 2037 }
2022 } 2038 }
2023 2039
2024 /* Search the bindings array */ 2040 /*
2041 * Search the bindings array
2042 * Note: if never a FCP target, you won't be on this list
2043 */
2025 if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) { 2044 if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
2026 2045
2027 /* search for a matching consistent binding */ 2046 /* search for a matching consistent binding */
@@ -2158,15 +2177,24 @@ fc_remote_port_delete(struct fc_rport *rport)
2158 2177
2159 spin_lock_irqsave(shost->host_lock, flags); 2178 spin_lock_irqsave(shost->host_lock, flags);
2160 2179
2161 /* If no scsi target id mapping, delete it */ 2180 if (rport->port_state != FC_PORTSTATE_ONLINE) {
2162 if (rport->scsi_target_id == -1) {
2163 list_del(&rport->peers);
2164 rport->port_state = FC_PORTSTATE_DELETED;
2165 fc_queue_work(shost, &rport->rport_delete_work);
2166 spin_unlock_irqrestore(shost->host_lock, flags); 2181 spin_unlock_irqrestore(shost->host_lock, flags);
2167 return; 2182 return;
2168 } 2183 }
2169 2184
2185 /*
2186 * In the past, we if this was not an FCP-Target, we would
2187 * unconditionally just jump to deleting the rport.
2188 * However, rports can be used as node containers by the LLDD,
2189 * and its not appropriate to just terminate the rport at the
2190 * first sign of a loss in connectivity. The LLDD may want to
2191 * send ELS traffic to re-validate the login. If the rport is
2192 * immediately deleted, it makes it inappropriate for a node
2193 * container.
2194 * So... we now unconditionally wait dev_loss_tmo before
2195 * destroying an rport.
2196 */
2197
2170 rport->port_state = FC_PORTSTATE_BLOCKED; 2198 rport->port_state = FC_PORTSTATE_BLOCKED;
2171 2199
2172 rport->flags |= FC_RPORT_DEVLOSS_PENDING; 2200 rport->flags |= FC_RPORT_DEVLOSS_PENDING;
@@ -2263,11 +2291,11 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
2263EXPORT_SYMBOL(fc_remote_port_rolechg); 2291EXPORT_SYMBOL(fc_remote_port_rolechg);
2264 2292
2265/** 2293/**
2266 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port that 2294 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port,
2267 * was a SCSI target (thus was blocked), and failed 2295 * which we blocked, and has now failed to return
2268 * to return in the alloted time. 2296 * in the allotted time.
2269 * 2297 *
2270 * @work: rport target that failed to reappear in the alloted time. 2298 * @work: rport target that failed to reappear in the allotted time.
2271 **/ 2299 **/
2272static void 2300static void
2273fc_timeout_deleted_rport(struct work_struct *work) 2301fc_timeout_deleted_rport(struct work_struct *work)
@@ -2283,10 +2311,12 @@ fc_timeout_deleted_rport(struct work_struct *work)
2283 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; 2311 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2284 2312
2285 /* 2313 /*
2286 * If the port is ONLINE, then it came back. Validate it's still an 2314 * If the port is ONLINE, then it came back. If it was a SCSI
2287 * FCP target. If not, tear down the scsi_target on it. 2315 * target, validate it still is. If not, tear down the
2316 * scsi_target on it.
2288 */ 2317 */
2289 if ((rport->port_state == FC_PORTSTATE_ONLINE) && 2318 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
2319 (rport->scsi_target_id != -1) &&
2290 !(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 2320 !(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
2291 dev_printk(KERN_ERR, &rport->dev, 2321 dev_printk(KERN_ERR, &rport->dev,
2292 "blocked FC remote port time out: no longer" 2322 "blocked FC remote port time out: no longer"
@@ -2297,18 +2327,24 @@ fc_timeout_deleted_rport(struct work_struct *work)
2297 return; 2327 return;
2298 } 2328 }
2299 2329
2330 /* NOOP state - we're flushing workq's */
2300 if (rport->port_state != FC_PORTSTATE_BLOCKED) { 2331 if (rport->port_state != FC_PORTSTATE_BLOCKED) {
2301 spin_unlock_irqrestore(shost->host_lock, flags); 2332 spin_unlock_irqrestore(shost->host_lock, flags);
2302 dev_printk(KERN_ERR, &rport->dev, 2333 dev_printk(KERN_ERR, &rport->dev,
2303 "blocked FC remote port time out: leaving target alone\n"); 2334 "blocked FC remote port time out: leaving"
2335 " rport%s alone\n",
2336 (rport->scsi_target_id != -1) ? " and starget" : "");
2304 return; 2337 return;
2305 } 2338 }
2306 2339
2307 if (fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) { 2340 if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
2341 (rport->scsi_target_id == -1)) {
2308 list_del(&rport->peers); 2342 list_del(&rport->peers);
2309 rport->port_state = FC_PORTSTATE_DELETED; 2343 rport->port_state = FC_PORTSTATE_DELETED;
2310 dev_printk(KERN_ERR, &rport->dev, 2344 dev_printk(KERN_ERR, &rport->dev,
2311 "blocked FC remote port time out: removing target\n"); 2345 "blocked FC remote port time out: removing"
2346 " rport%s\n",
2347 (rport->scsi_target_id != -1) ? " and starget" : "");
2312 fc_queue_work(shost, &rport->rport_delete_work); 2348 fc_queue_work(shost, &rport->rport_delete_work);
2313 spin_unlock_irqrestore(shost->host_lock, flags); 2349 spin_unlock_irqrestore(shost->host_lock, flags);
2314 return; 2350 return;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 3158949ffa62..e7b85e832eb5 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -351,6 +351,27 @@ static u8 dc390_clock_speed[] = {100,80,67,57,50, 40, 31, 20};
351 * (DCBs, SRBs, Queueing) 351 * (DCBs, SRBs, Queueing)
352 * 352 *
353 **********************************************************************/ 353 **********************************************************************/
354static void inline dc390_start_segment(struct dc390_srb* pSRB)
355{
356 struct scatterlist *psgl = pSRB->pSegmentList;
357
358 /* start new sg segment */
359 pSRB->SGBusAddr = sg_dma_address(psgl);
360 pSRB->SGToBeXferLen = sg_dma_len(psgl);
361}
362
363static unsigned long inline dc390_advance_segment(struct dc390_srb* pSRB, u32 residue)
364{
365 unsigned long xfer = pSRB->SGToBeXferLen - residue;
366
367 /* xfer more bytes transferred */
368 pSRB->SGBusAddr += xfer;
369 pSRB->TotalXferredLen += xfer;
370 pSRB->SGToBeXferLen = residue;
371
372 return xfer;
373}
374
354static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun) 375static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun)
355{ 376{
356 struct dc390_dcb* pDCB = pACB->pLinkDCB; if (!pDCB) return NULL; 377 struct dc390_dcb* pDCB = pACB->pLinkDCB; if (!pDCB) return NULL;
@@ -625,70 +646,6 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
625 return 0; 646 return 0;
626} 647}
627 648
628//#define DMA_INT EN_DMA_INT /*| EN_PAGE_INT*/
629#define DMA_INT 0
630
631#if DMA_INT
632/* This is similar to AM53C974.c ... */
633static u8
634dc390_dma_intr (struct dc390_acb* pACB)
635{
636 struct dc390_srb* pSRB;
637 u8 dstate;
638 DEBUG0(u16 pstate; struct pci_dev *pdev = pACB->pdev);
639
640 DEBUG0(pci_read_config_word(pdev, PCI_STATUS, &pstate));
641 DEBUG0(if (pstate & (PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY))\
642 { printk(KERN_WARNING "DC390: PCI state = %04x!\n", pstate); \
643 pci_write_config_word(pdev, PCI_STATUS, (PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY));});
644
645 dstate = DC390_read8 (DMA_Status);
646
647 if (! pACB->pActiveDCB || ! pACB->pActiveDCB->pActiveSRB) return dstate;
648 else pSRB = pACB->pActiveDCB->pActiveSRB;
649
650 if (dstate & (DMA_XFER_ABORT | DMA_XFER_ERROR | POWER_DOWN | PCI_MS_ABORT))
651 {
652 printk (KERN_ERR "DC390: DMA error (%02x)!\n", dstate);
653 return dstate;
654 }
655 if (dstate & DMA_XFER_DONE)
656 {
657 u32 residual, xferCnt; int ctr = 6000000;
658 if (! (DC390_read8 (DMA_Cmd) & READ_DIRECTION))
659 {
660 do
661 {
662 DEBUG1(printk (KERN_DEBUG "DC390: read residual bytes ... \n"));
663 dstate = DC390_read8 (DMA_Status);
664 residual = DC390_read8 (CtcReg_Low) | DC390_read8 (CtcReg_Mid) << 8 |
665 DC390_read8 (CtcReg_High) << 16;
666 residual += DC390_read8 (Current_Fifo) & 0x1f;
667 } while (residual && ! (dstate & SCSI_INTERRUPT) && --ctr);
668 if (!ctr) printk (KERN_CRIT "DC390: dma_intr: DMA aborted unfinished: %06x bytes remain!!\n", DC390_read32 (DMA_Wk_ByteCntr));
669 /* residual = ... */
670 }
671 else
672 residual = 0;
673
674 /* ??? */
675
676 xferCnt = pSRB->SGToBeXferLen - residual;
677 pSRB->SGBusAddr += xferCnt;
678 pSRB->TotalXferredLen += xferCnt;
679 pSRB->SGToBeXferLen = residual;
680# ifdef DC390_DEBUG0
681 printk (KERN_INFO "DC390: DMA: residual = %i, xfer = %i\n",
682 (unsigned int)residual, (unsigned int)xferCnt);
683# endif
684
685 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
686 }
687 dc390_laststatus &= ~0xff000000; dc390_laststatus |= dstate << 24;
688 return dstate;
689}
690#endif
691
692 649
693static void __inline__ 650static void __inline__
694dc390_InvalidCmd(struct dc390_acb* pACB) 651dc390_InvalidCmd(struct dc390_acb* pACB)
@@ -708,9 +665,6 @@ DC390_Interrupt(void *dev_id)
708 u8 phase; 665 u8 phase;
709 void (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *); 666 void (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *);
710 u8 istate, istatus; 667 u8 istate, istatus;
711#if DMA_INT
712 u8 dstatus;
713#endif
714 668
715 sstatus = DC390_read8 (Scsi_Status); 669 sstatus = DC390_read8 (Scsi_Status);
716 if( !(sstatus & INTERRUPT) ) 670 if( !(sstatus & INTERRUPT) )
@@ -718,22 +672,9 @@ DC390_Interrupt(void *dev_id)
718 672
719 DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus)); 673 DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus));
720 674
721#if DMA_INT
722 spin_lock_irq(pACB->pScsiHost->host_lock);
723 dstatus = dc390_dma_intr (pACB);
724 spin_unlock_irq(pACB->pScsiHost->host_lock);
725
726 DEBUG1(printk (KERN_DEBUG "dstatus=%02x,", dstatus));
727 if (! (dstatus & SCSI_INTERRUPT))
728 {
729 DEBUG0(printk (KERN_WARNING "DC390 Int w/o SCSI actions (only DMA?)\n"));
730 return IRQ_NONE;
731 }
732#else
733 //DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT); 675 //DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
734 //dstatus = DC390_read8 (DMA_Status); 676 //dstatus = DC390_read8 (DMA_Status);
735 //DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); 677 //DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
736#endif
737 678
738 spin_lock_irq(pACB->pScsiHost->host_lock); 679 spin_lock_irq(pACB->pScsiHost->host_lock);
739 680
@@ -821,11 +762,10 @@ static irqreturn_t do_DC390_Interrupt(int irq, void *dev_id)
821} 762}
822 763
823static void 764static void
824dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) 765dc390_DataOut_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
825{ 766{
826 u8 sstatus; 767 u8 sstatus;
827 struct scatterlist *psgl; 768 u32 ResidCnt;
828 u32 ResidCnt, xferCnt;
829 u8 dstate = 0; 769 u8 dstate = 0;
830 770
831 sstatus = *psstatus; 771 sstatus = *psstatus;
@@ -856,42 +796,35 @@ dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
856 if( pSRB->SGIndex < pSRB->SGcount ) 796 if( pSRB->SGIndex < pSRB->SGcount )
857 { 797 {
858 pSRB->pSegmentList++; 798 pSRB->pSegmentList++;
859 psgl = pSRB->pSegmentList;
860 799
861 pSRB->SGBusAddr = cpu_to_le32(pci_dma_lo32(sg_dma_address(psgl))); 800 dc390_start_segment(pSRB);
862 pSRB->SGToBeXferLen = cpu_to_le32(sg_dma_len(psgl));
863 } 801 }
864 else 802 else
865 pSRB->SGToBeXferLen = 0; 803 pSRB->SGToBeXferLen = 0;
866 } 804 }
867 else 805 else
868 { 806 {
869 ResidCnt = (u32) DC390_read8 (Current_Fifo) & 0x1f; 807 ResidCnt = ((u32) DC390_read8 (Current_Fifo) & 0x1f) +
870 ResidCnt |= (u32) DC390_read8 (CtcReg_High) << 16; 808 (((u32) DC390_read8 (CtcReg_High) << 16) |
871 ResidCnt |= (u32) DC390_read8 (CtcReg_Mid) << 8; 809 ((u32) DC390_read8 (CtcReg_Mid) << 8) |
872 ResidCnt += (u32) DC390_read8 (CtcReg_Low); 810 (u32) DC390_read8 (CtcReg_Low));
873 811
874 xferCnt = pSRB->SGToBeXferLen - ResidCnt; 812 dc390_advance_segment(pSRB, ResidCnt);
875 pSRB->SGBusAddr += xferCnt;
876 pSRB->TotalXferredLen += xferCnt;
877 pSRB->SGToBeXferLen = ResidCnt;
878 } 813 }
879 } 814 }
880 if ((*psstatus & 7) != SCSI_DATA_OUT) 815 if ((*psstatus & 7) != SCSI_DATA_OUT)
881 { 816 {
882 DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */ 817 DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD);
883 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); 818 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
884 } 819 }
885} 820}
886 821
887static void 822static void
888dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) 823dc390_DataIn_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
889{ 824{
890 u8 sstatus, residual, bval; 825 u8 sstatus, residual, bval;
891 struct scatterlist *psgl; 826 u32 ResidCnt, i;
892 u32 ResidCnt, i;
893 unsigned long xferCnt; 827 unsigned long xferCnt;
894 u8 *ptr;
895 828
896 sstatus = *psstatus; 829 sstatus = *psstatus;
897 830
@@ -922,19 +855,17 @@ dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
922 DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16) \ 855 DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16) \
923 + ((unsigned long) DC390_read8 (CtcReg_Mid) << 8) \ 856 + ((unsigned long) DC390_read8 (CtcReg_Mid) << 8) \
924 + ((unsigned long) DC390_read8 (CtcReg_Low))); 857 + ((unsigned long) DC390_read8 (CtcReg_Low)));
925 DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%i,ToBeXfer=%li),", ResidCnt, pSRB->SGToBeXferLen)); 858 DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%u,ToBeXfer=%lu),", ResidCnt, pSRB->SGToBeXferLen));
926 859
927 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */ 860 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
928 861
929 pSRB->TotalXferredLen += pSRB->SGToBeXferLen; 862 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
930 pSRB->SGIndex++; 863 pSRB->SGIndex++;
931 if( pSRB->SGIndex < pSRB->SGcount ) 864 if( pSRB->SGIndex < pSRB->SGcount )
932 { 865 {
933 pSRB->pSegmentList++; 866 pSRB->pSegmentList++;
934 psgl = pSRB->pSegmentList;
935 867
936 pSRB->SGBusAddr = cpu_to_le32(pci_dma_lo32(sg_dma_address(psgl))); 868 dc390_start_segment(pSRB);
937 pSRB->SGToBeXferLen = cpu_to_le32(sg_dma_len(psgl));
938 } 869 }
939 else 870 else
940 pSRB->SGToBeXferLen = 0; 871 pSRB->SGToBeXferLen = 0;
@@ -973,47 +904,45 @@ din_1:
973 } 904 }
974 /* It seems a DMA Blast abort isn't that bad ... */ 905 /* It seems a DMA Blast abort isn't that bad ... */
975 if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n"); 906 if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n");
976 //DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */ 907 //DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
977 dc390_laststatus &= ~0xff000000; dc390_laststatus |= bval << 24; 908 dc390_laststatus &= ~0xff000000;
909 dc390_laststatus |= bval << 24;
978 910
979 DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval)); 911 DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval));
980 ResidCnt = (u32) DC390_read8 (CtcReg_High); 912 ResidCnt = (((u32) DC390_read8 (CtcReg_High) << 16) |
981 ResidCnt <<= 8; 913 ((u32) DC390_read8 (CtcReg_Mid) << 8)) |
982 ResidCnt |= (u32) DC390_read8 (CtcReg_Mid); 914 (u32) DC390_read8 (CtcReg_Low);
983 ResidCnt <<= 8; 915
984 ResidCnt |= (u32) DC390_read8 (CtcReg_Low); 916 xferCnt = dc390_advance_segment(pSRB, ResidCnt);
985 917
986 xferCnt = pSRB->SGToBeXferLen - ResidCnt; 918 if (residual) {
987 pSRB->SGBusAddr += xferCnt; 919 size_t count = 1;
988 pSRB->TotalXferredLen += xferCnt; 920 size_t offset = pSRB->SGBusAddr - sg_dma_address(pSRB->pSegmentList);
989 pSRB->SGToBeXferLen = ResidCnt; 921 unsigned long flags;
990 922 u8 *ptr;
991 if( residual ) 923
992 {
993 static int feedback_requested;
994 bval = DC390_read8 (ScsiFifo); /* get one residual byte */ 924 bval = DC390_read8 (ScsiFifo); /* get one residual byte */
995 925
996 if (!feedback_requested) { 926 local_irq_save(flags);
997 feedback_requested = 1; 927 ptr = scsi_kmap_atomic_sg(pSRB->pSegmentList, pSRB->SGcount, &offset, &count);
998 printk(KERN_WARNING "%s: Please, contact <linux-scsi@vger.kernel.org> " 928 if (likely(ptr)) {
999 "to help improve support for your system.\n", __FILE__); 929 *(ptr + offset) = bval;
930 scsi_kunmap_atomic_sg(ptr);
1000 } 931 }
932 local_irq_restore(flags);
933 WARN_ON(!ptr);
1001 934
1002 ptr = (u8 *) bus_to_virt( pSRB->SGBusAddr ); 935 /* 1 more byte read */
1003 *ptr = bval; 936 xferCnt += dc390_advance_segment(pSRB, pSRB->SGToBeXferLen - 1);
1004 pSRB->SGBusAddr++; xferCnt++;
1005 pSRB->TotalXferredLen++;
1006 pSRB->SGToBeXferLen--;
1007 } 937 }
1008 DEBUG1(printk (KERN_DEBUG "Xfered: %li, Total: %li, Remaining: %li\n", xferCnt,\ 938 DEBUG1(printk (KERN_DEBUG "Xfered: %lu, Total: %lu, Remaining: %lu\n", xferCnt,\
1009 pSRB->TotalXferredLen, pSRB->SGToBeXferLen)); 939 pSRB->TotalXferredLen, pSRB->SGToBeXferLen));
1010
1011 } 940 }
1012 } 941 }
1013 if ((*psstatus & 7) != SCSI_DATA_IN) 942 if ((*psstatus & 7) != SCSI_DATA_IN)
1014 { 943 {
1015 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); 944 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
1016 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */ 945 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
1017 } 946 }
1018} 947}
1019 948
@@ -1216,7 +1145,7 @@ dc390_MsgIn_set_sync (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1216 1145
1217 1146
1218/* handle RESTORE_PTR */ 1147/* handle RESTORE_PTR */
1219/* I presume, this command is already mapped, so, have to remap. */ 1148/* This doesn't look very healthy... to-be-fixed */
1220static void 1149static void
1221dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB) 1150dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1222{ 1151{
@@ -1225,6 +1154,7 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1225 pSRB->TotalXferredLen = 0; 1154 pSRB->TotalXferredLen = 0;
1226 pSRB->SGIndex = 0; 1155 pSRB->SGIndex = 0;
1227 if (pcmd->use_sg) { 1156 if (pcmd->use_sg) {
1157 size_t saved;
1228 pSRB->pSegmentList = (struct scatterlist *)pcmd->request_buffer; 1158 pSRB->pSegmentList = (struct scatterlist *)pcmd->request_buffer;
1229 psgl = pSRB->pSegmentList; 1159 psgl = pSRB->pSegmentList;
1230 //dc390_pci_sync(pSRB); 1160 //dc390_pci_sync(pSRB);
@@ -1236,15 +1166,16 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1236 if( pSRB->SGIndex < pSRB->SGcount ) 1166 if( pSRB->SGIndex < pSRB->SGcount )
1237 { 1167 {
1238 pSRB->pSegmentList++; 1168 pSRB->pSegmentList++;
1239 psgl = pSRB->pSegmentList; 1169
1240 pSRB->SGBusAddr = cpu_to_le32(pci_dma_lo32(sg_dma_address(psgl))); 1170 dc390_start_segment(pSRB);
1241 pSRB->SGToBeXferLen = cpu_to_le32(sg_dma_len(psgl));
1242 } 1171 }
1243 else 1172 else
1244 pSRB->SGToBeXferLen = 0; 1173 pSRB->SGToBeXferLen = 0;
1245 } 1174 }
1246 pSRB->SGToBeXferLen -= (pSRB->Saved_Ptr - pSRB->TotalXferredLen); 1175
1247 pSRB->SGBusAddr += (pSRB->Saved_Ptr - pSRB->TotalXferredLen); 1176 saved = pSRB->Saved_Ptr - pSRB->TotalXferredLen;
1177 pSRB->SGToBeXferLen -= saved;
1178 pSRB->SGBusAddr += saved;
1248 printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n", 1179 printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
1249 pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr); 1180 pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);
1250 1181
@@ -1365,7 +1296,6 @@ dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
1365static void 1296static void
1366dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir) 1297dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
1367{ 1298{
1368 struct scatterlist *psgl;
1369 unsigned long lval; 1299 unsigned long lval;
1370 struct dc390_dcb* pDCB = pACB->pActiveDCB; 1300 struct dc390_dcb* pDCB = pACB->pActiveDCB;
1371 1301
@@ -1391,12 +1321,11 @@ dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
1391 1321
1392 if( pSRB->SGIndex < pSRB->SGcount ) 1322 if( pSRB->SGIndex < pSRB->SGcount )
1393 { 1323 {
1394 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir /* | DMA_INT */); 1324 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
1395 if( !pSRB->SGToBeXferLen ) 1325 if( !pSRB->SGToBeXferLen )
1396 { 1326 {
1397 psgl = pSRB->pSegmentList; 1327 dc390_start_segment(pSRB);
1398 pSRB->SGBusAddr = cpu_to_le32(pci_dma_lo32(sg_dma_address(psgl))); 1328
1399 pSRB->SGToBeXferLen = cpu_to_le32(sg_dma_len(psgl));
1400 DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment.")); 1329 DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment."));
1401 } 1330 }
1402 lval = pSRB->SGToBeXferLen; 1331 lval = pSRB->SGToBeXferLen;
@@ -1410,12 +1339,12 @@ dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
1410 DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen); 1339 DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen);
1411 DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr); 1340 DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr);
1412 1341
1413 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); /* | DMA_INT; */ 1342 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
1414 pSRB->SRBState = SRB_DATA_XFER; 1343 pSRB->SRBState = SRB_DATA_XFER;
1415 1344
1416 DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD); 1345 DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD);
1417 1346
1418 DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir | DMA_INT); 1347 DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
1419 //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT)); 1348 //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT));
1420 //DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status))); 1349 //DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status)));
1421 //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT)); 1350 //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT));
@@ -1436,8 +1365,8 @@ dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
1436 pSRB->SRBState |= SRB_XFERPAD; 1365 pSRB->SRBState |= SRB_XFERPAD;
1437 DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE); 1366 DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE);
1438/* 1367/*
1439 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); // | DMA_INT; 1368 DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
1440 DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir | DMA_INT); 1369 DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
1441*/ 1370*/
1442 } 1371 }
1443} 1372}
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
index 9b66fa8d38d9..c3d8c80cfb38 100644
--- a/drivers/scsi/tmscsim.h
+++ b/drivers/scsi/tmscsim.h
@@ -19,14 +19,6 @@
19 19
20#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */ 20#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */
21 21
22#define pci_dma_lo32(a) (a & 0xffffffff)
23
24typedef u8 UCHAR; /* 8 bits */
25typedef u16 USHORT; /* 16 bits */
26typedef u32 UINT; /* 32 bits */
27typedef unsigned long ULONG; /* 32/64 bits */
28
29
30/* 22/*
31;----------------------------------------------------------------------- 23;-----------------------------------------------------------------------
32; SCSI Request Block 24; SCSI Request Block
@@ -43,7 +35,9 @@ struct scatterlist *pSegmentList;
43 35
44struct scatterlist Segmentx; /* make a one entry of S/G list table */ 36struct scatterlist Segmentx; /* make a one entry of S/G list table */
45 37
46unsigned long SGBusAddr; /*;a segment starting address as seen by AM53C974A*/ 38unsigned long SGBusAddr; /*;a segment starting address as seen by AM53C974A
39 in CPU endianness. We're only getting 32-bit bus
40 addresses by default */
47unsigned long SGToBeXferLen; /*; to be xfer length */ 41unsigned long SGToBeXferLen; /*; to be xfer length */
48unsigned long TotalXferredLen; 42unsigned long TotalXferredLen;
49unsigned long SavedTotXLen; 43unsigned long SavedTotXLen;
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 66e7bc985797..1d8a2f6bb8eb 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -22,10 +22,7 @@
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/arch/board.h> 23#include <asm/arch/board.h>
24#include <asm/arch/gpio.h> 24#include <asm/arch/gpio.h>
25
26#ifdef CONFIG_ARCH_AT91
27#include <asm/arch/cpu.h> 25#include <asm/arch/cpu.h>
28#endif
29 26
30#include "atmel_spi.h" 27#include "atmel_spi.h"
31 28
@@ -552,10 +549,8 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
552 goto out_free_buffer; 549 goto out_free_buffer;
553 as->irq = irq; 550 as->irq = irq;
554 as->clk = clk; 551 as->clk = clk;
555#ifdef CONFIG_ARCH_AT91
556 if (!cpu_is_at91rm9200()) 552 if (!cpu_is_at91rm9200())
557 as->new_1 = 1; 553 as->new_1 = 1;
558#endif
559 554
560 ret = request_irq(irq, atmel_spi_interrupt, 0, 555 ret = request_irq(irq, atmel_spi_interrupt, 0,
561 pdev->dev.bus_id, master); 556 pdev->dev.bus_id, master);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index b082d95bbbaa..11e9b15ca45a 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1033,7 +1033,7 @@ static int usbatm_do_heavy_init(void *arg)
1033 1033
1034static int usbatm_heavy_init(struct usbatm_data *instance) 1034static int usbatm_heavy_init(struct usbatm_data *instance)
1035{ 1035{
1036 int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_KERNEL); 1036 int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_FS | CLONE_FILES);
1037 1037
1038 if (ret < 0) { 1038 if (ret < 0) {
1039 usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret); 1039 usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret);
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index b5332e679c46..88fb56d5db8f 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -1307,7 +1307,7 @@ static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
1307} 1307}
1308 1308
1309 1309
1310/* remove a service from the the device 1310/* remove a service from the device
1311 scp->id must be set! */ 1311 scp->id must be set! */
1312static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp) 1312static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
1313{ 1313{
diff --git a/drivers/usb/net/usbnet.h b/drivers/usb/net/usbnet.h
index cbb53e065d6c..82db5a8e528e 100644
--- a/drivers/usb/net/usbnet.h
+++ b/drivers/usb/net/usbnet.h
@@ -129,7 +129,7 @@ extern void usbnet_disconnect(struct usb_interface *);
129 129
130 130
131/* Drivers that reuse some of the standard USB CDC infrastructure 131/* Drivers that reuse some of the standard USB CDC infrastructure
132 * (notably, using multiple interfaces according to the the CDC 132 * (notably, using multiple interfaces according to the CDC
133 * union descriptor) get some helper code. 133 * union descriptor) get some helper code.
134 */ 134 */
135struct cdc_state { 135struct cdc_state {
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ba5d1dc03036..3efe67092f15 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -558,7 +558,7 @@ config USB_SERIAL_DEBUG
558 tristate "USB Debugging Device" 558 tristate "USB Debugging Device"
559 depends on USB_SERIAL 559 depends on USB_SERIAL
560 help 560 help
561 Say Y here if you have a USB debugging device used to recieve 561 Say Y here if you have a USB debugging device used to receive
562 debugging data from another machine. The most common of these 562 debugging data from another machine. The most common of these
563 devices is the NetChip TurboCONNECT device. 563 devices is the NetChip TurboCONNECT device.
564 564
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b675735bfbee..fbc8c27d5d99 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -9,7 +9,7 @@
9 * The device works as an standard CDC device, it has 2 interfaces, the first 9 * The device works as an standard CDC device, it has 2 interfaces, the first
10 * one is for firmware access and the second is the serial one. 10 * one is for firmware access and the second is the serial one.
11 * The protocol is very simply, there are two posibilities reading or writing. 11 * The protocol is very simply, there are two posibilities reading or writing.
12 * When writting the first urb must have a Header that starts with 0x20 0x29 the 12 * When writing the first urb must have a Header that starts with 0x20 0x29 the
13 * next two bytes must say how much data will be sended. 13 * next two bytes must say how much data will be sended.
14 * When reading the process is almost equal except that the header starts with 14 * When reading the process is almost equal except that the header starts with
15 * 0x00 0x20. 15 * 0x00 0x20.
@@ -18,7 +18,7 @@
18 * buffer: The First and Second byte is used for a Header, the Third and Fourth 18 * buffer: The First and Second byte is used for a Header, the Third and Fourth
19 * tells the device the amount of information the package holds. 19 * tells the device the amount of information the package holds.
20 * Packages are 60 bytes long Header Stuff. 20 * Packages are 60 bytes long Header Stuff.
21 * When writting to the device the first two bytes of the header are 0x20 0x29 21 * When writing to the device the first two bytes of the header are 0x20 0x29
22 * When reading the bytes are 0x00 0x20, or 0x00 0x10, there is an strange 22 * When reading the bytes are 0x00 0x20, or 0x00 0x10, there is an strange
23 * situation, when too much data arrives to the device because it sends the data 23 * situation, when too much data arrives to the device because it sends the data
24 * but with out the header. I will use a simply hack to override this situation, 24 * but with out the header. I will use a simply hack to override this situation,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 18f74ac76565..4807f960150b 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2465,7 +2465,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2465 ((edge_serial->is_epic) && 2465 ((edge_serial->is_epic) &&
2466 (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) && 2466 (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) &&
2467 (regNum == MCR))) { 2467 (regNum == MCR))) {
2468 dbg("SendCmdWriteUartReg - Not writting to MCR Register"); 2468 dbg("SendCmdWriteUartReg - Not writing to MCR Register");
2469 return 0; 2469 return 0;
2470 } 2470 }
2471 2471
@@ -2473,7 +2473,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2473 ((edge_serial->is_epic) && 2473 ((edge_serial->is_epic) &&
2474 (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) && 2474 (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) &&
2475 (regNum == LCR))) { 2475 (regNum == LCR))) {
2476 dbg ("SendCmdWriteUartReg - Not writting to LCR Register"); 2476 dbg ("SendCmdWriteUartReg - Not writing to LCR Register");
2477 return 0; 2477 return 0;
2478 } 2478 }
2479 2479
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1132ba5ff391..9a256d2ff9dc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1348,6 +1348,20 @@ config FB_VOODOO1
1348 Please read the <file:Documentation/fb/README-sstfb.txt> for supported 1348 Please read the <file:Documentation/fb/README-sstfb.txt> for supported
1349 options and other important info support. 1349 options and other important info support.
1350 1350
1351config FB_VT8623
1352 tristate "VIA VT8623 support"
1353 depends on FB && PCI
1354 select FB_CFB_FILLRECT
1355 select FB_CFB_COPYAREA
1356 select FB_CFB_IMAGEBLIT
1357 select FB_TILEBLITTING
1358 select FB_SVGALIB
1359 select VGASTATE
1360 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1361 ---help---
1362 Driver for CastleRock integrated graphics core in the
1363 VIA VT8623 [Apollo CLE266] chipset.
1364
1351config FB_CYBLA 1365config FB_CYBLA
1352 tristate "Cyberblade/i1 support" 1366 tristate "Cyberblade/i1 support"
1353 depends on FB && PCI && X86_32 && !64BIT 1367 depends on FB && PCI && X86_32 && !64BIT
@@ -1401,6 +1415,20 @@ config FB_TRIDENT_ACCEL
1401 This will compile the Trident frame buffer device with 1415 This will compile the Trident frame buffer device with
1402 acceleration functions. 1416 acceleration functions.
1403 1417
1418config FB_ARK
1419 tristate "ARK 2000PV support"
1420 depends on FB && PCI
1421 select FB_CFB_FILLRECT
1422 select FB_CFB_COPYAREA
1423 select FB_CFB_IMAGEBLIT
1424 select FB_TILEBLITTING
1425 select FB_SVGALIB
1426 select VGASTATE
1427 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1428 ---help---
1429 Driver for PCI graphics boards with ARK 2000PV chip
1430 and ICS 5342 RAMDAC.
1431
1404config FB_PM3 1432config FB_PM3
1405 tristate "Permedia3 support" 1433 tristate "Permedia3 support"
1406 depends on FB && PCI && BROKEN 1434 depends on FB && PCI && BROKEN
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index a916c204274f..0b70567458fb 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -54,10 +54,12 @@ obj-$(CONFIG_FB_VALKYRIE) += valkyriefb.o
54obj-$(CONFIG_FB_CT65550) += chipsfb.o 54obj-$(CONFIG_FB_CT65550) += chipsfb.o
55obj-$(CONFIG_FB_IMSTT) += imsttfb.o 55obj-$(CONFIG_FB_IMSTT) += imsttfb.o
56obj-$(CONFIG_FB_FM2) += fm2fb.o 56obj-$(CONFIG_FB_FM2) += fm2fb.o
57obj-$(CONFIG_FB_VT8623) += vt8623fb.o
57obj-$(CONFIG_FB_CYBLA) += cyblafb.o 58obj-$(CONFIG_FB_CYBLA) += cyblafb.o
58obj-$(CONFIG_FB_TRIDENT) += tridentfb.o 59obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
59obj-$(CONFIG_FB_LE80578) += vermilion/ 60obj-$(CONFIG_FB_LE80578) += vermilion/
60obj-$(CONFIG_FB_S3) += s3fb.o 61obj-$(CONFIG_FB_S3) += s3fb.o
62obj-$(CONFIG_FB_ARK) += arkfb.o
61obj-$(CONFIG_FB_STI) += stifb.o 63obj-$(CONFIG_FB_STI) += stifb.o
62obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o 64obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o
63obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o 65obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
new file mode 100644
index 000000000000..ba6fede5c466
--- /dev/null
+++ b/drivers/video/arkfb.c
@@ -0,0 +1,1200 @@
1/*
2 * linux/drivers/video/arkfb.c -- Frame buffer device driver for ARK 2000PV
3 * with ICS 5342 dac (it is easy to add support for different dacs).
4 *
5 * Copyright (c) 2007 Ondrej Zajicek <santiago@crfreenet.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for
9 * more details.
10 *
11 * Code is based on s3fb
12 */
13
14#include <linux/version.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/tty.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/fb.h>
24#include <linux/svga.h>
25#include <linux/init.h>
26#include <linux/pci.h>
27#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
28#include <video/vga.h>
29
30#ifdef CONFIG_MTRR
31#include <asm/mtrr.h>
32#endif
33
34struct arkfb_info {
35 int mclk_freq;
36 int mtrr_reg;
37
38 struct dac_info *dac;
39 struct vgastate state;
40 struct mutex open_lock;
41 unsigned int ref_count;
42 u32 pseudo_palette[16];
43};
44
45
46/* ------------------------------------------------------------------------- */
47
48
49static const struct svga_fb_format arkfb_formats[] = {
50 { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
51 FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 8},
52 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
53 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
54 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
55 FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16},
56 { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
57 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
58 {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
59 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
60 {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
61 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
62 {24, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
63 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 8, 8},
64 {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
65 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
66 SVGA_FORMAT_END
67};
68
69
70/* CRT timing register sets */
71
72static const struct vga_regset ark_h_total_regs[] = {{0x00, 0, 7}, {0x41, 7, 7}, VGA_REGSET_END};
73static const struct vga_regset ark_h_display_regs[] = {{0x01, 0, 7}, {0x41, 6, 6}, VGA_REGSET_END};
74static const struct vga_regset ark_h_blank_start_regs[] = {{0x02, 0, 7}, {0x41, 5, 5}, VGA_REGSET_END};
75static const struct vga_regset ark_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7 }, VGA_REGSET_END};
76static const struct vga_regset ark_h_sync_start_regs[] = {{0x04, 0, 7}, {0x41, 4, 4}, VGA_REGSET_END};
77static const struct vga_regset ark_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
78
79static const struct vga_regset ark_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x40, 7, 7}, VGA_REGSET_END};
80static const struct vga_regset ark_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x40, 6, 6}, VGA_REGSET_END};
81static const struct vga_regset ark_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x40, 5, 5}, VGA_REGSET_END};
82// const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 6}, VGA_REGSET_END};
83static const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
84static const struct vga_regset ark_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x40, 4, 4}, VGA_REGSET_END};
85static const struct vga_regset ark_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
86
87static const struct vga_regset ark_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, VGA_REGSET_END};
88static const struct vga_regset ark_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x40, 0, 2}, VGA_REGSET_END};
89static const struct vga_regset ark_offset_regs[] = {{0x13, 0, 7}, {0x41, 3, 3}, VGA_REGSET_END};
90
91static const struct svga_timing_regs ark_timing_regs = {
92 ark_h_total_regs, ark_h_display_regs, ark_h_blank_start_regs,
93 ark_h_blank_end_regs, ark_h_sync_start_regs, ark_h_sync_end_regs,
94 ark_v_total_regs, ark_v_display_regs, ark_v_blank_start_regs,
95 ark_v_blank_end_regs, ark_v_sync_start_regs, ark_v_sync_end_regs,
96};
97
98
99/* ------------------------------------------------------------------------- */
100
101
102/* Module parameters */
103
104static char *mode = "640x480-8@60";
105
106#ifdef CONFIG_MTRR
107static int mtrr = 1;
108#endif
109
110MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
111MODULE_LICENSE("GPL");
112MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
113
114module_param(mode, charp, 0444);
115MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
116
117#ifdef CONFIG_MTRR
118module_param(mtrr, int, 0444);
119MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
120#endif
121
122static int threshold = 4;
123
124module_param(threshold, int, 0644);
125MODULE_PARM_DESC(threshold, "FIFO threshold");
126
127
128/* ------------------------------------------------------------------------- */
129
130
131static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
132{
133 const u8 *font = map->data;
134 u8 __iomem *fb = (u8 __iomem *)info->screen_base;
135 int i, c;
136
137 if ((map->width != 8) || (map->height != 16) ||
138 (map->depth != 1) || (map->length != 256)) {
139 printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
140 "height %d, depth %d, length %d\n", info->node,
141 map->width, map->height, map->depth, map->length);
142 return;
143 }
144
145 fb += 2;
146 for (c = 0; c < map->length; c++) {
147 for (i = 0; i < map->height; i++) {
148 fb_writeb(font[i], &fb[i * 4]);
149 fb_writeb(font[i], &fb[i * 4 + (128 * 8)]);
150 }
151 fb += 128;
152
153 if ((c % 8) == 7)
154 fb += 128*8;
155
156 font += map->height;
157 }
158}
159
160static struct fb_tile_ops arkfb_tile_ops = {
161 .fb_settile = arkfb_settile,
162 .fb_tilecopy = svga_tilecopy,
163 .fb_tilefill = svga_tilefill,
164 .fb_tileblit = svga_tileblit,
165 .fb_tilecursor = svga_tilecursor,
166 .fb_get_tilemax = svga_get_tilemax,
167};
168
169
170/* ------------------------------------------------------------------------- */
171
172
173/* image data is MSB-first, fb structure is MSB-first too */
174static inline u32 expand_color(u32 c)
175{
176 return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
177}
178
179/* arkfb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
180static void arkfb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
181{
182 u32 fg = expand_color(image->fg_color);
183 u32 bg = expand_color(image->bg_color);
184 const u8 *src1, *src;
185 u8 __iomem *dst1;
186 u32 __iomem *dst;
187 u32 val;
188 int x, y;
189
190 src1 = image->data;
191 dst1 = info->screen_base + (image->dy * info->fix.line_length)
192 + ((image->dx / 8) * 4);
193
194 for (y = 0; y < image->height; y++) {
195 src = src1;
196 dst = (u32 __iomem *) dst1;
197 for (x = 0; x < image->width; x += 8) {
198 val = *(src++) * 0x01010101;
199 val = (val & fg) | (~val & bg);
200 fb_writel(val, dst++);
201 }
202 src1 += image->width / 8;
203 dst1 += info->fix.line_length;
204 }
205
206}
207
208/* arkfb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
209static void arkfb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
210{
211 u32 fg = expand_color(rect->color);
212 u8 __iomem *dst1;
213 u32 __iomem *dst;
214 int x, y;
215
216 dst1 = info->screen_base + (rect->dy * info->fix.line_length)
217 + ((rect->dx / 8) * 4);
218
219 for (y = 0; y < rect->height; y++) {
220 dst = (u32 __iomem *) dst1;
221 for (x = 0; x < rect->width; x += 8) {
222 fb_writel(fg, dst++);
223 }
224 dst1 += info->fix.line_length;
225 }
226
227}
228
229
230/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
231static inline u32 expand_pixel(u32 c)
232{
233 return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
234 ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
235}
236
237/* arkfb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
238static void arkfb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
239{
240 u32 fg = image->fg_color * 0x11111111;
241 u32 bg = image->bg_color * 0x11111111;
242 const u8 *src1, *src;
243 u8 __iomem *dst1;
244 u32 __iomem *dst;
245 u32 val;
246 int x, y;
247
248 src1 = image->data;
249 dst1 = info->screen_base + (image->dy * info->fix.line_length)
250 + ((image->dx / 8) * 4);
251
252 for (y = 0; y < image->height; y++) {
253 src = src1;
254 dst = (u32 __iomem *) dst1;
255 for (x = 0; x < image->width; x += 8) {
256 val = expand_pixel(*(src++));
257 val = (val & fg) | (~val & bg);
258 fb_writel(val, dst++);
259 }
260 src1 += image->width / 8;
261 dst1 += info->fix.line_length;
262 }
263
264}
265
266static void arkfb_imageblit(struct fb_info *info, const struct fb_image *image)
267{
268 if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
269 && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
270 if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
271 arkfb_iplan_imageblit(info, image);
272 else
273 arkfb_cfb4_imageblit(info, image);
274 } else
275 cfb_imageblit(info, image);
276}
277
278static void arkfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
279{
280 if ((info->var.bits_per_pixel == 4)
281 && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
282 && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
283 arkfb_iplan_fillrect(info, rect);
284 else
285 cfb_fillrect(info, rect);
286}
287
288
289/* ------------------------------------------------------------------------- */
290
291
292enum
293{
294 DAC_PSEUDO8_8,
295 DAC_RGB1555_8,
296 DAC_RGB0565_8,
297 DAC_RGB0888_8,
298 DAC_RGB8888_8,
299 DAC_PSEUDO8_16,
300 DAC_RGB1555_16,
301 DAC_RGB0565_16,
302 DAC_RGB0888_16,
303 DAC_RGB8888_16,
304 DAC_MAX
305};
306
307struct dac_ops {
308 int (*dac_get_mode)(struct dac_info *info);
309 int (*dac_set_mode)(struct dac_info *info, int mode);
310 int (*dac_get_freq)(struct dac_info *info, int channel);
311 int (*dac_set_freq)(struct dac_info *info, int channel, u32 freq);
312 void (*dac_release)(struct dac_info *info);
313};
314
315typedef void (*dac_read_regs_t)(void *data, u8 *code, int count);
316typedef void (*dac_write_regs_t)(void *data, u8 *code, int count);
317
318struct dac_info
319{
320 struct dac_ops *dacops;
321 dac_read_regs_t dac_read_regs;
322 dac_write_regs_t dac_write_regs;
323 void *data;
324};
325
326
327static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
328{
329 u8 code[2] = {reg, 0};
330 info->dac_read_regs(info->data, code, 1);
331 return code[1];
332}
333
334static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
335{
336 info->dac_read_regs(info->data, code, count);
337}
338
339static inline void dac_write_reg(struct dac_info *info, u8 reg, u8 val)
340{
341 u8 code[2] = {reg, val};
342 info->dac_write_regs(info->data, code, 1);
343}
344
345static inline void dac_write_regs(struct dac_info *info, u8 *code, int count)
346{
347 info->dac_write_regs(info->data, code, count);
348}
349
350static inline int dac_set_mode(struct dac_info *info, int mode)
351{
352 return info->dacops->dac_set_mode(info, mode);
353}
354
355static inline int dac_set_freq(struct dac_info *info, int channel, u32 freq)
356{
357 return info->dacops->dac_set_freq(info, channel, freq);
358}
359
360static inline void dac_release(struct dac_info *info)
361{
362 info->dacops->dac_release(info);
363}
364
365
366/* ------------------------------------------------------------------------- */
367
368
369/* ICS5342 DAC */
370
371struct ics5342_info
372{
373 struct dac_info dac;
374 u8 mode;
375};
376
377#define DAC_PAR(info) ((struct ics5342_info *) info)
378
379/* LSB is set to distinguish unused slots */
380static const u8 ics5342_mode_table[DAC_MAX] = {
381 [DAC_PSEUDO8_8] = 0x01, [DAC_RGB1555_8] = 0x21, [DAC_RGB0565_8] = 0x61,
382 [DAC_RGB0888_8] = 0x41, [DAC_PSEUDO8_16] = 0x11, [DAC_RGB1555_16] = 0x31,
383 [DAC_RGB0565_16] = 0x51, [DAC_RGB0888_16] = 0x91, [DAC_RGB8888_16] = 0x71
384};
385
386static int ics5342_set_mode(struct dac_info *info, int mode)
387{
388 u8 code;
389
390 if (mode >= DAC_MAX)
391 return -EINVAL;
392
393 code = ics5342_mode_table[mode];
394
395 if (! code)
396 return -EINVAL;
397
398 dac_write_reg(info, 6, code & 0xF0);
399 DAC_PAR(info)->mode = mode;
400
401 return 0;
402}
403
404static const struct svga_pll ics5342_pll = {3, 129, 3, 33, 0, 3,
405 60000, 250000, 14318};
406
407/* pd4 - allow only posdivider 4 (r=2) */
408static const struct svga_pll ics5342_pll_pd4 = {3, 129, 3, 33, 2, 2,
409 60000, 335000, 14318};
410
411/* 270 MHz should be upper bound for VCO clock according to specs,
412 but that is too restrictive in pd4 case */
413
414static int ics5342_set_freq(struct dac_info *info, int channel, u32 freq)
415{
416 u16 m, n, r;
417
418 /* only postdivider 4 (r=2) is valid in mode DAC_PSEUDO8_16 */
419 int rv = svga_compute_pll((DAC_PAR(info)->mode == DAC_PSEUDO8_16)
420 ? &ics5342_pll_pd4 : &ics5342_pll,
421 freq, &m, &n, &r, 0);
422
423 if (rv < 0) {
424 return -EINVAL;
425 } else {
426 u8 code[6] = {4, 3, 5, m-2, 5, (n-2) | (r << 5)};
427 dac_write_regs(info, code, 3);
428 return 0;
429 }
430}
431
432static void ics5342_release(struct dac_info *info)
433{
434 ics5342_set_mode(info, DAC_PSEUDO8_8);
435 kfree(info);
436}
437
438static struct dac_ops ics5342_ops = {
439 .dac_set_mode = ics5342_set_mode,
440 .dac_set_freq = ics5342_set_freq,
441 .dac_release = ics5342_release
442};
443
444
445static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
446{
447 struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
448
449 if (! info)
450 return NULL;
451
452 info->dacops = &ics5342_ops;
453 info->dac_read_regs = drr;
454 info->dac_write_regs = dwr;
455 info->data = data;
456 DAC_PAR(info)->mode = DAC_PSEUDO8_8; /* estimation */
457 return info;
458}
459
460
461/* ------------------------------------------------------------------------- */
462
463
464static unsigned short dac_regs[4] = {0x3c8, 0x3c9, 0x3c6, 0x3c7};
465
466static void ark_dac_read_regs(void *data, u8 *code, int count)
467{
468 u8 regval = vga_rseq(NULL, 0x1C);
469
470 while (count != 0)
471 {
472 vga_wseq(NULL, 0x1C, regval | (code[0] & 4) ? 0x80 : 0);
473 code[1] = vga_r(NULL, dac_regs[code[0] & 3]);
474 count--;
475 code += 2;
476 }
477
478 vga_wseq(NULL, 0x1C, regval);
479}
480
481static void ark_dac_write_regs(void *data, u8 *code, int count)
482{
483 u8 regval = vga_rseq(NULL, 0x1C);
484
485 while (count != 0)
486 {
487 vga_wseq(NULL, 0x1C, regval | (code[0] & 4) ? 0x80 : 0);
488 vga_w(NULL, dac_regs[code[0] & 3], code[1]);
489 count--;
490 code += 2;
491 }
492
493 vga_wseq(NULL, 0x1C, regval);
494}
495
496
497static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
498{
499 struct arkfb_info *par = info->par;
500 u8 regval;
501
502 int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
503 if (rv < 0) {
504 printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
505 return;
506 }
507
508 /* Set VGA misc register */
509 regval = vga_r(NULL, VGA_MIS_R);
510 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
511}
512
513
514/* Open framebuffer */
515
516static int arkfb_open(struct fb_info *info, int user)
517{
518 struct arkfb_info *par = info->par;
519
520 mutex_lock(&(par->open_lock));
521 if (par->ref_count == 0) {
522 memset(&(par->state), 0, sizeof(struct vgastate));
523 par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
524 par->state.num_crtc = 0x60;
525 par->state.num_seq = 0x30;
526 save_vga(&(par->state));
527 }
528
529 par->ref_count++;
530 mutex_unlock(&(par->open_lock));
531
532 return 0;
533}
534
535/* Close framebuffer */
536
537static int arkfb_release(struct fb_info *info, int user)
538{
539 struct arkfb_info *par = info->par;
540
541 mutex_lock(&(par->open_lock));
542 if (par->ref_count == 0) {
543 mutex_unlock(&(par->open_lock));
544 return -EINVAL;
545 }
546
547 if (par->ref_count == 1) {
548 restore_vga(&(par->state));
549 dac_set_mode(par->dac, DAC_PSEUDO8_8);
550 }
551
552 par->ref_count--;
553 mutex_unlock(&(par->open_lock));
554
555 return 0;
556}
557
558/* Validate passed in var */
559
560static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
561{
562 int rv, mem, step;
563
564 /* Find appropriate format */
565 rv = svga_match_format (arkfb_formats, var, NULL);
566 if (rv < 0)
567 {
568 printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
569 return rv;
570 }
571
572 /* Do not allow to have real resoulution larger than virtual */
573 if (var->xres > var->xres_virtual)
574 var->xres_virtual = var->xres;
575
576 if (var->yres > var->yres_virtual)
577 var->yres_virtual = var->yres;
578
579 /* Round up xres_virtual to have proper alignment of lines */
580 step = arkfb_formats[rv].xresstep - 1;
581 var->xres_virtual = (var->xres_virtual+step) & ~step;
582
583
584 /* Check whether have enough memory */
585 mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
586 if (mem > info->screen_size)
587 {
588 printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
589 return -EINVAL;
590 }
591
592 rv = svga_check_timings (&ark_timing_regs, var, info->node);
593 if (rv < 0)
594 {
595 printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
596 return rv;
597 }
598
599 /* Interlaced mode is broken */
600 if (var->vmode & FB_VMODE_INTERLACED)
601 return -EINVAL;
602
603 return 0;
604}
605
606/* Set video mode from par */
607
608static int arkfb_set_par(struct fb_info *info)
609{
610 struct arkfb_info *par = info->par;
611 u32 value, mode, hmul, hdiv, offset_value, screen_size;
612 u32 bpp = info->var.bits_per_pixel;
613 u8 regval;
614
615 if (bpp != 0) {
616 info->fix.ypanstep = 1;
617 info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
618
619 info->flags &= ~FBINFO_MISC_TILEBLITTING;
620 info->tileops = NULL;
621
622 /* in 4bpp supports 8p wide tiles only, any tiles otherwise */
623 info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
624 info->pixmap.blit_y = ~(u32)0;
625
626 offset_value = (info->var.xres_virtual * bpp) / 64;
627 screen_size = info->var.yres_virtual * info->fix.line_length;
628 } else {
629 info->fix.ypanstep = 16;
630 info->fix.line_length = 0;
631
632 info->flags |= FBINFO_MISC_TILEBLITTING;
633 info->tileops = &arkfb_tile_ops;
634
635 /* supports 8x16 tiles only */
636 info->pixmap.blit_x = 1 << (8 - 1);
637 info->pixmap.blit_y = 1 << (16 - 1);
638
639 offset_value = info->var.xres_virtual / 16;
640 screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
641 }
642
643 info->var.xoffset = 0;
644 info->var.yoffset = 0;
645 info->var.activate = FB_ACTIVATE_NOW;
646
647 /* Unlock registers */
648 svga_wcrt_mask(0x11, 0x00, 0x80);
649
650 /* Blank screen and turn off sync */
651 svga_wseq_mask(0x01, 0x20, 0x20);
652 svga_wcrt_mask(0x17, 0x00, 0x80);
653
654 /* Set default values */
655 svga_set_default_gfx_regs();
656 svga_set_default_atc_regs();
657 svga_set_default_seq_regs();
658 svga_set_default_crt_regs();
659 svga_wcrt_multi(ark_line_compare_regs, 0xFFFFFFFF);
660 svga_wcrt_multi(ark_start_address_regs, 0);
661
662 /* ARK specific initialization */
663 svga_wseq_mask(0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
664 svga_wseq_mask(0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */
665
666 vga_wseq(NULL, 0x13, info->fix.smem_start >> 16);
667 vga_wseq(NULL, 0x14, info->fix.smem_start >> 24);
668 vga_wseq(NULL, 0x15, 0);
669 vga_wseq(NULL, 0x16, 0);
670
671 /* Set the FIFO threshold register */
672 /* It is fascinating way to store 5-bit value in 8-bit register */
673 regval = 0x10 | ((threshold & 0x0E) >> 1) | (threshold & 0x01) << 7 | (threshold & 0x10) << 1;
674 vga_wseq(NULL, 0x18, regval);
675
676 /* Set the offset register */
677 pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
678 svga_wcrt_multi(ark_offset_regs, offset_value);
679
680 /* fix for hi-res textmode */
681 svga_wcrt_mask(0x40, 0x08, 0x08);
682
683 if (info->var.vmode & FB_VMODE_DOUBLE)
684 svga_wcrt_mask(0x09, 0x80, 0x80);
685 else
686 svga_wcrt_mask(0x09, 0x00, 0x80);
687
688 if (info->var.vmode & FB_VMODE_INTERLACED)
689 svga_wcrt_mask(0x44, 0x04, 0x04);
690 else
691 svga_wcrt_mask(0x44, 0x00, 0x04);
692
693 hmul = 1;
694 hdiv = 1;
695 mode = svga_match_format(arkfb_formats, &(info->var), &(info->fix));
696
697 /* Set mode-specific register values */
698 switch (mode) {
699 case 0:
700 pr_debug("fb%d: text mode\n", info->node);
701 svga_set_textmode_vga_regs();
702
703 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
704 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
705 dac_set_mode(par->dac, DAC_PSEUDO8_8);
706
707 break;
708 case 1:
709 pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
710 vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
711
712 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
713 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
714 dac_set_mode(par->dac, DAC_PSEUDO8_8);
715 break;
716 case 2:
717 pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
718
719 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
720 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
721 dac_set_mode(par->dac, DAC_PSEUDO8_8);
722 break;
723 case 3:
724 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
725
726 vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode */
727
728 if (info->var.pixclock > 20000) {
729 pr_debug("fb%d: not using multiplex\n", info->node);
730 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
731 dac_set_mode(par->dac, DAC_PSEUDO8_8);
732 } else {
733 pr_debug("fb%d: using multiplex\n", info->node);
734 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
735 dac_set_mode(par->dac, DAC_PSEUDO8_16);
736 hdiv = 2;
737 }
738 break;
739 case 4:
740 pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
741
742 vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
743 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
744 dac_set_mode(par->dac, DAC_RGB1555_16);
745 break;
746 case 5:
747 pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
748
749 vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
750 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
751 dac_set_mode(par->dac, DAC_RGB0565_16);
752 break;
753 case 6:
754 pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
755
756 vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode ??? */
757 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
758 dac_set_mode(par->dac, DAC_RGB0888_16);
759 hmul = 3;
760 hdiv = 2;
761 break;
762 case 7:
763 pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
764
765 vga_wseq(NULL, 0x11, 0x1E); /* 32bpp accel mode */
766 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
767 dac_set_mode(par->dac, DAC_RGB8888_16);
768 hmul = 2;
769 break;
770 default:
771 printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
772 return -EINVAL;
773 }
774
775 ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
776 svga_set_timings(&ark_timing_regs, &(info->var), hmul, hdiv,
777 (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
778 (info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
779 hmul, info->node);
780
781 /* Set interlaced mode start/end register */
782 value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
783 value = ((value * hmul / hdiv) / 8) - 5;
784 vga_wcrt(NULL, 0x42, (value + 1) / 2);
785
786 memset_io(info->screen_base, 0x00, screen_size);
787 /* Device and screen back on */
788 svga_wcrt_mask(0x17, 0x80, 0x80);
789 svga_wseq_mask(0x01, 0x00, 0x20);
790
791 return 0;
792}
793
/* Set a colour register */

static int arkfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
				u_int transp, struct fb_info *fb)
{
	/* Colour components arrive as 16-bit fbdev values; the VGA DAC
	 * takes 6-bit values, hence the ">> 10" on each data write below.
	 * transp is ignored (no alpha channel in any supported mode). */
	switch (fb->var.bits_per_pixel) {
	case 0:
	case 4:
		if (regno >= 16)
			return -EINVAL;

		/* Standard (nonstd == 0) 4 bpp spaces palette entries 16
		 * apart and masks the low nibble; otherwise entries 0-15
		 * are used directly with the high nibble masked. */
		if ((fb->var.bits_per_pixel == 4) &&
		    (fb->var.nonstd == 0)) {
			outb(0xF0, VGA_PEL_MSK);
			outb(regno*16, VGA_PEL_IW);
		} else {
			outb(0x0F, VGA_PEL_MSK);
			outb(regno, VGA_PEL_IW);
		}
		outb(red >> 10, VGA_PEL_D);
		outb(green >> 10, VGA_PEL_D);
		outb(blue >> 10, VGA_PEL_D);
		break;
	case 8:
		if (regno >= 256)
			return -EINVAL;

		outb(0xFF, VGA_PEL_MSK);
		outb(regno, VGA_PEL_IW);
		outb(red >> 10, VGA_PEL_D);
		outb(green >> 10, VGA_PEL_D);
		outb(blue >> 10, VGA_PEL_D);
		break;
	case 16:
		/* Truecolor: only the 16-entry pseudo palette used by the
		 * console drawing code is filled; higher entries are
		 * silently accepted (return 0, not an error). */
		if (regno >= 16)
			return 0;

		if (fb->var.green.length == 5)
			/* RGB 5/5/5 */
			((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
				((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
		else if (fb->var.green.length == 6)
			/* RGB 5/6/5 */
			((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
				((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
		else
			return -EINVAL;
		break;
	case 24:
	case 32:
		if (regno >= 16)
			return 0;

		/* RGB 8/8/8 packed into the low 24 bits */
		((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
			(green & 0xFF00) | ((blue & 0xFF00) >> 8);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
854
/* Set the display blanking state */

static int arkfb_blank(int blank_mode, struct fb_info *info)
{
	/* SR01 bit 5 (0x20) is the standard VGA screen-disable bit;
	 * CR17 bit 7 (0x80) enables/disables the CRT controller output. */
	switch (blank_mode) {
	case FB_BLANK_UNBLANK:
		pr_debug("fb%d: unblank\n", info->node);
		svga_wseq_mask(0x01, 0x00, 0x20);
		svga_wcrt_mask(0x17, 0x80, 0x80);
		break;
	case FB_BLANK_NORMAL:
		/* Blank the screen but keep sync running */
		pr_debug("fb%d: blank\n", info->node);
		svga_wseq_mask(0x01, 0x20, 0x20);
		svga_wcrt_mask(0x17, 0x80, 0x80);
		break;
	case FB_BLANK_POWERDOWN:
	case FB_BLANK_HSYNC_SUSPEND:
	case FB_BLANK_VSYNC_SUSPEND:
		/* Blank the screen and stop CRTC output as well */
		pr_debug("fb%d: sync down\n", info->node);
		svga_wseq_mask(0x01, 0x20, 0x20);
		svga_wcrt_mask(0x17, 0x00, 0x80);
		break;
	}
	return 0;
}
880
881
882/* Pan the display */
883
884static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
885{
886 unsigned int offset;
887
888 /* Calculate the offset */
889 if (var->bits_per_pixel == 0) {
890 offset = (var->yoffset / 16) * (var->xres_virtual / 2) + (var->xoffset / 2);
891 offset = offset >> 2;
892 } else {
893 offset = (var->yoffset * info->fix.line_length) +
894 (var->xoffset * var->bits_per_pixel / 8);
895 offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 3);
896 }
897
898 /* Set the offset */
899 svga_wcrt_multi(ark_start_address_regs, offset);
900
901 return 0;
902}
903
904
905/* ------------------------------------------------------------------------- */
906
907
/* Frame buffer operations */

/* Entry points handed to the fbdev core; unaccelerated copyarea uses
 * the generic cfb helper, everything else is driver-specific. */
static struct fb_ops arkfb_ops = {
	.owner		= THIS_MODULE,
	.fb_open	= arkfb_open,
	.fb_release	= arkfb_release,
	.fb_check_var	= arkfb_check_var,
	.fb_set_par	= arkfb_set_par,
	.fb_setcolreg	= arkfb_setcolreg,
	.fb_blank	= arkfb_blank,
	.fb_pan_display	= arkfb_pan_display,
	.fb_fillrect	= arkfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= arkfb_imageblit,
	.fb_get_caps    = svga_get_caps,
};
924
925
926/* ------------------------------------------------------------------------- */
927
928
929/* PCI probe */
930static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
931{
932 struct fb_info *info;
933 struct arkfb_info *par;
934 int rc;
935 u8 regval;
936
937 /* Ignore secondary VGA device because there is no VGA arbitration */
938 if (! svga_primary_device(dev)) {
939 dev_info(&(dev->dev), "ignoring secondary device\n");
940 return -ENODEV;
941 }
942
943 /* Allocate and fill driver data structure */
944 info = framebuffer_alloc(sizeof(struct arkfb_info), NULL);
945 if (! info) {
946 dev_err(&(dev->dev), "cannot allocate memory\n");
947 return -ENOMEM;
948 }
949
950 par = info->par;
951 mutex_init(&par->open_lock);
952
953 info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
954 info->fbops = &arkfb_ops;
955
956 /* Prepare PCI device */
957 rc = pci_enable_device(dev);
958 if (rc < 0) {
959 dev_err(&(dev->dev), "cannot enable PCI device\n");
960 goto err_enable_device;
961 }
962
963 rc = pci_request_regions(dev, "arkfb");
964 if (rc < 0) {
965 dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
966 goto err_request_regions;
967 }
968
969 par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
970 if (! par->dac) {
971 rc = -ENOMEM;
972 dev_err(&(dev->dev), "RAMDAC initialization failed\n");
973 goto err_dac;
974 }
975
976 info->fix.smem_start = pci_resource_start(dev, 0);
977 info->fix.smem_len = pci_resource_len(dev, 0);
978
979 /* Map physical IO memory address into kernel space */
980 info->screen_base = pci_iomap(dev, 0, 0);
981 if (! info->screen_base) {
982 rc = -ENOMEM;
983 dev_err(&(dev->dev), "iomap for framebuffer failed\n");
984 goto err_iomap;
985 }
986
987 /* FIXME get memsize */
988 regval = vga_rseq(NULL, 0x10);
989 info->screen_size = (1 << (regval >> 6)) << 20;
990 info->fix.smem_len = info->screen_size;
991
992 strcpy(info->fix.id, "ARK 2000PV");
993 info->fix.mmio_start = 0;
994 info->fix.mmio_len = 0;
995 info->fix.type = FB_TYPE_PACKED_PIXELS;
996 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
997 info->fix.ypanstep = 0;
998 info->fix.accel = FB_ACCEL_NONE;
999 info->pseudo_palette = (void*) (par->pseudo_palette);
1000
1001 /* Prepare startup mode */
1002 rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
1003 if (! ((rc == 1) || (rc == 2))) {
1004 rc = -EINVAL;
1005 dev_err(&(dev->dev), "mode %s not found\n", mode);
1006 goto err_find_mode;
1007 }
1008
1009 rc = fb_alloc_cmap(&info->cmap, 256, 0);
1010 if (rc < 0) {
1011 dev_err(&(dev->dev), "cannot allocate colormap\n");
1012 goto err_alloc_cmap;
1013 }
1014
1015 rc = register_framebuffer(info);
1016 if (rc < 0) {
1017 dev_err(&(dev->dev), "cannot register framebugger\n");
1018 goto err_reg_fb;
1019 }
1020
1021 printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
1022 pci_name(dev), info->fix.smem_len >> 20);
1023
1024 /* Record a reference to the driver data */
1025 pci_set_drvdata(dev, info);
1026
1027#ifdef CONFIG_MTRR
1028 if (mtrr) {
1029 par->mtrr_reg = -1;
1030 par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
1031 }
1032#endif
1033
1034 return 0;
1035
1036 /* Error handling */
1037err_reg_fb:
1038 fb_dealloc_cmap(&info->cmap);
1039err_alloc_cmap:
1040err_find_mode:
1041 pci_iounmap(dev, info->screen_base);
1042err_iomap:
1043 dac_release(par->dac);
1044err_dac:
1045 pci_release_regions(dev);
1046err_request_regions:
1047/* pci_disable_device(dev); */
1048err_enable_device:
1049 framebuffer_release(info);
1050 return rc;
1051}
1052
1053/* PCI remove */
1054
1055static void __devexit ark_pci_remove(struct pci_dev *dev)
1056{
1057 struct fb_info *info = pci_get_drvdata(dev);
1058 struct arkfb_info *par = info->par;
1059
1060 if (info) {
1061#ifdef CONFIG_MTRR
1062 if (par->mtrr_reg >= 0) {
1063 mtrr_del(par->mtrr_reg, 0, 0);
1064 par->mtrr_reg = -1;
1065 }
1066#endif
1067
1068 dac_release(par->dac);
1069 unregister_framebuffer(info);
1070 fb_dealloc_cmap(&info->cmap);
1071
1072 pci_iounmap(dev, info->screen_base);
1073 pci_release_regions(dev);
1074/* pci_disable_device(dev); */
1075
1076 pci_set_drvdata(dev, NULL);
1077 framebuffer_release(info);
1078 }
1079}
1080
1081
1082#ifdef CONFIG_PM
/* PCI suspend */

static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
{
	struct fb_info *info = pci_get_drvdata(dev);
	struct arkfb_info *par = info->par;

	dev_info(&(dev->dev), "suspend\n");

	/* Console semaphore serializes against fbcon; open_lock protects
	 * ref_count. */
	acquire_console_sem();
	mutex_lock(&(par->open_lock));

	/* Nothing to do for a freeze (snapshot) event or when nobody has
	 * the device open. */
	if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
		mutex_unlock(&(par->open_lock));
		release_console_sem();
		return 0;
	}

	fb_set_suspend(info, 1);

	/* Save PCI config space, then power the device down */
	pci_save_state(dev);
	pci_disable_device(dev);
	pci_set_power_state(dev, pci_choose_state(dev, state));

	mutex_unlock(&(par->open_lock));
	release_console_sem();

	return 0;
}
1112
1113
1114/* PCI resume */
1115
1116static int ark_pci_resume (struct pci_dev* dev)
1117{
1118 struct fb_info *info = pci_get_drvdata(dev);
1119 struct arkfb_info *par = info->par;
1120
1121 dev_info(&(dev->dev), "resume\n");
1122
1123 acquire_console_sem();
1124 mutex_lock(&(par->open_lock));
1125
1126 if (par->ref_count == 0) {
1127 mutex_unlock(&(par->open_lock));
1128 release_console_sem();
1129 return 0;
1130 }
1131
1132 pci_set_power_state(dev, PCI_D0);
1133 pci_restore_state(dev);
1134
1135 if (pci_enable_device(dev))
1136 goto fail;
1137
1138 pci_set_master(dev);
1139
1140 arkfb_set_par(info);
1141 fb_set_suspend(info, 0);
1142
1143 mutex_unlock(&(par->open_lock));
1144fail:
1145 release_console_sem();
1146 return 0;
1147}
1148#else
1149#define ark_pci_suspend NULL
1150#define ark_pci_resume NULL
1151#endif /* CONFIG_PM */
1152
/* List of boards that we are trying to support */

static struct pci_device_id ark_devices[] __devinitdata = {
	{PCI_DEVICE(0xEDD8, 0xA099)},	/* presumably the ARK 2000PV (matches fix.id set in probe) -- TODO confirm */
	{0, 0, 0, 0, 0, 0, 0}		/* terminating entry */
};
1159
1160
1161MODULE_DEVICE_TABLE(pci, ark_devices);
1162
/* PCI driver structure: binds ark_devices to the probe/remove/PM hooks above */
static struct pci_driver arkfb_pci_driver = {
	.name		= "arkfb",
	.id_table	= ark_devices,
	.probe		= ark_pci_probe,
	.remove		= __devexit_p(ark_pci_remove),
	.suspend	= ark_pci_suspend,
	.resume		= ark_pci_resume,
};
1171
/* Cleanup */

static void __exit arkfb_cleanup(void)
{
	/* Unregistering the PCI driver detaches all bound devices,
	 * invoking ark_pci_remove() for each. */
	pr_debug("arkfb: cleaning up\n");
	pci_unregister_driver(&arkfb_pci_driver);
}
1179
/* Driver Initialisation */

static int __init arkfb_init(void)
{

#ifndef MODULE
	/* Built-in only: pick up a "video=arkfb:<mode>" string from the
	 * kernel command line; fb_get_options() returns nonzero when the
	 * driver was explicitly disabled on the command line. */
	char *option = NULL;

	if (fb_get_options("arkfb", &option))
		return -ENODEV;

	if (option && *option)
		mode = option;
#endif

	pr_debug("arkfb: initializing\n");
	return pci_register_driver(&arkfb_pci_driver);
}
1198
1199module_init(arkfb_init);
1200module_exit(arkfb_cleanup);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index ea67dd902d4e..8d3455da663a 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -80,8 +80,9 @@
80#include "../macmodes.h" 80#include "../macmodes.h"
81#endif 81#endif
82#ifdef __sparc__ 82#ifdef __sparc__
83#include <asm/pbm.h>
84#include <asm/fbio.h> 83#include <asm/fbio.h>
84#include <asm/oplib.h>
85#include <asm/prom.h>
85#endif 86#endif
86 87
87#ifdef CONFIG_ADB_PMU 88#ifdef CONFIG_ADB_PMU
diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
index 2a7f381c330f..fe2c6ad01a8d 100644
--- a/drivers/video/aty/mach64_cursor.c
+++ b/drivers/video/aty/mach64_cursor.c
@@ -11,7 +11,6 @@
11#include <asm/uaccess.h> 11#include <asm/uaccess.h>
12 12
13#ifdef __sparc__ 13#ifdef __sparc__
14#include <asm/pbm.h>
15#include <asm/fbio.h> 14#include <asm/fbio.h>
16#endif 15#endif
17 16
diff --git a/drivers/video/console/softcursor.c b/drivers/video/console/softcursor.c
index f577bd80e020..03cfb7ac5733 100644
--- a/drivers/video/console/softcursor.c
+++ b/drivers/video/console/softcursor.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/video/softcursor.c 2 * linux/drivers/video/console/softcursor.c
3 * 3 *
4 * Generic software cursor for frame buffer devices 4 * Generic software cursor for frame buffer devices
5 * 5 *
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 08d4e11d9121..38c2e2558f5e 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1236,6 +1236,10 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1236 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; 1236 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
1237#elif defined(__arm__) || defined(__sh__) || defined(__m32r__) 1237#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
1238 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 1238 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1239#elif defined(__avr32__)
1240 vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
1241 & ~_PAGE_CACHABLE)
1242 | (_PAGE_BUFFER | _PAGE_DIRTY));
1239#elif defined(__ia64__) 1243#elif defined(__ia64__)
1240 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) 1244 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
1241 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 1245 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 7e760197cf29..1a7d7789d877 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1717,7 +1717,7 @@ static int __devinit i810_alloc_agp_mem(struct fb_info *info)
1717 * @info: pointer to device specific info structure 1717 * @info: pointer to device specific info structure
1718 * 1718 *
1719 * DESCRIPTION: 1719 * DESCRIPTION:
1720 * Sets the the user monitor's horizontal and vertical 1720 * Sets the user monitor's horizontal and vertical
1721 * frequency limits 1721 * frequency limits
1722 */ 1722 */
1723static void __devinit i810_init_monspecs(struct fb_info *info) 1723static void __devinit i810_init_monspecs(struct fb_info *info)
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index a5690a5f29d5..9445cdb759b1 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -72,7 +72,7 @@
72 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> 72 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
73 * 73 *
74 * (following author is not in any relation with this code, but his ideas 74 * (following author is not in any relation with this code, but his ideas
75 * were used when writting this driver) 75 * were used when writing this driver)
76 * 76 *
77 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> 77 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
78 * 78 *
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index a5c825d99466..c57aaadf410c 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -70,7 +70,7 @@
70 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> 70 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
71 * 71 *
72 * (following author is not in any relation with this code, but his ideas 72 * (following author is not in any relation with this code, but his ideas
73 * were used when writting this driver) 73 * were used when writing this driver)
74 * 74 *
75 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> 75 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
76 * 76 *
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index cb2aa402ddfd..c8559a756b75 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -93,7 +93,7 @@
93 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> 93 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
94 * 94 *
95 * (following author is not in any relation with this code, but his ideas 95 * (following author is not in any relation with this code, but his ideas
96 * were used when writting this driver) 96 * were used when writing this driver)
97 * 97 *
98 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> 98 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
99 * 99 *
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index 18886b629cb1..5948e54b9ef9 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -78,7 +78,7 @@
78 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> 78 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
79 * 79 *
80 * (following author is not in any relation with this code, but his ideas 80 * (following author is not in any relation with this code, but his ideas
81 * were used when writting this driver) 81 * were used when writing this driver)
82 * 82 *
83 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> 83 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
84 * 84 *
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index f297c7b14a41..c627955aa124 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -149,8 +149,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
149 pll = NV_RD32(par->PMC, 0x4024); 149 pll = NV_RD32(par->PMC, 0x4024);
150 M = pll & 0xFF; 150 M = pll & 0xFF;
151 N = (pll >> 8) & 0xFF; 151 N = (pll >> 8) & 0xFF;
152 if (((par->Chipset & 0xfff0) == 0x0290) || 152 if (((par->Chipset & 0xfff0) == 0x0290) || ((par->Chipset & 0xfff0) == 0x0390) || ((par->Chipset & 0xfff0) == 0x02E0)) {
153 ((par->Chipset & 0xfff0) == 0x0390)) {
154 MB = 1; 153 MB = 1;
155 NB = 1; 154 NB = 1;
156 } else { 155 } else {
@@ -963,6 +962,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
963 962
964 if (((par->Chipset & 0xfff0) == 0x0090) || 963 if (((par->Chipset & 0xfff0) == 0x0090) ||
965 ((par->Chipset & 0xfff0) == 0x01D0) || 964 ((par->Chipset & 0xfff0) == 0x01D0) ||
965 ((par->Chipset & 0xfff0) == 0x02E0) ||
966 ((par->Chipset & 0xfff0) == 0x0290)) 966 ((par->Chipset & 0xfff0) == 0x0290))
967 regions = 15; 967 regions = 15;
968 for(i = 0; i < regions; i++) { 968 for(i = 0; i < regions; i++) {
@@ -1275,6 +1275,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1275 0x00100000); 1275 0x00100000);
1276 break; 1276 break;
1277 case 0x0090: 1277 case 0x0090:
1278 case 0x02E0:
1278 case 0x0290: 1279 case 0x0290:
1279 NV_WR32(par->PRAMDAC, 0x0608, 1280 NV_WR32(par->PRAMDAC, 0x0608,
1280 NV_RD32(par->PRAMDAC, 0x0608) | 1281 NV_RD32(par->PRAMDAC, 0x0608) |
@@ -1352,6 +1353,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1352 } else { 1353 } else {
1353 if (((par->Chipset & 0xfff0) == 0x0090) || 1354 if (((par->Chipset & 0xfff0) == 0x0090) ||
1354 ((par->Chipset & 0xfff0) == 0x01D0) || 1355 ((par->Chipset & 0xfff0) == 0x01D0) ||
1356 ((par->Chipset & 0xfff0) == 0x02E0) ||
1355 ((par->Chipset & 0xfff0) == 0x0290)) { 1357 ((par->Chipset & 0xfff0) == 0x0290)) {
1356 for (i = 0; i < 60; i++) { 1358 for (i = 0; i < 60; i++) {
1357 NV_WR32(par->PGRAPH, 1359 NV_WR32(par->PGRAPH,
@@ -1403,6 +1405,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1403 } else { 1405 } else {
1404 if ((par->Chipset & 0xfff0) == 0x0090 || 1406 if ((par->Chipset & 0xfff0) == 0x0090 ||
1405 (par->Chipset & 0xfff0) == 0x01D0 || 1407 (par->Chipset & 0xfff0) == 0x01D0 ||
1408 (par->Chipset & 0xfff0) == 0x02E0 ||
1406 (par->Chipset & 0xfff0) == 0x0290) { 1409 (par->Chipset & 0xfff0) == 0x0290) {
1407 NV_WR32(par->PGRAPH, 0x0DF0, 1410 NV_WR32(par->PGRAPH, 0x0DF0,
1408 NV_RD32(par->PFB, 0x0200)); 1411 NV_RD32(par->PFB, 0x0200));
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 7c36b5fe582e..f85edf084da3 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1243,6 +1243,7 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
1243 case 0x0140: /* GeForce 6600 */ 1243 case 0x0140: /* GeForce 6600 */
1244 case 0x0160: /* GeForce 6200 */ 1244 case 0x0160: /* GeForce 6200 */
1245 case 0x01D0: /* GeForce 7200, 7300, 7400 */ 1245 case 0x01D0: /* GeForce 7200, 7300, 7400 */
1246 case 0x02E0: /* GeForce 7300 GT */
1246 case 0x0090: /* GeForce 7800 */ 1247 case 0x0090: /* GeForce 7800 */
1247 case 0x0210: /* GeForce 6800 */ 1248 case 0x0210: /* GeForce 6800 */
1248 case 0x0220: /* GeForce 6200 */ 1249 case 0x0220: /* GeForce 6200 */
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 756fafb41d78..d11735895a01 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -796,23 +796,6 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
796 return 0; 796 return 0;
797} 797}
798 798
799/* Get capabilities of accelerator based on the mode */
800
801static void s3fb_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
802 struct fb_var_screeninfo *var)
803{
804 if (var->bits_per_pixel == 0) {
805 /* can only support 256 8x16 bitmap */
806 caps->x = 1 << (8 - 1);
807 caps->y = 1 << (16 - 1);
808 caps->len = 256;
809 } else {
810 caps->x = ~(u32)0;
811 caps->y = ~(u32)0;
812 caps->len = ~(u32)0;
813 }
814}
815
816/* ------------------------------------------------------------------------- */ 799/* ------------------------------------------------------------------------- */
817 800
818/* Frame buffer operations */ 801/* Frame buffer operations */
@@ -829,7 +812,7 @@ static struct fb_ops s3fb_ops = {
829 .fb_fillrect = s3fb_fillrect, 812 .fb_fillrect = s3fb_fillrect,
830 .fb_copyarea = cfb_copyarea, 813 .fb_copyarea = cfb_copyarea,
831 .fb_imageblit = s3fb_imageblit, 814 .fb_imageblit = s3fb_imageblit,
832 .fb_get_caps = s3fb_get_caps, 815 .fb_get_caps = svga_get_caps,
833}; 816};
834 817
835/* ------------------------------------------------------------------------- */ 818/* ------------------------------------------------------------------------- */
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 842b5cd054c6..836a612af977 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -14,7 +14,7 @@
14 * of it. 14 * of it.
15 * 15 *
16 * First the roles of struct fb_info and struct display have changed. Struct 16 * First the roles of struct fb_info and struct display have changed. Struct
17 * display will go away. The way the the new framebuffer console code will 17 * display will go away. The way the new framebuffer console code will
18 * work is that it will act to translate data about the tty/console in 18 * work is that it will act to translate data about the tty/console in
19 * struct vc_data to data in a device independent way in struct fb_info. Then 19 * struct vc_data to data in a device independent way in struct fb_info. Then
20 * various functions in struct fb_ops will be called to store the device 20 * various functions in struct fb_ops will be called to store the device
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index 079cdc911e48..25df928d37d8 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -347,6 +347,23 @@ int svga_get_tilemax(struct fb_info *info)
347 return 256; 347 return 256;
348} 348}
349 349
350/* Get capabilities of accelerator based on the mode */
351
352void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
353 struct fb_var_screeninfo *var)
354{
355 if (var->bits_per_pixel == 0) {
356 /* can only support 256 8x16 bitmap */
357 caps->x = 1 << (8 - 1);
358 caps->y = 1 << (16 - 1);
359 caps->len = 256;
360 } else {
361 caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
362 caps->y = ~(u32)0;
363 caps->len = ~(u32)0;
364 }
365}
366EXPORT_SYMBOL(svga_get_caps);
350 367
351/* ------------------------------------------------------------------------- */ 368/* ------------------------------------------------------------------------- */
352 369
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
new file mode 100644
index 000000000000..5e9755e464a1
--- /dev/null
+++ b/drivers/video/vt8623fb.c
@@ -0,0 +1,927 @@
1/*
2 * linux/drivers/video/vt8623fb.c - fbdev driver for
3 * integrated graphic core in VIA VT8623 [CLE266] chipset
4 *
5 * Copyright (c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for
9 * more details.
10 *
11 * Code is based on s3fb, some parts are from David Boucher's viafb
12 * (http://davesdomain.org.uk/viafb/)
13 */
14
15#include <linux/version.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/tty.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/fb.h>
25#include <linux/svga.h>
26#include <linux/init.h>
27#include <linux/pci.h>
28#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
29#include <video/vga.h>
30
31#ifdef CONFIG_MTRR
32#include <asm/mtrr.h>
33#endif
34
35struct vt8623fb_info {
36 char __iomem *mmio_base;
37 int mtrr_reg;
38 struct vgastate state;
39 struct mutex open_lock;
40 unsigned int ref_count;
41 u32 pseudo_palette[16];
42};
43
44
45
46/* ------------------------------------------------------------------------- */
47
48static const struct svga_fb_format vt8623fb_formats[] = {
49 { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
50 FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP8, FB_VISUAL_PSEUDOCOLOR, 16, 16},
51 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
52 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 16, 16},
53 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
54 FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 16, 16},
55 { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
56 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
57/* {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
58 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4}, */
59 {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
60 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
61 {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
62 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
63 SVGA_FORMAT_END
64};
65
66static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
67 60000, 300000, 14318};
68
69/* CRT timing register sets */
70
71struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
72struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
73struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
74struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
75struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
76struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
77
78struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
79struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
80struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
81struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
82struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
83struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
84
85struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
86struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
87struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
88struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
89
90struct svga_timing_regs vt8623_timing_regs = {
91 vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
92 vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
93 vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
94 vt8623_v_blank_end_regs, vt8623_v_sync_start_regs, vt8623_v_sync_end_regs,
95};
96
97
98/* ------------------------------------------------------------------------- */
99
100
101/* Module parameters */
102
103static char *mode = "640x480-8@60";
104
105#ifdef CONFIG_MTRR
106static int mtrr = 1;
107#endif
108
109MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
110MODULE_LICENSE("GPL");
111MODULE_DESCRIPTION("fbdev driver for integrated graphics core in VIA VT8623 [CLE266]");
112
113module_param(mode, charp, 0644);
114MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
115
116#ifdef CONFIG_MTRR
117module_param(mtrr, int, 0444);
118MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
119#endif
120
121
122/* ------------------------------------------------------------------------- */
123
124
125static struct fb_tile_ops vt8623fb_tile_ops = {
126 .fb_settile = svga_settile,
127 .fb_tilecopy = svga_tilecopy,
128 .fb_tilefill = svga_tilefill,
129 .fb_tileblit = svga_tileblit,
130 .fb_tilecursor = svga_tilecursor,
131 .fb_get_tilemax = svga_get_tilemax,
132};
133
134
135/* ------------------------------------------------------------------------- */
136
137
138/* image data is MSB-first, fb structure is MSB-first too */
139static inline u32 expand_color(u32 c)
140{
141 return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
142}
143
144/* vt8623fb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
145static void vt8623fb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
146{
147 u32 fg = expand_color(image->fg_color);
148 u32 bg = expand_color(image->bg_color);
149 const u8 *src1, *src;
150 u8 __iomem *dst1;
151 u32 __iomem *dst;
152 u32 val;
153 int x, y;
154
155 src1 = image->data;
156 dst1 = info->screen_base + (image->dy * info->fix.line_length)
157 + ((image->dx / 8) * 4);
158
159 for (y = 0; y < image->height; y++) {
160 src = src1;
161 dst = (u32 __iomem *) dst1;
162 for (x = 0; x < image->width; x += 8) {
163 val = *(src++) * 0x01010101;
164 val = (val & fg) | (~val & bg);
165 fb_writel(val, dst++);
166 }
167 src1 += image->width / 8;
168 dst1 += info->fix.line_length;
169 }
170}
171
172/* vt8623fb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
173static void vt8623fb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
174{
175 u32 fg = expand_color(rect->color);
176 u8 __iomem *dst1;
177 u32 __iomem *dst;
178 int x, y;
179
180 dst1 = info->screen_base + (rect->dy * info->fix.line_length)
181 + ((rect->dx / 8) * 4);
182
183 for (y = 0; y < rect->height; y++) {
184 dst = (u32 __iomem *) dst1;
185 for (x = 0; x < rect->width; x += 8) {
186 fb_writel(fg, dst++);
187 }
188 dst1 += info->fix.line_length;
189 }
190}
191
192
193/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
194static inline u32 expand_pixel(u32 c)
195{
196 return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
197 ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
198}
199
200/* vt8623fb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
201static void vt8623fb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
202{
203 u32 fg = image->fg_color * 0x11111111;
204 u32 bg = image->bg_color * 0x11111111;
205 const u8 *src1, *src;
206 u8 __iomem *dst1;
207 u32 __iomem *dst;
208 u32 val;
209 int x, y;
210
211 src1 = image->data;
212 dst1 = info->screen_base + (image->dy * info->fix.line_length)
213 + ((image->dx / 8) * 4);
214
215 for (y = 0; y < image->height; y++) {
216 src = src1;
217 dst = (u32 __iomem *) dst1;
218 for (x = 0; x < image->width; x += 8) {
219 val = expand_pixel(*(src++));
220 val = (val & fg) | (~val & bg);
221 fb_writel(val, dst++);
222 }
223 src1 += image->width / 8;
224 dst1 += info->fix.line_length;
225 }
226}
227
228static void vt8623fb_imageblit(struct fb_info *info, const struct fb_image *image)
229{
230 if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
231 && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
232 if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
233 vt8623fb_iplan_imageblit(info, image);
234 else
235 vt8623fb_cfb4_imageblit(info, image);
236 } else
237 cfb_imageblit(info, image);
238}
239
240static void vt8623fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
241{
242 if ((info->var.bits_per_pixel == 4)
243 && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
244 && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
245 vt8623fb_iplan_fillrect(info, rect);
246 else
247 cfb_fillrect(info, rect);
248}
249
250
251/* ------------------------------------------------------------------------- */
252
253
/* Program the pixel clock PLL. pixclock is in picoseconds per pixel (fbdev
 * convention); keeps the previous clock if no m/n/r divisors can be found.
 * The register write order (misc enable, divisors, delay, PLL reset pulse)
 * follows the hardware programming sequence — do not reorder. */
static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
{
	u16 m, n, r;
	u8 regval;
	int rv;

	/* 1000000000 / pixclock converts ps/pixel to kHz */
	rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
	if (rv < 0) {
		printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
		return;
	}

	/* Set VGA misc register */
	regval = vga_r(NULL, VGA_MIS_R);
	vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);

	/* Set clock registers */
	vga_wseq(NULL, 0x46, (n  | (r << 6)));
	vga_wseq(NULL, 0x47, m);

	/* Let the new divisors settle before pulsing the PLL reset */
	udelay(1000);

	/* PLL reset */
	svga_wseq_mask(0x40, 0x02, 0x02);
	svga_wseq_mask(0x40, 0x00, 0x02);
}
280
281
282static int vt8623fb_open(struct fb_info *info, int user)
283{
284 struct vt8623fb_info *par = info->par;
285
286 mutex_lock(&(par->open_lock));
287 if (par->ref_count == 0) {
288 memset(&(par->state), 0, sizeof(struct vgastate));
289 par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
290 par->state.num_crtc = 0xA2;
291 par->state.num_seq = 0x50;
292 save_vga(&(par->state));
293 }
294
295 par->ref_count++;
296 mutex_unlock(&(par->open_lock));
297
298 return 0;
299}
300
301static int vt8623fb_release(struct fb_info *info, int user)
302{
303 struct vt8623fb_info *par = info->par;
304
305 mutex_lock(&(par->open_lock));
306 if (par->ref_count == 0) {
307 mutex_unlock(&(par->open_lock));
308 return -EINVAL;
309 }
310
311 if (par->ref_count == 1)
312 restore_vga(&(par->state));
313
314 par->ref_count--;
315 mutex_unlock(&(par->open_lock));
316
317 return 0;
318}
319
320static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
321{
322 int rv, mem, step;
323
324 /* Find appropriate format */
325 rv = svga_match_format (vt8623fb_formats, var, NULL);
326 if (rv < 0)
327 {
328 printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
329 return rv;
330 }
331
332 /* Do not allow to have real resoulution larger than virtual */
333 if (var->xres > var->xres_virtual)
334 var->xres_virtual = var->xres;
335
336 if (var->yres > var->yres_virtual)
337 var->yres_virtual = var->yres;
338
339 /* Round up xres_virtual to have proper alignment of lines */
340 step = vt8623fb_formats[rv].xresstep - 1;
341 var->xres_virtual = (var->xres_virtual+step) & ~step;
342
343 /* Check whether have enough memory */
344 mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
345 if (mem > info->screen_size)
346 {
347 printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
348 return -EINVAL;
349 }
350
351 /* Text mode is limited to 256 kB of memory */
352 if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
353 {
354 printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
355 return -EINVAL;
356 }
357
358 rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
359 if (rv < 0)
360 {
361 printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
362 return rv;
363 }
364
365 /* Interlaced mode not supported */
366 if (var->vmode & FB_VMODE_INTERLACED)
367 return -EINVAL;
368
369 return 0;
370}
371
372
/* Program the hardware for the mode in info->var. bits_per_pixel == 0 is
 * text mode. The register write sequence (unlock, blank, program, unblank)
 * follows the hardware documentation — do not reorder the writes. */
static int vt8623fb_set_par(struct fb_info *info)
{
	u32 mode, offset_value, fetch_value, screen_size;
	u32 bpp = info->var.bits_per_pixel;

	if (bpp != 0) {
		/* Graphics mode: per-line panning, no tile blitting */
		info->fix.ypanstep = 1;
		info->fix.line_length = (info->var.xres_virtual * bpp) / 8;

		info->flags &= ~FBINFO_MISC_TILEBLITTING;
		info->tileops = NULL;

		/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
		info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
		info->pixmap.blit_y = ~(u32)0;

		/* Display offset in 8-byte units, fetch count in 16-byte units */
		offset_value = (info->var.xres_virtual * bpp) / 64;
		fetch_value  = ((info->var.xres * bpp) / 128) + 4;

		if (bpp == 4)
			fetch_value  = (info->var.xres / 8) + 8; /* + 0 is OK */

		screen_size  = info->var.yres_virtual * info->fix.line_length;
	} else {
		/* Text mode: pan by character rows, use tile operations */
		info->fix.ypanstep = 16;
		info->fix.line_length = 0;

		info->flags |= FBINFO_MISC_TILEBLITTING;
		info->tileops = &vt8623fb_tile_ops;

		/* supports 8x16 tiles only */
		info->pixmap.blit_x = 1 << (8 - 1);
		info->pixmap.blit_y = 1 << (16 - 1);

		offset_value = info->var.xres_virtual / 16;
		fetch_value  = (info->var.xres / 8) + 8;
		screen_size  = (info->var.xres_virtual * info->var.yres_virtual) / 64;
	}

	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;

	/* Unlock registers */
	svga_wseq_mask(0x10, 0x01, 0x01);
	svga_wcrt_mask(0x11, 0x00, 0x80);
	svga_wcrt_mask(0x47, 0x00, 0x01);

	/* Device, screen and sync off */
	svga_wseq_mask(0x01, 0x20, 0x20);
	svga_wcrt_mask(0x36, 0x30, 0x30);
	svga_wcrt_mask(0x17, 0x00, 0x80);

	/* Set default values */
	svga_set_default_gfx_regs();
	svga_set_default_atc_regs();
	svga_set_default_seq_regs();
	svga_set_default_crt_regs();
	svga_wcrt_multi(vt8623_line_compare_regs, 0xFFFFFFFF);
	svga_wcrt_multi(vt8623_start_address_regs, 0);

	svga_wcrt_multi(vt8623_offset_regs, offset_value);
	svga_wseq_multi(vt8623_fetch_count_regs, fetch_value);

	/* CRT 0x09 bit 7 selects scanline doubling */
	if (info->var.vmode & FB_VMODE_DOUBLE)
		svga_wcrt_mask(0x09, 0x80, 0x80);
	else
		svga_wcrt_mask(0x09, 0x00, 0x80);

	svga_wseq_mask(0x1E, 0xF0, 0xF0); // DI/DVP bus
	svga_wseq_mask(0x2A, 0x0F, 0x0F); // DI/DVP bus
	svga_wseq_mask(0x16, 0x08, 0xBF); // FIFO read treshold
	vga_wseq(NULL, 0x17, 0x1F);       // FIFO depth
	vga_wseq(NULL, 0x18, 0x4E);
	svga_wseq_mask(0x1A, 0x08, 0x08); // enable MMIO ?

	vga_wcrt(NULL, 0x32, 0x00);
	vga_wcrt(NULL, 0x34, 0x00);
	vga_wcrt(NULL, 0x6A, 0x80);
	vga_wcrt(NULL, 0x6A, 0xC0);

	vga_wgfx(NULL, 0x20, 0x00);
	vga_wgfx(NULL, 0x21, 0x00);
	vga_wgfx(NULL, 0x22, 0x00);

	/* Set SR15 according to number of bits per pixel */
	mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
	switch (mode) {
	case 0:
		pr_debug("fb%d: text mode\n", info->node);
		svga_set_textmode_vga_regs();
		svga_wseq_mask(0x15, 0x00, 0xFE);
		svga_wcrt_mask(0x11, 0x60, 0x70);
		break;
	case 1:
		pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
		vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
		svga_wseq_mask(0x15, 0x20, 0xFE);
		svga_wcrt_mask(0x11, 0x00, 0x70);
		break;
	case 2:
		pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
		svga_wseq_mask(0x15, 0x00, 0xFE);
		svga_wcrt_mask(0x11, 0x00, 0x70);
		break;
	case 3:
		pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
		svga_wseq_mask(0x15, 0x22, 0xFE);
		break;
	case 4:
		pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
		svga_wseq_mask(0x15, 0xB6, 0xFE);
		break;
	case 5:
		pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
		svga_wseq_mask(0x15, 0xAE, 0xFE);
		break;
	default:
		printk(KERN_ERR "vt8623fb: unsupported mode - bug\n");
		return (-EINVAL);
	}

	vt8623_set_pixclock(info, info->var.pixclock);
	svga_set_timings(&vt8623_timing_regs, &(info->var), 1, 1,
			 (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
			 1, info->node);

	/* Clear the visible framebuffer */
	memset_io(info->screen_base, 0x00, screen_size);

	/* Device and screen back on */
	svga_wcrt_mask(0x17, 0x80, 0x80);
	svga_wcrt_mask(0x36, 0x00, 0x30);
	svga_wseq_mask(0x01, 0x00, 0x20);

	return 0;
}
509
510
/* Set one palette entry. In 0/4/8 bpp modes the value is written to the
 * hardware DAC (color components are 16-bit fbdev values, the DAC takes 6
 * bits, hence the >> 10); in 16/24/32 bpp it fills the pseudo-palette used
 * by the generic drawing functions. transp is ignored. */
static int vt8623fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
				u_int transp, struct fb_info *fb)
{
	switch (fb->var.bits_per_pixel) {
	case 0:
	case 4:
		if (regno >= 16)
			return -EINVAL;

		/* Mask to 4 planes, then write index and RGB in DAC order */
		outb(0x0F, VGA_PEL_MSK);
		outb(regno, VGA_PEL_IW);
		outb(red >> 10, VGA_PEL_D);
		outb(green >> 10, VGA_PEL_D);
		outb(blue >> 10, VGA_PEL_D);
		break;
	case 8:
		if (regno >= 256)
			return -EINVAL;

		outb(0xFF, VGA_PEL_MSK);
		outb(regno, VGA_PEL_IW);
		outb(red >> 10, VGA_PEL_D);
		outb(green >> 10, VGA_PEL_D);
		outb(blue >> 10, VGA_PEL_D);
		break;
	case 16:
		/* Only the 16 pseudo-palette slots exist; excess is a no-op */
		if (regno >= 16)
			return 0;

		if (fb->var.green.length == 5)
			/* 5/5/5 layout */
			((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
				((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
		else if (fb->var.green.length == 6)
			/* 5/6/5 layout */
			((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
				((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
		else
			return -EINVAL;
		break;
	case 24:
	case 32:
		if (regno >= 16)
			return 0;

		/* ((transp & 0xFF00) << 16) */
		((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
			(green & 0xFF00) | ((blue & 0xFF00) >> 8);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
564
565
566static int vt8623fb_blank(int blank_mode, struct fb_info *info)
567{
568 switch (blank_mode) {
569 case FB_BLANK_UNBLANK:
570 pr_debug("fb%d: unblank\n", info->node);
571 svga_wcrt_mask(0x36, 0x00, 0x30);
572 svga_wseq_mask(0x01, 0x00, 0x20);
573 break;
574 case FB_BLANK_NORMAL:
575 pr_debug("fb%d: blank\n", info->node);
576 svga_wcrt_mask(0x36, 0x00, 0x30);
577 svga_wseq_mask(0x01, 0x20, 0x20);
578 break;
579 case FB_BLANK_HSYNC_SUSPEND:
580 pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
581 svga_wcrt_mask(0x36, 0x10, 0x30);
582 svga_wseq_mask(0x01, 0x20, 0x20);
583 break;
584 case FB_BLANK_VSYNC_SUSPEND:
585 pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
586 svga_wcrt_mask(0x36, 0x20, 0x30);
587 svga_wseq_mask(0x01, 0x20, 0x20);
588 break;
589 case FB_BLANK_POWERDOWN:
590 pr_debug("fb%d: DPMS off (no sync)\n", info->node);
591 svga_wcrt_mask(0x36, 0x30, 0x30);
592 svga_wseq_mask(0x01, 0x20, 0x20);
593 break;
594 }
595
596 return 0;
597}
598
599
600static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
601{
602 unsigned int offset;
603
604 /* Calculate the offset */
605 if (var->bits_per_pixel == 0) {
606 offset = (var->yoffset / 16) * var->xres_virtual + var->xoffset;
607 offset = offset >> 3;
608 } else {
609 offset = (var->yoffset * info->fix.line_length) +
610 (var->xoffset * var->bits_per_pixel / 8);
611 offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 1);
612 }
613
614 /* Set the offset */
615 svga_wcrt_multi(vt8623_start_address_regs, offset);
616
617 return 0;
618}
619
620
621/* ------------------------------------------------------------------------- */
622
623
/* Frame buffer operations */

/* fbdev entry points; copyarea has no accelerated path and always uses
 * the generic cfb implementation. */
static struct fb_ops vt8623fb_ops = {
	.owner		= THIS_MODULE,
	.fb_open	= vt8623fb_open,
	.fb_release	= vt8623fb_release,
	.fb_check_var	= vt8623fb_check_var,
	.fb_set_par	= vt8623fb_set_par,
	.fb_setcolreg	= vt8623fb_setcolreg,
	.fb_blank	= vt8623fb_blank,
	.fb_pan_display	= vt8623fb_pan_display,
	.fb_fillrect	= vt8623fb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= vt8623fb_imageblit,
	.fb_get_caps    = svga_get_caps,
};
640
641
642/* PCI probe */
643
644static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
645{
646 struct fb_info *info;
647 struct vt8623fb_info *par;
648 unsigned int memsize1, memsize2;
649 int rc;
650
651 /* Ignore secondary VGA device because there is no VGA arbitration */
652 if (! svga_primary_device(dev)) {
653 dev_info(&(dev->dev), "ignoring secondary device\n");
654 return -ENODEV;
655 }
656
657 /* Allocate and fill driver data structure */
658 info = framebuffer_alloc(sizeof(struct vt8623fb_info), NULL);
659 if (! info) {
660 dev_err(&(dev->dev), "cannot allocate memory\n");
661 return -ENOMEM;
662 }
663
664 par = info->par;
665 mutex_init(&par->open_lock);
666
667 info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
668 info->fbops = &vt8623fb_ops;
669
670 /* Prepare PCI device */
671
672 rc = pci_enable_device(dev);
673 if (rc < 0) {
674 dev_err(&(dev->dev), "cannot enable PCI device\n");
675 goto err_enable_device;
676 }
677
678 rc = pci_request_regions(dev, "vt8623fb");
679 if (rc < 0) {
680 dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
681 goto err_request_regions;
682 }
683
684 info->fix.smem_start = pci_resource_start(dev, 0);
685 info->fix.smem_len = pci_resource_len(dev, 0);
686 info->fix.mmio_start = pci_resource_start(dev, 1);
687 info->fix.mmio_len = pci_resource_len(dev, 1);
688
689 /* Map physical IO memory address into kernel space */
690 info->screen_base = pci_iomap(dev, 0, 0);
691 if (! info->screen_base) {
692 rc = -ENOMEM;
693 dev_err(&(dev->dev), "iomap for framebuffer failed\n");
694 goto err_iomap_1;
695 }
696
697 par->mmio_base = pci_iomap(dev, 1, 0);
698 if (! par->mmio_base) {
699 rc = -ENOMEM;
700 dev_err(&(dev->dev), "iomap for MMIO failed\n");
701 goto err_iomap_2;
702 }
703
704 /* Find how many physical memory there is on card */
705 memsize1 = (vga_rseq(NULL, 0x34) + 1) >> 1;
706 memsize2 = vga_rseq(NULL, 0x39) << 2;
707
708 if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
709 info->screen_size = memsize1 << 20;
710 else {
711 dev_err(&(dev->dev), "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
712 info->screen_size = 16 << 20;
713 }
714
715 info->fix.smem_len = info->screen_size;
716 strcpy(info->fix.id, "VIA VT8623");
717 info->fix.type = FB_TYPE_PACKED_PIXELS;
718 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
719 info->fix.ypanstep = 0;
720 info->fix.accel = FB_ACCEL_NONE;
721 info->pseudo_palette = (void*)par->pseudo_palette;
722
723 /* Prepare startup mode */
724
725 rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
726 if (! ((rc == 1) || (rc == 2))) {
727 rc = -EINVAL;
728 dev_err(&(dev->dev), "mode %s not found\n", mode);
729 goto err_find_mode;
730 }
731
732 rc = fb_alloc_cmap(&info->cmap, 256, 0);
733 if (rc < 0) {
734 dev_err(&(dev->dev), "cannot allocate colormap\n");
735 goto err_alloc_cmap;
736 }
737
738 rc = register_framebuffer(info);
739 if (rc < 0) {
740 dev_err(&(dev->dev), "cannot register framebugger\n");
741 goto err_reg_fb;
742 }
743
744 printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
745 pci_name(dev), info->fix.smem_len >> 20);
746
747 /* Record a reference to the driver data */
748 pci_set_drvdata(dev, info);
749
750#ifdef CONFIG_MTRR
751 if (mtrr) {
752 par->mtrr_reg = -1;
753 par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
754 }
755#endif
756
757 return 0;
758
759 /* Error handling */
760err_reg_fb:
761 fb_dealloc_cmap(&info->cmap);
762err_alloc_cmap:
763err_find_mode:
764 pci_iounmap(dev, par->mmio_base);
765err_iomap_2:
766 pci_iounmap(dev, info->screen_base);
767err_iomap_1:
768 pci_release_regions(dev);
769err_request_regions:
770/* pci_disable_device(dev); */
771err_enable_device:
772 framebuffer_release(info);
773 return rc;
774}
775
776/* PCI remove */
777
778static void __devexit vt8623_pci_remove(struct pci_dev *dev)
779{
780 struct fb_info *info = pci_get_drvdata(dev);
781 struct vt8623fb_info *par = info->par;
782
783 if (info) {
784#ifdef CONFIG_MTRR
785 if (par->mtrr_reg >= 0) {
786 mtrr_del(par->mtrr_reg, 0, 0);
787 par->mtrr_reg = -1;
788 }
789#endif
790
791 unregister_framebuffer(info);
792 fb_dealloc_cmap(&info->cmap);
793
794 pci_iounmap(dev, info->screen_base);
795 pci_iounmap(dev, par->mmio_base);
796 pci_release_regions(dev);
797/* pci_disable_device(dev); */
798
799 pci_set_drvdata(dev, NULL);
800 framebuffer_release(info);
801 }
802}
803
804
805#ifdef CONFIG_PM
806/* PCI suspend */
807
808static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
809{
810 struct fb_info *info = pci_get_drvdata(dev);
811 struct vt8623fb_info *par = info->par;
812
813 dev_info(&(dev->dev), "suspend\n");
814
815 acquire_console_sem();
816 mutex_lock(&(par->open_lock));
817
818 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
819 mutex_unlock(&(par->open_lock));
820 release_console_sem();
821 return 0;
822 }
823
824 fb_set_suspend(info, 1);
825
826 pci_save_state(dev);
827 pci_disable_device(dev);
828 pci_set_power_state(dev, pci_choose_state(dev, state));
829
830 mutex_unlock(&(par->open_lock));
831 release_console_sem();
832
833 return 0;
834}
835
836
837/* PCI resume */
838
839static int vt8623_pci_resume(struct pci_dev* dev)
840{
841 struct fb_info *info = pci_get_drvdata(dev);
842 struct vt8623fb_info *par = info->par;
843
844 dev_info(&(dev->dev), "resume\n");
845
846 acquire_console_sem();
847 mutex_lock(&(par->open_lock));
848
849 if (par->ref_count == 0) {
850 mutex_unlock(&(par->open_lock));
851 release_console_sem();
852 return 0;
853 }
854
855 pci_set_power_state(dev, PCI_D0);
856 pci_restore_state(dev);
857
858 if (pci_enable_device(dev))
859 goto fail;
860
861 pci_set_master(dev);
862
863 vt8623fb_set_par(info);
864 fb_set_suspend(info, 0);
865
866 mutex_unlock(&(par->open_lock));
867fail:
868 release_console_sem();
869
870 return 0;
871}
872#else
873#define vt8623_pci_suspend NULL
874#define vt8623_pci_resume NULL
875#endif /* CONFIG_PM */
876
/* List of boards that we are trying to support */

static struct pci_device_id vt8623_devices[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3122)},	/* VT8623 [Apollo CLE266] */
	{0, 0, 0, 0, 0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, vt8623_devices);

/* PCI driver glue; suspend/resume compile to NULL without CONFIG_PM */
static struct pci_driver vt8623fb_pci_driver = {
	.name		= "vt8623fb",
	.id_table	= vt8623_devices,
	.probe		= vt8623_pci_probe,
	.remove		= __devexit_p(vt8623_pci_remove),
	.suspend	= vt8623_pci_suspend,
	.resume		= vt8623_pci_resume,
};
894
/* Cleanup */

/* Module exit: unregister the PCI driver; per-device teardown happens in
 * vt8623_pci_remove. */
static void __exit vt8623fb_cleanup(void)
{
	pr_debug("vt8623fb: cleaning up\n");
	pci_unregister_driver(&vt8623fb_pci_driver);
}
902
/* Driver Initialisation */

/* Module init: when built-in, honour "video=vt8623fb:<mode>" from the
 * kernel command line before registering the PCI driver. */
int __init vt8623fb_init(void)
{

#ifndef MODULE
	char *option = NULL;

	if (fb_get_options("vt8623fb", &option))
		return -ENODEV;

	/* A non-empty option string overrides the default mode */
	if (option && *option)
		mode = option;
#endif

	pr_debug("vt8623fb: initializing\n");
	return pci_register_driver(&vt8623fb_pci_driver);
}
921
922/* ------------------------------------------------------------------------- */
923
/* Modularization */

module_init(vt8623fb_init);
module_exit(vt8623fb_cleanup);